nordugrid-arc-7.1.1/Makefile.am:

# /opt/local is the location for macports on MacOS X
ACLOCAL_AMFLAGS = -I m4 `test -d /opt/local/share/aclocal && echo -I /opt/local/share/aclocal`

if SWIG_ENABLED
SWIG_SD = swig
endif

SUBDIRS = src include $(SWIG_SD) python $(POSUB) debian
DIST_SUBDIRS = src include swig python po debian

EXTRA_DIST = nordugrid-arc.spec autogen.sh LICENSE NOTICE

nordugrid-arc-7.1.1/configure:

#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for nordugrid-arc 7.1.1.
#
# Report bugs to <http://bugzilla.nordugrid.org/>.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
## -------------------- ##
## M4sh Initialization. ##
## -------------------- ##

# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
  emulate sh
  NULLCMD=:
  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
  # is contrary to our usage.  Disable this feature.
  alias -g '${1+"$@"}'='"$@"'
  setopt NO_GLOB_SUBST
else
  case `(set -o) 2>/dev/null` in #(
  *posix*) :
    set -o posix ;; #(
  *) :
     ;;
esac
fi


as_nl='
'
export as_nl
# Printing a long string crashes Solaris 7 /usr/bin/printf.
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
# Prefer a ksh shell builtin over an external printf program on Solaris,
# but without wasting forks for bash or zsh.
if test -z "$BASH_VERSION$ZSH_VERSION" \
    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
  as_echo='print -r --'
  as_echo_n='print -rn --'
elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
  as_echo='printf %s\n'
  as_echo_n='printf %s'
else
  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
    as_echo_n='/usr/ucb/echo -n'
  else
    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
    as_echo_n_body='eval
      arg=$1;
      case $arg in #(
      *"$as_nl"*)
        expr "X$arg" : "X\\(.*\\)$as_nl";
        arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
      esac;
      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
    '
    export as_echo_n_body
    as_echo_n='sh -c $as_echo_n_body as_echo'
  fi
  export as_echo_body
  as_echo='sh -c $as_echo_body as_echo'
fi
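
# The `if SWIG_ENABLED' conditional in the Makefile.am shown above is the
# Automake side of an AM_CONDITIONAL defined while this script was being
# generated.  A minimal sketch of that configure.ac pattern (illustrative
# only -- the switch name matches the --disable-swig option documented in
# the help text below, but the shell variable name here is hypothetical):
#
#   AC_ARG_ENABLE([swig],
#     [AS_HELP_STRING([--disable-swig], [disable all bindings through SWIG])],
#     [enables_swig=$enableval], [enables_swig=yes])
#   AM_CONDITIONAL([SWIG_ENABLED], [test "x$enables_swig" = "xyes"])
#
# Automake then substitutes SWIG_ENABLED_TRUE/SWIG_ENABLED_FALSE (both
# appear in ac_subst_vars further below) into every generated Makefile.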
if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. 
alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1 test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. 
$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org and $0: http://bugzilla.nordugrid.org/ about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. 
If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 
  as_mkdir_p='mkdir -p "$as_dir"'
else
  test -d ./-p && rmdir ./-p
  as_mkdir_p=false
fi

as_test_x='test -x'
as_executable_p=as_fn_executable_p

# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"

# Sed expression to map a string onto a valid variable name.
as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"

SHELL=${CONFIG_SHELL-/bin/sh}
export SHELL

test -n "$DJDIR" || exec 7<&0 </dev/null
exec 6>&1

# Name of the host.
# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
# so uname gets run too.
ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`

#
# Initializations.
#
ac_default_prefix=/usr/local
ac_clean_files=
ac_config_libobj_dir=.
LIBOBJS=
cross_compiling=no
subdirs=
MFLAGS=
MAKEFLAGS=

# Identity of this package.
PACKAGE_NAME='nordugrid-arc'
PACKAGE_TARNAME='nordugrid-arc'
PACKAGE_VERSION='7.1.1'
PACKAGE_STRING='nordugrid-arc 7.1.1'
PACKAGE_BUGREPORT='http://bugzilla.nordugrid.org/'
PACKAGE_URL=''

ac_unique_file="Makefile.am"
# Factoring default headers for most tests.
ac_includes_default="\
#include <stdio.h>
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#ifdef STDC_HEADERS
# include <stdlib.h>
# include <stddef.h>
#else
# ifdef HAVE_STDLIB_H
#  include <stdlib.h>
# endif
#endif
#ifdef HAVE_STRING_H
# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
#  include <memory.h>
# endif
# include <string.h>
#endif
#ifdef HAVE_STRINGS_H
# include <strings.h>
#endif
#ifdef HAVE_INTTYPES_H
# include <inttypes.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif"

gt_needs=
ac_header_list=
ac_func_list=
ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS SPECDATE DATER DATE posix_shell nodename gnu_time tmp_dir arc_location HED_ENABLED_FALSE HED_ENABLED_TRUE ARCREST_ENABLED_FALSE ARCREST_ENABLED_TRUE DATA_CLIENT_ENABLED_FALSE DATA_CLIENT_ENABLED_TRUE CREDENTIALS_CLIENT_ENABLED_FALSE CREDENTIALS_CLIENT_ENABLED_TRUE COMPUTE_CLIENT_ENABLED_FALSE COMPUTE_CLIENT_ENABLED_TRUE DATADELIVERY_SERVICE_ENABLED_FALSE DATADELIVERY_SERVICE_ENABLED_TRUE CANDYPOND_ENABLED_FALSE CANDYPOND_ENABLED_TRUE MONITOR_ENABLED_FALSE MONITOR_ENABLED_TRUE LDAP_SERVICE_ENABLED_FALSE LDAP_SERVICE_ENABLED_TRUE INTERNAL_ENABLED_FALSE INTERNAL_ENABLED_TRUE A_REX_SERVICE_ENABLED_FALSE A_REX_SERVICE_ENABLED_TRUE ALTPYDOXYGEN_FALSE ALTPYDOXYGEN_TRUE PYDOXYGEN_FALSE PYDOXYGEN_TRUE DOC_ENABLED_FALSE DOC_ENABLED_TRUE DOT DOXYGEN PDFLATEX LIBRESOLV LIBOBJS bashcompdir BASH_COMPLETION_LIBS BASH_COMPLETION_CFLAGS DLOPEN_LIBS UUID_LIBS PERL_TEST_DIR SRM_DMC_ENABLED_FALSE SRM_DMC_ENABLED_TRUE CPPUNIT_ENABLED_FALSE CPPUNIT_ENABLED_TRUE XMLSEC_ENABLED_FALSE XMLSEC_ENABLED_TRUE XROOTD_ENABLED_FALSE XROOTD_ENABLED_TRUE S3_DMC_ENABLED_FALSE S3_DMC_ENABLED_TRUE GFAL_ENABLED_FALSE GFAL_ENABLED_TRUE MOCK_DMC_ENABLED_FALSE MOCK_DMC_ENABLED_TRUE GRIDFTP_ENABLED_FALSE GRIDFTP_ENABLED_TRUE GLOBUSUTILS_ENABLED_FALSE GLOBUSUTILS_ENABLED_TRUE XROOTD_LIBS XROOTD_CPPFLAGS S3_LIBS S3_CPPFLAGS GFAL2_LIBS GFAL2_CFLAGS LCMAPS_LIBS LCMAPS_CFLAGS LCMAPS_LOCATION LCAS_LIBS LCAS_CFLAGS LCAS_LOCATION DEFAULT_GLOBUS_LOCATION GLOBUS_OPENSSL_LIBS GLOBUS_OPENSSL_CFLAGS GLOBUS_OPENSSL_MODULE_LIBS GLOBUS_OPENSSL_MODULE_CFLAGS GLOBUS_GSI_CREDENTIAL_LIBS GLOBUS_GSI_CREDENTIAL_CFLAGS GLOBUS_GSI_CERT_UTILS_LIBS GLOBUS_GSI_CERT_UTILS_CFLAGS GLOBUS_IO_LIBS GLOBUS_IO_CFLAGS GLOBUS_FTP_CONTROL_LIBS GLOBUS_FTP_CONTROL_CFLAGS GLOBUS_FTP_CLIENT_LIBS GLOBUS_FTP_CLIENT_CFLAGS GLOBUS_GSI_CALLBACK_LIBS GLOBUS_GSI_CALLBACK_CFLAGS GLOBUS_GSS_ASSIST_LIBS GLOBUS_GSS_ASSIST_CFLAGS GLOBUS_GSSAPI_GSI_LIBS GLOBUS_GSSAPI_GSI_CFLAGS GPT_QUERY GPT_FLAVOR_CONFIGURATION GLOBUS_MAKEFILE_HEADER GLOBUS_COMMON_LIBS GLOBUS_COMMON_CFLAGS SQLITEJSTORE_ENABLED_FALSE SQLITEJSTORE_ENABLED_TRUE ZLIB_LIBS ZLIB_CFLAGS monitor_prefix XMLSEC_OPENSSL_LIBS XMLSEC_OPENSSL_CFLAGS XMLSEC_LIBS XMLSEC_CFLAGS MACOSX_FALSE MACOSX_TRUE LDNS_ENABLED_FALSE LDNS_ENABLED_TRUE LDNS_CONFIG LDNS_LIBS LDNS_CFLAGS TEST_DIR CPPUNIT_CONFIG CPPUNIT_LIBS CPPUNIT_CFLAGS SQLITE_ENABLED_FALSE SQLITE_ENABLED_TRUE SQLITE_LIBS SQLITE_CFLAGS NSS_ENABLED_FALSE NSS_ENABLED_TRUE NSS_LIBS NSS_CFLAGS OPENSSL_LIBS OPENSSL_CFLAGS LIBXML2_LIBS LIBXML2_CFLAGS GLIBMM_LIBS GLIBMM_CFLAGS SYSTEMD_DAEMON_LIBS PYLINT_ENABLED_FALSE PYLINT_ENABLED_TRUE PYLINT_ARGS_ARGUMENTS_DIFFER PYLINT_ARGS PYLINT ALTPYTHON3_FALSE ALTPYTHON3_TRUE ALTPYTHON_ENABLED_FALSE ALTPYTHON_ENABLED_TRUE ALTPYTHON_SITE_LIB ALTPYTHON_SITE_ARCH ALTPYTHON_EXT_SUFFIX ALTPYTHON_VERSION ALTPYTHON_LIBS ALTPYTHON_CFLAGS ALTPYTHON PYTHON_SERVICE_FALSE PYTHON_SERVICE_TRUE PYTHON_SWIG_ENABLED_FALSE PYTHON_SWIG_ENABLED_TRUE PYTHON3_FALSE PYTHON3_TRUE PYTHON_ENABLED_FALSE PYTHON_ENABLED_TRUE PYTHON_SITE_LIB PYTHON_SITE_ARCH PYTHON_EXT_SUFFIX PYTHON_VERSION PYTHON_LIBS PYTHON_CFLAGS PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PYTHON SWIG_ENABLED_FALSE SWIG_ENABLED_TRUE SWIG_PYTHON_NAMING SWIG2 SWIG PEDANTIC_COMPILE_FALSE PEDANTIC_COMPILE_TRUE AM_CXXFLAGS pkgconfigdir PKG_CONFIG POSUB LTLIBINTL LIBINTL INTLLIBS LTLIBICONV LIBICONV INTL_MACOSX_LIBS XGETTEXT_EXTRA_OPTIONS MSGMERGE XGETTEXT_015 XGETTEXT GMSGFMT_015 MSGFMT_015 GMSGFMT MSGFMT GETTEXT_MACRO_VERSION USE_NLS cronddir SYSV_SCRIPTS_ENABLED_FALSE SYSV_SCRIPTS_ENABLED_TRUE initddir SYSTEMD_UNITS_ENABLED_FALSE SYSTEMD_UNITS_ENABLED_TRUE unitsdir pkgdatasubdir pkgdatadir_rel_to_pkglibexecdir bindir_rel_to_pkglibexecdir sbindir_rel_to_pkglibexecdir pkglibdir_rel_to_pkglibexecdir pkglibexecsubdir pkglibsubdir libsubdir ARCXMLSEC_CFLAGS ARCXMLSEC_LIBS ARCWSSECURITY_CFLAGS ARCWSSECURITY_LIBS ARCWSADDRESSING_CFLAGS ARCWSADDRESSING_LIBS ARCINFOSYS_CFLAGS ARCINFOSYS_LIBS ARCOTOKENS_CFLAGS ARCOTOKENS_LIBS ARCSECURITY_CFLAGS ARCSECURITY_LIBS ARCMESSAGE_CFLAGS ARCMESSAGE_LIBS ARCLOADER_CFLAGS ARCLOADER_LIBS ARCJOB_CFLAGS ARCJOB_LIBS ARCDATA_CFLAGS ARCDATA_LIBS ARCCREDENTIAL_CFLAGS ARCCREDENTIAL_LIBS ARCCOMMON_CFLAGS ARCCOMMON_LIBS ARCCLIENT_CFLAGS ARCCLIENT_LIBS pkglibexecdir extpkglibdir pkglibdir pkgincludedir pkgdatadir PERL CXXCPP LT_SYS_LIBRARY_PATH OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL MANIFEST_TOOL RANLIB ac_ct_AR AR DLLTOOL OBJDUMP NM ac_ct_DUMPBIN DUMPBIN LD FGREP SED host_os host_vendor host_cpu host build_os build_vendor build_cpu build LIBTOOL LN_S EGREP GREP CPP am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE ac_ct_CC CFLAGS CC am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__include DEPDIR OBJEXT EXEEXT ac_ct_CXX CPPFLAGS LDFLAGS CXXFLAGS CXX ARC_VERSION ARC_VERSION_NUM ARC_VERSION_PATCH ARC_VERSION_MINOR ARC_VERSION_MAJOR debianversion fedorasetupopts fedorarelease preversion baseversion AM_BACKSLASH AM_DEFAULT_VERBOSITY AM_DEFAULT_V AM_V am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir runstatedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL am__quote'
ac_subst_files=''
ac_user_opts=' enable_option_checking enable_silent_rules enable_dependency_tracking enable_static enable_shared with_pic enable_fast_install with_aix_soname with_gnu_ld with_sysroot enable_libtool_lock with_systemd_units_location with_sysv_scripts_location with_cron_scripts_prefix enable_nls enable_rpath with_libiconv_prefix with_libintl_prefix enable_largefile enable_all enable_all_clients enable_all_data_clients enable_all_services enable_hed enable_pedantic_compile enable_swig_python enable_swig enable_python with_python with_python_site_arch with_python_site_lib enable_altpython with_altpython with_altpython_site_arch with_altpython_site_lib enable_pylint enable_systemd enable_nss enable_cppunit enable_ldns enable_xmlsec1 with_xmlsec1 enable_monitor with_monitor with_zlib enable_sqlitejstore with_flavor with_lcas_location with_lcmaps_location enable_mock_dmc enable_gfal enable_s3 with_s3 enable_xrootd with_xrootd enable_doc enable_a_rex_service enable_internal enable_ldap_service enable_candypond enable_datadelivery_service enable_compute_client enable_credentials_client enable_data_client enable_arcrest_client '
ac_precious_vars='build_alias host_alias target_alias CXX CXXFLAGS LDFLAGS LIBS CPPFLAGS CCC CC CFLAGS CPP LT_SYS_LIBRARY_PATH CXXCPP PKG_CONFIG PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PYTHON_CFLAGS PYTHON_LIBS ALTPYTHON_CFLAGS ALTPYTHON_LIBS GLIBMM_CFLAGS GLIBMM_LIBS LIBXML2_CFLAGS LIBXML2_LIBS OPENSSL_CFLAGS OPENSSL_LIBS NSS_CFLAGS NSS_LIBS SQLITE_CFLAGS SQLITE_LIBS CPPUNIT_CFLAGS CPPUNIT_LIBS LDNS_CFLAGS LDNS_LIBS XMLSEC_CFLAGS XMLSEC_LIBS XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS GLOBUS_COMMON_CFLAGS GLOBUS_COMMON_LIBS GLOBUS_MAKEFILE_HEADER GPT_FLAVOR_CONFIGURATION GPT_QUERY GLOBUS_GSSAPI_GSI_CFLAGS GLOBUS_GSSAPI_GSI_LIBS GLOBUS_GSS_ASSIST_CFLAGS GLOBUS_GSS_ASSIST_LIBS GLOBUS_GSI_CALLBACK_CFLAGS GLOBUS_GSI_CALLBACK_LIBS GLOBUS_FTP_CLIENT_CFLAGS GLOBUS_FTP_CLIENT_LIBS GLOBUS_FTP_CONTROL_CFLAGS GLOBUS_FTP_CONTROL_LIBS GLOBUS_IO_CFLAGS GLOBUS_IO_LIBS GLOBUS_GSI_CERT_UTILS_CFLAGS GLOBUS_GSI_CERT_UTILS_LIBS GLOBUS_GSI_CREDENTIAL_CFLAGS GLOBUS_GSI_CREDENTIAL_LIBS GLOBUS_OPENSSL_MODULE_CFLAGS GLOBUS_OPENSSL_MODULE_LIBS GLOBUS_OPENSSL_CFLAGS GLOBUS_OPENSSL_LIBS GFAL2_CFLAGS GFAL2_LIBS BASH_COMPLETION_CFLAGS BASH_COMPLETION_LIBS'


# Initialize some variables set by options.
ac_init_help=
ac_init_version=false
ac_unrecognized_opts=
ac_unrecognized_sep=
# The variables have the same names as the options, with
# dashes changed to underlines.
cache_file=/dev/null
exec_prefix=NONE
no_create=
no_recursion=
prefix=NONE
program_prefix=NONE
program_suffix=NONE
program_transform_name=s,x,x,
silent=
site=
srcdir=
verbose=
x_includes=NONE
x_libraries=NONE
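
# As the comment below notes, the directory defaults that follow stay
# unexpanded until make substitutes prefix/exec_prefix.  A minimal sketch
# of the resulting two-pass expansion (hypothetical values, plain sh,
# not part of the generated script's own logic):
#
#   prefix=/usr/local
#   exec_prefix='${prefix}'
#   bindir='${exec_prefix}/bin'
#   eval "dir=$bindir"   # first pass:  -> ${prefix}/bin
#   eval "dir=$dir"      # second pass: -> /usr/local/bin
#   echo "$dir"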

# Installation directory options.
# These are left unexpanded so users can "make install exec_prefix=/foo"
# and all the variables that are supposed to be based on exec_prefix
# by default will actually change.
# Use braces instead of parens because sh, perl, etc. also accept them.
# (The list follows the same order as the GNU Coding Standards.)
bindir='${exec_prefix}/bin'
sbindir='${exec_prefix}/sbin'
libexecdir='${exec_prefix}/libexec'
datarootdir='${prefix}/share'
datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
infodir='${datarootdir}/info'
htmldir='${docdir}'
dvidir='${docdir}'
pdfdir='${docdir}'
psdir='${docdir}'
libdir='${exec_prefix}/lib'
localedir='${datarootdir}/locale'
mandir='${datarootdir}/man'

ac_prev=
ac_dashdash=
for ac_option
do
  # If the previous option needs an argument, assign it.
  if test -n "$ac_prev"; then
    eval $ac_prev=\$ac_option
    ac_prev=
    continue
  fi

  case $ac_option in
  *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
  *=)   ac_optarg= ;;
  *)    ac_optarg=yes ;;
  esac

  # Accept the important Cygnus configure options, so we can diagnose typos.

  case $ac_dashdash$ac_option in
  --)
    ac_dashdash=yes ;;

  -bindir | --bindir | --bindi | --bind | --bin | --bi)
    ac_prev=bindir ;;
  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
    bindir=$ac_optarg ;;

  -build | --build | --buil | --bui | --bu)
    ac_prev=build_alias ;;
  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
    build_alias=$ac_optarg ;;

  -cache-file | --cache-file | --cache-fil | --cache-fi \
  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
    ac_prev=cache_file ;;
  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
    cache_file=$ac_optarg ;;

  --config-cache | -C)
    cache_file=config.cache ;;

  -datadir | --datadir | --datadi | --datad)
    ac_prev=datadir ;;
  -datadir=* | --datadir=* | --datadi=* | --datad=*)
    datadir=$ac_optarg ;;

  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
  | --dataroo | --dataro | --datar)
    ac_prev=datarootdir ;;
  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
    datarootdir=$ac_optarg ;;

  -disable-* | --disable-*)
    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
    # Reject names that are not valid shell variable names.
    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
      as_fn_error $? "invalid feature name: $ac_useropt"
    ac_useropt_orig=$ac_useropt
    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
    case $ac_user_opts in
      *" "enable_$ac_useropt" "*) ;;
      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
	 ac_unrecognized_sep=', ';;
    esac
    eval enable_$ac_useropt=no ;;

  -docdir | --docdir | --docdi | --doc | --do)
    ac_prev=docdir ;;
  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
    docdir=$ac_optarg ;;

  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
    ac_prev=dvidir ;;
  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
    dvidir=$ac_optarg ;;

  -enable-* | --enable-*)
    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
    # Reject names that are not valid shell variable names.
    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
      as_fn_error $? "invalid feature name: $ac_useropt"
    ac_useropt_orig=$ac_useropt
    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
    case $ac_user_opts in
      *" "enable_$ac_useropt" "*) ;;
      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
	 ac_unrecognized_sep=', ';;
    esac
    eval enable_$ac_useropt=\$ac_optarg ;;

  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
  | --exec | --exe | --ex)
    ac_prev=exec_prefix ;;
  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
  | --exec=* | --exe=* | --ex=*)
    exec_prefix=$ac_optarg ;;

  -gas | --gas | --ga | --g)
    # Obsolete; use --with-gas.
    with_gas=yes ;;

  -help | --help | --hel | --he | -h)
    ac_init_help=long ;;
  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
    ac_init_help=recursive ;;
  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
    ac_init_help=short ;;

  -host | --host | --hos | --ho)
    ac_prev=host_alias ;;
  -host=* | --host=* | --hos=* | --ho=*)
    host_alias=$ac_optarg ;;

  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
    ac_prev=htmldir ;;
  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
  | --ht=*)
    htmldir=$ac_optarg ;;

  -includedir | --includedir | --includedi | --included | --include \
  | --includ | --inclu | --incl | --inc)
    ac_prev=includedir ;;
  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
  | --includ=* | --inclu=* | --incl=* | --inc=*)
    includedir=$ac_optarg ;;

  -infodir | --infodir | --infodi | --infod | --info | --inf)
    ac_prev=infodir ;;
  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
    infodir=$ac_optarg ;;

  -libdir | --libdir | --libdi | --libd)
    ac_prev=libdir ;;
  -libdir=* | --libdir=* | --libdi=* | --libd=*)
    libdir=$ac_optarg ;;

  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
  | --libexe | --libex | --libe)
    ac_prev=libexecdir ;;
  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
  | --libexe=* | --libex=* | --libe=*)
    libexecdir=$ac_optarg ;;

  -localedir | --localedir | --localedi | --localed | --locale)
    ac_prev=localedir ;;
  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
    localedir=$ac_optarg ;;

  -localstatedir | --localstatedir | --localstatedi | --localstated \
  | --localstate | --localstat | --localsta | --localst | --locals)
    ac_prev=localstatedir ;;
  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
    localstatedir=$ac_optarg ;;

  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
    ac_prev=mandir ;;
  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
    mandir=$ac_optarg ;;

  -nfp | --nfp | --nf)
    # Obsolete; use --without-fp.
    with_fp=no ;;

  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
  | --no-cr | --no-c | -n)
    no_create=yes ;;

  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
    no_recursion=yes ;;

  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
  | --oldin | --oldi | --old | --ol | --o)
    ac_prev=oldincludedir ;;
  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
    oldincludedir=$ac_optarg ;;

  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
    ac_prev=prefix ;;
  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
    prefix=$ac_optarg ;;

  -program-prefix | --program-prefix | --program-prefi | --program-pref \
  | --program-pre | --program-pr | --program-p)
    ac_prev=program_prefix ;;
  -program-prefix=* | --program-prefix=* | --program-prefi=* \
  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
    program_prefix=$ac_optarg ;;

  -program-suffix | --program-suffix | --program-suffi | --program-suff \
  | --program-suf | --program-su | --program-s)
    ac_prev=program_suffix ;;
  -program-suffix=* | --program-suffix=* | --program-suffi=* \
  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
    program_suffix=$ac_optarg ;;

  -program-transform-name | --program-transform-name \
  | --program-transform-nam | --program-transform-na \
  | --program-transform-n | --program-transform- \
  | --program-transform | --program-transfor \
  | --program-transfo | --program-transf \
  | --program-trans | --program-tran \
  | --progr-tra | --program-tr | --program-t)
    ac_prev=program_transform_name ;;
  -program-transform-name=* | --program-transform-name=* \
  | --program-transform-nam=* | --program-transform-na=* \
  | --program-transform-n=* | --program-transform-=* \
  | --program-transform=* | --program-transfor=* \
  | --program-transfo=* | --program-transf=* \
  | --program-trans=* | --program-tran=* \
  | --progr-tra=* | --program-tr=* | --program-t=*)
    program_transform_name=$ac_optarg ;;

  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
    ac_prev=pdfdir ;;
  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
    pdfdir=$ac_optarg ;;

  -psdir | --psdir | --psdi | --psd | --ps)
    ac_prev=psdir ;;
  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
    psdir=$ac_optarg ;;

  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
  | -silent | --silent | --silen | --sile | --sil)
    silent=yes ;;

  -runstatedir | --runstatedir | --runstatedi | --runstated \
  | --runstate | --runstat | --runsta | --runst | --runs \
  | --run | --ru | --r)
    ac_prev=runstatedir ;;
  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
  | --run=* | --ru=* | --r=*)
    runstatedir=$ac_optarg ;;

  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
    ac_prev=sbindir ;;
  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
  | --sbi=* | --sb=*)
    sbindir=$ac_optarg ;;

  -sharedstatedir | --sharedstatedir | --sharedstatedi \
  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
  | --sharedst | --shareds | --shared | --share | --shar \
  | --sha | --sh)
    ac_prev=sharedstatedir ;;
  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
  | --sha=* | --sh=*)
    sharedstatedir=$ac_optarg ;;

  -site | --site | --sit)
    ac_prev=site ;;
  -site=* | --site=* | --sit=*)
    site=$ac_optarg ;;

  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
    ac_prev=srcdir ;;
  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
    srcdir=$ac_optarg ;;

  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
  | --syscon | --sysco | --sysc | --sys | --sy)
    ac_prev=sysconfdir ;;
  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
    sysconfdir=$ac_optarg ;;

  -target | --target | --targe | --targ | --tar | --ta | --t)
    ac_prev=target_alias ;;
  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
    target_alias=$ac_optarg ;;

  -v | -verbose | --verbose | --verbos | --verbo | --verb)
    verbose=yes ;;

  -version | --version | --versio | --versi | --vers | -V)
    ac_init_version=: ;;

  -with-* | --with-*)
    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
    # Reject names that are not valid shell variable names.
    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
      as_fn_error $? "invalid package name: $ac_useropt"
    ac_useropt_orig=$ac_useropt
    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
    case $ac_user_opts in
      *" "with_$ac_useropt" "*) ;;
      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
	 ac_unrecognized_sep=', ';;
    esac
    eval with_$ac_useropt=\$ac_optarg ;;

  -without-* | --without-*)
    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
    # Reject names that are not valid shell variable names.
    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
      as_fn_error $? "invalid package name: $ac_useropt"
    ac_useropt_orig=$ac_useropt
    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
    case $ac_user_opts in
      *" "with_$ac_useropt" "*) ;;
      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
	 ac_unrecognized_sep=', ';;
    esac
    eval with_$ac_useropt=no ;;

  --x)
    # Obsolete; use --with-x.
    with_x=yes ;;

  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
  | --x-incl | --x-inc | --x-in | --x-i)
    ac_prev=x_includes ;;
  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
    x_includes=$ac_optarg ;;

  -x-libraries | --x-libraries | --x-librarie | --x-librari \
  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
    ac_prev=x_libraries ;;
  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
    x_libraries=$ac_optarg ;;

  -*) as_fn_error $? "unrecognized option: \`$ac_option'
Try \`$0 --help' for more information"
    ;;

  *=*)
    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
    # Reject names that are not valid shell variable names.
    case $ac_envvar in #(
      '' | [0-9]* | *[!_$as_cr_alnum]* )
      as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
    esac
    eval $ac_envvar=\$ac_optarg
    export $ac_envvar ;;

  *)
    # FIXME: should be removed in autoconf 3.0.
    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
    : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
    ;;

  esac
done

if test -n "$ac_prev"; then
  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
  as_fn_error $? "missing argument to $ac_option"
fi

if test -n "$ac_unrecognized_opts"; then
  case $enable_option_checking in
    no) ;;
    fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
  esac
fi

# Check all directory arguments for consistency.
for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
		datadir sysconfdir sharedstatedir localstatedir includedir \
		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
		libdir localedir mandir runstatedir
do
  eval ac_val=\$$ac_var
  # Remove trailing slashes.
  case $ac_val in
    */ )
      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
      eval $ac_var=\$ac_val;;
  esac
  # Be sure to have absolute directory names.
  case $ac_val in
    [\\/$]* | ?:[\\/]* )  continue;;
    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
  esac
  as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
done

# There might be people who depend on the old broken behavior: `$host'
# used to hold the argument of --host etc.
# FIXME: To remove some day.
build=$build_alias
host=$host_alias
target=$target_alias

# FIXME: To remove some day.
if test "x$host_alias" != x; then
  if test "x$build_alias" = x; then
    cross_compiling=maybe
  elif test "x$build_alias" != "x$host_alias"; then
    cross_compiling=yes
  fi
fi

ac_tool_prefix=
test -n "$host_alias" && ac_tool_prefix=$host_alias-

test "$silent" = yes && exec 6>/dev/null


ac_pwd=`pwd` && test -n "$ac_pwd" &&
ac_ls_di=`ls -di .` &&
ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
  as_fn_error $? "working directory cannot be determined"
test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
  as_fn_error $? "pwd does not report name of working directory"


# Find the source files, if location was not specified.
if test -z "$srcdir"; then
  ac_srcdir_defaulted=yes
  # Try the directory containing this script, then the parent directory.
  ac_confdir=`$as_dirname -- "$as_myself" ||
$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
	 X"$as_myself" : 'X\(//\)[^/]' \| \
	 X"$as_myself" : 'X\(//\)$' \| \
	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X"$as_myself" |
    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
	    s//\1/
	    q
	  }
	  /^X\(\/\/\)[^/].*/{
	    s//\1/
	    q
	  }
	  /^X\(\/\/\)$/{
	    s//\1/
	    q
	  }
	  /^X\(\/\).*/{
	    s//\1/
	    q
	  }
	  s/.*/./; q'`
  srcdir=$ac_confdir
  if test ! -r "$srcdir/$ac_unique_file"; then
    srcdir=..
  fi
else
  ac_srcdir_defaulted=no
fi
if test ! -r "$srcdir/$ac_unique_file"; then
  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
  as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
fi
ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
ac_abs_confdir=`(
	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
	pwd)`
# When building in place, set srcdir=.
if test "$ac_abs_confdir" = "$ac_pwd"; then
  srcdir=.
fi
# Remove unnecessary trailing slashes from srcdir.
# Double slashes in file names in object file debugging info
# mess up M-x gdb in Emacs.
case $srcdir in
*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
esac
for ac_var in $ac_precious_vars; do
  eval ac_env_${ac_var}_set=\${${ac_var}+set}
  eval ac_env_${ac_var}_value=\$${ac_var}
  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
  eval ac_cv_env_${ac_var}_value=\$${ac_var}
done

#
# Report the --help message.
#
if test "$ac_init_help" = "long"; then
  # Omit some internal or obsolete options to make the list less imposing.
  # This message is too long to be a string in the A/UX 3.1 sh.
  cat <<_ACEOF
\`configure' configures nordugrid-arc 7.1.1 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

To assign environment variables (e.g., CC, CFLAGS...), specify them as
VAR=VALUE.  See below for descriptions of some of the useful variables.

Defaults for the options are specified in brackets.

Configuration:
  -h, --help              display this help and exit
      --help=short        display options specific to this package
      --help=recursive    display the short help of all the included packages
  -V, --version           display version information and exit
  -q, --quiet, --silent   do not print \`checking ...' messages
      --cache-file=FILE   cache test results in FILE [disabled]
  -C, --config-cache      alias for \`--cache-file=config.cache'
  -n, --no-create         do not create output files
      --srcdir=DIR        find the sources in DIR [configure dir or \`..']

Installation directories:
  --prefix=PREFIX         install architecture-independent files in PREFIX
                          [$ac_default_prefix]
  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
                          [PREFIX]

By default, \`make install' will install all the files in
\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
an installation prefix other than \`$ac_default_prefix' using \`--prefix',
for instance \`--prefix=\$HOME'.

For better control, use the options below.

Fine tuning of the installation directories:
  --bindir=DIR            user executables [EPREFIX/bin]
  --sbindir=DIR           system admin executables [EPREFIX/sbin]
  --libexecdir=DIR        program executables [EPREFIX/libexec]
  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
  --libdir=DIR            object code libraries [EPREFIX/lib]
  --includedir=DIR        C header files [PREFIX/include]
  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
  --infodir=DIR           info documentation [DATAROOTDIR/info]
  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
  --mandir=DIR            man documentation [DATAROOTDIR/man]
  --docdir=DIR            documentation root [DATAROOTDIR/doc/nordugrid-arc]
  --htmldir=DIR           html documentation [DOCDIR]
  --dvidir=DIR            dvi documentation [DOCDIR]
  --pdfdir=DIR            pdf documentation [DOCDIR]
  --psdir=DIR             ps documentation [DOCDIR]
_ACEOF

  cat <<\_ACEOF

Program names:
  --program-prefix=PREFIX            prepend PREFIX to installed program names
  --program-suffix=SUFFIX            append SUFFIX to installed program names
  --program-transform-name=PROGRAM   run sed PROGRAM on installed program names

System types:
  --build=BUILD     configure for building on BUILD [guessed]
  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
_ACEOF
fi

if test -n "$ac_init_help"; then
  case $ac_init_help in
     short | recursive ) echo "Configuration of nordugrid-arc 7.1.1:";;
   esac
  cat <<\_ACEOF

Optional Features:
  --disable-option-checking  ignore unrecognized --enable/--with options
  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
  --enable-silent-rules   less verbose build output (undo: "make V=1")
  --disable-silent-rules  verbose build output (undo: "make V=0")
  --enable-dependency-tracking
                          do not reject slow dependency extractors
  --disable-dependency-tracking
                          speeds up one-time build
  --enable-static[=PKGS]  build static libraries [default=no]
  --enable-shared[=PKGS]  build shared libraries [default=yes]
  --enable-fast-install[=PKGS]
                          optimize for fast installation [default=yes]
  --disable-libtool-lock  avoid locking (might break parallel builds)
  --disable-nls           do not use Native Language Support
  --disable-rpath         do not hardcode runtime library paths
  --disable-largefile     omit support for large files
  --disable-all           disables all buildable components. Can be
                          overwritten with --enable-* for group or specific
                          component. It is also possible to use --enable-all
                          to overwrite defaults for most of components.
  --disable-all-clients   disables all buildable client components. Can be
                          overwritten with --enable-* for specific component.
                          It is also possible to use --enable-all-clients to
                          overwrite defaults and --enable-all.
  --disable-all-data-clients
                          disables all buildable client components providing
                          data handling abilities. Can be overwritten with
                          --enable-* for specific component. It is also
                          possible to use --enable-all-data-clients to
                          overwrite defaults, --enable-all and
                          --enable-all-clients.
  --disable-all-services  disables all buildable service componets. Can be
                          overwritten with --enable-* for specific component.
                          It is also possible to use --enable-all-services to
                          overwrite defaults and --enable-all.
  --disable-hed           disable building HED libraries and plugins. Do not
                          do that unless You do not want to build anything.
                          Even in that case better use --disable-all.
  --enable-pedantic-compile
                          add pedantic compiler flags
  --disable-swig-python   disable SWIG python bindings
  --disable-swig          disable all bindings through SWIG
  --disable-python        disable Python components
  --disable-altpython     enable alternative Python binding
  --disable-pylint        disable python example checking using pylint
  --enable-systemd        enable use of the systemd daemon integration
                          features
  --disable-nss           disable use of the mozilla nss library
  --disable-cppunit       disable cppunit-based UNIT testing of code
  --disable-ldns          disable ldns library usage (makes ARCHERY client
                          unavailable)
  --disable-xmlsec1       disable features which need xmlsec1 library
  --enable-monitor        enable use of the monitor
  --disable-sqlitejstore  disable storing local job information in SQLite
  --enable-mock-dmc       enable mock DMC, default is disable
  --enable-gfal           enable the GFAL support, default is disable
  --enable-s3             enable the S3 support, default is disable
  --disable-xrootd        disable the xrootd support, default is enable
  --disable-doc           disable building documentation (requires doxygen
                          and pdflatex)
  --disable-a-rex-service disable building A-Rex service
  --enable-internal       enable building the internal job plugin
  --disable-ldap-service  disable building LDAP Infosystem Service
  --disable-monitor       disable building LDAP Monitor
  --disable-candypond     disable building candypond
  --disable-datadelivery-service
                          disable building DataDelivery service
  --disable-compute-client
                          disable building compute (job management) client
                          tools
  --disable-credentials-client
                          disable building client tools for handling X.509
                          credentials
  --disable-data-client   disable building generic client tools for handling
                          data
  --disable-arcrest-client
                          disables building ARC REST python module.

Optional Packages:
  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
                          both]
  --with-aix-soname=aix|svr4|both
                          shared library versioning (aka "SONAME") variant to
                          provide on AIX, [default=aix].
  --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
  --with-sysroot[=DIR]    Search for dependent libraries within DIR (or the
                          compiler's sysroot if not specified).
  --with-systemd-units-location=
                          Location of the systemd unit files. [[None]]
  --with-sysv-scripts-location=
                          Location of the SYSV init scripts. [[autodetect]]
  --with-cron-scripts-prefix=
                          Specify the location of the cron directory.
                          [[SYSCONFDIR/cron.d]]
  --with-gnu-ld           assume the C compiler uses GNU ld default=no
  --with-libiconv-prefix[=DIR]  search for libiconv in DIR/include and DIR/lib
  --without-libiconv-prefix     don't search for libiconv in includedir and libdir
  --with-libintl-prefix[=DIR]  search for libintl in DIR/include and DIR/lib
  --without-libintl-prefix     don't search for libintl in includedir and libdir
  --with-python=(PYTHON)  specify python program from PATH
  --with-python-site-arch=directory
                          Direcory where Python modules will be installed -
                          defaults is to query the Python binary
  --with-python-site-lib=directory
                          Direcory where Python modules will be installed -
                          defaults is to query the Python binary
  --with-altpython=(PYTHON)
                          specify alternative python program from PATH
  --with-altpython-site-arch=directory
                          Direcory where Python modules will be installed -
                          defaults is to query the Python binary
  --with-altpython-site-lib=directory
                          Direcory where Python modules will be installed -
                          defaults is to query the Python binary
  --with-xmlsec1=(PATH)   xmlsec1 location
  --with-monitor=(PATH)   where to install the monitor, eg /var/www/monitor
                          or /usr/share/arc/monitor
  --with-zlib=PATH        where zlib is installed
  --with-flavor=(flavor)  Specify the gpt build flavor [[autodetect]]
  --with-lcas-location=   Specify the LCAS installation path. [/opt/glite]
  --with-lcmaps-location=
                          Specify the LCMAPS installation path. [/opt/glite]
  --with-s3=(PATH)        libs3 location
  --with-xrootd=(PATH)    Xrootd location

Some influential environment variables:
  CXX         C++ compiler command
  CXXFLAGS    C++ compiler flags
  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
              nonstandard directory <lib dir>
  LIBS        libraries to pass to the linker, e.g. -l<library>
  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
              you have headers in a nonstandard directory <include dir>
  CC          C compiler command
  CFLAGS      C compiler flags
  CPP         C preprocessor
  LT_SYS_LIBRARY_PATH
              User-defined run-time library search path.
  CXXCPP      C++ preprocessor
  PKG_CONFIG  path to pkg-config utility
  PKG_CONFIG_PATH
              directories to add to pkg-config's search path
  PKG_CONFIG_LIBDIR
              path overriding pkg-config's built-in search path
  PYTHON_CFLAGS
              C compiler flags for PYTHON, overriding pkg-config
  PYTHON_LIBS linker flags for PYTHON, overriding pkg-config
  ALTPYTHON_CFLAGS
              C compiler flags for ALTPYTHON, overriding pkg-config
  ALTPYTHON_LIBS
              linker flags for ALTPYTHON, overriding pkg-config
  GLIBMM_CFLAGS
              C compiler flags for GLIBMM, overriding pkg-config
  GLIBMM_LIBS linker flags for GLIBMM, overriding pkg-config
  LIBXML2_CFLAGS
              C compiler flags for LIBXML2, overriding pkg-config
  LIBXML2_LIBS
              linker flags for LIBXML2, overriding pkg-config
  OPENSSL_CFLAGS
              C compiler flags for OPENSSL, overriding pkg-config
  OPENSSL_LIBS
              linker flags for OPENSSL, overriding pkg-config
  NSS_CFLAGS  C compiler flags for NSS, overriding pkg-config
  NSS_LIBS    linker flags for NSS, overriding pkg-config
  SQLITE_CFLAGS
              C compiler flags for SQLITE, overriding pkg-config
  SQLITE_LIBS linker flags for SQLITE, overriding pkg-config
  CPPUNIT_CFLAGS
              C compiler flags for CPPUNIT, overriding pkg-config
  CPPUNIT_LIBS
              linker flags for CPPUNIT, overriding pkg-config
  LDNS_CFLAGS C compiler flags for LDNS, overriding pkg-config
  LDNS_LIBS   linker flags for LDNS, overriding pkg-config
  XMLSEC_CFLAGS
              C compiler flags for XMLSEC, overriding pkg-config
  XMLSEC_LIBS linker flags for XMLSEC, overriding pkg-config
  XMLSEC_OPENSSL_CFLAGS
              C compiler flags for XMLSEC_OPENSSL, overriding pkg-config
  XMLSEC_OPENSSL_LIBS
              linker flags for XMLSEC_OPENSSL, overriding pkg-config
  GLOBUS_COMMON_CFLAGS
              C compiler flags for GLOBUS_COMMON, overriding pkg-config
  GLOBUS_COMMON_LIBS
              linker flags for GLOBUS_COMMON, overriding pkg-config
  GLOBUS_MAKEFILE_HEADER
              path to globus-makefile-header
  GPT_FLAVOR_CONFIGURATION
              path to gpt-flavor-configuration
  GPT_QUERY   path to gpt-query
  GLOBUS_GSSAPI_GSI_CFLAGS
              C compiler flags for GLOBUS_GSSAPI_GSI, overriding pkg-config
  GLOBUS_GSSAPI_GSI_LIBS
              linker flags for GLOBUS_GSSAPI_GSI, overriding pkg-config
  GLOBUS_GSS_ASSIST_CFLAGS
              C compiler flags for GLOBUS_GSS_ASSIST, overriding pkg-config
  GLOBUS_GSS_ASSIST_LIBS
              linker flags for GLOBUS_GSS_ASSIST, overriding pkg-config
  GLOBUS_GSI_CALLBACK_CFLAGS
              C compiler flags for GLOBUS_GSI_CALLBACK, overriding pkg-config
  GLOBUS_GSI_CALLBACK_LIBS
              linker flags for GLOBUS_GSI_CALLBACK, overriding pkg-config
  GLOBUS_FTP_CLIENT_CFLAGS
              C compiler flags for GLOBUS_FTP_CLIENT, overriding pkg-config
  GLOBUS_FTP_CLIENT_LIBS
              linker flags for GLOBUS_FTP_CLIENT, overriding pkg-config
  GLOBUS_FTP_CONTROL_CFLAGS
              C compiler flags for GLOBUS_FTP_CONTROL, overriding pkg-config
  GLOBUS_FTP_CONTROL_LIBS
              linker flags for GLOBUS_FTP_CONTROL, overriding pkg-config
  GLOBUS_IO_CFLAGS
              C compiler flags for GLOBUS_IO, overriding pkg-config
  GLOBUS_IO_LIBS
              linker flags for GLOBUS_IO, overriding pkg-config
  GLOBUS_GSI_CERT_UTILS_CFLAGS
              C compiler flags for GLOBUS_GSI_CERT_UTILS, overriding
              pkg-config
  GLOBUS_GSI_CERT_UTILS_LIBS
              linker flags for GLOBUS_GSI_CERT_UTILS, overriding pkg-config
  GLOBUS_GSI_CREDENTIAL_CFLAGS
              C compiler flags for GLOBUS_GSI_CREDENTIAL, overriding
              pkg-config
  GLOBUS_GSI_CREDENTIAL_LIBS
              linker flags for GLOBUS_GSI_CREDENTIAL, overriding pkg-config
  GLOBUS_OPENSSL_MODULE_CFLAGS
              C compiler flags for GLOBUS_OPENSSL_MODULE, overriding
              pkg-config
  GLOBUS_OPENSSL_MODULE_LIBS
              linker flags for GLOBUS_OPENSSL_MODULE, overriding pkg-config
  GLOBUS_OPENSSL_CFLAGS
              C compiler flags for GLOBUS_OPENSSL, overriding pkg-config
  GLOBUS_OPENSSL_LIBS
              linker flags for GLOBUS_OPENSSL, overriding pkg-config
  GFAL2_CFLAGS
              C compiler flags for GFAL2, overriding pkg-config
  GFAL2_LIBS  linker flags for GFAL2, overriding pkg-config
  BASH_COMPLETION_CFLAGS
              C compiler flags for BASH_COMPLETION, overriding pkg-config
  BASH_COMPLETION_LIBS
              linker flags for BASH_COMPLETION, overriding pkg-config

Use these variables to override the choices made by `configure' or to help
it to find libraries and programs with nonstandard names/locations.

Report bugs to <http://bugzilla.nordugrid.org/>.
_ACEOF
ac_status=$?
fi

if test "$ac_init_help" = "recursive"; then
  # If there are subdirs, report their specific --help.
  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
    test -d "$ac_dir" ||
      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
      continue
    ac_builddir=.

case "$ac_dir" in
.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
*)
  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
  # A ".." for each directory in $ac_dir_suffix.
  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
  case $ac_top_builddir_sub in
  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
  esac ;;
esac
ac_abs_top_builddir=$ac_pwd
ac_abs_builddir=$ac_pwd$ac_dir_suffix
# for backward compatibility:
ac_top_builddir=$ac_top_build_prefix

case $srcdir in
  .)  # We are building in place.
    ac_srcdir=.
    ac_top_srcdir=$ac_top_builddir_sub
    ac_abs_top_srcdir=$ac_pwd ;;
  [\\/]* | ?:[\\/]* )  # Absolute name.
    ac_srcdir=$srcdir$ac_dir_suffix;
    ac_top_srcdir=$srcdir
    ac_abs_top_srcdir=$srcdir ;;
  *) # Relative name.
    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
    ac_top_srcdir=$ac_top_build_prefix$srcdir
    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
esac
ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix

    cd "$ac_dir" || { ac_status=$?; continue; }
    # Check for guested configure.
    if test -f "$ac_srcdir/configure.gnu"; then
      echo &&
      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
    elif test -f "$ac_srcdir/configure"; then
      echo &&
      $SHELL "$ac_srcdir/configure" --help=recursive
    else
      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
    fi || ac_status=$?
    cd "$ac_pwd" || { ac_status=$?; break; }
  done
fi

test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
  cat <<\_ACEOF
nordugrid-arc configure 7.1.1
generated by GNU Autoconf 2.69

Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
  exit
fi

## ------------------------ ##
## Autoconf initialization. ##
## ------------------------ ##

# ac_fn_cxx_try_compile LINENO
# ----------------------------
# Try to compile conftest.$ac_ext, and return whether this succeeded.
ac_fn_cxx_try_compile ()
{
  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
  rm -f conftest.$ac_objext
  if { { ac_try="$ac_compile"
case "(($ac_try" in
  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
  *) ac_try_echo=$ac_try;;
esac
eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
$as_echo "$ac_try_echo"; } >&5
  (eval "$ac_compile") 2>conftest.err
  ac_status=$?
  if test -s conftest.err; then
    grep -v '^ *+' conftest.err >conftest.er1
    cat conftest.er1 >&5
    mv -f conftest.er1 conftest.err
  fi
  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
  test $ac_status = 0; } && {
	 test -z "$ac_cxx_werror_flag" ||
-s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_cxx_try_cpp LINENO # ------------------------ # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_cpp # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES # --------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_cxx_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## --------------------------------------------- ## ## Report this to http://bugzilla.nordugrid.org/ ## ## --------------------------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_header_mongrel # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type # ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES # ---------------------------------------------------- # Tries to find if the field MEMBER exists in type AGGR, after including # INCLUDES, setting cache variable VAR accordingly. ac_fn_c_check_member () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 $as_echo_n "checking for $2.$3... " >&6; } if eval \${$4+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $5 int main () { static $2 ac_aggr; if (ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $5 int main () { static $2 ac_aggr; if (sizeof ac_aggr.$3) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$4=yes" else eval "$4=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$4 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_member # ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES # --------------------------------------------- # Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR # accordingly. ac_fn_c_check_decl () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack as_decl_name=`echo $2|sed 's/ *(.*//'` as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 $as_echo_n "checking whether $as_decl_name is declared... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { #ifndef $as_decl_name #ifdef __cplusplus (void) $as_decl_use; #else (void) $as_decl_name; #endif #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_decl cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by nordugrid-arc $as_me 7.1.1, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. 
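# Illustration (hypothetical invocation, not part of the generated logic):
# a run such as
#   ./configure --silent --prefix=/opt/arc 'CXXFLAGS=-O2 -g'
# is recorded below as " '--prefix=/opt/arc' 'CXXFLAGS=-O2 -g'": --silent
# is stripped, each kept argument is wrapped in single quotes, and an
# embedded single quote would be escaped as '\'' by the sed in the loop.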
ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi gt_needs="$gt_needs " as_fn_append ac_header_list " sys/time.h" as_fn_append ac_header_list " unistd.h" as_fn_append ac_func_list " alarm" # Check that the precious variables saved in the cache have kept the same # value. 
ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. ## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu am__api_version='1.16' ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. 
# They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. 
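# Illustration (sketch of the failure mode): on such a shell
#   ${INSTALL_PROGRAM-${INSTALL}}
# would be terminated at the first `}', so the default would come out as
# the literal text `${INSTALL' with a stray `}' left over; the
# `test -z ... &&' assignments below avoid nested braces entirely.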
test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". as_fn_error $? "ls -t appears to fail. Make sure there is not a broken alias in your environment" "$LINENO" 5 fi if test "$2" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$2" = conftest.file ) then # Ok. : else as_fn_error $? "newly created file is older than distributed files! Check your system clock" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi rm -f conftest.file test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". 
However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null # Check whether --enable-silent-rules was given. if test "${enable_silent_rules+set}" = set; then : enableval=$enable_silent_rules; fi case $enable_silent_rules in # ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=1;; esac am_make=${MAKE-make} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 $as_echo_n "checking whether $am_make supports nested variables... 
" >&6; } if ${am_cv_make_support_nested_variables+:} false; then : $as_echo_n "(cached) " >&6 else if $as_echo 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 $as_echo "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AM_BACKSLASH='\' if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='nordugrid-arc' VERSION='7.1.1' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # mkdir_p='$(MKDIR_P)' # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AMTAR='$${TAR-tar}' # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar pax cpio none' { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a pax tar archive" >&5 $as_echo_n "checking how to create a pax tar archive... " >&6; } # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. _am_tools=${am_cv_prog_tar_pax-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do { echo "$as_me:$LINENO: $_am_tar --version" >&5 ($_am_tar --version) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } && break done am__tar="$_am_tar --format=posix -chf - "'"$$tardir"' am__tar_="$_am_tar --format=posix -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x pax -w "$$tardir"' am__tar_='pax -L -x pax -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H pax -L' am__tar_='find "$tardir" -print | cpio -o -H pax -L' am__untar='cpio -i -H pax -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. 
We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_pax}" && break # tar/untar a dummy directory, and stop if the command works. rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5 (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } rm -rf conftest.dir if test -s conftest.tar; then { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5 ($am__untar <conftest.tar) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } { echo "$as_me:$LINENO: cat conftest.dir/file" >&5 (cat conftest.dir/file) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } grep GrepMe conftest.dir/file >/dev/null 2>&1 && break fi done rm -rf conftest.dir if ${am_cv_prog_tar_pax+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_prog_tar_pax=$_am_tool fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_pax" >&5 $as_echo "$am_cv_prog_tar_pax" >&6; } # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542> Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: <https://www.gnu.org/software/coreutils/>. If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 fi fi ac_config_headers="$ac_config_headers config.h" baseversion=`echo $VERSION | sed 's/[^0-9.].*//'` preversion=`echo $VERSION | sed 's/^[0-9.]*//'` if test "x$baseversion" = "x" ; then baseversion=$VERSION preversion="" fi if test "x$preversion" = "x" ; then fedorarelease="1" fedorasetupopts="-q" debianversion="$baseversion" else fedorarelease="0.$preversion" fedorasetupopts="-q -n %{name}-%{version}$preversion" debianversion="$baseversion~$preversion" fi # numeric ARC_VERSION_* used for API fall back to the current release series (e.g. when 'master' is specified in the VERSION file, "6.0.0" will be used) ARC_VERSION_MAJOR=`echo $VERSION | awk -F. '{print match($1, /^[0-9]+$/) ? $1 : "6"}'` ARC_VERSION_MINOR=`echo $VERSION | awk -F. '{print match($2, /[^ ]/) ? $2 : "0"}'` ARC_VERSION_PATCH=`echo $VERSION | awk -F.
'{print match($3, /[^ ]/) ? $3 : "0"}'` ARC_VERSION_NUM=`printf "0x%02x%02x%02x" $ARC_VERSION_MAJOR $ARC_VERSION_MINOR $ARC_VERSION_PATCH` ARC_VERSION=`echo $ARC_VERSION_MAJOR.$ARC_VERSION_MINOR.$ARC_VERSION_PATCH` # This macro was introduced in autoconf 2.57g? but we currently only require 2.56 ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. 
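# Illustration (assuming GNU g++): the loop below would log the output of
#   g++ --version, g++ -v, g++ -V, g++ -qversion
# to config.log, truncating each option's stderr after 10 lines; options
# the compiler does not understand simply fail, leaving only their exit
# status recorded.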
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C++ compiler works" >&5 $as_echo_n "checking whether the C++ compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. 
break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C++ compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler default output file name" >&5 $as_echo_n "checking for C++ compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdio.h> int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$?
= $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... 
" >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 $as_echo_n "checking whether ${MAKE-make} supports the include directive... " >&6; } cat > confinc.mk << 'END' am__doit: @echo this is the am__doit target >confinc.out .PHONY: am__doit END am__include="#" am__quote= # BSD make does it like this. echo '.include "confinc.mk" # ignored' > confmf.BSD # Other make implementations (GNU, Solaris 10, AIX) do it like this. echo 'include confinc.mk # ignored' > confmf.GNU _am_result=no for s in GNU BSD; do { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5 (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } case $?:`cat confinc.out 2>/dev/null` in #( '0:this is the am__doit target') : case $s in #( BSD) : am__include='.include' am__quote='"' ;; #( *) : am__include='include' am__quote='' ;; esac ;; #( *) : ;; esac if test "$am__include" != "#"; then _am_result="yes ($s style)" break fi done rm -f confinc.* confmf.* { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 $as_echo "${_am_result}" >&6; } # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then : enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi depcc="$CXX" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... 
" >&6; } if ${am_cv_CXX_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. 
# When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 $as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
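# Note: cl.exe is tried only as a last resort below; neither a host-prefixed nor a plain gcc or cc was found on PATH above (and /usr/ucb/cc is deliberately rejected), so Microsoft's compiler driver is the one remaining candidate.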
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... 
" >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 $as_echo_n "checking whether $CC understands -c and -o together... " >&6; } if ${am_cv_prog_cc_c_o+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 $as_echo "$am_cv_prog_cc_c_o" >&6; } if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. 
Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. 
# When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases.
Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=.
for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ?
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" if test "x$ac_cv_header_minix_config_h" = xyes; then : MINIX=yes else MINIX= fi if test "$MINIX" = yes; then $as_echo "#define _POSIX_SOURCE 1" >>confdefs.h $as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h $as_echo "#define _MINIX 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 $as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } if ${ac_cv_safe_to_define___extensions__+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ # define __EXTENSIONS__ 1 $ac_includes_default int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_safe_to_define___extensions__=yes else ac_cv_safe_to_define___extensions__=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 $as_echo "$ac_cv_safe_to_define___extensions__" >&6; } test $ac_cv_safe_to_define___extensions__ = yes && $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h $as_echo "#define _ALL_SOURCE 1" >>confdefs.h $as_echo "#define _GNU_SOURCE 1" >>confdefs.h $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS=$lt_save_ifs ;; esac else enable_static=no fi case `pwd` in *\ * | *\ *) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.4.6' macro_revision='2.4.6' ltmain=$ac_aux_dir/ltmain.sh # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac # Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 $as_echo_n "checking how to print strings... " >&6; } # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "" } case $ECHO in printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 $as_echo "printf" >&6; } ;; print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 $as_echo "print -r" >&6; } ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 $as_echo "cat" >&6; } ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... 
" >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. # Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 $as_echo_n "checking for fgrep... " >&6; } if ${ac_cv_path_FGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_FGREP" || continue # Check for GNU ac_path_FGREP and select it if it is found. 
# Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) test no != "$with_gnu_ld" && break ;; *) test yes != "$with_gnu_ld" && break ;; esac fi done IFS=$lt_save_ifs else lt_cv_path_LD=$LD # Let the user override the test with a path. fi fi LD=$lt_cv_path_LD if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld...
" >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if ${lt_cv_path_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM=$NM else lt_nm_to_check=${ac_tool_prefix}nm if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. tmp_nm=$ac_dir/$lt_tmp_nm if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then # Check to see if the nm accepts a BSD-compat flag. # Adding the 'sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty case $build_os in mingw*) lt_bad_file=conftest.nm/nofile ;; *) lt_bad_file=/dev/null ;; esac case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in *$lt_bad_file* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break 2 ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break 2 ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS=$lt_save_ifs done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test no != "$lt_cv_path_NM"; then NM=$lt_cv_path_NM else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else if test -n "$ac_tool_prefix"; then for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols -headers" ;; *) DUMPBIN=: ;; esac fi if test : != "$DUMPBIN"; then NM=$DUMPBIN fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if ${lt_cv_nm_interface+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } # find the maximum length of command line arguments { $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if ${lt_cv_sys_max_cmd_len+:} false; then : $as_echo_n "(cached) " >&6 else i=0 teststring=ABCD case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. 
# Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test X`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test 17 != "$i" # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. 
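# (Worked example with illustrative numbers only: if the doubling loop
# above stopped at a 262144-character test string, the halving below
# records 131072.  The getconf branch instead scales ARG_MAX by 3/4, e.g.
#
#   getconf ARG_MAX    # => 2097152 on many Linux systems
#
# would be stored as 1572864.)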
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n "$lt_cv_sys_max_cmd_len"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 $as_echo_n "checking how to convert $build file names to $host format... " >&6; } if ${lt_cv_to_host_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac fi to_host_file_cmd=$lt_cv_to_host_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 $as_echo "$lt_cv_to_host_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 $as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } if ${lt_cv_to_tool_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else #assume ordinary cross tools, or native build. lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac fi to_tool_file_cmd=$lt_cv_to_tool_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 $as_echo "$lt_cv_to_tool_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... " >&6; } if ${lt_cv_ld_reload_flag+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in cygwin* | mingw* | pw32* | cegcc*) if test yes != "$GCC"; then reload_cmds=false fi ;; darwin*) if test yes = "$GCC"; then reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. 
set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... " >&6; } if ${lt_cv_deplibs_check_method+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # 'unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # that responds to the $file_magic_cmd with a given extended regex. # If you have 'file' or equivalent on your system and you're not sure # whether 'pass_all' will *always* work, you probably want this one. 
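# (For illustration of the file_magic form, not executed here: on the
# BSDI-style entry below the check effectively runs
#
#   /usr/bin/file -L /shlib/libc.so
#
# and accepts the library only if the output matches the stored extended
# regex, e.g. "... ELF 32-bit LSB shared object ...".)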
case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. if ( file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd* | bitrig*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; os2*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. set dummy ${ac_tool_prefix}dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DLLTOOL"; then ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DLLTOOL=$ac_cv_prog_DLLTOOL if test -n "$DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 $as_echo "$DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DLLTOOL"; then ac_ct_DLLTOOL=$DLLTOOL # Extract the first word of "dlltool", so it can be a program name with args. set dummy dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DLLTOOL"; then ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DLLTOOL="dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL if test -n "$ac_ct_DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 $as_echo "$ac_ct_DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DLLTOOL" = x; then DLLTOOL="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DLLTOOL=$ac_ct_DLLTOOL fi else DLLTOOL="$ac_cv_prog_DLLTOOL" fi test -z "$DLLTOOL" && DLLTOOL=dlltool { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 $as_echo_n "checking how to associate runtime and link libraries... " >&6; } if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh; # decide which one to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd=$ECHO ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 $as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO if test -n "$ac_tool_prefix"; then for ac_prog in ar do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AR" && break done fi if test -z "$AR"; then ac_ct_AR=$AR for ac_prog in ar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_AR" && break done if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi fi : ${AR=ar} : ${AR_FLAGS=cru} { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 $as_echo_n "checking for archiver @FILE support... " >&6; } if ${lt_cv_ar_at_file+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ar_at_file=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if test 0 -eq "$ac_status"; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if test 0 -ne "$ac_status"; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 $as_echo "$lt_cv_ar_at_file" >&6; } if test no = "$lt_cv_ar_at_file"; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in bitrig* | openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if ${lt_cv_sys_global_symbol_pipe+:} false; then : $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. 
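# (Sketch of the input being parsed, assuming a BSD-format nm: a listing
# typically looks like
#
#   0000000000000000 T nm_test_func
#   0000000000000004 B nm_test_var
#
# where the one-letter type code must fall within $symcode and the name
# must match $sympat for the symbol to be picked up.)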
case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test ia64 = "$host_cpu"; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Gets list of data symbols to import. lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" # Adjust the below global symbol transforms to fixup imported variables. lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" lt_c_name_lib_hook="\ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" else # Disable hooks by default. lt_cv_sys_global_symbol_to_import= lt_cdecl_hook= lt_c_name_hook= lt_c_name_lib_hook= fi # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n"\ $lt_cdecl_hook\ " -e 's/^T .* \(.*\)$/extern int \1();/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ $lt_c_name_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" # Transform an extracted symbol line into symbol name with lib prefix and # symbol address. lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ $lt_c_name_lib_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function, # D for any global variable and I for any imported variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. 
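# (Illustrative input only; exact dumpbin formatting varies by MSVC
# version.  A symbol-table line such as
#
#   008 00000000 SECT3 notype () External | nm_test_func
#
# would be rewritten by the script below as "T nm_test_func nm_test_func".)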
lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ " /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ " /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ " {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ " s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Now try to grab the symbols. nlist=conftest.nm if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. 
mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS=conftstm.$ac_objext CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest$ac_exeext; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test yes = "$pipe_works"; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 $as_echo "ok" >&6; } fi # Response file support. if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then nm_file_list_spec='@' fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 $as_echo_n "checking for sysroot... " >&6; } # Check whether --with-sysroot was given. if test "${with_sysroot+set}" = set; then : withval=$with_sysroot; else with_sysroot=no fi lt_sysroot= case $with_sysroot in #( yes) if test yes = "$GCC"; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 $as_echo "$with_sysroot" >&6; } as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 $as_echo "${lt_sysroot:-no}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 $as_echo_n "checking for a working dd... " >&6; } if ${ac_cv_path_lt_DD+:} false; then : $as_echo_n "(cached) " >&6 else printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i : ${lt_DD:=$DD} if test -z "$lt_DD"; then ac_path_lt_DD_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in dd; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_lt_DD" || continue if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: fi $ac_path_lt_DD_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_lt_DD"; then : fi else ac_cv_path_lt_DD=$lt_DD fi rm -f conftest.i conftest2.i conftest.out fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 $as_echo "$ac_cv_path_lt_DD" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 $as_echo_n "checking how to truncate binary pipes... " >&6; }
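# (Rough equivalent of what is probed here, illustration only: with a
# working dd, the first 4096 bytes of a binary stream can be kept via
#
#   ... | dd bs=4096 count=1 2>/dev/null
#
# while the "sed -e 4q" fallback selected below keeps the first four
# lines instead, which is only safe for text.)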
" >&6; } if ${lt_cv_truncate_bin+:} false; then : $as_echo_n "(cached) " >&6 else printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i lt_cv_truncate_bin= if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" fi rm -f conftest.i conftest2.i conftest.out test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 $as_echo "$lt_cv_truncate_bin" >&6; } # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in $*""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then : enableval=$enable_libtool_lock; fi test no = "$enable_libtool_lock" || enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out what ABI is being produced by ac_compile, and set mode # options accordingly. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE=32 ;; *ELF-64*) HPUX_IA64_MODE=64 ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then if test yes = "$lt_cv_prog_gnu_ld"; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; mips64*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then emul=elf case `/usr/bin/file conftest.$ac_objext` in *32-bit*) emul="${emul}32" ;; *64-bit*) emul="${emul}64" ;; esac case `/usr/bin/file conftest.$ac_objext` in *MSB*) emul="${emul}btsmip" ;; *LSB*) emul="${emul}ltsmip" ;; esac case `/usr/bin/file conftest.$ac_objext` in *N32*) emul="${emul}n32" ;; esac LD="${LD-ld} -m $emul" fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. 
# Note that the listed cases only cover the # situations where additional linker options are needed (such as when # doing 32-bit compilation for a host where ld defaults to 64-bit, or # vice versa); the common cases where no linker options are needed do # not appear in the list. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; powerpc64le-*linux*) LD="${LD-ld} -m elf32lppclinux" ;; powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; powerpcle-*linux*) LD="${LD-ld} -m elf64lppc" ;; powerpc-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS=$CFLAGS CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if ${lt_cv_cc_needs_belf+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_cc_needs_belf=yes else lt_cv_cc_needs_belf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test yes != "$lt_cv_cc_needs_belf"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS=$SAVE_CFLAGS fi ;; *-*solaris*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*|x86_64-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available.
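# (For illustration: on such linkers "ld -V" lists emulation names like
# elf_x86_64_sol2, which is why the probe below simply appends _sol2 to
# the emulation chosen above.)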
if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD=${LD-ld}_sol2 fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks=$enable_libtool_lock if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. set dummy ${ac_tool_prefix}mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$MANIFEST_TOOL"; then ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL if test -n "$MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 $as_echo "$MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL # Extract the first word of "mt", so it can be a program name with args. set dummy mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_MANIFEST_TOOL"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL if test -n "$ac_ct_MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 $as_echo "$ac_ct_MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_MANIFEST_TOOL" = x; then MANIFEST_TOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL fi else MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" fi test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 $as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } if ${lt_cv_path_mainfest_tool+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out cat conftest.err >&5 if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 $as_echo "$lt_cv_path_mainfest_tool" >&6; } if test yes != "$lt_cv_path_mainfest_tool"; then MANIFEST_TOOL=: fi case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_ac_ct_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if ${lt_cv_apple_cc_single_mod+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "$LT_MULTI_MODULE"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&5 # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test 0 = "$_lt_result"; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if ${lt_cv_ld_exported_symbols_list+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_ld_exported_symbols_list=yes else lt_cv_ld_exported_symbols_list=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 $as_echo_n "checking for -force_load linker flag... 
" >&6; } if ${lt_cv_ld_force_load+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 echo "$AR cru libconftest.a conftest.o" >&5 $AR cru libconftest.a conftest.o 2>&5 echo "$RANLIB libconftest.a" >&5 $RANLIB libconftest.a 2>&5 cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&5 elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then lt_cv_ld_force_load=yes else cat conftest.err >&5 fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 $as_echo "$lt_cv_ld_force_load" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; 10.[012][,.]*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test yes = "$lt_cv_apple_cc_single_mod"; then _lt_dar_single_mod='$single_module' fi if test yes = "$lt_cv_ld_exported_symbols_list"; then _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' fi if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac # func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x$2 in x) ;; *:) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; *::*) eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; *) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;; esac } for ac_header in dlfcn.h do : ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default " if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done func_stripname_cnf () { case $2 in .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; 
s%$2\$%%"`;; esac } # func_stripname_cnf # Set options enable_dlopen=no enable_win32_dll=no # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS=$lt_save_ifs ;; esac else enable_shared=yes fi # Check whether --with-pic was given. if test "${with_pic+set}" = set; then : withval=$with_pic; lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for lt_pkg in $withval; do IFS=$lt_save_ifs if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS=$lt_save_ifs ;; esac else pic_mode=default fi # Check whether --enable-fast-install was given. if test "${enable_fast_install+set}" = set; then : enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS=$lt_save_ifs ;; esac else enable_fast_install=yes fi shared_archive_member_spec= case $host,$enable_shared in power*-*-aix[5-9]*,yes) { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 $as_echo_n "checking which variant of shared library versioning to provide... " >&6; } # Check whether --with-aix-soname was given. if test "${with_aix_soname+set}" = set; then : withval=$with_aix_soname; case $withval in aix|svr4|both) ;; *) as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 ;; esac lt_cv_with_aix_soname=$with_aix_soname else if ${lt_cv_with_aix_soname+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_with_aix_soname=aix fi with_aix_soname=$lt_cv_with_aix_soname fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 $as_echo "$with_aix_soname" >&6; } if test aix != "$with_aix_soname"; then # For the AIX way of multilib, we name the shared archive member # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, # the AIX toolchain works better with OBJECT_MODE set (default 32). if test 64 = "${OBJECT_MODE-32}"; then shared_archive_member_spec=shr_64 else shared_archive_member_spec=shr fi fi ;; *) with_aix_soname=aix ;; esac # This can be used to rebuild libtool when needed LIBTOOL_DEPS=$ltmain # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' test -z "$LN_S" && LN_S="ln -s" if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if ${lt_cv_objdir+:} false; then : $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. 
lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir cat >>confdefs.h <<_ACEOF #define LT_OBJDIR "$lt_cv_objdir/" _ACEOF case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld=$lt_cv_prog_gnu_ld old_CC=$CC old_CFLAGS=$CFLAGS # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o func_cc_basename $compiler cc_basename=$func_cc_basename_result # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/${ac_tool_prefix}file"; then lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac fi MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. 
if test -f "$ac_dir/file"; then lt_cv_path_MAGIC_CMD=$ac_dir/"file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac fi MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC=$CC ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test yes = "$GCC"; then case $cc_basename in nvcc*) lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; *) lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... 
" >&6; } if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= if test yes = "$GCC"; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi lt_prog_compiler_pic='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. 
# Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 lt_prog_compiler_wl='-Xlinker ' if test -n "$lt_prog_compiler_pic"; then lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' case $cc_basename in nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; esac ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static='$wl-static' ;; esac ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? lt_prog_compiler_static='$wl-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64, which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; tcc*) # Fabrice Bellard et al's Tiny C Compiler lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. 
lt_prog_compiler_static='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; *Sun\ F* | *Sun*Fortran*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Intel*\ [CF]*Compiler*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; *Portland\ Group*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic=$lt_prog_compiler_pic fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 $as_echo "$lt_cv_prog_compiler_pic" >&6; } lt_prog_compiler_pic=$lt_cv_prog_compiler_pic # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test yes = "$lt_cv_prog_compiler_pic_works"; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test yes = "$lt_cv_prog_compiler_static_works"; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links=nottested if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test no = "$hard_links"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ' (' and ')$', so one must not match beginning or # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', # as well as any symbol that contains 'd'. exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test yes != "$GCC"; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd* | bitrig*) with_gnu_ld=no ;; esac ld_shlibs=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test yes = "$with_gnu_ld"; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. 
# However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; *\ \(GNU\ Binutils\)\ [3-9]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test yes = "$lt_use_gnu_ld_interface"; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='$wl' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' export_dynamic_flag_spec='$wl--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test ia64 != "$host_cpu"; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs.
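# As a rough illustration (hypothetical file names): on these hosts the
# runtime loader searches the executable's own directory and PATH for
# DLLs, so the -L$libdir set below only steers the link step to the
# import library, along the lines of:
#   $CC -shared -o cygfoo.dll foo.o -L$libdir -Wl,--out-implib,libfoo.dll.a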
hardcode_libdir_flag_spec='-L$libdir' export_dynamic_flag_spec='$wl--export-all-symbols' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; haiku*) archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' link_all_deplibs=yes ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported shrext_cmds=.dll archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes=yes ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='$wl-rpath,$libdir' export_dynamic_flag_spec='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
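# As a worked example of the expression used below: RANDOM (falling back
# to $$) modulo 4096, halved by integer division, spans 0..2047; times
# 262144 (256 KiB) plus 1342177280 (0x50000000) yields bases from
# 0x50000000 up to 2047*262144+1342177280 = 0x6FFC0000, i.e. exactly the
# 256 KiB-aligned range described above.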
archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test linux-dietlibc = "$host_os"; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test no = "$tmp_diet" then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; nagfor*) # NAGFOR 5.3 tmp_sharedflag='-Wl,-shared' ;; xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi case $cc_basename in tcc*) export_dynamic_flag_spec='-rdynamic' ;; xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test yes = "$supports_anon_versioning"; then 
archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
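# As a rough illustration (hypothetical libdir): with libdir=/usr/local/lib,
# the '$wl-rpath $wl$libdir' spec chosen below expands to
#   -Wl,-rpath -Wl,/usr/local/lib
# which the compiler driver passes through to the linker as
#   -rpath /usr/local/lib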
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test no = "$ld_shlibs"; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. 
# For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then aix_use_runtimelinking=yes break fi done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # traditional, no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. hardcode_direct=no hardcode_direct_absolute=no ;; esac if test yes = "$GCC"; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag="$shared_flag "'$wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi export_dynamic_flag_spec='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. 
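  # (The list itself comes from $export_symbols_cmds set above, which
  # boils down to something like
  #   $NM -Bpg $libobjs | awk '{ print $3 }' | sort -u > $export_symbols
  # plus the symbol-type filtering shown there.)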
always_export_symbols=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' $wl-bernotok' allow_undefined_flag=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. 
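	  # (--whole-archive makes GNU ld pull every member of the following
	  # convenience archives into the link, not just members that happen
	  # to resolve a symbol; --no-whole-archive restores the default
	  # behaviour for the rest of the command line.)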
whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' fi archive_cmds_need_lc=yes archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared libraries. archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported always_export_symbols=yes file_list_spec='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. 
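	# (The expsym command below first checks whether $export_symbols is
	# already a module-definition file - i.e. starts with EXPORTS or
	# LIBRARY - and copies it verbatim in that case; otherwise every
	# listed symbol is rewritten into a "-link -EXPORT:symbol" option.)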
archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, )='true' enable_shared_with_static_runtimes=yes exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib old_postinstall_cmds='chmod 644 $oldlib' postlink_cmds='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. 
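	# ('lib' is the Microsoft librarian; the command below amounts to
	# roughly
	#   lib -OUT:foo.lib a.obj b.obj
	# with foo.lib, a.obj and b.obj standing in for the real arguments.)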
old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' enable_shared_with_static_runtimes=yes ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported if test yes = "$lt_cv_ld_force_load"; then whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec='' fi link_all_deplibs=yes allow_undefined_flag=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test yes = "$GCC"; then archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes export_dynamic_flag_spec='$wl-E' ;; hpux10*) if test yes,no = "$GCC,$with_gnu_ld"; then archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test yes,no = "$GCC,$with_gnu_ld"; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 $as_echo_n "checking if $CC understands -b... " >&6; } if ${lt_cv_prog_compiler__b+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler__b=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -b" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler__b=yes fi else lt_cv_prog_compiler__b=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 $as_echo "$lt_cv_prog_compiler__b" >&6; } if test yes = "$lt_cv_prog_compiler__b"; then archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi ;; esac fi if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test yes = "$GCC"; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. 
# This should be the same for all languages, so no per-tag cache variable. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 $as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } if ${lt_cv_irix_exported_symbol+:} false; then : $as_echo_n "(cached) " >&6 else save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int foo (void) { return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_irix_exported_symbol=yes else lt_cv_irix_exported_symbol=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 $as_echo "$lt_cv_irix_exported_symbol" >&6; } if test yes = "$lt_cv_irix_exported_symbol"; then archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' fi else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; linux*) case $cc_basename in tcc*) # Fabrice Bellard et al's Tiny C Compiler ld_shlibs=yes archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='$wl-rpath,$libdir' export_dynamic_flag_spec='$wl-E' else archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='$wl-rpath,$libdir' fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported shrext_cmds=.dll archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> 
$output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes=yes ;; osf3*) if test yes = "$GCC"; then allow_undefined_flag=' $wl-expect_unresolved $wl\*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test yes = "$GCC"; then allow_undefined_flag=' $wl-expect_unresolved $wl\*' archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test yes = "$GCC"; then wlarc='$wl' archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' archive_cmds='$LD 
-G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='$wl' archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. GCC discards it without '$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test yes = "$GCC"; then whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test sequent = "$host_vendor"; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='$wl-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. 
If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag='$wl-z,text' allow_undefined_flag='$wl-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='$wl-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='$wl-Bexport' runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test sni = "$host_vendor"; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='$wl-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test no = "$ld_shlibs" && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test yes,yes = "$GCC,$enable_shared"; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc=no else lt_cv_archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 $as_echo "$lt_cv_archive_cmds_need_lc" >&6; } archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... 
" >&6; } if test yes = "$GCC"; then case $host_os in darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; *) lt_awk_arg='/^libraries:/' ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; *) lt_sed_strip_eq='s|=/|/|g' ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary... lt_tmp_lt_search_path_spec= lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` # ...but if some path component already ends with the multilib dir we assume # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). case "$lt_multi_os_dir; $lt_search_path_spec " in "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) lt_multi_os_dir= ;; esac for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" elif test -n "$lt_multi_os_dir"; then test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS = " "; FS = "/|\n";} { lt_foo = ""; lt_count = 0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo = "/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's|/\([A-Za-z]:\)|\1|g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. 
soname_spec='$libname$release$shared_ext$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. # To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a(lib.so.V)' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. 
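    # (shlibpath_overrides_runpath=yes means a directory listed in the
    # LIBPATH environment variable takes precedence over any search path
    # recorded into the binary at link time.)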
shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
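  # (64-bit PA dld.sl honours LD_LIBRARY_PATH; the generic PA case
  # further below falls back to SHLIB_PATH instead, which a binary only
  # obeys when it has been marked with +s.)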
library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. hardcode_libdir_flag_spec='-L$libdir' ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
  version_type=linux # correct to gnu/linux during the next big refactor
  need_lib_prefix=no
  need_version=no
  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
  soname_spec='$libname$release$shared_ext$major'
  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
  shlibpath_var=LD_LIBRARY_PATH
  shlibpath_overrides_runpath=no

  # Some binutils ld are patched to set DT_RUNPATH
  if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
  $as_echo_n "(cached) " >&6
else
  lt_cv_shlibpath_overrides_runpath=no
    save_LDFLAGS=$LDFLAGS
    save_libdir=$libdir
    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
      LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */

int
main ()
{

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
  lt_cv_shlibpath_overrides_runpath=yes
fi
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
    LDFLAGS=$save_LDFLAGS
    libdir=$save_libdir

fi

  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath

  # This implies no fast_install, which is unacceptable.
  # Some rework will be needed to allow for fast_install
  # before this can be enabled.
  hardcode_into_libs=yes

  # Add ABI-specific directories to the system library path.
  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"

  # Ideally, we could use ldconfig to report *all* directories which are
  # searched for libraries, however this is still not possible. Aside from not
  # being certain /sbin/ldconfig is available, command
  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
  # even though it is searched at run-time. Try to do the best guess by
  # appending ld.so.conf contents (and includes) to the search path.
  if test -f /etc/ld.so.conf; then
    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
  fi

  # We used to test for /lib/ld.so.1 and disable shared libraries on
  # powerpc, because MkLinux only supported shared libraries with the
  # GNU dynamic linker.  Since this was broken with cross compilers,
  # most powerpc-linux boxes support dynamic linking these days and
  # people can always --disable-shared, the test was removed, and we
  # assume the GNU/Linux dynamic linker is in use.
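  # (The cached test above links a trivial program with the rpath-style
  # flag from $hardcode_libdir_flag_spec pointing at /foo and greps
  # '$OBJDUMP -p' output for a RUNPATH entry: an ld patched to emit
  # DT_RUNPATH instead of DT_RPATH lets LD_LIBRARY_PATH take
  # precedence, hence the result 'yes'.)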
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test yes = "$hardcode_automatic"; then # We can hardcode non-existent directories. if test no != "$hardcode_direct" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && test no != "$hardcode_minus_L"; then # Linking always hardcodes the temporary library directory. hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test relink = "$hardcode_action" || test yes = "$inherit_rpath"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi if test yes != "$enable_dlopen"; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen=load_add_on lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen=LoadLibrary lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen=dlopen lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... 
" >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl else lt_cv_dlopen=dyld lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; tpf*) # Don't try to run any link tests for TPF. We know it's impossible # because TPF is a cross-compiler, and we know how we open DSOs. lt_cv_dlopen=dlopen lt_cv_dlopen_libs= lt_cv_dlopen_self=no ;; *) ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = xyes; then : lt_cv_dlopen=shl_load else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if ${ac_cv_lib_dld_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = xyes; then : lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld else ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = xyes; then : lt_cv_dlopen=dlopen else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if ${ac_cv_lib_svld_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if ${ac_cv_lib_dld_dld_link+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = xyes; then : lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld fi fi fi fi fi fi ;; esac if test no = "$lt_cv_dlopen"; then enable_dlopen=no else enable_dlopen=yes fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS=$CPPFLAGS test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS=$LDFLAGS wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS=$LIBS LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... 
" >&6; } if ${lt_cv_dlopen_self+:} false; then : $as_echo_n "(cached) " >&6 else if test yes = "$cross_compiling"; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test yes = "$lt_cv_dlopen_self"; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } if ${lt_cv_dlopen_self_static+:} false; then : $as_echo_n "(cached) " >&6 else if test yes = "$cross_compiling"; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. 
*/ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS=$save_CPPFLAGS LDFLAGS=$save_LDFLAGS LIBS=$save_LIBS ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP"; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report what library types will actually be built { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... " >&6; } test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. 
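# (For illustration: on AIX one and the same archive can serve both
# roles, carrying the shared object as an ordinary member, e.g.
#   $ ar -t libexample.a
#   shr.o
# with libexample.a and shr.o as example names, which is why enabling
# one flavor may disable the other in the cases below.)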
case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC=$lt_save_CC if test -n "$CXX" && ( test no != "$CXX" && ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || (test g++ != "$CXX"))); then ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... " >&6; } if test -z "$CXXCPP"; then if ${ac_cv_prog_CXXCPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CXXCPP=$CXXCPP fi CXXCPP=$ac_cv_prog_CXXCPP else ac_cv_prog_CXXCPP=$CXXCPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 $as_echo "$CXXCPP" >&6; } ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers.
# On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else _lt_caught_CXX_error=yes fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu archive_cmds_need_lc_CXX=no allow_undefined_flag_CXX= always_export_symbols_CXX=no archive_expsym_cmds_CXX= compiler_needs_object_CXX=no export_dynamic_flag_spec_CXX= hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no hardcode_libdir_flag_spec_CXX= hardcode_libdir_separator_CXX= hardcode_minus_L_CXX=no hardcode_shlibpath_var_CXX=unsupported hardcode_automatic_CXX=no inherit_rpath_CXX=no module_cmds_CXX= module_expsym_cmds_CXX= link_all_deplibs_CXX=unknown old_archive_cmds_CXX=$old_archive_cmds reload_flag_CXX=$reload_flag reload_cmds_CXX=$reload_cmds no_undefined_flag_CXX= whole_archive_flag_spec_CXX= enable_shared_with_static_runtimes_CXX=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o objext_CXX=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test yes != "$_lt_caught_CXX_error"; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments.
compiler=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC compiler_CXX=$CC func_cc_basename $compiler cc_basename=$func_cc_basename_result if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test yes = "$GXX"; then lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' else lt_prog_compiler_no_builtin_flag_CXX= fi if test yes = "$GXX"; then # Set up default GNU C++ configuration # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) test no != "$with_gnu_ld" && break ;; *) test yes != "$with_gnu_ld" && break ;; esac fi done IFS=$lt_save_ifs else lt_cv_path_LD=$LD # Let the user override the test with a path. fi fi LD=$lt_cv_path_LD if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $?
"no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test yes = "$with_gnu_ld"; then archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='$wl' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else whole_archive_flag_spec_CXX= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ld_shlibs_CXX=yes case $host_os in aix3*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aix[4-9]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. 
# For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds_CXX='' hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes file_list_spec_CXX='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no ;; esac if test yes = "$GXX"; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct_CXX=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L_CXX=yes hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_libdir_separator_CXX= fi esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag=$shared_flag' $wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi export_dynamic_flag_spec_CXX='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. 
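# (Such an export list is a plain text file with one symbol per line,
# handed to the AIX linker via the -bexport/-bE: flags, for example:
#   init_example
#   run_example
# where both names are illustrative.)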
always_export_symbols_CXX=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. # The "-G" linker flag allows undefined symbols. no_undefined_flag_CXX='-bernotok' # Determine the default libpath from the value encoded in an empty # executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' allow_undefined_flag_CXX="-z nodefs" archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag_CXX=' $wl-bernotok' allow_undefined_flag_CXX=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. 
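# (--whole-archive makes GNU ld pull every member of the archives that
# follow into the link, e.g.
#   ld ... --whole-archive libconvenience.a --no-whole-archive ...
# rather than only the members that resolve outstanding references;
# libconvenience.a is an example name.)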
whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec_CXX='$convenience' fi archive_cmds_need_lc_CXX=yes archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag_CXX=unsupported # Joseph Beckenbach <rcb5@my-dejanews.com> says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else ld_shlibs_CXX=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec_CXX=' ' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=yes file_list_spec_CXX='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack.
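# (With native MSVC the commands below thus produce the DLL together
# with an import library named $libname.dll.lib beside it, e.g.
# libfoo-0.dll and libfoo.dll.lib, both example names.)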
archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' enable_shared_with_static_runtimes_CXX=yes # Don't use ranlib old_postinstall_cmds_CXX='chmod 644 $oldlib' postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ func_to_tool_file "$lt_outputfile"~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec_CXX='-L$libdir' export_dynamic_flag_spec_CXX='$wl--export-all-symbols' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=no enable_shared_with_static_runtimes_CXX=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... 
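# (A module-definition file recognized by the test below begins with a
# LIBRARY or EXPORTS statement, for example:
#   EXPORTS
#   my_symbol
# with my_symbol illustrative; a bare symbol list instead gets an
# EXPORTS header prepended by the command that follows.)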
archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs_CXX=no fi ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported if test yes = "$lt_cv_ld_force_load"; then whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec_CXX='' fi link_all_deplibs_CXX=yes allow_undefined_flag_CXX=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" if test yes != "$lt_cv_apple_cc_single_mod"; then archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" fi else ld_shlibs_CXX=no fi ;; os2*) hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_minus_L_CXX=yes allow_undefined_flag_CXX=unsupported shrext_cmds=.dll archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> 
$output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes_CXX=yes ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF ld_shlibs_CXX=no ;; freebsd-elf*) archive_cmds_need_lc_CXX=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes ;; haiku*) archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' link_all_deplibs_CXX=yes ;; hpux9*) hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' hardcode_libdir_separator_CXX=: export_dynamic_flag_spec_CXX='$wl-E' hardcode_direct_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; hpux10*|hpux11*) if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' hardcode_libdir_separator_CXX=: case $host_cpu in hppa*64*|ia64*) ;; *) export_dynamic_flag_spec_CXX='$wl-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no ;; *) hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; interix[3-9]*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow, very # memory-consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive.
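# (So static archives here are created as, for example,
#   CC -ar -WR,-u -o libfoo.a a.o b.o
# rather than with plain ar; libfoo.a, a.o and b.o are example names.)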
old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' fi fi link_all_deplibs_CXX=yes ;; esac hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' hardcode_libdir_separator_CXX=: inherit_rpath_CXX=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
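# (For reference, `$CC -V` prints the Intel compiler's version banner;
# the case below keys on its "Version 7." substring to select the older
# command set. Exact banner wording varies between releases.)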
case `$CC -V 2>&1` in *"Version 7."*) archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac archive_cmds_need_lc_CXX=no hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [1-5].* | *pgcpp\ [1-5].*) prelink_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' old_archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ;; cxx*) # Compaq C++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec_CXX='-rpath $libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
# # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' hardcode_libdir_flag_spec_CXX='-R$libdir' whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object_CXX=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; m88k*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) ld_shlibs_CXX=yes ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no hardcode_direct_absolute_CXX=yes archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' export_dynamic_flag_spec_CXX='$wl-E' whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else ld_shlibs_CXX=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' hardcode_libdir_separator_CXX=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; cxx*) case $host in osf3*) allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ;; *) allow_undefined_flag_CXX=' -expect_unresolved \*' archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ $RM $lib.exp' hardcode_libdir_flag_spec_CXX='-rpath $libdir' ;; esac hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes,no = "$GXX,$with_gnu_ld"; then allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' case $host in osf3*) archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; *) archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ archive_cmds_need_lc_CXX=yes no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_shlibpath_var_CXX=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs_CXX=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test yes,no = "$GXX,$with_gnu_ld"; then no_undefined_flag_CXX=' $wl-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require '-G' NOT '-shared' on this # platform. 
archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag_CXX='$wl-z,text' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag_CXX='$wl-z,text' allow_undefined_flag_CXX='$wl-z,nodefs' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes export_dynamic_flag_spec_CXX='$wl-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ '"$old_archive_cmds_CXX" reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ '"$reload_cmds_CXX" ;; *) archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test no = "$ld_shlibs_CXX" && can_build_shared=no GCC_CXX=$GXX LD_CXX=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
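# Illustrative sketch only, not used by configure: the scan that follows
# compiles a one-class C++ program, reruns the link verbosely, and sorts
# the tokens of the driver's hidden link line into a library search path
# and post-dependencies. A hypothetical, stripped-down rendering of that
# classification loop, fed with made-up sample tokens:
lt_demo_classify_link_tokens ()
{
  demo_search_path=
  demo_postdeps=
  # Real input comes from `eval "$output_verbose_link_cmd"`; these
  # sample tokens merely stand in for a typical g++ -v link line.
  for demo_tok in -L/usr/lib/gcc crtbegin.o conftest.o -lstdc++ -lm crtend.o; do
    case $demo_tok in
      -L*) demo_search_path="$demo_search_path $demo_tok" ;;
      -l*) demo_postdeps="$demo_postdeps $demo_tok" ;;
      *) ;; # objects are split on the conftest.o sentinel in the real loop
    esac
  done
  echo "search path:$demo_search_path postdeps:$demo_postdeps"
}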
# Dependencies to place before and after the object being linked: predep_objects_CXX= postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $prev$p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test x-L = "$p" || test x-R = "$p"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test no = "$pre_test_object_deps_done"; then case $prev in -L | -R) # Internal compiler library paths should come after those # provided by the user. The postdeps already come after the # user-supplied libs so there is no need to process them. if test -z "$compiler_lib_search_path_CXX"; then compiler_lib_search_path_CXX=$prev$p else compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$postdeps_CXX"; then postdeps_CXX=$prev$p else postdeps_CXX="${postdeps_CXX} $prev$p" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test no = "$pre_test_object_deps_done"; then if test -z "$predep_objects_CXX"; then predep_objects_CXX=$p else predep_objects_CXX="$predep_objects_CXX $p" fi else if test -z "$postdep_objects_CXX"; then postdep_objects_CXX=$p else postdep_objects_CXX="$postdep_objects_CXX $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling CXX test program" fi $RM -f conftest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override the above test on systems where it is broken case $host_os in interix[3-9]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. predep_objects_CXX= postdep_objects_CXX= postdeps_CXX= ;; esac case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac compiler_lib_search_dirs_CXX= if test -n "${compiler_lib_search_path_CXX}"; then compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L!
!g' -e 's!^ !!'` fi lt_prog_compiler_wl_CXX= lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX= # C++ specific cases for pic, static, wl, etc. if test yes = "$GXX"; then lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' fi lt_prog_compiler_pic_CXX='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic_CXX='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic_CXX='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static_CXX='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic_CXX='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all lt_prog_compiler_pic_CXX= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static_CXX= ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic_CXX=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac else case $host_os in aix[4-9]*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' else lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; dgux*) case $cc_basename in ec++*) lt_prog_compiler_pic_CXX='-KPIC' ;; ghcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='$wl-a ${wl}archive' if test ia64 != "$host_cpu"; then lt_prog_compiler_pic_CXX='+Z' fi ;; aCC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='$wl-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic_CXX='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler lt_prog_compiler_wl_CXX='--backend -Wl,' lt_prog_compiler_pic_CXX='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64, which still supported -KPIC. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fPIC' lt_prog_compiler_static_CXX='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' lt_prog_compiler_static_CXX='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) # IBM XL 8.0, 9.0 on PPC and BlueGene lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-qpic' lt_prog_compiler_static_CXX='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) lt_prog_compiler_pic_CXX='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) lt_prog_compiler_wl_CXX='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 lt_prog_compiler_pic_CXX='-pic' ;; cxx*) # Digital/Compaq C++ lt_prog_compiler_wl_CXX='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x lt_prog_compiler_pic_CXX='-pic' lt_prog_compiler_static_CXX='-Bstatic' ;; lcc*) # Lucid lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 lt_prog_compiler_pic_CXX='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) lt_prog_compiler_can_build_shared_CXX=no ;; esac fi case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic_CXX= ;; *) lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works_CXX=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; esac else lt_prog_compiler_pic_CXX= lt_prog_compiler_can_build_shared_CXX=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works_CXX=yes fi else lt_cv_prog_compiler_static_works_CXX=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then : else lt_prog_compiler_static_CXX= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 
2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } hard_links=nottested if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test no = "$hard_links"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' case $host_os in aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi ;; pw32*) export_symbols_cmds_CXX=$ltdll_cmds ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ;; esac ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test no = "$ld_shlibs_CXX" && can_build_shared=no with_gnu_ld_CXX=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc_CXX" in x|xyes) # Assume -lc should be added archive_cmds_need_lc_CXX=yes if test yes,yes = "$GCC,$enable_shared"; then case $archive_cmds_CXX in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl_CXX pic_flag=$lt_prog_compiler_pic_CXX compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag_CXX allow_undefined_flag_CXX= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc_CXX=no else lt_cv_archive_cmds_need_lc_CXX=yes fi allow_undefined_flag_CXX=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 $as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='$libname$release$shared_ext$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. 
# To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a(lib.so.V)' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. 
# When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... 
sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. hardcode_libdir_flag_spec_CXX='-L$libdir' ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
 */
int
main ()
{

  ;
  return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
  lt_cv_shlibpath_overrides_runpath=yes
fi
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
  LDFLAGS=$save_LDFLAGS
  libdir=$save_libdir
fi

  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath

  # This implies no fast_install, which is unacceptable.
  # Some rework will be needed to allow for fast_install
  # before this can be enabled.
  hardcode_into_libs=yes

  # Add ABI-specific directories to the system library path.
  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"

  # Ideally, we could use ldconfig to report *all* directories which are
  # searched for libraries, however this is still not possible. Aside from not
  # being certain /sbin/ldconfig is available, command
  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
  # even though it is searched at run-time. Try to do the best guess by
  # appending ld.so.conf contents (and includes) to the search path.
  if test -f /etc/ld.so.conf; then
    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
  fi

  # We used to test for /lib/ld.so.1 and disable shared libraries on
  # powerpc, because MkLinux only supported shared libraries with the
  # GNU dynamic linker. Since this was broken with cross compilers,
  # most powerpc-linux boxes support dynamic linking these days and
  # people can always --disable-shared, the test was removed, and we
  # assume the GNU/Linux dynamic linker is in use.
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action_CXX= if test -n "$hardcode_libdir_flag_spec_CXX" || test -n "$runpath_var_CXX" || test yes = "$hardcode_automatic_CXX"; then # We can hardcode non-existent directories. if test no != "$hardcode_direct_CXX" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && test no != "$hardcode_minus_L_CXX"; then # Linking always hardcodes the temporary library directory. hardcode_action_CXX=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action_CXX=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
  hardcode_action_CXX=unsupported
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
$as_echo "$hardcode_action_CXX" >&6; }

if test relink = "$hardcode_action_CXX" ||
   test yes = "$inherit_rpath_CXX"; then
  # Fast installation is not supported
  enable_fast_install=no
elif test yes = "$shlibpath_overrides_runpath" ||
     test no = "$enable_shared"; then
  # Fast installation is not necessary
  enable_fast_install=needless
fi

  fi # test -n "$compiler"

  CC=$lt_save_CC
  CFLAGS=$lt_save_CFLAGS
  LDCXX=$LD
  LD=$lt_save_LD
  GCC=$lt_save_GCC
  with_gnu_ld=$lt_save_with_gnu_ld
  lt_cv_path_LDCXX=$lt_cv_path_LD
  lt_cv_path_LD=$lt_save_path_LD
  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
fi # test yes != "$_lt_caught_CXX_error"

ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

ac_config_commands="$ac_config_commands libtool"

# Only expand once:

cat > cxxtst.cxx <<EOF
#include <cstdlib>
int main() { exit(__cplusplus < 201103L ? EXIT_FAILURE : EXIT_SUCCESS); }
EOF
cxx11=no
for CXX11FLAG in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x; do
  $CXX -o cxxtst $CXX11FLAG $CXXFLAGS $CPPFLAGS $LDFLAGS cxxtst.cxx 2>/dev/null
  if test "$?" = '0' ; then
    if ./cxxtst ; then
      if test -z "$CXX11FLAG" ; then
        { $as_echo "$as_me:${as_lineno-$LINENO}: No extra flag for C++ 11" >&5
$as_echo "$as_me: No extra flag for C++ 11" >&6;}
      else
        { $as_echo "$as_me:${as_lineno-$LINENO}: Extra flag for C++ 11: $CXX11FLAG" >&5
$as_echo "$as_me: Extra flag for C++ 11: $CXX11FLAG" >&6;}
        CXXFLAGS="$CXXFLAGS $CXX11FLAG"
      fi
      cxx11=yes
      break
    fi
  fi
done
rm -f cxxtst.cxx cxxtst
if test x$cxx11 = xno ; then
  as_fn_error $? "ARC requires a C++ 11 capable compiler" "$LINENO" 5
fi
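# Editorial note on the block above: this is the classic "probe a flag list"
# idiom -- write a tiny source file, try each candidate flag until one both
# compiles and runs, then append the winner to CXXFLAGS. A minimal standalone
# sketch of the same pattern follows. It is guarded by the hypothetical
# variable ARC_EDITOR_DEMOS (not used elsewhere in this script), so it has no
# effect during a normal configure run; it assumes only a POSIX shell and a
# C++ compiler in $CXX (falling back to c++).
if test "x${ARC_EDITOR_DEMOS:-no}" = xyes; then
  cat > demo_cxx11.cxx <<'DEMO_EOF'
#include <cstdlib>
int main() { return __cplusplus >= 201103L ? EXIT_SUCCESS : EXIT_FAILURE; }
DEMO_EOF
  demo_found=no
  for demo_flag in '' -std=gnu++11 -std=c++11; do
    # Keep the first flag whose compile AND run both succeed.
    if ${CXX:-c++} -o demo_cxx11 $demo_flag demo_cxx11.cxx 2>/dev/null && ./demo_cxx11; then
      echo "C++11 works with flag: '$demo_flag'"
      demo_found=yes
      break
    fi
  done
  rm -f demo_cxx11.cxx demo_cxx11
  test "$demo_found" = yes || echo "no C++11 support found" >&2
fi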
test -n "$MKDIR_P" || MKDIR_P="$mkdir_p" # Use arc for "pkgdir" instead of nordugrid-arc (@PACKAGE@) pkgdatadir='${datadir}/arc' pkgincludedir='${includedir}/arc' pkglibdir='${libdir}/arc' extpkglibdir='${libdir}/arc/external' pkglibexecdir='${libexecdir}/arc' ARCCLIENT_LIBS='$(top_builddir)/src/hed/libs/compute/libarccompute.la' ARCCLIENT_CFLAGS='-I$(top_srcdir)/include' ARCCOMMON_LIBS='$(top_builddir)/src/hed/libs/common/libarccommon.la' ARCCOMMON_CFLAGS='-I$(top_srcdir)/include' ARCCREDENTIAL_LIBS='$(top_builddir)/src/hed/libs/credential/libarccredential.la' ARCCREDENTIAL_CFLAGS='-I$(top_srcdir)/include' ARCDATA_LIBS='$(top_builddir)/src/hed/libs/data/libarcdata.la' ARCDATA_CFLAGS='-I$(top_srcdir)/include' ARCJOB_LIBS='$(top_builddir)/src/hed/libs/job/libarcjob.la' ARCJOB_CFLAGS='-I$(top_srcdir)/include' ARCLOADER_LIBS='$(top_builddir)/src/hed/libs/loader/libarcloader.la' ARCLOADER_CFLAGS='-I$(top_srcdir)/include' ARCMESSAGE_LIBS='$(top_builddir)/src/hed/libs/message/libarcmessage.la' ARCMESSAGE_CFLAGS='-I$(top_srcdir)/include' ARCSECURITY_LIBS='$(top_builddir)/src/hed/libs/security/libarcsecurity.la' ARCSECURITY_CFLAGS='-I$(top_srcdir)/include' ARCOTOKENS_LIBS='$(top_builddir)/src/hed/libs/security/libarcotokens.la' ARCOTOKENS_CFLAGS='-I$(top_srcdir)/include' ARCINFOSYS_LIBS='$(top_builddir)/src/hed/libs/infosys/libarcinfosys.la' ARCINFOSYS_CFLAGS='-I$(top_srcdir)/include' ARCWSADDRESSING_LIBS='$(top_builddir)/src/hed/libs/ws-addressing/libarcwsaddressing.la' ARCWSADDRESSING_CFLAGS='-I$(top_srcdir)/include' ARCWSSECURITY_LIBS='$(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la' ARCWSSECURITY_CFLAGS='-I$(top_srcdir)/include' ARCXMLSEC_LIBS='$(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la' ARCXMLSEC_CFLAGS='-I$(top_srcdir)/include' get_relative_path() { olddir=`echo $1 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` newdir=`echo $2 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` O_IFS=$IFS IFS=/ relative="" common="" for i in $olddir; do if echo "$newdir" | grep -q "^$common$i/"; then common="$common$i/" else relative="../$relative" fi done IFS=$O_IFS echo $newdir | sed "s|^$common|$relative|" | sed 's|/*$||' } if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval instprefix="\"${exec_prefix}\"" eval arc_libdir="\"${libdir}\"" eval arc_bindir="\"${bindir}\"" eval arc_sbindir="\"${sbindir}\"" eval arc_pkglibdir="\"${libdir}/arc\"" eval arc_pkglibexecdir="\"${libexecdir}/arc\"" # It seems arc_datadir should be evaluated twice to be expanded fully. 
if test "X$prefix" = "XNONE"; then
  acl_final_prefix="$ac_default_prefix"
else
  acl_final_prefix="$prefix"
fi
if test "X$exec_prefix" = "XNONE"; then
  acl_final_exec_prefix='${prefix}'
else
  acl_final_exec_prefix="$exec_prefix"
fi
acl_save_prefix="$prefix"
prefix="$acl_final_prefix"
eval acl_final_exec_prefix=\"$acl_final_exec_prefix\"
prefix="$acl_save_prefix"

acl_save_prefix="$prefix"
prefix="$acl_final_prefix"
acl_save_exec_prefix="$exec_prefix"
exec_prefix="$acl_final_exec_prefix"

eval instprefix="\"${exec_prefix}\""
eval arc_libdir="\"${libdir}\""
eval arc_bindir="\"${bindir}\""
eval arc_sbindir="\"${sbindir}\""
eval arc_pkglibdir="\"${libdir}/arc\""
eval arc_pkglibexecdir="\"${libexecdir}/arc\""
# It seems arc_datadir should be evaluated twice to be expanded fully.
eval arc_datadir="\"${datadir}/arc\""
eval arc_datadir="\"${arc_datadir}\""

exec_prefix="$acl_save_exec_prefix"
prefix="$acl_save_prefix"

libsubdir=`get_relative_path "$instprefix" "$arc_libdir"`
pkglibsubdir=`get_relative_path "$instprefix" "$arc_pkglibdir"`
pkglibexecsubdir=`get_relative_path "$instprefix" "$arc_pkglibexecdir"`
pkgdatasubdir=`get_relative_path "$instprefix" "$arc_datadir"`
pkglibdir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_pkglibdir"`
sbindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_sbindir"`
bindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_bindir"`
pkgdatadir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_datadir"`

{ $as_echo "$as_me:${as_lineno-$LINENO}: pkglib subdirectory is: $pkglibsubdir" >&5
$as_echo "$as_me: pkglib subdirectory is: $pkglibsubdir" >&6;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: pkglibexec subdirectory is: $pkglibexecsubdir" >&5
$as_echo "$as_me: pkglibexec subdirectory is: $pkglibexecsubdir" >&6;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: relative path of pkglib to pkglibexec is: $pkglibdir_rel_to_pkglibexecdir" >&5
$as_echo "$as_me: relative path of pkglib to pkglibexec is: $pkglibdir_rel_to_pkglibexecdir" >&6;}

cat >>confdefs.h <<_ACEOF
#define INSTPREFIX "${instprefix}"
_ACEOF

cat >>confdefs.h <<_ACEOF
#define LIBSUBDIR "${libsubdir}"
_ACEOF

cat >>confdefs.h <<_ACEOF
#define PKGLIBSUBDIR "${pkglibsubdir}"
_ACEOF

cat >>confdefs.h <<_ACEOF
#define PKGLIBEXECSUBDIR "${pkglibexecsubdir}"
_ACEOF

cat >>confdefs.h <<_ACEOF
#define PKGDATASUBDIR "${pkgdatasubdir}"
_ACEOF

# Check whether --with-systemd-units-location was given.
if test "${with_systemd_units_location+set}" = set; then :
  withval=$with_systemd_units_location; unitsdir="$withval"
else
  unitsdir=
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $unitsdir" >&5
$as_echo "$unitsdir" >&6; }

if test "x$unitsdir" != "x"; then
  SYSTEMD_UNITS_ENABLED_TRUE=
  SYSTEMD_UNITS_ENABLED_FALSE='#'
else
  SYSTEMD_UNITS_ENABLED_TRUE='#'
  SYSTEMD_UNITS_ENABLED_FALSE=
fi

# Check whether --with-sysv-scripts-location was given.
if test "${with_sysv_scripts_location+set}" = set; then :
  withval=$with_sysv_scripts_location; initddirauto="no"
     initddir="$withval"
else
  initddirauto="yes"
     initddir=
     case "${host}" in
       *linux* | *kfreebsd* | *gnu* )
         for i in init.d rc.d/init.d rc.d; do
           if test -d "/etc/$i" -a ! -h "/etc/$i" ; then
             initddir="$sysconfdir/$i"
             break
           fi
         done
         if test -z "$initddir"; then
           { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not find a suitable location for the SYSV init scripts - not installing" >&5
$as_echo "$as_me: WARNING: could not find a suitable location for the SYSV init scripts - not installing" >&2;}
         fi
         ;;
     esac
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $initddir" >&5
$as_echo "$initddir" >&6; }

if ( test "x$initddirauto" = "xno" || test "x$unitsdir" = "x" ) && test "x$initddir" != "x"; then
  SYSV_SCRIPTS_ENABLED_TRUE=
  SYSV_SCRIPTS_ENABLED_FALSE='#'
else
  SYSV_SCRIPTS_ENABLED_TRUE='#'
  SYSV_SCRIPTS_ENABLED_FALSE=
fi

# Check whether --with-cron-scripts-prefix was given.
if test "${with_cron_scripts_prefix+set}" = set; then :
  withval=$with_cron_scripts_prefix; cronddir="$withval"
else
  cronddir="$sysconfdir/cron.d"
fi

# gettext
mkdir_p="$MKDIR_P"
case $mkdir_p in
  [\\/$]* | ?:[\\/]*) ;;
  */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
esac

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether NLS is requested" >&5
$as_echo_n "checking whether NLS is requested... 
" >&6; } # Check whether --enable-nls was given. if test "${enable_nls+set}" = set; then : enableval=$enable_nls; USE_NLS=$enableval else USE_NLS=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } GETTEXT_MACRO_VERSION=0.17 # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgfmt", so it can be a program name with args. set dummy msgfmt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case "$MSGFMT" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGFMT="$MSGFMT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --statistics /dev/null >&5 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_MSGFMT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGFMT" && ac_cv_path_MSGFMT=":" ;; esac fi MSGFMT="$ac_cv_path_MSGFMT" if test "$MSGFMT" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGFMT" >&5 $as_echo "$MSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Extract the first word of "gmsgfmt", so it can be a program name with args. set dummy gmsgfmt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GMSGFMT+:} false; then : $as_echo_n "(cached) " >&6 else case $GMSGFMT in [\\/]* | ?:[\\/]*) ac_cv_path_GMSGFMT="$GMSGFMT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GMSGFMT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_GMSGFMT" && ac_cv_path_GMSGFMT="$MSGFMT" ;; esac fi GMSGFMT=$ac_cv_path_GMSGFMT if test -n "$GMSGFMT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GMSGFMT" >&5 $as_echo "$GMSGFMT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi case `$MSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) MSGFMT_015=: ;; *) MSGFMT_015=$MSGFMT ;; esac case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;; *) GMSGFMT_015=$GMSGFMT ;; esac # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "xgettext", so it can be a program name with args. set dummy xgettext; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XGETTEXT+:} false; then : $as_echo_n "(cached) " >&6 else case "$XGETTEXT" in [\\/]* | ?:[\\/]*) ac_cv_path_XGETTEXT="$XGETTEXT" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&5 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi); then ac_cv_path_XGETTEXT="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_XGETTEXT" && ac_cv_path_XGETTEXT=":" ;; esac fi XGETTEXT="$ac_cv_path_XGETTEXT" if test "$XGETTEXT" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XGETTEXT" >&5 $as_echo "$XGETTEXT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi rm -f messages.po case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;; *) XGETTEXT_015=$XGETTEXT ;; esac # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. 
cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "msgmerge", so it can be a program name with args. set dummy msgmerge; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_MSGMERGE+:} false; then : $as_echo_n "(cached) " >&6 else case "$MSGMERGE" in [\\/]* | ?:[\\/]*) ac_cv_path_MSGMERGE="$MSGMERGE" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&5 if $ac_dir/$ac_word --update -q /dev/null /dev/null >&5 2>&1; then ac_cv_path_MSGMERGE="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" test -z "$ac_cv_path_MSGMERGE" && ac_cv_path_MSGMERGE=":" ;; esac fi MSGMERGE="$ac_cv_path_MSGMERGE" if test "$MSGMERGE" != ":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MSGMERGE" >&5 $as_echo "$MSGMERGE" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$localedir" || localedir='${datadir}/locale' test -n "${XGETTEXT_EXTRA_OPTIONS+set}" || XGETTEXT_EXTRA_OPTIONS= ac_config_commands="$ac_config_commands po-directories" # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5 $as_echo_n "checking for ld used by GCC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | [A-Za-z]:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the path of ld ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${acl_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" for ac_dir in $PATH; do test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. 
# I'd rather use --version, but apparently some GNU ld's only accept -v.
      # Break only if it was the GNU/non-GNU ld that we prefer.
      case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in
      *GNU* | *'with BFD'*)
        test "$with_gnu_ld" != no && break ;;
      *)
        test "$with_gnu_ld" != yes && break ;;
      esac
    fi
  done
  IFS="$ac_save_ifs"
else
  acl_cv_path_LD="$LD" # Let the user override the test with a path.
fi
fi

LD="$acl_cv_path_LD"
if test -n "$LD"; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
$as_echo "$LD" >&6; }
else
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
if ${acl_cv_prog_gnu_ld+:} false; then :
  $as_echo_n "(cached) " >&6
else
  # I'd rather use --version here, but apparently some GNU ld's only accept -v.
case `$LD -v 2>&1 </dev/null` in
*GNU* | *'with BFD'*)
  acl_cv_prog_gnu_ld=yes ;;
*)
  acl_cv_prog_gnu_ld=no ;;
esac
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_prog_gnu_ld" >&5
$as_echo "$acl_cv_prog_gnu_ld" >&6; }
with_gnu_ld=$acl_cv_prog_gnu_ld

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5
$as_echo_n "checking for shared library run path origin... " >&6; }
if ${acl_cv_rpath+:} false; then :
  $as_echo_n "(cached) " >&6
else
  CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \
    ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh
    . ./conftest.sh
    rm -f ./conftest.sh
    acl_cv_rpath=done
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5
$as_echo "$acl_cv_rpath" >&6; }
  wl="$acl_cv_wl"
  acl_libext="$acl_cv_libext"
  acl_shlibext="$acl_cv_shlibext"
  acl_libname_spec="$acl_cv_libname_spec"
  acl_library_names_spec="$acl_cv_library_names_spec"
  acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec"
  acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator"
  acl_hardcode_direct="$acl_cv_hardcode_direct"
  acl_hardcode_minus_L="$acl_cv_hardcode_minus_L"

# Check whether --enable-rpath was given.
if test "${enable_rpath+set}" = set; then :
  enableval=$enable_rpath; :
else
  enable_rpath=yes
fi

  acl_libdirstem=lib
  searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'`
  if test -n "$searchpath"; then
    acl_save_IFS="${IFS= }"; IFS=":"
    for searchdir in $searchpath; do
      if test -d "$searchdir"; then
        case "$searchdir" in
          */lib64/ | */lib64 ) acl_libdirstem=lib64 ;;
          *) searchdir=`cd "$searchdir" && pwd`
             case "$searchdir" in
               */lib64 ) acl_libdirstem=lib64 ;;
             esac ;;
        esac
      fi
    done
    IFS="$acl_save_IFS"
  fi

  use_additional=yes

  acl_save_prefix="$prefix"
  prefix="$acl_final_prefix"
  acl_save_exec_prefix="$exec_prefix"
  exec_prefix="$acl_final_exec_prefix"

  eval additional_includedir=\"$includedir\"
  eval additional_libdir=\"$libdir\"

  exec_prefix="$acl_save_exec_prefix"
  prefix="$acl_save_prefix"

# Check whether --with-libiconv-prefix was given.
if test "${with_libiconv_prefix+set}" = set; then : withval=$with_libiconv_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi fi LIBICONV= LTLIBICONV= INCICONV= LIBICONV_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='iconv ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value" else : fi else found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then dir="$additional_libdir" if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' 
-n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a" else LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` LIBICONV_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . 
"./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBICONV="${LIBICONV}${LIBICONV:+ }$dep" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep" ;; esac done fi else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir" done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFPreferencesCopyAppValue" >&5 $as_echo_n "checking for CFPreferencesCopyAppValue... " >&6; } if ${gt_cv_func_CFPreferencesCopyAppValue+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
 */
#include <CoreFoundation/CFPreferences.h>
int
main ()
{
CFPreferencesCopyAppValue(NULL, NULL)
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  gt_cv_func_CFPreferencesCopyAppValue=yes
else
  gt_cv_func_CFPreferencesCopyAppValue=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
  LIBS="$gt_save_LIBS"
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFPreferencesCopyAppValue" >&5
$as_echo "$gt_cv_func_CFPreferencesCopyAppValue" >&6; }
  if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then

$as_echo "#define HAVE_CFPREFERENCESCOPYAPPVALUE 1" >>confdefs.h

  fi
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLocaleCopyCurrent" >&5
$as_echo_n "checking for CFLocaleCopyCurrent... " >&6; }
if ${gt_cv_func_CFLocaleCopyCurrent+:} false; then :
  $as_echo_n "(cached) " >&6
else
  gt_save_LIBS="$LIBS"
     LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation"
     cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <CoreFoundation/CFLocale.h>
int
main ()
{
CFLocaleCopyCurrent();
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  gt_cv_func_CFLocaleCopyCurrent=yes
else
  gt_cv_func_CFLocaleCopyCurrent=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
  LIBS="$gt_save_LIBS"
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_cv_func_CFLocaleCopyCurrent" >&5
$as_echo "$gt_cv_func_CFLocaleCopyCurrent" >&6; }
  if test $gt_cv_func_CFLocaleCopyCurrent = yes; then

$as_echo "#define HAVE_CFLOCALECOPYCURRENT 1" >>confdefs.h

  fi
  INTL_MACOSX_LIBS=
  if test $gt_cv_func_CFPreferencesCopyAppValue = yes \
     || test $gt_cv_func_CFLocaleCopyCurrent = yes; then
    INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation"
  fi
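# Editorial note: the two CoreFoundation probes above share the standard
# autoconf link-test shape -- save LIBS, append the candidate flags, try to
# link a one-line caller, record yes/no, restore LIBS. A guarded standalone
# sketch of that shape, using -lm purely as an example (ARC_EDITOR_DEMOS is a
# hypothetical guard; a cc on PATH and libm are assumed):
if test "x${ARC_EDITOR_DEMOS:-no}" = xyes; then
  demo_save_LIBS="$LIBS"
  LIBS="$LIBS -lm"
  cat > demo_libm.c <<'DEMO_EOF'
#include <math.h>
int main(void) { return sqrt(4.0) > 1.5 ? 0 : 1; }
DEMO_EOF
  if ${CC:-cc} -o demo_libm demo_libm.c $LIBS 2>/dev/null; then
    demo_have_libm=yes
  else
    demo_have_libm=no
  fi
  echo "link test with -lm: $demo_have_libm"
  rm -f demo_libm.c demo_libm
  LIBS="$demo_save_LIBS"
fi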
  LIBINTL=
  LTLIBINTL=
  POSUB=

  case " $gt_needs " in
    *" need-formatstring-macros "*) gt_api_version=3 ;;
    *" need-ngettext "*) gt_api_version=2 ;;
    *) gt_api_version=1 ;;
  esac
  gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc"
  gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl"

  if test "$USE_NLS" = "yes"; then
    gt_use_preinstalled_gnugettext=no

    if test $gt_api_version -ge 3; then
      gt_revision_test_code='
#ifndef __GNU_GETTEXT_SUPPORTED_REVISION
#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1)
#endif
typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1];
'
    else
      gt_revision_test_code=
    fi
    if test $gt_api_version -ge 2; then
      gt_expression_test_code=' + * ngettext ("", "", 0)'
    else
      gt_expression_test_code=
    fi

    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libc" >&5
$as_echo_n "checking for GNU gettext in libc... " >&6; }
if eval \${$gt_func_gnugettext_libc+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <libintl.h>
$gt_revision_test_code
extern int _nl_msg_cat_cntr;
extern int *_nl_domain_bindings;
int
main ()
{
bindtextdomain ("", "");
return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_domain_bindings
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  eval "$gt_func_gnugettext_libc=yes"
else
  eval "$gt_func_gnugettext_libc=no"
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
fi
eval ac_res=\$$gt_func_gnugettext_libc
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }

        if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then
          am_save_CPPFLAGS="$CPPFLAGS"

  for element in $INCICONV; do
    haveit=
    for x in $CPPFLAGS; do

  acl_save_prefix="$prefix"
  prefix="$acl_final_prefix"
  acl_save_exec_prefix="$exec_prefix"
  exec_prefix="$acl_final_exec_prefix"
  eval x=\"$x\"
  exec_prefix="$acl_save_exec_prefix"
  prefix="$acl_save_prefix"

      if test "X$x" = "X$element"; then
        haveit=yes
        break
      fi
    done
    if test -z "$haveit"; then
      CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element"
    fi
  done

          { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5
$as_echo_n "checking for iconv... " >&6; }
if ${am_cv_func_iconv+:} false; then :
  $as_echo_n "(cached) " >&6
else
  am_cv_func_iconv="no, consider installing GNU libiconv"
    am_cv_lib_iconv=no
    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <stdlib.h>
#include <iconv.h>
int
main ()
{
iconv_t cd = iconv_open("","");
       iconv(cd,NULL,NULL,NULL,NULL);
       iconv_close(cd);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  am_cv_func_iconv=yes
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
    if test "$am_cv_func_iconv" != yes; then
      am_save_LIBS="$LIBS"
      LIBS="$LIBS $LIBICONV"
      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <stdlib.h>
#include <iconv.h>
int
main ()
{
iconv_t cd = iconv_open("","");
         iconv(cd,NULL,NULL,NULL,NULL);
         iconv_close(cd);
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  am_cv_lib_iconv=yes
        am_cv_func_iconv=yes
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
      LIBS="$am_save_LIBS"
    fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5
$as_echo "$am_cv_func_iconv" >&6; }
  if test "$am_cv_func_iconv" = yes; then
    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working iconv" >&5
$as_echo_n "checking for working iconv... " >&6; }
if ${am_cv_func_iconv_works+:} false; then :
  $as_echo_n "(cached) " >&6
else
  am_save_LIBS="$LIBS"
      if test $am_cv_lib_iconv = yes; then
        LIBS="$LIBS $LIBICONV"
      fi
      if test "$cross_compiling" = yes; then :
  case "$host_os" in
           aix* | hpux*) am_cv_func_iconv_works="guessing no" ;;
           *)            am_cv_func_iconv_works="guessing yes" ;;
         esac
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */
#include <iconv.h>
#include <string.h>
int main ()
{
  /* Test against AIX 5.1 bug: Failures are not distinguishable from successful
     returns.  */
  {
    iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8");
    if (cd_utf8_to_88591 != (iconv_t)(-1))
      {
        static const char input[] = "\342\202\254"; /* EURO SIGN */
        char buf[10];
        const char *inptr = input;
        size_t inbytesleft = strlen (input);
        char *outptr = buf;
        size_t outbytesleft = sizeof (buf);
        size_t res = iconv (cd_utf8_to_88591,
                            (char **) &inptr, &inbytesleft,
                            &outptr, &outbytesleft);
        if (res == 0)
          return 1;
      }
  }
#if 0 /* This bug could be worked around by the caller.  */
  /* Test against HP-UX 11.11 bug: Positive return value instead of 0.
*/ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static const char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) return 1; } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ if (/* Try standardized names. */ iconv_open ("UTF-8", "EUC-JP") == (iconv_t)(-1) /* Try IRIX, OSF/1 names. */ && iconv_open ("UTF-8", "eucJP") == (iconv_t)(-1) /* Try AIX names. */ && iconv_open ("UTF-8", "IBM-eucJP") == (iconv_t)(-1) /* Try HP-UX names. */ && iconv_open ("utf8", "eucJP") == (iconv_t)(-1)) return 1; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : am_cv_func_iconv_works=yes else am_cv_func_iconv_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi LIBS="$am_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv_works" >&5 $as_echo "$am_cv_func_iconv_works" >&6; } case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then $as_echo "#define HAVE_ICONV 1" >>confdefs.h fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libintl-prefix was given. 
if test "${with_libintl_prefix+set}" = set; then : withval=$with_libintl_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi fi LIBINTL= LTLIBINTL= INCINTL= LIBINTL_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='intl ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBINTL="${LIBINTL}${LIBINTL:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$value" else : fi else found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then dir="$additional_libdir" if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' 
-n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_so" else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBINTL="${LIBINTL}${LIBINTL:+ }$found_a" else LIBINTL="${LIBINTL}${LIBINTL:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` LIBINTL_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCINTL="${INCINTL}${INCINTL:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . 
"./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBINTL="${LIBINTL}${LIBINTL:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBINTL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBINTL="${LIBINTL}${LIBINTL:+ }$dep" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }$dep" ;; esac done fi else LIBINTL="${LIBINTL}${LIBINTL:+ }-l$name" LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBINTL="${LIBINTL}${LIBINTL:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBINTL="${LTLIBINTL}${LTLIBINTL:+ }-R$found_dir" done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU gettext in libintl" >&5 $as_echo_n "checking for GNU gettext in libintl... " >&6; } if eval \${$gt_func_gnugettext_libintl+:} false; then : $as_echo_n "(cached) " >&6 else gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include $gt_revision_test_code extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); int main () { bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("") ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$gt_func_gnugettext_libintl=yes" else eval "$gt_func_gnugettext_libintl=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include $gt_revision_test_code extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *); int main () { bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("") ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" eval "$gt_func_gnugettext_libintl=yes" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS" fi eval ac_res=\$$gt_func_gnugettext_libintl { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \ || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else LIBINTL= LTLIBINTL= INCINTL= fi if test -n "$INTL_MACOSX_LIBS"; then if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" fi fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then $as_echo "#define ENABLE_NLS 1" >>confdefs.h else USE_NLS=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use NLS" >&5 $as_echo_n "checking whether to use NLS... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_NLS" >&5 $as_echo "$USE_NLS" >&6; } if test "$USE_NLS" = "yes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking where the gettext function comes from" >&5 $as_echo_n "checking where the gettext function comes from... " >&6; } if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gt_source" >&5 $as_echo "$gt_source" >&6; } fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libintl" >&5 $as_echo_n "checking how to link with libintl... 
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBINTL" >&5 $as_echo "$LIBINTL" >&6; } for element in $INCINTL; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done fi $as_echo "#define HAVE_GETTEXT 1" >>confdefs.h $as_echo "#define HAVE_DCGETTEXT 1" >>confdefs.h fi POSUB=po fi INTLLIBS="$LIBINTL" [ -r $srcdir/po/POTFILES.in ] || touch $srcdir/po/POTFILES.in # Portable 64bit file offsets # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... " >&6; } if ${ac_cv_sys_largefile_CC+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _FILE_OFFSET_BITS 64 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. 
*/ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGE_FILES 1 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi # pkg-config needed for many checks if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
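# (Illustrative note, not autoconf output: the loop below is the generated
# form of a plain executable probe, roughly
#   for ext in '' .exe; do
#     p="$as_dir/pkg-config$ext"
#     test -f "$p" && test -x "$p" && { PKG_CONFIG="$p"; break; }
#   done
# where $ac_executable_extensions supplies the suffix list on hosts whose
# executables carry one.)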
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="no" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi if test "x$PKG_CONFIG" = "xno"; then as_fn_error $? " *** pkg-config not found" "$LINENO" 5 else pkgconfigdir=${libdir}/pkgconfig fi # Default enable/disable switches # Features enables_systemd=no enables_swig_python=yes # Features directly related to components enables_cppunit=yes enables_python=yes enables_altpython=yes enables_pylint=yes enables_mock_dmc=no enables_gfal=no enables_s3=no enables_xrootd=yes enables_xmlsec1=yes enables_sqlitejstore=yes enables_ldns=yes # Libraries and plugins # Currently no fine-grained choice is supported. # Also this variable is used to check if source # build is needed at all because no component can # be built without HED. enables_hed=yes # Services enables_a_rex_service=yes enables_internal=no enables_ldap_service=yes enables_candypond=yes enables_datadelivery_service=yes enables_monitor=yes # Clients enables_compute_client=yes enables_credentials_client=yes enables_data_client=yes enables_arcrest_client=yes # Documentation enables_doc=yes # Handle group enable/disable switches # Check whether --enable-all was given. 
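# (Illustrative note, not autoconf output: the group switch below assigns a
# single value to every component flag at once; the more specific switches
# are processed later in this script and therefore override it. For example,
#   ./configure --disable-all --enable-hed
# first turns every component off and then re-enables HED, regardless of
# the order of the options on the command line.)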
if test "${enable_all+set}" = set; then : enableval=$enable_all; enables_a_rex_service=$enableval enables_internal=$enableval enables_ldap_service=$enableval enables_monitor=$enableval enables_candypond=$enableval enables_datadelivery_service=$enableval enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_arcrest_client=$enableval enables_hed=$enableval enables_python=$enableval enables_altpython=$enableval enables_pylint=$enableval enables_mock_dmc=$enableval enables_gfal=$enableval enables_s3=$enableval enables_xrootd=$enableval enables_xmlsec1=$enableval enables_cppunit=$enableval enables_doc=$enableval enables_sqlitejstore=$enableval enables_ldns=$enableval fi # Check whether --enable-all-clients was given. if test "${enable_all_clients+set}" = set; then : enableval=$enable_all_clients; enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_arcrest_client=$enableval enables_doc=$enableval fi # Check whether --enable-all-data-clients was given. if test "${enable_all_data_clients+set}" = set; then : enableval=$enable_all_data_clients; enables_data_client=$enableval fi # Check whether --enable-all-services was given. if test "${enable_all_services+set}" = set; then : enableval=$enable_all_services; enables_a_rex_service=$enableval enables_ldap_service=$enableval enables_monitor=$enableval enables_candypond=$enableval enables_datadelivery_service=$enableval fi # Check whether --enable-hed was given. if test "${enable_hed+set}" = set; then : enableval=$enable_hed; enables_hed=$enableval fi if test "$enables_hed" = "no" ; then enables_a_rex_service=no enables_candypond=no enables_datadelivery_service=no enables_compute_client=no enables_credentials_client=no enables_data_client=no enables_swig_python=no fi # Be pedantic about compiler warnings. # Check whether --enable-pedantic-compile was given. if test "${enable_pedantic_compile+set}" = set; then : enableval=$enable_pedantic_compile; enables_pedantic_compile="yes" else enables_pedantic_compile="no" fi if test "x$enables_pedantic_compile" = "xyes"; then # This check need to be enhanced. It won't work in case of cross-compilation # and if path to compiler is explicitly specified. if test x"$CXX" = x"g++"; then # GNU C/C++ flags AM_CXXFLAGS="-Wall -Wextra -Werror -Wno-sign-compare -Wno-unused" SAVE_CPPFLAGS=$CPPFLAGS ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu CPPFLAGS="$CPPFLAGS -Wno-unused-result" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 

int
main ()
{

  ;
  return 0;
}
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :
  AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-result"
else
  { $as_echo "$as_me:${as_lineno-$LINENO}: compilation flag -Wno-unused-result is not supported" >&5
$as_echo "$as_me: compilation flag -Wno-unused-result is not supported" >&6;}
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
    ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
    CPPFLAGS=$SAVE_CPPFLAGS
  else
    # TODO: set generic flags for generic compiler
    AM_CXXFLAGS=""
  fi
fi

if test "x$enables_pedantic_compile" = "xyes"; then
  PEDANTIC_COMPILE_TRUE=
  PEDANTIC_COMPILE_FALSE='#'
else
  PEDANTIC_COMPILE_TRUE='#'
  PEDANTIC_COMPILE_FALSE=
fi

# Enable/disable switches for third-party.

# Swig
# Check whether --enable-swig-python was given.
if test "${enable_swig_python+set}" = set; then :
  enableval=$enable_swig_python;
  enables_swig_python=$enableval
fi

# Check whether --enable-swig was given.
if test "${enable_swig+set}" = set; then :
  enableval=$enable_swig;
  enables_swig_python=$enableval
fi

if test "$enables_swig_python" = "yes"; then
  for ac_prog in swig
do
  # Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_SWIG+:} false; then :
  $as_echo_n "(cached) " >&6
else
  case $SWIG in
  [\\/]* | ?:[\\/]*)
  ac_cv_path_SWIG="$SWIG" # Let the user override the test with a path.
  ;;
  *)
  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
  IFS=$as_save_IFS
  test -z "$as_dir" && as_dir=.
    for ac_exec_ext in '' $ac_executable_extensions; do
  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
    ac_cv_path_SWIG="$as_dir/$ac_word$ac_exec_ext"
    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
    break 2
  fi
done
  done
IFS=$as_save_IFS
  ;;
esac
fi
SWIG=$ac_cv_path_SWIG
if test -n "$SWIG"; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SWIG" >&5
$as_echo "$SWIG" >&6; }
else
  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi

  test -n "$SWIG" && break
done

  if test "x$SWIG" = "x"; then
    enables_swig="no"
  else
    swigver=`$SWIG -version 2>&1 | grep Version | sed 's/.* //'`
    swigver1=`echo $swigver | cut -d. -f1`
    swigver2=`echo $swigver | cut -d. -f2`
    swigver3=`echo $swigver | cut -d. -f3`
    if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \
       test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \
       test $swigver3 -lt 25 ) ) ) ) ; then
      { $as_echo "$as_me:${as_lineno-$LINENO}: swig is too old (< 1.3.25)" >&5
$as_echo "$as_me: swig is too old (< 1.3.25)" >&6;}
      SWIG=""
      enables_swig="no"
    elif test $swigver1 -eq 1 && test $swigver2 -eq 3 && test $swigver3 -eq 38 ; then
      { $as_echo "$as_me:${as_lineno-$LINENO}: swig version 1.3.38 has a bug which prevents it from being used for this software. Please upgrade or downgrade." >&5
$as_echo "$as_me: swig version 1.3.38 has a bug which prevents it from being used for this software. Please upgrade or downgrade." >&6;}
      SWIG=""
      enables_swig="no"
    else
      SWIG2="no"
      if test $swigver1 -ge 2
      then
        SWIG2="yes"
      fi
      SWIG_PYTHON_NAMING="SwigPy"
      # In SWIG version 1.3.37 naming was changed from "PySwig" to "SwigPy".
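# (Illustrative sketch, not part of the generated script: the chained
# `test` comparison that follows is a plain three-component version
# compare, equivalent to this hypothetical helper
#   ver_lt () { # true if $swigver1.$swigver2.$swigver3 < $1.$2.$3
#     test $swigver1 -lt $1 || { test $swigver1 -eq $1 && {
#       test $swigver2 -lt $2 || { test $swigver2 -eq $2 &&
#         test $swigver3 -lt $3; }; }; }
#   }
#   ver_lt 1 3 37 && SWIG_PYTHON_NAMING="PySwig"
# i.e. SWIG releases older than 1.3.37 still expose the "PySwig" names.)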
if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 37 ) ) ) ) ; then SWIG_PYTHON_NAMING="PySwig" fi fi fi else SWIG="" fi if test "x$enables_swig" = "xyes"; then SWIG_ENABLED_TRUE= SWIG_ENABLED_FALSE='#' else SWIG_ENABLED_TRUE='#' SWIG_ENABLED_FALSE= fi # Python # Check whether --enable-python was given. if test "${enable_python+set}" = set; then : enableval=$enable_python; enables_arcrest_client=$enableval enables_swig_python=$enableval fi enables_python=yes if test "$enables_python" = "yes"; then # Check whether --with-python was given. if test "${with_python+set}" = set; then : withval=$with_python; fi # We do not look for python binary when cross-compiling # but we need to make the variable non-empty if test "${build}" = "${host}"; then for ac_prog in $with_python python3 do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PYTHON+:} false; then : $as_echo_n "(cached) " >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PYTHON" && break done else PYTHON=/usr/bin/python3 fi if test "X$PYTHON" != "X"; then PYNAME=`basename $PYTHON` if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME-embed" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME-embed" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYNAME=python-`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME-embed" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYNAME=python-`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME-embed" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... 
" >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?"
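# The link tests above follow the AC_CHECK_LIB pattern: a dummy prototype
# (char Py_Initialize ();) is declared so that no Python headers are needed
# and only symbol resolution is exercised. Because autoconf caches each
# verdict under ac_cv_lib_python<version>_<symbol>, the retry with
# -L$PY_LIBDEST/config has to probe a different symbol (Py_Finalize);
# probing Py_Initialize again would only replay the cached failure.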
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... 
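# When pkg-config cannot resolve the module, the flags are derived from the
# interpreter itself via distutils.sysconfig: get_python_inc() supplies
# PYTHON_CFLAGS, and the LIBS/SYSLIBS/LIBDEST config variables supply the
# candidate link flags, for example (illustrative):
#   $PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'
# The result is then verified by linking against -lpython$PYTHON_VERSION.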
" >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYNAME=python-`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME-embed" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME-embed" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYNAME=python-`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME-embed" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... 
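# If neither the generic module name nor its -embed variant is known to
# pkg-config, PYNAME is recomputed as python-<major>.<minor> from the
# version reported by the interpreter, and the whole -embed/plain/fallback
# probe sequence is repeated once more under that name.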
" >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PYTHON" >&5 $as_echo_n "checking for PYTHON... " >&6; } if test -n "$PYTHON_CFLAGS"; then pkg_cv_PYTHON_CFLAGS="$PYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_CFLAGS=`$PKG_CONFIG --cflags "$PYNAME" 2>/dev/null` test "x$?"
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$PYTHON_LIBS"; then pkg_cv_PYTHON_LIBS="$PYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$PYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$PYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_PYTHON_LIBS=`$PKG_CONFIG --libs "$PYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then PYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$PYNAME" 2>&1` else PYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$PYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$PYTHON_PKG_ERRORS" >&5 PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[0])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS" else LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$PYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$PYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$PYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$PYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS" else PYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi else PYTHON_CFLAGS=$pkg_cv_PYTHON_CFLAGS PYTHON_LIBS=$pkg_cv_PYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` fi if test "${build}" = "${host}"; then PYTHON_EXT_SUFFIX=`$PYTHON -c "from distutils import sysconfig; v = sysconfig.get_config_vars(); print(v.get('EXT_SUFFIX', v.get('SO')))" | sed s/None//` else PYTHON_EXT_SUFFIX="" fi
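# Native builds ask the interpreter for the extension-module suffix
# (EXT_SUFFIX, e.g. ".cpython-311-x86_64-linux-gnu.so", with the legacy "SO"
# config variable as fallback); when cross-compiling the build-host
# interpreter cannot answer for the target, so the suffix is left empty.
# The options below choose the site-packages directories, defaulting to
# sysconfig.get_python_lib() natively and to
# ${libdir}/python${PYTHON_VERSION}/site-packages when cross-compiling.
# A compile test further down checks for Py_InitializeEx (Python >= 2.4),
# whose argument 0 lets the embedding service skip installing Python's
# signal handlers, and the same probing is repeated afterwards for an
# alternative interpreter when --enable-altpython is given.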
# Check whether --with-python-site-arch was given. if test "${with_python_site_arch+set}" = set; then : withval=$with_python_site_arch; fi if test "X$PYTHON_SITE_ARCH" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_ARCH=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` else PYTHON_SITE_ARCH="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi # Check whether --with-python-site-lib was given. if test "${with_python_site_lib+set}" = set; then : withval=$with_python_site_lib; fi if test "X$PYTHON_SITE_LIB" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_LIB=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` else PYTHON_SITE_LIB="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $PYTHON_LIBS" CPPFLAGS="$CPPFLAGS $PYTHON_CFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "Python.h" "ac_cv_header_Python_h" "$ac_includes_default" if test "x$ac_cv_header_Python_h" = xyes; then : pythonh="yes" else pythonh="no" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <Python.h> int main () { Py_InitializeEx(0) ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Python supports skipping registration of signal handlers during initialization" >&5 $as_echo "$as_me: Python supports skipping registration of signal handlers during initialization" >&6;} $as_echo "#define HAVE_PYTHON_INITIALIZE_EX 1" >>confdefs.h enables_python_service="yes" else { $as_echo "$as_me:${as_lineno-$LINENO}: Python does not support skipping registration of signal handlers during initialization, since its version is below 2.4" >&5 $as_echo "$as_me: Python does not support skipping registration of signal handlers during initialization, since its version is below 2.4" >&6;} enables_python_service="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$PYTHON" = "X"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing Python - skipping Python components" >&5 $as_echo "$as_me: Missing Python - skipping Python components" >&6;} enables_python=no elif test "X$PYTHON_SITE_ARCH" = "X" || test "X$PYTHON_SITE_LIB" = "X"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing python site packages location - skipping Python components" >&5 $as_echo "$as_me: Missing python site packages location - skipping Python components" >&6;} enables_python=no else { $as_echo "$as_me:${as_lineno-$LINENO}: Python available: $PYTHON_VERSION" >&5 $as_echo "$as_me: Python available: $PYTHON_VERSION" >&6;} fi if test "x$enables_python" != "xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing Python - skipping Python bindings" >&5 $as_echo "$as_me: Missing Python - skipping Python bindings" >&6;} enables_swig_python=no elif test "X$PYTHON_LIBS" = "X"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing Python library - skipping Python bindings" >&5 $as_echo "$as_me: Missing Python library - skipping Python bindings" >&6;} enables_swig_python=no elif test "X$pythonh" != "Xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing Python header - skipping Python bindings" >&5 $as_echo "$as_me: Missing Python header - skipping Python bindings" >&6;} enables_swig_python=no elif !
test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing pre-compiled Python wrapper and SWIG - skipping Python bindings" >&5 $as_echo "$as_me: Missing pre-compiled Python wrapper and SWIG - skipping Python bindings" >&6;} enables_swig_python=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: Python enabled: $enables_python" >&5 $as_echo "$as_me: Python enabled: $enables_python" >&6;} if test "$enables_python" = "no"; then as_fn_error $? "Python is not optional..." "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: Python SWIG bindings enabled: $enables_swig_python" >&5 $as_echo "$as_me: Python SWIG bindings enabled: $enables_swig_python" >&6;} if test "x$enables_python" = "xyes"; then PYTHON_ENABLED_TRUE= PYTHON_ENABLED_FALSE='#' else PYTHON_ENABLED_TRUE='#' PYTHON_ENABLED_FALSE= fi if test "x$enables_python" = "xyes" && test "x$PYTHON_MAJOR" = "x3"; then PYTHON3_TRUE= PYTHON3_FALSE='#' else PYTHON3_TRUE='#' PYTHON3_FALSE= fi if test "x$enables_swig_python" = "xyes"; then PYTHON_SWIG_ENABLED_TRUE= PYTHON_SWIG_ENABLED_FALSE='#' else PYTHON_SWIG_ENABLED_TRUE='#' PYTHON_SWIG_ENABLED_FALSE= fi if test "x$enables_swig_python" = "xyes" && test "x$enables_python_service" = "xyes"; then PYTHON_SERVICE_TRUE= PYTHON_SERVICE_FALSE='#' else PYTHON_SERVICE_TRUE='#' PYTHON_SERVICE_FALSE= fi # Alternative Python if test "$enables_hed" = "yes"; then # Check whether --enable-altpython was given. if test "${enable_altpython+set}" = set; then : enableval=$enable_altpython; enables_altpython=$enableval fi if test "$enables_altpython" = "yes"; then # Check whether --with-altpython was given. if test "${with_altpython+set}" = set; then : withval=$with_altpython; fi for ac_prog in $with_altpython do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ALTPYTHON+:} false; then : $as_echo_n "(cached) " >&6 else case $ALTPYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_ALTPYTHON="$ALTPYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ALTPYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ALTPYTHON=$ac_cv_path_ALTPYTHON if test -n "$ALTPYTHON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ALTPYTHON" >&5 $as_echo "$ALTPYTHON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ALTPYTHON" && break done if test "X$ALTPYTHON" != "X"; then ALTPYNAME=`basename $ALTPYTHON` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYNAME=python-`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... 
" >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYNAME=python-`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME-embed" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYNAME=python-`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYNAME=python-`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... 
" >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME-embed\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME-embed") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME-embed" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME-embed" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ALTPYTHON" >&5 $as_echo_n "checking for ALTPYTHON... " >&6; } if test -n "$ALTPYTHON_CFLAGS"; then pkg_cv_ALTPYTHON_CFLAGS="$ALTPYTHON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_CFLAGS=`$PKG_CONFIG --cflags "$ALTPYNAME" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$ALTPYTHON_LIBS"; then pkg_cv_ALTPYTHON_LIBS="$ALTPYTHON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"\$ALTPYNAME\""; } >&5 ($PKG_CONFIG --exists --print-errors "$ALTPYNAME") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_ALTPYTHON_LIBS=`$PKG_CONFIG --libs "$ALTPYNAME" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$ALTPYNAME" 2>&1` else ALTPYTHON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$ALTPYNAME" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$ALTPYTHON_PKG_ERRORS" >&5 ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[0].split(".")[:2]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[0])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Initialize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Initialize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Initialize in -lpython$ALTPYTHON_VERSION... " >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Initialize (); int main () { return Py_Initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: No additional path to python library needed" >&5 $as_echo "$as_me: No additional path to python library needed" >&6;} ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will used cached value as_ac_Lib=`$as_echo "ac_cv_lib_python$ALTPYTHON_VERSION''_Py_Finalize" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Py_Finalize in -lpython$ALTPYTHON_VERSION" >&5 $as_echo_n "checking for Py_Finalize in -lpython$ALTPYTHON_VERSION... 
" >&6; } if eval \${$as_ac_Lib+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpython$ALTPYTHON_VERSION $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char Py_Finalize (); int main () { return Py_Finalize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$as_ac_Lib=yes" else eval "$as_ac_Lib=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi eval ac_res=\$$as_ac_Lib { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Adding path to python library" >&5 $as_echo "$as_me: Adding path to python library" >&6;} ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS" else ALTPYTHON_LIBS="" fi fi LDFLAGS=$SAVE_LDFLAGS else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi else ALTPYTHON_CFLAGS=$pkg_cv_ALTPYTHON_CFLAGS ALTPYTHON_LIBS=$pkg_cv_ALTPYTHON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` fi ALTPYTHON_EXT_SUFFIX=`$ALTPYTHON -c "from distutils import sysconfig; v = sysconfig.get_config_vars(); print(v.get('EXT_SUFFIX', v.get('SO')))" | sed s/None//` # Check whether --with-altpython-site-arch was given. if test "${with_altpython_site_arch+set}" = set; then : withval=$with_altpython_site_arch; fi if test "X$ALTPYTHON_SITE_ARCH" = "X"; then ALTPYTHON_SITE_ARCH=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` fi # Check whether --with-altpython-site-lib was given. 
if test "${with_altpython_site_lib+set}" = set; then : withval=$with_altpython_site_lib; fi if test "X$ALTPYTHON_SITE_LIB" = "X"; then ALTPYTHON_SITE_LIB=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` fi SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $ALTPYTHON_LIBS" CPPFLAGS="$CPPFLAGS $ALTPYTHON_CFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "Python.h" "ac_cv_header_Python_h" "$ac_includes_default" if test "x$ac_cv_header_Python_h" = xyes; then : altpythonh="yes" else altpythonh="no" fi LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$ALTPYTHON" = "X"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing alternative Python - skipping alternative Python" >&5 $as_echo "$as_me: Missing alternative Python - skipping alternative Python" >&6;} enables_altpython=no elif test "X$ALTPYTHON_LIBS" = "X"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing alternative Python library - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing alternative Python library - skipping alternative Python bindings" >&6;} enables_altpython=no elif test "X$altpythonh" != "Xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing alternative Python header - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing alternative Python header - skipping alternative Python bindings" >&6;} enables_altpython=no elif test "X$ALTPYTHON_SITE_ARCH" = "X" || test "X$ALTPYTHON_SITE_LIB" = "X"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing python site packages location - skipping Python bindings" >&5 $as_echo "$as_me: Missing python site packages location - skipping Python bindings" >&6;} enables_altpython=no else { $as_echo "$as_me:${as_lineno-$LINENO}: Alternative Python available: $ALTPYTHON_VERSION" >&5 $as_echo "$as_me: Alternative Python available: $ALTPYTHON_VERSION" >&6;} fi if test "x$enables_altpython" != "xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing alternative Python - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing alternative Python - skipping alternative Python bindings" >&6;} enables_altpython=no elif ! test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: Missing pre-compiled Python wrapper and SWIG - skipping alternative Python bindings" >&5 $as_echo "$as_me: Missing pre-compiled Python wrapper and SWIG - skipping alternative Python bindings" >&6;} enables_altpython=no fi fi else enables_altpython=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: Alternative Python enabled: $enables_altpython" >&5 $as_echo "$as_me: Alternative Python enabled: $enables_altpython" >&6;} if test "x$enables_altpython" = "xyes"; then ALTPYTHON_ENABLED_TRUE= ALTPYTHON_ENABLED_FALSE='#' else ALTPYTHON_ENABLED_TRUE='#' ALTPYTHON_ENABLED_FALSE= fi if test "x$enables_altpython" = "xyes" && test "x$ALTPYTHON_MAJOR" = "x3"; then ALTPYTHON3_TRUE= ALTPYTHON3_FALSE='#' else ALTPYTHON3_TRUE='#' ALTPYTHON3_FALSE= fi # check for pylint if test "$enables_hed" = "yes"; then # Check whether --enable-pylint was given. if test "${enable_pylint+set}" = set; then : enableval=$enable_pylint; enables_pylint=$enableval fi if test "$enables_pylint" = "yes"; then for ac_prog in pylint do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_PYLINT+:} false; then : $as_echo_n "(cached) " >&6 else case $PYLINT in [\\/]* | ?:[\\/]*) ac_cv_path_PYLINT="$PYLINT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PYLINT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYLINT=$ac_cv_path_PYLINT if test -n "$PYLINT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYLINT" >&5 $as_echo "$PYLINT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PYLINT" && break done if test "x$PYLINT" = "x"; then enables_pylint="no" else PYLINT_VERSION=`$PYLINT --version 2> /dev/null | sed -n 's/^pylint \([0-9.]*\).*/\1/p'` # Check if pylint supports the following arguments, otherwise disable pylint (python example checking). # Do not generate report # Disable convention and recommendation messages - we are only interested in fatals, errors and warnings. PYLINT_ARGS="--reports=no --disable=C,R" if $PYLINT $PYLINT_ARGS /dev/null > /dev/null 2>&1 ; then { $as_echo "$as_me:${as_lineno-$LINENO}: pylint version $PYLINT_VERSION found - version ok" >&5 $as_echo "$as_me: pylint version $PYLINT_VERSION found - version ok" >&6;} enables_pylint="yes" else { $as_echo "$as_me:${as_lineno-$LINENO}: pylint version $PYLINT_VERSION found - bad version" >&5 $as_echo "$as_me: pylint version $PYLINT_VERSION found - bad version" >&6;} enables_pylint="no" PYLINT_ARGS="" fi fi # Check if the --disable=W0221 option is supported # W0221: Disable arguments differ messages since Swig uses tuple syntax (*args). if test "$enables_pylint" = "yes"; then PYLINT_ARGS_ARGUMENTS_DIFFER="--disable=W0221" if ! $PYLINT $PYLINT_ARGS $PYLINT_ARGS_ARGUMENTS_DIFFER /dev/null > /dev/null 2>&1 ; then PYLINT_ARGS_ARGUMENTS_DIFFER="" fi fi fi fi if test "x$enables_pylint" = "xyes"; then PYLINT_ENABLED_TRUE= PYLINT_ENABLED_FALSE='#' else PYLINT_ENABLED_TRUE='#' PYLINT_ENABLED_FALSE= fi { $as_echo "$as_me:${as_lineno-$LINENO}: Python example checking with pylint enabled: $enables_pylint" >&5 $as_echo "$as_me: Python example checking with pylint enabled: $enables_pylint" >&6;} # check systemd daemon integration # Check whether --enable-systemd was given. if test "${enable_systemd+set}" = set; then : enableval=$enable_systemd; enables_systemd="$enableval" fi if test "x$enables_systemd" = "xyes"; then systemd_daemon_save_LIBS=$LIBS LIBS= { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sd_listen_fds" >&5 $as_echo_n "checking for library containing sd_listen_fds... " >&6; } if ${ac_cv_search_sd_listen_fds+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char sd_listen_fds (); int main () { return sd_listen_fds (); ; return 0; } _ACEOF for ac_lib in '' systemd systemd-daemon; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $systemd_daemon_save_LIBS $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_sd_listen_fds=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_sd_listen_fds+:} false; then : break fi done if ${ac_cv_search_sd_listen_fds+:} false; then : else ac_cv_search_sd_listen_fds=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sd_listen_fds" >&5 $as_echo "$ac_cv_search_sd_listen_fds" >&6; } ac_res=$ac_cv_search_sd_listen_fds if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" have_sd_listen_fds=yes else have_sd_listen_fds=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sd_notify" >&5 $as_echo_n "checking for library containing sd_notify... " >&6; } if ${ac_cv_search_sd_notify+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char sd_notify (); int main () { return sd_notify (); ; return 0; } _ACEOF for ac_lib in '' systemd systemd-daemon; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $systemd_daemon_save_LIBS $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_sd_notify=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_sd_notify+:} false; then : break fi done if ${ac_cv_search_sd_notify+:} false; then : else ac_cv_search_sd_notify=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sd_notify" >&5 $as_echo "$ac_cv_search_sd_notify" >&6; } ac_res=$ac_cv_search_sd_notify if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" have_sd_notify=yes else have_sd_notify=no fi for ac_header in systemd/sd-daemon.h do : ac_fn_c_check_header_mongrel "$LINENO" "systemd/sd-daemon.h" "ac_cv_header_systemd_sd_daemon_h" "$ac_includes_default" if test "x$ac_cv_header_systemd_sd_daemon_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_SYSTEMD_SD_DAEMON_H 1 _ACEOF have_systemd_sd_daemon_h=yes else have_systemd_sd_daemon_h=no fi done if test x"$have_sd_listen_fds" = x"yes" && \ test x"$have_sd_notify" = x"yes" && \ test x"$have_systemd_sd_daemon_h" = x"yes"; then $as_echo "#define HAVE_SYSTEMD_DAEMON 1" >>confdefs.h SYSTEMD_DAEMON_LIBS=$LIBS else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "--enable-systemd was given, but test for systemd libraries had failed See \`config.log' for more details" "$LINENO" 5; } fi LIBS=$systemd_daemon_save_LIBS fi # check glibmm # check for API version 2.68 first, then API version 2.4 if test "$enables_hed" = "yes"; then "$PKG_CONFIG" glibmm-2.68 if test "$?" = '1'; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLIBMM" >&5 $as_echo_n "checking for GLIBMM... 
" >&6; } if test -n "$GLIBMM_CFLAGS"; then pkg_cv_GLIBMM_CFLAGS="$GLIBMM_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"giomm-2.4 glibmm-2.4\""; } >&5 ($PKG_CONFIG --exists --print-errors "giomm-2.4 glibmm-2.4") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLIBMM_CFLAGS=`$PKG_CONFIG --cflags "giomm-2.4 glibmm-2.4" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLIBMM_LIBS"; then pkg_cv_GLIBMM_LIBS="$GLIBMM_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"giomm-2.4 glibmm-2.4\""; } >&5 ($PKG_CONFIG --exists --print-errors "giomm-2.4 glibmm-2.4") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLIBMM_LIBS=`$PKG_CONFIG --libs "giomm-2.4 glibmm-2.4" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLIBMM_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "giomm-2.4 glibmm-2.4" 2>&1` else GLIBMM_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "giomm-2.4 glibmm-2.4" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLIBMM_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (giomm-2.4 glibmm-2.4) were not met: $GLIBMM_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else GLIBMM_CFLAGS=$pkg_cv_GLIBMM_CFLAGS GLIBMM_LIBS=$pkg_cv_GLIBMM_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi else pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLIBMM" >&5 $as_echo_n "checking for GLIBMM... " >&6; } if test -n "$GLIBMM_CFLAGS"; then pkg_cv_GLIBMM_CFLAGS="$GLIBMM_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"giomm-2.68 glibmm-2.68\""; } >&5 ($PKG_CONFIG --exists --print-errors "giomm-2.68 glibmm-2.68") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLIBMM_CFLAGS=`$PKG_CONFIG --cflags "giomm-2.68 glibmm-2.68" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLIBMM_LIBS"; then pkg_cv_GLIBMM_LIBS="$GLIBMM_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"giomm-2.68 glibmm-2.68\""; } >&5 ($PKG_CONFIG --exists --print-errors "giomm-2.68 glibmm-2.68") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLIBMM_LIBS=`$PKG_CONFIG --libs "giomm-2.68 glibmm-2.68" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLIBMM_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "giomm-2.68 glibmm-2.68" 2>&1` else GLIBMM_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "giomm-2.68 glibmm-2.68" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLIBMM_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (giomm-2.68 glibmm-2.68) were not met: $GLIBMM_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables GLIBMM_CFLAGS and GLIBMM_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else GLIBMM_CFLAGS=$pkg_cv_GLIBMM_CFLAGS GLIBMM_LIBS=$pkg_cv_GLIBMM_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi $as_echo "#define HAVE_GLIBMM_268 1" >>confdefs.h fi SAVE_CPPFLAGS=$CPPFLAGS ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu CPPFLAGS="$CPPFLAGS $GLIBMM_CFLAGS" ac_fn_cxx_check_header_mongrel "$LINENO" "glibmm/optioncontext.h" "ac_cv_header_glibmm_optioncontext_h" "$ac_includes_default" if test "x$ac_cv_header_glibmm_optioncontext_h" = xyes; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { Glib::OptionContext ctx; ctx.set_summary("summary") ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: using glibmm command line parsing" >&5 $as_echo "$as_me: using glibmm command line parsing" >&6;} else { $as_echo "$as_me:${as_lineno-$LINENO}: using getopt_long command line parsing" >&5 $as_echo "$as_me: using getopt_long command line parsing" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { Glib::OptionContext ctx; ctx.get_help(); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP 1" >>confdefs.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { Glib::ModuleFlags flags = Glib::MODULE_BIND_LOCAL; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : glibmm_bind_local=yes else glibmm_bind_local=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_bind_local" = yes; then $as_echo "#define HAVE_GLIBMM_BIND_LOCAL 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: glibmm has no way to limit scope of symbols of shared libraries. Make sure external libraries used by plugins have no conflicting symbols. HINT: use Globus compiled against system OpenSSL library." >&5 $as_echo "$as_me: WARNING: glibmm has no way to limit scope of symbols of shared libraries. Make sure external libraries used by plugins have no conflicting symbols. HINT: use Globus compiled against system OpenSSL library." >&6;} fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { Glib::getenv(""); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : glibmm_getenv=yes else glibmm_getenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_getenv" = yes; then $as_echo "#define HAVE_GLIBMM_GETENV 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: glibmm has no support for getenv. Usage of libc getenv is unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for getenv. Usage of libc getenv is unsafe in multi-threaded applications." >&6;} fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { Glib::setenv("", ""); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : glibmm_setenv=yes else glibmm_setenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_setenv" = yes; then $as_echo "#define HAVE_GLIBMM_SETENV 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: glibmm has no support for setenv. Usage of libc setenv may be unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for setenv. Usage of libc setenv may be unsafe in multi-threaded applications." >&6;} fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <glibmm/miscutils.h> int main () { Glib::unsetenv(""); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : glibmm_unsetenv=yes else glibmm_unsetenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_unsetenv" = yes; then $as_echo "#define HAVE_GLIBMM_UNSETENV 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: glibmm has no support for unsetenv. Usage of libc unsetenv may be unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for unsetenv. Usage of libc unsetenv may be unsafe in multi-threaded applications." >&6;} fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <glibmm/miscutils.h> int main () { Glib::listenv(); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : glibmm_listenv=yes else glibmm_listenv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test "$glibmm_listenv" = yes; then $as_echo "#define HAVE_GLIBMM_LISTENV 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: glibmm has no support for listenv. Usage of libc environ is unsafe in multi-threaded applications." >&5 $as_echo "$as_me: WARNING: glibmm has no support for listenv. Usage of libc environ is unsafe in multi-threaded applications." >&6;} fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CPPFLAGS=$SAVE_CPPFLAGS fi # check libxml if test "$enables_hed" = "yes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBXML2" >&5 $as_echo_n "checking for LIBXML2... " >&6; } if test -n "$LIBXML2_CFLAGS"; then pkg_cv_LIBXML2_CFLAGS="$LIBXML2_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libxml-2.0 >= 2.4.0\""; } >&5 ($PKG_CONFIG --exists --print-errors "libxml-2.0 >= 2.4.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBXML2_CFLAGS=`$PKG_CONFIG --cflags "libxml-2.0 >= 2.4.0" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBXML2_LIBS"; then pkg_cv_LIBXML2_LIBS="$LIBXML2_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libxml-2.0 >= 2.4.0\""; } >&5 ($PKG_CONFIG --exists --print-errors "libxml-2.0 >= 2.4.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBXML2_LIBS=`$PKG_CONFIG --libs "libxml-2.0 >= 2.4.0" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBXML2_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libxml-2.0 >= 2.4.0" 2>&1` else LIBXML2_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libxml-2.0 >= 2.4.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBXML2_PKG_ERRORS" >&5 as_fn_error $?
"Package requirements (libxml-2.0 >= 2.4.0) were not met: $LIBXML2_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables LIBXML2_CFLAGS and LIBXML2_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBXML2_CFLAGS and LIBXML2_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else LIBXML2_CFLAGS=$pkg_cv_LIBXML2_CFLAGS LIBXML2_LIBS=$pkg_cv_LIBXML2_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi fi # check openssl if test "$enables_hed" = "yes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for OPENSSL" >&5 $as_echo_n "checking for OPENSSL... " >&6; } if test -n "$OPENSSL_CFLAGS"; then pkg_cv_OPENSSL_CFLAGS="$OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"openssl >= 1.1.1\""; } >&5 ($PKG_CONFIG --exists --print-errors "openssl >= 1.1.1") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "openssl >= 1.1.1" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$OPENSSL_LIBS"; then pkg_cv_OPENSSL_LIBS="$OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"openssl >= 1.1.1\""; } >&5 ($PKG_CONFIG --exists --print-errors "openssl >= 1.1.1") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_OPENSSL_LIBS=`$PKG_CONFIG --libs "openssl >= 1.1.1" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "openssl >= 1.1.1" 2>&1` else OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "openssl >= 1.1.1" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$OPENSSL_PKG_ERRORS" >&5 as_fn_error $? "OpenSSL not found or is pre-1.1.1" "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? 
"OpenSSL not found or is pre-1.1.1" "$LINENO" 5 else OPENSSL_CFLAGS=$pkg_cv_OPENSSL_CFLAGS OPENSSL_LIBS=$pkg_cv_OPENSSL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } OPENSSL_CFLAGS="$OPENSSL_CFLAGS -DOPENSSL_API_COMPAT=0x10101000L" { $as_echo "$as_me:${as_lineno-$LINENO}: Forcing off deprecated functions for OpenSSL" >&5 $as_echo "$as_me: Forcing off deprecated functions for OpenSSL" >&6;} fi fi # Check for available *_method functions in OpenSSL SAVE_CPPFLAGS=$CPPFLAGS SAVE_LIBS=$LIBS CPPFLAGS="$CPPFLAGS $OPENSSL_CFLAGS" LIBS="$LIBS $OPENSSL_LIBS" ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include void _test(void) { (void)SSLv3_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_SSLV3_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No SSLv3_method function avialable" >&5 $as_echo "$as_me: No SSLv3_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include void _test(void) { (void)TLSv1_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_TLSV1_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No TLSv1_method function avialable" >&5 $as_echo "$as_me: No TLSv1_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include void _test(void) { (void)TLSv1_1_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_TLSV1_1_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No TLSv1_1_method function avialable" >&5 $as_echo "$as_me: No TLSv1_1_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include void _test(void) { (void)TLSv1_2_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_TLSV1_2_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No TLSv1_2_method function avialable" >&5 $as_echo "$as_me: No TLSv1_2_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include void _test(void) { (void)TLS_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_TLS_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No TLS_method function avialable" >&5 $as_echo "$as_me: No TLS_method function avialable" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <openssl/ssl.h> void _test(void) { (void)DTLSv1_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_DTLSV1_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No DTLSv1_method function available" >&5 $as_echo "$as_me: No DTLSv1_method function available" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <openssl/ssl.h> void _test(void) { (void)DTLSv1_2_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_DTLSV1_2_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No DTLSv1_2_method function available" >&5 $as_echo "$as_me: No DTLSv1_2_method function available" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <openssl/ssl.h> void _test(void) { (void)DTLS_method(); } int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : $as_echo "#define HAVE_DTLS_METHOD 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: No DTLS_method function available" >&5 $as_echo "$as_me: No DTLS_method function available" >&6;} fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CPPFLAGS=$SAVE_CPPFLAGS LIBS=$SAVE_LIBS #check mozilla nss enables_nss=yes NSS_INSTALLED=no # Check whether --enable-nss was given. if test "${enable_nss+set}" = set; then : enableval=$enable_nss; enables_nss="$enableval" fi if test "$enables_nss" = "yes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for NSS" >&5 $as_echo_n "checking for NSS... " >&6; } if test -n "$NSS_CFLAGS"; then pkg_cv_NSS_CFLAGS="$NSS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"nss >= 3.10\""; } >&5 ($PKG_CONFIG --exists --print-errors "nss >= 3.10") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_NSS_CFLAGS=`$PKG_CONFIG --cflags "nss >= 3.10" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$NSS_LIBS"; then pkg_cv_NSS_LIBS="$NSS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"nss >= 3.10\""; } >&5 ($PKG_CONFIG --exists --print-errors "nss >= 3.10") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_NSS_LIBS=`$PKG_CONFIG --libs "nss >= 3.10" 2>/dev/null` test "x$?"
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then NSS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "nss >= 3.10" 2>&1` else NSS_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "nss >= 3.10" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$NSS_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Cannot locate nss lib" >&5 $as_echo "$as_me: WARNING: Cannot locate nss lib" >&2;} NSS_INSTALLED=no enables_nss=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Cannot locate nss lib" >&5 $as_echo "$as_me: WARNING: Cannot locate nss lib" >&2;} NSS_INSTALLED=no enables_nss=no else NSS_CFLAGS=$pkg_cv_NSS_CFLAGS NSS_LIBS=$pkg_cv_NSS_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } NSS_INSTALLED=yes fi if test "x$NSS_INSTALLED" = "xyes" ; then $as_echo "#define HAVE_NSS 1" >>confdefs.h fi fi if test x$NSS_INSTALLED = xyes; then NSS_ENABLED_TRUE= NSS_ENABLED_FALSE='#' else NSS_ENABLED_TRUE='#' NSS_ENABLED_FALSE= fi #check SQLite SQLITE_INSTALLED=no pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SQLITE" >&5 $as_echo_n "checking for SQLITE... " >&6; } if test -n "$SQLITE_CFLAGS"; then pkg_cv_SQLITE_CFLAGS="$SQLITE_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"sqlite3 >= 3.6\""; } >&5 ($PKG_CONFIG --exists --print-errors "sqlite3 >= 3.6") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_SQLITE_CFLAGS=`$PKG_CONFIG --cflags "sqlite3 >= 3.6" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$SQLITE_LIBS"; then pkg_cv_SQLITE_LIBS="$SQLITE_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"sqlite3 >= 3.6\""; } >&5 ($PKG_CONFIG --exists --print-errors "sqlite3 >= 3.6") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_SQLITE_LIBS=`$PKG_CONFIG --libs "sqlite3 >= 3.6" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then SQLITE_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "sqlite3 >= 3.6" 2>&1` else SQLITE_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "sqlite3 >= 3.6" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$SQLITE_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Cannot locate SQLite newer than 3.6" >&5 $as_echo "$as_me: WARNING: Cannot locate SQLite newer than 3.6" >&2;} SQLITE_INSTALLED=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Cannot locate SQLite newer than 3.6" >&5 $as_echo "$as_me: WARNING: Cannot locate SQLite newer than 3.6" >&2;} SQLITE_INSTALLED=no else SQLITE_CFLAGS=$pkg_cv_SQLITE_CFLAGS SQLITE_LIBS=$pkg_cv_SQLITE_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SQLITE_INSTALLED=yes fi if test "x$SQLITE_INSTALLED" = "xyes" ; then $as_echo "#define HAVE_SQLITE 1" >>confdefs.h # Check for function available since 3.8 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $SQLITE_CFLAGS" LIBS="$LIBS $SQLITE_LIBS" for ac_func in sqlite3_errstr do : ac_fn_c_check_func "$LINENO" "sqlite3_errstr" "ac_cv_func_sqlite3_errstr" if test "x$ac_cv_func_sqlite3_errstr" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_SQLITE3_ERRSTR 1 _ACEOF fi done CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS fi if test x$SQLITE_INSTALLED = xyes; then SQLITE_ENABLED_TRUE= SQLITE_ENABLED_FALSE='#' else SQLITE_ENABLED_TRUE='#' SQLITE_ENABLED_FALSE= fi # check cppunit if test "$enables_hed" = "yes"; then # Check whether --enable-cppunit was given. if test "${enable_cppunit+set}" = set; then : enableval=$enable_cppunit; enables_cppunit=$enableval fi if test "$enables_cppunit" = "yes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CPPUNIT" >&5 $as_echo_n "checking for CPPUNIT... " >&6; } if test -n "$CPPUNIT_CFLAGS"; then pkg_cv_CPPUNIT_CFLAGS="$CPPUNIT_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"cppunit\""; } >&5 ($PKG_CONFIG --exists --print-errors "cppunit") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_CPPUNIT_CFLAGS=`$PKG_CONFIG --cflags "cppunit" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$CPPUNIT_LIBS"; then pkg_cv_CPPUNIT_LIBS="$CPPUNIT_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"cppunit\""; } >&5 ($PKG_CONFIG --exists --print-errors "cppunit") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_CPPUNIT_LIBS=`$PKG_CONFIG --libs "cppunit" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then CPPUNIT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "cppunit" 2>&1` else CPPUNIT_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "cppunit" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$CPPUNIT_PKG_ERRORS" >&5 # Extract the first word of "cppunit-config", so it can be a program name with args. set dummy cppunit-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_CPPUNIT_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $CPPUNIT_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_CPPUNIT_CONFIG="$CPPUNIT_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CPPUNIT_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CPPUNIT_CONFIG" && ac_cv_path_CPPUNIT_CONFIG="no" ;; esac fi CPPUNIT_CONFIG=$ac_cv_path_CPPUNIT_CONFIG if test -n "$CPPUNIT_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPPUNIT_CONFIG" >&5 $as_echo "$CPPUNIT_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$CPPUNIT_CONFIG" = "xno"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cppunit-config not found - no UNIT testing will be performed" >&5 $as_echo "$as_me: WARNING: cppunit-config not found - no UNIT testing will be performed" >&2;} CPPUNIT_CFLAGS= CPPUNIT_LIBS= enables_cppunit="no" else CPPUNIT_CFLAGS="`$CPPUNIT_CONFIG --cflags`" CPPUNIT_LIBS="`$CPPUNIT_CONFIG --libs`" fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } # Extract the first word of "cppunit-config", so it can be a program name with args. set dummy cppunit-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_CPPUNIT_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $CPPUNIT_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_CPPUNIT_CONFIG="$CPPUNIT_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CPPUNIT_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CPPUNIT_CONFIG" && ac_cv_path_CPPUNIT_CONFIG="no" ;; esac fi CPPUNIT_CONFIG=$ac_cv_path_CPPUNIT_CONFIG if test -n "$CPPUNIT_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPPUNIT_CONFIG" >&5 $as_echo "$CPPUNIT_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$CPPUNIT_CONFIG" = "xno"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cppunit-config not found - no UNIT testing will be performed" >&5 $as_echo "$as_me: WARNING: cppunit-config not found - no UNIT testing will be performed" >&2;} CPPUNIT_CFLAGS= CPPUNIT_LIBS= enables_cppunit="no" else CPPUNIT_CFLAGS="`$CPPUNIT_CONFIG --cflags`" CPPUNIT_LIBS="`$CPPUNIT_CONFIG --libs`" fi else CPPUNIT_CFLAGS=$pkg_cv_CPPUNIT_CFLAGS CPPUNIT_LIBS=$pkg_cv_CPPUNIT_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi if test "x$CPPUNIT_CONFIG" != "xno" || test "x$CPPUNIT_PKG_ERRORS" != "x" then TEST_DIR=test else enables_cppunit=no TEST_DIR= fi fi else enables_cppunit="no" fi # check ldns library if test "$enables_compute_client" = "yes"; then # Check whether --enable-ldns was given. if test "${enable_ldns+set}" = set; then : enableval=$enable_ldns; enables_ldns=$enableval fi if test "$enables_ldns" = "yes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LDNS" >&5 $as_echo_n "checking for LDNS... " >&6; } if test -n "$LDNS_CFLAGS"; then pkg_cv_LDNS_CFLAGS="$LDNS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"ldns\""; } >&5 ($PKG_CONFIG --exists --print-errors "ldns") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LDNS_CFLAGS=`$PKG_CONFIG --cflags "ldns" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LDNS_LIBS"; then pkg_cv_LDNS_LIBS="$LDNS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"ldns\""; } >&5 ($PKG_CONFIG --exists --print-errors "ldns") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LDNS_LIBS=`$PKG_CONFIG --libs "ldns" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LDNS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "ldns" 2>&1` else LDNS_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "ldns" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LDNS_PKG_ERRORS" >&5 # Extract the first word of "ldns-config", so it can be a program name with args. set dummy ldns-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_LDNS_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $LDNS_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_LDNS_CONFIG="$LDNS_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_LDNS_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LDNS_CONFIG" && ac_cv_path_LDNS_CONFIG="no" ;; esac fi LDNS_CONFIG=$ac_cv_path_LDNS_CONFIG if test -n "$LDNS_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LDNS_CONFIG" >&5 $as_echo "$LDNS_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$LDNS_CONFIG" = "xno"; then ac_fn_c_check_header_mongrel "$LINENO" "ldns/ldns.h" "ac_cv_header_ldns_ldns_h" "$ac_includes_default" if test "x$ac_cv_header_ldns_ldns_h" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldns_dname_new_frm_str in -lldns" >&5 $as_echo_n "checking for ldns_dname_new_frm_str in -lldns... " >&6; } if ${ac_cv_lib_ldns_ldns_dname_new_frm_str+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldns $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldns_dname_new_frm_str (); int main () { return ldns_dname_new_frm_str (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ldns_ldns_dname_new_frm_str=yes else ac_cv_lib_ldns_ldns_dname_new_frm_str=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldns_ldns_dname_new_frm_str" >&5 $as_echo "$ac_cv_lib_ldns_ldns_dname_new_frm_str" >&6; } if test "x$ac_cv_lib_ldns_ldns_dname_new_frm_str" = xyes; then : LDNS_CFLAGS="$LDNS_CFLAGS" LDNS_LIBS="$LDNS_LIBS -lldns" else enables_ldns="no" fi else enables_ldns="no" fi else LDNS_CFLAGS="`$LDNS_CONFIG --cflags`" LDNS_LIBS="`$LDNS_CONFIG --libs`" fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } # Extract the first word of "ldns-config", so it can be a program name with args. set dummy ldns-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_LDNS_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $LDNS_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_LDNS_CONFIG="$LDNS_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_LDNS_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_LDNS_CONFIG" && ac_cv_path_LDNS_CONFIG="no" ;; esac fi LDNS_CONFIG=$ac_cv_path_LDNS_CONFIG if test -n "$LDNS_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LDNS_CONFIG" >&5 $as_echo "$LDNS_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$LDNS_CONFIG" = "xno"; then ac_fn_c_check_header_mongrel "$LINENO" "ldns/ldns.h" "ac_cv_header_ldns_ldns_h" "$ac_includes_default" if test "x$ac_cv_header_ldns_ldns_h" = xyes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldns_dname_new_frm_str in -lldns" >&5 $as_echo_n "checking for ldns_dname_new_frm_str in -lldns... " >&6; } if ${ac_cv_lib_ldns_ldns_dname_new_frm_str+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lldns $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char ldns_dname_new_frm_str (); int main () { return ldns_dname_new_frm_str (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ldns_ldns_dname_new_frm_str=yes else ac_cv_lib_ldns_ldns_dname_new_frm_str=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldns_ldns_dname_new_frm_str" >&5 $as_echo "$ac_cv_lib_ldns_ldns_dname_new_frm_str" >&6; } if test "x$ac_cv_lib_ldns_ldns_dname_new_frm_str" = xyes; then : LDNS_CFLAGS="$LDNS_CFLAGS" LDNS_LIBS="$LDNS_LIBS -lldns" else enables_ldns="no" fi else enables_ldns="no" fi else LDNS_CFLAGS="`$LDNS_CONFIG --cflags`" LDNS_LIBS="`$LDNS_CONFIG --libs`" fi else LDNS_CFLAGS=$pkg_cv_LDNS_CFLAGS LDNS_LIBS=$pkg_cv_LDNS_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi if test "$enables_ldns" = "no"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: ldns library was not found. Compute clients will be built without ARCHERY support." >&5 $as_echo "$as_me: WARNING: ldns library was not found. Compute clients will be built without ARCHERY support." >&2;} fi fi else enables_ldns="no" fi if test "x$enables_ldns" = "xyes" ; then $as_echo "#define HAVE_LDNS 1" >>confdefs.h else LDNS_CFLAGS= LDNS_LIBS= fi if test "x$enables_ldns" = "xyes"; then LDNS_ENABLED_TRUE= LDNS_ENABLED_FALSE='#' else LDNS_ENABLED_TRUE='#' LDNS_ENABLED_FALSE= fi ############################## # # Check xmlsec1 # ############################# MACOSX="" case "${host}" in *darwin*) MACOSX="yes" ;; esac if test "x$MACOSX" = "xyes"; then $as_echo "#define _MACOSX 1" >>confdefs.h fi if test "x$MACOSX" = "xyes"; then MACOSX_TRUE= MACOSX_FALSE='#' else MACOSX_TRUE='#' MACOSX_FALSE= fi if test "$enables_hed" = "yes"; then XMLSEC_MIN_VERSION="1.2.4" XMLSEC_OPENSSL_MIN_VERSION="1.2.4" XMLSEC_CONFIG="${XMLSEC1_CONFIG:-xmlsec1-config}" XMLSEC_CFLAGS="" XMLSEC_LIBS="" XMLSEC_INSTALLED=no # Check whether --enable-xmlsec1 was given. 
if test "${enable_xmlsec1+set}" = set; then : enableval=$enable_xmlsec1; enables_xmlsec1=$enableval fi if test "x$enables_xmlsec1" = "xyes"; then # Check whether --with-xmlsec1 was given. if test "${with_xmlsec1+set}" = set; then : withval=$with_xmlsec1; fi if test "x$with_xmlsec1" = "x" ; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLSEC" >&5 $as_echo_n "checking for XMLSEC... " >&6; } if test -n "$XMLSEC_CFLAGS"; then pkg_cv_XMLSEC_CFLAGS="$XMLSEC_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_LIBS"; then pkg_cv_XMLSEC_LIBS="$XMLSEC_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_LIBS=`$PKG_CONFIG --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` else XMLSEC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_PKG_ERRORS" >&5 XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no else XMLSEC_CFLAGS=$pkg_cv_XMLSEC_CFLAGS XMLSEC_LIBS=$pkg_cv_XMLSEC_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi if test "x$XMLSEC_INSTALLED" = "xyes" ; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLSEC_OPENSSL" >&5 $as_echo_n "checking for XMLSEC_OPENSSL... " >&6; } if test -n "$XMLSEC_OPENSSL_CFLAGS"; then pkg_cv_XMLSEC_OPENSSL_CFLAGS="$XMLSEC_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_OPENSSL_LIBS"; then pkg_cv_XMLSEC_OPENSSL_LIBS="$XMLSEC_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_OPENSSL_LIBS=`$PKG_CONFIG --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` else XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_OPENSSL_PKG_ERRORS" >&5 XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no else XMLSEC_OPENSSL_CFLAGS=$pkg_cv_XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS=$pkg_cv_XMLSEC_OPENSSL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi fi # Find number of backslashes in XMLSEC_CFLAGS n=$(echo $XMLSEC_CFLAGS|sed 's/.*-DXMLSEC_CRYPTO=\([^ ]*\).*/\1/'|tr -d '[A-Za-z0-1\n"]'| wc -c) # Fixes due to bugs in pkg-config and/or xmlsec1 # # 0: Indicates a bug in pkg-config which removes the escaping of the quotes # 2: Correct value with escaped quotes # 6: Old xmlsec1 version which used 3 back-slashes to escape quotes # See eg. https://bugzilla.redhat.com/show_bug.cgi?id=675334 # Make sure that the quotes are escaped with single backslash if test $n = 0 -o $n = 6; then { $as_echo "$as_me:${as_lineno-$LINENO}: Working around bad combination of pkgconfig and xmlsec1 with $n back-slashes" >&5 $as_echo "$as_me: Working around bad combination of pkgconfig and xmlsec1 with $n back-slashes" >&6;} XMLSEC_CFLAGS=$(echo $XMLSEC_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([^ \\"]*\)\\*" \(.*\)/\1\\"\2\\" \3/') XMLSEC_OPENSSL_CFLAGS=$(echo $XMLSEC_OPENSSL_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([^ \\"]*\)\\*" \(.*\)/\1\\"\2\\" \3/') fi fi if test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" != "xyes"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xmlsec1 libraries >= $XMLSEC_MIN_VERSION" >&5 $as_echo_n "checking for xmlsec1 libraries >= $XMLSEC_MIN_VERSION... " >&6; } if test "x$with_xmlsec1" != "x" ; then XMLSEC_CONFIG=$with_xmlsec1/bin/$XMLSEC_CONFIG fi "$XMLSEC_CONFIG" --version 2>/dev/null 1>/dev/null if test "$?" != '0' ; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not find xmlsec1 anywhere; The xml security related functionality will not be compiled" >&5 $as_echo "$as_me: WARNING: Could not find xmlsec1 anywhere; The xml security related functionality will not be compiled" >&2;} else vers=`$XMLSEC_CONFIG --version 2>/dev/null | awk -F. 
'{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` minvers=`echo $XMLSEC_MIN_VERSION | awk -F. '{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` if test "$vers" -ge "$minvers" ; then XMLSEC_LIBS="`$XMLSEC_CONFIG --libs`" XMLSEC_CFLAGS="`$XMLSEC_CONFIG --cflags`" #check the xmlsec1-openssl here if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLSEC_OPENSSL" >&5 $as_echo_n "checking for XMLSEC_OPENSSL... " >&6; } if test -n "$XMLSEC_OPENSSL_CFLAGS"; then pkg_cv_XMLSEC_OPENSSL_CFLAGS="$XMLSEC_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_OPENSSL_LIBS"; then pkg_cv_XMLSEC_OPENSSL_LIBS="$XMLSEC_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_OPENSSL_LIBS=`$PKG_CONFIG --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` else XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_OPENSSL_PKG_ERRORS" >&5 XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no else XMLSEC_OPENSSL_CFLAGS=$pkg_cv_XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS=$pkg_cv_XMLSEC_OPENSSL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You need at least xmlsec1 $XMLSEC_MIN_VERSION for this version of arc" >&5 $as_echo "$as_me: WARNING: You need at least xmlsec1 $XMLSEC_MIN_VERSION for this version of arc" >&2;} fi fi elif test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" = "xyes"; then #MACOSX has no "ldd" which is needed by xmlsec1-config, so here simply we use PKG_CHECK_MODULES if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLSEC" >&5 $as_echo_n "checking for XMLSEC... " >&6; } if test -n "$XMLSEC_CFLAGS"; then pkg_cv_XMLSEC_CFLAGS="$XMLSEC_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_LIBS"; then pkg_cv_XMLSEC_LIBS="$XMLSEC_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1 >= \$XMLSEC_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1 >= $XMLSEC_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_LIBS=`$PKG_CONFIG --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` else XMLSEC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "xmlsec1 >= $XMLSEC_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_PKG_ERRORS" >&5 XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no else XMLSEC_CFLAGS=$pkg_cv_XMLSEC_CFLAGS XMLSEC_LIBS=$pkg_cv_XMLSEC_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi if test "x$XMLSEC_INSTALLED" = "xyes" ; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLSEC_OPENSSL" >&5 $as_echo_n "checking for XMLSEC_OPENSSL... " >&6; } if test -n "$XMLSEC_OPENSSL_CFLAGS"; then pkg_cv_XMLSEC_OPENSSL_CFLAGS="$XMLSEC_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$XMLSEC_OPENSSL_LIBS"; then pkg_cv_XMLSEC_OPENSSL_LIBS="$XMLSEC_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"xmlsec1-openssl >= \$XMLSEC_OPENSSL_MIN_VERSION\""; } >&5 ($PKG_CONFIG --exists --print-errors "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_XMLSEC_OPENSSL_LIBS=`$PKG_CONFIG --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` else XMLSEC_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$XMLSEC_OPENSSL_PKG_ERRORS" >&5 XMLSEC_INSTALLED=no elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } XMLSEC_INSTALLED=no else XMLSEC_OPENSSL_CFLAGS=$pkg_cv_XMLSEC_OPENSSL_CFLAGS XMLSEC_OPENSSL_LIBS=$pkg_cv_XMLSEC_OPENSSL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } XMLSEC_INSTALLED=yes fi fi fi #AC_SUBST(XMLSEC_CONFIG) #AC_SUBST(XMLSEC_MIN_VERSION) enables_xmlsec1="$XMLSEC_INSTALLED" fi else enables_xmlsec1="no" fi # Check monitor # Check whether --enable-monitor was given. if test "${enable_monitor+set}" = set; then : enableval=$enable_monitor; enables_monitor="$enableval" fi if test "x$enables_monitor" = "xyes"; then # Check whether --with-monitor was given. if test "${with_monitor+set}" = set; then : withval=$with_monitor; fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for monitor installation path" >&5 $as_echo_n "checking for monitor installation path... " >&6; } if test "x$with_monitor" != "x" ; then monitor_prefix=$with_monitor else monitor_prefix=${datadir}/arc/monitor fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $monitor_prefix" >&5 $as_echo "$monitor_prefix" >&6; } fi # check zlib ZLIB_CFLAGS= ZLIB_LDFLAGS= ZLIB_LIBS= if test "$enables_hed" = "yes"; then SAVE_CPPFLAGS=$CPPFLAGS SAVE_LDFLAGS=$LDFLAGS # Check whether --with-zlib was given. if test "${with_zlib+set}" = set; then : withval=$with_zlib; if test -d "$withval"; then ZLIB_CFLAGS="${CPPFLAGS} -I$withval/include" ZLIB_LDFLAGS="${LDFLAGS} -L$withval/lib" fi fi CPPFLAGS="$CPPFLAGS $ZLIB_CFLAGS" LDFLAGS="$LDFLAGS $ZLIB_LDFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" if test "x$ac_cv_header_zlib_h" = xyes; then : ZLIB_CFLAGS="$ZLIB_CFLAGS" else as_fn_error $? "unable to find zlib header files" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for deflateInit2_ in -lz" >&5 $as_echo_n "checking for deflateInit2_ in -lz... " >&6; } if ${ac_cv_lib_z_deflateInit2_+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char deflateInit2_ (); int main () { return deflateInit2_ (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_z_deflateInit2_=yes else ac_cv_lib_z_deflateInit2_=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_deflateInit2_" >&5 $as_echo "$ac_cv_lib_z_deflateInit2_" >&6; } if test "x$ac_cv_lib_z_deflateInit2_" = xyes; then : ZLIB_LIBS="$ZLIB_LDFLAGS -lz" else as_fn_error $? "unable to link with zlib library" "$LINENO" 5 fi CPPFLAGS=$SAVE_CPPFLAGS LDFLAGS=$SAVE_LDFLAGS fi # SQLITEJSTORE (storing job information in SQLite) # Check whether --enable-sqlitejstore was given. if test "${enable_sqlitejstore+set}" = set; then : enableval=$enable_sqlitejstore; enables_sqlitejstore=$enableval fi if test "$enables_sqlitejstore" = "yes"; then if test "x$SQLITE_INSTALLED" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: For storing jobs in SQLite install SQLite 3.6 or newer - disabling" >&5 $as_echo "$as_me: For storing jobs in SQLite install SQLite 3.6 or newer - disabling" >&6;} enables_sqlitejstore="no" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: Storing jobs in SQLite enabled: $enables_sqlitejstore" >&5 $as_echo "$as_me: Storing jobs in SQLite enabled: $enables_sqlitejstore" >&6;} if test "x$enables_sqlitejstore" = "xyes"; then SQLITEJSTORE_ENABLED_TRUE= SQLITEJSTORE_ENABLED_FALSE='#' else SQLITEJSTORE_ENABLED_TRUE='#' SQLITEJSTORE_ENABLED_FALSE= fi if test "x$enables_sqlitejstore" = "xyes"; then $as_echo "#define SQLITEJSTORE_ENABLED 1" >>confdefs.h fi # globus/gpt packages if test "$enables_hed" = "yes"; then if test "x$ac_cv_env_GLOBUS_MAKEFILE_HEADER_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}globus-makefile-header", so it can be a program name with args. set dummy ${ac_tool_prefix}globus-makefile-header; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GLOBUS_MAKEFILE_HEADER+:} false; then : $as_echo_n "(cached) " >&6 else case $GLOBUS_MAKEFILE_HEADER in [\\/]* | ?:[\\/]*) ac_cv_path_GLOBUS_MAKEFILE_HEADER="$GLOBUS_MAKEFILE_HEADER" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/opt/globus/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GLOBUS_MAKEFILE_HEADER="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi GLOBUS_MAKEFILE_HEADER=$ac_cv_path_GLOBUS_MAKEFILE_HEADER if test -n "$GLOBUS_MAKEFILE_HEADER"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GLOBUS_MAKEFILE_HEADER" >&5 $as_echo "$GLOBUS_MAKEFILE_HEADER" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_GLOBUS_MAKEFILE_HEADER"; then ac_pt_GLOBUS_MAKEFILE_HEADER=$GLOBUS_MAKEFILE_HEADER # Extract the first word of "globus-makefile-header", so it can be a program name with args. set dummy globus-makefile-header; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word...
" >&6; } if ${ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_GLOBUS_MAKEFILE_HEADER in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER="$ac_pt_GLOBUS_MAKEFILE_HEADER" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/opt/globus/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_GLOBUS_MAKEFILE_HEADER=$ac_cv_path_ac_pt_GLOBUS_MAKEFILE_HEADER if test -n "$ac_pt_GLOBUS_MAKEFILE_HEADER"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_GLOBUS_MAKEFILE_HEADER" >&5 $as_echo "$ac_pt_GLOBUS_MAKEFILE_HEADER" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_GLOBUS_MAKEFILE_HEADER" = x; then GLOBUS_MAKEFILE_HEADER="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GLOBUS_MAKEFILE_HEADER=$ac_pt_GLOBUS_MAKEFILE_HEADER fi else GLOBUS_MAKEFILE_HEADER="$ac_cv_path_GLOBUS_MAKEFILE_HEADER" fi fi if test -f "$GLOBUS_MAKEFILE_HEADER" && test "x$GLOBUS_LOCATION" = "x"; then GLOBUS_LOCATION=`dirname $GLOBUS_MAKEFILE_HEADER` GLOBUS_LOCATION=`dirname $GLOBUS_LOCATION` export GLOBUS_LOCATION fi if test "x$ac_cv_env_GPT_FLAVOR_CONFIGURATION_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gpt-flavor-configuration", so it can be a program name with args. set dummy ${ac_tool_prefix}gpt-flavor-configuration; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GPT_FLAVOR_CONFIGURATION+:} false; then : $as_echo_n "(cached) " >&6 else case $GPT_FLAVOR_CONFIGURATION in [\\/]* | ?:[\\/]*) ac_cv_path_GPT_FLAVOR_CONFIGURATION="$GPT_FLAVOR_CONFIGURATION" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GPT_FLAVOR_CONFIGURATION="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi GPT_FLAVOR_CONFIGURATION=$ac_cv_path_GPT_FLAVOR_CONFIGURATION if test -n "$GPT_FLAVOR_CONFIGURATION"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GPT_FLAVOR_CONFIGURATION" >&5 $as_echo "$GPT_FLAVOR_CONFIGURATION" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_GPT_FLAVOR_CONFIGURATION"; then ac_pt_GPT_FLAVOR_CONFIGURATION=$GPT_FLAVOR_CONFIGURATION # Extract the first word of "gpt-flavor-configuration", so it can be a program name with args. set dummy gpt-flavor-configuration; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_GPT_FLAVOR_CONFIGURATION in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION="$ac_pt_GPT_FLAVOR_CONFIGURATION" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_GPT_FLAVOR_CONFIGURATION=$ac_cv_path_ac_pt_GPT_FLAVOR_CONFIGURATION if test -n "$ac_pt_GPT_FLAVOR_CONFIGURATION"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_GPT_FLAVOR_CONFIGURATION" >&5 $as_echo "$ac_pt_GPT_FLAVOR_CONFIGURATION" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_GPT_FLAVOR_CONFIGURATION" = x; then GPT_FLAVOR_CONFIGURATION="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GPT_FLAVOR_CONFIGURATION=$ac_pt_GPT_FLAVOR_CONFIGURATION fi else GPT_FLAVOR_CONFIGURATION="$ac_cv_path_GPT_FLAVOR_CONFIGURATION" fi fi if test -f "$GPT_FLAVOR_CONFIGURATION" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_FLAVOR_CONFIGURATION` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpt flavor" >&5 $as_echo_n "checking for gpt flavor... " >&6; } # Check whether --with-flavor was given. if test "${with_flavor+set}" = set; then : withval=$with_flavor; GPT_FLAVOR=$withval else if test -n "$GPT_FLAVOR_CONFIGURATION" ; then GPT_FLAVOR=`$GPT_FLAVOR_CONFIGURATION | \\ grep '^[a-zA-Z].*:$' | cut -f1 -d: | grep thr | tail -1` fi fi if test -n "$GPT_FLAVOR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GPT_FLAVOR" >&5 $as_echo "$GPT_FLAVOR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none detected, is globus_core-devel installed?" >&5 $as_echo "none detected, is globus_core-devel installed?" >&6; } fi if test "x$ac_cv_env_GPT_QUERY_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gpt-query", so it can be a program name with args. set dummy ${ac_tool_prefix}gpt-query; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_GPT_QUERY+:} false; then : $as_echo_n "(cached) " >&6 else case $GPT_QUERY in [\\/]* | ?:[\\/]*) ac_cv_path_GPT_QUERY="$GPT_QUERY" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_GPT_QUERY="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi GPT_QUERY=$ac_cv_path_GPT_QUERY if test -n "$GPT_QUERY"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GPT_QUERY" >&5 $as_echo "$GPT_QUERY" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_GPT_QUERY"; then ac_pt_GPT_QUERY=$GPT_QUERY # Extract the first word of "gpt-query", so it can be a program name with args. set dummy gpt-query; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ac_pt_GPT_QUERY+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_GPT_QUERY in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_GPT_QUERY="$ac_pt_GPT_QUERY" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/sbin:/opt/gpt/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_GPT_QUERY="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_GPT_QUERY=$ac_cv_path_ac_pt_GPT_QUERY if test -n "$ac_pt_GPT_QUERY"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_GPT_QUERY" >&5 $as_echo "$ac_pt_GPT_QUERY" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_GPT_QUERY" = x; then GPT_QUERY="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac GPT_QUERY=$ac_pt_GPT_QUERY fi else GPT_QUERY="$ac_cv_path_GPT_QUERY" fi fi if test -f "$GPT_QUERY" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_QUERY` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_COMMON" >&5 $as_echo_n "checking for GLOBUS_COMMON... " >&6; } if test -n "$GLOBUS_COMMON_CFLAGS"; then pkg_cv_GLOBUS_COMMON_CFLAGS="$GLOBUS_COMMON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-common\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-common") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_COMMON_CFLAGS=`$PKG_CONFIG --cflags "globus-common" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_COMMON_LIBS"; then pkg_cv_GLOBUS_COMMON_LIBS="$GLOBUS_COMMON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-common\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-common") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_COMMON_LIBS=`$PKG_CONFIG --libs "globus-common" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_COMMON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-common" 2>&1` else GLOBUS_COMMON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-common" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_COMMON_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_common" >&5 $as_echo_n "checking for globus_common... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_common_version=`$GPT_QUERY globus_common-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_common_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_common | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_common_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_common_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_common_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_common_version" >&5 $as_echo "$gpt_cv_globus_common_version" >&6; } GLOBUS_COMMON_VERSION=$gpt_cv_globus_common_version GLOBUS_COMMON_LIBS=$gpt_cv_globus_common_libs GLOBUS_COMMON_CFLAGS=$gpt_cv_globus_common_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_common" >&5 $as_echo_n "checking for globus_common... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_common_version=`$GPT_QUERY globus_common-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_common_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_common | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_common_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_common_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_common_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_common_version" >&5 $as_echo "$gpt_cv_globus_common_version" >&6; } GLOBUS_COMMON_VERSION=$gpt_cv_globus_common_version GLOBUS_COMMON_LIBS=$gpt_cv_globus_common_libs GLOBUS_COMMON_CFLAGS=$gpt_cv_globus_common_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_COMMON_CFLAGS=$pkg_cv_GLOBUS_COMMON_CFLAGS GLOBUS_COMMON_LIBS=$pkg_cv_GLOBUS_COMMON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_COMMON_VERSION=`$PKG_CONFIG --modversion globus-common` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_GSSAPI_GSI" >&5 $as_echo_n "checking for GLOBUS_GSSAPI_GSI... 
" >&6; } if test -n "$GLOBUS_GSSAPI_GSI_CFLAGS"; then pkg_cv_GLOBUS_GSSAPI_GSI_CFLAGS="$GLOBUS_GSSAPI_GSI_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gssapi-gsi\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gssapi-gsi") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSSAPI_GSI_CFLAGS=`$PKG_CONFIG --cflags "globus-gssapi-gsi" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSSAPI_GSI_LIBS"; then pkg_cv_GLOBUS_GSSAPI_GSI_LIBS="$GLOBUS_GSSAPI_GSI_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gssapi-gsi\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gssapi-gsi") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSSAPI_GSI_LIBS=`$PKG_CONFIG --libs "globus-gssapi-gsi" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSSAPI_GSI_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-gssapi-gsi" 2>&1` else GLOBUS_GSSAPI_GSI_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-gssapi-gsi" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSSAPI_GSI_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gssapi_gsi" >&5 $as_echo_n "checking for globus_gssapi_gsi... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gssapi_gsi_version=`$GPT_QUERY globus_gssapi_gsi-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gssapi_gsi | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gssapi_gsi_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gssapi_gsi_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gssapi_gsi_version" >&5 $as_echo "$gpt_cv_globus_gssapi_gsi_version" >&6; } GLOBUS_GSSAPI_GSI_VERSION=$gpt_cv_globus_gssapi_gsi_version GLOBUS_GSSAPI_GSI_LIBS=$gpt_cv_globus_gssapi_gsi_libs GLOBUS_GSSAPI_GSI_CFLAGS=$gpt_cv_globus_gssapi_gsi_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gssapi_gsi" >&5 $as_echo_n "checking for globus_gssapi_gsi... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gssapi_gsi_version=`$GPT_QUERY globus_gssapi_gsi-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gssapi_gsi | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gssapi_gsi_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gssapi_gsi_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gssapi_gsi_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gssapi_gsi_version" >&5 $as_echo "$gpt_cv_globus_gssapi_gsi_version" >&6; } GLOBUS_GSSAPI_GSI_VERSION=$gpt_cv_globus_gssapi_gsi_version GLOBUS_GSSAPI_GSI_LIBS=$gpt_cv_globus_gssapi_gsi_libs GLOBUS_GSSAPI_GSI_CFLAGS=$gpt_cv_globus_gssapi_gsi_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSSAPI_GSI_CFLAGS=$pkg_cv_GLOBUS_GSSAPI_GSI_CFLAGS GLOBUS_GSSAPI_GSI_LIBS=$pkg_cv_GLOBUS_GSSAPI_GSI_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSSAPI_GSI_VERSION=`$PKG_CONFIG --modversion globus-gssapi-gsi` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_GSS_ASSIST" >&5 $as_echo_n "checking for GLOBUS_GSS_ASSIST... " >&6; } if test -n "$GLOBUS_GSS_ASSIST_CFLAGS"; then pkg_cv_GLOBUS_GSS_ASSIST_CFLAGS="$GLOBUS_GSS_ASSIST_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gss-assist\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gss-assist") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSS_ASSIST_CFLAGS=`$PKG_CONFIG --cflags "globus-gss-assist" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSS_ASSIST_LIBS"; then pkg_cv_GLOBUS_GSS_ASSIST_LIBS="$GLOBUS_GSS_ASSIST_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gss-assist\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gss-assist") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSS_ASSIST_LIBS=`$PKG_CONFIG --libs "globus-gss-assist" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSS_ASSIST_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-gss-assist" 2>&1` else GLOBUS_GSS_ASSIST_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-gss-assist" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSS_ASSIST_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gss_assist" >&5 $as_echo_n "checking for globus_gss_assist... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gss_assist_version=`$GPT_QUERY globus_gss_assist-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gss_assist_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gss_assist | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gss_assist_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gss_assist_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gss_assist_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gss_assist_version" >&5 $as_echo "$gpt_cv_globus_gss_assist_version" >&6; } GLOBUS_GSS_ASSIST_VERSION=$gpt_cv_globus_gss_assist_version GLOBUS_GSS_ASSIST_LIBS=$gpt_cv_globus_gss_assist_libs GLOBUS_GSS_ASSIST_CFLAGS=$gpt_cv_globus_gss_assist_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gss_assist" >&5 $as_echo_n "checking for globus_gss_assist... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gss_assist_version=`$GPT_QUERY globus_gss_assist-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gss_assist_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gss_assist | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gss_assist_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gss_assist_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gss_assist_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gss_assist_version" >&5 $as_echo "$gpt_cv_globus_gss_assist_version" >&6; } GLOBUS_GSS_ASSIST_VERSION=$gpt_cv_globus_gss_assist_version GLOBUS_GSS_ASSIST_LIBS=$gpt_cv_globus_gss_assist_libs GLOBUS_GSS_ASSIST_CFLAGS=$gpt_cv_globus_gss_assist_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSS_ASSIST_CFLAGS=$pkg_cv_GLOBUS_GSS_ASSIST_CFLAGS GLOBUS_GSS_ASSIST_LIBS=$pkg_cv_GLOBUS_GSS_ASSIST_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSS_ASSIST_VERSION=`$PKG_CONFIG --modversion globus-gss-assist` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_GSI_CALLBACK" >&5 $as_echo_n "checking for GLOBUS_GSI_CALLBACK... " >&6; } if test -n "$GLOBUS_GSI_CALLBACK_CFLAGS"; then pkg_cv_GLOBUS_GSI_CALLBACK_CFLAGS="$GLOBUS_GSI_CALLBACK_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gsi-callback\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-callback") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSI_CALLBACK_CFLAGS=`$PKG_CONFIG --cflags "globus-gsi-callback" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSI_CALLBACK_LIBS"; then pkg_cv_GLOBUS_GSI_CALLBACK_LIBS="$GLOBUS_GSI_CALLBACK_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gsi-callback\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-callback") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSI_CALLBACK_LIBS=`$PKG_CONFIG --libs "globus-gsi-callback" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSI_CALLBACK_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-gsi-callback" 2>&1` else GLOBUS_GSI_CALLBACK_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-gsi-callback" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSI_CALLBACK_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gsi_callback" >&5 $as_echo_n "checking for globus_gsi_callback... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_callback_version=`$GPT_QUERY globus_gsi_callback-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_callback_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_callback | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_callback_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_callback_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_callback_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gsi_callback_version" >&5 $as_echo "$gpt_cv_globus_gsi_callback_version" >&6; } GLOBUS_GSI_CALLBACK_VERSION=$gpt_cv_globus_gsi_callback_version GLOBUS_GSI_CALLBACK_LIBS=$gpt_cv_globus_gsi_callback_libs GLOBUS_GSI_CALLBACK_CFLAGS=$gpt_cv_globus_gsi_callback_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gsi_callback" >&5 $as_echo_n "checking for globus_gsi_callback... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_callback_version=`$GPT_QUERY globus_gsi_callback-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_callback_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_callback | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_callback_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_callback_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_callback_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gsi_callback_version" >&5 $as_echo "$gpt_cv_globus_gsi_callback_version" >&6; } GLOBUS_GSI_CALLBACK_VERSION=$gpt_cv_globus_gsi_callback_version GLOBUS_GSI_CALLBACK_LIBS=$gpt_cv_globus_gsi_callback_libs GLOBUS_GSI_CALLBACK_CFLAGS=$gpt_cv_globus_gsi_callback_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSI_CALLBACK_CFLAGS=$pkg_cv_GLOBUS_GSI_CALLBACK_CFLAGS GLOBUS_GSI_CALLBACK_LIBS=$pkg_cv_GLOBUS_GSI_CALLBACK_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSI_CALLBACK_VERSION=`$PKG_CONFIG --modversion globus-gsi-callback` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_FTP_CLIENT" >&5 $as_echo_n "checking for GLOBUS_FTP_CLIENT... " >&6; } if test -n "$GLOBUS_FTP_CLIENT_CFLAGS"; then pkg_cv_GLOBUS_FTP_CLIENT_CFLAGS="$GLOBUS_FTP_CLIENT_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-ftp-client\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-client") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_FTP_CLIENT_CFLAGS=`$PKG_CONFIG --cflags "globus-ftp-client" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_FTP_CLIENT_LIBS"; then pkg_cv_GLOBUS_FTP_CLIENT_LIBS="$GLOBUS_FTP_CLIENT_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-ftp-client\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-client") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_FTP_CLIENT_LIBS=`$PKG_CONFIG --libs "globus-ftp-client" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_FTP_CLIENT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-ftp-client" 2>&1` else GLOBUS_FTP_CLIENT_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-ftp-client" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_FTP_CLIENT_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_ftp_client" >&5 $as_echo_n "checking for globus_ftp_client... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_client_version=`$GPT_QUERY globus_ftp_client-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_client_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_client | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_client_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_client_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_client_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_ftp_client_version" >&5 $as_echo "$gpt_cv_globus_ftp_client_version" >&6; } GLOBUS_FTP_CLIENT_VERSION=$gpt_cv_globus_ftp_client_version GLOBUS_FTP_CLIENT_LIBS=$gpt_cv_globus_ftp_client_libs GLOBUS_FTP_CLIENT_CFLAGS=$gpt_cv_globus_ftp_client_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_ftp_client" >&5 $as_echo_n "checking for globus_ftp_client... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_client_version=`$GPT_QUERY globus_ftp_client-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_client_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_client | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_client_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_client_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_client_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_ftp_client_version" >&5 $as_echo "$gpt_cv_globus_ftp_client_version" >&6; } GLOBUS_FTP_CLIENT_VERSION=$gpt_cv_globus_ftp_client_version GLOBUS_FTP_CLIENT_LIBS=$gpt_cv_globus_ftp_client_libs GLOBUS_FTP_CLIENT_CFLAGS=$gpt_cv_globus_ftp_client_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_FTP_CLIENT_CFLAGS=$pkg_cv_GLOBUS_FTP_CLIENT_CFLAGS GLOBUS_FTP_CLIENT_LIBS=$pkg_cv_GLOBUS_FTP_CLIENT_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_FTP_CLIENT_VERSION=`$PKG_CONFIG --modversion globus-ftp-client` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_FTP_CONTROL" >&5 $as_echo_n "checking for GLOBUS_FTP_CONTROL... " >&6; } if test -n "$GLOBUS_FTP_CONTROL_CFLAGS"; then pkg_cv_GLOBUS_FTP_CONTROL_CFLAGS="$GLOBUS_FTP_CONTROL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-ftp-control\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-control") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_FTP_CONTROL_CFLAGS=`$PKG_CONFIG --cflags "globus-ftp-control" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_FTP_CONTROL_LIBS"; then pkg_cv_GLOBUS_FTP_CONTROL_LIBS="$GLOBUS_FTP_CONTROL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-ftp-control\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-ftp-control") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_FTP_CONTROL_LIBS=`$PKG_CONFIG --libs "globus-ftp-control" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_FTP_CONTROL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-ftp-control" 2>&1` else GLOBUS_FTP_CONTROL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-ftp-control" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_FTP_CONTROL_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_ftp_control" >&5 $as_echo_n "checking for globus_ftp_control... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_control_version=`$GPT_QUERY globus_ftp_control-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_control_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_control | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_control_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_control_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_control_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_ftp_control_version" >&5 $as_echo "$gpt_cv_globus_ftp_control_version" >&6; } GLOBUS_FTP_CONTROL_VERSION=$gpt_cv_globus_ftp_control_version GLOBUS_FTP_CONTROL_LIBS=$gpt_cv_globus_ftp_control_libs GLOBUS_FTP_CONTROL_CFLAGS=$gpt_cv_globus_ftp_control_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_ftp_control" >&5 $as_echo_n "checking for globus_ftp_control... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_ftp_control_version=`$GPT_QUERY globus_ftp_control-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_ftp_control_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_ftp_control | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_ftp_control_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_ftp_control_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_ftp_control_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_ftp_control_version" >&5 $as_echo "$gpt_cv_globus_ftp_control_version" >&6; } GLOBUS_FTP_CONTROL_VERSION=$gpt_cv_globus_ftp_control_version GLOBUS_FTP_CONTROL_LIBS=$gpt_cv_globus_ftp_control_libs GLOBUS_FTP_CONTROL_CFLAGS=$gpt_cv_globus_ftp_control_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_FTP_CONTROL_CFLAGS=$pkg_cv_GLOBUS_FTP_CONTROL_CFLAGS GLOBUS_FTP_CONTROL_LIBS=$pkg_cv_GLOBUS_FTP_CONTROL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_FTP_CONTROL_VERSION=`$PKG_CONFIG --modversion globus-ftp-control` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_IO" >&5 $as_echo_n "checking for GLOBUS_IO... " >&6; } if test -n "$GLOBUS_IO_CFLAGS"; then pkg_cv_GLOBUS_IO_CFLAGS="$GLOBUS_IO_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-io\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-io") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_IO_CFLAGS=`$PKG_CONFIG --cflags "globus-io" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_IO_LIBS"; then pkg_cv_GLOBUS_IO_LIBS="$GLOBUS_IO_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-io\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-io") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_IO_LIBS=`$PKG_CONFIG --libs "globus-io" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_IO_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-io" 2>&1` else GLOBUS_IO_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-io" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_IO_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_io" >&5 $as_echo_n "checking for globus_io... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_io_version=`$GPT_QUERY globus_io-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_io_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_io | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_io_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_io_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_io_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_io_version" >&5 $as_echo "$gpt_cv_globus_io_version" >&6; } GLOBUS_IO_VERSION=$gpt_cv_globus_io_version GLOBUS_IO_LIBS=$gpt_cv_globus_io_libs GLOBUS_IO_CFLAGS=$gpt_cv_globus_io_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_io" >&5 $as_echo_n "checking for globus_io... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_io_version=`$GPT_QUERY globus_io-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_io_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_io | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_io_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_io_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_io_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_io_version" >&5 $as_echo "$gpt_cv_globus_io_version" >&6; } GLOBUS_IO_VERSION=$gpt_cv_globus_io_version GLOBUS_IO_LIBS=$gpt_cv_globus_io_libs GLOBUS_IO_CFLAGS=$gpt_cv_globus_io_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_IO_CFLAGS=$pkg_cv_GLOBUS_IO_CFLAGS GLOBUS_IO_LIBS=$pkg_cv_GLOBUS_IO_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_IO_VERSION=`$PKG_CONFIG --modversion globus-io` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_GSI_CERT_UTILS" >&5 $as_echo_n "checking for GLOBUS_GSI_CERT_UTILS... " >&6; } if test -n "$GLOBUS_GSI_CERT_UTILS_CFLAGS"; then pkg_cv_GLOBUS_GSI_CERT_UTILS_CFLAGS="$GLOBUS_GSI_CERT_UTILS_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gsi-cert-utils\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-cert-utils") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSI_CERT_UTILS_CFLAGS=`$PKG_CONFIG --cflags "globus-gsi-cert-utils" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSI_CERT_UTILS_LIBS"; then pkg_cv_GLOBUS_GSI_CERT_UTILS_LIBS="$GLOBUS_GSI_CERT_UTILS_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gsi-cert-utils\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-cert-utils") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSI_CERT_UTILS_LIBS=`$PKG_CONFIG --libs "globus-gsi-cert-utils" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSI_CERT_UTILS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-gsi-cert-utils" 2>&1` else GLOBUS_GSI_CERT_UTILS_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-gsi-cert-utils" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSI_CERT_UTILS_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gsi_cert_utils" >&5 $as_echo_n "checking for globus_gsi_cert_utils... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_cert_utils_version=`$GPT_QUERY globus_gsi_cert_utils-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_cert_utils | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_cert_utils_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_cert_utils_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gsi_cert_utils_version" >&5 $as_echo "$gpt_cv_globus_gsi_cert_utils_version" >&6; } GLOBUS_GSI_CERT_UTILS_VERSION=$gpt_cv_globus_gsi_cert_utils_version GLOBUS_GSI_CERT_UTILS_LIBS=$gpt_cv_globus_gsi_cert_utils_libs GLOBUS_GSI_CERT_UTILS_CFLAGS=$gpt_cv_globus_gsi_cert_utils_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gsi_cert_utils" >&5 $as_echo_n "checking for globus_gsi_cert_utils... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_cert_utils_version=`$GPT_QUERY globus_gsi_cert_utils-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_cert_utils | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_cert_utils_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_cert_utils_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_cert_utils_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gsi_cert_utils_version" >&5 $as_echo "$gpt_cv_globus_gsi_cert_utils_version" >&6; } GLOBUS_GSI_CERT_UTILS_VERSION=$gpt_cv_globus_gsi_cert_utils_version GLOBUS_GSI_CERT_UTILS_LIBS=$gpt_cv_globus_gsi_cert_utils_libs GLOBUS_GSI_CERT_UTILS_CFLAGS=$gpt_cv_globus_gsi_cert_utils_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSI_CERT_UTILS_CFLAGS=$pkg_cv_GLOBUS_GSI_CERT_UTILS_CFLAGS GLOBUS_GSI_CERT_UTILS_LIBS=$pkg_cv_GLOBUS_GSI_CERT_UTILS_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSI_CERT_UTILS_VERSION=`$PKG_CONFIG --modversion globus-gsi-cert-utils` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_GSI_CREDENTIAL" >&5 $as_echo_n "checking for GLOBUS_GSI_CREDENTIAL... " >&6; } if test -n "$GLOBUS_GSI_CREDENTIAL_CFLAGS"; then pkg_cv_GLOBUS_GSI_CREDENTIAL_CFLAGS="$GLOBUS_GSI_CREDENTIAL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gsi-credential\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-credential") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSI_CREDENTIAL_CFLAGS=`$PKG_CONFIG --cflags "globus-gsi-credential" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_GSI_CREDENTIAL_LIBS"; then pkg_cv_GLOBUS_GSI_CREDENTIAL_LIBS="$GLOBUS_GSI_CREDENTIAL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-gsi-credential\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-gsi-credential") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_GSI_CREDENTIAL_LIBS=`$PKG_CONFIG --libs "globus-gsi-credential" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_GSI_CREDENTIAL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-gsi-credential" 2>&1` else GLOBUS_GSI_CREDENTIAL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-gsi-credential" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_GSI_CREDENTIAL_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gsi_credential" >&5 $as_echo_n "checking for globus_gsi_credential... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_credential_version=`$GPT_QUERY globus_gsi_credential-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_credential_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_credential | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_credential_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_credential_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_credential_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gsi_credential_version" >&5 $as_echo "$gpt_cv_globus_gsi_credential_version" >&6; } GLOBUS_GSI_CREDENTIAL_VERSION=$gpt_cv_globus_gsi_credential_version GLOBUS_GSI_CREDENTIAL_LIBS=$gpt_cv_globus_gsi_credential_libs GLOBUS_GSI_CREDENTIAL_CFLAGS=$gpt_cv_globus_gsi_credential_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_gsi_credential" >&5 $as_echo_n "checking for globus_gsi_credential... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_gsi_credential_version=`$GPT_QUERY globus_gsi_credential-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_gsi_credential_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_gsi_credential | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_gsi_credential_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_gsi_credential_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_gsi_credential_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_gsi_credential_version" >&5 $as_echo "$gpt_cv_globus_gsi_credential_version" >&6; } GLOBUS_GSI_CREDENTIAL_VERSION=$gpt_cv_globus_gsi_credential_version GLOBUS_GSI_CREDENTIAL_LIBS=$gpt_cv_globus_gsi_credential_libs GLOBUS_GSI_CREDENTIAL_CFLAGS=$gpt_cv_globus_gsi_credential_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_GSI_CREDENTIAL_CFLAGS=$pkg_cv_GLOBUS_GSI_CREDENTIAL_CFLAGS GLOBUS_GSI_CREDENTIAL_LIBS=$pkg_cv_GLOBUS_GSI_CREDENTIAL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_GSI_CREDENTIAL_VERSION=`$PKG_CONFIG --modversion globus-gsi-credential` fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_OPENSSL_MODULE" >&5 $as_echo_n "checking for GLOBUS_OPENSSL_MODULE... " >&6; } if test -n "$GLOBUS_OPENSSL_MODULE_CFLAGS"; then pkg_cv_GLOBUS_OPENSSL_MODULE_CFLAGS="$GLOBUS_OPENSSL_MODULE_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-openssl-module\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl-module") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_OPENSSL_MODULE_CFLAGS=`$PKG_CONFIG --cflags "globus-openssl-module" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_OPENSSL_MODULE_LIBS"; then pkg_cv_GLOBUS_OPENSSL_MODULE_LIBS="$GLOBUS_OPENSSL_MODULE_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-openssl-module\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl-module") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_OPENSSL_MODULE_LIBS=`$PKG_CONFIG --libs "globus-openssl-module" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_OPENSSL_MODULE_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-openssl-module" 2>&1` else GLOBUS_OPENSSL_MODULE_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-openssl-module" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_OPENSSL_MODULE_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_openssl_module" >&5 $as_echo_n "checking for globus_openssl_module... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_module_version=`$GPT_QUERY globus_openssl_module-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_module_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl_module | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_module_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_module_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_module_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_openssl_module_version" >&5 $as_echo "$gpt_cv_globus_openssl_module_version" >&6; } GLOBUS_OPENSSL_MODULE_VERSION=$gpt_cv_globus_openssl_module_version GLOBUS_OPENSSL_MODULE_LIBS=$gpt_cv_globus_openssl_module_libs GLOBUS_OPENSSL_MODULE_CFLAGS=$gpt_cv_globus_openssl_module_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_openssl_module" >&5 $as_echo_n "checking for globus_openssl_module... 
" >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_module_version=`$GPT_QUERY globus_openssl_module-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_module_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl_module | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_module_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_module_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_module_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_openssl_module_version" >&5 $as_echo "$gpt_cv_globus_openssl_module_version" >&6; } GLOBUS_OPENSSL_MODULE_VERSION=$gpt_cv_globus_openssl_module_version GLOBUS_OPENSSL_MODULE_LIBS=$gpt_cv_globus_openssl_module_libs GLOBUS_OPENSSL_MODULE_CFLAGS=$gpt_cv_globus_openssl_module_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_OPENSSL_MODULE_CFLAGS=$pkg_cv_GLOBUS_OPENSSL_MODULE_CFLAGS GLOBUS_OPENSSL_MODULE_LIBS=$pkg_cv_GLOBUS_OPENSSL_MODULE_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_OPENSSL_MODULE_VERSION=`$PKG_CONFIG --modversion globus-openssl-module` fi # Check for new globus thread model selection SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_COMMON_CFLAGS" LIBS="$LIBS $GLOBUS_COMMON_LIBS" for ac_func in globus_thread_set_model do : ac_fn_c_check_func "$LINENO" "globus_thread_set_model" "ac_cv_func_globus_thread_set_model" if test "x$ac_cv_func_globus_thread_set_model" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GLOBUS_THREAD_SET_MODEL 1 _ACEOF fi done CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS # Check for gridftp-v2 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_FTP_CLIENT_CFLAGS" LIBS="$LIBS $GLOBUS_FTP_CLIENT_LIBS" for ac_func in globus_ftp_client_handleattr_set_gridftp2 do : ac_fn_c_check_func "$LINENO" "globus_ftp_client_handleattr_set_gridftp2" "ac_cv_func_globus_ftp_client_handleattr_set_gridftp2" if test "x$ac_cv_func_globus_ftp_client_handleattr_set_gridftp2" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GLOBUS_FTP_CLIENT_HANDLEATTR_SET_GRIDFTP2 1 _ACEOF fi done CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS globus_openssl_detected= pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLOBUS_OPENSSL" >&5 $as_echo_n "checking for GLOBUS_OPENSSL... " >&6; } if test -n "$GLOBUS_OPENSSL_CFLAGS"; then pkg_cv_GLOBUS_OPENSSL_CFLAGS="$GLOBUS_OPENSSL_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-openssl\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_OPENSSL_CFLAGS=`$PKG_CONFIG --cflags "globus-openssl" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GLOBUS_OPENSSL_LIBS"; then pkg_cv_GLOBUS_OPENSSL_LIBS="$GLOBUS_OPENSSL_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"globus-openssl\""; } >&5 ($PKG_CONFIG --exists --print-errors "globus-openssl") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GLOBUS_OPENSSL_LIBS=`$PKG_CONFIG --libs "globus-openssl" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GLOBUS_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "globus-openssl" 2>&1` else GLOBUS_OPENSSL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "globus-openssl" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GLOBUS_OPENSSL_PKG_ERRORS" >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_openssl" >&5 $as_echo_n "checking for globus_openssl... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_version=`$GPT_QUERY globus_openssl-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_openssl_version" >&5 $as_echo "$gpt_cv_globus_openssl_version" >&6; } GLOBUS_OPENSSL_VERSION=$gpt_cv_globus_openssl_version GLOBUS_OPENSSL_LIBS=$gpt_cv_globus_openssl_libs GLOBUS_OPENSSL_CFLAGS=$gpt_cv_globus_openssl_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for globus_openssl" >&5 $as_echo_n "checking for globus_openssl... " >&6; } if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_globus_openssl_version=`$GPT_QUERY globus_openssl-$GPT_FLAVOR-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi if test -n "$gpt_cv_globus_openssl_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR globus_openssl | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_globus_openssl_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` gpt_cv_globus_openssl_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_globus_openssl_version"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gpt_cv_globus_openssl_version" >&5 $as_echo "$gpt_cv_globus_openssl_version" >&6; } GLOBUS_OPENSSL_VERSION=$gpt_cv_globus_openssl_version GLOBUS_OPENSSL_LIBS=$gpt_cv_globus_openssl_libs GLOBUS_OPENSSL_CFLAGS=$gpt_cv_globus_openssl_cflags else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else GLOBUS_OPENSSL_CFLAGS=$pkg_cv_GLOBUS_OPENSSL_CFLAGS GLOBUS_OPENSSL_LIBS=$pkg_cv_GLOBUS_OPENSSL_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } GLOBUS_OPENSSL_VERSION=`$PKG_CONFIG --modversion globus-openssl` fi if test ! 
"x$GLOBUS_OPENSSL_LIBS" = "x" ; then globus_openssl_detected=`echo "$GLOBUS_OPENSSL_LIBS" | grep "lssl_$GPT_FLAVOR"` if test ! "x$globus_openssl_detected" = "x" ; then globus_openssl_detected="yes" fi fi if test "x$globus_openssl_detected" = "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Globus own OpenSSL library detected. In order to avoid runtime conflicts following components will be disabled: GridFTP DMC, SRM DMC, GSI MCC. To enable these components use Globus compiled for system OpenSSL. " >&5 $as_echo " Globus own OpenSSL library detected. In order to avoid runtime conflicts following components will be disabled: GridFTP DMC, SRM DMC, GSI MCC. To enable these components use Globus compiled for system OpenSSL. " >&6; } GLOBUS_FTP_CLIENT_VERSION= GLOBUS_FTP_CONTROL_VERSION= GLOBUS_IO_VERSION= GLOBUS_GSSAPI_GSI_VERSION= fi if test "x$GLOBUS_IO_VERSION" = "x"; then IO_VERSION_MAJOR=0 else IO_VERSION_MAJOR=`echo "$GLOBUS_IO_VERSION" | sed 's/^\([^.]*\).*/\1/'`; fi cat >>confdefs.h <<_ACEOF #define GLOBUS_IO_VERSION $IO_VERSION_MAJOR _ACEOF if test "x$GLOBUS_GSSAPI_GSI_VERSION" = "x"; then GLOBUS_GSSAPI_GSI_VERSION_MAJOR=0 GLOBUS_GSSAPI_GSI_VERSION_MINOR=0 else GLOBUS_GSSAPI_GSI_VERSION_MAJOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^\([^.]*\).*/\1/'`; GLOBUS_GSSAPI_GSI_VERSION_MINOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^[^.]*\.\([^.]*\).*/\1/'`; fi if test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -lt "12"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 elif test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -eq "12"; then if test "$GLOBUS_GSSAPI_GSI_VERSION_MINOR" -lt "2"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi cat >>confdefs.h <<_ACEOF #define GLOBUS_GSSAPI_GSI_VERSION $GSSAPI_GSI_VERSION_MAJOR _ACEOF cat >>confdefs.h <<_ACEOF #define GLOBUS_GSSAPI_GSI_OLD_OPENSSL $GLOBUS_GSSAPI_GSI_OLD_OPENSSL _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for DEFAULT_GLOBUS_LOCATION" >&5 $as_echo_n "checking for DEFAULT_GLOBUS_LOCATION... " >&6; } # GLOBUS_LOCATION is set by GPT macros DEFAULT_GLOBUS_LOCATION="$GLOBUS_LOCATION" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DEFAULT_GLOBUS_LOCATION" >&5 $as_echo "$DEFAULT_GLOBUS_LOCATION" >&6; } #check lcas DEFAULT_LCAS_LOCATION=/opt/glite LCAS_LOCATION= LCAS_CFLAGS= LCAS_LIBS= # Check whether --with-lcas-location was given. if test "${with_lcas_location+set}" = set; then : withval=$with_lcas_location; LCAS_LOCATION=$with_lcas_location if test ! -d $LCAS_LOCATION; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: LCAS_LOCATION ($LCAS_LOCATION) does not exist" >&5 $as_echo "$as_me: WARNING: LCAS_LOCATION ($LCAS_LOCATION) does not exist" >&2;} LCAS_LOCATION= fi else if test "x$LCAS_LOCATION" = "x"; then LCAS_LOCATION=$DEFAULT_LCAS_LOCATION fi if test ! -d $LCAS_LOCATION; then LCAS_LOCATION= fi fi if test "x$LCAS_LOCATION" != "x"; then LCAS_CFLAGS=$LCAS_LOCATION/include/glite/security/lcas if test ! -d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include/lcas if test ! 
-d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include fi fi LCAS_CFLAGS=-I$LCAS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCAS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" for ac_header in lcas.h do : ac_fn_c_check_header_mongrel "$LINENO" "lcas.h" "ac_cv_header_lcas_h" "$ac_includes_default" if test "x$ac_cv_header_lcas_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LCAS_H 1 _ACEOF LCAS_LDFLAGS= if test -d $LCAS_LOCATION/lib64; then LCAS_LDFLAGS="-L$LCAS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCAS_LDFLAGS="-L$LCAS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCAS_LDFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lcas_init in -llcas" >&5 $as_echo_n "checking for lcas_init in -llcas... " >&6; } if ${ac_cv_lib_lcas_lcas_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llcas $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char lcas_init (); int main () { return lcas_init (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_lcas_lcas_init=yes else ac_cv_lib_lcas_lcas_init=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lcas_lcas_init" >&5 $as_echo "$ac_cv_lib_lcas_lcas_init" >&6; } if test "x$ac_cv_lib_lcas_lcas_init" = xyes; then : LCAS_LIBS="$LCAS_LDFLAGS -llcas" else LCAS_LOCATION="" fi LDFLAGS=$SAVE_LDFLAGS else LCAS_LOCATION="" fi done CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCAS_LOCATION" != "x"; then $as_echo "#define HAVE_LCAS 1" >>confdefs.h fi #check lcmaps DEFAULT_LCMAPS_LOCATION=/opt/glite LCMAPS_LOCATION= LCMAPS_CFLAGS= LCMAPS_LIBS= # Check whether --with-lcmaps-location was given. if test "${with_lcmaps_location+set}" = set; then : withval=$with_lcmaps_location; LCMAPS_LOCATION=$with_lcmaps_location if test ! -d $LCMAPS_LOCATION; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: LCMAPS_LOCATION ($LCMAPS_LOCATION) does not exist" >&5 $as_echo "$as_me: WARNING: LCMAPS_LOCATION ($LCMAPS_LOCATION) does not exist" >&2;} LCMAPS_LOCATION= fi else if test "x$LCMAPS_LOCATION" = "x"; then LCMAPS_LOCATION=$DEFAULT_LCMAPS_LOCATION fi if test ! -d $LCMAPS_LOCATION; then LCMAPS_LOCATION= fi fi if test "x$LCMAPS_LOCATION" != "x"; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/glite/security/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include fi fi LCMAPS_CFLAGS=-I$LCMAPS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCMAPS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" for ac_header in lcmaps.h do : ac_fn_c_check_header_mongrel "$LINENO" "lcmaps.h" "ac_cv_header_lcmaps_h" "$ac_includes_default" if test "x$ac_cv_header_lcmaps_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LCMAPS_H 1 _ACEOF LCMAPS_LDFLAGS= if test -d $LCMAPS_LOCATION/lib64; then LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCMAPS_LDFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lcmaps_init in -llcmaps" >&5 $as_echo_n "checking for lcmaps_init in -llcmaps... 
" >&6; } if ${ac_cv_lib_lcmaps_lcmaps_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-llcmaps $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char lcmaps_init (); int main () { return lcmaps_init (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_lcmaps_lcmaps_init=yes else ac_cv_lib_lcmaps_lcmaps_init=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lcmaps_lcmaps_init" >&5 $as_echo "$ac_cv_lib_lcmaps_lcmaps_init" >&6; } if test "x$ac_cv_lib_lcmaps_lcmaps_init" = xyes; then : LCMAPS_LIBS="$LCMAPS_LDFLAGS -llcmaps" else LCMAPS_LOCATION="" fi LDFLAGS=$SAVE_LDFLAGS else LCMAPS_LOCATION="" fi done CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCMAPS_LOCATION" != "x"; then $as_echo "#define HAVE_LCMAPS 1" >>confdefs.h fi # Check if mock DMC is enabled # Check whether --enable-mock-dmc was given. if test "${enable_mock_dmc+set}" = set; then : enableval=$enable_mock_dmc; enables_mock_dmc="$enableval" fi # Check for GFAL2 # Check whether --enable-gfal was given. if test "${enable_gfal+set}" = set; then : enableval=$enable_gfal; enables_gfal="$enableval" fi if test "x$enables_gfal" = "xyes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GFAL2" >&5 $as_echo_n "checking for GFAL2... " >&6; } if test -n "$GFAL2_CFLAGS"; then pkg_cv_GFAL2_CFLAGS="$GFAL2_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gfal_transfer\""; } >&5 ($PKG_CONFIG --exists --print-errors "gfal_transfer") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GFAL2_CFLAGS=`$PKG_CONFIG --cflags "gfal_transfer" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$GFAL2_LIBS"; then pkg_cv_GFAL2_LIBS="$GFAL2_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gfal_transfer\""; } >&5 ($PKG_CONFIG --exists --print-errors "gfal_transfer") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_GFAL2_LIBS=`$PKG_CONFIG --libs "gfal_transfer" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then GFAL2_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "gfal_transfer" 2>&1` else GFAL2_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "gfal_transfer" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GFAL2_PKG_ERRORS" >&5 enables_gfal="no" elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } enables_gfal="no" else GFAL2_CFLAGS=$pkg_cv_GFAL2_CFLAGS GFAL2_LIBS=$pkg_cv_GFAL2_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi fi # Check for S3 # Check whether --enable-s3 was given. if test "${enable_s3+set}" = set; then : enableval=$enable_s3; enables_s3="$enableval" fi if test "x$enables_s3" = "xyes"; then # Check whether --with-s3 was given. if test "${with_s3+set}" = set; then : withval=$with_s3; fi if test ! "x$with_s3" = "x" ; then S3_LOCATION="$with_s3" S3_CPPFLAGS="-I$S3_LOCATION/include" if test -d $S3_LOCATION/lib64; then S3_LDFLAGS="-L$S3_LOCATION/lib64" else S3_LDFLAGS="-L$S3_LOCATION/lib" fi fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $S3_CPPFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "libs3.h" "ac_cv_header_libs3_h" "$ac_includes_default" if test "x$ac_cv_header_libs3_h" = xyes; then : else enables_s3="no" fi CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $S3_LDFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for S3_initialize in -ls3" >&5 $as_echo_n "checking for S3_initialize in -ls3... " >&6; } if ${ac_cv_lib_s3_S3_initialize+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ls3 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char S3_initialize (); int main () { return S3_initialize (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_s3_S3_initialize=yes else ac_cv_lib_s3_S3_initialize=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_s3_S3_initialize" >&5 $as_echo "$ac_cv_lib_s3_S3_initialize" >&6; } if test "x$ac_cv_lib_s3_S3_initialize" = xyes; then : S3_LIBS="$S3_LDFLAGS -ls3" else enables_s3="no" fi LDFLAGS=$SAVE_LDFLAGS if test x$enables_s3 = xyes then if s3 help 2>&1 | grep -q -- '--timeout' ; then $as_echo "#define HAVE_S3_TIMEOUT 1" >>confdefs.h fi fi fi # Check for xrootd (c++) ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu # Check whether --enable-xrootd was given. if test "${enable_xrootd+set}" = set; then : enableval=$enable_xrootd; enables_xrootd="$enableval" fi if test "x$enables_xrootd" = "xyes"; then XROOTD_CPPFLAGS="-I/usr/include/xrootd" # Check whether --with-xrootd was given. 
if test "${with_xrootd+set}" = set; then : withval=$with_xrootd; fi if test ! "x$with_xrootd" = "x" ; then XROOTD_LOCATION="$with_xrootd" XROOTD_CPPFLAGS="-I$XROOTD_LOCATION/include/xrootd" if test -d $XROOTD_LOCATION/lib64; then XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib64" else XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XROOTD headers" >&5 $as_echo_n "checking for XROOTD headers... " >&6; } SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $XROOTD_CPPFLAGS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XROOTD_CPPFLAGS" >&5 $as_echo "$XROOTD_CPPFLAGS" >&6; } else XROOTD_CPPFLAGS="-std=c++0x $XROOTD_CPPFLAGS" CPPFLAGS="$SAVE_CPPFLAGS $XROOTD_CPPFLAGS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XROOTD_CPPFLAGS" >&5 $as_echo "$XROOTD_CPPFLAGS" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } enables_xrootd="no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $XROOTD_LDFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lXrdPosix" >&5 $as_echo_n "checking for main in -lXrdPosix... " >&6; } if ${ac_cv_lib_XrdPosix_main+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lXrdPosix $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return main (); ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : ac_cv_lib_XrdPosix_main=yes else ac_cv_lib_XrdPosix_main=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_XrdPosix_main" >&5 $as_echo "$ac_cv_lib_XrdPosix_main" >&6; } if test "x$ac_cv_lib_XrdPosix_main" = xyes; then : XROOTD_LIBS="$XROOTD_LDFLAGS -lXrdPosix -lXrdCl" else enables_xrootd="no" fi LDFLAGS=$SAVE_LDFLAGS fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Setup conditionals if test -n "$GLOBUS_COMMON_VERSION"; then GLOBUSUTILS_ENABLED_TRUE= GLOBUSUTILS_ENABLED_FALSE='#' else GLOBUSUTILS_ENABLED_TRUE='#' GLOBUSUTILS_ENABLED_FALSE= fi if test -n "$GLOBUS_FTP_CLIENT_VERSION"; then GRIDFTP_ENABLED_TRUE= GRIDFTP_ENABLED_FALSE='#' else GRIDFTP_ENABLED_TRUE='#' GRIDFTP_ENABLED_FALSE= fi if test x$enables_mock_dmc = xyes; then MOCK_DMC_ENABLED_TRUE= MOCK_DMC_ENABLED_FALSE='#' else MOCK_DMC_ENABLED_TRUE='#' MOCK_DMC_ENABLED_FALSE= fi if test x$enables_gfal = xyes; then GFAL_ENABLED_TRUE= GFAL_ENABLED_FALSE='#' else GFAL_ENABLED_TRUE='#' GFAL_ENABLED_FALSE= fi if test x$enables_s3 = xyes; then S3_DMC_ENABLED_TRUE= S3_DMC_ENABLED_FALSE='#' else S3_DMC_ENABLED_TRUE='#' S3_DMC_ENABLED_FALSE= fi if test x$enables_xrootd = xyes; then XROOTD_ENABLED_TRUE= XROOTD_ENABLED_FALSE='#' else XROOTD_ENABLED_TRUE='#' XROOTD_ENABLED_FALSE= fi if test x$XMLSEC_INSTALLED = xyes; then XMLSEC_ENABLED_TRUE= XMLSEC_ENABLED_FALSE='#' else XMLSEC_ENABLED_TRUE='#' XMLSEC_ENABLED_FALSE= fi 
if test x$enables_cppunit = xyes; then CPPUNIT_ENABLED_TRUE= CPPUNIT_ENABLED_FALSE='#' else CPPUNIT_ENABLED_TRUE='#' CPPUNIT_ENABLED_FALSE= fi enables_srm_dmc=no if test "$enables_hed" = "yes"; then enables_srm_dmc=yes fi if test "x$enables_srm_dmc" = "xyes"; then SRM_DMC_ENABLED_TRUE= SRM_DMC_ENABLED_FALSE='#' else SRM_DMC_ENABLED_TRUE='#' SRM_DMC_ENABLED_FALSE= fi # Setup defines if test -n "$GLOBUS_COMMON_VERSION"; then $as_echo "#define HAVE_GLOBUS 1" >>confdefs.h fi if test x"$XMLSEC_INSTALLED" = xyes; then $as_echo "#define HAVE_XMLSEC 1" >>confdefs.h fi # Setup messages for reporting enables_gridftp=no if test -n "$GLOBUS_FTP_CLIENT_VERSION" ; then enables_gridftp=yes; fi enables_sqlite=no if test "x$SQLITE_INSTALLED" = "xyes" ; then enables_sqlite=yes; fi # Check version of Test::More Perl module. min_perl_test_more_version_required="0.88" # Stable version of Test::More containing done_testing sub. PERL_TEST_DIR= perl_test_more_version_found=$(perl -MTest::More -e "print \"\$Test::More::VERSION\"") if test $(echo "$perl_test_more_version_found" | cut -d. -f1) -gt $(echo "$min_perl_test_more_version_required" | cut -d. -f1) || \ test $(echo "$perl_test_more_version_found" | cut -d. -f1) -eq $(echo "$min_perl_test_more_version_required" | cut -d. -f1) && \ test $(echo "$perl_test_more_version_found" | cut -d. -f2) -ge $(echo "$min_perl_test_more_version_required" | cut -d. -f2); then PERL_TEST_DIR="test" fi # Check for the uuid lib UUID_LIBS="" if test "$enables_hed" = "yes"; then ac_fn_c_check_header_mongrel "$LINENO" "uuid/uuid.h" "ac_cv_header_uuid_uuid_h" "$ac_includes_default" if test "x$ac_cv_header_uuid_uuid_h" = xyes; then : ac_fn_c_check_func "$LINENO" "uuid_generate" "ac_cv_func_uuid_generate" if test "x$ac_cv_func_uuid_generate" = xyes; then : UUID_LIBS= else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_generate in -luuid" >&5 $as_echo_n "checking for uuid_generate in -luuid... " >&6; } if ${ac_cv_lib_uuid_uuid_generate+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-luuid $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char uuid_generate (); int main () { return uuid_generate (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_uuid_uuid_generate=yes else ac_cv_lib_uuid_uuid_generate=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_generate" >&5 $as_echo "$ac_cv_lib_uuid_uuid_generate" >&6; } if test "x$ac_cv_lib_uuid_uuid_generate" = xyes; then : UUID_LIBS=-luuid else { $as_echo "$as_me:${as_lineno-$LINENO}: Can't find library containing uuid implementation" >&5 $as_echo "$as_me: Can't find library containing uuid implementation" >&6;} fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: Can't find uuid header" >&5 $as_echo "$as_me: Can't find uuid header" >&6;} fi LIBS="$LIBS $UUID_LIBS" fi # Check for dlopen DLOPEN_LIBS="" if test "$enables_hed" = "yes"; then ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = xyes; then : DLOPEN_LIBS= else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... 
" >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : DLOPEN_LIBS=-ldl else { $as_echo "$as_me:${as_lineno-$LINENO}: Can't find library containing dlopen implementation" >&5 $as_echo "$as_me: Can't find library containing dlopen implementation" >&6;} fi fi fi # Check for clock_gettime { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... " >&6; } if ${ac_cv_search_clock_gettime+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char clock_gettime (); int main () { return clock_gettime (); ; return 0; } _ACEOF for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_clock_gettime=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_clock_gettime+:} false; then : break fi done if ${ac_cv_search_clock_gettime+:} false; then : else ac_cv_search_clock_gettime=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi # Define bash-completion dir pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BASH_COMPLETION" >&5 $as_echo_n "checking for BASH_COMPLETION... " >&6; } if test -n "$BASH_COMPLETION_CFLAGS"; then pkg_cv_BASH_COMPLETION_CFLAGS="$BASH_COMPLETION_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"bash-completion >= 2.0\""; } >&5 ($PKG_CONFIG --exists --print-errors "bash-completion >= 2.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_BASH_COMPLETION_CFLAGS=`$PKG_CONFIG --cflags "bash-completion >= 2.0" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$BASH_COMPLETION_LIBS"; then pkg_cv_BASH_COMPLETION_LIBS="$BASH_COMPLETION_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"bash-completion >= 2.0\""; } >&5 ($PKG_CONFIG --exists --print-errors "bash-completion >= 2.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_BASH_COMPLETION_LIBS=`$PKG_CONFIG --libs "bash-completion >= 2.0" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then BASH_COMPLETION_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "bash-completion >= 2.0" 2>&1` else BASH_COMPLETION_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "bash-completion >= 2.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$BASH_COMPLETION_PKG_ERRORS" >&5 bashcompdir="${sysconfdir}/bash_completion.d" elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } bashcompdir="${sysconfdir}/bash_completion.d" else BASH_COMPLETION_CFLAGS=$pkg_cv_BASH_COMPLETION_CFLAGS BASH_COMPLETION_LIBS=$pkg_cv_BASH_COMPLETION_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } bashcompdir="`pkg-config --variable=completionsdir --define-variable=prefix=${prefix} --define-variable=datadir=${datadir} bash-completion`" fi # check for fsusage if test "$enables_hed" = "yes"; then for ac_header in sys/param.h do : ac_fn_c_check_header_mongrel "$LINENO" "sys/param.h" "ac_cv_header_sys_param_h" "$ac_includes_default" if test "x$ac_cv_header_sys_param_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_SYS_PARAM_H 1 _ACEOF fi done for ac_header in sys/vfs.h sys/fs_types.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in sys/mount.h do : ac_fn_c_check_header_compile "$LINENO" "sys/mount.h" "ac_cv_header_sys_mount_h" "$ac_includes_default #if HAVE_SYS_PARAM_H #include #endif " if test "x$ac_cv_header_sys_mount_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_SYS_MOUNT_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get file system space usage" >&5 $as_echo "$as_me: checking how to get file system space usage" >&6;} ac_fsusage_space=no # Perform only the link test since it seems there are no variants of the # statvfs function. This check is more than just AC_CHECK_FUNCS(statvfs) # because that got a false positive on SCO OSR5. Adding the declaration # of a `struct statvfs' causes this test to fail (as it should) on such # systems. That system is reported to work fine with STAT_STATFS4 which # is what it gets when this test fails. 
if test $ac_fsusage_space = no; then # SVR4 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for statvfs function (SVR4)" >&5 $as_echo_n "checking for statvfs function (SVR4)... " >&6; } if ${fu_cv_sys_stat_statvfs+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #if defined __GLIBC__ && !defined __BEOS__ Do not use statvfs on systems with GNU libc, because that function stats all preceding entries in /proc/mounts, and that makes df hang if even one of the corresponding file systems is hard-mounted, but not available. statvfs in GNU libc on BeOS operates differently: it only makes a system call. #endif #ifdef __osf__ "Do not use Tru64's statvfs implementation" #endif #include <sys/statvfs.h> int main () { struct statvfs fsd; statvfs (0, &fsd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : fu_cv_sys_stat_statvfs=yes else fu_cv_sys_stat_statvfs=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_stat_statvfs" >&5 $as_echo "$fu_cv_sys_stat_statvfs" >&6; } if test $fu_cv_sys_stat_statvfs = yes; then ac_fsusage_space=yes $as_echo "#define STAT_STATVFS 1" >>confdefs.h fi fi if test $ac_fsusage_space = no; then # DEC Alpha running OSF/1 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for 3-argument statfs function (DEC OSF/1)" >&5 $as_echo_n "checking for 3-argument statfs function (DEC OSF/1)... " >&6; } if ${fu_cv_sys_stat_statfs3_osf1+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : fu_cv_sys_stat_statfs3_osf1=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/param.h> #include <sys/types.h> #include <sys/mount.h> int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd, sizeof (struct statfs)) != 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : fu_cv_sys_stat_statfs3_osf1=yes else fu_cv_sys_stat_statfs3_osf1=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_stat_statfs3_osf1" >&5 $as_echo "$fu_cv_sys_stat_statfs3_osf1" >&6; } if test $fu_cv_sys_stat_statfs3_osf1 = yes; then ac_fsusage_space=yes $as_echo "#define STAT_STATFS3_OSF1 1" >>confdefs.h fi fi if test $ac_fsusage_space = no; then # AIX { $as_echo "$as_me:${as_lineno-$LINENO}: checking for two-argument statfs with statfs.bsize member (AIX, 4.3BSD)" >&5 $as_echo_n "checking for two-argument statfs with statfs.bsize member (AIX, 4.3BSD)... " >&6; } if ${fu_cv_sys_stat_statfs2_bsize+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : fu_cv_sys_stat_statfs2_bsize=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
*/ #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #ifdef HAVE_SYS_VFS_H #include <sys/vfs.h> #endif int main () { struct statfs fsd; fsd.f_bsize = 0; return statfs (".", &fsd) != 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : fu_cv_sys_stat_statfs2_bsize=yes else fu_cv_sys_stat_statfs2_bsize=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_stat_statfs2_bsize" >&5 $as_echo "$fu_cv_sys_stat_statfs2_bsize" >&6; } if test $fu_cv_sys_stat_statfs2_bsize = yes; then ac_fsusage_space=yes $as_echo "#define STAT_STATFS2_BSIZE 1" >>confdefs.h fi fi if test $ac_fsusage_space = no; then # SVR3 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for four-argument statfs (AIX-3.2.5, SVR3)" >&5 $as_echo_n "checking for four-argument statfs (AIX-3.2.5, SVR3)... " >&6; } if ${fu_cv_sys_stat_statfs4+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : fu_cv_sys_stat_statfs4=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <sys/statfs.h> int main () { struct statfs fsd; return statfs (".", &fsd, sizeof fsd, 0) != 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : fu_cv_sys_stat_statfs4=yes else fu_cv_sys_stat_statfs4=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_stat_statfs4" >&5 $as_echo "$fu_cv_sys_stat_statfs4" >&6; } if test $fu_cv_sys_stat_statfs4 = yes; then ac_fsusage_space=yes $as_echo "#define STAT_STATFS4 1" >>confdefs.h fi fi if test $ac_fsusage_space = no; then # 4.4BSD and NetBSD { $as_echo "$as_me:${as_lineno-$LINENO}: checking for two-argument statfs with statfs.fsize member (4.4BSD and NetBSD)" >&5 $as_echo_n "checking for two-argument statfs with statfs.fsize member (4.4BSD and NetBSD)... " >&6; } if ${fu_cv_sys_stat_statfs2_fsize+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : fu_cv_sys_stat_statfs2_fsize=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd) != 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : fu_cv_sys_stat_statfs2_fsize=yes else fu_cv_sys_stat_statfs2_fsize=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_stat_statfs2_fsize" >&5 $as_echo "$fu_cv_sys_stat_statfs2_fsize" >&6; } if test $fu_cv_sys_stat_statfs2_fsize = yes; then ac_fsusage_space=yes $as_echo "#define STAT_STATFS2_FSIZE 1" >>confdefs.h fi fi if test $ac_fsusage_space = no; then # Ultrix { $as_echo "$as_me:${as_lineno-$LINENO}: checking for two-argument statfs with struct fs_data (Ultrix)" >&5 $as_echo_n "checking for two-argument statfs with struct fs_data (Ultrix)... " >&6; } if ${fu_cv_sys_stat_fs_data+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : fu_cv_sys_stat_fs_data=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
*/ #include <sys/types.h> #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #ifdef HAVE_SYS_FS_TYPES_H #include <sys/fs_types.h> #endif int main () { struct fs_data fsd; /* Ultrix's statfs returns 1 for success, 0 for not mounted, -1 for failure. */ return statfs (".", &fsd) != 1; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : fu_cv_sys_stat_fs_data=yes else fu_cv_sys_stat_fs_data=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_stat_fs_data" >&5 $as_echo "$fu_cv_sys_stat_fs_data" >&6; } if test $fu_cv_sys_stat_fs_data = yes; then ac_fsusage_space=yes $as_echo "#define STAT_STATFS2_FS_DATA 1" >>confdefs.h fi fi if test $ac_fsusage_space = no; then # SVR2 cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/filsys.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : $as_echo "#define STAT_READ_FILSYS 1" >>confdefs.h ac_fsusage_space=yes fi rm -f conftest.err conftest.i conftest.$ac_ext fi if test $ac_fsusage_space = yes; then : gl_cv_fs_space=yes else gl_cv_fs_space=no fi if test $gl_cv_fs_space = yes; then case " $LIBOBJS " in *" fsusage.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS fsusage.$ac_objext" ;; esac for ac_header in dustat.h sys/fs/s5param.h sys/filsys.h sys/statfs.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for statfs that truncates block counts" >&5 $as_echo_n "checking for statfs that truncates block counts... " >&6; } if ${fu_cv_sys_truncating_statfs+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if !defined(sun) && !defined(__sun) choke -- this is a workaround for a Sun-specific problem #endif #include <sys/types.h> #include <sys/vfs.h> int main () { struct statfs t; long c = *(t.f_spare); if (c) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : fu_cv_sys_truncating_statfs=yes else fu_cv_sys_truncating_statfs=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $fu_cv_sys_truncating_statfs = yes; then $as_echo "#define STATFS_TRUNCATES_BLOCK_COUNTS 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fu_cv_sys_truncating_statfs" >&5 $as_echo "$fu_cv_sys_truncating_statfs" >&6; } fi fi if test "$enables_hed" = "yes"; then # Checks for header files. ac_header_dirent=no for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } if eval \${$as_ac_Header+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
*/ #include <sys/types.h> #include <$ac_hdr> int main () { if ((DIR *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$as_ac_Header=yes" else eval "$as_ac_Header=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$as_ac_Header { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1 _ACEOF ac_header_dirent=$ac_hdr; break fi done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' dir; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char opendir (); int main () { return opendir (); ; return 0; } _ACEOF for ac_lib in '' x; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_opendir=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_opendir+:} false; then : break fi done if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_opendir" >&5 $as_echo "$ac_cv_search_opendir" >&6; } ac_res=$ac_cv_search_opendir if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
*/ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi
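# Illustrative consumer-side sketch for the sys/wait.h probe below (the
# classic Autoconf manual idiom, not code from this package):
#   #ifdef HAVE_SYS_WAIT_H
#   # include <sys/wait.h>
#   #endif
#   #ifndef WEXITSTATUS
#   # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8)
#   #endif
#   #ifndef WIFEXITED
#   # define WIFEXITED(stat_val) (((stat_val) & 255) == 0)
#   #endif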
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5 $as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... " >&6; } if ${ac_cv_header_sys_wait_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <sys/wait.h> #ifndef WEXITSTATUS # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8) #endif #ifndef WIFEXITED # define WIFEXITED(stat_val) (((stat_val) & 255) == 0) #endif int main () { int s; wait (&s); s = WIFEXITED (s) ? WEXITSTATUS (s) : 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_sys_wait_h=yes else ac_cv_header_sys_wait_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5 $as_echo "$ac_cv_header_sys_wait_h" >&6; } if test $ac_cv_header_sys_wait_h = yes; then $as_echo "#define HAVE_SYS_WAIT_H 1" >>confdefs.h fi for ac_header in arpa/inet.h fcntl.h float.h limits.h netdb.h netinet/in.h sasl.h sasl/sasl.h stdint.h stdlib.h string.h sys/file.h sys/socket.h sys/vfs.h unistd.h uuid/uuid.h getopt.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the compiler implements namespaces" >&5 $as_echo_n "checking whether the compiler implements namespaces... " >&6; } if ${ac_cv_cxx_namespaces+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ namespace Outer { namespace Inner { int i = 0; }} int main () { using namespace Outer::Inner; return i; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_cxx_namespaces=yes else ac_cv_cxx_namespaces=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_namespaces" >&5 $as_echo "$ac_cv_cxx_namespaces" >&6; } if test "$ac_cv_cxx_namespaces" = yes; then $as_echo "#define HAVE_NAMESPACES /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the compiler has stringstream" >&5 $as_echo_n "checking whether the compiler has stringstream... " >&6; } if ${ac_cv_cxx_have_sstream+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sstream> #ifdef HAVE_NAMESPACES using namespace std; #endif int main () { stringstream message; message << "Hello"; return 0; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_cxx_have_sstream=yes else ac_cv_cxx_have_sstream=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_have_sstream" >&5 $as_echo "$ac_cv_cxx_have_sstream" >&6; } if test "$ac_cv_cxx_have_sstream" = yes; then $as_echo "#define HAVE_SSTREAM /**/" >>confdefs.h fi # Checks for typedefs, structures, and compiler characteristics.
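# Illustrative consumer-side pattern for the stdbool.h result computed below
# (an abbreviated form of the classic Autoconf manual idiom, not code from
# this package):
#   #ifdef HAVE_STDBOOL_H
#   # include <stdbool.h>
#   #elif !defined HAVE__BOOL
#   # ifdef __cplusplus
#   typedef bool _Bool;
#   # else
#   # define _Bool signed char
#   # endif
#   #endif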
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; /* See body of main program for 'e'. */ char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; /* Catch a bug in an HP-UX C compiler. See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { bool e = &s; *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. */ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. */ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. 
*/ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "#define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5 $as_echo_n "checking for uid_t in sys/types.h... " >&6; } if ${ac_cv_type_uid_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "uid_t" >/dev/null 2>&1; then : ac_cv_type_uid_t=yes else ac_cv_type_uid_t=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5 $as_echo "$ac_cv_type_uid_t" >&6; } if test $ac_cv_type_uid_t = no; then $as_echo "#define uid_t int" >>confdefs.h $as_echo "#define gid_t int" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 $as_echo_n "checking for inline... " >&6; } if ${ac_cv_c_inline+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __cplusplus typedef int foo_t; static $ac_kw foo_t static_foo () {return 0; } $ac_kw foo_t foo () {return 0; } #endif _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_inline=$ac_kw fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext test "$ac_cv_c_inline" != no && break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 $as_echo "$ac_cv_c_inline" >&6; } case $ac_cv_c_inline in inline | yes) ;; *) case $ac_cv_c_inline in no) ac_val=;; *) ac_val=$ac_cv_c_inline;; esac cat >>confdefs.h <<_ACEOF #ifndef __cplusplus #define inline $ac_val #endif _ACEOF ;; esac ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default" if test "x$ac_cv_type_mode_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define mode_t int _ACEOF fi ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default" if test "x$ac_cv_type_off_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define off_t long int _ACEOF fi ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default" if test "x$ac_cv_type_pid_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define pid_t int _ACEOF fi ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" if test "x$ac_cv_type_size_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define size_t unsigned int _ACEOF fi ac_fn_c_check_member "$LINENO" "struct stat" "st_blksize" "ac_cv_member_struct_stat_st_blksize" "$ac_includes_default" if test "x$ac_cv_member_struct_stat_st_blksize" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRUCT_STAT_ST_BLKSIZE 1 _ACEOF fi
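# Illustrative consumer-side pattern for the TIME_WITH_SYS_TIME result
# computed below (the classic Autoconf manual idiom; HAVE_SYS_TIME_H is
# assumed to come from a separate header check, not from this excerpt):
#   #ifdef TIME_WITH_SYS_TIME
#   # include <sys/time.h>
#   # include <time.h>
#   #elif defined HAVE_SYS_TIME_H
#   # include <sys/time.h>
#   #else
#   # include <time.h>
#   #endif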
&6; } if ${ac_cv_header_time+:} false; then :">
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 $as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } if ${ac_cv_header_time+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <sys/time.h> #include <time.h> int main () { if ((struct tm *) 0) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_time=yes else ac_cv_header_time=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5 $as_echo "$ac_cv_header_time" >&6; } if test $ac_cv_header_time = yes; then $as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } if ${ac_cv_struct_tm+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <time.h> int main () { struct tm tm; int *p = &tm.tm_sec; return !p; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_struct_tm=time.h else ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 $as_echo "$ac_cv_struct_tm" >&6; } if test $ac_cv_struct_tm = sys/time.h; then $as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h fi ac_fn_c_check_type "$LINENO" "ptrdiff_t" "ac_cv_type_ptrdiff_t" "$ac_includes_default" if test "x$ac_cv_type_ptrdiff_t" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_PTRDIFF_T 1 _ACEOF fi # Checks for library functions. for ac_header in unistd.h do : ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default" if test "x$ac_cv_header_unistd_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_UNISTD_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5 $as_echo_n "checking for working chown... " >&6; } if ${ac_cv_func_chown_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_chown_works=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default #include <fcntl.h> int main () { char *f = "conftest.chown"; struct stat before, after; if (creat (f, 0600) < 0) return 1; if (stat (f, &before) < 0) return 1; if (chown (f, (uid_t) -1, (gid_t) -1) == -1) return 1; if (stat (f, &after) < 0) return 1; return ! (before.st_uid == after.st_uid && before.st_gid == after.st_gid); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_chown_works=yes else ac_cv_func_chown_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi rm -f conftest.chown fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5 $as_echo "$ac_cv_func_chown_works" >&6; } if test $ac_cv_func_chown_works = yes; then $as_echo "#define HAVE_CHOWN 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether closedir returns void" >&5 $as_echo_n "checking whether closedir returns void... " >&6; } if ${ac_cv_func_closedir_void+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_closedir_void=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h.
*/ $ac_includes_default #include <$ac_header_dirent> #ifndef __cplusplus int closedir (); #endif int main () { return closedir (opendir (".")) != 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_closedir_void=no else ac_cv_func_closedir_void=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_closedir_void" >&5 $as_echo "$ac_cv_func_closedir_void" >&6; } if test $ac_cv_func_closedir_void = yes; then $as_echo "#define CLOSEDIR_VOID 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for error_at_line" >&5 $as_echo_n "checking for error_at_line... " >&6; } if ${ac_cv_lib_error_at_line+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <error.h> int main () { error_at_line (0, 0, "", 0, "an error occurred"); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_error_at_line=yes else ac_cv_lib_error_at_line=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_error_at_line" >&5 $as_echo "$ac_cv_lib_error_at_line" >&6; } if test $ac_cv_lib_error_at_line = no; then case " $LIBOBJS " in *" error.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS error.$ac_objext" ;; esac fi for ac_header in vfork.h do : ac_fn_c_check_header_mongrel "$LINENO" "vfork.h" "ac_cv_header_vfork_h" "$ac_includes_default" if test "x$ac_cv_header_vfork_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VFORK_H 1 _ACEOF fi done for ac_func in fork vfork do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done if test "x$ac_cv_func_fork" = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working fork" >&5 $as_echo_n "checking for working fork... " >&6; } if ${ac_cv_func_fork_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_fork_works=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* By Ruediger Kuhlmann. */ return fork () < 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_fork_works=yes else ac_cv_func_fork_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_fork_works" >&5 $as_echo "$ac_cv_func_fork_works" >&6; } else ac_cv_func_fork_works=$ac_cv_func_fork fi if test "x$ac_cv_func_fork_works" = xcross; then case $host in *-*-amigaos* | *-*-msdosdjgpp*) # Override, as these systems have only a dummy fork() stub ac_cv_func_fork_works=no ;; *) ac_cv_func_fork_works=yes ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_fork_works guessed because of cross compilation" >&2;} fi ac_cv_func_vfork_works=$ac_cv_func_vfork if test "x$ac_cv_func_vfork" = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working vfork" >&5 $as_echo_n "checking for working vfork...
&6; } if ${ac_cv_func_vfork_works+:} false; then :">
" >&6; } if ${ac_cv_func_vfork_works+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_vfork_works=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Thanks to Paul Eggert for this test. */ $ac_includes_default #include <sys/wait.h> #ifdef HAVE_VFORK_H # include <vfork.h> #endif /* On some sparc systems, changes by the child to local and incoming argument registers are propagated back to the parent. The compiler is told about this with #include <vfork.h>, but some compilers (e.g. gcc -O) don't grok <vfork.h>. Test for this by using a static variable whose address is put into a register that is clobbered by the vfork. */ static void #ifdef __cplusplus sparc_address_test (int arg) # else sparc_address_test (arg) int arg; #endif { static pid_t child; if (!child) { child = vfork (); if (child < 0) { perror ("vfork"); _exit(2); } if (!child) { arg = getpid(); write(-1, "", 0); _exit (arg); } } } int main () { pid_t parent = getpid (); pid_t child; sparc_address_test (0); child = vfork (); if (child == 0) { /* Here is another test for sparc vfork register problems. This test uses lots of local variables, at least as many local variables as main has allocated so far including compiler temporaries. 4 locals are enough for gcc 1.40.3 on a Solaris 4.1.3 sparc, but we use 8 to be safe. A buggy compiler should reuse the register of parent for one of the local variables, since it will think that parent can't possibly be used any more in this routine. Assigning to the local variable will thus munge parent in the parent process. */ pid_t p = getpid(), p1 = getpid(), p2 = getpid(), p3 = getpid(), p4 = getpid(), p5 = getpid(), p6 = getpid(), p7 = getpid(); /* Convince the compiler that p..p7 are live; otherwise, it might use the same hardware register for all 8 local variables. */ if (p != p1 || p != p2 || p != p3 || p != p4 || p != p5 || p != p6 || p != p7) _exit(1); /* On some systems (e.g. IRIX 3.3), vfork doesn't separate parent from child file descriptors. If the child closes a descriptor before it execs or exits, this munges the parent's descriptor as well. Test for this by closing stdout in the child. */ _exit(close(fileno(stdout)) != 0); } else { int status; struct stat st; while (wait(&status) != child) ; return ( /* Was there some problem with vforking? */ child < 0 /* Did the child fail? (This shouldn't happen.) */ || status /* Did the vfork/compiler bug occur? */ || parent != getpid() /* Did the file descriptor bug occur?
*/ || fstat(fileno(stdout), &st) != 0 ); } } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_vfork_works=yes else ac_cv_func_vfork_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_vfork_works" >&5 $as_echo "$ac_cv_func_vfork_works" >&6; } fi; if test "x$ac_cv_func_fork_works" = xcross; then ac_cv_func_vfork_works=$ac_cv_func_vfork { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&5 $as_echo "$as_me: WARNING: result $ac_cv_func_vfork_works guessed because of cross compilation" >&2;} fi if test "x$ac_cv_func_vfork_works" = xyes; then $as_echo "#define HAVE_WORKING_VFORK 1" >>confdefs.h else $as_echo "#define vfork fork" >>confdefs.h fi if test "x$ac_cv_func_fork_works" = xyes; then $as_echo "#define HAVE_WORKING_FORK 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat correctly handles trailing slash" >&5 $as_echo_n "checking whether lstat correctly handles trailing slash... " >&6; } if ${ac_cv_func_lstat_dereferences_slashed_symlink+:} false; then : $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file echo >conftest.file if test "$as_ln_s" = "ln -s" && ln -s conftest.file conftest.sym; then if test "$cross_compiling" = yes; then : ac_cv_func_lstat_dereferences_slashed_symlink=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; /* Linux will dereference the symlink and fail, as required by POSIX. That is better in the sense that it means we will not have to compile and use the lstat wrapper. */ return lstat ("conftest.sym/", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_lstat_dereferences_slashed_symlink=yes else ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi else # If the `ln -s' command failed, then we probably don't even # have an lstat function. ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f conftest.sym conftest.file fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_lstat_dereferences_slashed_symlink" >&5 $as_echo "$ac_cv_func_lstat_dereferences_slashed_symlink" >&6; } test $ac_cv_func_lstat_dereferences_slashed_symlink = yes && cat >>confdefs.h <<_ACEOF #define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 _ACEOF if test "x$ac_cv_func_lstat_dereferences_slashed_symlink" = xno; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat accepts an empty string" >&5 $as_echo_n "checking whether lstat accepts an empty string... " >&6; } if ${ac_cv_func_lstat_empty_string_bug+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_lstat_empty_string_bug=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default int main () { struct stat sbuf; return lstat ("", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_lstat_empty_string_bug=no else ac_cv_func_lstat_empty_string_bug=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_lstat_empty_string_bug" >&5 $as_echo "$ac_cv_func_lstat_empty_string_bug" >&6; } if test $ac_cv_func_lstat_empty_string_bug = yes; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_LSTAT_EMPTY_STRING_BUG 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat correctly handles trailing slash" >&5 $as_echo_n "checking whether lstat correctly handles trailing slash... " >&6; } if ${ac_cv_func_lstat_dereferences_slashed_symlink+:} false; then : $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file echo >conftest.file if test "$as_ln_s" = "ln -s" && ln -s conftest.file conftest.sym; then if test "$cross_compiling" = yes; then : ac_cv_func_lstat_dereferences_slashed_symlink=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; /* Linux will dereference the symlink and fail, as required by POSIX. That is better in the sense that it means we will not have to compile and use the lstat wrapper. */ return lstat ("conftest.sym/", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_lstat_dereferences_slashed_symlink=yes else ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi else # If the `ln -s' command failed, then we probably don't even # have an lstat function. ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f conftest.sym conftest.file fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_lstat_dereferences_slashed_symlink" >&5 $as_echo "$ac_cv_func_lstat_dereferences_slashed_symlink" >&6; } test $ac_cv_func_lstat_dereferences_slashed_symlink = yes && cat >>confdefs.h <<_ACEOF #define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 _ACEOF if test "x$ac_cv_func_lstat_dereferences_slashed_symlink" = xno; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working memcmp" >&5 $as_echo_n "checking for working memcmp... " >&6; } if ${ac_cv_func_memcmp_working+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_memcmp_working=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Some versions of memcmp are not 8-bit clean. */ char c0 = '\100', c1 = '\200', c2 = '\201'; if (memcmp(&c0, &c2, 1) >= 0 || memcmp(&c1, &c2, 1) >= 0) return 1; /* The Next x86 OpenStep bug shows up only when comparing 16 bytes or more and with at least one buffer not starting on a 4-byte boundary. William Lewis provided this test program. 
*/ { char foo[21]; char bar[21]; int i; for (i = 0; i < 4; i++) { char *a = foo + i; char *b = bar + i; strcpy (a, "--------01111111"); strcpy (b, "--------10000000"); if (memcmp (a, b, 16) >= 0) return 1; } return 0; } ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_memcmp_working=yes else ac_cv_func_memcmp_working=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_memcmp_working" >&5 $as_echo "$ac_cv_func_memcmp_working" >&6; } test $ac_cv_func_memcmp_working = no && case " $LIBOBJS " in *" memcmp.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS memcmp.$ac_objext" ;; esac for ac_header in $ac_header_list do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in $ac_func_list do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working mktime" >&5 $as_echo_n "checking for working mktime... " >&6; } if ${ac_cv_func_working_mktime+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_working_mktime=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Test program from Paul Eggert and Tony Leneis. */ #ifdef TIME_WITH_SYS_TIME # include <sys/time.h> # include <time.h> #else # ifdef HAVE_SYS_TIME_H # include <sys/time.h> # else # include <time.h> # endif #endif #include <limits.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #ifndef HAVE_ALARM # define alarm(X) /* empty */ #endif /* Work around redefinition to rpl_putenv by other config tests. */ #undef putenv static time_t time_t_max; static time_t time_t_min; /* Values we'll use to set the TZ environment variable. */ static const char *tz_strings[] = { (const char *) 0, "TZ=GMT0", "TZ=JST-9", "TZ=EST+3EDT+2,M10.1.0/00:00:00,M2.3.0/00:00:00" }; #define N_STRINGS (sizeof (tz_strings) / sizeof (tz_strings[0])) /* Return 0 if mktime fails to convert a date in the spring-forward gap. Based on a problem report from Andreas Jaeger. */ static int spring_forward_gap () { /* glibc (up to about 1998-10-07) failed this test. */ struct tm tm; /* Use the portable POSIX.1 specification "TZ=PST8PDT,M4.1.0,M10.5.0" instead of "TZ=America/Vancouver" in order to detect the bug even on systems that don't support the Olson extension, or don't have the full zoneinfo tables installed. */ putenv ((char*) "TZ=PST8PDT,M4.1.0,M10.5.0"); tm.tm_year = 98; tm.tm_mon = 3; tm.tm_mday = 5; tm.tm_hour = 2; tm.tm_min = 0; tm.tm_sec = 0; tm.tm_isdst = -1; return mktime (&tm) != (time_t) -1; } static int mktime_test1 (time_t now) { struct tm *lt; return ! (lt = localtime (&now)) || mktime (lt) == now; } static int mktime_test (time_t now) { return (mktime_test1 (now) && mktime_test1 ((time_t) (time_t_max - now)) && mktime_test1 ((time_t) (time_t_min + now))); } static int irix_6_4_bug () { /* Based on code from Ariel Faigon. 
*/ struct tm tm; tm.tm_year = 96; tm.tm_mon = 3; tm.tm_mday = 0; tm.tm_hour = 0; tm.tm_min = 0; tm.tm_sec = 0; tm.tm_isdst = -1; mktime (&tm); return tm.tm_mon == 2 && tm.tm_mday == 31; } static int bigtime_test (int j) { struct tm tm; time_t now; tm.tm_year = tm.tm_mon = tm.tm_mday = tm.tm_hour = tm.tm_min = tm.tm_sec = j; now = mktime (&tm); if (now != (time_t) -1) { struct tm *lt = localtime (&now); if (! (lt && lt->tm_year == tm.tm_year && lt->tm_mon == tm.tm_mon && lt->tm_mday == tm.tm_mday && lt->tm_hour == tm.tm_hour && lt->tm_min == tm.tm_min && lt->tm_sec == tm.tm_sec && lt->tm_yday == tm.tm_yday && lt->tm_wday == tm.tm_wday && ((lt->tm_isdst < 0 ? -1 : 0 < lt->tm_isdst) == (tm.tm_isdst < 0 ? -1 : 0 < tm.tm_isdst)))) return 0; } return 1; } static int year_2050_test () { /* The correct answer for 2050-02-01 00:00:00 in Pacific time, ignoring leap seconds. */ unsigned long int answer = 2527315200UL; struct tm tm; time_t t; tm.tm_year = 2050 - 1900; tm.tm_mon = 2 - 1; tm.tm_mday = 1; tm.tm_hour = tm.tm_min = tm.tm_sec = 0; tm.tm_isdst = -1; /* Use the portable POSIX.1 specification "TZ=PST8PDT,M4.1.0,M10.5.0" instead of "TZ=America/Vancouver" in order to detect the bug even on systems that don't support the Olson extension, or don't have the full zoneinfo tables installed. */ putenv ((char*) "TZ=PST8PDT,M4.1.0,M10.5.0"); t = mktime (&tm); /* Check that the result is either a failure, or close enough to the correct answer that we can assume the discrepancy is due to leap seconds. */ return (t == (time_t) -1 || (0 < t && answer - 120 <= t && t <= answer + 120)); } int main () { time_t t, delta; int i, j; /* This test makes some buggy mktime implementations loop. Give up after 60 seconds; a mktime slower than that isn't worth using anyway. */ alarm (60); for (;;) { t = (time_t_max << 1) + 1; if (t <= time_t_max) break; time_t_max = t; } time_t_min = - ((time_t) ~ (time_t) 0 == (time_t) -1) - time_t_max; delta = time_t_max / 997; /* a suitable prime number */ for (i = 0; i < N_STRINGS; i++) { if (tz_strings[i]) putenv ((char*) tz_strings[i]); for (t = 0; t <= time_t_max - delta; t += delta) if (! mktime_test (t)) return 1; if (! (mktime_test ((time_t) 1) && mktime_test ((time_t) (60 * 60)) && mktime_test ((time_t) (60 * 60 * 24)))) return 1; for (j = 1; ; j <<= 1) if (! bigtime_test (j)) return 1; else if (INT_MAX / 2 < j) break; if (! bigtime_test (INT_MAX)) return 1; } return ! (irix_6_4_bug () && spring_forward_gap () && year_2050_test ()); } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_working_mktime=yes else ac_cv_func_working_mktime=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_working_mktime" >&5 $as_echo "$ac_cv_func_working_mktime" >&6; } if test $ac_cv_func_working_mktime = no; then case " $LIBOBJS " in *" mktime.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS mktime.$ac_objext" ;; esac fi for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5 $as_echo_n "checking for GNU libc compatible malloc... 
" >&6; } if ${ac_cv_func_malloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_malloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *malloc (); #endif int main () { return ! malloc (0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_malloc_0_nonnull=yes else ac_cv_func_malloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5 $as_echo "$ac_cv_func_malloc_0_nonnull" >&6; } if test $ac_cv_func_malloc_0_nonnull = yes; then : $as_echo "#define HAVE_MALLOC 1" >>confdefs.h else $as_echo "#define HAVE_MALLOC 0" >>confdefs.h case " $LIBOBJS " in *" malloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS malloc.$ac_objext" ;; esac $as_echo "#define malloc rpl_malloc" >>confdefs.h fi for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5 $as_echo_n "checking for GNU libc compatible realloc... " >&6; } if ${ac_cv_func_realloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_realloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *realloc (); #endif int main () { return ! realloc (0, 0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_realloc_0_nonnull=yes else ac_cv_func_realloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5 $as_echo "$ac_cv_func_realloc_0_nonnull" >&6; } if test $ac_cv_func_realloc_0_nonnull = yes; then : $as_echo "#define HAVE_REALLOC 1" >>confdefs.h else $as_echo "#define HAVE_REALLOC 0" >>confdefs.h case " $LIBOBJS " in *" realloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS realloc.$ac_objext" ;; esac $as_echo "#define realloc rpl_realloc" >>confdefs.h fi for ac_header in sys/select.h sys/socket.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking types of arguments for select" >&5 $as_echo_n "checking types of arguments for select... " >&6; } if ${ac_cv_func_select_args+:} false; then : $as_echo_n "(cached) " >&6 else for ac_arg234 in 'fd_set *' 'int *' 'void *'; do for ac_arg1 in 'int' 'size_t' 'unsigned long int' 'unsigned int'; do for ac_arg5 in 'struct timeval *' 'const struct timeval *'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default #ifdef HAVE_SYS_SELECT_H # include <sys/select.h> #endif #ifdef HAVE_SYS_SOCKET_H # include <sys/socket.h> #endif int main () { extern int select ($ac_arg1, $ac_arg234, $ac_arg234, $ac_arg234, $ac_arg5); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_func_select_args="$ac_arg1,$ac_arg234,$ac_arg5"; break 3 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done done done # Provide a safe default value. : "${ac_cv_func_select_args=int,int *,struct timeval *}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_select_args" >&5 $as_echo "$ac_cv_func_select_args" >&6; } ac_save_IFS=$IFS; IFS=',' set dummy `echo "$ac_cv_func_select_args" | sed 's/\*/\*/g'` IFS=$ac_save_IFS shift cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG1 $1 _ACEOF cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG234 ($2) _ACEOF cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG5 ($3) _ACEOF rm -f conftest* { $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } if ${ac_cv_type_signal+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> #include <signal.h> int main () { return *(signal (0, 0)) (0) == 1; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_type_signal=int else ac_cv_type_signal=void fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_signal" >&5 $as_echo "$ac_cv_type_signal" >&6; } cat >>confdefs.h <<_ACEOF #define RETSIGTYPE $ac_cv_type_signal _ACEOF ac_fn_c_check_decl "$LINENO" "strerror_r" "ac_cv_have_decl_strerror_r" "$ac_includes_default" if test "x$ac_cv_have_decl_strerror_r" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_STRERROR_R $ac_have_decl _ACEOF for ac_func in strerror_r do : ac_fn_c_check_func "$LINENO" "strerror_r" "ac_cv_func_strerror_r" if test "x$ac_cv_func_strerror_r" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRERROR_R 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char *" >&5 $as_echo_n "checking whether strerror_r returns char *... " >&6; } if ${ac_cv_func_strerror_r_char_p+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_func_strerror_r_char_p=no if test $ac_cv_have_decl_strerror_r = yes; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { char buf[100]; char x = *strerror_r (0, buf, sizeof buf); char *p = strerror_r (0, buf, sizeof buf); return !p || x; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_func_strerror_r_char_p=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else # strerror_r is not declared. Choose between # systems that have relatively inaccessible declarations for the # function. BeOS and DEC UNIX 4.0 fall in this category, but the # former has a strerror_r that returns char*, while the latter # has a strerror_r that returns `int'. # This test should segfault on the DEC system. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default extern char *strerror_r (); int main () { char buf[100]; char x = *strerror_r (0, buf, sizeof buf); return ! 
isalpha (x); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_strerror_r_char_p=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_strerror_r_char_p" >&5 $as_echo "$ac_cv_func_strerror_r_char_p" >&6; } if test $ac_cv_func_strerror_r_char_p = yes; then $as_echo "#define STRERROR_R_CHAR_P 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stat accepts an empty string" >&5 $as_echo_n "checking whether stat accepts an empty string... " >&6; } if ${ac_cv_func_stat_empty_string_bug+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_stat_empty_string_bug=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; return stat ("", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_stat_empty_string_bug=no else ac_cv_func_stat_empty_string_bug=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_stat_empty_string_bug" >&5 $as_echo "$ac_cv_func_stat_empty_string_bug" >&6; } if test $ac_cv_func_stat_empty_string_bug = yes; then case " $LIBOBJS " in *" stat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS stat.$ac_objext" ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_STAT_EMPTY_STRING_BUG 1 _ACEOF fi for ac_func in acl dup2 floor ftruncate gethostname getdomainname getpid gmtime_r lchown localtime_r memchr memmove memset mkdir mkfifo regcomp rmdir select setenv socket strcasecmp strchr strcspn strdup strerror strncasecmp strstr strtol strtoul strtoull timegm tzset unsetenv getopt_long_only getgrouplist mkdtemp posix_fallocate readdir_r mkstemp mktemp do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for res_query in -lresolv" >&5 $as_echo_n "checking for res_query in -lresolv... " >&6; } if ${ac_cv_lib_resolv_res_query+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lresolv $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char res_query (); int main () { return res_query (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_resolv_res_query=yes else ac_cv_lib_resolv_res_query=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv_res_query" >&5 $as_echo "$ac_cv_lib_resolv_res_query" >&6; } if test "x$ac_cv_lib_resolv_res_query" = xyes; then : LIBRESOLV=-lresolv else LIBRESOLV= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __dn_skipname in -lresolv" >&5 $as_echo_n "checking for __dn_skipname in -lresolv... 
" >&6; } if ${ac_cv_lib_resolv___dn_skipname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lresolv $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char __dn_skipname (); int main () { return __dn_skipname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_resolv___dn_skipname=yes else ac_cv_lib_resolv___dn_skipname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv___dn_skipname" >&5 $as_echo "$ac_cv_lib_resolv___dn_skipname" >&6; } if test "x$ac_cv_lib_resolv___dn_skipname" = xyes; then : LIBRESOLV=-lresolv else LIBRESOLV= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lnsl" >&5 $as_echo_n "checking for gethostbyname in -lnsl... " >&6; } if ${ac_cv_lib_nsl_gethostbyname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname (); int main () { return gethostbyname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_nsl_gethostbyname=yes else ac_cv_lib_nsl_gethostbyname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname" >&5 $as_echo "$ac_cv_lib_nsl_gethostbyname" >&6; } if test "x$ac_cv_lib_nsl_gethostbyname" = xyes; then : LIBRESOLV="$LIBRESOLV -lnsl" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for getdomainname in -lnsl" >&5 $as_echo_n "checking for getdomainname in -lnsl... " >&6; } if ${ac_cv_lib_nsl_getdomainname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char getdomainname (); int main () { return getdomainname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_nsl_getdomainname=yes else ac_cv_lib_nsl_getdomainname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_getdomainname" >&5 $as_echo "$ac_cv_lib_nsl_getdomainname" >&6; } if test "x$ac_cv_lib_nsl_getdomainname" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBNSL 1 _ACEOF LIBS="-lnsl $LIBS" fi fi # check for platfom specific flags case " $LDFLAGS " in " -Wl,--no-undefined ") ;; " -Wl,-no-undefined ") ;; " -Wl,-z -Wl,defs ") ;; " -Wl,-z,defs ") ;; *) case "${host}" in *darwin*);; *) LDFLAGS="$LDFLAGS -Wl,--no-undefined" ;; esac ;; esac for ac_prog in pdflatex do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PDFLATEX+:} false; then : $as_echo_n "(cached) " >&6 else case $PDFLATEX in [\\/]* | ?:[\\/]*) ac_cv_path_PDFLATEX="$PDFLATEX" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PDFLATEX="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PDFLATEX=$ac_cv_path_PDFLATEX if test -n "$PDFLATEX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PDFLATEX" >&5 $as_echo "$PDFLATEX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PDFLATEX" && break done for ac_prog in doxygen do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_DOXYGEN+:} false; then : $as_echo_n "(cached) " >&6 else case $DOXYGEN in [\\/]* | ?:[\\/]*) ac_cv_path_DOXYGEN="$DOXYGEN" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_DOXYGEN="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi DOXYGEN=$ac_cv_path_DOXYGEN if test -n "$DOXYGEN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DOXYGEN" >&5 $as_echo "$DOXYGEN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DOXYGEN" && break done for ac_prog in dot do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_DOT+:} false; then : $as_echo_n "(cached) " >&6 else case $DOT in [\\/]* | ?:[\\/]*) ac_cv_path_DOT="$DOT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_DOT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi DOT=$ac_cv_path_DOT if test -n "$DOT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DOT" >&5 $as_echo "$DOT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DOT" && break done # Check if user asks to skip documentation build # Check whether --enable-doc was given. 
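# For illustration only: the documentation build probed below is toggled
# with a standard autoconf switch, so a build without the generated docs
# might be configured as (hypothetical invocation)
#   ./configure --disable-doc
# which arrives here as enable_doc=no and is copied into enables_doc below.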
if test "${enable_doc+set}" = set; then : enableval=$enable_doc; enables_doc=$enableval fi #if test "x$enables_doc" = "xyes"; then # There is no point disabling docs due to missing tools since the pdf # files are both in svn and in the dist tarball # if test "x$PDFLATEX" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing pdflatex - documentation won't be built]) # elif test "x$DOXYGEN" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing doxygen - documentation won't be built]) # elif test "x$DOT" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing dot - documentation won't be built]) # fi #fi { $as_echo "$as_me:${as_lineno-$LINENO}: Documentation enabled: $enables_doc" >&5 $as_echo "$as_me: Documentation enabled: $enables_doc" >&6;} if test "x$enables_doc" = "xyes"; then DOC_ENABLED_TRUE= DOC_ENABLED_FALSE='#' else DOC_ENABLED_TRUE='#' DOC_ENABLED_FALSE= fi if test -f python/python/arc/index.xml -o "x$DOXYGEN" != "x"; then PYDOXYGEN_TRUE= PYDOXYGEN_FALSE='#' else PYDOXYGEN_TRUE='#' PYDOXYGEN_FALSE= fi if test -f python/altpython/arc/index.xml -o "x$DOXYGEN" != "x"; then ALTPYDOXYGEN_TRUE= ALTPYDOXYGEN_FALSE='#' else ALTPYDOXYGEN_TRUE='#' ALTPYDOXYGEN_FALSE= fi # Check for explicitly and implicitely disabled services # A-Rex # Check whether --enable-a_rex_service was given. if test "${enable_a_rex_service+set}" = set; then : enableval=$enable_a_rex_service; enables_a_rex_service=$enableval fi if test "$enables_a_rex_service" = "yes"; then if test "x$SQLITE_INSTALLED" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: A-Rex can't be built without SQLite - disabling" >&5 $as_echo "$as_me: A-Rex can't be built without SQLite - disabling" >&6;} enables_a_rex_service="no" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: A-Rex service enabled: $enables_a_rex_service" >&5 $as_echo "$as_me: A-Rex service enabled: $enables_a_rex_service" >&6;} if test "x$enables_a_rex_service" = "xyes"; then A_REX_SERVICE_ENABLED_TRUE= A_REX_SERVICE_ENABLED_FALSE='#' else A_REX_SERVICE_ENABLED_TRUE='#' A_REX_SERVICE_ENABLED_FALSE= fi # Internal job plugin # Check whether --enable-internal was given. if test "${enable_internal+set}" = set; then : enableval=$enable_internal; enables_internal=$enableval fi if test "$enables_internal" = "yes"; then if test "x$enables_a_rex_service" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: Internal job plugin can't be built without A-Rex - disabling" >&5 $as_echo "$as_me: Internal job plugin can't be built without A-Rex - disabling" >&6;} enables_internal="no" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: Internal plugin enabled: $enables_internal" >&5 $as_echo "$as_me: Internal plugin enabled: $enables_internal" >&6;} if test "x$enables_internal" = "xyes"; then INTERNAL_ENABLED_TRUE= INTERNAL_ENABLED_FALSE='#' else INTERNAL_ENABLED_TRUE='#' INTERNAL_ENABLED_FALSE= fi # LDAP service # Check whether --enable-ldap_service was given. if test "${enable_ldap_service+set}" = set; then : enableval=$enable_ldap_service; enables_ldap_service=$enableval fi { $as_echo "$as_me:${as_lineno-$LINENO}: LDAP Infosystem service enabled: $enables_ldap_service" >&5 $as_echo "$as_me: LDAP Infosystem service enabled: $enables_ldap_service" >&6;} if test "x$enables_ldap_service" = "xyes"; then LDAP_SERVICE_ENABLED_TRUE= LDAP_SERVICE_ENABLED_FALSE='#' else LDAP_SERVICE_ENABLED_TRUE='#' LDAP_SERVICE_ENABLED_FALSE= fi # LDAP monitor # Check whether --enable-monitor was given. 
if test "${enable_monitor+set}" = set; then : enableval=$enable_monitor; enables_monitor=$enableval fi { $as_echo "$as_me:${as_lineno-$LINENO}: LDAP Monitor enabled: $enables_monitor" >&5 $as_echo "$as_me: LDAP Monitor enabled: $enables_monitor" >&6;} if test "x$enables_monitor" = "xyes"; then MONITOR_ENABLED_TRUE= MONITOR_ENABLED_FALSE='#' else MONITOR_ENABLED_TRUE='#' MONITOR_ENABLED_FALSE= fi # Cache service # Check whether --enable-candypond was given. if test "${enable_candypond+set}" = set; then : enableval=$enable_candypond; enables_candypond=$enableval fi if test "$enables_candypond" = "yes"; then if test ! "x$enables_a_rex_service" = "xyes" ; then enables_candypond="no" { $as_echo "$as_me:${as_lineno-$LINENO}: CandyPond can't be built without A-REX - disabling" >&5 $as_echo "$as_me: CandyPond can't be built without A-REX - disabling" >&6;} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: CandyPond enabled: $enables_candypond" >&5 $as_echo "$as_me: CandyPond enabled: $enables_candypond" >&6;} if test "x$enables_candypond" = "xyes"; then CANDYPOND_ENABLED_TRUE= CANDYPOND_ENABLED_FALSE='#' else CANDYPOND_ENABLED_TRUE='#' CANDYPOND_ENABLED_FALSE= fi # DataDelivery service # Check whether --enable-datadelivery_service was given. if test "${enable_datadelivery_service+set}" = set; then : enableval=$enable_datadelivery_service; enables_datadelivery_service=$enableval fi { $as_echo "$as_me:${as_lineno-$LINENO}: DataDelivery service enabled: $enables_datadelivery_service" >&5 $as_echo "$as_me: DataDelivery service enabled: $enables_datadelivery_service" >&6;} if test "x$enables_datadelivery_service" = "xyes"; then DATADELIVERY_SERVICE_ENABLED_TRUE= DATADELIVERY_SERVICE_ENABLED_FALSE='#' else DATADELIVERY_SERVICE_ENABLED_TRUE='#' DATADELIVERY_SERVICE_ENABLED_FALSE= fi # Check for explicitly and implicitely disabled clients # Check whether --enable-compute_client was given. if test "${enable_compute_client+set}" = set; then : enableval=$enable_compute_client; enables_compute_client=$enableval fi { $as_echo "$as_me:${as_lineno-$LINENO}: Compute client tools enabled: $enables_compute_client" >&5 $as_echo "$as_me: Compute client tools enabled: $enables_compute_client" >&6;} if test "x$enables_compute_client" = "xyes"; then COMPUTE_CLIENT_ENABLED_TRUE= COMPUTE_CLIENT_ENABLED_FALSE='#' else COMPUTE_CLIENT_ENABLED_TRUE='#' COMPUTE_CLIENT_ENABLED_FALSE= fi # Check whether --enable-credentials_client was given. if test "${enable_credentials_client+set}" = set; then : enableval=$enable_credentials_client; enables_credentials_client=$enableval fi { $as_echo "$as_me:${as_lineno-$LINENO}: Credentials client tools enabled: $enables_credentials_client" >&5 $as_echo "$as_me: Credentials client tools enabled: $enables_credentials_client" >&6;} if test "x$enables_credentials_client" = "xyes"; then CREDENTIALS_CLIENT_ENABLED_TRUE= CREDENTIALS_CLIENT_ENABLED_FALSE='#' else CREDENTIALS_CLIENT_ENABLED_TRUE='#' CREDENTIALS_CLIENT_ENABLED_FALSE= fi # Check whether --enable-data_client was given. if test "${enable_data_client+set}" = set; then : enableval=$enable_data_client; enables_data_client=$enableval fi { $as_echo "$as_me:${as_lineno-$LINENO}: Data client tools enabled: $enables_data_client" >&5 $as_echo "$as_me: Data client tools enabled: $enables_data_client" >&6;} if test "x$enables_data_client" = "xyes"; then DATA_CLIENT_ENABLED_TRUE= DATA_CLIENT_ENABLED_FALSE='#' else DATA_CLIENT_ENABLED_TRUE='#' DATA_CLIENT_ENABLED_FALSE= fi # Check whether --enable-arcrest_client was given. 
if test "${enable_arcrest_client+set}" = set; then : enableval=$enable_arcrest_client; enables_arcrest_client=$enableval fi if test "x$enables_arcrest_client" = "xyes" ; then $PYTHON -m pip >/dev/null if test "$?" != '0' ; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: PIP not avilable -- disabling ARC REST pythin module" >&5 $as_echo "$as_me: WARNING: PIP not avilable -- disabling ARC REST pythin module" >&2;} enables_arcrest_client=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: ARC REST python module enabled: $enables_arcrest_client" >&5 $as_echo "$as_me: ARC REST python module enabled: $enables_arcrest_client" >&6;} if test "x$enables_arcrest_client" = "xyes"; then ARCREST_ENABLED_TRUE= ARCREST_ENABLED_FALSE='#' else ARCREST_ENABLED_TRUE='#' ARCREST_ENABLED_FALSE= fi # Check for consistency among disabled components if test "$enables_hed" = "no"; then if test "$enables_a_rex_service" = "yes" -o \ "$enables_candypond" = "yes" -o \ "$enables_datadelivery_service" = "yes" -o \ "$enables_compute_client" = "yes" -o \ "$enables_credentials_client" = "yes" -o \ "$enables_data_client" = "yes" -o \ "$enables_swig_python" = "yes" ; then as_fn_error $? "HED is needed for building any of the client or service tools. Please enable HED by using --enable-hed." "$LINENO" 5 fi fi if test "x$enables_hed" = "xyes"; then HED_ENABLED_TRUE= HED_ENABLED_FALSE='#' else HED_ENABLED_TRUE='#' HED_ENABLED_FALSE= fi # A-Rex specific hack for backend scripts tmp_dir=/tmp gnu_time=/usr/bin/time case "${host}" in *darwin*) # hostname -f does not work on OS X nodename="hostname" ;; *) nodename="/bin/hostname -f" ;; esac arc_location=$prefix # Shell for the job control scripts posix_shell='/bin/sh' DATE=`date +%Y-%m-%d ${SOURCE_DATE_EPOCH:+-u -d @$SOURCE_DATE_EPOCH}` #DATER=`date -R` DATER=`date +'%a, %d %b %Y %H:%M:%S %z'` SPECDATE=`LANG=C date +"%a %b %d %Y"` ac_config_files="$ac_config_files Makefile include/arc/ArcVersion.h src/Makefile src/external/Makefile src/external/cJSON/Makefile src/hed/Makefile src/hed/libs/compute/Makefile src/hed/libs/compute/test/Makefile src/hed/libs/compute/examples/Makefile src/hed/libs/common/ArcVersion.h src/hed/libs/common/Makefile src/hed/libs/common/test/Makefile src/hed/libs/communication/Makefile src/hed/libs/credential/Makefile src/hed/libs/credential/test/Makefile src/hed/libs/credentialmod/Makefile src/hed/libs/crypto/Makefile src/hed/libs/cryptomod/Makefile src/hed/libs/data/Makefile src/hed/libs/data/cache-clean.1 src/hed/libs/data/cache-list.1 src/hed/libs/data/test/Makefile src/hed/libs/data/examples/Makefile src/hed/libs/Makefile src/hed/libs/loader/Makefile src/hed/libs/loader/schema/Makefile src/hed/libs/loader/test/Makefile src/hed/libs/message/Makefile src/hed/libs/message/test/Makefile src/hed/libs/security/Makefile src/hed/libs/security/ArcPDP/Makefile src/hed/libs/security/ArcPDP/attr/Makefile src/hed/libs/security/ArcPDP/policy/Makefile src/hed/libs/security/ArcPDP/alg/Makefile src/hed/libs/security/ArcPDP/fn/Makefile src/hed/libs/credentialstore/Makefile src/hed/libs/ws-addressing/Makefile src/hed/libs/ws-security/Makefile src/hed/libs/ws-security/test/Makefile src/hed/libs/infosys/Makefile src/hed/libs/infosys/schema/Makefile src/hed/libs/infosys/test/Makefile src/hed/libs/delegation/Makefile src/hed/libs/delegation/test/Makefile src/hed/libs/xmlsec/Makefile src/hed/libs/globusutils/Makefile src/hed/libs/otokens/Makefile src/hed/daemon/Makefile src/hed/daemon/scripts/Makefile src/hed/daemon/schema/Makefile src/hed/daemon/unix/Makefile 
src/hed/mcc/Makefile src/hed/mcc/soap/Makefile src/hed/mcc/tcp/Makefile src/hed/mcc/tcp/schema/Makefile src/hed/mcc/http/Makefile src/hed/mcc/http/schema/Makefile src/hed/mcc/tls/Makefile src/hed/mcc/tls/schema/Makefile src/hed/mcc/tls/test/Makefile src/hed/mcc/msgvalidator/Makefile src/hed/mcc/msgvalidator/schema/Makefile src/hed/acc/Makefile src/hed/acc/ARCREST/Makefile src/hed/acc/Broker/Makefile src/hed/acc/Broker/test/Makefile src/hed/acc/PythonBroker/Makefile src/hed/acc/JobDescriptionParser/Makefile src/hed/acc/JobDescriptionParser/test/Makefile src/hed/acc/ARCHERY/Makefile src/hed/acc/TEST/Makefile src/hed/dmc/Makefile src/hed/dmc/file/Makefile src/hed/dmc/gridftp/Makefile src/hed/dmc/http/Makefile src/hed/dmc/srm/Makefile src/hed/dmc/srm/srmclient/Makefile src/hed/dmc/gfal/Makefile src/hed/dmc/xrootd/Makefile src/hed/dmc/mock/Makefile src/hed/dmc/rucio/Makefile src/hed/dmc/s3/Makefile src/hed/profiles/general/general.xml src/hed/shc/Makefile src/hed/shc/arcpdp/Makefile src/hed/shc/arcpdp/schema/Makefile src/hed/shc/xacmlpdp/Makefile src/hed/shc/xacmlpdp/schema/Makefile src/hed/shc/delegationpdp/Makefile src/hed/shc/delegationpdp/schema/Makefile src/hed/shc/gaclpdp/Makefile src/hed/shc/pdpserviceinvoker/Makefile src/hed/shc/pdpserviceinvoker/schema/Makefile src/hed/shc/allowpdp/Makefile src/hed/shc/denypdp/Makefile src/hed/shc/simplelistpdp/Makefile src/hed/shc/simplelistpdp/schema/Makefile src/hed/shc/arcauthzsh/Makefile src/hed/shc/arcauthzsh/schema/Makefile src/hed/shc/usernametokensh/Makefile src/hed/shc/usernametokensh/schema/Makefile src/hed/shc/x509tokensh/Makefile src/hed/shc/x509tokensh/schema/Makefile src/hed/shc/samltokensh/Makefile src/hed/shc/samltokensh/schema/Makefile src/hed/shc/saml2sso_assertionconsumersh/Makefile src/hed/shc/delegationsh/Makefile src/hed/shc/delegationsh/schema/Makefile src/hed/shc/legacy/Makefile src/hed/shc/legacy/test/Makefile src/hed/shc/legacy/schema/Makefile src/hed/shc/otokens/Makefile src/hed/identitymap/Makefile src/hed/identitymap/schema/Makefile src/libs/Makefile src/libs/data-staging/Makefile src/libs/data-staging/test/Makefile src/libs/data-staging/examples/Makefile src/services/Makefile src/services/a-rex/Makefile src/services/a-rex/arc-arex src/services/a-rex/arc-arex.service src/services/a-rex/arc-arex-start src/services/a-rex/arc-arex-ws src/services/a-rex/arc-arex-ws.service src/services/a-rex/arc-arex-ws-start src/services/a-rex/a-rex-backtrace-collect src/services/a-rex/a-rex-backtrace-collect.8 src/services/a-rex/perferator src/services/a-rex/update-controldir src/services/a-rex/grid-manager/arc-blahp-logger.8 src/services/a-rex/grid-manager/gm-jobs.8 src/services/a-rex/rest/Makefile src/services/a-rex/rest/test/Makefile src/services/a-rex/delegation/Makefile src/services/a-rex/grid-manager/Makefile src/services/a-rex/grid-manager/accounting/Makefile src/services/a-rex/grid-manager/conf/Makefile src/services/a-rex/grid-manager/files/Makefile src/services/a-rex/grid-manager/jobs/Makefile src/services/a-rex/grid-manager/log/Makefile src/services/a-rex/grid-manager/mail/Makefile src/services/a-rex/grid-manager/misc/Makefile src/services/a-rex/grid-manager/run/Makefile src/services/a-rex/internaljobplugin/Makefile src/services/a-rex/infoproviders/Makefile src/services/a-rex/infoproviders/CEinfo.pl src/services/a-rex/infoproviders/ConfigCentral.pm src/services/a-rex/infoproviders/PerfData.pl src/services/a-rex/infoproviders/test/Makefile src/services/a-rex/lrms/Makefile src/services/a-rex/lrms/test/Makefile 
src/services/a-rex/lrms/lrms_common.sh src/services/a-rex/lrms/condor/Makefile src/services/a-rex/lrms/condor/scan-condor-job src/services/a-rex/lrms/condor/cancel-condor-job src/services/a-rex/lrms/condor/submit-condor-job src/services/a-rex/lrms/fork/Makefile src/services/a-rex/lrms/fork/scan-fork-job src/services/a-rex/lrms/fork/submit-fork-job src/services/a-rex/lrms/fork/cancel-fork-job src/services/a-rex/lrms/ll/Makefile src/services/a-rex/lrms/ll/submit-ll-job src/services/a-rex/lrms/ll/cancel-ll-job src/services/a-rex/lrms/ll/scan-ll-job src/services/a-rex/lrms/lsf/Makefile src/services/a-rex/lrms/lsf/submit-lsf-job src/services/a-rex/lrms/lsf/cancel-lsf-job src/services/a-rex/lrms/lsf/scan-lsf-job src/services/a-rex/lrms/pbs/Makefile src/services/a-rex/lrms/pbs/submit-pbs-job src/services/a-rex/lrms/pbs/cancel-pbs-job src/services/a-rex/lrms/pbs/scan-pbs-job src/services/a-rex/lrms/pbspro/Makefile src/services/a-rex/lrms/pbspro/submit-pbspro-job src/services/a-rex/lrms/pbspro/cancel-pbspro-job src/services/a-rex/lrms/pbspro/scan-pbspro-job src/services/a-rex/lrms/sge/Makefile src/services/a-rex/lrms/sge/submit-sge-job src/services/a-rex/lrms/sge/scan-sge-job src/services/a-rex/lrms/sge/cancel-sge-job src/services/a-rex/lrms/slurm/Makefile src/services/a-rex/lrms/slurm/submit-SLURM-job src/services/a-rex/lrms/slurm/scan-SLURM-job src/services/a-rex/lrms/slurm/cancel-SLURM-job src/services/a-rex/lrms/slurm/test/Makefile src/services/a-rex/lrms/slurm/test/scan/Makefile src/services/a-rex/lrms/slurm/test/submit/Makefile src/services/a-rex/lrms/boinc/Makefile src/services/a-rex/lrms/boinc/submit-boinc-job src/services/a-rex/lrms/boinc/scan-boinc-job src/services/a-rex/lrms/boinc/cancel-boinc-job src/services/a-rex/rte/Makefile src/services/a-rex/rte/ENV/PROXY src/services/a-rex/rte/ENV/CANDYPOND src/services/a-rex/schema/Makefile src/services/candypond/Makefile src/services/data-staging/Makefile src/services/data-staging/arc-datadelivery-service src/services/data-staging/arc-datadelivery-service.service src/services/data-staging/arc-datadelivery-service-start src/services/ldap-infosys/Makefile src/services/ldap-infosys/create-bdii-config src/services/ldap-infosys/create-slapd-config src/services/ldap-infosys/arc-infosys-ldap src/services/ldap-infosys/arc-infosys-ldap.service src/services/ldap-infosys/arc-infosys-ldap-slapd.service src/services/monitor/Makefile src/services/monitor/monitor src/services/monitor/README src/services/monitor/man/Makefile src/services/monitor/man/monitor.7 src/services/monitor/includes/Makefile src/services/monitor/mon-icons/Makefile src/services/monitor/lang/Makefile src/services/examples/Makefile src/services/examples/echo_python/Makefile src/services/wrappers/Makefile src/services/wrappers/python/Makefile src/services/wrappers/python/schema/Makefile src/clients/Makefile src/clients/data/Makefile src/clients/credentials/Makefile src/clients/compute/Makefile src/clients/pyarcrest/Makefile src/tests/Makefile src/tests/echo/Makefile src/tests/echo/perftest.1 src/tests/echo/echo_service.xml.example src/tests/echo/schema/Makefile src/tests/policy-delegation/Makefile src/tests/delegation/Makefile src/tests/translator/Makefile src/tests/xpath/Makefile src/tests/arcpolicy/Makefile src/tests/perf/Makefile src/tests/perf/arcperftest.1 src/tests/client/Makefile src/tests/lrms/Makefile src/utils/arc-exporter/Makefile src/utils/arc-exporter/arc-exporter src/utils/archery/Makefile src/utils/archery/archery-manage src/utils/python/Makefile src/utils/python/arccandypond 
src/utils/python/arcctl src/utils/python/arcctl.1 src/utils/python/jura-ng src/utils/python/arc/Makefile src/utils/python/arc/gen_paths_dist.sh src/utils/python/arc/utils/Makefile src/utils/python/arc/control/Makefile src/utils/hed/wsdl2hed.1 src/utils/hed/arcplugin.1 src/utils/hed/Makefile src/utils/Makefile src/wn/Makefile src/doc/Makefile src/doc/arc.conf.5 swig/Makefile python/Makefile python/Doxyfile.api python/python/Makefile python/python/arc/Makefile python/altpython/Makefile python/altpython/arc/Makefile python/test/Makefile python/test/python/Makefile python/test/altpython/Makefile python/examples/Makefile po/Makefile.in include/Makefile debian/Makefile debian/changelog.deb nordugrid-arc.spec src/hed/daemon/arched.8 src/hed/daemon/scripts/arched src/hed/daemon/scripts/arched.service src/hed/daemon/scripts/arched-start src/doxygen/Makefile" ac_config_files="$ac_config_files src/utils/python/arcconfig-parser" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! 
-f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs { $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 $as_echo_n "checking that generated files are newer than configure... " >&6; } if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 $as_echo "done" >&6; } if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then as_fn_error $? "conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${SYSTEMD_UNITS_ENABLED_TRUE}" && test -z "${SYSTEMD_UNITS_ENABLED_FALSE}"; then as_fn_error $? "conditional \"SYSTEMD_UNITS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${SYSV_SCRIPTS_ENABLED_TRUE}" && test -z "${SYSV_SCRIPTS_ENABLED_FALSE}"; then as_fn_error $? "conditional \"SYSV_SCRIPTS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${PEDANTIC_COMPILE_TRUE}" && test -z "${PEDANTIC_COMPILE_FALSE}"; then as_fn_error $? "conditional \"PEDANTIC_COMPILE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${SWIG_ENABLED_TRUE}" && test -z "${SWIG_ENABLED_FALSE}"; then as_fn_error $? "conditional \"SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${PYTHON_ENABLED_TRUE}" && test -z "${PYTHON_ENABLED_FALSE}"; then as_fn_error $? "conditional \"PYTHON_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${PYTHON3_TRUE}" && test -z "${PYTHON3_FALSE}"; then as_fn_error $? "conditional \"PYTHON3\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi if test -z "${PYTHON_SWIG_ENABLED_TRUE}" && test -z "${PYTHON_SWIG_ENABLED_FALSE}"; then as_fn_error $? "conditional \"PYTHON_SWIG_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${PYTHON_SERVICE_TRUE}" && test -z "${PYTHON_SERVICE_FALSE}"; then as_fn_error $? "conditional \"PYTHON_SERVICE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${ALTPYTHON_ENABLED_TRUE}" && test -z "${ALTPYTHON_ENABLED_FALSE}"; then as_fn_error $? "conditional \"ALTPYTHON_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${ALTPYTHON3_TRUE}" && test -z "${ALTPYTHON3_FALSE}"; then as_fn_error $? "conditional \"ALTPYTHON3\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${PYLINT_ENABLED_TRUE}" && test -z "${PYLINT_ENABLED_FALSE}"; then as_fn_error $? "conditional \"PYLINT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${NSS_ENABLED_TRUE}" && test -z "${NSS_ENABLED_FALSE}"; then as_fn_error $? "conditional \"NSS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${SQLITE_ENABLED_TRUE}" && test -z "${SQLITE_ENABLED_FALSE}"; then as_fn_error $? "conditional \"SQLITE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${LDNS_ENABLED_TRUE}" && test -z "${LDNS_ENABLED_FALSE}"; then as_fn_error $? "conditional \"LDNS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MACOSX_TRUE}" && test -z "${MACOSX_FALSE}"; then as_fn_error $? "conditional \"MACOSX\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${SQLITEJSTORE_ENABLED_TRUE}" && test -z "${SQLITEJSTORE_ENABLED_FALSE}"; then as_fn_error $? "conditional \"SQLITEJSTORE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${GLOBUSUTILS_ENABLED_TRUE}" && test -z "${GLOBUSUTILS_ENABLED_FALSE}"; then as_fn_error $? "conditional \"GLOBUSUTILS_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${GRIDFTP_ENABLED_TRUE}" && test -z "${GRIDFTP_ENABLED_FALSE}"; then as_fn_error $? "conditional \"GRIDFTP_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MOCK_DMC_ENABLED_TRUE}" && test -z "${MOCK_DMC_ENABLED_FALSE}"; then as_fn_error $? "conditional \"MOCK_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${GFAL_ENABLED_TRUE}" && test -z "${GFAL_ENABLED_FALSE}"; then as_fn_error $? "conditional \"GFAL_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${S3_DMC_ENABLED_TRUE}" && test -z "${S3_DMC_ENABLED_FALSE}"; then as_fn_error $? "conditional \"S3_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${XROOTD_ENABLED_TRUE}" && test -z "${XROOTD_ENABLED_FALSE}"; then as_fn_error $? "conditional \"XROOTD_ENABLED\" was never defined. 
Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${XMLSEC_ENABLED_TRUE}" && test -z "${XMLSEC_ENABLED_FALSE}"; then as_fn_error $? "conditional \"XMLSEC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${CPPUNIT_ENABLED_TRUE}" && test -z "${CPPUNIT_ENABLED_FALSE}"; then as_fn_error $? "conditional \"CPPUNIT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${SRM_DMC_ENABLED_TRUE}" && test -z "${SRM_DMC_ENABLED_FALSE}"; then as_fn_error $? "conditional \"SRM_DMC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${DOC_ENABLED_TRUE}" && test -z "${DOC_ENABLED_FALSE}"; then as_fn_error $? "conditional \"DOC_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${PYDOXYGEN_TRUE}" && test -z "${PYDOXYGEN_FALSE}"; then as_fn_error $? "conditional \"PYDOXYGEN\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${ALTPYDOXYGEN_TRUE}" && test -z "${ALTPYDOXYGEN_FALSE}"; then as_fn_error $? "conditional \"ALTPYDOXYGEN\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${A_REX_SERVICE_ENABLED_TRUE}" && test -z "${A_REX_SERVICE_ENABLED_FALSE}"; then as_fn_error $? "conditional \"A_REX_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${INTERNAL_ENABLED_TRUE}" && test -z "${INTERNAL_ENABLED_FALSE}"; then as_fn_error $? "conditional \"INTERNAL_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${LDAP_SERVICE_ENABLED_TRUE}" && test -z "${LDAP_SERVICE_ENABLED_FALSE}"; then as_fn_error $? "conditional \"LDAP_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MONITOR_ENABLED_TRUE}" && test -z "${MONITOR_ENABLED_FALSE}"; then as_fn_error $? "conditional \"MONITOR_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${CANDYPOND_ENABLED_TRUE}" && test -z "${CANDYPOND_ENABLED_FALSE}"; then as_fn_error $? "conditional \"CANDYPOND_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${DATADELIVERY_SERVICE_ENABLED_TRUE}" && test -z "${DATADELIVERY_SERVICE_ENABLED_FALSE}"; then as_fn_error $? "conditional \"DATADELIVERY_SERVICE_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${COMPUTE_CLIENT_ENABLED_TRUE}" && test -z "${COMPUTE_CLIENT_ENABLED_FALSE}"; then as_fn_error $? "conditional \"COMPUTE_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${CREDENTIALS_CLIENT_ENABLED_TRUE}" && test -z "${CREDENTIALS_CLIENT_ENABLED_FALSE}"; then as_fn_error $? "conditional \"CREDENTIALS_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${DATA_CLIENT_ENABLED_TRUE}" && test -z "${DATA_CLIENT_ENABLED_FALSE}"; then as_fn_error $? "conditional \"DATA_CLIENT_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi if test -z "${ARCREST_ENABLED_TRUE}" && test -z "${ARCREST_ENABLED_FALSE}"; then as_fn_error $? "conditional \"ARCREST_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${HED_ENABLED_TRUE}" && test -z "${HED_ENABLED_FALSE}"; then as_fn_error $? "conditional \"HED_ENABLED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. 
as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by nordugrid-arc $as_me 7.1.1, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ nordugrid-arc config.status 7.1.1 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? 
"ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' 
lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' NMEDIT='`$ECHO "$NMEDIT" | $SED 
"$delay_single_quote_subst"`' LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' 
shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' 
compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } # Quote evaled strings. 
for var in SHELL \ ECHO \ PATH_SEPARATOR \ SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ OBJDUMP \ deplibs_check_method \ file_magic_cmd \ file_magic_glob \ want_nocaseglob \ DLLTOOL \ sharedlib_from_linklib_cmd \ AR \ AR_FLAGS \ archiver_list_spec \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_import \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ lt_cv_nm_interface \ nm_file_list_spec \ lt_cv_truncate_bin \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_pic \ lt_prog_compiler_wl \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ MANIFEST_TOOL \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_separator \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ install_override_mode \ finish_eval \ old_striplib \ striplib \ compiler_lib_search_dirs \ predep_objects \ postdep_objects \ predeps \ postdeps \ compiler_lib_search_path \ LD_CXX \ reload_flag_CXX \ compiler_CXX \ lt_prog_compiler_no_builtin_flag_CXX \ lt_prog_compiler_pic_CXX \ lt_prog_compiler_wl_CXX \ lt_prog_compiler_static_CXX \ lt_cv_prog_compiler_c_o_CXX \ export_dynamic_flag_spec_CXX \ whole_archive_flag_spec_CXX \ compiler_needs_object_CXX \ with_gnu_ld_CXX \ allow_undefined_flag_CXX \ no_undefined_flag_CXX \ hardcode_libdir_flag_spec_CXX \ hardcode_libdir_separator_CXX \ exclude_expsyms_CXX \ include_expsyms_CXX \ file_list_spec_CXX \ compiler_lib_search_dirs_CXX \ predep_objects_CXX \ postdep_objects_CXX \ predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postlink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ configure_time_dlsearch_path \ configure_time_lt_sys_library_path \ reload_cmds_CXX \ old_archive_cmds_CXX \ old_archive_from_new_cmds_CXX \ old_archive_from_expsyms_cmds_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ module_cmds_CXX \ module_expsym_cmds_CXX \ export_symbols_cmds_CXX \ prelink_cmds_CXX \ postlink_cmds_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done ac_aux_dir='$ac_aux_dir' # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes INIT. 
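# --- Illustrative aside (editor's sketch, not generated by libtool) ---
# The two loops above serialize every libtool variable so that it can
# survive a second round of shell evaluation inside config.status:
# single-eval values get sed_quote_subst armor, while double-eval
# command lists additionally pass through double_quote_subst and
# delay_variable_subst.  The heart of the armor is the classic sed
# substitution (shown here as sed itself receives it):
#     s/'/'\''/g
# which closes the open single quote, emits a literally escaped quote,
# and reopens the quote, so a value such as  it's  is rewritten as
# it'\''s  and can then be wrapped in '...' without losing anything.
# --- end aside ---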
if test -n "\${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' RM='$RM' ofile='$ofile' # Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it # from automake < 1.5. eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' # Capture the value of LINGUAS because we need it to compute CATALOGS. LINGUAS="${LINGUAS-%UNSET%}" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "po-directories") CONFIG_COMMANDS="$CONFIG_COMMANDS po-directories" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "include/arc/ArcVersion.h") CONFIG_FILES="$CONFIG_FILES include/arc/ArcVersion.h" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "src/external/Makefile") CONFIG_FILES="$CONFIG_FILES src/external/Makefile" ;; "src/external/cJSON/Makefile") CONFIG_FILES="$CONFIG_FILES src/external/cJSON/Makefile" ;; "src/hed/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/Makefile" ;; "src/hed/libs/compute/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/compute/Makefile" ;; "src/hed/libs/compute/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/compute/test/Makefile" ;; "src/hed/libs/compute/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/compute/examples/Makefile" ;; "src/hed/libs/common/ArcVersion.h") CONFIG_FILES="$CONFIG_FILES src/hed/libs/common/ArcVersion.h" ;; "src/hed/libs/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/common/Makefile" ;; "src/hed/libs/common/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/common/test/Makefile" ;; "src/hed/libs/communication/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/communication/Makefile" ;; "src/hed/libs/credential/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credential/Makefile" ;; "src/hed/libs/credential/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credential/test/Makefile" ;; "src/hed/libs/credentialmod/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credentialmod/Makefile" ;; "src/hed/libs/crypto/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/crypto/Makefile" ;; "src/hed/libs/cryptomod/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/cryptomod/Makefile" ;; "src/hed/libs/data/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/Makefile" ;; "src/hed/libs/data/cache-clean.1") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/cache-clean.1" ;; "src/hed/libs/data/cache-list.1") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/cache-list.1" ;; "src/hed/libs/data/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/test/Makefile" ;; "src/hed/libs/data/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/data/examples/Makefile" ;; "src/hed/libs/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/Makefile" ;; "src/hed/libs/loader/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/loader/Makefile" ;; "src/hed/libs/loader/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/loader/schema/Makefile" ;; "src/hed/libs/loader/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/loader/test/Makefile" ;; "src/hed/libs/message/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/message/Makefile" ;; "src/hed/libs/message/test/Makefile") CONFIG_FILES="$CONFIG_FILES 
src/hed/libs/message/test/Makefile" ;; "src/hed/libs/security/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/Makefile" ;; "src/hed/libs/security/ArcPDP/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/Makefile" ;; "src/hed/libs/security/ArcPDP/attr/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/attr/Makefile" ;; "src/hed/libs/security/ArcPDP/policy/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/policy/Makefile" ;; "src/hed/libs/security/ArcPDP/alg/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/alg/Makefile" ;; "src/hed/libs/security/ArcPDP/fn/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/security/ArcPDP/fn/Makefile" ;; "src/hed/libs/credentialstore/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/credentialstore/Makefile" ;; "src/hed/libs/ws-addressing/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws-addressing/Makefile" ;; "src/hed/libs/ws-security/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws-security/Makefile" ;; "src/hed/libs/ws-security/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/ws-security/test/Makefile" ;; "src/hed/libs/infosys/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/infosys/Makefile" ;; "src/hed/libs/infosys/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/infosys/schema/Makefile" ;; "src/hed/libs/infosys/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/infosys/test/Makefile" ;; "src/hed/libs/delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/delegation/Makefile" ;; "src/hed/libs/delegation/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/delegation/test/Makefile" ;; "src/hed/libs/xmlsec/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/xmlsec/Makefile" ;; "src/hed/libs/globusutils/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/globusutils/Makefile" ;; "src/hed/libs/otokens/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/libs/otokens/Makefile" ;; "src/hed/daemon/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/Makefile" ;; "src/hed/daemon/scripts/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/Makefile" ;; "src/hed/daemon/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/schema/Makefile" ;; "src/hed/daemon/unix/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/unix/Makefile" ;; "src/hed/mcc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/Makefile" ;; "src/hed/mcc/soap/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/soap/Makefile" ;; "src/hed/mcc/tcp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tcp/Makefile" ;; "src/hed/mcc/tcp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tcp/schema/Makefile" ;; "src/hed/mcc/http/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/http/Makefile" ;; "src/hed/mcc/http/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/http/schema/Makefile" ;; "src/hed/mcc/tls/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tls/Makefile" ;; "src/hed/mcc/tls/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tls/schema/Makefile" ;; "src/hed/mcc/tls/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/tls/test/Makefile" ;; "src/hed/mcc/msgvalidator/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/msgvalidator/Makefile" ;; "src/hed/mcc/msgvalidator/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/mcc/msgvalidator/schema/Makefile" ;; "src/hed/acc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/Makefile" ;; "src/hed/acc/ARCREST/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/ARCREST/Makefile" ;; 
"src/hed/acc/Broker/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/Broker/Makefile" ;; "src/hed/acc/Broker/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/Broker/test/Makefile" ;; "src/hed/acc/PythonBroker/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/PythonBroker/Makefile" ;; "src/hed/acc/JobDescriptionParser/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/JobDescriptionParser/Makefile" ;; "src/hed/acc/JobDescriptionParser/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/JobDescriptionParser/test/Makefile" ;; "src/hed/acc/ARCHERY/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/ARCHERY/Makefile" ;; "src/hed/acc/TEST/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/acc/TEST/Makefile" ;; "src/hed/dmc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/Makefile" ;; "src/hed/dmc/file/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/file/Makefile" ;; "src/hed/dmc/gridftp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/gridftp/Makefile" ;; "src/hed/dmc/http/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/http/Makefile" ;; "src/hed/dmc/srm/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/srm/Makefile" ;; "src/hed/dmc/srm/srmclient/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/srm/srmclient/Makefile" ;; "src/hed/dmc/gfal/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/gfal/Makefile" ;; "src/hed/dmc/xrootd/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/xrootd/Makefile" ;; "src/hed/dmc/mock/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/mock/Makefile" ;; "src/hed/dmc/rucio/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/rucio/Makefile" ;; "src/hed/dmc/s3/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/dmc/s3/Makefile" ;; "src/hed/profiles/general/general.xml") CONFIG_FILES="$CONFIG_FILES src/hed/profiles/general/general.xml" ;; "src/hed/shc/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/Makefile" ;; "src/hed/shc/arcpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcpdp/Makefile" ;; "src/hed/shc/arcpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcpdp/schema/Makefile" ;; "src/hed/shc/xacmlpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/xacmlpdp/Makefile" ;; "src/hed/shc/xacmlpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/xacmlpdp/schema/Makefile" ;; "src/hed/shc/delegationpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationpdp/Makefile" ;; "src/hed/shc/delegationpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationpdp/schema/Makefile" ;; "src/hed/shc/gaclpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/gaclpdp/Makefile" ;; "src/hed/shc/pdpserviceinvoker/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/pdpserviceinvoker/Makefile" ;; "src/hed/shc/pdpserviceinvoker/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/pdpserviceinvoker/schema/Makefile" ;; "src/hed/shc/allowpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/allowpdp/Makefile" ;; "src/hed/shc/denypdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/denypdp/Makefile" ;; "src/hed/shc/simplelistpdp/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/simplelistpdp/Makefile" ;; "src/hed/shc/simplelistpdp/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/simplelistpdp/schema/Makefile" ;; "src/hed/shc/arcauthzsh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcauthzsh/Makefile" ;; "src/hed/shc/arcauthzsh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/arcauthzsh/schema/Makefile" ;; "src/hed/shc/usernametokensh/Makefile") CONFIG_FILES="$CONFIG_FILES 
src/hed/shc/usernametokensh/Makefile" ;; "src/hed/shc/usernametokensh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/usernametokensh/schema/Makefile" ;; "src/hed/shc/x509tokensh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/x509tokensh/Makefile" ;; "src/hed/shc/x509tokensh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/x509tokensh/schema/Makefile" ;; "src/hed/shc/samltokensh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/samltokensh/Makefile" ;; "src/hed/shc/samltokensh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/samltokensh/schema/Makefile" ;; "src/hed/shc/saml2sso_assertionconsumersh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/saml2sso_assertionconsumersh/Makefile" ;; "src/hed/shc/delegationsh/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationsh/Makefile" ;; "src/hed/shc/delegationsh/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/delegationsh/schema/Makefile" ;; "src/hed/shc/legacy/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/legacy/Makefile" ;; "src/hed/shc/legacy/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/legacy/test/Makefile" ;; "src/hed/shc/legacy/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/legacy/schema/Makefile" ;; "src/hed/shc/otokens/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/shc/otokens/Makefile" ;; "src/hed/identitymap/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/identitymap/Makefile" ;; "src/hed/identitymap/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/hed/identitymap/schema/Makefile" ;; "src/libs/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/Makefile" ;; "src/libs/data-staging/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/data-staging/Makefile" ;; "src/libs/data-staging/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/data-staging/test/Makefile" ;; "src/libs/data-staging/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/libs/data-staging/examples/Makefile" ;; "src/services/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/Makefile" ;; "src/services/a-rex/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/Makefile" ;; "src/services/a-rex/arc-arex") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/arc-arex" ;; "src/services/a-rex/arc-arex.service") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/arc-arex.service" ;; "src/services/a-rex/arc-arex-start") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/arc-arex-start" ;; "src/services/a-rex/arc-arex-ws") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/arc-arex-ws" ;; "src/services/a-rex/arc-arex-ws.service") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/arc-arex-ws.service" ;; "src/services/a-rex/arc-arex-ws-start") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/arc-arex-ws-start" ;; "src/services/a-rex/a-rex-backtrace-collect") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex-backtrace-collect" ;; "src/services/a-rex/a-rex-backtrace-collect.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/a-rex-backtrace-collect.8" ;; "src/services/a-rex/perferator") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/perferator" ;; "src/services/a-rex/update-controldir") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/update-controldir" ;; "src/services/a-rex/grid-manager/arc-blahp-logger.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/arc-blahp-logger.8" ;; "src/services/a-rex/grid-manager/gm-jobs.8") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/gm-jobs.8" ;; "src/services/a-rex/rest/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/rest/Makefile" ;; 
"src/services/a-rex/rest/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/rest/test/Makefile" ;; "src/services/a-rex/delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/delegation/Makefile" ;; "src/services/a-rex/grid-manager/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/Makefile" ;; "src/services/a-rex/grid-manager/accounting/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/accounting/Makefile" ;; "src/services/a-rex/grid-manager/conf/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/conf/Makefile" ;; "src/services/a-rex/grid-manager/files/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/files/Makefile" ;; "src/services/a-rex/grid-manager/jobs/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/jobs/Makefile" ;; "src/services/a-rex/grid-manager/log/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/log/Makefile" ;; "src/services/a-rex/grid-manager/mail/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/mail/Makefile" ;; "src/services/a-rex/grid-manager/misc/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/misc/Makefile" ;; "src/services/a-rex/grid-manager/run/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/grid-manager/run/Makefile" ;; "src/services/a-rex/internaljobplugin/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/internaljobplugin/Makefile" ;; "src/services/a-rex/infoproviders/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/Makefile" ;; "src/services/a-rex/infoproviders/CEinfo.pl") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/CEinfo.pl" ;; "src/services/a-rex/infoproviders/ConfigCentral.pm") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/ConfigCentral.pm" ;; "src/services/a-rex/infoproviders/PerfData.pl") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/PerfData.pl" ;; "src/services/a-rex/infoproviders/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/infoproviders/test/Makefile" ;; "src/services/a-rex/lrms/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/Makefile" ;; "src/services/a-rex/lrms/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/test/Makefile" ;; "src/services/a-rex/lrms/lrms_common.sh") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lrms_common.sh" ;; "src/services/a-rex/lrms/condor/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/Makefile" ;; "src/services/a-rex/lrms/condor/scan-condor-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/scan-condor-job" ;; "src/services/a-rex/lrms/condor/cancel-condor-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/cancel-condor-job" ;; "src/services/a-rex/lrms/condor/submit-condor-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/condor/submit-condor-job" ;; "src/services/a-rex/lrms/fork/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/Makefile" ;; "src/services/a-rex/lrms/fork/scan-fork-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/scan-fork-job" ;; "src/services/a-rex/lrms/fork/submit-fork-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/submit-fork-job" ;; "src/services/a-rex/lrms/fork/cancel-fork-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/fork/cancel-fork-job" ;; "src/services/a-rex/lrms/ll/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/Makefile" ;; 
"src/services/a-rex/lrms/ll/submit-ll-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/submit-ll-job" ;; "src/services/a-rex/lrms/ll/cancel-ll-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/cancel-ll-job" ;; "src/services/a-rex/lrms/ll/scan-ll-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/ll/scan-ll-job" ;; "src/services/a-rex/lrms/lsf/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/Makefile" ;; "src/services/a-rex/lrms/lsf/submit-lsf-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/submit-lsf-job" ;; "src/services/a-rex/lrms/lsf/cancel-lsf-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/cancel-lsf-job" ;; "src/services/a-rex/lrms/lsf/scan-lsf-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/lsf/scan-lsf-job" ;; "src/services/a-rex/lrms/pbs/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/Makefile" ;; "src/services/a-rex/lrms/pbs/submit-pbs-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/submit-pbs-job" ;; "src/services/a-rex/lrms/pbs/cancel-pbs-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/cancel-pbs-job" ;; "src/services/a-rex/lrms/pbs/scan-pbs-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbs/scan-pbs-job" ;; "src/services/a-rex/lrms/pbspro/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbspro/Makefile" ;; "src/services/a-rex/lrms/pbspro/submit-pbspro-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbspro/submit-pbspro-job" ;; "src/services/a-rex/lrms/pbspro/cancel-pbspro-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbspro/cancel-pbspro-job" ;; "src/services/a-rex/lrms/pbspro/scan-pbspro-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/pbspro/scan-pbspro-job" ;; "src/services/a-rex/lrms/sge/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/Makefile" ;; "src/services/a-rex/lrms/sge/submit-sge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/submit-sge-job" ;; "src/services/a-rex/lrms/sge/scan-sge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/scan-sge-job" ;; "src/services/a-rex/lrms/sge/cancel-sge-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/sge/cancel-sge-job" ;; "src/services/a-rex/lrms/slurm/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/Makefile" ;; "src/services/a-rex/lrms/slurm/submit-SLURM-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/submit-SLURM-job" ;; "src/services/a-rex/lrms/slurm/scan-SLURM-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/scan-SLURM-job" ;; "src/services/a-rex/lrms/slurm/cancel-SLURM-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/cancel-SLURM-job" ;; "src/services/a-rex/lrms/slurm/test/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/test/Makefile" ;; "src/services/a-rex/lrms/slurm/test/scan/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/test/scan/Makefile" ;; "src/services/a-rex/lrms/slurm/test/submit/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/slurm/test/submit/Makefile" ;; "src/services/a-rex/lrms/boinc/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/Makefile" ;; "src/services/a-rex/lrms/boinc/submit-boinc-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/submit-boinc-job" ;; "src/services/a-rex/lrms/boinc/scan-boinc-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/scan-boinc-job" ;; 
"src/services/a-rex/lrms/boinc/cancel-boinc-job") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/lrms/boinc/cancel-boinc-job" ;; "src/services/a-rex/rte/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/rte/Makefile" ;; "src/services/a-rex/rte/ENV/PROXY") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/rte/ENV/PROXY" ;; "src/services/a-rex/rte/ENV/CANDYPOND") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/rte/ENV/CANDYPOND" ;; "src/services/a-rex/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/a-rex/schema/Makefile" ;; "src/services/candypond/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/candypond/Makefile" ;; "src/services/data-staging/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/Makefile" ;; "src/services/data-staging/arc-datadelivery-service") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/arc-datadelivery-service" ;; "src/services/data-staging/arc-datadelivery-service.service") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/arc-datadelivery-service.service" ;; "src/services/data-staging/arc-datadelivery-service-start") CONFIG_FILES="$CONFIG_FILES src/services/data-staging/arc-datadelivery-service-start" ;; "src/services/ldap-infosys/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/Makefile" ;; "src/services/ldap-infosys/create-bdii-config") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/create-bdii-config" ;; "src/services/ldap-infosys/create-slapd-config") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/create-slapd-config" ;; "src/services/ldap-infosys/arc-infosys-ldap") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/arc-infosys-ldap" ;; "src/services/ldap-infosys/arc-infosys-ldap.service") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/arc-infosys-ldap.service" ;; "src/services/ldap-infosys/arc-infosys-ldap-slapd.service") CONFIG_FILES="$CONFIG_FILES src/services/ldap-infosys/arc-infosys-ldap-slapd.service" ;; "src/services/monitor/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/monitor/Makefile" ;; "src/services/monitor/monitor") CONFIG_FILES="$CONFIG_FILES src/services/monitor/monitor" ;; "src/services/monitor/README") CONFIG_FILES="$CONFIG_FILES src/services/monitor/README" ;; "src/services/monitor/man/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/monitor/man/Makefile" ;; "src/services/monitor/man/monitor.7") CONFIG_FILES="$CONFIG_FILES src/services/monitor/man/monitor.7" ;; "src/services/monitor/includes/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/monitor/includes/Makefile" ;; "src/services/monitor/mon-icons/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/monitor/mon-icons/Makefile" ;; "src/services/monitor/lang/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/monitor/lang/Makefile" ;; "src/services/examples/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/examples/Makefile" ;; "src/services/examples/echo_python/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/examples/echo_python/Makefile" ;; "src/services/wrappers/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/Makefile" ;; "src/services/wrappers/python/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/python/Makefile" ;; "src/services/wrappers/python/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/services/wrappers/python/schema/Makefile" ;; "src/clients/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/Makefile" ;; "src/clients/data/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/data/Makefile" ;; "src/clients/credentials/Makefile") CONFIG_FILES="$CONFIG_FILES 
src/clients/credentials/Makefile" ;; "src/clients/compute/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/compute/Makefile" ;; "src/clients/pyarcrest/Makefile") CONFIG_FILES="$CONFIG_FILES src/clients/pyarcrest/Makefile" ;; "src/tests/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/Makefile" ;; "src/tests/echo/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/echo/Makefile" ;; "src/tests/echo/perftest.1") CONFIG_FILES="$CONFIG_FILES src/tests/echo/perftest.1" ;; "src/tests/echo/echo_service.xml.example") CONFIG_FILES="$CONFIG_FILES src/tests/echo/echo_service.xml.example" ;; "src/tests/echo/schema/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/echo/schema/Makefile" ;; "src/tests/policy-delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/policy-delegation/Makefile" ;; "src/tests/delegation/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/delegation/Makefile" ;; "src/tests/translator/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/translator/Makefile" ;; "src/tests/xpath/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/xpath/Makefile" ;; "src/tests/arcpolicy/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/arcpolicy/Makefile" ;; "src/tests/perf/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/perf/Makefile" ;; "src/tests/perf/arcperftest.1") CONFIG_FILES="$CONFIG_FILES src/tests/perf/arcperftest.1" ;; "src/tests/client/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/client/Makefile" ;; "src/tests/lrms/Makefile") CONFIG_FILES="$CONFIG_FILES src/tests/lrms/Makefile" ;; "src/utils/arc-exporter/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/arc-exporter/Makefile" ;; "src/utils/arc-exporter/arc-exporter") CONFIG_FILES="$CONFIG_FILES src/utils/arc-exporter/arc-exporter" ;; "src/utils/archery/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/archery/Makefile" ;; "src/utils/archery/archery-manage") CONFIG_FILES="$CONFIG_FILES src/utils/archery/archery-manage" ;; "src/utils/python/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/python/Makefile" ;; "src/utils/python/arccandypond") CONFIG_FILES="$CONFIG_FILES src/utils/python/arccandypond" ;; "src/utils/python/arcctl") CONFIG_FILES="$CONFIG_FILES src/utils/python/arcctl" ;; "src/utils/python/arcctl.1") CONFIG_FILES="$CONFIG_FILES src/utils/python/arcctl.1" ;; "src/utils/python/jura-ng") CONFIG_FILES="$CONFIG_FILES src/utils/python/jura-ng" ;; "src/utils/python/arc/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/python/arc/Makefile" ;; "src/utils/python/arc/gen_paths_dist.sh") CONFIG_FILES="$CONFIG_FILES src/utils/python/arc/gen_paths_dist.sh" ;; "src/utils/python/arc/utils/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/python/arc/utils/Makefile" ;; "src/utils/python/arc/control/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/python/arc/control/Makefile" ;; "src/utils/hed/wsdl2hed.1") CONFIG_FILES="$CONFIG_FILES src/utils/hed/wsdl2hed.1" ;; "src/utils/hed/arcplugin.1") CONFIG_FILES="$CONFIG_FILES src/utils/hed/arcplugin.1" ;; "src/utils/hed/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/hed/Makefile" ;; "src/utils/Makefile") CONFIG_FILES="$CONFIG_FILES src/utils/Makefile" ;; "src/wn/Makefile") CONFIG_FILES="$CONFIG_FILES src/wn/Makefile" ;; "src/doc/Makefile") CONFIG_FILES="$CONFIG_FILES src/doc/Makefile" ;; "src/doc/arc.conf.5") CONFIG_FILES="$CONFIG_FILES src/doc/arc.conf.5" ;; "swig/Makefile") CONFIG_FILES="$CONFIG_FILES swig/Makefile" ;; "python/Makefile") CONFIG_FILES="$CONFIG_FILES python/Makefile" ;; "python/Doxyfile.api") CONFIG_FILES="$CONFIG_FILES python/Doxyfile.api" ;; "python/python/Makefile") 
CONFIG_FILES="$CONFIG_FILES python/python/Makefile" ;; "python/python/arc/Makefile") CONFIG_FILES="$CONFIG_FILES python/python/arc/Makefile" ;; "python/altpython/Makefile") CONFIG_FILES="$CONFIG_FILES python/altpython/Makefile" ;; "python/altpython/arc/Makefile") CONFIG_FILES="$CONFIG_FILES python/altpython/arc/Makefile" ;; "python/test/Makefile") CONFIG_FILES="$CONFIG_FILES python/test/Makefile" ;; "python/test/python/Makefile") CONFIG_FILES="$CONFIG_FILES python/test/python/Makefile" ;; "python/test/altpython/Makefile") CONFIG_FILES="$CONFIG_FILES python/test/altpython/Makefile" ;; "python/examples/Makefile") CONFIG_FILES="$CONFIG_FILES python/examples/Makefile" ;; "po/Makefile.in") CONFIG_FILES="$CONFIG_FILES po/Makefile.in" ;; "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; "debian/Makefile") CONFIG_FILES="$CONFIG_FILES debian/Makefile" ;; "debian/changelog.deb") CONFIG_FILES="$CONFIG_FILES debian/changelog.deb" ;; "nordugrid-arc.spec") CONFIG_FILES="$CONFIG_FILES nordugrid-arc.spec" ;; "src/hed/daemon/arched.8") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/arched.8" ;; "src/hed/daemon/scripts/arched") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/arched" ;; "src/hed/daemon/scripts/arched.service") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/arched.service" ;; "src/hed/daemon/scripts/arched-start") CONFIG_FILES="$CONFIG_FILES src/hed/daemon/scripts/arched-start" ;; "src/doxygen/Makefile") CONFIG_FILES="$CONFIG_FILES src/doxygen/Makefile" ;; "src/utils/python/arcconfig-parser") CONFIG_FILES="$CONFIG_FILES src/utils/python/arcconfig-parser" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. 
if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? 
"could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. 
case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? 
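# For illustration (assumed template content): with srcdir="." a Makefile.in
# line such as
#     VPATH = @srcdir@
# would otherwise end up as "VPATH = ."; the $ac_vpsub fragment above strips
# the sole srcdir entry and, if nothing remains, blanks the assignment while
# keeping an empty line so line numbers are preserved.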
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi # Compute "$ac_file"'s index in $config_headers. _am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. # TODO: see whether this extra hack can be removed once we start # requiring Autoconf 2.70 or later. case $CONFIG_FILES in #( *\'*) : eval set x "$CONFIG_FILES" ;; #( *) : set x $CONFIG_FILES ;; #( *) : ;; esac shift # Used to flag and report bootstrapping failures. 
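# Sketch of what the bootstrap loop below effectively runs for each Automake
# makefile (the directory and file names here are assumptions):
#     cd src && sed -e '/# am--include-marker/d' Makefile \
#       | make -f - am--depfiles
# Stripping the include markers lets make evaluate the dependency-tracking
# rules before the .deps fragments exist, which is what creates them.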
am_rc=0
for am_mf
do
  # Strip MF so we end up with the name of the file.
  am_mf=`$as_echo "$am_mf" | sed -e 's/:.*$//'`
  # Check whether this is an Automake generated Makefile which includes
  # dependency-tracking related rules and includes.
  # Grep'ing the whole file directly is not great: AIX grep has a line
  # limit of 2048, but all sed's we know understand at least 4000.
  sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
    || continue
  am_dirpart=`$as_dirname -- "$am_mf" ||
$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
	 X"$am_mf" : 'X\(//\)[^/]' \| \
	 X"$am_mf" : 'X\(//\)$' \| \
	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X"$am_mf" |
    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
	    s//\1/
	    q
	  }
	  /^X\(\/\/\)[^/].*/{
	    s//\1/
	    q
	  }
	  /^X\(\/\/\)$/{
	    s//\1/
	    q
	  }
	  /^X\(\/\).*/{
	    s//\1/
	    q
	  }
	  s/.*/./; q'`
  am_filepart=`$as_basename -- "$am_mf" ||
$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \
	 X"$am_mf" : 'X\(//\)$' \| \
	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
$as_echo X/"$am_mf" |
    sed '/^.*\/\([^/][^/]*\)\/*$/{
	    s//\1/
	    q
	  }
	  /^X\/\(\/\/\)$/{
	    s//\1/
	    q
	  }
	  /^X\/\(\/\).*/{
	    s//\1/
	    q
	  }
	  s/.*/./; q'`
  { echo "$as_me:$LINENO: cd "$am_dirpart" \
      && sed -e '/# am--include-marker/d' "$am_filepart" \
        | $MAKE -f - am--depfiles" >&5
   (cd "$am_dirpart" \
      && sed -e '/# am--include-marker/d' "$am_filepart" \
        | $MAKE -f - am--depfiles) >&5 2>&5
   ac_status=$?
   echo "$as_me:$LINENO: \$? = $ac_status" >&5
   (exit $ac_status); } || am_rc=$?
done
if test $am_rc -ne 0; then
  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "Something went wrong bootstrapping makefile fragments
    for automatic dependency tracking.  If GNU make was not used, consider
    re-running the configure script with MAKE=\"gmake\" (or whatever is
    necessary).  You can also try re-running configure with the
    '--disable-dependency-tracking' option to at least be able to build
    the package (albeit without support for automatic dependency tracking).
    See \`config.log' for more details" "$LINENO" 5; }
fi
{ am_dirpart=; unset am_dirpart;}
{ am_filepart=; unset am_filepart;}
{ am_mf=; unset am_mf;}
{ am_rc=; unset am_rc;}
rm -f conftest-deps.mk
}
 ;;
 "libtool":C)

    # See if we are running on zsh, and set the options that allow our
    # commands through without removal of \ escapes.
    if test -n "${ZSH_VERSION+set}"; then
      setopt NO_GLOB_SUBST
    fi

    cfgfile=${ofile}T
    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
    $RM "$cfgfile"

    cat <<_LT_EOF >> "$cfgfile"
#! $SHELL
# Generated automatically by $as_me ($PACKAGE) $VERSION
# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
# NOTE: Changes made to this file will be lost: look at ltmain.sh.

# Provide generalized library-building support services.
# Written by Gordon Matzigkeit, 1996

# Copyright (C) 2014 Free Software Foundation, Inc.
# This is free software; see the source for copying conditions.  There is NO
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

# GNU Libtool is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program or library that is built
# using GNU Libtool, you may include this file under the same
# distribution terms that you use for the rest of that program.
# # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # The names of the tagged configurations supported by this script. available_tags='CXX ' # Configured defaults for sys_lib_dlsearch_path munging. : \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} # ### BEGIN LIBTOOL CONFIG # Whether or not to build static libraries. build_old_libs=$enable_static # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # Shared archive member basename,for filename based shared library versioning on AIX. shared_archive_member_spec=$shared_archive_member_spec # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that protects backslashes. ECHO=$lt_ECHO # The PATH separator for the build system. PATH_SEPARATOR=$lt_PATH_SEPARATOR # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # convert \$build file names to \$host format. to_host_file_cmd=$lt_cv_to_host_file_cmd # convert \$build files to toolchain format. to_tool_file_cmd=$lt_cv_to_tool_file_cmd # An object symbol dumper. OBJDUMP=$lt_OBJDUMP # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method = "file_magic". file_magic_cmd=$lt_file_magic_cmd # How to find potential files when deplibs_check_method = "file_magic". file_magic_glob=$lt_file_magic_glob # Find potential files using nocaseglob when deplibs_check_method = "file_magic". want_nocaseglob=$lt_want_nocaseglob # DLL creation program. DLLTOOL=$lt_DLLTOOL # Command to associate shared and link libraries. sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd # The archiver. AR=$lt_AR # Flags to create an archive. AR_FLAGS=$lt_AR_FLAGS # How to feed a file listing to the archiver. archiver_list_spec=$lt_archiver_list_spec # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # Whether to use a lock for old archive extraction. lock_old_archive_extraction=$lock_old_archive_extraction # A C compiler. LTCC=$lt_CC # LTCC compiler flags. 
LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm into a list of symbols to manually relocate. global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # The name lister interface. nm_interface=$lt_lt_cv_nm_interface # Specify filename containing input files for \$NM. nm_file_list_spec=$lt_nm_file_list_spec # The root where to search for dependent libraries,and where our libraries should be installed. lt_sysroot=$lt_sysroot # Command to truncate a binary pipe. lt_truncate_bin=$lt_lt_cv_truncate_bin # The name of the directory that contains temporary libtool files. objdir=$objdir # Used to examine libraries when file_magic_cmd begins with "file". MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? need_locks=$lt_need_locks # Manifest tool. MANIFEST_TOOL=$lt_MANIFEST_TOOL # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Permission mode override for installation of shared libraries. install_override_mode=$lt_install_override_mode # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. 
hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Detected run-time system search path for libraries. sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path # Explicit LT_SYS_LIBRARY_PATH set during ./configure time. configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # How to create reloadable object files. reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \$shlibpath_var if the # library is relocated. 
hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds # Specify filename containing input files. file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects postdep_objects=$lt_postdep_objects predeps=$lt_predeps postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path # ### END LIBTOOL CONFIG _LT_EOF cat <<'_LT_EOF' >> "$cfgfile" # ### BEGIN FUNCTIONS SHARED WITH CONFIGURE # func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x$2 in x) ;; *:) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; *::*) eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; *) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;; esac } # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in $*""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } # ### END FUNCTIONS SHARED WITH CONFIGURE _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. 
For some
# reason, if we set the COLLECT_NAMES environment variable, the problems
# vanish in a puff of smoke.
if test set != "${COLLECT_NAMES+set}"; then
  COLLECT_NAMES=
  export COLLECT_NAMES
fi
_LT_EOF
    ;;
  esac

  ltmain=$ac_aux_dir/ltmain.sh

  # We use sed instead of cat because bash on DJGPP gets confused if
  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
  # text mode, it properly converts lines to CR/LF.  This bash problem
  # is reportedly fixed, but why not run on old versions too?
  sed '$q' "$ltmain" >> "$cfgfile" \
     || (rm -f "$cfgfile"; exit 1)

  mv -f "$cfgfile" "$ofile" ||
    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
  chmod +x "$ofile"

  cat <<_LT_EOF >> "$ofile"

# ### BEGIN LIBTOOL TAG CONFIG: CXX

# The linker used to build libraries.
LD=$lt_LD_CXX

# How to create reloadable object files.
reload_flag=$lt_reload_flag_CXX
reload_cmds=$lt_reload_cmds_CXX

# Commands used to build an old-style archive.
old_archive_cmds=$lt_old_archive_cmds_CXX

# A language specific compiler.
CC=$lt_compiler_CXX

# Is the compiler the GNU compiler?
with_gcc=$GCC_CXX

# Compiler flag to turn off builtin functions.
no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX

# Additional compiler flags for building library objects.
pic_flag=$lt_lt_prog_compiler_pic_CXX

# How to pass a linker flag through the compiler.
wl=$lt_lt_prog_compiler_wl_CXX

# Compiler flag to prevent dynamic linking.
link_static_flag=$lt_lt_prog_compiler_static_CXX

# Does compiler simultaneously support -c and -o options?
compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX

# Whether or not to add -lc for building shared libraries.
build_libtool_need_lc=$archive_cmds_need_lc_CXX

# Whether or not to disallow shared libs when runtime libs are static.
allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX

# Compiler flag to allow reflexive dlopens.
export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX

# Compiler flag to generate shared objects directly from archives.
whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX

# Whether the compiler copes with passing no objects directly.
compiler_needs_object=$lt_compiler_needs_object_CXX

# Create an old-style archive from a shared archive.
old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX

# Create a temporary old-style archive to link instead of a shared archive.
old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX

# Commands used to build a shared archive.
archive_cmds=$lt_archive_cmds_CXX
archive_expsym_cmds=$lt_archive_expsym_cmds_CXX

# Commands used to build a loadable module if different from building
# a shared archive.
module_cmds=$lt_module_cmds_CXX
module_expsym_cmds=$lt_module_expsym_cmds_CXX

# Whether we are building with GNU ld or not.
with_gnu_ld=$lt_with_gnu_ld_CXX

# Flag that allows shared libraries with undefined symbols to be built.
allow_undefined_flag=$lt_allow_undefined_flag_CXX

# Flag that enforces no undefined symbols.
no_undefined_flag=$lt_no_undefined_flag_CXX

# Flag to hardcode \$libdir into a binary during linking.
# This must work even if \$libdir does not exist
hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX

# Whether we need a single "-rpath" flag with a separated argument.
hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX

# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
# DIR into the resulting binary.
hardcode_direct=$hardcode_direct_CXX # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \$shlibpath_var if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute_CXX # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L_CXX # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic_CXX # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath_CXX # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs_CXX # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols_CXX # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds_CXX # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms_CXX # Symbols that must always be exported. include_expsyms=$lt_include_expsyms_CXX # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds_CXX # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds_CXX # Specify filename containing input files. file_list_spec=$lt_file_list_spec_CXX # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action_CXX # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects_CXX postdep_objects=$lt_postdep_objects_CXX predeps=$lt_predeps_CXX postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # ### END LIBTOOL TAG CONFIG: CXX _LT_EOF ;; "po-directories":C) for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Treat a directory as a PO directory if and only if it has a # POTFILES.in file. This allows packages to have multiple PO # directories under different names or in different locations. 
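# For illustration (the entry and $top_srcdir value are assumed): a line in
# po/POTFILES.in such as
#     src/hed/libs/common/Logger.cpp
# is rewritten below into po/POTFILES as
#     ../src/hed/libs/common/Logger.cpp \
# i.e. prefixed with $top_srcdir and given a continuation backslash, with the
# backslash stripped from the last entry and comment/blank lines dropped.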
if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` # Hide the ALL_LINGUAS assigment from automake < 1.5. eval 'ALL_LINGUAS''=$ALL_LINGUAS_' POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. # Hide the ALL_LINGUAS assigment from automake < 1.5. eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' fi # Compute POFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) # Compute UPDATEPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) # Compute DUMMYPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) # Compute GMOFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= UPDATEPOFILES= DUMMYPOFILES= GMOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done ;; "src/utils/python/arcconfig-parser":F) chmod +x src/utils/python/arcconfig-parser ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? 
"write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unit testing: ${enables_cppunit} Python binding: ${enables_swig_python} ($PYTHON_VERSION) Alt.Python binding: ${enables_altpython} ($ALTPYTHON_VERSION) Available third-party features: GridFTP: ${enables_gridftp} GFAL: ${enables_gfal} S3: ${enables_s3} Xrootd: ${enables_xrootd} xmlsec1: ${enables_xmlsec1} NSS: ${enables_nss} SQLite: ${enables_sqlite} LDNS: ${enables_ldns} Enabled features: Local jobs info in SQLite: ${enables_sqlitejstore} Systemd Integration: ${enables_systemd} Included components: HED: ${enables_hed} A-REX service: ${enables_a_rex_service} Internal plugin: ${enables_internal} LDAP Info service: ${enables_ldap_service} CANDYPOND service: ${enables_candypond} DATADELIVERY service: ${enables_datadelivery_service} COMPUTE clients: ${enables_compute_client} DATA clients: ${enables_data_client} CREDENTIAL clients: ${enables_credentials_client} ARC REST client: ${enables_arcrest_client} SRM client (DMC): ${enables_srm_dmc} Documentation: ${enables_doc} Monitoring: LDAP Monitor ${enables_monitor} " >&5 $as_echo " Unit testing: ${enables_cppunit} Python binding: ${enables_swig_python} ($PYTHON_VERSION) Alt.Python binding: ${enables_altpython} ($ALTPYTHON_VERSION) Available third-party features: GridFTP: ${enables_gridftp} GFAL: ${enables_gfal} S3: ${enables_s3} Xrootd: ${enables_xrootd} xmlsec1: ${enables_xmlsec1} NSS: ${enables_nss} SQLite: ${enables_sqlite} LDNS: ${enables_ldns} Enabled features: Local jobs info in SQLite: ${enables_sqlitejstore} Systemd Integration: ${enables_systemd} Included components: HED: ${enables_hed} A-REX service: ${enables_a_rex_service} Internal plugin: ${enables_internal} LDAP Info service: ${enables_ldap_service} CANDYPOND service: ${enables_candypond} DATADELIVERY service: ${enables_datadelivery_service} COMPUTE clients: ${enables_compute_client} DATA clients: ${enables_data_client} CREDENTIAL clients: ${enables_credentials_client} ARC REST client: ${enables_arcrest_client} SRM client (DMC): ${enables_srm_dmc} Documentation: ${enables_doc} Monitoring: LDAP Monitor ${enables_monitor} " >&6; } nordugrid-arc-7.1.1/PaxHeaders/nordugrid-arc.spec.in0000644000000000000000000000013215067751327017374 xustar0030 mtime=1759498967.642740224 30 atime=1759498967.808492785 30 
ctime=1759499024.691021088 nordugrid-arc-7.1.1/nordugrid-arc.spec.in0000644000175000002070000012657415067751327021315 0ustar00mockbuildmock00000000000000%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}} # # xROOTd # %if %{?fedora}%{!?fedora:0} >= 24 || %{?rhel}%{!?rhel:0} %global with_xrootd %{!?_without_xrootd:1}%{?_without_xrootd:0} %else %global with_xrootd 0 %endif %{!?python3_pkgversion: %global python3_pkgversion 3} %global with_pylint 0 %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} >= 5 %global with_s3 1 %else %global with_s3 0 %endif %if ( %{?fedora}%{!?fedora:0} >= 21 && %{?fedora}%{!?fedora:0} < 43 ) || ( %{?rhel}%{!?rhel:0} >= 5 && %{?rhel}%{!?rhel:0} < 10 ) %global with_gfal 1 %else %global with_gfal 0 %endif %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} %global with_xmlsec1 %{!?_without_xmlsec1:1}%{?_without_xmlsec1:0} %else %global with_xmlsec1 0 %endif # LDNS %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 5 %global with_ldns 1 %else %global with_ldns 0 %endif %if %{?fedora}%{!?fedora:0} >= 25 || %{?rhel}%{!?rhel:0} >= 7 %global use_systemd 1 %else %global use_systemd 0 %endif %global with_ldap_service 1 %global pkgdir arc # bash-completion %global _bashcompdir %(pkg-config --variable=completionsdir bash-completion 2>/dev/null || echo %{_sysconfdir}/bash_completion.d) # # Macros for scripts # # Stop and disable service on package removal %if %{use_systemd} %define stop_on_removal() %{expand:%%systemd_preun %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?stop_on_removal:0}%{!?stop_on_removal:1} %global stop_on_removal() if [ $1 -eq 0 ]; then for s in %*; do service $s stop > /dev/null 2>&1 || : ; done; for s in %*; do /sbin/chkconfig --del $s; done; fi %endif %endif # Enable a service %if %{use_systemd} %define enable_service() %{expand:%%systemd_post %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define enable_service() %{expand:%%fillup_and_insserv -f %{?*}} %else %define enable_service() for s in %{?*}; do /sbin/chkconfig --add $s ; done %endif %endif # Conditionally restart service on package update %if %{use_systemd} %define condrestart_on_update() %{expand:%%systemd_postun_with_restart %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define condrestart_on_update() %{expand:%%restart_on_update %{?*}} %{expand:%%insserv_cleanup} %else %define condrestart_on_update() if [ $1 -ge 1 ]; then for s in %{?*}; do service $s condrestart > /dev/null 2>&1 || : ; done; fi %endif %endif # Standard service requirements %if %{use_systemd} %define service_post_requires systemd-units %define service_preun_requires systemd-units %define service_postun_requires systemd-units %else %if %{?suse_version:1}%{!?suse_version:0} %define service_post_requires %{insserv_prereq} %define service_preun_requires %{insserv_prereq} %define service_postun_requires %{insserv_prereq} %else %define service_post_requires chkconfig %define service_preun_requires chkconfig, initscripts %define service_postun_requires initscripts %endif %endif Name: @PACKAGE@ Version: @baseversion@ Release: @fedorarelease@%{?dist} Summary: Advanced Resource Connector Middleware Group: System Environment/Daemons License: ASL 2.0 URL: http://www.nordugrid.org/ Source: http://download.nordugrid.org/packages/%{name}/releases/%{version}@preversion@/src/%{name}-%{version}@preversion@.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) # Packages dropped without 
replacements Obsoletes: %{name}-chelonia < 2.0.0 Obsoletes: %{name}-hopi < 2.0.0 Obsoletes: %{name}-isis < 2.0.0 Obsoletes: %{name}-janitor < 2.0.0 Obsoletes: %{name}-doxygen < 4.0.0 Obsoletes: %{name}-arcproxyalt < 6.0.0 Obsoletes: %{name}-java < 6.0.0 Obsoletes: %{name}-egiis < 6.0.0 Obsoletes: %{name}-acix-cache < 6.0.0 Obsoletes: %{name}-acix-core < 7.0.0 Obsoletes: %{name}-acix-scanner < 7.0.0 Obsoletes: %{name}-acix-index < 7.0.0 Obsoletes: %{name}-arex-python-lrms < 7.0.0 Obsoletes: %{name}-gridftpd < 7.0.0 Obsoletes: python2-%{name} < 7.0.0 Obsoletes: %{name}-python < 5.3.3 Obsoletes: %{name}-nordugridmap < 7.0.0 Obsoletes: %{name}-gridmap-utils < 6.0.0 Obsoletes: %{name}-plugins-gridftpjob < 7.0.0 Obsoletes: %{name}-plugins-ldap < 7.0.0 %if ! %{with_ldap_service} Obsoletes: %{name}-infosys-ldap < %{version}-%{release} Obsoletes: %{name}-ldap-infosys < 6.0.0 Obsoletes: %{name}-aris < 6.0.0 %endif BuildRequires: autoconf BuildRequires: automake BuildRequires: libtool BuildRequires: make BuildRequires: gcc-c++ BuildRequires: cppunit-devel BuildRequires: pkgconfig %if %{use_systemd} BuildRequires: systemd BuildRequires: systemd-devel %endif BuildRequires: libuuid-devel BuildRequires: gettext-devel BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-pip BuildRequires: python%{python3_pkgversion}-setuptools BuildRequires: python%{python3_pkgversion}-wheel %if %{with_pylint} BuildRequires: pylint %endif %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} >= 10 BuildRequires: glibmm2.68-devel %else BuildRequires: glibmm24-devel %endif BuildRequires: libxml2-devel BuildRequires: openssl BuildRequires: openssl-devel %if %{with_xmlsec1} BuildRequires: xmlsec1-devel >= 1.2.4 BuildRequires: xmlsec1-openssl-devel >= 1.2.4 %endif BuildRequires: nss-devel BuildRequires: globus-common-devel BuildRequires: globus-ftp-client-devel BuildRequires: globus-ftp-control-devel %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: globus-gssapi-gsi-devel >= 12.2 %else BuildRequires: globus-gssapi-gsi-devel < 12.2 %endif %if %{with_xrootd} BuildRequires: xrootd-client-devel >= 1:4.5.0 %endif %if %{with_gfal} BuildRequires: gfal2-devel %endif %if %{with_s3} BuildRequires: libs3-devel %endif %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} BuildRequires: perl-generators %endif # Needed for Boinc backend testing during make check BuildRequires: perl(DBI) # Needed for infoprovider testing during make check BuildRequires: perl(English) BuildRequires: perl(JSON::XS) BuildRequires: perl(Sys::Hostname) BuildRequires: perl(XML::Simple) # Needed for LRMS testing during make check BuildRequires: perl(Test::Harness) BuildRequires: perl(Test::Simple) BuildRequires: swig %if %{?fedora}%{!?fedora:0} >= 4 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: libtool-ltdl-devel %else BuildRequires: libtool %endif BuildRequires: sqlite-devel >= 3.6 %if %{with_ldns} BuildRequires: ldns-devel >= 1.6.8 %endif %if %{?fedora}%{!?fedora:0} >= 17 || %{?rhel}%{!?rhel:0} >= 7 || %{?suse_version:1}%{!?suse_version:0} BuildRequires: pkgconfig(bash-completion) %endif BuildRequires: help2man %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 7 || %{?suse_version:1}%{!?suse_version:0} Requires: hostname %else Requires: net-tools %endif Requires: openssl %description NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). 
The ARC middleware is a software solution that uses distributed computing technologies to enable sharing and federation of computing resources across different administrative and application domains. ARC is used to create distributed infrastructures of various scope and complexity, from campus to national and global deployments. %package client Summary: ARC command line clients Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %description client NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This client package contains all the CLI tools that are needed to operate with x509 proxies, submit and manage jobs and handle data transfers. %package hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description hed NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC Hosting Environment Daemon (HED) is a Web Service container for ARC services. %package datadelivery-service Summary: ARC data delivery service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires: %{name}-arcctl-service = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description datadelivery-service NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC data delivery service. %if %{with_ldap_service} %package infosys-ldap Summary: ARC LDAP-based information services Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: openldap-servers Requires: bdii Requires: glue-schema >= 2.0.10 Requires: %{name}-arcctl-service = %{version}-%{release} Provides: %{name}-ldap-infosys = %{version}-%{release} Obsoletes: %{name}-ldap-infosys < 6.0.0 Provides: %{name}-aris = %{version}-%{release} Obsoletes: %{name}-aris < 6.0.0 Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 8 Requires(post): policycoreutils-python-utils Requires(postun): policycoreutils-python-utils %else %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 Requires(post): policycoreutils-python Requires(postun): policycoreutils-python %else %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %endif %endif %description infosys-ldap NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC information services relying on BDII and LDAP technologies to publish ARC CE information according to various LDAP schemas. Please note that the information collectors are part of another package, the nordugrid-arc-arex. 
%endif %package monitor Summary: ARC LDAP monitor web application Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: php Requires: php-gd Requires: php-ldap %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Obsoletes: %{name}-ldap-monitor < 6.0.0 Obsoletes: %{name}-ws-monitor < 6.0.0 %description monitor NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the PHP web application that is used to set up a web-based monitor which pulls information from the LDAP information system and visualizes it. %package arcctl Summary: ARC Control Tool Group: Applications/Internet Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 26 || %{?rhel}%{!?rhel:0} >= 8 Requires: python3-jwcrypto %endif %description arcctl NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC Control Tool with a basic set of control modules suitable for both the server and client side. %package arcctl-service Summary: ARC Control Tool - service control modules Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-arcctl = %{version}-%{release} %description arcctl-service NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the service control modules for the ARC Control Tool that allow working with the server-side configuration and managing ARC services. %package arex Summary: ARC Resource-coupled EXecution service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-arcctl = %{version}-%{release} Requires: %{name}-arcctl-service = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires: findutils Requires: procps Provides: %{name}-cache-service = %{version}-%{release} Obsoletes: %{name}-cache-service < 6.0.0 Provides: %{name}-candypond = %{version}-%{release} Obsoletes: %{name}-candypond < 6.0.0 Requires(post): %{name}-arcctl = %{version}-%{release} Requires(preun): %{name}-arcctl = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 7 || %{?suse_version:1}%{!?suse_version:0} Requires(post): hostname %else Requires(post): net-tools %endif Requires(post): openssl Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description arex NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC Resource-coupled EXecution service (AREX) is the Computing Element of the ARC middleware. AREX offers a full-featured middle layer to manage computational tasks, including interfacing to local batch systems and taking care of complex operations such as data staging, data caching, software environment provisioning, information collection and exposure, and accounting information gathering and publishing.
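# A minimal illustrative sketch (spec comment only, not used during the
# build): on a systemd-based host, the units shipped with the arex
# subpackage can be inspected, and the test CA created by the post
# scriptlet further down can be recreated by hand, e.g.:
#   systemctl status arc-arex arc-arex-ws
#   arcctl test-ca init
#   arcctl test-ca hostcert
# The unit and command names are taken from this spec; the exact
# invocation on a given deployment may differ.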
%package arex-lrms-contrib Summary: ARC Resource-coupled EXecution service - contributed LRMS backends Requires: %{name}-arex = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif # Split from AREX package Obsoletes: %{name}-arex < 7.0.0 %description arex-lrms-contrib NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The AREX contributed LRMS backends package contains additional LRMS support scripts contributed by the ARC user community. %package community-rtes Summary: ARC community defined RTEs support Group: System Environment/Libraries Requires: %{name}-arex = %{version}-%{release} Requires: %{name}-arcctl = %{version}-%{release} Requires: gnupg2 Requires: python3-dns %description community-rtes NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). Community RTEs is the framework that allows deploying software packages (tarballs, containers, etc.) provided by trusted communities to an ARC CE using simple arcctl commands. It is released as a technology preview. %package plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Provides: %{name}-plugins-arcrest = %{version}-%{release} Obsoletes: %{name}-plugins-arcrest < 7.0.0 %description plugins-needed NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC base plugins. These include the Message Chain Components (MCCs) and Data Manager Components (DMCs). %package plugins-globus Summary: ARC Globus plugins (compat) Group: System Environment/Libraries Requires: %{name}-plugins-gridftp = %{version}-%{release} Requires: %{name}-plugins-lcas-lcmaps = %{version}-%{release} %description plugins-globus NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC Globus plugins. This compat metapackage brings in all Globus-dependent plugins at once, including the Data Manager Components (DMCs) and the LCAS/LCMAPS tools. This package is meant to allow a smooth transition and will be removed in upcoming releases. %package plugins-globus-common Summary: ARC Globus plugins common libraries Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-globus-common NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC Globus plugins common libraries package bundles the Globus libraries needed by all other Globus-dependent ARC components. %package plugins-gridftp Summary: ARC Globus dependent DMCs Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-globus-common = %{version}-%{release} %description plugins-gridftp NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC Globus GridFTP plugins. These allow access to data through the gridftp protocol.
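# Illustrative comment (not part of the build): with the GridFTP plugins
# installed, the standard data clients from the client subpackage can use
# gridftp URLs, e.g.:
#   arcls gsiftp://example.org/path/
#   arccp gsiftp://example.org/path/to/file /tmp/file
# example.org and the paths are placeholders, and a valid credential
# (see arcproxy in the client subpackage) is assumed.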
%package plugins-lcas-lcmaps Summary: ARC LCAS/LCMAPS plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-globus-common = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 Requires: globus-gssapi-gsi >= 12.2 %else Requires: globus-gssapi-gsi < 12.2 %endif %description plugins-lcas-lcmaps NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC LCAS/LCMAPS tools allow configuring an ARC CE to use LCAS/LCMAPS services for authorization and mapping. %if %{with_xrootd} %package plugins-xrootd Summary: ARC xrootd plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-xrootd NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC xrootd plugins. These allow access to data through the xrootd protocol. %endif %if %{with_gfal} %package plugins-gfal Summary: ARC GFAL2 plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-gfal NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC plugins for GFAL2. These allow third-party transfers and add support for several extra transfer protocols (rfio, dcap, gsidcap). Support for specific protocols is provided by separate 3rd-party GFAL2 plugin packages. %endif %if %{with_s3} %package plugins-s3 Summary: ARC S3 plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-s3 NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC plugins for S3. These allow access to data through the S3 protocol. %endif %package plugins-internal Summary: ARC internal plugin Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-arex = %{version}-%{release} %description plugins-internal NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC internal plugin. A special interface aimed at restrictive HPC sites, to be used with a local installation of the ARC Control Tower. %package plugins-python Summary: ARC Python dependent plugin Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: python%{python3_pkgversion}-%{name} = %{version}-%{release} %description plugins-python NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC plugins dependent on Python. %package devel Summary: ARC development files Group: Development/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} >= 10 Requires: glibmm2.68-devel %else Requires: glibmm24-devel %endif Requires: libxml2-devel Requires: openssl-devel %description devel NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). Header files and libraries needed to develop applications using ARC.
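# A hedged sketch (comment only): an application built against the devel
# subpackage includes the headers installed under the arc/ include
# directory and links the ARC shared libraries, roughly:
#   g++ -o myapp myapp.cpp -larccompute -larccommon
# myapp.cpp is a placeholder; which -l flags are needed depends on the
# ARC APIs the application uses (the base package lists the available
# libarc* libraries).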
%package -n python%{python3_pkgversion}-%{name} Summary: ARC Python 3 wrapper Group: Development/Libraries %{?python_provide:%python_provide python%{python3_pkgversion}-%{name}} Provides: %{name}-python%{python3_pkgversion} = %{version}-%{release} Obsoletes: %{name}-python%{python3_pkgversion} < 5.3.3 Requires: %{name} = %{version}-%{release} %description -n python%{python3_pkgversion}-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). Python 3 bindings for ARC. %package test-utils Summary: ARC test tools Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Obsoletes: %{name}-misc-utils < 6.0.0 %description test-utils NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains a few utilities useful to test various ARC subsystems. The package is not required by users or sysadmins and it is mainly for developers. %package archery-manage Summary: ARCHERY administration tool Group: Applications/Internet %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python3-dns Requires: python3-ldap %description archery-manage NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the archery-manage utility for administration of an ARCHERY DNS-embedded service endpoint registry. %package wn Summary: ARC optional worker nodes components Group: Applications/Internet %description wn NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the optional components that provide new job management features on the worker nodes (WN). %package -n python%{python3_pkgversion}-arcrest Summary: ARC REST client Group: Applications/Internet %{?python_provide:%python_provide python%{python3_pkgversion}-arcrest} %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description -n python%{python3_pkgversion}-arcrest NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC REST client. %package arc-exporter Summary: ARC Prometheus exporter service Group: Applications/Internet %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python3-prometheus_client %description arc-exporter NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the Prometheus arc-exporter which collects and publishes metrics about jobs and datastaging on the ARC-CE. %prep %setup @fedorasetupopts@ %build autoreconf -v -f -i %configure --disable-static \ %if %{with_gfal} --enable-gfal \ %endif %if %{with_s3} --enable-s3 \ %endif --with-python=python3 \ %if ! %{with_pylint} --disable-pylint \ %endif %if ! %{with_xrootd} --disable-xrootd \ %endif %if ! %{with_ldns} --disable-ldns \ %endif --enable-internal \ %if %{use_systemd} --enable-systemd \ --with-systemd-units-location=%{_unitdir} \ %endif %if ! 
%{with_ldap_service} --disable-ldap-service \ %endif --disable-doc \ --docdir=%{_pkgdocdir} make %{?_smp_mflags} %check make %{?_smp_mflags} check %install rm -rf $RPM_BUILD_ROOT make install DESTDIR=$RPM_BUILD_ROOT # Install Logrotate. mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d install -p -m 644 debian/%{name}-arex.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-arex %if %{with_ldap_service} install -p -m 644 debian/%{name}-infosys-ldap.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-infosys-ldap %endif install -p -m 644 debian/%{name}-datadelivery-service.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-datadelivery-service find $RPM_BUILD_ROOT -type f -name \*.la -exec rm -fv '{}' ';' # The py-compile script in the source tarball is old (RHEL 6) # It does the wrong thing for python 3 - remove and let rpmbuild do it right find $RPM_BUILD_ROOT -type f -name \*.pyc -exec rm -fv '{}' ';' find $RPM_BUILD_ROOT -type f -name \*.pyo -exec rm -fv '{}' ';' # libarcglobusutils is not part of the ARC api. find $RPM_BUILD_ROOT -name libarcglobusutils.so -exec rm -fv '{}' ';' rm -f $RPM_BUILD_ROOT%{python3_sitelib}/pyarcrest-*.*-info/direct_url.json # Create log directory mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/arc # Create spool directories for Jura mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/ssm mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/urs # create config directory mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/arc.conf.d %find_lang %{name} # Remove examples and let RPM package them under /usr/share/doc using the doc macro rm -rf $RPM_BUILD_ROOT%{_datadir}/%{pkgdir}/examples make -C src/libs/data-staging/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/compute/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/data/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/acc/PythonBroker DESTDIR=$PWD/docdir/python pkgdatadir= install-exampleDATA make -C python/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/tests/echo DESTDIR=$PWD/docdir/hed pkgdatadir= install-exampleDATA make -C src/hed DESTDIR=$PWD/docdir/hed pkgdatadir= install-profileDATA # client.conf needs special handling make -C src/clients DESTDIR=$RPM_BUILD_ROOT install-exampleDATA # Link to client.conf from doc ln -s %{_datadir}/%{pkgdir}/examples/client.conf $PWD/docdir/client.conf %clean rm -rf $RPM_BUILD_ROOT %post -p /sbin/ldconfig %postun -p /sbin/ldconfig %post plugins-globus-common -p /sbin/ldconfig %postun plugins-globus-common -p /sbin/ldconfig %post hed %enable_service arched %preun hed %stop_on_removal arched %postun hed %condrestart_on_update arched %post arex %enable_service arc-arex %enable_service arc-arex-ws # out-of-package testing host certificate if [ $1 -eq 1 ]; then arcctl test-ca init arcctl test-ca hostcert fi %preun arex %stop_on_removal arc-arex %stop_on_removal arc-arex-ws if [ $1 -eq 0 ]; then arcctl test-ca cleanup fi %postun arex %condrestart_on_update arc-arex %condrestart_on_update arc-arex-ws %post datadelivery-service %enable_service arc-datadelivery-service %preun datadelivery-service %stop_on_removal arc-datadelivery-service %postun datadelivery-service %condrestart_on_update arc-datadelivery-service %if %{with_ldap_service} %post infosys-ldap %enable_service arc-infosys-ldap %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 semanage port -a -t 
ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -a -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : semanage fcontext -a -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -a -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : %endif %preun infosys-ldap %stop_on_removal arc-infosys-ldap %postun infosys-ldap %condrestart_on_update arc-infosys-ldap %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ]; then semanage port -d -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -d -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : semanage fcontext -d -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -d -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : fi %endif %triggerpostun infosys-ldap -- %{name}-ldap-infosys # Uninstalling the old %{name}-ldap-infosys will remove some selinux config # for %{name}-infosys-ldap - put them back in this triggerpostun script semanage port -a -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -a -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : %triggerpostun infosys-ldap -- %{name}-aris # Uninstalling the old %{name}-aris will remove some selinux config # for %{name}-infosys-ldap - put them back in this triggerpostun script semanage fcontext -a -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -a -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : %triggerun infosys-ldap -- bdii %if %{?suse_version:1}%{!?suse_version:0} FIRST_ARG=1 %restart_on_update arc-infosys-ldap %else service arc-infosys-ldap condrestart > /dev/null 2>&1 || : %endif %endif %files -f %{name}.lang %defattr(-,root,root,-) %doc src/doc/arc.conf.reference src/doc/arc.conf.DELETED %doc README AUTHORS LICENSE NOTICE %{_libdir}/libarccompute.so.* %{_libdir}/libarccommunication.so.* %{_libdir}/libarccommon.so.* %{_libdir}/libarccredential.so.* %{_libdir}/libarccredentialstore.so.* %{_libdir}/libarccrypto.so.* %{_libdir}/libarcdata.so.* %{_libdir}/libarcdatastaging.so.* %{_libdir}/libarcloader.so.* %{_libdir}/libarcmessage.so.* %{_libdir}/libarcsecurity.so.* %{_libdir}/libarcotokens.so.* %{_libdir}/libarcinfosys.so.* %{_libdir}/libarcwsaddressing.so.* %{_libdir}/libarcwssecurity.so.* %if %{with_xmlsec1} %{_libdir}/libarcxmlsec.so.* %endif %dir %{_libdir}/%{pkgdir} # We need to have libmodcrypto.so close to libarccrypto %{_libdir}/%{pkgdir}/libmodcrypto.so %{_libdir}/%{pkgdir}/libmodcrypto.apd # We need to have libmodcredential.so close to libarccredential %{_libdir}/%{pkgdir}/libmodcredential.so %{_libdir}/%{pkgdir}/libmodcredential.apd %{_libdir}/%{pkgdir}/arc-file-access %{_libdir}/%{pkgdir}/arc-hostname-resolver %{_libdir}/%{pkgdir}/DataStagingDelivery %{_libdir}/%{pkgdir}/arc-dmc %dir %{_libexecdir}/%{pkgdir} %{_libexecdir}/%{pkgdir}/arcconfig-parser %dir %{python3_sitearch}/%{pkgdir} %{python3_sitearch}/%{pkgdir}/utils %{python3_sitearch}/%{pkgdir}/__init__.py %{python3_sitearch}/%{pkgdir}/paths.py %{python3_sitearch}/%{pkgdir}/paths_dist.py %dir %{python3_sitearch}/%{pkgdir}/__pycache__ %{python3_sitearch}/%{pkgdir}/__pycache__/__init__.* %{python3_sitearch}/%{pkgdir}/__pycache__/paths.* %{python3_sitearch}/%{pkgdir}/__pycache__/paths_dist.* %dir %{_datadir}/%{pkgdir} %{_datadir}/%{pkgdir}/arc.parser.defaults %dir %{_datadir}/%{pkgdir}/test-jobs %{_datadir}/%{pkgdir}/test-jobs/test-job-* %{_datadir}/%{pkgdir}/schema %files client %defattr(-,root,root,-) 
%doc docdir/client.conf %{_bindir}/arccat %{_bindir}/arcclean %{_bindir}/arccp %{_bindir}/arcget %{_bindir}/arcinfo %{_bindir}/arckill %{_bindir}/arcls %{_bindir}/arcmkdir %{_bindir}/arcrename %{_bindir}/arcproxy %{_bindir}/arcrenew %{_bindir}/arcresume %{_bindir}/arcrm %{_bindir}/arcstat %{_bindir}/arcsub %{_bindir}/arcsync %{_bindir}/arctest %dir %{_datadir}/%{pkgdir}/examples %{_datadir}/%{pkgdir}/examples/client.conf %dir %{_sysconfdir}/%{pkgdir} %config(noreplace) %{_sysconfdir}/%{pkgdir}/client.conf %doc %{_mandir}/man1/arccat.1* %doc %{_mandir}/man1/arcclean.1* %doc %{_mandir}/man1/arccp.1* %doc %{_mandir}/man1/arcget.1* %doc %{_mandir}/man1/arcinfo.1* %doc %{_mandir}/man1/arckill.1* %doc %{_mandir}/man1/arcls.1* %doc %{_mandir}/man1/arcmkdir.1* %doc %{_mandir}/man1/arcrename.1* %doc %{_mandir}/man1/arcproxy.1* %doc %{_mandir}/man1/arcrenew.1* %doc %{_mandir}/man1/arcresume.1* %doc %{_mandir}/man1/arcrm.1* %doc %{_mandir}/man1/arcstat.1* %doc %{_mandir}/man1/arcsub.1* %doc %{_mandir}/man1/arcsync.1* %doc %{_mandir}/man1/arctest.1* %dir %{_bashcompdir} %{_bashcompdir}/arc-client-tools %files hed %defattr(-,root,root,-) %doc docdir/hed/* %if %{use_systemd} %{_unitdir}/arched.service %else %{_initrddir}/arched %endif %{_sbindir}/arched %{_libdir}/%{pkgdir}/libecho.so %{_libdir}/%{pkgdir}/libecho.apd %{_datadir}/%{pkgdir}/arched-start %{_datadir}/%{pkgdir}/profiles %doc %{_mandir}/man8/arched.8* %doc %{_mandir}/man5/arc.conf.5* %files datadelivery-service %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-datadelivery-service.service %else %{_initrddir}/arc-datadelivery-service %endif %{_libdir}/%{pkgdir}/libdatadeliveryservice.so %{_libdir}/%{pkgdir}/libdatadeliveryservice.apd %{_datadir}/%{pkgdir}/arc-datadelivery-service-start %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-datadelivery-service %if %{with_ldap_service} %files infosys-ldap %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-infosys-ldap.service %{_unitdir}/arc-infosys-ldap-slapd.service %else %{_initrddir}/arc-infosys-ldap %endif %{_datadir}/%{pkgdir}/create-bdii-config %{_datadir}/%{pkgdir}/create-slapd-config %{_datadir}/%{pkgdir}/ldap-schema %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-infosys-ldap %endif %files monitor %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/monitor %doc %{_mandir}/man7/monitor.7* %files arcctl %{_sbindir}/arcctl %dir %{python3_sitearch}/%{pkgdir}/control %{python3_sitearch}/%{pkgdir}/control/__init__.py %{python3_sitearch}/%{pkgdir}/control/CertificateGenerator.py %{python3_sitearch}/%{pkgdir}/control/ControlCommon.py %{python3_sitearch}/%{pkgdir}/control/OSPackage.py %{python3_sitearch}/%{pkgdir}/control/TestCA.py %{python3_sitearch}/%{pkgdir}/control/TestJWT.py %{python3_sitearch}/%{pkgdir}/control/ThirdPartyDeployment.py %dir %{python3_sitearch}/%{pkgdir}/control/__pycache__ %{python3_sitearch}/%{pkgdir}/control/__pycache__/__init__.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/CertificateGenerator.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/ControlCommon.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/OSPackage.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/TestCA.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/TestJWT.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/ThirdPartyDeployment.* %doc %{_mandir}/man1/arcctl.1* %files arcctl-service %{python3_sitearch}/%{pkgdir}/control/Cleanup.py %{python3_sitearch}/%{pkgdir}/control/Config.py %{python3_sitearch}/%{pkgdir}/control/ServiceCommon.py 
%{python3_sitearch}/%{pkgdir}/control/Services.py %{python3_sitearch}/%{pkgdir}/control/OSService.py %{python3_sitearch}/%{pkgdir}/control/Validator.py %{python3_sitearch}/%{pkgdir}/control/__pycache__/Cleanup.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Config.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/ServiceCommon.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Services.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/OSService.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Validator.* %files arex %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-arex.service %{_unitdir}/arc-arex-ws.service %else %{_initrddir}/arc-arex %{_initrddir}/arc-arex-ws %endif %{_libexecdir}/%{pkgdir}/cache-clean %{_libexecdir}/%{pkgdir}/cache-list %{_libexecdir}/%{pkgdir}/jura-ng %{_libexecdir}/%{pkgdir}/gm-jobs %{_libexecdir}/%{pkgdir}/gm-kick %{_libexecdir}/%{pkgdir}/smtp-send %{_libexecdir}/%{pkgdir}/smtp-send.sh %{_libexecdir}/%{pkgdir}/inputcheck %{_libexecdir}/%{pkgdir}/arc-blahp-logger %{_libdir}/%{pkgdir}/libarex.so %{_libdir}/%{pkgdir}/libarex.apd %{_libdir}/%{pkgdir}/libcandypond.so %{_libdir}/%{pkgdir}/libcandypond.apd %{_datadir}/%{pkgdir}/cancel-condor-job %{_datadir}/%{pkgdir}/cancel-fork-job %{_datadir}/%{pkgdir}/cancel-SLURM-job %{_datadir}/%{pkgdir}/scan-condor-job %{_datadir}/%{pkgdir}/scan-fork-job %{_datadir}/%{pkgdir}/scan-SLURM-job %{_datadir}/%{pkgdir}/submit-condor-job %{_datadir}/%{pkgdir}/submit-fork-job %{_datadir}/%{pkgdir}/submit-SLURM-job %{_datadir}/%{pkgdir}/CEinfo.pl %{_datadir}/%{pkgdir}/ARC0mod.pm %{_datadir}/%{pkgdir}/Condor.pm %{_datadir}/%{pkgdir}/Fork.pm %{_datadir}/%{pkgdir}/FORKmod.pm %{_datadir}/%{pkgdir}/SLURM.pm %{_datadir}/%{pkgdir}/SLURMmod.pm %{_datadir}/%{pkgdir}/XmlPrinter.pm %{_datadir}/%{pkgdir}/InfosysHelper.pm %{_datadir}/%{pkgdir}/LdifPrinter.pm %{_datadir}/%{pkgdir}/GLUE2xmlPrinter.pm %{_datadir}/%{pkgdir}/GLUE2ldifPrinter.pm %{_datadir}/%{pkgdir}/NGldifPrinter.pm %{_datadir}/%{pkgdir}/ARC0ClusterInfo.pm %{_datadir}/%{pkgdir}/ARC1ClusterInfo.pm %{_datadir}/%{pkgdir}/ConfigCentral.pm %{_datadir}/%{pkgdir}/GMJobsInfo.pm %{_datadir}/%{pkgdir}/HostInfo.pm %{_datadir}/%{pkgdir}/RTEInfo.pm %{_datadir}/%{pkgdir}/InfoChecker.pm %{_datadir}/%{pkgdir}/IniParser.pm %{_datadir}/%{pkgdir}/LRMSInfo.pm %{_datadir}/%{pkgdir}/Sysinfo.pm %{_datadir}/%{pkgdir}/LogUtils.pm %{_datadir}/%{pkgdir}/condor_env.pm %{_datadir}/%{pkgdir}/cancel_common.sh %{_datadir}/%{pkgdir}/configure-*-env.sh %{_datadir}/%{pkgdir}/submit_common.sh %{_datadir}/%{pkgdir}/scan_common.sh %{_datadir}/%{pkgdir}/lrms_common.sh %{_datadir}/%{pkgdir}/perferator %{_datadir}/%{pkgdir}/update-controldir %{_datadir}/%{pkgdir}/PerfData.pl %{_datadir}/%{pkgdir}/arc-arex-start %{_datadir}/%{pkgdir}/arc-arex-ws-start %dir %{_datadir}/%{pkgdir}/sql-schema %{_datadir}/%{pkgdir}/sql-schema/arex_accounting_db_schema_v2.sql %doc %{_mandir}/man1/cache-clean.1* %doc %{_mandir}/man1/cache-list.1* %doc %{_mandir}/man8/gm-jobs.8* %doc %{_mandir}/man8/arc-blahp-logger.8* %doc %{_mandir}/man8/a-rex-backtrace-collect.8* %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-arex %dir %{_localstatedir}/log/arc %dir %{_localstatedir}/spool/arc %dir %{_localstatedir}/spool/arc/ssm %dir %{_localstatedir}/spool/arc/urs %{python3_sitearch}/%{pkgdir}/control/AccountingDB.py %{python3_sitearch}/%{pkgdir}/control/AccountingPublishing.py %{python3_sitearch}/%{pkgdir}/control/Accounting.py %{python3_sitearch}/%{pkgdir}/control/Cache.py %{python3_sitearch}/%{pkgdir}/control/DataStaging.py 
%{python3_sitearch}/%{pkgdir}/control/Jobs.py %{python3_sitearch}/%{pkgdir}/control/RunTimeEnvironment.py %{python3_sitearch}/%{pkgdir}/control/__pycache__/AccountingDB.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/AccountingPublishing.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Accounting.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Cache.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/DataStaging.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Jobs.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/RunTimeEnvironment.* %{_libexecdir}/%{pkgdir}/arccandypond %dir %{_datadir}/%{pkgdir}/rte %dir %{_datadir}/%{pkgdir}/rte/ENV %{_datadir}/%{pkgdir}/rte/ENV/LRMS-SCRATCH %{_datadir}/%{pkgdir}/rte/ENV/PROXY %{_datadir}/%{pkgdir}/rte/ENV/RTE %{_datadir}/%{pkgdir}/rte/ENV/CANDYPOND %{_datadir}/%{pkgdir}/rte/ENV/SINGULARITY %dir %{_datadir}/%{pkgdir}/rte/ENV/CONDOR %{_datadir}/%{pkgdir}/rte/ENV/CONDOR/DOCKER %{_sbindir}/a-rex-backtrace-collect %config(noreplace) %{_sysconfdir}/arc.conf %dir %{_sysconfdir}/arc.conf.d %files arex-lrms-contrib %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/cancel-boinc-job %{_datadir}/%{pkgdir}/cancel-ll-job %{_datadir}/%{pkgdir}/cancel-lsf-job %{_datadir}/%{pkgdir}/cancel-pbs-job %{_datadir}/%{pkgdir}/cancel-pbspro-job %{_datadir}/%{pkgdir}/cancel-sge-job %{_datadir}/%{pkgdir}/scan-boinc-job %{_datadir}/%{pkgdir}/scan-ll-job %{_datadir}/%{pkgdir}/scan-lsf-job %{_datadir}/%{pkgdir}/scan-pbs-job %{_datadir}/%{pkgdir}/scan-pbspro-job %{_datadir}/%{pkgdir}/scan-sge-job %{_datadir}/%{pkgdir}/submit-boinc-job %{_datadir}/%{pkgdir}/submit-ll-job %{_datadir}/%{pkgdir}/submit-lsf-job %{_datadir}/%{pkgdir}/submit-pbs-job %{_datadir}/%{pkgdir}/submit-pbspro-job %{_datadir}/%{pkgdir}/submit-sge-job %{_datadir}/%{pkgdir}/Boinc.pm %{_datadir}/%{pkgdir}/LL.pm %{_datadir}/%{pkgdir}/LSF.pm %{_datadir}/%{pkgdir}/PBS.pm %{_datadir}/%{pkgdir}/PBSPRO.pm %{_datadir}/%{pkgdir}/SGE.pm %{_datadir}/%{pkgdir}/SGEmod.pm %files community-rtes %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/community_rtes.sh %{python3_sitearch}/%{pkgdir}/control/CommunityRTE.py %{python3_sitearch}/%{pkgdir}/control/__pycache__/CommunityRTE.* %files plugins-needed %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/test %{_libdir}/%{pkgdir}/test/libaccTEST.so %{_libdir}/%{pkgdir}/libaccARCREST.so %{_libdir}/%{pkgdir}/libaccBroker.so %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.so %if %{with_ldns} %{_libdir}/%{pkgdir}/libaccARCHERY.so %endif %{_libdir}/%{pkgdir}/libarcshc.so %{_libdir}/%{pkgdir}/libarcshclegacy.so %{_libdir}/%{pkgdir}/libarcshcotokens.so %{_libdir}/%{pkgdir}/libdmcfile.so %{_libdir}/%{pkgdir}/libdmchttp.so %{_libdir}/%{pkgdir}/libdmcsrm.so %{_libdir}/%{pkgdir}/libdmcrucio.so %{_libdir}/%{pkgdir}/libidentitymap.so %{_libdir}/%{pkgdir}/libmcchttp.so %{_libdir}/%{pkgdir}/libmccmsgvalidator.so %{_libdir}/%{pkgdir}/libmccsoap.so %{_libdir}/%{pkgdir}/libmcctcp.so %{_libdir}/%{pkgdir}/libmcctls.so %{_libdir}/%{pkgdir}/test/libaccTEST.apd %{_libdir}/%{pkgdir}/libaccARCREST.apd %{_libdir}/%{pkgdir}/libaccBroker.apd %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.apd %if %{with_ldns} %{_libdir}/%{pkgdir}/libaccARCHERY.apd %endif %{_libdir}/%{pkgdir}/libarcshc.apd %{_libdir}/%{pkgdir}/libarcshclegacy.apd %{_libdir}/%{pkgdir}/libarcshcotokens.apd %{_libdir}/%{pkgdir}/libdmcfile.apd %{_libdir}/%{pkgdir}/libdmchttp.apd %{_libdir}/%{pkgdir}/libdmcsrm.apd %{_libdir}/%{pkgdir}/libdmcrucio.apd %{_libdir}/%{pkgdir}/libidentitymap.apd %{_libdir}/%{pkgdir}/libmcchttp.apd 
%{_libdir}/%{pkgdir}/libmccmsgvalidator.apd %{_libdir}/%{pkgdir}/libmccsoap.apd %{_libdir}/%{pkgdir}/libmcctcp.apd %{_libdir}/%{pkgdir}/libmcctls.apd %files plugins-globus %defattr(-,root,root,-) %files plugins-globus-common %defattr(-,root,root,-) %{_libdir}/libarcglobusutils.so.* %files plugins-gridftp %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/arc-dmcgridftp %{_libdir}/%{pkgdir}/libdmcgridftpdeleg.so %{_libdir}/%{pkgdir}/libdmcgridftpdeleg.apd %files plugins-lcas-lcmaps %defattr(-,root,root,-) %{_libexecdir}/%{pkgdir}/arc-lcas %{_libexecdir}/%{pkgdir}/arc-lcmaps %if %{with_xrootd} %files plugins-xrootd %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/external %{_libdir}/%{pkgdir}/external/libdmcxrootd.so %{_libdir}/%{pkgdir}/external/libdmcxrootd.apd %{_libdir}/%{pkgdir}/libdmcxrootddeleg.so %{_libdir}/%{pkgdir}/libdmcxrootddeleg.apd %endif %if %{with_gfal} %files plugins-gfal %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/external %{_libdir}/%{pkgdir}/external/libdmcgfal.so %{_libdir}/%{pkgdir}/external/libdmcgfal.apd %{_libdir}/%{pkgdir}/libdmcgfaldeleg.so %{_libdir}/%{pkgdir}/libdmcgfaldeleg.apd %endif %if %{with_s3} %files plugins-s3 %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcs3.so %{_libdir}/%{pkgdir}/libdmcs3.apd %endif %files plugins-internal %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libaccINTERNAL.so %{_libdir}/%{pkgdir}/libaccINTERNAL.apd %files plugins-python %defattr(-,root,root,-) %doc docdir/python/* %{_libdir}/%{pkgdir}/libaccPythonBroker.so %{_libdir}/%{pkgdir}/libaccPythonBroker.apd %{_libdir}/%{pkgdir}/libpythonservice.so %{_libdir}/%{pkgdir}/libpythonservice.apd %files devel %defattr(-,root,root,-) %doc docdir/devel/* src/hed/shc/arcpdp/*.xsd %{_includedir}/%{pkgdir} %{_libdir}/lib*.so %{_bindir}/wsdl2hed %doc %{_mandir}/man1/wsdl2hed.1* %{_bindir}/arcplugin %doc %{_mandir}/man1/arcplugin.1* %files -n python%{python3_pkgversion}-%{name} %defattr(-,root,root,-) %{python3_sitearch}/_arc.*so %{python3_sitearch}/%{pkgdir}/[^_p]*.py %{python3_sitearch}/%{pkgdir}/__pycache__/[^_p]*.* %files test-utils %defattr(-,root,root,-) %{_bindir}/arcperftest %doc %{_mandir}/man1/arcperftest.1* %files archery-manage %defattr(-,root,root,-) %{_sbindir}/archery-manage %files wn %defattr(-,root,root,-) %attr(4755,root,root) %{_bindir}/arc-job-cgroup %files -n python%{python3_pkgversion}-arcrest %defattr(-,root,root,-) %{python3_sitelib}/pyarcrest %{python3_sitelib}/pyarcrest-*.*-info %{_bindir}/arcrest %files arc-exporter %defattr(-,root,root,-) %{_sbindir}/arc-exporter %changelog * @SPECDATE@ Anders Waananen - @baseversion@-@fedorarelease@ - Unofficial build nordugrid-arc-7.1.1/PaxHeaders/Makefile.in0000644000000000000000000000013215067751346015421 xustar0030 mtime=1759498982.985572364 30 atime=1759499015.134211902 30 ctime=1759499024.688727464 nordugrid-arc-7.1.1/Makefile.in0000644000175000002070000010163215067751346017326 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ # /opt/local is the location for macports on MacOS X VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = . 
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ $(am__configure_deps) $(am__DIST_COMMON) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_HEADER = config.h CONFIG_CLEAN_FILES = include/arc/ArcVersion.h \ src/hed/profiles/general/general.xml \ src/services/a-rex/rte/ENV/PROXY \ src/services/a-rex/rte/ENV/CANDYPOND nordugrid-arc.spec CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ cscope distdir distdir-am dist dist-all distcheck am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \ config.h.in # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags CSCOPE = cscope am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in \ $(srcdir)/nordugrid-arc.spec.in \ $(top_srcdir)/include/arc/ArcVersion.h.in \ $(top_srcdir)/src/hed/profiles/general/general.xml.in \ $(top_srcdir)/src/services/a-rex/rte/ENV/CANDYPOND.in \ $(top_srcdir)/src/services/a-rex/rte/ENV/PROXY.in ABOUT-NLS \ AUTHORS README compile config.guess config.rpath config.sub \ install-sh ltmain.sh missing DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ if test -d "$(distdir)"; then \ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ && rm -rf "$(distdir)" \ || { sleep 5 && rm -rf "$(distdir)"; }; \ else :; fi am__post_remove_distdir = $(am__remove_distdir) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best DIST_TARGETS = dist-gzip distuninstallcheck_listfiles = find . -type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' distcleancheck_listfiles = find . 
-type f -print pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = 
@GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = 
@am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ ACLOCAL_AMFLAGS = -I m4 `test -d /opt/local/share/aclocal && echo -I /opt/local/share/aclocal` @SWIG_ENABLED_TRUE@SWIG_SD = swig SUBDIRS = src include $(SWIG_SD) python $(POSUB) debian DIST_SUBDIRS = src include swig python po debian EXTRA_DIST = nordugrid-arc.spec autogen.sh LICENSE NOTICE all: config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: am--refresh: Makefile @: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): config.h: stamp-h1 @test -f $@ || rm -f stamp-h1 @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 cd $(top_builddir) && $(SHELL) ./config.status config.h $(srcdir)/config.h.in: $(am__configure_deps) ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) rm -f stamp-h1 touch $@ distclean-hdr: -rm -f config.h stamp-h1 include/arc/ArcVersion.h: $(top_builddir)/config.status $(top_srcdir)/include/arc/ArcVersion.h.in cd $(top_builddir) && $(SHELL) ./config.status $@ src/hed/profiles/general/general.xml: $(top_builddir)/config.status $(top_srcdir)/src/hed/profiles/general/general.xml.in cd $(top_builddir) && $(SHELL) ./config.status $@ src/services/a-rex/rte/ENV/PROXY: $(top_builddir)/config.status $(top_srcdir)/src/services/a-rex/rte/ENV/PROXY.in cd $(top_builddir) && $(SHELL) ./config.status $@ src/services/a-rex/rte/ENV/CANDYPOND: $(top_builddir)/config.status $(top_srcdir)/src/services/a-rex/rte/ENV/CANDYPOND.in cd $(top_builddir) && $(SHELL) ./config.status $@ nordugrid-arc.spec: $(top_builddir)/config.status $(srcdir)/nordugrid-arc.spec.in cd $(top_builddir) && $(SHELL) ./config.status $@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs distclean-libtool: -rm -f libtool config.lt # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscope: cscope.files test ! -s cscope.files \ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) clean-cscope: -rm -f cscope.files cscope.files: clean-cscope cscopelist cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags -rm -f cscope.out cscope.in.out cscope.po.out cscope.files distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! 
-type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz $(am__post_remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 $(am__post_remove_distdir) dist-lzip: distdir tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__post_remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__post_remove_distdir) dist-zstd: distdir tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst $(am__post_remove_distdir) dist-tarZ: distdir @echo WARNING: "Support for distribution archives compressed with" \ "legacy program 'compress' is deprecated." >&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__post_remove_distdir) dist-shar: distdir @echo WARNING: "Support for shar distribution archives is" \ "deprecated." >&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz $(am__post_remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__post_remove_distdir) dist dist-all: $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' $(am__post_remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lz*) \ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ *.tar.zst*) \ zstd -dc $(distdir).tar.zst | $(am__untar) ;;\ esac chmod -R a-w $(distdir) chmod u+w $(distdir) mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build/sub \ && ../../configure \ $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ --srcdir=../.. --prefix="$$dc_install_base" \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__post_remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @test -n '$(distuninstallcheck_dir)' || { \ echo 'ERROR: trying to run $@ with an empty' \ '$$(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ $(am__cd) '$(distuninstallcheck_dir)' || { \ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . ; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile config.h installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
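# Usage sketch (illustrative only, not part of the generated rules): per the comment
# above about this directory's mostly independent subdirectories, 'make' variables can
# be overridden on the command line instead of editing this generated Makefile. The
# flag values below are assumptions for illustration:
#   make -C src CXXFLAGS='-O0 -g'
#   make distcheck DISTCHECK_CONFIGURE_FLAGS='--disable-doc'
# DISTCHECK_CONFIGURE_FLAGS is standard automake and is passed to the ./configure run
# performed by the distcheck target above.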
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f Makefile distclean-am: clean-am distclean-generic distclean-hdr \ distclean-libtool distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) all install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--refresh check check-am clean clean-cscope clean-generic \ clean-libtool cscope cscopelist-am ctags ctags-am dist \ dist-all dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ \ dist-xz dist-zip dist-zstd distcheck distclean \ distclean-generic distclean-hdr distclean-libtool \ distclean-tags distcleancheck distdir distuninstallcheck dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/PaxHeaders/config.h.in0000644000000000000000000000013215067751345015376 xustar0030 mtime=1759498981.708334649 30 atime=1759499020.769297527 30 ctime=1759499024.689842727 nordugrid-arc-7.1.1/config.h.in0000644000175000002070000004017615067751345017310 0ustar00mockbuildmock00000000000000/* config.h.in. Generated from configure.ac by autoheader. */ /* Define to 1 if the `closedir' function returns void instead of `int'. */ #undef CLOSEDIR_VOID /* Define to 1 if translation of program messages to the user's native language is requested. */ #undef ENABLE_NLS /* Globus GSSAPI GSI is for OpenSSL post-1.1 */ #undef GLOBUS_GSSAPI_GSI_OLD_OPENSSL /* Globus GSSAPI GSI version */ #undef GLOBUS_GSSAPI_GSI_VERSION /* Globus IO version */ #undef GLOBUS_IO_VERSION /* Define to 1 if you have the `acl' function. */ #undef HAVE_ACL /* Define to 1 if you have the `alarm' function. */ #undef HAVE_ALARM /* Define to 1 if you have the <arpa/inet.h> header file. */ #undef HAVE_ARPA_INET_H /* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the CoreFoundation framework. */ #undef HAVE_CFLOCALECOPYCURRENT /* Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in the CoreFoundation framework. */ #undef HAVE_CFPREFERENCESCOPYAPPVALUE /* Define to 1 if your system has a working `chown' function. 
*/ #undef HAVE_CHOWN /* Define if the GNU dcgettext() function is already present or preinstalled. */ #undef HAVE_DCGETTEXT /* Define to 1 if you have the declaration of `strerror_r', and to 0 if you don't. */ #undef HAVE_DECL_STRERROR_R /* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'. */ #undef HAVE_DIRENT_H /* Define to 1 if you have the <dlfcn.h> header file. */ #undef HAVE_DLFCN_H /* define if DTLSv1_2_method is available */ #undef HAVE_DTLSV1_2_METHOD /* define if DTLSv1_method is available */ #undef HAVE_DTLSV1_METHOD /* define if DTLS_method is available */ #undef HAVE_DTLS_METHOD /* Define to 1 if you have the `dup2' function. */ #undef HAVE_DUP2 /* Define to 1 if you have the <dustat.h> header file. */ #undef HAVE_DUSTAT_H /* Define to 1 if you have the <fcntl.h> header file. */ #undef HAVE_FCNTL_H /* Define to 1 if you have the <float.h> header file. */ #undef HAVE_FLOAT_H /* Define to 1 if you have the `floor' function. */ #undef HAVE_FLOOR /* Define to 1 if you have the `fork' function. */ #undef HAVE_FORK /* Define to 1 if you have the `ftruncate' function. */ #undef HAVE_FTRUNCATE /* Define to 1 if you have the `getdomainname' function. */ #undef HAVE_GETDOMAINNAME /* Define to 1 if you have the `getgrouplist' function. */ #undef HAVE_GETGROUPLIST /* Define to 1 if you have the `gethostname' function. */ #undef HAVE_GETHOSTNAME /* Define to 1 if you have the <getopt.h> header file. */ #undef HAVE_GETOPT_H /* Define to 1 if you have the `getopt_long_only' function. */ #undef HAVE_GETOPT_LONG_ONLY /* Define to 1 if you have the `getpid' function. */ #undef HAVE_GETPID /* Define if the GNU gettext() function is already present or preinstalled. */ #undef HAVE_GETTEXT /* define if using glibmm 2.68 API */ #undef HAVE_GLIBMM_268 /* define if glibmm have support local symbol resolution in shared libraries */ #undef HAVE_GLIBMM_BIND_LOCAL /* define if glibmm have getenv operations */ #undef HAVE_GLIBMM_GETENV /* define if glibmm have listenv operations */ #undef HAVE_GLIBMM_LISTENV /* define if glibmm has Glib::OptionContext::get_help() */ #undef HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP /* define if glibmm has Glib::OptionContext::set_summary() */ #undef HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY /* define if glibmm have setenv operations */ #undef HAVE_GLIBMM_SETENV /* define if glibmm have unsetenv operations */ #undef HAVE_GLIBMM_UNSETENV /* define if GLOBUS is available */ #undef HAVE_GLOBUS /* Define to 1 if you have the `globus_ftp_client_handleattr_set_gridftp2' function. */ #undef HAVE_GLOBUS_FTP_CLIENT_HANDLEATTR_SET_GRIDFTP2 /* Define to 1 if you have the `globus_thread_set_model' function. */ #undef HAVE_GLOBUS_THREAD_SET_MODEL /* Define to 1 if you have the `gmtime_r' function. */ #undef HAVE_GMTIME_R /* Define if you have the iconv() function and it works. */ #undef HAVE_ICONV /* Define to 1 if you have the <inttypes.h> header file. */ #undef HAVE_INTTYPES_H /* define if lcas is available */ #undef HAVE_LCAS /* Define to 1 if you have the <lcas.h> header file. */ #undef HAVE_LCAS_H /* Define to 1 if you have the `lchown' function. */ #undef HAVE_LCHOWN /* define if lcmaps is available */ #undef HAVE_LCMAPS /* Define to 1 if you have the <lcmaps.h> header file. */ #undef HAVE_LCMAPS_H /* define if LDNS is enabled and available */ #undef HAVE_LDNS /* Define to 1 if you have the `nsl' library (-lnsl). */ #undef HAVE_LIBNSL /* Define to 1 if you have the <limits.h> header file. */ #undef HAVE_LIMITS_H /* Define to 1 if you have the `localtime_r' function. 
*/ #undef HAVE_LOCALTIME_R /* Define to 1 if `lstat' has the bug that it succeeds when given the zero-length file name argument. */ #undef HAVE_LSTAT_EMPTY_STRING_BUG /* Define to 1 if your system has a GNU libc compatible `malloc' function, and to 0 otherwise. */ #undef HAVE_MALLOC /* Define to 1 if you have the `memchr' function. */ #undef HAVE_MEMCHR /* Define to 1 if you have the `memmove' function. */ #undef HAVE_MEMMOVE /* Define to 1 if you have the <memory.h> header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the `memset' function. */ #undef HAVE_MEMSET /* Define to 1 if you have the `mkdir' function. */ #undef HAVE_MKDIR /* Define to 1 if you have the `mkdtemp' function. */ #undef HAVE_MKDTEMP /* Define to 1 if you have the `mkfifo' function. */ #undef HAVE_MKFIFO /* Define to 1 if you have the `[mkstemp]' function. */ #undef HAVE_MKSTEMP /* Define to 1 if you have the `mktemp' function. */ #undef HAVE_MKTEMP /* define if the compiler implements namespaces */ #undef HAVE_NAMESPACES /* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */ #undef HAVE_NDIR_H /* Define to 1 if you have the <netdb.h> header file. */ #undef HAVE_NETDB_H /* Define to 1 if you have the <netinet/in.h> header file. */ #undef HAVE_NETINET_IN_H /* define if NSS is enabled and available */ #undef HAVE_NSS /* Define to 1 if you have the `posix_fallocate' function. */ #undef HAVE_POSIX_FALLOCATE /* Define to 1 if the system has the type `ptrdiff_t'. */ #undef HAVE_PTRDIFF_T /* Define if you have Py_InitializeEx function */ #undef HAVE_PYTHON_INITIALIZE_EX /* Define to 1 if you have the `readdir_r' function. */ #undef HAVE_READDIR_R /* Define to 1 if your system has a GNU libc compatible `realloc' function, and to 0 otherwise. */ #undef HAVE_REALLOC /* Define to 1 if you have the `regcomp' function. */ #undef HAVE_REGCOMP /* Define to 1 if you have the `rmdir' function. */ #undef HAVE_RMDIR /* Define if S3 API has timeouts */ #undef HAVE_S3_TIMEOUT /* Define to 1 if you have the <sasl.h> header file. */ #undef HAVE_SASL_H /* Define to 1 if you have the <sasl/sasl.h> header file. */ #undef HAVE_SASL_SASL_H /* Define to 1 if you have the `select' function. */ #undef HAVE_SELECT /* Define to 1 if you have the `setenv' function. */ #undef HAVE_SETENV /* Define to 1 if you have the `socket' function. */ #undef HAVE_SOCKET /* define if SQLite is available */ #undef HAVE_SQLITE /* Define to 1 if you have the `sqlite3_errstr' function. */ #undef HAVE_SQLITE3_ERRSTR /* define if SSLv3_method is available */ #undef HAVE_SSLV3_METHOD /* define if the compiler has stringstream */ #undef HAVE_SSTREAM /* Define to 1 if `stat' has the bug that it succeeds when given the zero-length file name argument. */ #undef HAVE_STAT_EMPTY_STRING_BUG /* Define to 1 if stdbool.h conforms to C99. */ #undef HAVE_STDBOOL_H /* Define to 1 if you have the <stdint.h> header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the <stdlib.h> header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the `strcasecmp' function. */ #undef HAVE_STRCASECMP /* Define to 1 if you have the `strchr' function. */ #undef HAVE_STRCHR /* Define to 1 if you have the `strcspn' function. */ #undef HAVE_STRCSPN /* Define to 1 if you have the `strdup' function. */ #undef HAVE_STRDUP /* Define to 1 if you have the `strerror' function. */ #undef HAVE_STRERROR /* Define to 1 if you have the `strerror_r' function. */ #undef HAVE_STRERROR_R /* Define to 1 if you have the <strings.h> header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the <string.h> header file. 
*/ #undef HAVE_STRING_H /* Define to 1 if you have the `strncasecmp' function. */ #undef HAVE_STRNCASECMP /* Define to 1 if you have the `strstr' function. */ #undef HAVE_STRSTR /* Define to 1 if you have the `strtol' function. */ #undef HAVE_STRTOL /* Define to 1 if you have the `strtoul' function. */ #undef HAVE_STRTOUL /* Define to 1 if you have the `strtoull' function. */ #undef HAVE_STRTOULL /* Define to 1 if `st_blksize' is a member of `struct stat'. */ #undef HAVE_STRUCT_STAT_ST_BLKSIZE /* Define if you have systemd daemon */ #undef HAVE_SYSTEMD_DAEMON /* Define to 1 if you have the <systemd/sd-daemon.h> header file. */ #undef HAVE_SYSTEMD_SD_DAEMON_H /* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'. */ #undef HAVE_SYS_DIR_H /* Define to 1 if you have the <sys/file.h> header file. */ #undef HAVE_SYS_FILE_H /* Define to 1 if you have the <sys/filsys.h> header file. */ #undef HAVE_SYS_FILSYS_H /* Define to 1 if you have the <sys/fs/s5param.h> header file. */ #undef HAVE_SYS_FS_S5PARAM_H /* Define to 1 if you have the <sys/fs_types.h> header file. */ #undef HAVE_SYS_FS_TYPES_H /* Define to 1 if you have the <sys/mount.h> header file. */ #undef HAVE_SYS_MOUNT_H /* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'. */ #undef HAVE_SYS_NDIR_H /* Define to 1 if you have the <sys/param.h> header file. */ #undef HAVE_SYS_PARAM_H /* Define to 1 if you have the <sys/select.h> header file. */ #undef HAVE_SYS_SELECT_H /* Define to 1 if you have the <sys/socket.h> header file. */ #undef HAVE_SYS_SOCKET_H /* Define to 1 if you have the <sys/statfs.h> header file. */ #undef HAVE_SYS_STATFS_H /* Define to 1 if you have the <sys/stat.h> header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the <sys/time.h> header file. */ #undef HAVE_SYS_TIME_H /* Define to 1 if you have the <sys/types.h> header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the <sys/vfs.h> header file. */ #undef HAVE_SYS_VFS_H /* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */ #undef HAVE_SYS_WAIT_H /* Define to 1 if you have the `timegm' function. */ #undef HAVE_TIMEGM /* define if TLSv1_1_method is available */ #undef HAVE_TLSV1_1_METHOD /* define if TLSv1_2_method is available */ #undef HAVE_TLSV1_2_METHOD /* define if TLSv1_method is available */ #undef HAVE_TLSV1_METHOD /* define if TLS_method is available */ #undef HAVE_TLS_METHOD /* Define to 1 if you have the `tzset' function. */ #undef HAVE_TZSET /* Define to 1 if you have the <unistd.h> header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the `unsetenv' function. */ #undef HAVE_UNSETENV /* Define to 1 if you have the <uuid/uuid.h> header file. */ #undef HAVE_UUID_UUID_H /* Define to 1 if you have the `vfork' function. */ #undef HAVE_VFORK /* Define to 1 if you have the <vfork.h> header file. */ #undef HAVE_VFORK_H /* Define to 1 if `fork' works. */ #undef HAVE_WORKING_FORK /* Define to 1 if `vfork' works. */ #undef HAVE_WORKING_VFORK /* define if XMLSEC package is available */ #undef HAVE_XMLSEC /* Define to 1 if the system has the type `_Bool'. */ #undef HAVE__BOOL /* installation prefix */ #undef INSTPREFIX /* library installation subdirectory */ #undef LIBSUBDIR /* Define to 1 if `lstat' dereferences a symlink specified with a trailing slash. */ #undef LSTAT_FOLLOWS_SLASHED_SYMLINK /* Define to the sub-directory where libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. 
*/ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* package data subdirectory */ #undef PKGDATASUBDIR /* helper programs installation subdirectory */ #undef PKGLIBEXECSUBDIR /* plugin installation subdirectory */ #undef PKGLIBSUBDIR /* Define as the return type of signal handlers (`int' or `void'). */ #undef RETSIGTYPE /* Define to the type of arg 1 for `select'. */ #undef SELECT_TYPE_ARG1 /* Define to the type of args 2, 3 and 4 for `select'. */ #undef SELECT_TYPE_ARG234 /* Define to the type of arg 5 for `select'. */ #undef SELECT_TYPE_ARG5 /* define to build job information in SQLite storage */ #undef SQLITEJSTORE_ENABLED /* Define if the block counts reported by statfs may be truncated to 2GB and the correct values may be stored in the f_spare array. (SunOS 4.1.2, 4.1.3, and 4.1.3_U1 are reported to have this problem. SunOS 4.1.1 seems not to be affected.) */ #undef STATFS_TRUNCATES_BLOCK_COUNTS /* Define if there is no specific function for reading file systems usage information and you have the <sys/filsys.h> header file. (SVR2) */ #undef STAT_READ_FILSYS /* Define if statfs takes 2 args and struct statfs has a field named f_bsize. (4.3BSD, SunOS 4, HP-UX, AIX PS/2) */ #undef STAT_STATFS2_BSIZE /* Define if statfs takes 2 args and struct statfs has a field named f_fsize. (4.4BSD, NetBSD) */ #undef STAT_STATFS2_FSIZE /* Define if statfs takes 2 args and the second argument has type struct fs_data. (Ultrix) */ #undef STAT_STATFS2_FS_DATA /* Define if statfs takes 3 args. (DEC Alpha running OSF/1) */ #undef STAT_STATFS3_OSF1 /* Define if statfs takes 4 args. (SVR3, Dynix, Irix, Dolphin) */ #undef STAT_STATFS4 /* Define if there is a function named statvfs. (SVR4) */ #undef STAT_STATVFS /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Define to 1 if strerror_r returns char *. */ #undef STRERROR_R_CHAR_P /* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */ #undef TIME_WITH_SYS_TIME /* Define to 1 if your <sys/time.h> declares `struct tm'. */ #undef TM_IN_SYS_TIME /* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # undef _ALL_SOURCE #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # undef _GNU_SOURCE #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # undef _POSIX_PTHREAD_SEMANTICS #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # undef _TANDEM_SOURCE #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # undef __EXTENSIONS__ #endif /* Version number of package */ #undef VERSION /* Enable large inode numbers on Mac OS X 10.5. */ #ifndef _DARWIN_USE_64_BIT_INODE # define _DARWIN_USE_64_BIT_INODE 1 #endif /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS /* Define for large files, on AIX-style hosts. */ #undef _LARGE_FILES /* Define if compiling for MacOSX */ #undef _MACOSX /* Define to 1 if on MINIX. */ #undef _MINIX /* Define to 2 if the system does not provide POSIX.1 features except with this defined. */ #undef _POSIX_1_SOURCE /* Define to 1 if you need to in order for `stat' and other things to work. */ #undef _POSIX_SOURCE /* Define to empty if `const' does not conform to ANSI C. */ #undef const /* Define to `int' if <sys/types.h> doesn't define. */ #undef gid_t /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. 
*/ #ifndef __cplusplus #undef inline #endif /* Define to rpl_malloc if the replacement function should be used. */ #undef malloc /* Define to `int' if <sys/types.h> does not define. */ #undef mode_t /* Define to `long int' if <sys/types.h> does not define. */ #undef off_t /* Define to `int' if <sys/types.h> does not define. */ #undef pid_t /* Define to rpl_realloc if the replacement function should be used. */ #undef realloc /* Define to `unsigned int' if <sys/types.h> does not define. */ #undef size_t /* Define to `int' if <sys/types.h> doesn't define. */ #undef uid_t /* Define as `fork' if `vfork' does not work. */ #undef vfork nordugrid-arc-7.1.1/PaxHeaders/nordugrid-arc.spec0000644000000000000000000000013215067751414016764 xustar0030 mtime=1759499020.640295567 30 atime=1759499024.528354646 30 ctime=1759499024.708441001 nordugrid-arc-7.1.1/nordugrid-arc.spec0000644000175000002070000012646215067751414020675 0ustar00mockbuildmock00000000000000%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}} # # xROOTd # %if %{?fedora}%{!?fedora:0} >= 24 || %{?rhel}%{!?rhel:0} %global with_xrootd %{!?_without_xrootd:1}%{?_without_xrootd:0} %else %global with_xrootd 0 %endif %{!?python3_pkgversion: %global python3_pkgversion 3} %global with_pylint 0 %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} >= 5 %global with_s3 1 %else %global with_s3 0 %endif %if ( %{?fedora}%{!?fedora:0} >= 21 && %{?fedora}%{!?fedora:0} < 43 ) || ( %{?rhel}%{!?rhel:0} >= 5 && %{?rhel}%{!?rhel:0} < 10 ) %global with_gfal 1 %else %global with_gfal 0 %endif %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} %global with_xmlsec1 %{!?_without_xmlsec1:1}%{?_without_xmlsec1:0} %else %global with_xmlsec1 0 %endif # LDNS %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 5 %global with_ldns 1 %else %global with_ldns 0 %endif %if %{?fedora}%{!?fedora:0} >= 25 || %{?rhel}%{!?rhel:0} >= 7 %global use_systemd 1 %else %global use_systemd 0 %endif %global with_ldap_service 1 %global pkgdir arc # bash-completion %global _bashcompdir %(pkg-config --variable=completionsdir bash-completion 2>/dev/null || echo %{_sysconfdir}/bash_completion.d) # # Macros for scripts # # Stop and disable service on package removal %if %{use_systemd} %define stop_on_removal() %{expand:%%systemd_preun %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?stop_on_removal:0}%{!?stop_on_removal:1} %global stop_on_removal() if [ $1 -eq 0 ]; then for s in %*; do service $s stop > /dev/null 2>&1 || : ; done; for s in %*; do /sbin/chkconfig --del $s; done; fi %endif %endif # Enable a service %if %{use_systemd} %define enable_service() %{expand:%%systemd_post %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define enable_service() %{expand:%%fillup_and_insserv -f %{?*}} %else %define enable_service() for s in %{?*}; do /sbin/chkconfig --add $s ; done %endif %endif # Conditionally restart service on package update %if %{use_systemd} %define condrestart_on_update() %{expand:%%systemd_postun_with_restart %(sed 's/[^ ]*/&.service/g' <<< '%{?*}')} %else %if %{?suse_version:1}%{!?suse_version:0} %define condrestart_on_update() %{expand:%%restart_on_update %{?*}} %{expand:%%insserv_cleanup} %else %define condrestart_on_update() if [ $1 -ge 1 ]; then for s in %{?*}; do service $s condrestart > /dev/null 2>&1 || : ; done; fi %endif %endif # Standard service requirements %if %{use_systemd} %define service_post_requires systemd-units %define service_preun_requires systemd-units %define service_postun_requires systemd-units %else %if %{?suse_version:1}%{!?suse_version:0} %define 
service_post_requires %{insserv_prereq} %define service_preun_requires %{insserv_prereq} %define service_postun_requires %{insserv_prereq} %else %define service_post_requires chkconfig %define service_preun_requires chkconfig, initscripts %define service_postun_requires initscripts %endif %endif Name: nordugrid-arc Version: 7.1.1 Release: 1%{?dist} Summary: Advanced Resource Connector Middleware Group: System Environment/Daemons License: ASL 2.0 URL: http://www.nordugrid.org/ Source: http://download.nordugrid.org/packages/%{name}/releases/%{version}/src/%{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) # Packages dropped without replacements Obsoletes: %{name}-chelonia < 2.0.0 Obsoletes: %{name}-hopi < 2.0.0 Obsoletes: %{name}-isis < 2.0.0 Obsoletes: %{name}-janitor < 2.0.0 Obsoletes: %{name}-doxygen < 4.0.0 Obsoletes: %{name}-arcproxyalt < 6.0.0 Obsoletes: %{name}-java < 6.0.0 Obsoletes: %{name}-egiis < 6.0.0 Obsoletes: %{name}-acix-cache < 6.0.0 Obsoletes: %{name}-acix-core < 7.0.0 Obsoletes: %{name}-acix-scanner < 7.0.0 Obsoletes: %{name}-acix-index < 7.0.0 Obsoletes: %{name}-arex-python-lrms < 7.0.0 Obsoletes: %{name}-gridftpd < 7.0.0 Obsoletes: python2-%{name} < 7.0.0 Obsoletes: %{name}-python < 5.3.3 Obsoletes: %{name}-nordugridmap < 7.0.0 Obsoletes: %{name}-gridmap-utils < 6.0.0 Obsoletes: %{name}-plugins-gridftpjob < 7.0.0 Obsoletes: %{name}-plugins-ldap < 7.0.0 %if ! %{with_ldap_service} Obsoletes: %{name}-infosys-ldap < %{version}-%{release} Obsoletes: %{name}-ldap-infosys < 6.0.0 Obsoletes: %{name}-aris < 6.0.0 %endif BuildRequires: autoconf BuildRequires: automake BuildRequires: libtool BuildRequires: make BuildRequires: gcc-c++ BuildRequires: cppunit-devel BuildRequires: pkgconfig %if %{use_systemd} BuildRequires: systemd BuildRequires: systemd-devel %endif BuildRequires: libuuid-devel BuildRequires: gettext-devel BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-pip BuildRequires: python%{python3_pkgversion}-setuptools BuildRequires: python%{python3_pkgversion}-wheel %if %{with_pylint} BuildRequires: pylint %endif %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} >= 10 BuildRequires: glibmm2.68-devel %else BuildRequires: glibmm24-devel %endif BuildRequires: libxml2-devel BuildRequires: openssl BuildRequires: openssl-devel %if %{with_xmlsec1} BuildRequires: xmlsec1-devel >= 1.2.4 BuildRequires: xmlsec1-openssl-devel >= 1.2.4 %endif BuildRequires: nss-devel BuildRequires: globus-common-devel BuildRequires: globus-ftp-client-devel BuildRequires: globus-ftp-control-devel %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: globus-gssapi-gsi-devel >= 12.2 %else BuildRequires: globus-gssapi-gsi-devel < 12.2 %endif %if %{with_xrootd} BuildRequires: xrootd-client-devel >= 1:4.5.0 %endif %if %{with_gfal} BuildRequires: gfal2-devel %endif %if %{with_s3} BuildRequires: libs3-devel %endif %if %{?fedora}%{!?fedora:0} >= 21 || %{?rhel}%{!?rhel:0} BuildRequires: perl-generators %endif # Needed for Boinc backend testing during make check BuildRequires: perl(DBI) # Needed for infoprovider testing during make check BuildRequires: perl(English) BuildRequires: perl(JSON::XS) BuildRequires: perl(Sys::Hostname) BuildRequires: perl(XML::Simple) # Needed for LRMS testing during make check BuildRequires: perl(Test::Harness) BuildRequires: perl(Test::Simple) BuildRequires: swig %if %{?fedora}%{!?fedora:0} >= 4 || %{?rhel}%{!?rhel:0} >= 5 BuildRequires: libtool-ltdl-devel %else 
BuildRequires: libtool %endif BuildRequires: sqlite-devel >= 3.6 %if %{with_ldns} BuildRequires: ldns-devel >= 1.6.8 %endif %if %{?fedora}%{!?fedora:0} >= 17 || %{?rhel}%{!?rhel:0} >= 7 || %{?suse_version:1}%{!?suse_version:0} BuildRequires: pkgconfig(bash-completion) %endif BuildRequires: help2man %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 7 || %{?suse_version:1}%{!?suse_version:0} Requires: hostname %else Requires: net-tools %endif Requires: openssl %description NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC middleware is a software solution that uses distributed computing technologies to enable sharing and federation of computing resources across different administrative and application domains. ARC is used to create distributed infrastructures of various scope and complexity, from campus to national and global deployments. %package client Summary: ARC command line clients Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} %description client NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This client package contains all the CLI tools that are needed to operate with x509 proxies, submit and manage jobs and handle data transfers. %package hed Summary: ARC Hosting Environment Daemon Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description hed NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC Hosting Environment Daemon (HED) is a Web Service container for ARC services. %package datadelivery-service Summary: ARC data delivery service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires: %{name}-arcctl-service = %{version}-%{release} Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description datadelivery-service NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC data delivery service. 
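# A hedged operational sketch for the service packages above (hed and
# datadelivery-service): the unit names below match the systemd units listed
# in the %%files sections of this spec; the systemctl invocations are standard
# systemd usage, not commands defined by this package.
#   systemctl enable --now arched                      # HED service container
#   systemctl enable --now arc-datadelivery-service
# On platforms without systemd the corresponding init scripts are installed
# instead (see the use_systemd branches in the %%files sections).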
%if %{with_ldap_service} %package infosys-ldap Summary: ARC LDAP-based information services Group: System Environment/Libraries %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: openldap-servers Requires: bdii Requires: glue-schema >= 2.0.10 Requires: %{name}-arcctl-service = %{version}-%{release} Provides: %{name}-ldap-infosys = %{version}-%{release} Obsoletes: %{name}-ldap-infosys < 6.0.0 Provides: %{name}-aris = %{version}-%{release} Obsoletes: %{name}-aris < 6.0.0 Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 8 Requires(post): policycoreutils-python-utils Requires(postun): policycoreutils-python-utils %else %if %{?fedora}%{!?fedora:0} >= 11 || %{?rhel}%{!?rhel:0} >= 6 Requires(post): policycoreutils-python Requires(postun): policycoreutils-python %else %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 Requires(post): policycoreutils Requires(postun): policycoreutils %endif %endif %endif %description infosys-ldap NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC information services relying on BDII and LDAP technologies to publish ARC CE information according to various LDAP schemas. Please note that the information collectors are part of another package, the nordugrid-arc-arex. %endif %package monitor Summary: ARC LDAP monitor web application Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: php Requires: php-gd Requires: php-ldap %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Obsoletes: %{name}-ldap-monitor < 6.0.0 Obsoletes: %{name}-ws-monitor < 6.0.0 %description monitor NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the PHP web application that is used to set up a web-based monitor which pulls information from the LDAP information system and visualizes it. %package arcctl Summary: ARC Control Tool Group: Applications/Internet Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 26 || %{?rhel}%{!?rhel:0} >= 8 Requires: python3-jwcrypto %endif %description arcctl NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC Control Tool with a basic set of control modules suitable for both server and client side. %package arcctl-service Summary: ARC Control Tool - service control modules Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-arcctl = %{version}-%{release} %description arcctl-service NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the service control modules for the ARC Control Tool that allow working with the server-side configuration and managing ARC services. 
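# A minimal usage sketch for the control modules packaged above. Only the
# test-ca commands are taken verbatim from the scriptlets later in this spec;
# the other subcommands are assumptions based on arcctl's own online help and
# should be verified with 'arcctl --help':
#   arcctl service list        # assumed: list ARC services and their state
#   arcctl config dump         # assumed: print the parsed arc.conf
#   arcctl test-ca init        # verbatim from the arex post scriptlet below
#   arcctl test-ca hostcert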
%package arex Summary: ARC Resource-coupled EXecution service Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-hed = %{version}-%{release} Requires: %{name}-arcctl = %{version}-%{release} Requires: %{name}-arcctl-service = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Requires: findutils Requires: procps Provides: %{name}-cache-service = %{version}-%{release} Obsoletes: %{name}-cache-service < 6.0.0 Provides: %{name}-candypond = %{version}-%{release} Obsoletes: %{name}-candypond < 6.0.0 Requires(post): %{name}-arcctl = %{version}-%{release} Requires(preun): %{name}-arcctl = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 13 || %{?rhel}%{!?rhel:0} >= 7 || %{?suse_version:1}%{!?suse_version:0} Requires(post): hostname %else Requires(post): net-tools %endif Requires(post): openssl Requires(post): %{service_post_requires} Requires(preun): %{service_preun_requires} Requires(postun): %{service_postun_requires} %description arex NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC Resource-coupled EXecution service (AREX) is the Computing Element of the ARC middleware. AREX offers a full-featured middle layer to manage computational tasks, including interfacing to local batch systems and taking care of complex tasks such as data staging, data caching, software environment provisioning, information collection and exposure, and accounting information gathering and publishing. %package arex-lrms-contrib Summary: ARC Resource-coupled EXecution service - contributed LRMS backends Requires: %{name}-arex = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif # Split from AREX package Obsoletes: %{name}-arex < 7.0.0 %description arex-lrms-contrib NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The AREX contributed LRMS backends package contains additional LRMS support scripts contributed by the ARC user community. %package community-rtes Summary: ARC community defined RTEs support Group: System Environment/Libraries Requires: %{name}-arex = %{version}-%{release} Requires: %{name}-arcctl = %{version}-%{release} Requires: gnupg2 Requires: python3-dns %description community-rtes NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). Community RTEs is the framework that allows deploying software packages (tarballs, containers, etc.) provided by trusted communities to an ARC CE using simple arcctl commands. It is released as a technology preview. %package plugins-needed Summary: ARC base plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Provides: %{name}-plugins-arcrest = %{version}-%{release} Obsoletes: %{name}-plugins-arcrest < 7.0.0 %description plugins-needed NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs). 
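# A hedged sketch of the community-rtes workflow described above: these
# subcommands are assumptions based on the ARC documentation's 'arcctl rte'
# interface and are not defined in this spec; verify with 'arcctl rte --help'.
# The community and RTE names are hypothetical placeholders.
#   arcctl rte community add exampleorg
#   arcctl rte community deploy exampleorg APPS-EXAMPLE-1.0
#   arcctl rte enable APPS-EXAMPLE-1.0
# Once deployed, community RTEs are advertised by AREX like any other RTE.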
%package plugins-globus Summary: ARC Globus plugins (compat) Group: System Environment/Libraries Requires: %{name}-plugins-gridftp = %{version}-%{release} Requires: %{name}-plugins-lcas-lcmaps = %{version}-%{release} %description plugins-globus NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC Globus plugins. This compat metapackage brings in all Globus-dependent plugins at once, including the Data Manager Components (DMCs) and the LCAS/LCMAPS tools. This package is meant to allow a smooth transition and will be removed in an upcoming release. %package plugins-globus-common Summary: ARC Globus plugins common libraries Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-globus-common NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC Globus plugins common libraries package bundles the Globus libraries needed by all other Globus-dependent ARC components. %package plugins-gridftp Summary: ARC Globus dependent DMCs Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-globus-common = %{version}-%{release} %description plugins-gridftp NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC Globus GridFTP plugins. These allow access to data through the GridFTP protocol. %package plugins-lcas-lcmaps Summary: ARC LCAS/LCMAPS plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-globus-common = %{version}-%{release} %if %{?fedora}%{!?fedora:0} >= 23 || %{?rhel}%{!?rhel:0} >= 5 Requires: globus-gssapi-gsi >= 12.2 %else Requires: globus-gssapi-gsi < 12.2 %endif %description plugins-lcas-lcmaps NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC LCAS/LCMAPS tools allow configuring an ARC CE to use LCAS/LCMAPS services for authorization and mapping. %if %{with_xrootd} %package plugins-xrootd Summary: ARC xrootd plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-xrootd NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC xrootd plugins. These allow access to data through the xrootd protocol. %endif %if %{with_gfal} %package plugins-gfal Summary: ARC GFAL2 plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-gfal NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC plugins for GFAL2. This allows third-party transfer and adds support for several extra transfer protocols (rfio, dcap, gsidcap). Support for specific protocols is provided by separate third-party GFAL2 plugin packages. %endif %if %{with_s3} %package plugins-s3 Summary: ARC S3 plugins Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} %description plugins-s3 NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC plugins for S3. These allow access to data through the S3 protocol. 
%endif %package plugins-internal Summary: ARC internal plugin Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-arex = %{version}-%{release} %description plugins-internal NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). The ARC internal plugin: a special interface aimed at restrictive HPC sites, to be used with a local installation of the ARC Control Tower. %package plugins-python Summary: ARC Python dependent plugin Group: System Environment/Libraries Requires: %{name} = %{version}-%{release} Requires: python%{python3_pkgversion}-%{name} = %{version}-%{release} %description plugins-python NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). ARC plugins dependent on Python. %package devel Summary: ARC development files Group: Development/Libraries Requires: %{name} = %{version}-%{release} %if %{?fedora}%{!?fedora:0} || %{?rhel}%{!?rhel:0} >= 10 Requires: glibmm2.68-devel %else Requires: glibmm24-devel %endif Requires: libxml2-devel Requires: openssl-devel %description devel NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). Header files and libraries needed to develop applications using ARC. %package -n python%{python3_pkgversion}-%{name} Summary: ARC Python 3 wrapper Group: Development/Libraries %{?python_provide:%python_provide python%{python3_pkgversion}-%{name}} Provides: %{name}-python%{python3_pkgversion} = %{version}-%{release} Obsoletes: %{name}-python%{python3_pkgversion} < 5.3.3 Requires: %{name} = %{version}-%{release} %description -n python%{python3_pkgversion}-%{name} NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). Python 3 bindings for ARC. %package test-utils Summary: ARC test tools Group: Applications/Internet Requires: %{name} = %{version}-%{release} Requires: %{name}-plugins-needed = %{version}-%{release} Obsoletes: %{name}-misc-utils < 6.0.0 %description test-utils NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains a few utilities useful to test various ARC subsystems. The package is not required by users or sysadmins; it is mainly for developers. %package archery-manage Summary: ARCHERY administration tool Group: Applications/Internet %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python3-dns Requires: python3-ldap %description archery-manage NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the archery-manage utility for administration of an ARCHERY DNS-embedded service endpoint registry. %package wn Summary: ARC optional worker node components Group: Applications/Internet %description wn NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the optional components that provide new job management features on the worker nodes (WN). 
%package -n python%{python3_pkgversion}-arcrest Summary: ARC REST client Group: Applications/Internet %{?python_provide:%python_provide python%{python3_pkgversion}-arcrest} %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif %description -n python%{python3_pkgversion}-arcrest NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the ARC REST client. %package arc-exporter Summary: ARC Prometheus exporter service Group: Applications/Internet %if %{?fedora}%{!?fedora:0} >= 10 || %{?rhel}%{!?rhel:0} >= 6 BuildArch: noarch %endif Requires: python3-prometheus_client %description arc-exporter NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). This package contains the Prometheus arc-exporter, which collects and publishes metrics about jobs and data staging on the ARC CE. %prep %setup -q %build autoreconf -v -f -i %configure --disable-static \ %if %{with_gfal} --enable-gfal \ %endif %if %{with_s3} --enable-s3 \ %endif --with-python=python3 \ %if ! %{with_pylint} --disable-pylint \ %endif %if ! %{with_xrootd} --disable-xrootd \ %endif %if ! %{with_ldns} --disable-ldns \ %endif --enable-internal \ %if %{use_systemd} --enable-systemd \ --with-systemd-units-location=%{_unitdir} \ %endif %if ! %{with_ldap_service} --disable-ldap-service \ %endif --disable-doc \ --docdir=%{_pkgdocdir} make %{?_smp_mflags} %check make %{?_smp_mflags} check %install rm -rf $RPM_BUILD_ROOT make install DESTDIR=$RPM_BUILD_ROOT # Install Logrotate. mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d install -p -m 644 debian/%{name}-arex.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-arex %if %{with_ldap_service} install -p -m 644 debian/%{name}-infosys-ldap.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-infosys-ldap %endif install -p -m 644 debian/%{name}-datadelivery-service.logrotate \ $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/%{name}-datadelivery-service find $RPM_BUILD_ROOT -type f -name \*.la -exec rm -fv '{}' ';' # The py-compile script in the source tarball is old (RHEL 6). # It does the wrong thing for Python 3 - remove the compiled files and let rpmbuild do it right find $RPM_BUILD_ROOT -type f -name \*.pyc -exec rm -fv '{}' ';' find $RPM_BUILD_ROOT -type f -name \*.pyo -exec rm -fv '{}' ';' # libarcglobusutils is not part of the ARC API. 
find $RPM_BUILD_ROOT -name libarcglobusutils.so -exec rm -fv '{}' ';' rm -f $RPM_BUILD_ROOT%{python3_sitelib}/pyarcrest-*.*-info/direct_url.json # Create log directory mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/arc # Create spool directories for Jura mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/ssm mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/spool/arc/urs # create config directory mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/arc.conf.d %find_lang %{name} # Remove examples and let RPM package them under /usr/share/doc using the doc macro rm -rf $RPM_BUILD_ROOT%{_datadir}/%{pkgdir}/examples make -C src/libs/data-staging/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/compute/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/libs/data/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/hed/acc/PythonBroker DESTDIR=$PWD/docdir/python pkgdatadir= install-exampleDATA make -C python/examples DESTDIR=$PWD/docdir/devel pkgdatadir= install-exampleDATA make -C src/tests/echo DESTDIR=$PWD/docdir/hed pkgdatadir= install-exampleDATA make -C src/hed DESTDIR=$PWD/docdir/hed pkgdatadir= install-profileDATA # client.conf needs special handling make -C src/clients DESTDIR=$RPM_BUILD_ROOT install-exampleDATA # Link to client.conf from doc ln -s %{_datadir}/%{pkgdir}/examples/client.conf $PWD/docdir/client.conf %clean rm -rf $RPM_BUILD_ROOT %post -p /sbin/ldconfig %postun -p /sbin/ldconfig %post plugins-globus-common -p /sbin/ldconfig %postun plugins-globus-common -p /sbin/ldconfig %post hed %enable_service arched %preun hed %stop_on_removal arched %postun hed %condrestart_on_update arched %post arex %enable_service arc-arex %enable_service arc-arex-ws # out-of-package testing host certificate if [ $1 -eq 1 ]; then arcctl test-ca init arcctl test-ca hostcert fi %preun arex %stop_on_removal arc-arex %stop_on_removal arc-arex-ws if [ $1 -eq 0 ]; then arcctl test-ca cleanup fi %postun arex %condrestart_on_update arc-arex %condrestart_on_update arc-arex-ws %post datadelivery-service %enable_service arc-datadelivery-service %preun datadelivery-service %stop_on_removal arc-datadelivery-service %postun datadelivery-service %condrestart_on_update arc-datadelivery-service %if %{with_ldap_service} %post infosys-ldap %enable_service arc-infosys-ldap %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 semanage port -a -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -a -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : semanage fcontext -a -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -a -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : %endif %preun infosys-ldap %stop_on_removal arc-infosys-ldap %postun infosys-ldap %condrestart_on_update arc-infosys-ldap %if %{?fedora}%{!?fedora:0} >= 5 || %{?rhel}%{!?rhel:0} >= 5 if [ $1 -eq 0 ]; then semanage port -d -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -d -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : semanage fcontext -d -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -d -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 
2>/dev/null || : fi %endif %triggerpostun infosys-ldap -- %{name}-ldap-infosys # Uninstalling the old %{name}-ldap-infosys will remove some selinux config # for %{name}-infosys-ldap - put them back in this triggerpostun script semanage port -a -t ldap_port_t -p tcp 2135 2>/dev/null || : semanage fcontext -a -t slapd_etc_t "/var/run/arc/infosys/bdii-slapd\.conf" 2>/dev/null || : %triggerpostun infosys-ldap -- %{name}-aris # Uninstalling the old %{name}-aris will remove some selinux config # for %{name}-infosys-ldap - put them back in this triggerpostun script semanage fcontext -a -t slapd_db_t "/var/lib/arc/bdii/db(/.*)?" 2>/dev/null || : semanage fcontext -a -t slapd_var_run_t "/var/run/arc/bdii/db(/.*)?" 2>/dev/null || : %triggerun infosys-ldap -- bdii %if %{?suse_version:1}%{!?suse_version:0} FIRST_ARG=1 %restart_on_update arc-infosys-ldap %else service arc-infosys-ldap condrestart > /dev/null 2>&1 || : %endif %endif %files -f %{name}.lang %defattr(-,root,root,-) %doc src/doc/arc.conf.reference src/doc/arc.conf.DELETED %doc README AUTHORS LICENSE NOTICE %{_libdir}/libarccompute.so.* %{_libdir}/libarccommunication.so.* %{_libdir}/libarccommon.so.* %{_libdir}/libarccredential.so.* %{_libdir}/libarccredentialstore.so.* %{_libdir}/libarccrypto.so.* %{_libdir}/libarcdata.so.* %{_libdir}/libarcdatastaging.so.* %{_libdir}/libarcloader.so.* %{_libdir}/libarcmessage.so.* %{_libdir}/libarcsecurity.so.* %{_libdir}/libarcotokens.so.* %{_libdir}/libarcinfosys.so.* %{_libdir}/libarcwsaddressing.so.* %{_libdir}/libarcwssecurity.so.* %if %{with_xmlsec1} %{_libdir}/libarcxmlsec.so.* %endif %dir %{_libdir}/%{pkgdir} # We need to have libmodcrypto.so close to libarccrypto %{_libdir}/%{pkgdir}/libmodcrypto.so %{_libdir}/%{pkgdir}/libmodcrypto.apd # We need to have libmodcredential.so close to libarccredential %{_libdir}/%{pkgdir}/libmodcredential.so %{_libdir}/%{pkgdir}/libmodcredential.apd %{_libdir}/%{pkgdir}/arc-file-access %{_libdir}/%{pkgdir}/arc-hostname-resolver %{_libdir}/%{pkgdir}/DataStagingDelivery %{_libdir}/%{pkgdir}/arc-dmc %dir %{_libexecdir}/%{pkgdir} %{_libexecdir}/%{pkgdir}/arcconfig-parser %dir %{python3_sitearch}/%{pkgdir} %{python3_sitearch}/%{pkgdir}/utils %{python3_sitearch}/%{pkgdir}/__init__.py %{python3_sitearch}/%{pkgdir}/paths.py %{python3_sitearch}/%{pkgdir}/paths_dist.py %dir %{python3_sitearch}/%{pkgdir}/__pycache__ %{python3_sitearch}/%{pkgdir}/__pycache__/__init__.* %{python3_sitearch}/%{pkgdir}/__pycache__/paths.* %{python3_sitearch}/%{pkgdir}/__pycache__/paths_dist.* %dir %{_datadir}/%{pkgdir} %{_datadir}/%{pkgdir}/arc.parser.defaults %dir %{_datadir}/%{pkgdir}/test-jobs %{_datadir}/%{pkgdir}/test-jobs/test-job-* %{_datadir}/%{pkgdir}/schema %files client %defattr(-,root,root,-) %doc docdir/client.conf %{_bindir}/arccat %{_bindir}/arcclean %{_bindir}/arccp %{_bindir}/arcget %{_bindir}/arcinfo %{_bindir}/arckill %{_bindir}/arcls %{_bindir}/arcmkdir %{_bindir}/arcrename %{_bindir}/arcproxy %{_bindir}/arcrenew %{_bindir}/arcresume %{_bindir}/arcrm %{_bindir}/arcstat %{_bindir}/arcsub %{_bindir}/arcsync %{_bindir}/arctest %dir %{_datadir}/%{pkgdir}/examples %{_datadir}/%{pkgdir}/examples/client.conf %dir %{_sysconfdir}/%{pkgdir} %config(noreplace) %{_sysconfdir}/%{pkgdir}/client.conf %doc %{_mandir}/man1/arccat.1* %doc %{_mandir}/man1/arcclean.1* %doc %{_mandir}/man1/arccp.1* %doc %{_mandir}/man1/arcget.1* %doc %{_mandir}/man1/arcinfo.1* %doc %{_mandir}/man1/arckill.1* %doc %{_mandir}/man1/arcls.1* %doc %{_mandir}/man1/arcmkdir.1* %doc %{_mandir}/man1/arcrename.1* %doc 
%{_mandir}/man1/arcproxy.1* %doc %{_mandir}/man1/arcrenew.1* %doc %{_mandir}/man1/arcresume.1* %doc %{_mandir}/man1/arcrm.1* %doc %{_mandir}/man1/arcstat.1* %doc %{_mandir}/man1/arcsub.1* %doc %{_mandir}/man1/arcsync.1* %doc %{_mandir}/man1/arctest.1* %dir %{_bashcompdir} %{_bashcompdir}/arc-client-tools %files hed %defattr(-,root,root,-) %doc docdir/hed/* %if %{use_systemd} %{_unitdir}/arched.service %else %{_initrddir}/arched %endif %{_sbindir}/arched %{_libdir}/%{pkgdir}/libecho.so %{_libdir}/%{pkgdir}/libecho.apd %{_datadir}/%{pkgdir}/arched-start %{_datadir}/%{pkgdir}/profiles %doc %{_mandir}/man8/arched.8* %doc %{_mandir}/man5/arc.conf.5* %files datadelivery-service %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-datadelivery-service.service %else %{_initrddir}/arc-datadelivery-service %endif %{_libdir}/%{pkgdir}/libdatadeliveryservice.so %{_libdir}/%{pkgdir}/libdatadeliveryservice.apd %{_datadir}/%{pkgdir}/arc-datadelivery-service-start %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-datadelivery-service %if %{with_ldap_service} %files infosys-ldap %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-infosys-ldap.service %{_unitdir}/arc-infosys-ldap-slapd.service %else %{_initrddir}/arc-infosys-ldap %endif %{_datadir}/%{pkgdir}/create-bdii-config %{_datadir}/%{pkgdir}/create-slapd-config %{_datadir}/%{pkgdir}/ldap-schema %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-infosys-ldap %endif %files monitor %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/monitor %doc %{_mandir}/man7/monitor.7* %files arcctl %{_sbindir}/arcctl %dir %{python3_sitearch}/%{pkgdir}/control %{python3_sitearch}/%{pkgdir}/control/__init__.py %{python3_sitearch}/%{pkgdir}/control/CertificateGenerator.py %{python3_sitearch}/%{pkgdir}/control/ControlCommon.py %{python3_sitearch}/%{pkgdir}/control/OSPackage.py %{python3_sitearch}/%{pkgdir}/control/TestCA.py %{python3_sitearch}/%{pkgdir}/control/TestJWT.py %{python3_sitearch}/%{pkgdir}/control/ThirdPartyDeployment.py %dir %{python3_sitearch}/%{pkgdir}/control/__pycache__ %{python3_sitearch}/%{pkgdir}/control/__pycache__/__init__.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/CertificateGenerator.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/ControlCommon.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/OSPackage.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/TestCA.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/TestJWT.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/ThirdPartyDeployment.* %doc %{_mandir}/man1/arcctl.1* %files arcctl-service %{python3_sitearch}/%{pkgdir}/control/Cleanup.py %{python3_sitearch}/%{pkgdir}/control/Config.py %{python3_sitearch}/%{pkgdir}/control/ServiceCommon.py %{python3_sitearch}/%{pkgdir}/control/Services.py %{python3_sitearch}/%{pkgdir}/control/OSService.py %{python3_sitearch}/%{pkgdir}/control/Validator.py %{python3_sitearch}/%{pkgdir}/control/__pycache__/Cleanup.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Config.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/ServiceCommon.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Services.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/OSService.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Validator.* %files arex %defattr(-,root,root,-) %if %{use_systemd} %{_unitdir}/arc-arex.service %{_unitdir}/arc-arex-ws.service %else %{_initrddir}/arc-arex %{_initrddir}/arc-arex-ws %endif %{_libexecdir}/%{pkgdir}/cache-clean %{_libexecdir}/%{pkgdir}/cache-list %{_libexecdir}/%{pkgdir}/jura-ng %{_libexecdir}/%{pkgdir}/gm-jobs 
%{_libexecdir}/%{pkgdir}/gm-kick %{_libexecdir}/%{pkgdir}/smtp-send %{_libexecdir}/%{pkgdir}/smtp-send.sh %{_libexecdir}/%{pkgdir}/inputcheck %{_libexecdir}/%{pkgdir}/arc-blahp-logger %{_libdir}/%{pkgdir}/libarex.so %{_libdir}/%{pkgdir}/libarex.apd %{_libdir}/%{pkgdir}/libcandypond.so %{_libdir}/%{pkgdir}/libcandypond.apd %{_datadir}/%{pkgdir}/cancel-condor-job %{_datadir}/%{pkgdir}/cancel-fork-job %{_datadir}/%{pkgdir}/cancel-SLURM-job %{_datadir}/%{pkgdir}/scan-condor-job %{_datadir}/%{pkgdir}/scan-fork-job %{_datadir}/%{pkgdir}/scan-SLURM-job %{_datadir}/%{pkgdir}/submit-condor-job %{_datadir}/%{pkgdir}/submit-fork-job %{_datadir}/%{pkgdir}/submit-SLURM-job %{_datadir}/%{pkgdir}/CEinfo.pl %{_datadir}/%{pkgdir}/ARC0mod.pm %{_datadir}/%{pkgdir}/Condor.pm %{_datadir}/%{pkgdir}/Fork.pm %{_datadir}/%{pkgdir}/FORKmod.pm %{_datadir}/%{pkgdir}/SLURM.pm %{_datadir}/%{pkgdir}/SLURMmod.pm %{_datadir}/%{pkgdir}/XmlPrinter.pm %{_datadir}/%{pkgdir}/InfosysHelper.pm %{_datadir}/%{pkgdir}/LdifPrinter.pm %{_datadir}/%{pkgdir}/GLUE2xmlPrinter.pm %{_datadir}/%{pkgdir}/GLUE2ldifPrinter.pm %{_datadir}/%{pkgdir}/NGldifPrinter.pm %{_datadir}/%{pkgdir}/ARC0ClusterInfo.pm %{_datadir}/%{pkgdir}/ARC1ClusterInfo.pm %{_datadir}/%{pkgdir}/ConfigCentral.pm %{_datadir}/%{pkgdir}/GMJobsInfo.pm %{_datadir}/%{pkgdir}/HostInfo.pm %{_datadir}/%{pkgdir}/RTEInfo.pm %{_datadir}/%{pkgdir}/InfoChecker.pm %{_datadir}/%{pkgdir}/IniParser.pm %{_datadir}/%{pkgdir}/LRMSInfo.pm %{_datadir}/%{pkgdir}/Sysinfo.pm %{_datadir}/%{pkgdir}/LogUtils.pm %{_datadir}/%{pkgdir}/condor_env.pm %{_datadir}/%{pkgdir}/cancel_common.sh %{_datadir}/%{pkgdir}/configure-*-env.sh %{_datadir}/%{pkgdir}/submit_common.sh %{_datadir}/%{pkgdir}/scan_common.sh %{_datadir}/%{pkgdir}/lrms_common.sh %{_datadir}/%{pkgdir}/perferator %{_datadir}/%{pkgdir}/update-controldir %{_datadir}/%{pkgdir}/PerfData.pl %{_datadir}/%{pkgdir}/arc-arex-start %{_datadir}/%{pkgdir}/arc-arex-ws-start %dir %{_datadir}/%{pkgdir}/sql-schema %{_datadir}/%{pkgdir}/sql-schema/arex_accounting_db_schema_v2.sql %doc %{_mandir}/man1/cache-clean.1* %doc %{_mandir}/man1/cache-list.1* %doc %{_mandir}/man8/gm-jobs.8* %doc %{_mandir}/man8/arc-blahp-logger.8* %doc %{_mandir}/man8/a-rex-backtrace-collect.8* %config(noreplace) %{_sysconfdir}/logrotate.d/%{name}-arex %dir %{_localstatedir}/log/arc %dir %{_localstatedir}/spool/arc %dir %{_localstatedir}/spool/arc/ssm %dir %{_localstatedir}/spool/arc/urs %{python3_sitearch}/%{pkgdir}/control/AccountingDB.py %{python3_sitearch}/%{pkgdir}/control/AccountingPublishing.py %{python3_sitearch}/%{pkgdir}/control/Accounting.py %{python3_sitearch}/%{pkgdir}/control/Cache.py %{python3_sitearch}/%{pkgdir}/control/DataStaging.py %{python3_sitearch}/%{pkgdir}/control/Jobs.py %{python3_sitearch}/%{pkgdir}/control/RunTimeEnvironment.py %{python3_sitearch}/%{pkgdir}/control/__pycache__/AccountingDB.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/AccountingPublishing.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Accounting.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Cache.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/DataStaging.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/Jobs.* %{python3_sitearch}/%{pkgdir}/control/__pycache__/RunTimeEnvironment.* %{_libexecdir}/%{pkgdir}/arccandypond %dir %{_datadir}/%{pkgdir}/rte %dir %{_datadir}/%{pkgdir}/rte/ENV %{_datadir}/%{pkgdir}/rte/ENV/LRMS-SCRATCH %{_datadir}/%{pkgdir}/rte/ENV/PROXY %{_datadir}/%{pkgdir}/rte/ENV/RTE %{_datadir}/%{pkgdir}/rte/ENV/CANDYPOND %{_datadir}/%{pkgdir}/rte/ENV/SINGULARITY 
%dir %{_datadir}/%{pkgdir}/rte/ENV/CONDOR %{_datadir}/%{pkgdir}/rte/ENV/CONDOR/DOCKER %{_sbindir}/a-rex-backtrace-collect %config(noreplace) %{_sysconfdir}/arc.conf %dir %{_sysconfdir}/arc.conf.d %files arex-lrms-contrib %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/cancel-boinc-job %{_datadir}/%{pkgdir}/cancel-ll-job %{_datadir}/%{pkgdir}/cancel-lsf-job %{_datadir}/%{pkgdir}/cancel-pbs-job %{_datadir}/%{pkgdir}/cancel-pbspro-job %{_datadir}/%{pkgdir}/cancel-sge-job %{_datadir}/%{pkgdir}/scan-boinc-job %{_datadir}/%{pkgdir}/scan-ll-job %{_datadir}/%{pkgdir}/scan-lsf-job %{_datadir}/%{pkgdir}/scan-pbs-job %{_datadir}/%{pkgdir}/scan-pbspro-job %{_datadir}/%{pkgdir}/scan-sge-job %{_datadir}/%{pkgdir}/submit-boinc-job %{_datadir}/%{pkgdir}/submit-ll-job %{_datadir}/%{pkgdir}/submit-lsf-job %{_datadir}/%{pkgdir}/submit-pbs-job %{_datadir}/%{pkgdir}/submit-pbspro-job %{_datadir}/%{pkgdir}/submit-sge-job %{_datadir}/%{pkgdir}/Boinc.pm %{_datadir}/%{pkgdir}/LL.pm %{_datadir}/%{pkgdir}/LSF.pm %{_datadir}/%{pkgdir}/PBS.pm %{_datadir}/%{pkgdir}/PBSPRO.pm %{_datadir}/%{pkgdir}/SGE.pm %{_datadir}/%{pkgdir}/SGEmod.pm %files community-rtes %defattr(-,root,root,-) %{_datadir}/%{pkgdir}/community_rtes.sh %{python3_sitearch}/%{pkgdir}/control/CommunityRTE.py %{python3_sitearch}/%{pkgdir}/control/__pycache__/CommunityRTE.* %files plugins-needed %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/test %{_libdir}/%{pkgdir}/test/libaccTEST.so %{_libdir}/%{pkgdir}/libaccARCREST.so %{_libdir}/%{pkgdir}/libaccBroker.so %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.so %if %{with_ldns} %{_libdir}/%{pkgdir}/libaccARCHERY.so %endif %{_libdir}/%{pkgdir}/libarcshc.so %{_libdir}/%{pkgdir}/libarcshclegacy.so %{_libdir}/%{pkgdir}/libarcshcotokens.so %{_libdir}/%{pkgdir}/libdmcfile.so %{_libdir}/%{pkgdir}/libdmchttp.so %{_libdir}/%{pkgdir}/libdmcsrm.so %{_libdir}/%{pkgdir}/libdmcrucio.so %{_libdir}/%{pkgdir}/libidentitymap.so %{_libdir}/%{pkgdir}/libmcchttp.so %{_libdir}/%{pkgdir}/libmccmsgvalidator.so %{_libdir}/%{pkgdir}/libmccsoap.so %{_libdir}/%{pkgdir}/libmcctcp.so %{_libdir}/%{pkgdir}/libmcctls.so %{_libdir}/%{pkgdir}/test/libaccTEST.apd %{_libdir}/%{pkgdir}/libaccARCREST.apd %{_libdir}/%{pkgdir}/libaccBroker.apd %{_libdir}/%{pkgdir}/libaccJobDescriptionParser.apd %if %{with_ldns} %{_libdir}/%{pkgdir}/libaccARCHERY.apd %endif %{_libdir}/%{pkgdir}/libarcshc.apd %{_libdir}/%{pkgdir}/libarcshclegacy.apd %{_libdir}/%{pkgdir}/libarcshcotokens.apd %{_libdir}/%{pkgdir}/libdmcfile.apd %{_libdir}/%{pkgdir}/libdmchttp.apd %{_libdir}/%{pkgdir}/libdmcsrm.apd %{_libdir}/%{pkgdir}/libdmcrucio.apd %{_libdir}/%{pkgdir}/libidentitymap.apd %{_libdir}/%{pkgdir}/libmcchttp.apd %{_libdir}/%{pkgdir}/libmccmsgvalidator.apd %{_libdir}/%{pkgdir}/libmccsoap.apd %{_libdir}/%{pkgdir}/libmcctcp.apd %{_libdir}/%{pkgdir}/libmcctls.apd %files plugins-globus %defattr(-,root,root,-) %files plugins-globus-common %defattr(-,root,root,-) %{_libdir}/libarcglobusutils.so.* %files plugins-gridftp %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/arc-dmcgridftp %{_libdir}/%{pkgdir}/libdmcgridftpdeleg.so %{_libdir}/%{pkgdir}/libdmcgridftpdeleg.apd %files plugins-lcas-lcmaps %defattr(-,root,root,-) %{_libexecdir}/%{pkgdir}/arc-lcas %{_libexecdir}/%{pkgdir}/arc-lcmaps %if %{with_xrootd} %files plugins-xrootd %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/external %{_libdir}/%{pkgdir}/external/libdmcxrootd.so %{_libdir}/%{pkgdir}/external/libdmcxrootd.apd %{_libdir}/%{pkgdir}/libdmcxrootddeleg.so %{_libdir}/%{pkgdir}/libdmcxrootddeleg.apd %endif %if %{with_gfal} 
%files plugins-gfal %defattr(-,root,root,-) %dir %{_libdir}/%{pkgdir}/external %{_libdir}/%{pkgdir}/external/libdmcgfal.so %{_libdir}/%{pkgdir}/external/libdmcgfal.apd %{_libdir}/%{pkgdir}/libdmcgfaldeleg.so %{_libdir}/%{pkgdir}/libdmcgfaldeleg.apd %endif %if %{with_s3} %files plugins-s3 %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libdmcs3.so %{_libdir}/%{pkgdir}/libdmcs3.apd %endif %files plugins-internal %defattr(-,root,root,-) %{_libdir}/%{pkgdir}/libaccINTERNAL.so %{_libdir}/%{pkgdir}/libaccINTERNAL.apd %files plugins-python %defattr(-,root,root,-) %doc docdir/python/* %{_libdir}/%{pkgdir}/libaccPythonBroker.so %{_libdir}/%{pkgdir}/libaccPythonBroker.apd %{_libdir}/%{pkgdir}/libpythonservice.so %{_libdir}/%{pkgdir}/libpythonservice.apd %files devel %defattr(-,root,root,-) %doc docdir/devel/* src/hed/shc/arcpdp/*.xsd %{_includedir}/%{pkgdir} %{_libdir}/lib*.so %{_bindir}/wsdl2hed %doc %{_mandir}/man1/wsdl2hed.1* %{_bindir}/arcplugin %doc %{_mandir}/man1/arcplugin.1* %files -n python%{python3_pkgversion}-%{name} %defattr(-,root,root,-) %{python3_sitearch}/_arc.*so %{python3_sitearch}/%{pkgdir}/[^_p]*.py %{python3_sitearch}/%{pkgdir}/__pycache__/[^_p]*.* %files test-utils %defattr(-,root,root,-) %{_bindir}/arcperftest %doc %{_mandir}/man1/arcperftest.1* %files archery-manage %defattr(-,root,root,-) %{_sbindir}/archery-manage %files wn %defattr(-,root,root,-) %attr(4755,root,root) %{_bindir}/arc-job-cgroup %files -n python%{python3_pkgversion}-arcrest %defattr(-,root,root,-) %{python3_sitelib}/pyarcrest %{python3_sitelib}/pyarcrest-*.*-info %{_bindir}/arcrest %files arc-exporter %defattr(-,root,root,-) %{_sbindir}/arc-exporter %changelog * Fri Oct 03 2025 Anders Waananen - 7.1.1-1 - Unofficial build nordugrid-arc-7.1.1/PaxHeaders/config.rpath0000644000000000000000000000013115067751331015652 xustar0030 mtime=1759498969.741503772 29 atime=1759498995.87191921 30 ctime=1759499024.702333452 nordugrid-arc-7.1.1/config.rpath0000755000175000002070000004364715067751331017576 0ustar00mockbuildmock00000000000000#! /bin/sh # Output a system dependent set of variables, describing how to set the # run time search path of shared libraries in an executable. # # Copyright 1996-2007 Free Software Foundation, Inc. # Taken from GNU libtool, 2001 # Originally by Gordon Matzigkeit , 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # # The first argument passed to this file is the canonical host specification, # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld # should be set by the caller. # # The set of defined variables is at the end of this script. # Known limitations: # - On IRIX 6.5 with CC="cc", the run time search patch must not be longer # than 256 bytes, otherwise the compiler driver will dump core. The only # known workaround is to choose shorter directory names for the build # directory and/or the installation directory. # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a shrext=.so host="$1" host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` # Code taken from libtool.m4's _LT_CC_BASENAME. 
for cc_temp in $CC""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'` # Code taken from libtool.m4's AC_LIBTOOL_PROG_COMPILER_PIC. wl= if test "$GCC" = yes; then wl='-Wl,' else case "$host_os" in aix*) wl='-Wl,' ;; darwin*) case $cc_basename in xlc*) wl='-Wl,' ;; esac ;; mingw* | cygwin* | pw32* | os2*) ;; hpux9* | hpux10* | hpux11*) wl='-Wl,' ;; irix5* | irix6* | nonstopux*) wl='-Wl,' ;; newsos6) ;; linux* | k*bsd*-gnu) case $cc_basename in icc* | ecc*) wl='-Wl,' ;; pgcc | pgf77 | pgf90) wl='-Wl,' ;; ccc*) wl='-Wl,' ;; como) wl='-lopt=' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) wl='-Wl,' ;; esac ;; esac ;; osf3* | osf4* | osf5*) wl='-Wl,' ;; rdos*) ;; solaris*) wl='-Wl,' ;; sunos4*) wl='-Qoption ld ' ;; sysv4 | sysv4.2uw2* | sysv4.3*) wl='-Wl,' ;; sysv4*MP*) ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) wl='-Wl,' ;; unicos*) wl='-Wl,' ;; uts4*) ;; esac fi # Code taken from libtool.m4's AC_LIBTOOL_PROG_LD_SHLIBS. hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_direct=no hardcode_minus_L=no case "$host_os" in cygwin* | mingw* | pw32*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. # Unlike libtool, we use -rpath here, not --rpath, since the documented # option of GNU ld is called -rpath, not --rpath. hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' case "$host_os" in aix3* | aix4* | aix5*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no fi ;; amigaos*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes # Samuel A. Falvo II reports # that the semantics of dynamic libraries on AmigaOS, at least up # to version 4, is to share data among multiple programs linked # with the same dynamic library. Since this doesn't match the # behavior of shared libraries on other platforms, we cannot use # them. ld_shlibs=no ;; beos*) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; cygwin* | mingw* | pw32*) # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. 
hardcode_libdir_flag_spec='-L$libdir' if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then : else ld_shlibs=no fi ;; interix[3-9]*) hardcode_direct=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; gnu* | linux* | k*bsd*-gnu) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; netbsd*) ;; solaris*) if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then ld_shlibs=no elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no ;; *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' else ld_shlibs=no fi ;; esac ;; sunos4*) hardcode_direct=yes ;; *) if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then : else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then hardcode_libdir_flag_spec= fi else case "$host_os" in aix3*) # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix4* | aix5*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix5*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac fi hardcode_direct=yes hardcode_libdir_separator=':' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && \ strings "$collect2name" | grep resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac fi # Begin _LT_AC_SYS_LIBPATH_AIX. echo 'int main () { return 0; }' > conftest.c ${CC} ${LDFLAGS} conftest.c -o conftest aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` if test -z "$aix_libpath"; then aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } }'` fi if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib" fi rm -f conftest.c conftest # End _LT_AC_SYS_LIBPATH_AIX. if test "$aix_use_runtimelinking" = yes; then hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' else hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" fi fi ;; amigaos*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes # see comment about different semantics on the GNU ld section ld_shlibs=no ;; bsdi[45]*) ;; cygwin* | mingw* | pw32*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. 
hardcode_libdir_flag_spec=' ' libext=lib ;; darwin* | rhapsody*) hardcode_direct=no if test "$GCC" = yes ; then : else case $cc_basename in xlc*) ;; *) ld_shlibs=no ;; esac fi ;; dgux*) hardcode_libdir_flag_spec='-L$libdir' ;; freebsd1*) ld_shlibs=no ;; freebsd2.2*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; freebsd2*) hardcode_direct=yes hardcode_minus_L=yes ;; freebsd* | dragonfly*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; hpux9*) hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; hpux10*) if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no ;; *) hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; netbsd*) hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes ;; newsos6) hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then hardcode_libdir_flag_spec='${wl}-rpath,$libdir' else case "$host_os" in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) hardcode_libdir_flag_spec='-R$libdir' ;; *) hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; osf3*) hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) if test "$GCC" = yes; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else # Both cc and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi hardcode_libdir_separator=: ;; solaris*) hardcode_libdir_flag_spec='-R$libdir' ;; sunos4*) hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes ;; sysv4) case $host_vendor in sni) hardcode_direct=yes # is this really true??? ;; siemens) hardcode_direct=no ;; motorola) hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac ;; sysv4.3*) ;; sysv4*MP*) if test -d /usr/nec; then ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) ;; sysv5* | sco3.2v5* | sco5v6*) hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' hardcode_libdir_separator=':' ;; uts4*) hardcode_libdir_flag_spec='-L$libdir' ;; *) ld_shlibs=no ;; esac fi # Check dynamic linker characteristics # Code taken from libtool.m4's AC_LIBTOOL_SYS_DYNAMIC_LINKER. # Unlike libtool.m4, here we don't care about _all_ names of the library, but # only about the one the linker finds when passed -lNAME. This is the last # element of library_names_spec in libtool.m4, or possibly two of them if the # linker has special search rules. 
library_names_spec= # the last element of library_names_spec in libtool.m4 libname_spec='lib$name' case "$host_os" in aix3*) library_names_spec='$libname.a' ;; aix4* | aix5*) library_names_spec='$libname$shrext' ;; amigaos*) library_names_spec='$libname.a' ;; beos*) library_names_spec='$libname$shrext' ;; bsdi[45]*) library_names_spec='$libname$shrext' ;; cygwin* | mingw* | pw32*) shrext=.dll library_names_spec='$libname.dll.a $libname.lib' ;; darwin* | rhapsody*) shrext=.dylib library_names_spec='$libname$shrext' ;; dgux*) library_names_spec='$libname$shrext' ;; freebsd1*) ;; freebsd* | dragonfly*) case "$host_os" in freebsd[123]*) library_names_spec='$libname$shrext$versuffix' ;; *) library_names_spec='$libname$shrext' ;; esac ;; gnu*) library_names_spec='$libname$shrext' ;; hpux9* | hpux10* | hpux11*) case $host_cpu in ia64*) shrext=.so ;; hppa*64*) shrext=.sl ;; *) shrext=.sl ;; esac library_names_spec='$libname$shrext' ;; interix[3-9]*) library_names_spec='$libname$shrext' ;; irix5* | irix6* | nonstopux*) library_names_spec='$libname$shrext' case "$host_os" in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; *) libsuff= shlibsuff= ;; esac ;; esac ;; linux*oldld* | linux*aout* | linux*coff*) ;; linux* | k*bsd*-gnu) library_names_spec='$libname$shrext' ;; knetbsd*-gnu) library_names_spec='$libname$shrext' ;; netbsd*) library_names_spec='$libname$shrext' ;; newsos6) library_names_spec='$libname$shrext' ;; nto-qnx*) library_names_spec='$libname$shrext' ;; openbsd*) library_names_spec='$libname$shrext$versuffix' ;; os2*) libname_spec='$name' shrext=.dll library_names_spec='$libname.a' ;; osf3* | osf4* | osf5*) library_names_spec='$libname$shrext' ;; rdos*) ;; solaris*) library_names_spec='$libname$shrext' ;; sunos4*) library_names_spec='$libname$shrext$versuffix' ;; sysv4 | sysv4.3*) library_names_spec='$libname$shrext' ;; sysv4*MP*) library_names_spec='$libname$shrext' ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) library_names_spec='$libname$shrext' ;; uts4*) library_names_spec='$libname$shrext' ;; esac sed_quote_subst='s/\(["`$\\]\)/\\\1/g' escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` shlibext=`echo "$shrext" | sed -e 's,^\.,,'` escaped_libname_spec=`echo "X$libname_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` escaped_library_names_spec=`echo "X$library_names_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' < POTFILES.in.2 ; \ if diff $(srcdir)/POTFILES.in POTFILES.in.2 >/dev/null 2>&1 ; then \ rm -f POTFILES.in.2 ; \ else \ mv POTFILES.in.2 $(srcdir)/POTFILES.in ; \ fi DISTFILES.extra1 = Rules-POTFILES nordugrid-arc-7.1.1/po/PaxHeaders/hu.po0000644000000000000000000000013215067751432014742 xustar0030 mtime=1759499034.265502602 30 atime=1759499034.528506599 30 ctime=1759499034.632733591 nordugrid-arc-7.1.1/po/hu.po0000644000175000002070000174436215067751432016665 0ustar00mockbuildmock00000000000000# Translation of Arc.po to Hungarian # Gábor RÅ‘czei , 2010. 
# Translation file for the Advanced Resource Connector (Arc) msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2025-10-03 15:43+0200\n" "PO-Revision-Date: 2010-07-05 12:25+0100\n" "Last-Translator: Gábor RÅ‘czei \n" "Language-Team: Hungarian\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "X-Poedit-Language: Hungarian\n" "X-Poedit-Country: HUNGARY\n" "X-Poedit-SourceCharset: utf-8\n" #: src/clients/compute/arccat.cpp:38 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresume.cpp:32 #: src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "" #: src/clients/compute/arccat.cpp:39 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." msgstr "" #: src/clients/compute/arccat.cpp:46 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresume.cpp:37 src/clients/compute/arcstat.cpp:42 #: src/clients/compute/arcsub.cpp:53 src/clients/compute/arcsync.cpp:147 #: src/clients/compute/arctest.cpp:67 src/clients/credentials/arcproxy.cpp:484 #: src/clients/data/arccp.cpp:652 src/clients/data/arcls.cpp:371 #: src/clients/data/arcmkdir.cpp:149 src/clients/data/arcrename.cpp:160 #: src/clients/data/arcrm.cpp:174 src/hed/daemon/unix/main_unix.cpp:345 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1265 #: src/hed/libs/data/DataExternalHelper.cpp:358 #, c-format msgid "%s version %s" msgstr "%s verzió %s" #: src/clients/compute/arccat.cpp:55 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresume.cpp:46 src/clients/compute/arcstat.cpp:51 #: src/clients/compute/arcsub.cpp:62 src/clients/compute/arcsync.cpp:156 #: src/clients/compute/arctest.cpp:89 src/clients/credentials/arcproxy.cpp:492 #: src/clients/data/arccp.cpp:659 src/clients/data/arcls.cpp:379 #: src/clients/data/arcmkdir.cpp:157 src/clients/data/arcrename.cpp:168 #: src/clients/data/arcrm.cpp:183 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:192 #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, c-format msgid "Running command: %s" msgstr "" #: src/clients/compute/arccat.cpp:66 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresume.cpp:50 src/clients/compute/arcstat.cpp:62 #: src/clients/compute/arcsub.cpp:66 src/clients/compute/arcsync.cpp:167 #: src/clients/compute/arctest.cpp:93 src/clients/data/arccp.cpp:682 #: src/clients/data/arcls.cpp:401 src/clients/data/arcmkdir.cpp:179 #: src/clients/data/arcrename.cpp:190 src/clients/data/arcrm.cpp:205 msgid "Failed configuration initialization" msgstr "Nem sikerült betölteni a konfigurációt" #: src/clients/compute/arccat.cpp:78 src/clients/compute/arcclean.cpp:73 #: src/clients/compute/arcget.cpp:87 src/clients/compute/arckill.cpp:72 #: src/clients/compute/arcrenew.cpp:69 
src/clients/compute/arcresume.cpp:69 #: src/clients/compute/arcstat.cpp:74 #, fuzzy, c-format msgid "Cannot read specified jobid file: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arccat.cpp:108 src/clients/compute/arcclean.cpp:103 #: src/clients/compute/arcget.cpp:117 src/clients/compute/arckill.cpp:102 #: src/clients/compute/arcrenew.cpp:99 src/clients/compute/arcresume.cpp:99 #: src/clients/compute/arcstat.cpp:127 msgid "No jobs given" msgstr "Nem adott meg feladatot" #: src/clients/compute/arccat.cpp:121 src/clients/compute/arcclean.cpp:116 #: src/clients/compute/arcget.cpp:130 src/clients/compute/arckill.cpp:115 #: src/clients/compute/arcrenew.cpp:112 src/clients/compute/arcresume.cpp:112 #: src/clients/compute/arcstat.cpp:139 #, fuzzy, c-format msgid "Job list file (%s) doesn't exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/clients/compute/arccat.cpp:128 src/clients/compute/arcclean.cpp:123 #: src/clients/compute/arcget.cpp:137 src/clients/compute/arckill.cpp:122 #: src/clients/compute/arcrenew.cpp:119 src/clients/compute/arcresume.cpp:119 #: src/clients/compute/arcstat.cpp:146 src/clients/compute/arctest.cpp:296 #, fuzzy, c-format msgid "Unable to read job information from file (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arccat.cpp:137 src/clients/compute/arcclean.cpp:131 #: src/clients/compute/arcget.cpp:145 src/clients/compute/arckill.cpp:130 #: src/clients/compute/arcrenew.cpp:128 src/clients/compute/arcresume.cpp:128 #: src/clients/compute/arcstat.cpp:155 #, c-format msgid "Warning: Job not found in job list: %s" msgstr "" #: src/clients/compute/arccat.cpp:150 src/clients/compute/arcclean.cpp:186 #: src/clients/compute/arcget.cpp:158 src/clients/compute/arckill.cpp:142 #: src/clients/compute/arcrenew.cpp:140 src/clients/compute/arcresume.cpp:140 #, fuzzy msgid "No jobs" msgstr "Nem adott meg feladatot" #: src/clients/compute/arccat.cpp:165 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "" #: src/clients/compute/arccat.cpp:166 src/clients/compute/arccat.cpp:172 #, c-format msgid "Cannot create output of %s for any jobs" msgstr "" #: src/clients/compute/arccat.cpp:173 #, fuzzy, c-format msgid "Invalid destination URL %s" msgstr "Érvénytelen URL: %s" #: src/clients/compute/arccat.cpp:191 #, c-format msgid "Job deleted: %s" msgstr "" #: src/clients/compute/arccat.cpp:201 #, c-format msgid "Job has not started yet: %s" msgstr "" #: src/clients/compute/arccat.cpp:242 #, c-format msgid "Cannot determine the %s location: %s" msgstr "" #: src/clients/compute/arccat.cpp:247 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:260 #, c-format msgid "Catting %s for job %s" msgstr "" #: src/clients/compute/arcclean.cpp:35 #, fuzzy msgid "The arcclean command removes a job from the computing resource." msgstr "Az arcclean parancs eltávolít egy feladatot a távoli klaszterröl" #: src/clients/compute/arcclean.cpp:155 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:158 msgid "Are you sure you want to clean jobs missing information?" 
msgstr "" #: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "y" msgstr "y" #: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "n" msgstr "n" #: src/clients/compute/arcclean.cpp:164 msgid "Jobs missing information will not be cleaned!" msgstr "" #: src/clients/compute/arcclean.cpp:180 src/clients/compute/arctest.cpp:300 #, fuzzy, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arcclean.cpp:181 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:190 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "" "Az arcget parancsot arra lehet használni, hogy a feladat eredményeit " "megjelenítse" #: src/clients/compute/arcget.cpp:75 #, fuzzy, c-format msgid "Job download directory from user configuration file: %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/clients/compute/arcget.cpp:78 #, fuzzy msgid "Job download directory will be created in present working directory." msgstr "" "könyvtár letöltése (a feladat könyvtára ebben a könyvtárban fog létrejönni)" #: src/clients/compute/arcget.cpp:82 #, fuzzy, c-format msgid "Job download directory: %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/clients/compute/arcget.cpp:168 #, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "" #: src/clients/compute/arcget.cpp:178 #, c-format msgid "Results stored at: %s" msgstr "" #: src/clients/compute/arcget.cpp:190 src/clients/compute/arckill.cpp:158 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:191 src/clients/compute/arcget.cpp:198 #: src/clients/compute/arckill.cpp:159 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:197 src/clients/compute/arckill.cpp:165 #, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "" #: src/clients/compute/arcget.cpp:202 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:206 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 msgid "[resource ...]" msgstr "" #: src/clients/compute/arcinfo.cpp:35 #, fuzzy msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "" "Az arcinfo paranccsal lehet lekérdezni az egyes klaszterek állapotát a grid-" "ben." #: src/clients/compute/arcinfo.cpp:141 msgid "Information endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:152 msgid "Submission endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:154 msgid "status" msgstr "" #: src/clients/compute/arcinfo.cpp:156 #, fuzzy msgid "interface" msgstr "Felhasználó oldali hiba" #: src/clients/compute/arcinfo.cpp:175 msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "" #: src/clients/compute/arcinfo.cpp:188 msgid "ERROR: Failed to retrieve information" msgstr "" #: src/clients/compute/arcinfo.cpp:190 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." 
msgstr "Az arckill paranccsal lehet megölni egy futó feladatot" #: src/clients/compute/arckill.cpp:166 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:169 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:171 #, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "" #: src/clients/compute/arcrenew.cpp:146 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresume.cpp:146 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "" #: src/clients/compute/arcstat.cpp:35 #, fuzzy msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "Az arcstat paranccsal lehet lekérdezni azon feladatok állapotát,\n" "amelyek el lettek küldve a grid-ben lévÅ‘ klaszterre." #: src/clients/compute/arcstat.cpp:101 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "A 'sort' vagy az 'rsort' kapcsolókat nem lehet egyszerre használni" #: src/clients/compute/arcstat.cpp:171 msgid "No jobs found, try later" msgstr "" #: src/clients/compute/arcstat.cpp:215 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:45 msgid "[filename ...]" msgstr "[fájlnév ...]" #: src/clients/compute/arcsub.cpp:46 #, fuzzy msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." msgstr "Az arcsub paranccsal lehet feladatot küldeni a grid-be." #: src/clients/compute/arcsub.cpp:97 msgid "No job description input specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/clients/compute/arcsub.cpp:110 #, c-format msgid "Can not open job description file: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arcsub.cpp:138 src/clients/compute/arcsub.cpp:166 msgid "Invalid JobDescription:" msgstr "Érvénytelen feladat leírás:" #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:250 msgid "" "Cannot adapt job description to the submission target when information " "discovery is turned off" msgstr "" #: src/clients/compute/arcsync.cpp:66 src/clients/compute/arcsync.cpp:177 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "" #: src/clients/compute/arcsync.cpp:140 msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given CEs or registry servers." 
msgstr "" #: src/clients/compute/arcsync.cpp:183 #, fuzzy, c-format msgid "Warning: Unable to read local list of jobs from file (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arcsync.cpp:188 #, fuzzy, c-format msgid "Warning: Unable to truncate local list of jobs in file (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arcsync.cpp:194 #, c-format msgid "Warning: Unable to create job list file (%s), jobs list is destroyed" msgstr "" #: src/clients/compute/arcsync.cpp:198 #, fuzzy, c-format msgid "" "Warning: Failed to write local list of jobs into file (%s), jobs list is " "destroyed" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/arcsync.cpp:231 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" #: src/clients/compute/arcsync.cpp:236 msgid "Are you sure you want to synchronize your local job list?" msgstr "" #: src/clients/compute/arcsync.cpp:241 msgid "Cancelling synchronization request" msgstr "" #: src/clients/compute/arcsync.cpp:251 msgid "" "No services specified. Please configure default services in the client " "configuration, or specify a cluster or registry (-C or -Y options, see " "arcsync -h)." msgstr "" #: src/clients/compute/arctest.cpp:60 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:61 #, fuzzy msgid "The arctest command is used for testing clusters as resources." msgstr "" "Az arcget parancsot arra lehet használni, hogy a feladat eredményeit " "megjelenítse" #: src/clients/compute/arctest.cpp:73 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:80 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." 
msgstr "" #: src/clients/compute/arctest.cpp:118 #, fuzzy msgid "Certificate information:" msgstr "verzió információ kiírása" #: src/clients/compute/arctest.cpp:122 #, fuzzy msgid "No user-certificate found" msgstr "publikus kulcs elérési útvonala" #: src/clients/compute/arctest.cpp:125 #, fuzzy, c-format msgid "Certificate: %s" msgstr "Célállomás: %s" #: src/clients/compute/arctest.cpp:127 #, fuzzy, c-format msgid "Subject name: %s" msgstr "Tárgy: %s" #: src/clients/compute/arctest.cpp:128 #, fuzzy, c-format msgid "Valid until: %s" msgstr "A proxy eddig érvényes: %s" #: src/clients/compute/arctest.cpp:132 msgid "Unable to determine certificate information" msgstr "" #: src/clients/compute/arctest.cpp:136 #, fuzzy msgid "Proxy certificate information:" msgstr "verzió információ kiírása" #: src/clients/compute/arctest.cpp:138 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:141 #, fuzzy, c-format msgid "Proxy: %s" msgstr "Proxy elérési útvonal: %s" #: src/clients/compute/arctest.cpp:142 #, fuzzy, c-format msgid "Proxy-subject: %s" msgstr "Tárgy: %s" #: src/clients/compute/arctest.cpp:144 #, fuzzy msgid "Valid for: Proxy expired" msgstr "Nem használható tovább a proxy: Lejárt a proxy" #: src/clients/compute/arctest.cpp:146 #, fuzzy msgid "Valid for: Proxy not valid" msgstr "Nem használható tovább a proxy: Nem érvényes a proxy" #: src/clients/compute/arctest.cpp:148 #, fuzzy, c-format msgid "Valid for: %s" msgstr "Érvénytelen URL: %s" #: src/clients/compute/arctest.cpp:153 #, c-format msgid "Certificate issuer: %s" msgstr "" #: src/clients/compute/arctest.cpp:157 #, fuzzy msgid "CA-certificates installed:" msgstr "publikus kulcs elérési útvonala" #: src/clients/compute/arctest.cpp:179 msgid "Unable to detect if issuer certificate is installed." msgstr "" #: src/clients/compute/arctest.cpp:182 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:196 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:267 #, fuzzy, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/arctest.cpp:268 #, fuzzy, c-format msgid "Test submitted with jobid: %s" msgstr "Feladat elküldve ezzel az azonítóval: %s" #: src/clients/compute/arctest.cpp:283 #, c-format msgid "Computing service: %s" msgstr "" #: src/clients/compute/arctest.cpp:289 #, fuzzy msgid "Test failed, no more possible targets" msgstr "A feladat küldés meghiusúlt, mert nincs több szabad várakozó sor." #: src/clients/compute/arctest.cpp:302 src/clients/compute/submit.cpp:49 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arctest.cpp:315 src/clients/compute/submit.cpp:159 #, fuzzy, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/clients/compute/arctest.cpp:325 src/clients/compute/submit.cpp:175 #, fuzzy, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "Egy hiba lépett fel a feladat leírás elkészítése közben." 
#: src/clients/compute/arctest.cpp:329 src/clients/compute/submit.cpp:179 #, c-format msgid "Job description to be sent to %s:" msgstr "Feladat leírás elküldve ide: %s" #: src/clients/compute/submit.cpp:34 #, c-format msgid "Job submitted with jobid: %s" msgstr "Feladat elküldve ezzel az azonítóval: %s" #: src/clients/compute/submit.cpp:40 #, fuzzy, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/clients/compute/submit.cpp:45 #, c-format msgid "Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/submit.cpp:47 #, fuzzy, c-format msgid "Failed to write job information to database (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/clients/compute/submit.cpp:51 #, c-format msgid "Record about new job successfully added to the database (%s)" msgstr "" #: src/clients/compute/submit.cpp:57 msgid "Job submission summary:" msgstr "Job küldési összefoglaló" #: src/clients/compute/submit.cpp:59 #, c-format msgid "%d of %d jobs were submitted" msgstr "%d %d feladatból elküldve" #: src/clients/compute/submit.cpp:61 #, fuzzy msgid "The following jobs were not submitted:" msgstr "%d nem lett elküldve" #: src/clients/compute/submit.cpp:65 msgid "Job nr." msgstr "" #: src/clients/compute/submit.cpp:75 #, fuzzy, c-format msgid "ERROR: Unable to load broker %s" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/clients/compute/submit.cpp:79 #, fuzzy msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " "információt magáról" #: src/clients/compute/submit.cpp:83 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/submit.cpp:100 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/submit.cpp:129 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" #: src/clients/compute/submit.cpp:130 msgid "Original job description is listed below:" msgstr "" #: src/clients/compute/submit.cpp:142 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/submit.cpp:197 #, fuzzy msgid "" "Unable to prepare job description according to needs of the target resource." msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." 
#: src/clients/compute/submit.cpp:281 src/clients/compute/submit.cpp:311 #, c-format msgid "Service endpoint %s (type %s) added to the list for resource discovery" msgstr "" #: src/clients/compute/submit.cpp:291 msgid "" "There are no endpoints in registry that match requested info endpoint type" msgstr "" #: src/clients/compute/submit.cpp:332 #, c-format msgid "Service endpoint %s (type %s) added to the list for direct submission" msgstr "" #: src/clients/compute/submit.cpp:340 msgid "" "There are no endpoints in registry that match requested submission endpoint " "type" msgstr "" #: src/clients/compute/utils.cpp:111 #, c-format msgid "Types of execution services that %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:114 #, c-format msgid "Types of registry services that %s is able to collect information from:" msgstr "" #: src/clients/compute/utils.cpp:117 #, c-format msgid "" "Types of local information services that %s is able to collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:120 #, c-format msgid "" "Types of local information services that %s is able to collect job " "information from:" msgstr "" #: src/clients/compute/utils.cpp:123 #, c-format msgid "Types of services that %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:126 #, fuzzy, c-format msgid "Job description languages supported by %s:" msgstr "Feladat leírás elküldve ide: %s" #: src/clients/compute/utils.cpp:129 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:152 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:162 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:167 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:179 src/clients/data/utils.cpp:28 msgid "" "Cannot find any token. Please run 'oidc-token' or use similar\n" " utility to obtain authentication token!" msgstr "" #: src/clients/compute/utils.cpp:308 #, fuzzy, c-format msgid "Unsupported submission endpoint type: %s" msgstr "Nem támogatott url: %s" #: src/clients/compute/utils.cpp:327 msgid "" "Requested to skip resource discovery. Will try direct submission to arcrest " "endpoint type." 
msgstr "" #: src/clients/compute/utils.cpp:332 #, fuzzy, c-format msgid "Unsupported information endpoint type: %s" msgstr "Nem támogatott url: %s" #: src/clients/compute/utils.cpp:385 msgid "Other actions" msgstr "" #: src/clients/compute/utils.cpp:386 #, fuzzy msgid "Brokering and filtering" msgstr "szöveg" #: src/clients/compute/utils.cpp:387 msgid "Output format modifiers" msgstr "" #: src/clients/compute/utils.cpp:388 msgid "Behaviour tuning" msgstr "" #: src/clients/compute/utils.cpp:389 msgid "Target endpoint selection" msgstr "" #: src/clients/compute/utils.cpp:393 msgid "computing element hostname or a complete endpoint URL" msgstr "" #: src/clients/compute/utils.cpp:394 src/clients/compute/utils.cpp:404 msgid "ce" msgstr "" #: src/clients/compute/utils.cpp:398 msgid "registry service URL with optional specification of protocol" msgstr "" #: src/clients/compute/utils.cpp:399 msgid "registry" msgstr "" #: src/clients/compute/utils.cpp:403 msgid "only select jobs that were submitted to this computing element" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "" "require the specified endpoint type for job submission.\n" "\tAllowed values are: arcrest and internal." msgstr "" #: src/clients/compute/utils.cpp:412 src/clients/compute/utils.cpp:426 #: src/clients/compute/utils.cpp:434 msgid "type" msgstr "" #: src/clients/compute/utils.cpp:418 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:419 src/clients/compute/utils.cpp:603 #: src/clients/data/arccp.cpp:583 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:423 msgid "" "require information query using the specified information endpoint type.\n" "\tSpecial value 'NONE' will disable all resource information queries and the " "following brokering.\n" "\tAllowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal." msgstr "" #: src/clients/compute/utils.cpp:432 msgid "" "only get information about executon targets that support this job submission " "endpoint type.\n" "\tAllowed values are: arcrest and internal." 
msgstr "" #: src/clients/compute/utils.cpp:440 msgid "keep the files on the server (do not clean)" msgstr "fájlok megÅ‘rzése a szerveren (nincs törlés)" #: src/clients/compute/utils.cpp:446 msgid "do not ask for verification" msgstr "ne kérjen ellenÅ‘rzést" #: src/clients/compute/utils.cpp:450 #, fuzzy msgid "truncate the joblist before synchronizing" msgstr "feladat lista megcsonkult a szinkronizáció elÅ‘tt" #: src/clients/compute/utils.cpp:454 msgid "do not collect information, only convert jobs storage format" msgstr "" #: src/clients/compute/utils.cpp:460 src/clients/data/arcls.cpp:277 msgid "long format (more information)" msgstr "részletes formátum (több információ)" #: src/clients/compute/utils.cpp:466 msgid "show the stdout of the job (default)" msgstr "" #: src/clients/compute/utils.cpp:470 msgid "show the stderr of the job" msgstr "" #: src/clients/compute/utils.cpp:474 msgid "show the CE's error log of the job" msgstr "" #: src/clients/compute/utils.cpp:478 msgid "show the specified file from job's session directory" msgstr "" #: src/clients/compute/utils.cpp:479 #, fuzzy msgid "filepath" msgstr "elérési útvonal" #: src/clients/compute/utils.cpp:485 msgid "" "download directory (the job directory will be created in this directory)" msgstr "" "könyvtár letöltése (a feladat könyvtára ebben a könyvtárban fog létrejönni)" #: src/clients/compute/utils.cpp:487 msgid "dirname" msgstr "könyvtárnév" #: src/clients/compute/utils.cpp:491 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:496 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:502 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" "feladatok rendezése az azonosítójuk, az elküldés ideje vagy a neve alapján" #: src/clients/compute/utils.cpp:507 src/clients/compute/utils.cpp:510 msgid "order" msgstr "sorrend" #: src/clients/compute/utils.cpp:509 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" "feladatok rendezésének megfordítása az azonosítójuk, az elküldés ideje vagy " "a neve alapján" #: src/clients/compute/utils.cpp:513 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:517 msgid "show status information in JSON format" msgstr "" #: src/clients/compute/utils.cpp:523 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "feladat eltávolítása a helyi listából ha az nem található az információs " "rendszerben" #: src/clients/compute/utils.cpp:530 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:531 src/clients/compute/utils.cpp:535 msgid "int" msgstr "" #: src/clients/compute/utils.cpp:534 msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:541 msgid "only select jobs whose status is statusstr" msgstr "" #: src/clients/compute/utils.cpp:542 msgid "statusstr" msgstr "" #: src/clients/compute/utils.cpp:546 msgid "all jobs" msgstr "" #: src/clients/compute/utils.cpp:552 msgid "jobdescription string describing the job to be submitted" msgstr "a feladat leíró szöveg tartalmazza magát az elküldendÅ‘ feladatot" #: src/clients/compute/utils.cpp:554 src/clients/compute/utils.cpp:560 #: src/clients/credentials/arcproxy.cpp:353 #: 
#: src/clients/credentials/arcproxy.cpp:360
#: src/clients/credentials/arcproxy.cpp:379
#: src/clients/credentials/arcproxy.cpp:386
#: src/clients/credentials/arcproxy.cpp:404
#: src/clients/credentials/arcproxy.cpp:408
#: src/clients/credentials/arcproxy.cpp:423
#: src/clients/credentials/arcproxy.cpp:432
#: src/clients/credentials/arcproxy.cpp:436
msgid "string"
msgstr "szöveg"

#: src/clients/compute/utils.cpp:558
msgid "jobdescription file describing the job to be submitted"
msgstr "a feladat leíró fájl tartalmazza magát az elküldendő feladatot"

#: src/clients/compute/utils.cpp:566
msgid "select broker method (list available brokers with --listplugins flag)"
msgstr ""

#: src/clients/compute/utils.cpp:567
msgid "broker"
msgstr "bróker"

#: src/clients/compute/utils.cpp:570
msgid "the IDs of the submitted jobs will be appended to this file"
msgstr ""

#: src/clients/compute/utils.cpp:571 src/clients/compute/utils.cpp:598
#: src/clients/compute/utils.cpp:625 src/clients/compute/utils.cpp:633
#: src/clients/credentials/arcproxy.cpp:445 src/clients/data/arccp.cpp:603
#: src/clients/data/arcls.cpp:322 src/clients/data/arcmkdir.cpp:100
#: src/clients/data/arcrename.cpp:111 src/clients/data/arcrm.cpp:125
#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:53
msgid "filename"
msgstr "fájlnév"

#: src/clients/compute/utils.cpp:575
msgid "do not perform any delegation for submitted jobs"
msgstr ""

#: src/clients/compute/utils.cpp:579
msgid "perform X.509 delegation for submitted jobs"
msgstr ""

#: src/clients/compute/utils.cpp:583
msgid "perform token delegation for submitted jobs"
msgstr ""

#: src/clients/compute/utils.cpp:587
msgid ""
"request at most this number of job instances submitted in single submit "
"request"
msgstr ""

#: src/clients/compute/utils.cpp:591
msgid ""
"request at least this number of job instances submitted in single submit "
"request"
msgstr ""

#: src/clients/compute/utils.cpp:597
msgid "a file containing a list of jobIDs"
msgstr ""

#: src/clients/compute/utils.cpp:602
msgid "skip jobs that are on a computing element with a given URL"
msgstr ""

#: src/clients/compute/utils.cpp:608
msgid "submit jobs as dry run (no submission to batch system)"
msgstr ""

#: src/clients/compute/utils.cpp:612
msgid ""
"do not submit - dump job description in the language accepted by the target"
msgstr ""
"nincs küldés - azon feladat leíró formátumban megjelenítése, amit a távoli "
"klaszter elfogad"

#: src/clients/compute/utils.cpp:618
msgid "prints info about installed user- and CA-certificates"
msgstr ""

#: src/clients/compute/utils.cpp:619 src/clients/credentials/arcproxy.cpp:469
#: src/clients/data/arccp.cpp:637 src/clients/data/arcls.cpp:356
#: src/clients/data/arcmkdir.cpp:134 src/clients/data/arcrename.cpp:145
#: src/clients/data/arcrm.cpp:159
msgid "allow TLS connection which failed verification"
msgstr ""

#: src/clients/compute/utils.cpp:624
#, c-format
msgid "the file storing information about active jobs (default %s)"
msgstr ""

#: src/clients/compute/utils.cpp:632 src/clients/credentials/arcproxy.cpp:444
#: src/clients/data/arccp.cpp:602 src/clients/data/arcls.cpp:321
#: src/clients/data/arcmkdir.cpp:99 src/clients/data/arcrename.cpp:110
#: src/clients/data/arcrm.cpp:124
msgid "configuration file (default ~/.arc/client.conf)"
msgstr "konfigurációs fájl (alapbeállítás ~/.arc/client.conf)"

#: src/clients/compute/utils.cpp:635 src/clients/credentials/arcproxy.cpp:439
#: src/clients/data/arccp.cpp:597 src/clients/data/arcls.cpp:316
#: src/clients/data/arcmkdir.cpp:94
#: src/clients/data/arcrename.cpp:105 src/clients/data/arcrm.cpp:119
msgid "timeout in seconds (default 20)"
msgstr "időkorlát másodpercben (alapbeállítás 20)"

#: src/clients/compute/utils.cpp:636 src/clients/credentials/arcproxy.cpp:440
#: src/clients/data/arccp.cpp:598 src/clients/data/arcls.cpp:317
#: src/clients/data/arcmkdir.cpp:95 src/clients/data/arcrename.cpp:106
#: src/clients/data/arcrm.cpp:120
msgid "seconds"
msgstr "másodpercek"

#: src/clients/compute/utils.cpp:639
msgid "list the available plugins"
msgstr ""

#: src/clients/compute/utils.cpp:643 src/clients/credentials/arcproxy.cpp:449
#: src/clients/data/arccp.cpp:642 src/clients/data/arcls.cpp:361
#: src/clients/data/arcmkdir.cpp:139 src/clients/data/arcrename.cpp:150
#: src/clients/data/arcrm.cpp:164
#: src/hed/libs/compute/test_jobdescription.cpp:38
#: src/services/a-rex/grid-manager/gm_jobs.cpp:190
#: src/services/a-rex/grid-manager/inputcheck.cpp:81
#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:67
msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"
msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE vagy DEBUG"

#: src/clients/compute/utils.cpp:644 src/clients/credentials/arcproxy.cpp:450
#: src/clients/data/arccp.cpp:643 src/clients/data/arcls.cpp:362
#: src/clients/data/arcmkdir.cpp:140 src/clients/data/arcrename.cpp:151
#: src/clients/data/arcrm.cpp:165
#: src/hed/libs/compute/test_jobdescription.cpp:39
#: src/services/a-rex/grid-manager/gm_jobs.cpp:191
#: src/services/a-rex/grid-manager/inputcheck.cpp:82
#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:68
msgid "debuglevel"
msgstr "logolási szint"

#: src/clients/compute/utils.cpp:646 src/clients/credentials/arcproxy.cpp:473
#: src/clients/data/arccp.cpp:646 src/clients/data/arcls.cpp:365
#: src/clients/data/arcmkdir.cpp:143 src/clients/data/arcrename.cpp:154
#: src/clients/data/arcrm.cpp:168
msgid "print version information"
msgstr "verzió információ kiírása"

#: src/clients/compute/utils.cpp:652 src/clients/data/arccp.cpp:607
#: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:104
#: src/clients/data/arcrename.cpp:115 src/clients/data/arcrm.cpp:129
msgid "do not perform any authentication for opened connections"
msgstr ""

#: src/clients/compute/utils.cpp:656 src/clients/data/arccp.cpp:612
#: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:109
#: src/clients/data/arcrename.cpp:120 src/clients/data/arcrm.cpp:134
msgid "perform X.509 authentication for opened connections"
msgstr ""

#: src/clients/compute/utils.cpp:660 src/clients/data/arccp.cpp:617
#: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:114
#: src/clients/data/arcrename.cpp:125 src/clients/data/arcrm.cpp:139
msgid "perform token authentication for opened connections"
msgstr ""

#: src/clients/compute/utils.cpp:664 src/clients/credentials/arcproxy.cpp:454
#: src/clients/data/arccp.cpp:622 src/clients/data/arcls.cpp:341
#: src/clients/data/arcmkdir.cpp:119 src/clients/data/arcrename.cpp:130
#: src/clients/data/arcrm.cpp:144
msgid "force using CA certificates configuration provided by OpenSSL"
msgstr ""

#: src/clients/compute/utils.cpp:668 src/clients/credentials/arcproxy.cpp:459
#: src/clients/data/arccp.cpp:627 src/clients/data/arcls.cpp:346
#: src/clients/data/arcmkdir.cpp:124 src/clients/data/arcrename.cpp:135
#: src/clients/data/arcrm.cpp:149
msgid ""
"force using CA certificates configuration for Grid services (typically IGTF)"
msgstr ""
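
# A hedged sketch of the common client options listed above (timeout,
# configuration file, debug level, all jobs). The short flags -t, -z, -d and
# -a are assumptions modelled on the usual ARC client conventions; check the
# corresponding --help output before relying on them:
#   arcstat -a -t 60 -z ~/.arc/client.conf -d INFO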
#: src/clients/compute/utils.cpp:672 src/clients/credentials/arcproxy.cpp:464
msgid ""
"force using CA certificates configuration for Grid services (typically IGTF) "
"and one provided by OpenSSL"
msgstr ""

#: src/clients/compute/utils.cpp:681 src/clients/compute/utils.cpp:688
#: src/clients/compute/utils.cpp:695
#, fuzzy
msgid "Conflicting delegation types specified."
msgstr "Nincs megadva feladat leírás bemeneti adatként"

#: src/clients/compute/utils.cpp:727 src/clients/compute/utils.cpp:734
#: src/clients/compute/utils.cpp:741 src/clients/data/utils.cpp:41
#: src/clients/data/utils.cpp:48 src/clients/data/utils.cpp:55
#, fuzzy
msgid "Conflicting authentication types specified."
msgstr "Nincs megadva feladat leírás bemeneti adatként"

#: src/clients/credentials/arcproxy.cpp:151
#, fuzzy, c-format
msgid "There are %d user certificates existing in the NSS database"
msgstr "%d darab publikus tanúsítvány van a válasz üzenetben"

#: src/clients/credentials/arcproxy.cpp:167
#, c-format
msgid "Number %d is with nickname: %s%s"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:176
#, c-format
msgid " expiration time: %s "
msgstr ""

#: src/clients/credentials/arcproxy.cpp:180
#, c-format
msgid " certificate dn: %s"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:181
#, c-format
msgid " issuer dn: %s"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:182
#, c-format
msgid " serial number: %d"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:186
#, c-format
msgid "Please choose the one you would use (1-%d): "
msgstr ""

#: src/clients/credentials/arcproxy.cpp:251
#, fuzzy
msgid ""
"The arcproxy command creates a proxy from a key/certificate pair which can\n"
"then be used to access grid resources."
msgstr ""
"Az arcproxy parancs proxy-t készít a publikus/privát kulcsból,\n"
"hogy tudja használni az ARC köztesréteget"

#: src/clients/credentials/arcproxy.cpp:253
msgid ""
"Supported constraints are:\n"
" validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start\n"
" from now)\n"
"\n"
" validityEnd=time\n"
"\n"
" validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and\n"
" validityEnd not specified, the default is 12 hours for local proxy, and\n"
" 168 hours for delegated proxy on myproxy server)\n"
"\n"
" vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, "
"the\n"
" default is the minimum value of 12 hours and validityPeriod)\n"
"\n"
" myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy "
"server,\n"
" e.g. 43200 or 12h or 12H; if not specified, the default is the minimum "
"value\n"
" of 12 hours and validityPeriod (which is lifetime of the delegated proxy "
"on\n"
" myproxy server))\n"
"\n"
" proxyPolicy=policy content\n"
"\n"
" proxyPolicyFile=policy file\n"
"\n"
" keybits=number - length of the key to generate. Default is 2048 bits.\n"
" Special value 'inherit' is to use key length of signing certificate.\n"
"\n"
" signingAlgorithm=name - signing algorithm to use for signing public key "
"of\n"
" proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256,\n"
" sha384, sha512 and inherit (use algorithm of signing certificate). "
"Default\n"
" is inherit. With old systems, only sha1 is acceptable.\n"
"\n"
"Supported information item names are:\n"
" subject - subject name of proxy certificate.\n"
"\n"
" identity - identity subject name of proxy certificate.\n"
"\n"
" issuer - issuer subject name of proxy certificate.\n"
"\n"
" ca - subject name of CA which issued initial certificate.\n"
"\n"
" path - file system path to file containing proxy.\n"
"\n"
" type - type of proxy certificate.\n"
"\n"
" validityStart - timestamp when proxy validity starts.\n"
"\n"
" validityEnd - timestamp when proxy validity ends.\n"
"\n"
" validityPeriod - duration of proxy validity in seconds.\n"
"\n"
" validityLeft - duration of proxy validity left in seconds.\n"
"\n"
" vomsVO - VO name represented by VOMS attribute\n"
"\n"
" vomsSubject - subject of certificate for which VOMS attribute is issued\n"
"\n"
" vomsIssuer - subject of service which issued VOMS certificate\n"
"\n"
" vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n"
"\n"
" vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n"
"\n"
" vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n"
"\n"
" vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n"
"\n"
" proxyPolicy\n"
"\n"
" keybits - size of proxy certificate key in bits.\n"
"\n"
" signingAlgorithm - algorithm used to sign proxy certificate.\n"
"\n"
"Items are printed in requested order and are separated by newline.\n"
"If item has multiple values they are printed in same line separated by |.\n"
"\n"
"Supported password destinations are:\n"
" key - for reading private key\n"
"\n"
" myproxy - for accessing credentials at MyProxy service\n"
"\n"
" myproxynew - for creating credentials at MyProxy service\n"
"\n"
" all - for any purpose.\n"
"\n"
"Supported password sources are:\n"
" quoted string (\"password\") - explicitly specified password\n"
"\n"
" int - interactively request password from console\n"
"\n"
" stdin - read password from standard input delimited by newline\n"
"\n"
" file:filename - read password from file named filename\n"
"\n"
" stream:# - read password from input stream number #.\n"
" Currently only 0 (standard input) is supported."
msgstr ""
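
# A hedged usage sketch for the constraint syntax listed above. The constraint
# names and values (validityPeriod, keybits, signingAlgorithm, validityEnd)
# are quoted from this help text; the -c/--constraint flag name itself is an
# assumption to be checked against arcproxy --help:
#   arcproxy -c validityPeriod=12h -c keybits=2048
#   arcproxy -c signingAlgorithm=sha256 -c validityEnd=2008-05-29T10:20:30Z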
msgstr "" #: src/clients/credentials/arcproxy.cpp:315 #, fuzzy msgid "path to the proxy file" msgstr "proxy fájl elérési útvonala" #: src/clients/credentials/arcproxy.cpp:316 #: src/clients/credentials/arcproxy.cpp:320 #: src/clients/credentials/arcproxy.cpp:324 #: src/clients/credentials/arcproxy.cpp:328 #: src/clients/credentials/arcproxy.cpp:332 #: src/clients/credentials/arcproxy.cpp:336 src/clients/data/arccp.cpp:560 msgid "path" msgstr "elérési útvonal" #: src/clients/credentials/arcproxy.cpp:319 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formatted" msgstr "" #: src/clients/credentials/arcproxy.cpp:323 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:327 #, fuzzy msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "megbízható tanúsítványok könyvtára, csak a voms kliensek használják" #: src/clients/credentials/arcproxy.cpp:331 #, fuzzy msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "megbízható tanúsítványok könyvtára, csak a voms kliensek használják" #: src/clients/credentials/arcproxy.cpp:335 #, fuzzy msgid "path to the VOMS server configuration file" msgstr "voms szerver fájljának az elérési útvonala" #: src/clients/credentials/arcproxy.cpp:339 msgid "" "voms<:command>. Specify VOMS server\n" " More than one VOMS server can be specified like this:\n" " --voms VOa:command1 --voms VOb:command2.\n" " :command is optional, and is used to ask for specific attributes (e.g: " "roles)\n" " command options are:\n" "\n" " all --- put all of this DN's attributes into AC;\n" "\n" " list --- list all of the DN's attribute, will not create AC " "extension;\n" "\n" " /Role=yourRole --- specify the role, if this DN\n" " has such a role, the role will be put into AC;\n" "\n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN\n" " has such a role, the role will be put into AC.\n" "\n" " If this option is not specified values from configuration " "files are used.\n" " To avoid anything to be used specify -S with empty value.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:356 msgid "" "group<:role>. 
"group<:role>. Specify ordering of attributes\n"
" Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/"
"testers:Tester\n"
" or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/"
"testers:Tester\n"
" Note that it does not make sense to specify the order if you have two or "
"more different VOMS servers specified"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:363
msgid "use GSI communication protocol for contacting VOMS services"
msgstr "GSI kommunikációs protokoll használata a VOMS szolgáltatás eléréséhez"

#: src/clients/credentials/arcproxy.cpp:366
msgid ""
"use HTTP communication protocol for contacting VOMS services that provide "
"RESTful access\n"
" Note for RESTful access, 'list' command and multiple VOMS "
"servers are not supported\n"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:370
#, fuzzy
msgid ""
"use old communication protocol for contacting VOMS services instead of "
"RESTful access\n"
msgstr "GSI kommunikációs protokoll használata a VOMS szolgáltatás eléréséhez"

#: src/clients/credentials/arcproxy.cpp:373
msgid ""
"this option is not functional (old GSI proxies are not supported anymore)"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:376
msgid "print all information about this proxy."
msgstr ""

#: src/clients/credentials/arcproxy.cpp:379
msgid "print selected information about this proxy."
msgstr ""

#: src/clients/credentials/arcproxy.cpp:382
msgid "remove proxy"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:385
msgid ""
"username to MyProxy server (if missing subject of user certificate is used)"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:390
msgid ""
"don't prompt for a credential passphrase, when retrieving a credential from "
"a MyProxy server.\n"
" The precondition of this choice is that the credential was PUT onto\n"
" the MyProxy server without a passphrase by using the\n"
" -R (--retrievable_by_cert) option.\n"
" This option is specific to the GET command when contacting a Myproxy\n"
" server."
msgstr ""

#: src/clients/credentials/arcproxy.cpp:401
msgid ""
"Allow specified entity to retrieve credential without passphrase.\n"
" This option is specific to the PUT command when contacting a Myproxy\n"
" server."
msgstr ""

#: src/clients/credentials/arcproxy.cpp:407
#, fuzzy
msgid "hostname[:port] of MyProxy server"
msgstr "szervernév[:port] myproxy szerveré"

#: src/clients/credentials/arcproxy.cpp:412
msgid ""
"command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or "
"DESTROY.\n"
" PUT -- put a delegated credentials to the MyProxy server;\n"
"\n"
" GET -- get a delegated credentials from the MyProxy server;\n"
"\n"
" INFO -- get and present information about credentials stored "
"at the MyProxy server;\n"
"\n"
" NEWPASS -- change password protecting credentials stored at "
"the MyProxy server;\n"
"\n"
" DESTROY -- wipe off credentials stored at the MyProxy server;\n"
"\n"
" Local credentials (certificate and key) are not necessary "
"except in case of PUT.\n"
" MyProxy functionality can be used together with VOMS "
"functionality.\n"
" --voms and --vomses can be used for Get command if VOMS "
"attributes\n"
" is required to be included in the proxy.\n"
msgstr ""

#: src/clients/credentials/arcproxy.cpp:427
msgid ""
"use NSS credential database in default Mozilla profiles, including Firefox, "
"Seamonkey and Thunderbird."
msgstr ""
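
# A hedged sketch combining the VOMS and MyProxy options described above.
# --voms and --order appear verbatim in this help text, and the VO name
# knowarc.eu is reused from its example; the -M (MyProxy command) and -L
# (MyProxy server) flag names are assumptions to verify with arcproxy --help:
#   arcproxy --voms knowarc.eu:/knowarc.eu/coredev/Role=Developer
#   arcproxy -M PUT -L myproxy.example.org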
msgstr "" #: src/clients/credentials/arcproxy.cpp:431 msgid "proxy constraints" msgstr "" #: src/clients/credentials/arcproxy.cpp:435 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:479 msgid "" "RESTful and old VOMS communication protocols can't be requested " "simultaneously." msgstr "" #: src/clients/credentials/arcproxy.cpp:509 #: src/clients/credentials/arcproxy.cpp:1220 #, fuzzy msgid "Failed configuration initialization." msgstr "Nem sikerült betölteni a konfigurációt" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:545 #: src/clients/credentials/arcproxy.cpp:557 msgid "You may try to increase verbosity to get more information." msgstr "" #: src/clients/credentials/arcproxy.cpp:553 #, fuzzy msgid "Failed to find CA certificates" msgstr "Nem sikerült listázni a meta adatokat" #: src/clients/credentials/arcproxy.cpp:554 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" #: src/clients/credentials/arcproxy.cpp:558 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" #: src/clients/credentials/arcproxy.cpp:570 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:615 msgid "Wrong number of arguments!" msgstr "" #: src/clients/credentials/arcproxy.cpp:623 #: src/clients/credentials/arcproxy.cpp:647 #: src/clients/credentials/arcproxy.cpp:780 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:630 #, fuzzy, c-format msgid "Cannot remove proxy file at %s" msgstr "proxy fájl elérési útvonala" #: src/clients/credentials/arcproxy.cpp:632 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:641 msgid "Bearer token is available. It is preferred for job submission." msgstr "" #: src/clients/credentials/arcproxy.cpp:653 #: src/clients/credentials/arcproxy.cpp:786 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" #: src/clients/credentials/arcproxy.cpp:659 #: src/clients/credentials/arcproxy.cpp:792 #, fuzzy, c-format msgid "Cannot process proxy file at %s." 
msgstr "proxy fájl elérési útvonala" #: src/clients/credentials/arcproxy.cpp:662 #, c-format msgid "Subject: %s" msgstr "Tárgy: %s" #: src/clients/credentials/arcproxy.cpp:663 #, fuzzy, c-format msgid "Issuer: %s" msgstr "Válasz: %s" #: src/clients/credentials/arcproxy.cpp:664 #, c-format msgid "Identity: %s" msgstr "Azonosító: %s" #: src/clients/credentials/arcproxy.cpp:666 msgid "Time left for proxy: Proxy expired" msgstr "Nem használható tovább a proxy: Lejárt a proxy" #: src/clients/credentials/arcproxy.cpp:668 #, fuzzy msgid "Time left for proxy: Proxy not valid yet" msgstr "Nem használható tovább a proxy: Nem érvényes a proxy" #: src/clients/credentials/arcproxy.cpp:670 #, c-format msgid "Time left for proxy: %s" msgstr "Ennyi ideig érvényes még a proxy: %s" #: src/clients/credentials/arcproxy.cpp:671 #, c-format msgid "Proxy path: %s" msgstr "Proxy elérési útvonal: %s" #: src/clients/credentials/arcproxy.cpp:672 #, c-format msgid "Proxy type: %s" msgstr "Proxy típusa: %s" #: src/clients/credentials/arcproxy.cpp:673 #, fuzzy, c-format msgid "Proxy key length: %i" msgstr "Proxy elérési útvonal: %s" #: src/clients/credentials/arcproxy.cpp:674 #, fuzzy, c-format msgid "Proxy signature: %s" msgstr "Proxy elérési útvonal: %s" #: src/clients/credentials/arcproxy.cpp:683 #, fuzzy msgid "AC extension information for VO " msgstr "verzió információ kiírása" #: src/clients/credentials/arcproxy.cpp:686 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:699 msgid "AC is invalid: " msgstr "" #: src/clients/credentials/arcproxy.cpp:729 #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:222 #, c-format msgid "Malformed VOMS AC attribute %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:760 #, fuzzy msgid "Time left for AC: AC is not valid yet" msgstr "Nem használható tovább a proxy: Nem érvényes a proxy" #: src/clients/credentials/arcproxy.cpp:762 #, fuzzy msgid "Time left for AC: AC has expired" msgstr "Nem használható tovább a proxy: Lejárt a proxy" #: src/clients/credentials/arcproxy.cpp:764 #, fuzzy, c-format msgid "Time left for AC: %s" msgstr "Ennyi ideig érvényes még a proxy: %s" #: src/clients/credentials/arcproxy.cpp:871 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:883 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:887 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:911 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:928 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:943 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int, stdin, stream, file." msgstr "" #: src/clients/credentials/arcproxy.cpp:957 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:962 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int, stdin, " "stream, file." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1001 msgid "The start, end and period can't be set simultaneously" msgstr "" #: src/clients/credentials/arcproxy.cpp:1007 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1014 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1021 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1030 #, c-format msgid "The end time that you set: %s is before start time: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1041 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1044 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1054 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1072 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1087 #, c-format msgid "The keybits constraint is wrong: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1101 msgid "The NSS database can not be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1110 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1112 #, c-format msgid "Number %d is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1114 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1130 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1201 #, c-format msgid "Certificate to use is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxy.cpp:1366 msgid "Proxy generation succeeded" msgstr "Proxy készítés sikeres" #: src/clients/credentials/arcproxy.cpp:1253 #: src/clients/credentials/arcproxy.cpp:1367 #, c-format msgid "Your proxy is valid until: %s" msgstr "A proxy eddig érvényes: %s" #: src/clients/credentials/arcproxy.cpp:1272 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" #: src/clients/credentials/arcproxy.cpp:1291 src/hed/mcc/tls/MCCTLS.cpp:182 #: src/hed/mcc/tls/MCCTLS.cpp:215 src/hed/mcc/tls/MCCTLS.cpp:241 msgid "VOMS attribute parsing failed" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/clients/credentials/arcproxy.cpp:1293 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1314 #, fuzzy msgid "Proxy generation failed: No valid certificate found." msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenÅ‘rizni a publikus kulcsot" #: src/clients/credentials/arcproxy.cpp:1319 #, fuzzy msgid "Proxy generation failed: No valid private key found." msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenÅ‘rizni a publikus kulcsot" #: src/clients/credentials/arcproxy.cpp:1323 #, c-format msgid "Your identity: %s" msgstr "Azonosítód: %s" #: src/clients/credentials/arcproxy.cpp:1325 msgid "Proxy generation failed: Certificate has expired." msgstr "Proxy készítés sikertelen: A publikus kulcs érvényessége lejárt." 
#: src/clients/credentials/arcproxy.cpp:1329
msgid "Proxy generation failed: Certificate is not valid yet."
msgstr "Proxy készítés sikertelen: A tanúsítvány még nem érvényes."

#: src/clients/credentials/arcproxy.cpp:1340
#, fuzzy
msgid "Proxy generation failed: Failed to create temporary file."
msgstr ""
"Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot"

#: src/clients/credentials/arcproxy.cpp:1348
#, fuzzy
msgid "Proxy generation failed: Failed to retrieve VOMS information."
msgstr ""
"Proxy készítés sikertelen: Nem sikerült leellenőrizni a publikus kulcsot"

#: src/clients/credentials/arcproxy_myproxy.cpp:100
msgid "Succeeded to get info from MyProxy server"
msgstr ""

#: src/clients/credentials/arcproxy_myproxy.cpp:144
msgid "Succeeded to change password on MyProxy server"
msgstr ""

#: src/clients/credentials/arcproxy_myproxy.cpp:185
msgid "Succeeded to destroy credential on MyProxy server"
msgstr ""

#: src/clients/credentials/arcproxy_myproxy.cpp:241
#, c-format
msgid "Succeeded to get a proxy in %s from MyProxy server %s"
msgstr ""

#: src/clients/credentials/arcproxy_myproxy.cpp:294
msgid "Succeeded to put a proxy onto MyProxy server"
msgstr ""

#: src/clients/credentials/arcproxy_proxy.cpp:93
msgid "Failed to add VOMS AC extension. Your proxy may be incomplete."
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:63
msgid ""
"Failed to process VOMS configuration or no suitable configuration lines "
"found."
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:75
#, fuzzy, c-format
msgid "Failed to parse requested VOMS lifetime: %s"
msgstr "Nem sikerült elküldeni a kérést"

#: src/clients/credentials/arcproxy_voms.cpp:93
#, fuzzy, c-format
msgid "Cannot get VOMS server address information from vomses line: \"%s\""
msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s"

#: src/clients/credentials/arcproxy_voms.cpp:97
#: src/clients/credentials/arcproxy_voms.cpp:99
#, c-format
msgid "Contacting VOMS server (named %s): %s on port: %s"
msgstr "VOMS szerver elérése (neve: %s): %s ezen a porton: %s"

#: src/clients/credentials/arcproxy_voms.cpp:105
#, c-format
msgid "Failed to parse requested VOMS server port number: %s"
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:122
msgid "List functionality is not supported for RESTful VOMS interface"
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:132
#: src/clients/credentials/arcproxy_voms.cpp:188
#, c-format
msgid ""
"The VOMS server with the information:\n"
"\t%s\n"
"can not be reached, please make sure it is available."
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:133
#: src/clients/credentials/arcproxy_voms.cpp:138
#: src/clients/credentials/arcproxy_voms.cpp:189
#: src/clients/credentials/arcproxy_voms.cpp:194
#, c-format
msgid ""
"Collected error is:\n"
"\t%s"
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:137
#: src/clients/credentials/arcproxy_voms.cpp:193
#, fuzzy, c-format
msgid "No valid response from VOMS server: %s"
msgstr "Nincs válasz a voms szervertől"

#: src/clients/credentials/arcproxy_voms.cpp:155
msgid "List functionality is not supported for legacy VOMS interface"
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:167
#, c-format
msgid "Failed to parse VOMS command: %s"
msgstr ""

#: src/clients/credentials/arcproxy_voms.cpp:204
#, c-format
msgid ""
"There are %d servers with the same name: %s in your vomses file, but none of "
"them can be reached, or can return a valid message."
msgstr "" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:315 #, c-format msgid "Current transfer FAILED: %s" msgstr "Az aktuális átvitel MEGSZAKADT: %s" #: src/clients/data/arccp.cpp:81 src/clients/data/arccp.cpp:119 #: src/clients/data/arccp.cpp:317 src/clients/data/arcls.cpp:214 #: src/clients/data/arcmkdir.cpp:62 src/clients/data/arcrename.cpp:78 #: src/clients/data/arcrm.cpp:83 msgid "This seems like a temporary error, please try again later" msgstr "" #: src/clients/data/arccp.cpp:96 src/clients/data/arccp.cpp:100 #: src/clients/data/arccp.cpp:133 src/clients/data/arccp.cpp:137 #: src/clients/data/arccp.cpp:343 src/clients/data/arccp.cpp:348 #: src/clients/data/arcls.cpp:125 src/clients/data/arcmkdir.cpp:30 #: src/clients/data/arcrename.cpp:31 src/clients/data/arcrename.cpp:35 #: src/clients/data/arcrm.cpp:38 #, c-format msgid "Invalid URL: %s" msgstr "Érvénytelen URL: %s" #: src/clients/data/arccp.cpp:112 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:114 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:117 #, c-format msgid "Transfer FAILED: %s" msgstr "" #: src/clients/data/arccp.cpp:145 src/clients/data/arccp.cpp:171 #: src/clients/data/arccp.cpp:359 src/clients/data/arccp.cpp:387 #, c-format msgid "Can't read list of sources from file %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/clients/data/arccp.cpp:150 src/clients/data/arccp.cpp:186 #: src/clients/data/arccp.cpp:364 src/clients/data/arccp.cpp:403 #, c-format msgid "Can't read list of destinations from file %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/clients/data/arccp.cpp:155 src/clients/data/arccp.cpp:370 msgid "Numbers of sources and destinations do not match" msgstr "A forrás és céállomások száma nem egyezik meg" #: src/clients/data/arccp.cpp:200 msgid "Fileset registration is not supported yet" msgstr "A fileset regisztcáció nem támogatott még" #: src/clients/data/arccp.cpp:206 src/clients/data/arccp.cpp:279 #: src/clients/data/arccp.cpp:441 #, c-format msgid "Unsupported source url: %s" msgstr "Nem támogatott url: %s" #: src/clients/data/arccp.cpp:210 src/clients/data/arccp.cpp:283 #, c-format msgid "Unsupported destination url: %s" msgstr "Nem támogatott url: %s" #: src/clients/data/arccp.cpp:217 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" #: src/clients/data/arccp.cpp:227 #, c-format msgid "Could not obtain information about source: %s" msgstr "" #: src/clients/data/arccp.cpp:234 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" #: src/clients/data/arccp.cpp:246 msgid "Failed to accept new file/destination" msgstr "" #: src/clients/data/arccp.cpp:252 src/clients/data/arccp.cpp:258 #, c-format msgid "Failed to register new file/destination: %s" msgstr "" #: src/clients/data/arccp.cpp:421 msgid "Fileset copy to single object is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:431 msgid "Can't extract object's name from source url" msgstr "" #: src/clients/data/arccp.cpp:450 #, c-format msgid "%s. 
Cannot copy fileset" msgstr "" #: src/clients/data/arccp.cpp:460 src/hed/libs/compute/ExecutionTarget.cpp:256 #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Name: %s" msgstr "Név: %s" #: src/clients/data/arccp.cpp:463 #, c-format msgid "Source: %s" msgstr "Forrás: %s" #: src/clients/data/arccp.cpp:464 #, c-format msgid "Destination: %s" msgstr "Célállomás: %s" #: src/clients/data/arccp.cpp:470 msgid "Current transfer complete" msgstr "Az aktuális átvitel sikeres" #: src/clients/data/arccp.cpp:473 msgid "Some transfers failed" msgstr "" #: src/clients/data/arccp.cpp:483 #, c-format msgid "Directory: %s" msgstr "" #: src/clients/data/arccp.cpp:503 msgid "Transfer complete" msgstr "" #: src/clients/data/arccp.cpp:522 msgid "source destination" msgstr "" #: src/clients/data/arccp.cpp:523 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" #: src/clients/data/arccp.cpp:528 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" #: src/clients/data/arccp.cpp:534 msgid "do not try to force passive transfer" msgstr "" #: src/clients/data/arccp.cpp:539 #, fuzzy msgid "force overwrite of existing destination" msgstr "Nem sikerült feloldani a célállomást" #: src/clients/data/arccp.cpp:543 msgid "show progress indicator" msgstr "" #: src/clients/data/arccp.cpp:548 msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url." msgstr "" #: src/clients/data/arccp.cpp:554 msgid "use secure transfer (insecure by default)" msgstr "" #: src/clients/data/arccp.cpp:559 msgid "path to local cache (use to put file into cache)" msgstr "" #: src/clients/data/arccp.cpp:564 src/clients/data/arcls.cpp:290 msgid "operate recursively" msgstr "" #: src/clients/data/arccp.cpp:569 src/clients/data/arcls.cpp:295 msgid "operate recursively up to specified level" msgstr "" #: src/clients/data/arccp.cpp:570 src/clients/data/arcls.cpp:296 msgid "level" msgstr "" #: src/clients/data/arccp.cpp:574 msgid "number of retries before failing file transfer" msgstr "" #: src/clients/data/arccp.cpp:575 msgid "number" msgstr "szám" #: src/clients/data/arccp.cpp:579 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." 
msgstr "" #: src/clients/data/arccp.cpp:587 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:312 #: src/clients/data/arcmkdir.cpp:90 src/clients/data/arcrename.cpp:101 #: src/clients/data/arcrm.cpp:115 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:632 src/clients/data/arcls.cpp:351 #: src/clients/data/arcmkdir.cpp:129 src/clients/data/arcrename.cpp:140 #: src/clients/data/arcrm.cpp:154 msgid "" "force using both CA certificates configuration for Grid services (typically " "IGTF) and those provided by OpenSSL" msgstr "" #: src/clients/data/arccp.cpp:667 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:176 #: src/clients/data/arcrm.cpp:191 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:715 src/clients/data/arcls.cpp:435 #: src/clients/data/arcmkdir.cpp:212 src/clients/data/arcrename.cpp:222 #: src/clients/data/arcrm.cpp:239 msgid "Wrong number of parameters specified" msgstr "" #: src/clients/data/arccp.cpp:720 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "" #: src/clients/data/arcls.cpp:131 src/clients/data/arcmkdir.cpp:36 #: src/clients/data/arcrm.cpp:45 #, c-format msgid "Can't read list of locations from file %s" msgstr "" #: src/clients/data/arcls.cpp:146 src/clients/data/arcmkdir.cpp:51 #: src/clients/data/arcrename.cpp:63 msgid "Unsupported URL given" msgstr "" #: src/clients/data/arcls.cpp:217 msgid "Warning: Failed listing files but some information is obtained" msgstr "" #: src/clients/data/arcls.cpp:271 src/clients/data/arcmkdir.cpp:79 msgid "url" msgstr "url" #: src/clients/data/arcls.cpp:272 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." msgstr "" #: src/clients/data/arcls.cpp:281 msgid "show URLs of file locations" msgstr "" #: src/clients/data/arcls.cpp:285 msgid "display all available metadata" msgstr "" #: src/clients/data/arcls.cpp:299 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:303 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:307 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:440 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:445 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:80 msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" #: src/clients/data/arcmkdir.cpp:85 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:43 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:53 msgid "Cannot rename to or from root directory" msgstr "" #: src/clients/data/arcrename.cpp:57 msgid "Cannot rename to the same URL" msgstr "" #: src/clients/data/arcrename.cpp:95 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:96 #, fuzzy msgid "The arcrename command renames files on grid storage elements." 
msgstr "Az arcclean parancs eltávolít egy feladatot a távoli klaszterröl" #: src/clients/data/arcrm.cpp:60 #, fuzzy, c-format msgid "Unsupported URL given: %s" msgstr "Nem támogatott url: %s" #: src/clients/data/arcrm.cpp:103 #, fuzzy msgid "url [url ...]" msgstr "[klaszter ...]" #: src/clients/data/arcrm.cpp:104 #, fuzzy msgid "The arcrm command deletes files on grid storage elements." msgstr "Az arcclean parancs eltávolít egy feladatot a távoli klaszterröl" #: src/clients/data/arcrm.cpp:109 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" #: src/clients/data/utils.cpp:18 msgid "Proxy expired. Please run 'arcproxy'!" msgstr "" #: src/clients/data/utils.cpp:81 src/clients/data/utils.cpp:90 #, fuzzy, c-format msgid "Unable to handle %s" msgstr "Nem sikerült elküldeni a kérést" #: src/clients/data/utils.cpp:82 src/clients/data/utils.cpp:91 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/utils.cpp:88 msgid "Proxy expired" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:60 msgid "Cannot create resolver from /etc/resolv.conf" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:124 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:129 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:134 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:138 #, c-format msgid "Found service endpoint %s (type %s)" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:157 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:229 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:149 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:241 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:161 #, fuzzy, c-format msgid "Job %s failed to renew delegation %s." 
msgstr "Nem támogatott url: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:314 #, fuzzy, c-format msgid "Failed to process jobs - error response: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:316 #, fuzzy, c-format msgid "Failed to process jobs - wrong response: %u" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:318 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:327 #, fuzzy, c-format msgid "Content: %s" msgstr "Forrás: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:321 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:333 #, fuzzy, c-format msgid "Failed to process job: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:331 #, fuzzy msgid "Failed to process jobs - failed to parse response" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:344 #, fuzzy, c-format msgid "No response returned: %s" msgstr "Nincs válasz a voms szervertÅ‘l" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:368 #, fuzzy, c-format msgid "Failed to process job: %s - %s %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:455 #, c-format msgid "Failed retrieving job description for job: %s" msgstr "" #: src/hed/acc/ARCREST/JobListRetrieverPluginREST.cpp:29 msgid "Collecting Job (A-REX REST jobs) information." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:49 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:80 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:115 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:149 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:189 msgid "Failed to communicate to delegation endpoint." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:54 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:85 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:120 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:154 #, c-format msgid "Unexpected response code from delegation endpoint - %u" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:56 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:87 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:122 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:156 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:399 #: src/hed/dmc/gridftp/Lister.cpp:223 src/hed/dmc/gridftp/Lister.cpp:243 #: src/hed/dmc/gridftp/Lister.cpp:468 src/hed/dmc/gridftp/Lister.cpp:475 #: src/hed/dmc/gridftp/Lister.cpp:497 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:164 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:197 #, c-format msgid "Response: %s" msgstr "Válasz: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:64 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:136 #, c-format msgid "Unexpected delegation location from delegation endpoint - %s." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:92 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:127 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:161 msgid "Missing response from delegation endpoint." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:193 #, c-format msgid "Unexpected response code from delegation endpoint: %u, %s." 
msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:235 #, fuzzy, c-format msgid "Failed to submit all jobs: %s %s" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:249 msgid "Failed uploading local input files" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:304 #, fuzzy msgid "Failed to prepare job description" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:313 #, fuzzy, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:320 msgid "" "Can't submit multiple instances for multiple job descriptions. Not " "implemented yet." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:331 #, fuzzy msgid "Unable to submit jobs. Failed to delegate X.509 credentials." msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:338 #, fuzzy msgid "Unable to submit jobs. Failed to delegate token." msgstr "Nem sikerült elküldeni a kérést" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:348 msgid "Unable to submit job. Failed to assign delegation to job description." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:388 #, fuzzy msgid "Failed to submit all jobs." msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:398 #, fuzzy, c-format msgid "Failed to submit all jobs: %u %s" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:411 #, fuzzy, c-format msgid "Failed to submit all jobs: %s" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:27 msgid "Querying WSRF GLUE2 computing REST endpoint." msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:60 #, c-format msgid "CONTENT %u: %s" msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:64 #, fuzzy msgid "Response is not XML" msgstr "Válasz: %s" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:69 #, c-format msgid "Parsed domains: %u" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:74 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:94 #, c-format msgid "[ADLParser] Unsupported internal state %s." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:104 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:113 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:125 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:363 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:410 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:453 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:472 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 msgid "[ADLParser] AccessControl isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:513 msgid "[ADLParser] CredentialService must contain valid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:542 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:545 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:603 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:608 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:614 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:631 #, fuzzy msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "A fileset regisztcáció nem támogatott még" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:645 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:653 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:660 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:667 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:696 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:716 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:730 #, fuzzy msgid "[ADLParser] Benchmark is not supported yet." msgstr "A fileset regisztcáció nem támogatott még" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:738 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:755 msgid "[ADLParser] Missing or wrong value in WallTime." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:775 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:786 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:808 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:814 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:827 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:852 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 msgid "End of comment not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 #, fuzzy msgid "Broken string" msgstr "szöveg" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 msgid "Relation operator expected" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must be specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 #, fuzzy msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "A 'sort' vagy az 'rsort' kapcsolókat nem lehet egyszerre használni" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 #, fuzzy msgid "No RSL content in job description found" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:295 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:313 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:331 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:349 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:317 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:322 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:338 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:356 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:360 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:492 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1375 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:557 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:562 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:587 #, fuzzy, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:596 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, fuzzy, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr 
"Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:677 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:682 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 #, fuzzy, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:704 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:714 #, fuzzy, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Érvénytelen URL: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:746 #, c-format msgid "" "Invalid comparison operator '%s' used at 'delegationid' attribute, only \"=" "\" is allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:764 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:770 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:927 #, c-format msgid "Value of attribute '%s' expected not to be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1036 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1050 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1066 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1074 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1077 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1125 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1158 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1201 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1231 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1277 #, c-format msgid "Invalid action value %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1367 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1371 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1385 #, c-format msgid "Wrong language requested: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1722 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." 
msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:92 msgid "Failed to initialize main Python thread" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:97 msgid "Main Python thread was not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, fuzzy, c-format msgid "Loading Python broker (%i)" msgstr "PythonBroker betöltése" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:134 msgid "Main Python thread is not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "PythonBroker betöltése" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, fuzzy, c-format msgid "Class name: %s" msgstr "osztály neve: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, fuzzy, c-format msgid "Module name: %s" msgstr "modul neve: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:178 msgid "Cannot convert ARC module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:186 #, fuzzy msgid "Cannot import ARC module" msgstr "Nem tudom importálni az arc modult" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:196 #: src/services/wrappers/python/pythonwrapper.cpp:429 #, fuzzy msgid "Cannot get dictionary of ARC module" msgstr "Nem tudom importálni az arc modult" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 msgid "Cannot find ARC UserConfig class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 msgid "UserConfig class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 #, fuzzy msgid "Cannot find ARC JobDescription class" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 msgid "JobDescription class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 msgid "Cannot find ARC ExecutionTarget class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 msgid "ExecutionTarget class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:157 msgid "Cannot convert module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:164 msgid "Cannot import module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 msgid "Cannot get dictionary of custom broker module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 msgid "Cannot find custom broker class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, c-format msgid "%s class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 msgid "Cannot create UserConfig argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 msgid "Cannot convert UserConfig to Python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: 
src/services/wrappers/python/pythonwrapper.cpp:253 msgid "Cannot create argument of the constructor" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:261 msgid "Cannot create instance of Python class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, c-format msgid "Python broker constructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, c-format msgid "Python broker destructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 msgid "Cannot create ExecutionTarget argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 msgid "Cannot create JobDescription argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 msgid "Cannot convert JobDescription to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/daemon/unix/daemon.cpp:84 #, c-format msgid "Daemonization fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:95 msgid "Watchdog (re)starting application" msgstr "" #: src/hed/daemon/unix/daemon.cpp:100 #, c-format msgid "Watchdog fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:110 msgid "Watchdog starting monitoring" msgstr "" #: src/hed/daemon/unix/daemon.cpp:136 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:138 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application exit" msgstr "" #: src/hed/daemon/unix/daemon.cpp:149 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:156 msgid "Watchdog detected application timeout or error - killing process" msgstr "" #: src/hed/daemon/unix/daemon.cpp:167 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" #: src/hed/daemon/unix/daemon.cpp:179 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" #: src/hed/daemon/unix/daemon.cpp:200 msgid "Shutdown daemon" msgstr "Démon leállítása" #: src/hed/daemon/unix/main_unix.cpp:47 msgid "shutdown" msgstr "leállítás" #: src/hed/daemon/unix/main_unix.cpp:50 msgid "exit" msgstr "kilép" #: src/hed/daemon/unix/main_unix.cpp:88 msgid "No server config part of config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:163 #, c-format msgid "Unknown log level %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:173 #, c-format msgid "Failed to open log file: %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:205 msgid "Start foreground" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:254 #, c-format msgid "XML config file %s does not exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/hed/daemon/unix/main_unix.cpp:258 src/hed/daemon/unix/main_unix.cpp:273 #, c-format msgid "Failed to load service configuration from file %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:264 #, c-format msgid "INI config file %s does not exist" msgstr "Az INI konfigurációs fájl: %s 
nem létezik" #: src/hed/daemon/unix/main_unix.cpp:269 src/hed/daemon/unix/main_unix.cpp:291 msgid "Error evaluating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:285 msgid "Error loading generated configuration" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:296 msgid "Failed to load service configuration from any default config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:357 msgid "Schema validation error" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:372 msgid "Configuration root element is not " msgstr "" #: src/hed/daemon/unix/main_unix.cpp:388 #, c-format msgid "Cannot switch to group (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:398 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:403 #, c-format msgid "Cannot switch to user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:421 msgid "Failed to load service side MCCs" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:423 src/tests/count/test_service.cpp:29 #: src/tests/echo/test.cpp:30 src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "A szolgáltatás oldali MCC-k betöltÅ‘dtek" #: src/hed/daemon/unix/main_unix.cpp:430 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:87 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:94 #, fuzzy, c-format msgid "Failed to open stdio channel %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/file/DataPointFile.cpp:95 #, c-format msgid "Failed to open stdio channel %d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:335 #, c-format msgid "fsync of file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:348 #, c-format msgid "closing file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:367 #, c-format msgid "File is not accessible: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:373 #: src/hed/dmc/file/DataPointFile.cpp:458 #, c-format msgid "Can't stat file: %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:419 #: src/hed/dmc/file/DataPointFile.cpp:425 #, c-format msgid "Can't stat stdio channel %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:473 #, fuzzy, c-format msgid "%s is not a directory" msgstr "könyvtár" #: src/hed/dmc/file/DataPointFile.cpp:488 #, c-format msgid "Failed to read object %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:501 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:534 #, c-format msgid "File is not accessible %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:507 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:540 #, c-format msgid "Can't delete directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:514 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:547 #, c-format msgid "Can't delete file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:524 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:315 #: src/hed/dmc/http/DataPointHTTP.cpp:1658 #: src/hed/dmc/http/DataPointHTTP.cpp:1676 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1466 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:562 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:582 #, c-format msgid "Creating directory %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:532 src/hed/dmc/srm/DataPointSRM.cpp:168 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:596 #, fuzzy, c-format msgid "Renaming %s to %s" msgstr "Indok : %s" #: src/hed/dmc/file/DataPointFile.cpp:534 #: 
src/hed/dmc/xrootd/DataPointXrootd.cpp:605 #, c-format msgid "Can't rename file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:562 #, c-format msgid "Failed to open %s for reading: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:577 #: src/hed/dmc/file/DataPointFile.cpp:712 #, c-format msgid "Failed to switch user id to %d/%d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:583 #, c-format msgid "Failed to create/open file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:599 #, fuzzy msgid "Failed to create thread" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/file/DataPointFile.cpp:679 #, c-format msgid "Invalid url: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:688 src/hed/libs/data/FileCache.cpp:480 #, c-format msgid "Failed to create directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:701 #: src/hed/dmc/file/DataPointFile.cpp:720 #, fuzzy, c-format msgid "Failed to create file %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/dmc/file/DataPointFile.cpp:732 #, c-format msgid "setting file %s to size %llu" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:755 #, c-format msgid "Failed to preallocate space for %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:794 src/hed/libs/data/FileCache.cpp:854 #, fuzzy, c-format msgid "Failed to clean up file %s: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/file/DataPointFile.cpp:808 #, c-format msgid "Error during file validation. Can't stat file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:812 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, c-format msgid "Using proxy %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, c-format msgid "Using key %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, c-format msgid "Using cert %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, fuzzy, c-format msgid "gfal_open failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, c-format msgid "gfal_close failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, fuzzy, c-format msgid "gfal_read failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 msgid "StopReading starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 msgid "StopReading finished waiting for transfer_condition." 
msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:68 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:73 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:44 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:49 #, c-format msgid "No locations defined for %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, fuzzy, c-format msgid "Failed to set LFC replicas: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:405 msgid "StopWriting starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:407 msgid "StopWriting finished waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, fuzzy, c-format msgid "gfal_opendir failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, fuzzy, c-format msgid "gfal_closedir failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 #, fuzzy msgid "Transfer succeeded" msgstr "Proxy készítés sikeres" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:56 msgid "ftp_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:62 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:78 msgid "ftp_check_callback" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:80 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:108 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:285 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:321 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:731 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:764 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:801 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:932 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:996 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1006 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1014 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1022 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1030 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1036 #, c-format msgid "Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:91 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:107 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained size: %lli" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:147 msgid "check_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:154 msgid "check_ftp: globus_ftp_client_register_read" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:166 msgid "check_ftp: timeout waiting for partial get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:193 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 #: 
src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:204 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:210 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:232 msgid "delete_ftp: timeout waiting for delete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:226 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:280 #, c-format msgid "mkdir_ftp: making %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:289 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:325 msgid "Timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:348 msgid "start_reading_ftp" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:352 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:366 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:382 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:398 msgid "ftp_read_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:402 msgid "ftp_read_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:410 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:664 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:417 msgid "ftp_read_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:438 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:519 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:708 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:129 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:169 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1214 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1248 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1430 #: src/hed/libs/common/Thread.cpp:240 src/hed/libs/common/Thread.cpp:243 #: src/hed/libs/credential/Credential.cpp:1076 #: src/hed/libs/data/DataPointDelegate.cpp:628 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:66 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:82 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:98 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:117 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:127 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:135 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:144 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:69 src/hed/shc/arcpdp/ArcPDP.cpp:234 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:305 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:258 #: src/libs/data-staging/Scheduler.cpp:117 #: src/services/a-rex/delegation/DelegationStore.cpp:36 #: src/services/a-rex/delegation/DelegationStore.cpp:41 #: src/services/a-rex/delegation/DelegationStore.cpp:46 #: src/services/a-rex/delegation/DelegationStore.cpp:75 #: src/services/a-rex/delegation/DelegationStore.cpp:81 #: 
src/services/a-rex/grid-manager/conf/GMConfig.cpp:233 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:408 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:395 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:435 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:487 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:602 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:636 #, c-format msgid "%s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:545 msgid "start_writing_ftp: mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:547 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:549 msgid "start_writing_ftp: put" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:563 msgid "start_writing_ftp: put failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 #: src/hed/libs/data/DataPointDelegate.cpp:307 msgid "StopWriting: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #: src/hed/dmc/http/DataPointHTTP.cpp:982 #: src/hed/libs/data/DataPointDelegate.cpp:321 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #: src/hed/dmc/http/DataPointHTTP.cpp:986 #: src/hed/libs/data/DataPointDelegate.cpp:325 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:800 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:804 msgid "list_files_ftp: timeout waiting for cksum" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:817 #, c-format msgid "list_files_ftp: checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 #: src/hed/dmc/http/DataPointHTTP.cpp:995 #: src/hed/libs/data/DataPointDelegate.cpp:332 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #: src/hed/dmc/http/DataPointHTTP.cpp:997 #: src/hed/libs/data/DataPointDelegate.cpp:334 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #: src/hed/dmc/http/DataPointHTTP.cpp:999 #: src/hed/libs/data/DataPointDelegate.cpp:337 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:662 msgid "ftp_write_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:669 msgid "ftp_write_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:688 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:690 #, c-format msgid "ftp_write_callback: success %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:706 msgid "Failed to store ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:711 msgid "ftp_put_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:725 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:729 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:735 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:736 msgid "list_files_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:742 msgid "list_files_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:755 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:761 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:768 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:776 msgid "list_files_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:790 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:830 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:835 msgid "No results returned from stat" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:841 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:854 #, c-format msgid "Unexpected 
path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:887 #, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:930 msgid "Rename: globus_ftp_client_move failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:936 msgid "Rename: timeout waiting for operation to complete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:995 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1004 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1013 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1020 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1028 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1034 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1086 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1115 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1114 msgid "Failed to set credentials for GridFTP transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1120 msgid "Using secure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1125 msgid "Using insecure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1152 msgid "~DataPoint: destroy ftp_handle" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1155 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1173 msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:390 msgid "ftp_read_thread: failed to register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:405 msgid "ftp_read_thread: failed to release buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:443 #, c-format msgid "ftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%u" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:457 #, c-format msgid "ftp_read_callback: delayed data chunk: %llu %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:464 #, c-format msgid "ftp_read_callback: unexpected data out of order: %llu != %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:471 msgid "ftp_read_callback: too many unexpected out of order chunks" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:492 #, c-format msgid "ftp_read_callback: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:517 msgid "ftp_get_complete_callback: Failed to get ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:522 msgid "ftp_get_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:577 msgid "start_writing_ftp: waiting for data tag" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:580 msgid "start_writing_ftp: failed to read data tag" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:585 msgid "start_writing_ftp: waiting for data chunk" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:587 msgid "start_writing_ftp: failed to read data chunk" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:598 #, c-format msgid "ftp_write_thread: data out of order in stream mode: %llu != %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:605 msgid "ftp_write_thread: too many out of order chunks in stream mode" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:610 #, c-format msgid "start_writing_ftp: data chunk: %llu %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:616 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:642 #, c-format msgid "ftp_write_thread: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:635 #, c-format msgid "start_writing_ftp: delayed data chunk: %llu %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:654 msgid "start_writing_ftp: waiting for some buffers sent" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:660 #, fuzzy msgid "ftp_write_thread: waiting for transfer complete" msgstr "Az aktuális átvitel sikeres" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:811 msgid "list_files_ftp: no checksum information supported" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:813 msgid "list_files_ftp: no checksum information returned" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:908 msgid "Too many failures to obtain checksum - giving up" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1268 msgid "Expecting Command and URL provided" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1275 #: src/hed/libs/data/DataExternalHelper.cpp:376 msgid "Expecting Command among arguments" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1279 #: src/hed/libs/data/DataExternalHelper.cpp:380 msgid "Expecting URL among arguments" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:221 src/hed/dmc/gridftp/Lister.cpp:289 #: src/hed/dmc/gridftp/Lister.cpp:384 src/hed/dmc/gridftp/Lister.cpp:767 #: 
src/hed/dmc/gridftp/Lister.cpp:812 #, c-format msgid "Failure: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:288 msgid "Error getting list of files (in list)" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:290 msgid "Assuming - file not found" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:307 #, c-format msgid "list record: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:362 msgid "Failed reading list of files" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:398 msgid "Failed reading data" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:426 #, c-format msgid "Command: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:430 src/hed/dmc/gridftp/Lister.cpp:471 #: src/hed/mcc/http/PayloadHTTP.cpp:1010 msgid "Memory allocation error" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:438 #, c-format msgid "%s failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:442 msgid "Command is being sent" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:447 msgid "Waiting for response" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:452 msgid "Callback got failure" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:538 msgid "Failed in globus_cond_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:542 msgid "Failed in globus_mutex_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:549 msgid "Failed allocating memory for handle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:554 msgid "Failed in globus_ftp_control_handle_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:562 #, fuzzy msgid "Failed to enable IPv6" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/gridftp/Lister.cpp:573 msgid "Closing connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:580 src/hed/dmc/gridftp/Lister.cpp:595 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:605 msgid "Closed successfully" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:607 msgid "Closing may have failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:634 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:639 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:645 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:661 #, c-format msgid "Failed destroying handle: %s. Can't handle such a situation."
msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:684 #, c-format msgid "EPSV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:688 msgid "EPSV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:695 #, c-format msgid "PASV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:699 msgid "PASV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:765 msgid "Failed to apply local address to data connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:788 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:806 #, c-format msgid "Data channel: [%s]:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:810 msgid "Obtained host and address are not acceptable" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:820 msgid "Failed to open data channel" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:838 #, c-format msgid "Unsupported protocol in url %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:850 msgid "Reusing connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:874 #, c-format msgid "Failed connecting to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:880 #, c-format msgid "Failed to connect to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:896 #, fuzzy msgid "Missing authentication information" msgstr "verzió információ kiírása" #: src/hed/dmc/gridftp/Lister.cpp:905 src/hed/dmc/gridftp/Lister.cpp:919 #, c-format msgid "Bad authentication information: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:928 src/hed/dmc/gridftp/Lister.cpp:943 #, fuzzy, c-format msgid "Failed authenticating: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/dmc/gridftp/Lister.cpp:935 msgid "Failed authenticating" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:970 src/hed/dmc/gridftp/Lister.cpp:1126 #, c-format msgid "DCAU failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:974 src/hed/dmc/gridftp/Lister.cpp:1131 msgid "DCAU failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:994 msgid "MLST is not supported - trying LIST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1010 #, c-format msgid "Immediate completion expected: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1014 msgid "Immediate completion expected" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1027 #, c-format msgid "Missing information in reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1061 #, c-format msgid "Missing final reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1085 #, c-format msgid "Unexpected immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1097 #, c-format msgid "LIST/MLST failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1102 msgid "LIST/MLST failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1152 msgid "MLSD is not supported - trying NLST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1166 #, c-format msgid "Immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1174 #, c-format msgid "NLST/MLSD failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1180 msgid "NLST/MLSD failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1201 #, c-format msgid "Data transfer aborted: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1206 msgid "Data transfer aborted" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1218 msgid "Failed to transfer data" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:409 #: src/hed/dmc/http/DataPointHTTP.cpp:597 #: src/hed/dmc/http/DataPointHTTP.cpp:691 #: src/hed/dmc/http/DataPointHTTP.cpp:1137 #: 
src/hed/dmc/http/DataPointHTTP.cpp:1282 #: src/hed/dmc/http/DataPointHTTP.cpp:1431 #, c-format msgid "Redirecting to %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:461 #, fuzzy, c-format msgid "PROPFIND response: %s" msgstr "Válasz: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:515 #, c-format msgid "Using checksum %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:523 #, c-format msgid "No matching checksum type, using first in list %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:616 #: src/hed/dmc/http/DataPointHTTP.cpp:710 msgid "No information returned by PROPFIND" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:767 #, c-format msgid "Stat: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:771 #, c-format msgid "Stat: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:775 #, c-format msgid "Stat: obtained checksum %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:991 #, c-format msgid "Could not find checksum: %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:993 #, c-format msgid "Checksum of %s is not available" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1037 #, c-format msgid "Check: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1039 #, c-format msgid "Check: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1154 #: src/hed/dmc/http/DataPointHTTP.cpp:1302 #, c-format msgid "HTTP failure %u - %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1459 #, fuzzy, c-format msgid "Failed to create %s, trying to create parent directories" msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #: src/hed/dmc/http/DataPointHTTP.cpp:1648 #, fuzzy, c-format msgid "Error creating directory: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/rucio/DataPointRucio.cpp:27 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:40 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:43 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:105 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:108 msgid "Failed to extract VOMS nickname from proxy" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:110 #, c-format msgid "Using Rucio account %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:123 #, c-format msgid "Strange path in Rucio URL: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:133 src/hed/libs/common/FileLock.cpp:42 msgid "Cannot determine hostname from gethostname()" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:171 #, c-format msgid "Bad path for %s: Format should be /replicas//" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:190 #, fuzzy, c-format msgid "Failed to query parent DIDs: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/rucio/DataPointRucio.cpp:195 #, fuzzy, c-format msgid "Failed to parse Rucio info: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/rucio/DataPointRucio.cpp:242 #: src/hed/dmc/rucio/DataPointRucio.cpp:522 #, c-format msgid "No locations found for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:333 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:383 #, c-format msgid "Rucio returned %s" msgstr "" #: 
src/hed/dmc/rucio/DataPointRucio.cpp:452 #: src/hed/dmc/rucio/DataPointRucio.cpp:543 #, fuzzy, c-format msgid "Failed to parse Rucio response: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/rucio/DataPointRucio.cpp:457 #: src/hed/dmc/rucio/DataPointRucio.cpp:548 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:462 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:467 #, fuzzy, c-format msgid "No pfns returned in Rucio response: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/rucio/DataPointRucio.cpp:477 #, c-format msgid "Cannot determine replica type for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:479 #, c-format msgid "%s: replica type %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:482 #, c-format msgid "Skipping %s replica %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:498 #, c-format msgid "Error extracting RSE for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:508 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:511 #, c-format msgid "%s: size %llu" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:515 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:518 #, c-format msgid "%s: checksum %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:553 #, fuzzy, c-format msgid "Parent dataset: %s" msgstr "Azonosító: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:575 #, c-format msgid "Could not find matching RSE to %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:617 #, c-format msgid "Sending Rucio trace: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:620 #, fuzzy, c-format msgid "Failed to send traces to Rucio: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/s3/DataPointS3.cpp:269 #, c-format msgid "Initializing S3 connection to %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:274 #, fuzzy, c-format msgid "Failed to initialize S3 to %s: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/dmc/s3/DataPointS3.cpp:470 src/hed/dmc/s3/DataPointS3.cpp:592 #, fuzzy, c-format msgid "Failed to read object %s: %s; %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/dmc/s3/DataPointS3.cpp:669 #, fuzzy, c-format msgid "Failed to write object %s: %s; %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/dmc/srm/DataPointSRM.cpp:56 #, c-format msgid "TURL %s cannot be handled" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:83 #, c-format msgid "Check: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:94 #, c-format msgid "Check: obtained size: %lli" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:100 #, c-format msgid "Check: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:104 #, c-format msgid "Check: obtained modification date: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:108 msgid "Check: obtained access latency: low (ONLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:112 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:131 #, c-format msgid "Remove: deleting: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:149 #, c-format msgid "Creating directory: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:197 src/hed/dmc/srm/DataPointSRM.cpp:246 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:217 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:226 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:231 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:237 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:265 src/hed/dmc/srm/DataPointSRM.cpp:408 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:465 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:328 src/hed/dmc/srm/DataPointSRM.cpp:507 #, c-format msgid "Redirecting to new URL: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:389 msgid "Calling PrepareWriting when request was already prepared!" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:418 msgid "No space token specified" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:427 #, c-format msgid "Using space token description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:433 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:437 #, c-format msgid "No space tokens found matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:442 #, c-format msgid "Using space token %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:487 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:495 msgid "StartWriting" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:497 msgid "StartWriting: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:556 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:571 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:574 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:577 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:580 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " 
"cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:581 src/hed/dmc/srm/DataPointSRM.cpp:582 msgid "No checksum information from server" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:583 src/hed/dmc/srm/DataPointSRM.cpp:584 msgid "No checksum verification possible" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:590 #, fuzzy msgid "Failed to release completed request" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/srm/DataPointSRM.cpp:633 src/hed/dmc/srm/DataPointSRM.cpp:700 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:818 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:51 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:90 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:142 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:181 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:221 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:259 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:303 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:365 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:438 msgid "SRM did not return any information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:316 #, c-format msgid "File could not be moved to Running state: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:372 msgid "SRM did not return any useful information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:450 msgid "File could not be moved to Done state" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:88 msgid "Could not determine version of server" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:94 #, c-format msgid "Server SRM version: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:99 #, c-format msgid "Server implementation: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:136 #, c-format msgid "Adding space token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:163 msgid "No request tokens found" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:176 #, c-format msgid "Adding request token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:237 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:642 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:828 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1385 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:275 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:327 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:698 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:764 #, c-format msgid "File is ready! TURL is %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:359 #, c-format msgid "Setting userRequestDescription to %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:414 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:457 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1160 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1194 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1228 msgid "No request token specified!" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:524 msgid "Request is reported as ABORTED, but all files are done" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:530 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:536 #, c-format msgid "Request is reported as ABORTED. 
Reason: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:673 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:745 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:678 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:750 #, c-format msgid "Error creating required directories for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:851 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:899 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:936 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:942 #: src/hed/shc/legacy/auth_otokens.cpp:437 #, c-format msgid "%s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:975 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1185 #, c-format msgid "Files associated with request token %s released successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1219 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1254 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1271 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1277 msgid "Type is file, calling srmRm" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is dir, calling srmRmDir" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "File type is not available, attempting file delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1288 msgid "File delete failed, attempting directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1313 #, c-format msgid "File %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1340 #, c-format msgid "Directory %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1455 #, c-format msgid "Checking for existence of %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1458 #, c-format msgid "File already exists: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1495 #, c-format msgid "Error creating directory %s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, fuzzy, c-format msgid "SOAP request: %s" msgstr "Kérés: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "" #: 
src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #, fuzzy, c-format msgid "SOAP response: %s" msgstr "Válasz: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:75 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:161 #, fuzzy, c-format msgid "Failed to acquire lock on file %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:80 #, fuzzy, c-format msgid "Error reading info from file %s:%s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:94 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:186 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:99 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:202 #, c-format msgid "Error writing srm info file %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:81 msgid "" "Missing reference to factory and/or module. It is unsafe to use Xrootd in " "non-persistent mode - Xrootd code is disabled. Report to developers." msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:120 #, c-format msgid "Could not handle checksum %s: skip checksum check" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:126 #, fuzzy, c-format msgid "Failed to create xrootd copy job: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:143 #, fuzzy, c-format msgid "Failed to copy %s: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:194 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:196 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:227 #, c-format msgid "Could not open file %s for reading: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:242 #, c-format msgid "Unable to find file size of %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:306 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:329 #, c-format msgid "xrootd write failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:338 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "xrootd close failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:361 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:374 #, c-format msgid "xrootd open failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:388 #, fuzzy, c-format msgid "close failed: %s" msgstr "Fájl feltöltve %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:430 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:449 #, c-format msgid "Could not stat file %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:454 msgid "Not getting checksum of zip constituent" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:458 #, c-format msgid "Could not get checksum of %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:462 #, c-format msgid "Checksum %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:500 #, fuzzy, c-format msgid "Failed to open directory %s: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:518 #, c-format msgid "Error 
while reading dir %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:568 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:586 #, c-format msgid "Error creating required dirs: %s" msgstr "" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "" #: src/hed/identitymap/IdentityMap.cpp:219 src/hed/shc/legacy/LegacyMap.cpp:221 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "" #: src/hed/libs/common/ArcLocation.cpp:129 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/FileLock.cpp:92 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:96 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:103 #, c-format msgid "Error creating temporary file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:113 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:124 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:128 #, c-format msgid "Error creating lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error writing to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:141 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:150 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" #: src/hed/libs/common/FileLock.cpp:158 #, c-format msgid "%li seconds since lock file %s was created" msgstr "" #: src/hed/libs/common/FileLock.cpp:161 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:165 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:178 #, c-format msgid "This process already owns the lock on %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:182 #, c-format msgid "" "The process owning the lock on %s is no longer running, 
will remove lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:184 #, fuzzy, c-format msgid "Failed to remove file %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/common/FileLock.cpp:193 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:210 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:222 #, c-format msgid "Lock file %s doesn't exist" msgstr "" #: src/hed/libs/common/FileLock.cpp:224 #, c-format msgid "Error listing lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:230 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" #: src/hed/libs/common/FileLock.cpp:236 #, c-format msgid "Error reading lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:240 #, c-format msgid "Error with formatting in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:250 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "" #: src/hed/libs/common/FileLock.cpp:259 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:262 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "" #: src/hed/libs/common/Logger.cpp:58 #, c-format msgid "Invalid log level. Using default %s." msgstr "" #: src/hed/libs/common/Logger.cpp:123 #, c-format msgid "Invalid old log level. Using default %s." msgstr "" #: src/hed/libs/common/OptionParser.cpp:106 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "" #: src/hed/libs/common/OptionParser.cpp:309 #: src/hed/libs/common/OptionParser.cpp:442 #, c-format msgid "Options Group %s:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:311 #: src/hed/libs/common/OptionParser.cpp:445 #, c-format msgid "%s:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:313 #, c-format msgid "Show %s help options" msgstr "" #: src/hed/libs/common/OptionParser.cpp:348 msgid "Use -? to get usage description" msgstr "" #: src/hed/libs/common/OptionParser.cpp:425 msgid "Usage:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:428 msgid "OPTION..." msgstr "" #: src/hed/libs/common/OptionParser.cpp:434 msgid "Help Options:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:435 msgid "Show help options" msgstr "" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." 
msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." msgstr "" #: src/hed/libs/common/Run_unix.cpp:225 msgid "Child monitoring signal detected" msgstr "" #: src/hed/libs/common/Run_unix.cpp:230 #, c-format msgid "Child monitoring error: %i" msgstr "" #: src/hed/libs/common/Run_unix.cpp:243 msgid "Child monitoring kick detected" msgstr "" #: src/hed/libs/common/Run_unix.cpp:246 msgid "Child monitoring internal communication error" msgstr "" #: src/hed/libs/common/Run_unix.cpp:258 msgid "Child monitoring stdout is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:268 msgid "Child monitoring stderr is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:278 msgid "Child monitoring stdin is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:296 #, c-format msgid "Child monitoring child %d exited" msgstr "" #: src/hed/libs/common/Run_unix.cpp:300 #, c-format msgid "Child monitoring lost child %d (%d)" msgstr "" #: src/hed/libs/common/Run_unix.cpp:321 #, c-format msgid "Child monitoring drops abandoned child %d (%d)" msgstr "" #: src/hed/libs/common/Run_unix.cpp:484 msgid "Child was already started" msgstr "" #: src/hed/libs/common/Run_unix.cpp:488 msgid "No arguments are assigned for external process" msgstr "" #: src/hed/libs/common/Run_unix.cpp:621 #, c-format msgid "Excepton while trying to start external process: %s" msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:256 msgid "Maximum number of threads running - putting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:304 #, c-format msgid "Thread exited with Glib error: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:306 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:137 #, c-format msgid "URL is not valid: %s" msgstr "" #: src/hed/libs/common/URL.cpp:188 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "" #: src/hed/libs/common/URL.cpp:193 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "" #: src/hed/libs/common/URL.cpp:282 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "" #: src/hed/libs/common/URL.cpp:298 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" #: src/hed/libs/common/URL.cpp:306 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" #: src/hed/libs/common/URL.cpp:322 #, c-format msgid "Invalid port number in %s" msgstr "" #: src/hed/libs/common/URL.cpp:455 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "" #: src/hed/libs/common/URL.cpp:618 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:717 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1186 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "" #: src/hed/libs/common/URL.cpp:1191 #, c-format msgid "URL protocol is not urllist: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:38 src/hed/libs/common/UserConfig.cpp:831 #: src/hed/libs/common/UserConfig.cpp:840 #: src/hed/libs/common/UserConfig.cpp:846 #: src/hed/libs/common/UserConfig.cpp:872 #: src/hed/libs/common/UserConfig.cpp:884 #: 
src/hed/libs/common/UserConfig.cpp:896 #: src/hed/libs/common/UserConfig.cpp:916 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:139 #, fuzzy, c-format msgid "Wrong ownership of certificate file: %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/common/UserConfig.cpp:141 #, fuzzy, c-format msgid "Wrong permissions of certificate file: %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/common/UserConfig.cpp:143 #, c-format msgid "Can not access certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:150 #, c-format msgid "Wrong ownership of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:152 #, c-format msgid "Wrong permissions of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:154 #, c-format msgid "Can not access key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:161 #, c-format msgid "Wrong ownership of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:163 #, c-format msgid "Wrong permissions of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:165 #, c-format msgid "Can not access proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:176 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:178 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:277 #: src/hed/libs/common/UserConfig.cpp:281 #: src/hed/libs/common/UserConfig.cpp:328 #: src/hed/libs/common/UserConfig.cpp:332 #, c-format msgid "System configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:285 #: src/hed/libs/common/UserConfig.cpp:336 #, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:287 #: src/hed/libs/common/UserConfig.cpp:338 #, c-format msgid "System configuration file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:293 #: src/hed/libs/common/UserConfig.cpp:305 #: src/hed/libs/common/UserConfig.cpp:344 #: src/hed/libs/common/UserConfig.cpp:356 #, c-format msgid "User configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:298 #: src/hed/libs/common/UserConfig.cpp:349 msgid "No configuration file could be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:301 #: src/hed/libs/common/UserConfig.cpp:352 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:438 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:450 #, c-format msgid "" "Unsupported job list type '%s', using 'SQLITE'. Supported types are: SQLITE, " "XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:511 msgid "Loading OToken failed - ignoring its presence" msgstr "" #: src/hed/libs/common/UserConfig.cpp:652 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:654 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or " "'%s' attributes in the client configuration file (e.g. '%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:672 #: src/hed/libs/common/UserConfig.cpp:682 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:708 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:730 #, c-format msgid "Using proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:733 #, c-format msgid "Using certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:734 #, c-format msgid "Using key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:738 #, c-format msgid "Using CA certificate directory: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:742 msgid "Using OToken" msgstr "" #: src/hed/libs/common/UserConfig.cpp:755 #: src/hed/libs/common/UserConfig.cpp:761 #, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "" #: src/hed/libs/common/UserConfig.cpp:767 #, c-format msgid "Can not access VOMS file/directory: %s." msgstr "" #: src/hed/libs/common/UserConfig.cpp:781 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:794 #, c-format msgid "Loading configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:828 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:853 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:869 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:891 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:937 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:982 #, c-format msgid "Unknown section %s, ignoring it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:986 #, c-format msgid "Configuration (%s) loaded" msgstr "" #: src/hed/libs/common/UserConfig.cpp:989 #, c-format msgid "Could not load configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1086 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1099 #, c-format msgid "Unable to create %s directory." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:1108 #, c-format msgid "Configuration example file created (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1110 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1115 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1120 #, c-format msgid "Example configuration (%s) not created." msgstr "" #: src/hed/libs/common/UserConfig.cpp:1125 #, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "" #: src/hed/libs/common/UserConfig.cpp:1143 #, c-format msgid "%s directory created" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1145 #: src/hed/libs/common/UserConfig.cpp:1172 src/hed/libs/data/DataMover.cpp:703 #, c-format msgid "Failed to create directory %s" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:312 msgid "Succeeded to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "Failed to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: 
src/hed/libs/communication/ClientX509Delegation.cpp:56 msgid "Creating delegation credential to ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:66 #: src/hed/libs/communication/ClientX509Delegation.cpp:269 msgid "DelegateCredentialsInit failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:70 #: src/hed/libs/communication/ClientX509Delegation.cpp:124 #: src/hed/libs/communication/ClientX509Delegation.cpp:159 #: src/hed/libs/communication/ClientX509Delegation.cpp:214 #: src/hed/libs/communication/ClientX509Delegation.cpp:273 msgid "There is no SOAP response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:75 msgid "There is no X509 request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:80 msgid "There is no Format request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:88 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:101 #: src/hed/libs/communication/ClientX509Delegation.cpp:189 msgid "DelegateProxy failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:120 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:128 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:136 #: src/hed/libs/communication/ClientX509Delegation.cpp:164 #: src/hed/libs/communication/ClientX509Delegation.cpp:219 #: src/hed/libs/communication/ClientX509Delegation.cpp:304 msgid "There is no SOAP connection chain configured" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:142 msgid "Creating delegation to CREAM delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:155 msgid "Delegation getProxyReq request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:175 msgid "Creating delegation to CREAM delegation service failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:210 msgid "Delegation putProxy request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:224 msgid "Creating delegation to CREAM delegation failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:239 msgid "Getting delegation credential from ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:278 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:283 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:291 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:300 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" #: src/hed/libs/compute/Broker.cpp:54 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:64 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:153 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "" "Matchmaking, ComputingShareName of ExecutionTarget (%s) is not defined, but " "requested queue is (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "" "Matchmaking, ComputingShare (%s) does not match requested queue (%s): " "skipping" msgstr "" #: src/hed/libs/compute/Broker.cpp:184 #, c-format msgid "Matchmaking, ComputingShare (%s) matches requested queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:192 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:197 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "" #: src/hed/libs/compute/Broker.cpp:203 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK or WARNING (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:208 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:215 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: " "%s" msgstr "" #: src/hed/libs/compute/Broker.cpp:220 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:246 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:275 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:287 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:302 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:309 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:314 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:320 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:325 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:333 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:339 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:344 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:352 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:357 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:365 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:370 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:378 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:383 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:391 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:396 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:405 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:409 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:417 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:424 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:430 src/hed/libs/compute/Broker.cpp:451 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:438 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:445 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:459 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:464 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:472 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:478 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:484 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:492 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:497 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:505 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:512 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:535 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:552 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:588 msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "" #: src/hed/libs/compute/Broker.cpp:612 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, fuzzy, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "%s bróker betöltve" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:27 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:31 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:38 #, c-format msgid "Uniq is adding service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:61 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:64 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:67 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, c-format msgid "Failed to start querying the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:98 #: src/hed/libs/compute/JobControllerPlugin.cpp:107 #: src/hed/libs/compute/SubmitterPlugin.cpp:167 #: src/hed/libs/compute/SubmitterPlugin.cpp:177 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, c-format msgid "%s plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, fuzzy, c-format msgid "%s %s could not be created." msgstr "A feladatot nem sikerült megölni vagy letörölni" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, fuzzy, c-format msgid "Loaded %s %s" msgstr "Feltöltve %s" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:132 #, c-format msgid "" "Computing endpoint %s (type %s) added to the list for submission brokering" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, fuzzy, c-format msgid "Address: %s" msgstr "Válasz: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, fuzzy, c-format msgid "Place: %s" msgstr "Név: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, fuzzy, c-format msgid "Country: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Postal code: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:243 #, c-format msgid "Latitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:244 #, c-format msgid "Longitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:250 #, c-format msgid "Owner: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:257 #, c-format msgid "ID: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:258 #, fuzzy, c-format msgid "Type: %s" msgstr "Proxy típusa: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:263 #, fuzzy, c-format msgid "URL: %s" msgstr "Érvénytelen URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, fuzzy, c-format msgid "Interface: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:266 #, fuzzy msgid "Interface versions:" msgstr "Felhasználó oldali hiba" #: src/hed/libs/compute/ExecutionTarget.cpp:271 msgid "Interface extensions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:276 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, c-format msgid "Technology: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:282 msgid "Supported Profiles:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, fuzzy, c-format msgid "Implementor: %s" msgstr "modul neve: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, fuzzy, c-format msgid "Implementation name: %s" msgstr "modul neve: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, c-format msgid "Quality level: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, fuzzy, c-format msgid "Health state info: %s" msgstr "Célállomás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:291 #, c-format msgid "Serving state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:292 #, c-format msgid "Issuer CA: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:294 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Downtime starts: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:299 #, c-format msgid "Downtime ends: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, fuzzy, c-format msgid "Staging: %s" msgstr "Célállomás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:302 #, fuzzy msgid "Job descriptions:" msgstr "Feladat leírás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:314 #, fuzzy, c-format msgid "Scheme: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:317 #, fuzzy, c-format msgid "Rule: %s" msgstr "Kérés: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, fuzzy, c-format msgid "Mapping queue: %s" msgstr "Kérés: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Max wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Max total wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Min 
wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Default wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Max CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Min CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Default CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max user running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max slots per job: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Max stage in streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max stage out streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Scheduling policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Max virtual memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:348 #, c-format msgid "Max disk space: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:349 #, c-format msgid "Default Storage Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:350 msgid "Supports preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:351 msgid "Doesn't support preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Local suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Staging jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Estimated average waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:362 #, c-format msgid "Estimated worst waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:363 #, c-format msgid "Free slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:365 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:368 #, c-format msgid " %s: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:369 #, c-format msgid " unspecified: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Used slots: %i" msgstr "" #: 
src/hed/libs/compute/ExecutionTarget.cpp:373 #, fuzzy, c-format msgid "Requested slots: %i" msgstr "Kérés: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:374 #, c-format msgid "Reservation policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:381 #, fuzzy, c-format msgid "Resource manager: %s" msgstr "modul neve: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid " (%s)" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:387 #, c-format msgid "Total physical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:388 #, c-format msgid "Total logical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:389 #, c-format msgid "Total slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Supports advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Doesn't support advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:392 msgid "Supports bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Doesn't support bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:394 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:395 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:397 #, fuzzy msgid "Network information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:402 msgid "Working area is shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:403 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Working area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:405 #, c-format msgid "Working area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:406 #, c-format msgid "Working area life time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:407 #, c-format msgid "Cache area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:408 #, c-format msgid "Cache area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:414 #, fuzzy, c-format msgid "Platform: %s" msgstr "Név: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment supports inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment does not support inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:417 msgid "Execution environment supports outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:418 msgid "Execution environment does not support outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:419 msgid "Execution environment is a virtual machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:420 msgid "Execution environment is a physical machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "CPU vendor: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "CPU model: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, fuzzy, c-format msgid "CPU version: %s" msgstr "%s verzió %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:425 #, c-format msgid "Main memory size: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:426 #, c-format msgid "OS family: %s" msgstr "" #:
src/hed/libs/compute/ExecutionTarget.cpp:427 #, fuzzy, c-format msgid "OS name: %s" msgstr "Név: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:428 #, fuzzy, c-format msgid "OS version: %s" msgstr "%s verzió %s" #: src/hed/libs/compute/ExecutionTarget.cpp:435 msgid "Computing service:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:459 #, c-format msgid "%d Endpoints" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:464 #, fuzzy msgid "Endpoint Information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:476 #, c-format msgid "%d Batch Systems" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:481 msgid "Batch System Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:487 msgid "Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:500 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:505 #, fuzzy msgid "Share Information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:515 msgid "Mapping policy:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #, c-format msgid " Computing endpoint URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:535 #, c-format msgid " Computing endpoint interface name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:537 #: src/hed/libs/compute/Job.cpp:579 #, c-format msgid " Queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:540 #, c-format msgid " Mapping queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:543 #, c-format msgid " Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:548 msgid "Service information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:553 msgid " Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:560 #, fuzzy msgid "Batch system information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/ExecutionTarget.cpp:563 msgid "Queue information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:570 #, fuzzy msgid " Benchmark information:" msgstr "verzió információ kiírása" #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." 
msgstr "" #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/libs/compute/GLUE2.cpp:420 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" #: src/hed/libs/compute/Job.cpp:328 msgid "Unable to detect format of job record." msgstr "" #: src/hed/libs/compute/Job.cpp:549 #, c-format msgid "Job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:551 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:552 #, fuzzy, c-format msgid " State: %s" msgstr "Név: %s" #: src/hed/libs/compute/Job.cpp:555 #, c-format msgid " Specific state: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:559 src/hed/libs/compute/Job.cpp:583 #, c-format msgid " Waiting Position: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:563 #, c-format msgid " Exit Code: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:567 #, fuzzy, c-format msgid " Job Error: %s" msgstr "Feladat leírás: %s" #: src/hed/libs/compute/Job.cpp:572 #, c-format msgid " Owner: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:576 #, c-format msgid " Other Messages: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:581 #, fuzzy, c-format msgid " Requested Slots: %d" msgstr "Kérés: %s" #: src/hed/libs/compute/Job.cpp:586 #, c-format msgid " Stdin: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:588 #, c-format msgid " Stdout: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:590 #, c-format msgid " Stderr: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:592 #, c-format msgid " Computing Service Log Directory: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:595 #, c-format msgid " Submitted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:598 #, c-format msgid " End Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:601 #, c-format msgid " Submitted from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:604 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:607 #, c-format msgid " Requested CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s (%s per slot)" msgstr "" #: src/hed/libs/compute/Job.cpp:616 #, c-format msgid " Used CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Used Wall Time: %s (%s per slot)" msgstr "" #: src/hed/libs/compute/Job.cpp:626 #, c-format msgid " Used Wall Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Used Memory: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:635 #, c-format msgid " Results were deleted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:636 #, c-format msgid " Results must be retrieved before: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:640 #, c-format msgid " Proxy valid until: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:644 #, c-format msgid " Entry valid from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Entry valid for: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:651 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:659 #, c-format msgid " ID on service: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:660 #, c-format msgid " Service information URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:661 #, c-format msgid " Job status URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:662 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:663 #, c-format msgid " Stagein directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:664 #, c-format msgid " Stageout directory URL: %s" msgstr "" #: 
src/hed/libs/compute/Job.cpp:665 #, c-format msgid " Session directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:667 msgid " Delegation IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:849 #, c-format msgid "Unable to handle job (%s), no interface specified." msgstr "" #: src/hed/libs/compute/Job.cpp:854 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:876 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:881 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:885 #, c-format msgid "Downloading job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:891 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:897 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of log " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:903 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:911 #, c-format msgid "%s directory exists! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:923 #, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:944 #, c-format msgid "Unable to retrieve list of log files to download for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:963 #, c-format msgid "No files to retrieve for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:969 #, c-format msgid "Failed to create directory %s! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:986 #, fuzzy, c-format msgid "Failed downloading %s to %s, destination already exists" msgstr "Nem sikerült feloldani a célállomást" #: src/hed/libs/compute/Job.cpp:992 #, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "" #: src/hed/libs/compute/Job.cpp:999 #, c-format msgid "Failed downloading %s to %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1012 #, fuzzy, c-format msgid "Unable to initialize handler for %s" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/Job.cpp:1017 #, c-format msgid "Unable to list files at %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1060 msgid "Now copying (from -> to)" msgstr "" #: src/hed/libs/compute/Job.cpp:1061 #, c-format msgid " %s -> %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1076 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1087 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1109 #, c-format msgid "File download failed: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1148 src/hed/libs/compute/Job.cpp:1177 #: src/hed/libs/compute/Job.cpp:1209 src/hed/libs/compute/Job.cpp:1242 #, c-format msgid "Waiting for lock on file %s" msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:99 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found."
msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:108 #, fuzzy, c-format msgid "JobControllerPlugin %s could not be created" msgstr "A feladat vezérlÅ‘ modult nem sikerült betölteni" #: src/hed/libs/compute/JobControllerPlugin.cpp:113 #, fuzzy, c-format msgid "Loaded JobControllerPlugin %s" msgstr "A feladat vezérlÅ‘ modult nem sikerült betölteni" #: src/hed/libs/compute/JobDescription.cpp:26 #, c-format msgid ": %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:28 #, c-format msgid ": %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:144 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:154 #, fuzzy, c-format msgid " Annotation: %s" msgstr "Célállomás: %s" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:166 #, c-format msgid " Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:177 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:180 #, c-format msgid " RemoteLogging: %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:188 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:189 #, c-format msgid " Environment: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #, c-format msgid " PreExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:205 #: src/hed/libs/compute/JobDescription.cpp:223 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:208 #: src/hed/libs/compute/JobDescription.cpp:226 msgid " No exit code for successful execution specified." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:220 #, c-format msgid " PostExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:236 #, c-format msgid " Access control: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:240 #, c-format msgid " Processing start time: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:243 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:257 #, c-format msgid " Credential service: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:267 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:285 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:301 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:304 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:314 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:317 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:322 msgid " Run time environment requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:334 msgid " Inputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:335 #: src/hed/libs/compute/JobDescription.cpp:357 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:337 msgid " Is executable: true" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:341 #, fuzzy, c-format msgid " Sources: %s" msgstr "Forrás: %s" #: src/hed/libs/compute/JobDescription.cpp:343 #, c-format msgid " Sources.DelegationID: %s" msgstr "" #: 
src/hed/libs/compute/JobDescription.cpp:347 #, c-format msgid " Sources.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:356 msgid " Outputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:362 #, c-format msgid " Targets.DelegationID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:366 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:373 #, c-format msgid " DelegationID element: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:380 #, c-format msgid " Other attributes: [%s], %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:446 msgid "Empty job description source string" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:479 #, fuzzy msgid "No job description parsers available" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/hed/libs/compute/JobDescription.cpp:481 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:489 #, c-format msgid "%s parsing error" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:505 #, fuzzy msgid "No job description parser was able to interpret job description" msgstr "Feladat leírás elküldve ide: %s" #: src/hed/libs/compute/JobDescription.cpp:515 msgid "" "Job description language is not specified, unable to output description." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:527 #, fuzzy, c-format msgid "Generating %s job description output" msgstr "Egy hiba lépett fel a feladat leírás elkészítése közben." #: src/hed/libs/compute/JobDescription.cpp:543 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:556 #, c-format msgid "Two input files have identical name '%s'." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:575 #: src/hed/libs/compute/JobDescription.cpp:588 #, fuzzy, c-format msgid "Cannot stat local input file '%s'" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/libs/compute/JobDescription.cpp:608 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:650 msgid "Unable to select runtime environment" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:657 msgid "Unable to select middleware" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:664 msgid "Unable to select operating system." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:683 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:695 #, c-format msgid "Test was defined with ID %d, but some error occurred while parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:699 #, fuzzy, c-format msgid "No job description resulted at test %d" msgstr "Feladat leírás elküldve ide: %s" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found."
msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:125 #, fuzzy, c-format msgid "Unable to create data base (%s)" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:133 #, fuzzy, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:142 #, fuzzy, c-format msgid "Unable to create jobs_new table in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #, fuzzy, c-format msgid "Unable to transfer from jobs to jobs_new in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:154 #, fuzzy, c-format msgid "Unable to drop jobs in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:160 #, fuzzy, c-format msgid "Unable to rename jobs table in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:170 #, fuzzy, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:178 #, fuzzy, c-format msgid "Failed checking database (%s)" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:180 #, c-format msgid "Job database connection established successfully (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:202 #, fuzzy, c-format msgid "Error from SQLite: %s: %s" msgstr "Nem tudom olvasni a forrásokat a fájlból: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:205 #, c-format msgid "Error from SQLite: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:226 #: src/hed/libs/compute/JobInformationStorageXML.cpp:36 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." 
msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:230 #: src/hed/libs/compute/JobInformationStorageXML.cpp:40 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:237 #: src/hed/libs/compute/JobInformationStorageXML.cpp:47 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:367 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:374 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:381 #, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:627 #: src/hed/libs/compute/JobInformationStorageXML.cpp:146 #, c-format msgid "Unable to truncate job database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:660 #, fuzzy, c-format msgid "Unable to determine error (%d)" msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/JobInformationStorageXML.cpp:60 #: src/hed/libs/compute/JobInformationStorageXML.cpp:232 #: src/hed/libs/compute/JobInformationStorageXML.cpp:273 #, fuzzy, c-format msgid "Waiting for lock on job list file %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:171 #, c-format msgid "Will remove %s on service %s." msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:40 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:45 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:50 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:55 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:60 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:69 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:76 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "" #: src/hed/libs/compute/Software.cpp:199 src/hed/libs/compute/Software.cpp:210 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:205 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:214 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:219 msgid "All software requirements satisfied." 
msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, fuzzy, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 msgid "Trying all available interfaces" msgstr "" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:63 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:72 #, fuzzy, c-format msgid "Failed reading file %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/compute/SubmitterPlugin.cpp:86 #, fuzzy, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:178 #, c-format msgid "SubmitterPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:183 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 #, fuzzy msgid "Invalid job description" msgstr "Érvénytelen feladat leírás:" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 #, fuzzy msgid "Failed to submit job" msgstr "Feladat újraküldésének megkisérlése ide: %s" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, c-format msgid "Failed to write to local job list %s" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in ADL or XRSL format." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "define the requested format (nordugrid:xrsl, emies:adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 msgid "show the original job description" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:43 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:72 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 #, fuzzy msgid " [ JobDescription tester ] " msgstr "Feladat leírás elküldve ide: %s" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 #, fuzzy msgid "Unable to parse." 
msgstr "Nem sikerült betölteni a %s bróker modult" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/CertUtil.cpp:127 #, c-format msgid "Error number in store context: %i" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:128 msgid "Self-signed certificate" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:131 #, fuzzy, c-format msgid "The certificate with subject %s is not valid" msgstr "Proxy készítés sikertelen: A publikus kulcs érvénytelen." #: src/hed/libs/credential/CertUtil.cpp:134 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:137 #, c-format msgid "Certificate with subject %s has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:140 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:142 #, c-format msgid "Certificate verification error: %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:154 msgid "Can not get the certificate type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:194 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:207 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:214 msgid "The available CRL is not yet valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:223 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:229 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:252 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:270 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:279 msgid "Can't allocate memory for CA policy path" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:325 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:339 #: src/hed/libs/credential/Credential.cpp:1727 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:385 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:428 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:432 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:464 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:48 #, c-format msgid "OpenSSL error string: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:169 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:183 msgid "Can't reset the input" msgstr "" #: src/hed/libs/credential/Credential.cpp:208 #: src/hed/libs/credential/Credential.cpp:244 msgid "Can't get the first byte of input BIO to get its format" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:220 msgid "Can not read certificate/key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:433 #, c-format msgid "Can not find certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:438 #, c-format msgid "Can not read certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:476 msgid "Can not read certificate string" msgstr "" #: src/hed/libs/credential/Credential.cpp:496 msgid "Certificate format is PEM" msgstr "" #: src/hed/libs/credential/Credential.cpp:523 msgid "Certificate format is DER" msgstr "" #: src/hed/libs/credential/Credential.cpp:552 msgid "Certificate format is PKCS" msgstr "" #: src/hed/libs/credential/Credential.cpp:578 msgid "Certificate format is unknown" msgstr "" #: src/hed/libs/credential/Credential.cpp:586 #, c-format msgid "Can not find key file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:591 #, c-format msgid "Can not open key file %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:610 msgid "Can not read key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:673 #: src/hed/libs/credential/VOMSUtil.cpp:210 msgid "Failed to lock arccredential library in memory" msgstr "" #: src/hed/libs/credential/Credential.cpp:685 msgid "Certificate verification succeeded" msgstr "" #: src/hed/libs/credential/Credential.cpp:689 msgid "Certificate verification failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:702 #: src/hed/libs/credential/Credential.cpp:722 #: src/hed/libs/credential/Credential.cpp:742 #: src/hed/libs/credential/Credential.cpp:1024 #: src/hed/libs/credential/Credential.cpp:2398 #: src/hed/libs/credential/Credential.cpp:2428 msgid "Failed to initialize extensions member for Credential" msgstr "" #: src/hed/libs/credential/Credential.cpp:787 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:799 #, c-format msgid "Unsupported proxy version is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:810 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:857 #, c-format msgid "Error: can't open policy file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:870 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "" #: src/hed/libs/credential/Credential.cpp:929 #: src/hed/libs/credential/Credential.cpp:962 #: src/hed/libs/credential/Credential.cpp:1029 msgid "Certificate/Proxy path is empty" msgstr "" #: src/hed/libs/credential/Credential.cpp:1087 #: src/hed/libs/credential/Credential.cpp:2937 msgid "Failed to duplicate extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1091 #, fuzzy msgid "Failed to add extension into credential extensions" msgstr "Nem sikerült betölteni a konfigurációt" #: src/hed/libs/credential/Credential.cpp:1104 #, fuzzy msgid "Certificate information collection failed" msgstr "verzió információ kiírása" #: src/hed/libs/credential/Credential.cpp:1143 #: src/hed/libs/credential/Credential.cpp:1148 msgid "Can not convert string into ASN1_OBJECT" msgstr "" #: src/hed/libs/credential/Credential.cpp:1155 msgid "Can not create ASN1_OCTET_STRING" msgstr "" #: src/hed/libs/credential/Credential.cpp:1164 msgid "Can not allocate memory for extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1174 msgid "Can not create extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1210 
#: src/hed/libs/credential/Credential.cpp:1378 msgid "BN_set_word failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1219 #: src/hed/libs/credential/Credential.cpp:1387 msgid "RSA_generate_key_ex failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1228 #: src/hed/libs/credential/Credential.cpp:1395 msgid "BN_new || RSA_new failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1239 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1244 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1247 msgid "Generate new X509 request!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1252 msgid "Setting subject name!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1474 msgid "PEM_write_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1290 #: src/hed/libs/credential/Credential.cpp:1331 #: src/hed/libs/credential/Credential.cpp:1506 #: src/hed/libs/credential/Credential.cpp:1526 msgid "Can not create BIO for request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1308 msgid "Failed to write request into string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1335 #: src/hed/libs/credential/Credential.cpp:1340 #: src/hed/libs/credential/Credential.cpp:1530 msgid "Can not set writable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1346 #: src/hed/libs/credential/Credential.cpp:1535 msgid "Wrote request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1348 #: src/hed/libs/credential/Credential.cpp:1538 msgid "Failed to write request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1368 msgid "The credential's private key has already been initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1416 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1426 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1444 #: src/hed/libs/credential/Credential.cpp:1451 #: src/hed/libs/credential/Credential.cpp:2029 #: src/hed/libs/credential/Credential.cpp:2037 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1481 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1491 msgid "Can not generate X509 request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1493 msgid "Can not set private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1591 msgid "Failed to get private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1610 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1618 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1625 msgid "Failed to get public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1663 #, c-format msgid "Certificate chain number %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1691 msgid "NULL BIO passed to InquireRequest" msgstr "" #: src/hed/libs/credential/Credential.cpp:1694 msgid "PEM_read_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1698 msgid "d2i_X509_REQ_bio failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1720
msgid "Missing data in DER encoded PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1732 msgid "Can not create PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1742 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1746 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1762 #, c-format msgid "Cert Type: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1775 #: src/hed/libs/credential/Credential.cpp:1794 msgid "Can not create BIO for parsing request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1780 msgid "Read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1783 msgid "Failed to read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1798 msgid "Can not set readable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1803 msgid "Read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Failed to read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1846 msgid "Can not convert private key to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2010 msgid "Credential is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2016 #, fuzzy msgid "Failed to duplicate X509 structure" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/Credential.cpp:2021 msgid "Failed to initialize X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2044 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2048 #: src/hed/libs/credential/Credential.cpp:2096 msgid "Can not add X509 extension to proxy cert" msgstr "" #: src/hed/libs/credential/Credential.cpp:2064 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2076 #: src/hed/libs/credential/Credential.cpp:2085 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2092 msgid "Can not create extension for keyUsage" msgstr "" #: src/hed/libs/credential/Credential.cpp:2105 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2110 msgid "Can not copy extended KeyUsage extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2115 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2125 msgid "Can not compute digest of public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2136 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2142 msgid "Can not create name entry CN for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2147 msgid "Can not set CN in proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2155 msgid "Can not set issuer's subject for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2160 msgid "Can not set version number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2168 msgid "Can not set serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2174 msgid "Can not duplicate serial number for proxy 
certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2180 msgid "Can not set the lifetime for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2184 msgid "Can not set pubkey for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2200 #: src/hed/libs/credential/Credential.cpp:2827 msgid "The credential to be signed is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #: src/hed/libs/credential/Credential.cpp:2831 msgid "The credential to be signed contains no request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2208 #: src/hed/libs/credential/Credential.cpp:2835 msgid "The BIO for output is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2222 #: src/hed/libs/credential/Credential.cpp:2842 msgid "Error when extracting public key from request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2227 #: src/hed/libs/credential/Credential.cpp:2846 msgid "Failed to verify the request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2231 msgid "Failed to add issuer's extension into proxy" msgstr "" #: src/hed/libs/credential/Credential.cpp:2255 msgid "Failed to find extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2267 msgid "Can not get the issuer's private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2274 #: src/hed/libs/credential/Credential.cpp:2878 msgid "There is no digest in issuer's private key object" msgstr "" #: src/hed/libs/credential/Credential.cpp:2279 #: src/hed/libs/credential/Credential.cpp:2882 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2290 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" #: src/hed/libs/credential/Credential.cpp:2296 msgid "Failed to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2298 msgid "Succeeded to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2303 msgid "Failed to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2305 msgid "Succeeded to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2310 #: src/hed/libs/credential/Credential.cpp:2319 msgid "Output the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2313 msgid "Can not convert signed proxy cert into PEM format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2322 msgid "Can not convert signed proxy cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2338 #: src/hed/libs/credential/Credential.cpp:2361 msgid "Can not create BIO for signed proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2365 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2370 msgid "Wrote signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2373 msgid "Failed to write signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2408 #: src/hed/libs/credential/Credential.cpp:2447 #, fuzzy, c-format msgid "ERROR: %s" msgstr "Érvénytelen URL: %s" #: src/hed/libs/credential/Credential.cpp:2455 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2500 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2505 msgid "error converting number 
from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2532 msgid "file name too long" msgstr "" #: src/hed/libs/credential/Credential.cpp:2555 msgid "error converting serial to ASN.1 format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2588 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2593 msgid "add_word failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2598 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Error initialising X509 store" msgstr "" #: src/hed/libs/credential/Credential.cpp:2625 msgid "Out of memory when generate random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2637 msgid "CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2661 #, c-format msgid "Failed to load extension section: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2698 msgid "malloc error" msgstr "" #: src/hed/libs/credential/Credential.cpp:2702 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2718 #: src/hed/libs/credential/Credential.cpp:2739 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2730 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2767 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2771 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2812 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2822 msgid "The private key for signing is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2901 #, c-format msgid "Error when loading the extension config file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2905 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2953 msgid "Can not sign a EEC" msgstr "" #: src/hed/libs/credential/Credential.cpp:2957 msgid "Output EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2960 msgid "Can not convert signed EEC cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2974 #: src/hed/libs/credential/Credential.cpp:2993 msgid "Can not create BIO for signed EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2997 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:3002 msgid "Wrote signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:3005 msgid "Failed to write signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:143 msgid "Error writing raw certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:222 msgid "Failed to add RFC proxy OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:225 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:231 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:234 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:240 msgid "Failed to add 
inheritAll OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:243 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:249 msgid "Failed to add Independent OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:252 #, c-format msgid "Succeeded to add Independent OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:258 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:261 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:290 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:301 msgid "Succeeded to initialize NSS" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:323 #, c-format msgid "Failed to read attribute %x from private key." msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:375 msgid "Succeeded to get credential" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:376 #, fuzzy msgid "Failed to get credential" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:438 msgid "p12 file is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:448 msgid "Unable to write to p12 file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:464 #, fuzzy msgid "Failed to open p12 file" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/credential/NSSUtil.cpp:492 msgid "Failed to allocate p12 context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1200 msgid "Failed to find issuer certificate for proxy certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1351 #, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1357 #, c-format msgid "Failed to find certificates by nickname: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1375 #: src/hed/libs/credential/NSSUtil.cpp:1411 msgid "Certificate does not have a slot" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1381 msgid "Failed to create export context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1396 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1403 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1424 #, fuzzy msgid "Failed to create key or certificate safe" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1440 #, fuzzy msgid "Failed to add certificate and key" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1449 #, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1454 #, fuzzy msgid "Failed to encode PKCS12" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:1457 msgid "Succeeded to export PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1485 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1491 #, fuzzy msgid "Failed to delete certificate" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1505 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1510 #: src/hed/libs/credential/NSSUtil.cpp:2939 #: 
src/hed/libs/credential/NSSUtil.cpp:2956 #, c-format msgid "Failed to authenticate to token %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1517 #, c-format msgid "No private key with nickname %s exists in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1550 #, fuzzy msgid "Failed to delete private key and certificate" msgstr "A privát és publikus kulcs tárolására szolgáló könyvtár" #: src/hed/libs/credential/NSSUtil.cpp:1560 #, fuzzy msgid "Failed to delete private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, c-format msgid "Can not find key with name: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1599 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1601 msgid "Can not read PEM private key: failed to decrypt" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1603 #: src/hed/libs/credential/NSSUtil.cpp:1605 msgid "Can not read PEM private key: failed to obtain password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1606 msgid "Can not read PEM private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1613 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1650 #, fuzzy msgid "Failed to load private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Succeeded to load PrivateKeyInfo" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1654 msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1655 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1692 #, fuzzy msgid "Failed to import private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1695 msgid "Succeeded to import private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1708 #: src/hed/libs/credential/NSSUtil.cpp:1750 #: src/hed/libs/credential/NSSUtil.cpp:2889 msgid "Failed to authenticate to key database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1717 msgid "Succeeded to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1719 msgid "Failed to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1724 #, fuzzy msgid "Failed to export private key" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:1791 msgid "Failed to create subject name" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1807 #, fuzzy msgid "Failed to create certificate request" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:1820 msgid "Failed to call PORT_NewArena" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1828 msgid "Failed to encode the certificate request with DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1835 msgid "Unknown key or hash type" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1841 #, fuzzy msgid "Failed to sign the certificate request" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:1857 msgid "Failed to output the certificate request as ASCII format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1866 msgid "Failed to output the certificate request as DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1875 #, c-format msgid "Succeeded to output the certificate request into %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1914 #: src/hed/libs/credential/NSSUtil.cpp:1951 msgid "Failed to read data 
from input file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1930 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1941 msgid "Failed to convert ASCII to DER" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1992 msgid "Certificate request is invalid" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2212 #, c-format msgid "The policy language: %s is not supported" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2220 #: src/hed/libs/credential/NSSUtil.cpp:2245 #: src/hed/libs/credential/NSSUtil.cpp:2268 #: src/hed/libs/credential/NSSUtil.cpp:2290 #, fuzzy msgid "Failed to new arena" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:2229 #: src/hed/libs/credential/NSSUtil.cpp:2254 msgid "Failed to create path length" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2232 #: src/hed/libs/credential/NSSUtil.cpp:2257 #: src/hed/libs/credential/NSSUtil.cpp:2277 #: src/hed/libs/credential/NSSUtil.cpp:2299 msgid "Failed to create policy language" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2700 #, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2707 #, c-format msgid "Can not find certificate with name %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2739 msgid "Can not allocate memory" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2747 #, fuzzy, c-format msgid "Proxy subject: %s" msgstr "Tárgy: %s" #: src/hed/libs/credential/NSSUtil.cpp:2764 msgid "Failed to start certificate extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2769 msgid "Failed to add key usage extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2774 msgid "Failed to add proxy certificate information extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2778 msgid "Failed to add voms AC extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2798 #, fuzzy msgid "Failed to retrieve private key for issuer" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Unknown key or hash type of issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2811 msgid "Failed to set signature algorithm ID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2823 #, fuzzy msgid "Failed to encode certificate" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:2829 msgid "Failed to allocate item for certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2835 msgid "Failed to sign encoded certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2844 #, c-format msgid "Failed to open file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2855 #, c-format msgid "Succeeded to output certificate to %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2896 #, fuzzy, c-format msgid "Failed to open input certificate file %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2913 #, fuzzy msgid "Failed to read input certificate file" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2918 #, fuzzy msgid "Failed to get certificate from certificate file" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2925 msgid "Failed to allocate certificate trust" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2930 #, fuzzy msgid "Failed to decode trust string" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/credential/NSSUtil.cpp:2944 #: src/hed/libs/credential/NSSUtil.cpp:2961 msgid "Failed to add certificate 
to token or database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2947 #: src/hed/libs/credential/NSSUtil.cpp:2950 msgid "Succeeded to import certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2964 #: src/hed/libs/credential/NSSUtil.cpp:2967 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2994 #, fuzzy, c-format msgid "Failed to import private key from file: %s" msgstr "privát kulcs elérési útvonala" #: src/hed/libs/credential/NSSUtil.cpp:2996 #, fuzzy, c-format msgid "Failed to import certificate from file: %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/VOMSConfig.cpp:147 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:163 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:186 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:193 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:137 #, fuzzy, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/credential/VOMSUtil.cpp:144 #, fuzzy, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "publikus kulcs elérési útvonala" #: src/hed/libs/credential/VOMSUtil.cpp:302 #: src/hed/libs/credential/VOMSUtil.cpp:571 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:340 #: src/hed/libs/credential/VOMSUtil.cpp:619 #, c-format msgid "VOMS: create attribute: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:917 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:925 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:951 msgid "VOMS: Can not parse AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:981 msgid "" "VOMS: CA directory or CA file must be provided or default setting enabled" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1052 msgid "VOMS: failed to verify AC signature" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1108 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1122 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1157 #, fuzzy, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "Az XML konfigurációs fájl: %s nem létezik" #: src/hed/libs/credential/VOMSUtil.cpp:1163 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1215 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1248 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1268 msgid "VOMS: AC signature verification failed" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1277 msgid "VOMS: unable to verify certificate chain" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1283 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1306 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1332 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1358 #: src/hed/libs/credential/VOMSUtil.cpp:1427 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1365 #: src/hed/libs/credential/VOMSUtil.cpp:1434 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1375 #: src/hed/libs/credential/VOMSUtil.cpp:1450 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1391 #: src/hed/libs/credential/VOMSUtil.cpp:1467 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1400 #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1443 msgid "VOMS: failed to access IETFATTR attribute" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1538 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1556 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1562 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1602 #: src/hed/libs/credential/VOMSUtil.cpp:1721 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1636 #: src/hed/libs/credential/VOMSUtil.cpp:1757 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1641 #: src/hed/libs/credential/VOMSUtil.cpp:1762 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1656 #: src/hed/libs/credential/VOMSUtil.cpp:1777 msgid "VOMS: failed to parse attributes from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1700 #: src/hed/libs/credential/VOMSUtil.cpp:1829 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1861 #: src/hed/libs/credential/VOMSUtil.cpp:2029 #: src/hed/libs/credential/VOMSUtil.cpp:2037 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1878 #: src/hed/libs/credential/VOMSUtil.cpp:2054 msgid "VOMS: unsupported time format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1884 #: src/hed/libs/credential/VOMSUtil.cpp:2060 msgid "VOMS: AC is not yet valid" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1891 #: src/hed/libs/credential/VOMSUtil.cpp:2067 msgid "VOMS: AC has expired" msgstr "" #: 
src/hed/libs/credential/VOMSUtil.cpp:1906 #: src/hed/libs/credential/VOMSUtil.cpp:2080 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1911 #: src/hed/libs/credential/VOMSUtil.cpp:2085 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1912 #: src/hed/libs/credential/VOMSUtil.cpp:2086 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1915 #: src/hed/libs/credential/VOMSUtil.cpp:2089 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number than the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1924 #: src/hed/libs/credential/VOMSUtil.cpp:2098 msgid "VOMS: the holder information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1946 #: src/hed/libs/credential/VOMSUtil.cpp:2120 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1947 #: src/hed/libs/credential/VOMSUtil.cpp:2121 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1948 #: src/hed/libs/credential/VOMSUtil.cpp:2122 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1955 #: src/hed/libs/credential/VOMSUtil.cpp:2129 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1967 #: src/hed/libs/credential/VOMSUtil.cpp:1974 #: src/hed/libs/credential/VOMSUtil.cpp:2141 #: src/hed/libs/credential/VOMSUtil.cpp:2148 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1987 #: src/hed/libs/credential/VOMSUtil.cpp:2160 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1997 #: src/hed/libs/credential/VOMSUtil.cpp:2169 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2005 #: src/hed/libs/credential/VOMSUtil.cpp:2177 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2013 #: src/hed/libs/credential/VOMSUtil.cpp:2185 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2221 #: src/hed/libs/credential/VOMSUtil.cpp:2233 #: src/hed/libs/credential/VOMSUtil.cpp:2247 #: src/hed/libs/credential/VOMSUtil.cpp:2259 #: src/hed/libs/credential/VOMSUtil.cpp:2282 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2273 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2292 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2298 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:126 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: 
src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, c-format msgid "MyProxy failure: %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:64 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:78 msgid "Failed to lock arccrypto library in memory" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:81 msgid "Failed to initialize OpenSSL library" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:157 #, fuzzy msgid "failed to read data tag" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataExternalHelper.cpp:161 msgid "waiting for data chunk" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:163 #, fuzzy msgid "failed to read data chunk" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataExternalHelper.cpp:171 #, c-format msgid "data chunk: %llu %llu" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:242 #, c-format msgid "DataMove::Transfer: using supplied checksum %s" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:361 msgid "Expecting Module, Command and URL provided" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:368 msgid "Expecting Command module path among arguments" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:372 msgid "Expecting Command module name among arguments" msgstr "" #: src/hed/libs/data/DataMover.cpp:126 msgid "No locations found - probably no more physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:132 src/hed/libs/data/FileCache.cpp:550 #: src/libs/data-staging/Processor.cpp:394 #: src/libs/data-staging/Processor.cpp:408 #, c-format msgid "Removing %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:145 msgid "This instance was already deleted" msgstr "" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete physical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:162 #, c-format msgid "Removing metadata in %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:166 msgid "Failed to delete meta-information" msgstr "" #: src/hed/libs/data/DataMover.cpp:180 msgid "Failed to remove all physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:184 #, c-format msgid "Removing logical file from metadata %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:187 msgid "Failed to delete logical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:194 msgid "Failed to remove instance" msgstr "" #: src/hed/libs/data/DataMover.cpp:243 msgid "DataMover::Transfer : starting new thread" msgstr "" #: src/hed/libs/data/DataMover.cpp:271 #, c-format msgid "Transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:273 msgid "Not valid source" msgstr "" #: src/hed/libs/data/DataMover.cpp:278 msgid "Not valid destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:300 src/services/candypond/CandyPond.cpp:304 #, c-format msgid "Couldn't handle certificate: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:309 src/hed/libs/data/DataMover.cpp:614 #: src/libs/data-staging/Processor.cpp:123 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "" #: src/hed/libs/data/DataMover.cpp:313 src/hed/libs/data/DataMover.cpp:633 #: src/hed/libs/data/DataMover.cpp:691 src/libs/data-staging/Processor.cpp:142 msgid "Permission checking passed" msgstr "" #: src/hed/libs/data/DataMover.cpp:314 src/hed/libs/data/DataMover.cpp:652 #: src/hed/libs/data/DataMover.cpp:1180 msgid "Linking/copying cached file" 
msgstr "" #: src/hed/libs/data/DataMover.cpp:338 #, c-format msgid "No locations for source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:342 #, c-format msgid "Failed to resolve source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:356 src/hed/libs/data/DataMover.cpp:431 #, c-format msgid "No locations for destination found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:361 src/hed/libs/data/DataMover.cpp:435 #, c-format msgid "Failed to resolve destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:378 #, c-format msgid "No locations for destination different from source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:400 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:412 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "" #: src/hed/libs/data/DataMover.cpp:416 #, c-format msgid "Failed to delete %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:447 #, c-format msgid "Deleted but still have locations at %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:459 msgid "DataMover: cycle" msgstr "" #: src/hed/libs/data/DataMover.cpp:461 msgid "DataMover: no retries requested - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:466 msgid "DataMover: source out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:468 msgid "DataMover: destination out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:476 #, c-format msgid "Real transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:502 #, c-format msgid "Creating buffer: %lli x %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:518 #, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:523 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:547 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:552 msgid "Buffer creation failed !" 
msgstr "" #: src/hed/libs/data/DataMover.cpp:575 #, c-format msgid "URL is mapped to: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:603 src/hed/libs/data/DataMover.cpp:661 #: src/libs/data-staging/Processor.cpp:78 msgid "Cached file is locked - should retry" msgstr "" #: src/hed/libs/data/DataMover.cpp:608 src/libs/data-staging/Processor.cpp:96 msgid "Failed to initiate cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:625 src/services/candypond/CandyPond.cpp:379 #, c-format msgid "Permission checking failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:627 src/hed/libs/data/DataMover.cpp:685 #: src/hed/libs/data/DataMover.cpp:705 src/hed/libs/data/DataMover.cpp:716 msgid "source.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:641 src/libs/data-staging/Processor.cpp:147 #, c-format msgid "Source modification date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:642 src/libs/data-staging/Processor.cpp:148 #, c-format msgid "Cache creation date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:648 src/libs/data-staging/Processor.cpp:153 msgid "Cached file is outdated, will re-download" msgstr "" #: src/hed/libs/data/DataMover.cpp:651 src/libs/data-staging/Processor.cpp:158 msgid "Cached copy is still valid" msgstr "" #: src/hed/libs/data/DataMover.cpp:678 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" #: src/hed/libs/data/DataMover.cpp:682 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:693 msgid "Linking local file" msgstr "" #: src/hed/libs/data/DataMover.cpp:713 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:722 #, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:733 #, c-format msgid "cache file: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:759 #, fuzzy, c-format msgid "Failed to stat source %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataMover.cpp:761 src/hed/libs/data/DataMover.cpp:776 #: src/hed/libs/data/DataMover.cpp:808 src/hed/libs/data/DataMover.cpp:828 #: src/hed/libs/data/DataMover.cpp:851 src/hed/libs/data/DataMover.cpp:869 #: src/hed/libs/data/DataMover.cpp:1028 src/hed/libs/data/DataMover.cpp:1061 #: src/hed/libs/data/DataMover.cpp:1072 src/hed/libs/data/DataMover.cpp:1146 msgid "(Re)Trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:772 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:786 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:790 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:802 src/hed/libs/data/DataMover.cpp:823 #: src/libs/data-staging/DataStagingDelivery.cpp:376 #: src/libs/data-staging/DataStagingDelivery.cpp:399 #, c-format msgid "Using internal transfer method of %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:815 src/hed/libs/data/DataMover.cpp:833 #: src/libs/data-staging/DataStagingDelivery.cpp:392 #: src/libs/data-staging/DataStagingDelivery.cpp:413 #, c-format msgid "Internal transfer method is not supported for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:840 msgid "Using buffered transfer method" msgstr "" #: src/hed/libs/data/DataMover.cpp:844 #, fuzzy, c-format msgid "Failed to prepare source: %s" msgstr "Nem sikerült elküldeni a 
kérést" #: src/hed/libs/data/DataMover.cpp:859 #, c-format msgid "Failed to start reading from source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:879 msgid "Metadata of source and destination are different" msgstr "" #: src/hed/libs/data/DataMover.cpp:899 #, c-format msgid "Failed to preregister destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:904 src/hed/libs/data/DataMover.cpp:1170 msgid "destination.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:915 #, fuzzy, c-format msgid "Failed to prepare destination: %s" msgstr "Nem támogatott url: %s" #: src/hed/libs/data/DataMover.cpp:922 src/hed/libs/data/DataMover.cpp:945 #: src/hed/libs/data/DataMover.cpp:1167 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:926 src/hed/libs/data/DataMover.cpp:948 #: src/hed/libs/data/DataMover.cpp:1037 src/hed/libs/data/DataMover.cpp:1053 #: src/hed/libs/data/DataMover.cpp:1078 src/hed/libs/data/DataMover.cpp:1123 msgid "(Re)Trying next destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:937 #, c-format msgid "Failed to start writing to destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:960 msgid "Failed to start writing to cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:968 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1192 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:975 msgid "Waiting for buffer" msgstr "" #: src/hed/libs/data/DataMover.cpp:982 #, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:987 #, c-format msgid "buffer: read EOF : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:988 #, c-format msgid "buffer: write EOF: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:989 #, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:990 msgid "Closing read channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:997 msgid "Closing write channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:1005 msgid "Failed to complete writing to destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:1019 msgid "Transfer cancelled successfully" msgstr "" #: src/hed/libs/data/DataMover.cpp:1066 msgid "Cause of failure unclear - choosing randomly" msgstr "" #: src/hed/libs/data/DataMover.cpp:1110 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" #: src/hed/libs/data/DataMover.cpp:1116 msgid "" "Failed to unregister preregistered lfn, You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:1120 msgid "Failed to delete destination, retry may fail" msgstr "" #: src/hed/libs/data/DataMover.cpp:1130 msgid "Cannot compare empty checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1137 #: src/libs/data-staging/DataStagingDelivery.cpp:570 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" #: src/hed/libs/data/DataMover.cpp:1139 #, c-format msgid "Checksum mismatch between calcuated checksum %s and source checksum %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:1151 #: src/libs/data-staging/DataStagingDelivery.cpp:586 #, c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1157 #: 
src/libs/data-staging/DataStagingDelivery.cpp:589 msgid "Checksum not computed" msgstr "" #: src/hed/libs/data/DataMover.cpp:1163 #, c-format msgid "Failed to postregister destination %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:90 #, c-format msgid "Invalid URL option: %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:251 msgid "Checksum types of index and replica are different, skipping comparison" msgstr "" #: src/hed/libs/data/DataPoint.cpp:278 #, c-format msgid "Skipping invalid URL option %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:293 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ." msgstr "" #: src/hed/libs/data/DataPoint.cpp:311 #, fuzzy, c-format msgid "Failed to load plugin for URL %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/data/DataPointDelegate.cpp:75 #: src/hed/libs/data/DataPointDelegate.cpp:76 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2032 #, c-format msgid "Starting helper process: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:180 msgid "start_reading" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:189 msgid "start_reading: helper start failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:197 msgid "start_reading: thread create failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:213 msgid "StopReading: aborting connection" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:218 msgid "stop_reading: waiting for transfer to finish" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:221 #, c-format msgid "stop_reading: exiting: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:231 msgid "read_thread: get and register buffers" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:239 #, c-format msgid "read_thread: for_read failed - aborting: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:247 #, c-format msgid "read_thread: non-data tag '%c' from external process - leaving: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:256 #, c-format msgid "read_thread: data read error from external process - aborting: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:264 msgid "read_thread: exiting" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:285 msgid "start_writing_ftp: helper start failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:293 msgid "start_writing_ftp: thread create failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:343 #, fuzzy msgid "No checksum information possible" msgstr "Nem jött létre új információs dokumentum" #: src/hed/libs/data/DataPointDelegate.cpp:359 msgid "write_thread: get and pass buffers" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:366 msgid "write_thread: for_write failed - aborting" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:370 msgid "write_thread: for_write eof" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:384 msgid "write_thread: out failed - aborting" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:392 msgid "write_thread: exiting" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:91 #, c-format msgid "Can't handle location %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: 
src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:229 #, c-format msgid "Replica %s matches host pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:13 #, fuzzy msgid "Source is invalid URL" msgstr "A lekérdezés nem XML helyes" #: src/hed/libs/data/DataStatus.cpp:14 #, fuzzy msgid "Destination is invalid URL" msgstr "Célállomás: %s" #: src/hed/libs/data/DataStatus.cpp:15 msgid "Resolving of index service for source failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:16 msgid "Resolving of index service for destination failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:21 msgid "Failed while transferring data" msgstr "" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:26 msgid "Unregistering from index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" #: src/hed/libs/data/DataStatus.cpp:29 msgid "Delete error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "" #: src/hed/libs/data/DataStatus.cpp:34 msgid "Already reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:35 #, fuzzy msgid "Already writing to destination" msgstr "Nem sikerült feloldani a célállomást" #: src/hed/libs/data/DataStatus.cpp:36 msgid "Read access check failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:37 #, fuzzy msgid "Directory listing failed" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 msgid "Failed to obtain information about file" msgstr "" #: src/hed/libs/data/DataStatus.cpp:40 msgid "No such file or directory" msgstr "" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating 
System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 #, fuzzy msgid "Failed to prepare source" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:47 #, fuzzy msgid "Failed to prepare destination" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 msgid "Failed to finalize reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:50 msgid "Failed to finalize writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:51 #, fuzzy msgid "Failed to create directory" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:52 #, fuzzy msgid "Failed to rename URL" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "" #: src/hed/libs/data/DataStatus.cpp:54 msgid "Operation cancelled successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:55 #, fuzzy msgid "Generic error" msgstr "Felhasználó oldali hiba" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:61 msgid "Transfer timed out" msgstr "" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 #, fuzzy msgid "Temporary service error" msgstr "Felhasználó oldali hiba" #: src/hed/libs/data/DataStatus.cpp:66 #, fuzzy msgid "Permanent service error" msgstr "Felhasználó oldali hiba" #: src/hed/libs/data/DataStatus.cpp:67 msgid "Error switching uid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:68 msgid "Request timed out" msgstr "" #: src/hed/libs/data/FileCache.cpp:109 msgid "No cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:126 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:135 msgid "No draining cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:153 msgid "No read-only cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:182 #, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:192 #, c-format msgid "Failed to create any cache directories for %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:199 #, c-format msgid "Failed to change permissions on %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:211 #, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:214 #, fuzzy, c-format msgid "Failed to release lock on file %s" msgstr "Nem sikerült listázni a fájlokat" #: src/hed/libs/data/FileCache.cpp:232 #, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:238 #, c-format msgid "Failed to obtain lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:247 src/hed/libs/data/FileCache.cpp:307 #, c-format msgid "Error removing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:249 
src/hed/libs/data/FileCache.cpp:260 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:279 src/hed/libs/data/FileCache.cpp:313 #, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:296 #, c-format msgid "Invalid lock on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:302 #, fuzzy, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/data/FileCache.cpp:367 #, fuzzy, c-format msgid "Cache not found for file %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/hed/libs/data/FileCache.cpp:377 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:382 src/hed/libs/data/FileCache.cpp:687 #, c-format msgid "Cache file %s does not exist" msgstr "" #: src/hed/libs/data/FileCache.cpp:387 src/hed/libs/data/FileCache.cpp:689 #, c-format msgid "Error accessing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:393 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "" #: src/hed/libs/data/FileCache.cpp:398 #, c-format msgid "Cannot change permission of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:402 #, c-format msgid "Cannot change owner of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:416 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:420 src/hed/libs/data/FileCache.cpp:431 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:426 #, c-format msgid "Cache file %s not found" msgstr "" #: src/hed/libs/data/FileCache.cpp:441 #, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:449 #, c-format msgid "Failed to release lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:460 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:465 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:470 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:488 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:494 #, c-format msgid "Failed to set executable bit on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:499 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:513 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:517 src/hed/libs/data/FileCache.cpp:522 #, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:552 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:571 src/hed/libs/data/FileCache.cpp:639 #, c-format msgid "Error reading meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:576 src/hed/libs/data/FileCache.cpp:644 #, c-format msgid "Error opening meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:581 src/hed/libs/data/FileCache.cpp:648 #, c-format msgid "meta file %s is empty" msgstr "" #: 
src/hed/libs/data/FileCache.cpp:591 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" #: src/hed/libs/data/FileCache.cpp:602 #, c-format msgid "Bad format detected in file %s, in line %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:618 #, c-format msgid "Could not acquire lock on meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:622 #, c-format msgid "Error opening meta file for writing %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:658 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:662 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:713 #, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:718 #, fuzzy, c-format msgid "Failed to create cache meta file %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/data/FileCache.cpp:733 #, fuzzy, c-format msgid "Failed to read cache meta file %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/libs/data/FileCache.cpp:738 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:743 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:747 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" #: src/hed/libs/data/FileCache.cpp:757 #, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:828 #, c-format msgid "Using cache %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:842 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:79 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:112 #, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:848 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:118 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: src/hed/libs/data/URLMap.cpp:33 #, c-format msgid "Can't use URL %s" msgstr "" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:17 msgid "Usage: copy source destination" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, c-format msgid "Copy failed: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, c-format msgid "Failed to read private key file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:" "%s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:30 msgid "Module Manager Init" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:73 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." 
msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:207 #, c-format msgid "Found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:214 #, c-format msgid "Could not locate module %s in following paths:" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:218 #, c-format msgid "\t%s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:232 #, c-format msgid "Loaded %s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:276 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:312 #: src/hed/libs/loader/ModuleManager.cpp:325 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:316 #, c-format msgid "Not found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:330 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:364 src/hed/libs/loader/Plugin.cpp:557 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:372 src/hed/libs/loader/Plugin.cpp:567 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:378 src/hed/libs/loader/Plugin.cpp:480 #: src/hed/libs/loader/Plugin.cpp:572 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:395 src/hed/libs/loader/Plugin.cpp:490 #: src/hed/libs/loader/Plugin.cpp:598 #, c-format msgid "Module %s failed to reload (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:417 #, c-format msgid "Module %s contains no plugin %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:462 #, c-format msgid "Could not find loadable module descriptor by name %s or kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:467 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:474 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:503 #, c-format msgid "Module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:588 #, c-format msgid "Module %s does not contain plugin(s) of specified kind(s)" msgstr "" #: src/hed/libs/message/MCC.cpp:76 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "" #: src/hed/libs/message/MCC.cpp:85 #, c-format msgid "Security processing/check failed: %s" msgstr "" #: src/hed/libs/message/MCC.cpp:90 msgid "Security processing/check passed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:17 msgid "Chain(s) configuration failed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:134 msgid "SecHandler configuration is not defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:157 msgid "SecHandler has no configuration" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:163 msgid "SecHandler has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:173 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:177 #, c-format msgid "SecHandler: %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:189 msgid "Component has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:194 msgid "Component has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:203 #, c-format msgid "Component %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:229 #, c-format msgid "Component's %s(%s) next has no ID 
attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:290 #, c-format msgid "Loaded MCC %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:308 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:318 #, c-format msgid "Loaded Plexer %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:326 msgid "Service has no Name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:332 msgid "Service has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:341 #, c-format msgid "Service %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:348 #, c-format msgid "Loaded Service %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:390 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:401 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:410 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:415 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:434 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:445 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:454 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:460 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "" #: src/hed/libs/message/Service.cpp:35 #, c-format msgid "Security processing/check for '%s' failed: %s" msgstr "" #: src/hed/libs/message/Service.cpp:41 #, c-format msgid "Security processing/check for '%s' passed" msgstr "" #: src/hed/libs/otokens/jwse.cpp:55 #, c-format msgid "JWSE::Input: token: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:75 #, c-format msgid "JWSE::Input: header: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:101 #, c-format msgid "JWSE::Input: JWS content: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:111 msgid "JWSE::Input: JWS: token too young" msgstr "" #: src/hed/libs/otokens/jwse.cpp:120 msgid "JWSE::Input: JWS: token too old" msgstr "" #: src/hed/libs/otokens/jwse.cpp:131 #, c-format msgid "JWSE::Input: JWS: signature algorithm: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:174 #, c-format msgid "JWSE::Input: JWS: signature algorithn not supported: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:192 msgid "JWSE::Input: JWS: signature verification failed" msgstr "" #: src/hed/libs/otokens/jwse.cpp:198 msgid "JWSE::Input: JWE: not supported yet" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:21 msgid "JWSE::VerifyECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:25 msgid "JWSE::VerifyECDSA: wrong signature size" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:34 msgid "JWSE::VerifyECDSA: failed to create ECDSA signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:41 msgid "JWSE::VerifyECDSA: failed to parse signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:47 #, c-format msgid "JWSE::VerifyECDSA: failed to assign ECDSA signature: %i" msgstr "" 
#: src/hed/libs/otokens/jwse_ecdsa.cpp:56 msgid "JWSE::VerifyECDSA: failed to create EVP context" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:61 #, c-format msgid "JWSE::VerifyECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:66 #, c-format msgid "JWSE::VerifyECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:72 #, c-format msgid "JWSE::VerifyECDSA: failed to add message to hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:80 #, c-format msgid "JWSE::VerifyECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:87 #, c-format msgid "JWSE::VerifyECDSA: failed to verify: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:96 msgid "JWSE::SignECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:104 msgid "JWSE::SignECDSA: failed to create EVP context" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:109 #, c-format msgid "JWSE::SignECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:114 #, c-format msgid "JWSE::SignECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:120 #, c-format msgid "JWSE::SignECDSA: failed to add message to hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:128 #, c-format msgid "JWSE::SignECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:135 msgid "JWSE::SignECDSA: failed to create ECDSA signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:143 msgid "JWSE::SignECDSA: failed to parse signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:150 #, c-format msgid "JWSE::SignECDSA: wrong signature size: %i + %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:156 msgid "JWSE::SignECDSA: wrong signature size written" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:273 msgid "JWSE::ExtractPublicKey: x5c key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:281 msgid "JWSE::ExtractPublicKey: jwk key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:288 msgid "JWSE::ExtractPublicKey: external jwk key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:315 #, c-format msgid "JWSE::ExtractPublicKey: deleting outdated info: %s" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:344 #, c-format msgid "JWSE::ExtractPublicKey: fetching jws key from %s" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:372 msgid "JWSE::ExtractPublicKey: no supported key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:375 msgid "JWSE::ExtractPublicKey: key parsing error" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:40 #: src/hed/libs/otokens/openid_metadata.cpp:45 #, c-format msgid "Input: metadata: %s" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:438 #, c-format msgid "Fetch: response code: %u %s" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:440 #, fuzzy, c-format msgid "Fetch: response body: %s" msgstr "Válasz: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:141 #, c-format msgid "Can not load ARC evaluator object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:192 #, c-format msgid "Can not load ARC request object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:233 #, c-format msgid "Can not load policy object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:281 msgid "Can not load policy object" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:329 msgid "Can not load request object" msgstr "" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid 
"Can not generate policy object" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:189 #, c-format msgid "HTTP Error: %d %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:270 msgid "Cannot create http payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:353 msgid "No next element in the chain" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:362 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:371 msgid "next element of the chain returned no payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:383 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:465 msgid "Error to flush output payload" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:576 #, fuzzy msgid "Failed to parse HTTP header" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:969 #, c-format msgid "> %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:994 msgid "Failed to write header to output stream" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:1019 src/hed/mcc/http/PayloadHTTP.cpp:1025 #: src/hed/mcc/http/PayloadHTTP.cpp:1031 src/hed/mcc/http/PayloadHTTP.cpp:1041 #: src/hed/mcc/http/PayloadHTTP.cpp:1053 src/hed/mcc/http/PayloadHTTP.cpp:1058 #: src/hed/mcc/http/PayloadHTTP.cpp:1063 src/hed/mcc/http/PayloadHTTP.cpp:1071 #: src/hed/mcc/http/PayloadHTTP.cpp:1078 msgid "Failed to write body to output stream" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" 
msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:238 src/hed/mcc/soap/MCCSOAP.cpp:252 #: src/hed/mcc/soap/MCCSOAP.cpp:282 msgid "empty next chain element" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:298 msgid "next element of the chain returned empty payload" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:223 msgid "empty input payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:233 #, c-format msgid "MIME is not suitable for SOAP: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:247 msgid "incoming message is not SOAP" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:274 #, c-format msgid "Security check failed in SOAP MCC for incoming message: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:290 #, c-format msgid "next element of the chain returned error status: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:309 msgid "next element of the chain returned unknown payload - passing through" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:314 src/hed/mcc/soap/MCCSOAP.cpp:330 #, c-format msgid "Security check failed in SOAP MCC for outgoing message: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:384 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:437 msgid "Security check failed in SOAP MCC for incoming message" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:82 msgid "Missing Port in Listen element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:91 msgid "Version in Listen element can't be recognized" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:100 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:102 #, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:109 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:111 #, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:117 #, fuzzy, c-format msgid "Failed to create socket for listening at TCP port %s(%s): %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/mcc/tcp/MCCTCP.cpp:119 #, fuzzy, c-format msgid "Failed to create socket for listening at %s:%s(%s): %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/hed/mcc/tcp/MCCTCP.cpp:134 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:136 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:144 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:161 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:163 #, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:180 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:182 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:189 #, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:191 #, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "" 
#: src/hed/mcc/tcp/MCCTCP.cpp:197 msgid "No listening ports initiated" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "dropped" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "put on hold" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:212 msgid "Failed to start thread for listening" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:245 msgid "Failed to start thread for communication" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:271 msgid "Failed while waiting for connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:293 msgid "Failed to accept connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:302 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:309 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:548 msgid "next chain element called" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:563 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:571 src/hed/mcc/tcp/MCCTCP.cpp:670 #: src/hed/mcc/tls/MCCTLS.cpp:561 msgid "Failed to send content of buffer" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:583 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:585 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:606 msgid "No Connect element specified" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "Missing Port in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:618 msgid "Missing Host in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:646 msgid "TCP client process called" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:65 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:81 #, c-format msgid "Failed to resolve %s (%s)" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:91 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:95 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:107 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:132 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:198 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:201 msgid "Using CA default location" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:210 #, fuzzy, c-format msgid "Using CA file: %s" msgstr "Fájl feltöltve %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:212 #, c-format msgid "Using CA dir: %s" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:289 #, fuzzy, c-format msgid "Using DH parameters from file: %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:292 msgid "Failed to open file with DH parameters for reading" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:297 #, fuzzy msgid "Failed to read file with DH parameters" msgstr "Nem sikerült listázni a 
meta adatokat" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:300 #, fuzzy msgid "Failed to apply DH parameters" msgstr "Túl sok paraméter" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:302 msgid "DH parameters applied" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:316 #, c-format msgid "Using curve with NID: %u" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:319 #, fuzzy msgid "Failed to generate EC key" msgstr "privát kulcs elérési útvonala" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:322 #, fuzzy msgid "Failed to apply ECDH parameters" msgstr "Túl sok paraméter" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:324 msgid "ECDH parameters applied" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:330 #, c-format msgid "Using cipher list: %s" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:354 #, c-format msgid "Using protocol options: 0x%x" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:167 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:121 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:126 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:131 msgid "Missing CA subject in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:141 msgid "Negative rights are not supported in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:145 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:150 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:155 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:172 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:177 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:183 msgid "Missing condition subjects in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:265 msgid "Unknown element in Globus signing policy" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:237 #, fuzzy msgid "Critical VOMS attribute processing failed" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/hed/mcc/tls/MCCTLS.cpp:245 #, fuzzy msgid "VOMS attribute validation failed" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/hed/mcc/tls/MCCTLS.cpp:247 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: 
src/hed/mcc/tls/MCCTLS.cpp:439 src/hed/mcc/tls/MCCTLS.cpp:578 #: src/hed/mcc/tls/MCCTLS.cpp:597 #, fuzzy, c-format msgid "Failed to establish connection: %s" msgstr "Nem sikerült betölteni a konfigurációt" #: src/hed/mcc/tls/MCCTLS.cpp:458 src/hed/mcc/tls/MCCTLS.cpp:540 #, c-format msgid "Peer name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:460 src/hed/mcc/tls/MCCTLS.cpp:542 #, c-format msgid "Identity name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:462 src/hed/mcc/tls/MCCTLS.cpp:544 #, c-format msgid "CA name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:469 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:477 msgid "Security check failed in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:550 msgid "Security check failed for outgoing TLS message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:582 msgid "Security check failed for incoming TLS message" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:64 #, c-format msgid "Ignoring verification error due to insecure connection allowed: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:79 #, fuzzy msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "publikus kulcs elérési útvonala" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:83 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:85 msgid "" "Skipping additional policy matching due to insecure connections allowed." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:109 #, c-format msgid "Certificate %s already expired" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:117 #, c-format msgid "Certificate %s will expire in %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:138 msgid "Failed to store application data" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:166 msgid "Failed to retrieve application data from OpenSSL" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:238 src/hed/mcc/tls/PayloadTLSMCC.cpp:338 msgid "Can not create the SSL Context object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:251 src/hed/mcc/tls/PayloadTLSMCC.cpp:358 msgid "Can't set OpenSSL verify flags" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:372 msgid "Can not create the SSL object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:280 #, fuzzy msgid "Failed to assign hostname extension" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:294 msgid "Failed to establish SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:298 src/hed/mcc/tls/PayloadTLSMCC.cpp:388 #, c-format msgid "Using cipher: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:384 msgid "Failed to accept SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:446 #, c-format msgid "Failed to shut down SSL: %s" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, fuzzy, c-format msgid "There are %d RequestItems" msgstr "%d darab publikus tanúsítvány van a válasz üzenetben" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 
msgid "Can not parse classname for FunctionFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:109 msgid "Can not find ArcPDPContext" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:138 src/hed/shc/xacmlpdp/XACMLPDP.cpp:116 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:142 src/hed/shc/xacmlpdp/XACMLPDP.cpp:120 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:154 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:83 #: src/hed/shc/gaclpdp/GACLPDP.cpp:117 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:132 msgid "Can not dynamically produce Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:157 msgid "Evaluator for ArcPDP was not loaded" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:164 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:56 #: src/hed/shc/gaclpdp/GACLPDP.cpp:127 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:88 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:142 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:172 src/hed/shc/arcpdp/ArcPDP.cpp:180 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:136 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:142 #: src/hed/shc/gaclpdp/GACLPDP.cpp:135 src/hed/shc/gaclpdp/GACLPDP.cpp:143 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:96 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:104 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:188 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:149 #: 
src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:112 #, c-format msgid "ARC Auth. request: %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:191 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:152 #: src/hed/shc/gaclpdp/GACLPDP.cpp:154 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:115 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:169 msgid "No requested security information was collected" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:198 msgid "Not authorized by arc.pdp - failed to get response from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:244 msgid "Authorized by arc.pdp" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:74 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:128 msgid "No delegation policies in this context and message - passing through" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:94 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:108 msgid "Failed to convert security information to ARC policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:115 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:122 #, c-format msgid "ARC delegation policy: %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:160 msgid "No authorization response was returned" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:163 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:182 msgid "Delegation authorization passed" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:184 msgid "Delegation authorization failed" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:478 src/services/candypond/CandyPond.cpp:526 #: src/services/data-staging/DataDeliveryService.cpp:648 msgid "process: POST" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:485 src/services/candypond/CandyPond.cpp:535 #: src/services/data-staging/DataDeliveryService.cpp:657 #: 
src/services/wrappers/python/pythonwrapper.cpp:416 msgid "input is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential obtained from the delegation service is stored in path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delegation service should be configured" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential was obtained from path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation credential to delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 msgid "output is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:230 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:353 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:120 msgid "Evaluator for GACLPDP was not loaded" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:151 #, c-format msgid "GACL Auth. 
request: %s" msgstr "" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:13 #, fuzzy msgid "Configuration file not specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:28 #: src/hed/shc/legacy/ConfigParser.cpp:33 msgid "Configuration file can not be read" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:43 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:47 #, c-format msgid "Configuration file is broken - block name does not end with ]: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:39 src/hed/shc/legacy/LegacyPDP.cpp:119 msgid "Configuration file not specified in ConfigBlock" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:48 src/hed/shc/legacy/LegacyPDP.cpp:128 msgid "BlockName is empty" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:108 #, fuzzy, c-format msgid "Failed processing user mapping command: %s %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/hed/shc/legacy/LegacyMap.cpp:114 #, c-format msgid "Failed to change mapping stack processing policy in: %s = %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:179 msgid "LegacyMap: no configurations blocks defined" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:201 src/hed/shc/legacy/LegacyPDP.cpp:255 #, c-format msgid "" "LegacyPDP: there is no %s Sec Attribute defined. Probably ARC Legacy Sec " "Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:206 src/hed/shc/legacy/LegacyPDP.cpp:260 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:138 #, fuzzy, c-format msgid "Failed to parse configuration file %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/hed/shc/legacy/LegacyPDP.cpp:144 #, c-format msgid "Block %s not found in configuration file %s" msgstr "" #: src/hed/shc/legacy/LegacySecHandler.cpp:40 #: src/hed/shc/legacy/LegacySecHandler.cpp:118 msgid "LegacySecHandler: configuration file not specified" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:149 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:174 src/hed/shc/legacy/arc_lcmaps.cpp:188 msgid "Missing subject name" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:179 src/hed/shc/legacy/arc_lcmaps.cpp:193 #, fuzzy msgid "Missing path of credentials file" msgstr "kérési fájl elérési útvonala" #: src/hed/shc/legacy/arc_lcas.cpp:185 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:202 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:212 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:222 msgid "Failed to initialize LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:237 msgid "Failed to terminate LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 msgid "Can't read policy names" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 
msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 msgid "Failed to initialize LCMAPS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:293 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 msgid "LCMAPS did not return any GID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 msgid "LCMAPS did not return any UID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:314 msgid "Failed to terminate LCMAPS" msgstr "" #: src/hed/shc/legacy/auth.cpp:35 #, c-format msgid "Unexpected argument for 'all' rule - %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:340 #, c-format msgid "Credentials stored in temporary file %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:349 #, c-format msgid "Assigned to authorization group %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:354 #, fuzzy, c-format msgid "Assigned to userlist %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/shc/legacy/auth_file.cpp:22 #, c-format msgid "Failed to read file %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:33 #, fuzzy msgid "Missing subject in configuration" msgstr "verzió információ kiírása" #: src/hed/shc/legacy/auth_otokens.cpp:38 #, fuzzy msgid "Missing issuer in configuration" msgstr "verzió információ kiírása" #: src/hed/shc/legacy/auth_otokens.cpp:43 #, fuzzy msgid "Missing audience in configuration" msgstr "verzió információ kiírása" #: src/hed/shc/legacy/auth_otokens.cpp:48 #, fuzzy msgid "Missing scope in configuration" msgstr "voms szerver fájljának az elérési útvonala" #: src/hed/shc/legacy/auth_otokens.cpp:53 src/hed/shc/legacy/auth_voms.cpp:47 msgid "Missing group in configuration" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:56 #, fuzzy, c-format msgid "Rule: subject: %s" msgstr "Tárgy: %s" #: src/hed/shc/legacy/auth_otokens.cpp:57 #, fuzzy, c-format msgid "Rule: issuer: %s" msgstr "modul neve: %s" #: src/hed/shc/legacy/auth_otokens.cpp:58 #, fuzzy, c-format msgid "Rule: audience: %s" msgstr "modul neve: %s" #: src/hed/shc/legacy/auth_otokens.cpp:59 #, fuzzy, c-format msgid "Rule: scope: %s" msgstr "modul neve: %s" #: src/hed/shc/legacy/auth_otokens.cpp:60 src/hed/shc/legacy/auth_voms.cpp:66 #, c-format msgid "Rule: group: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:63 #, fuzzy, c-format msgid "Match issuer: %s" msgstr "Válasz: %s" #: src/hed/shc/legacy/auth_otokens.cpp:69 #, fuzzy, c-format msgid "Matched: %s %s %s" msgstr "Feltöltve %s" #: src/hed/shc/legacy/auth_otokens.cpp:83 src/hed/shc/legacy/auth_voms.cpp:93 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:176 #, c-format msgid "Evaluate operator =: left: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:177 #, c-format msgid "Evaluate operator =: right: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:182 #, c-format msgid "Evaluate operator =: left from context: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:239 #, c-format msgid "Operator token: %c" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:268 #, fuzzy, c-format msgid "String token: %s" msgstr "Célállomás: %s" #: src/hed/shc/legacy/auth_otokens.cpp:296 #, fuzzy, c-format msgid "Quoted string token: %s" msgstr "Célállomás: %s" #: src/hed/shc/legacy/auth_otokens.cpp:304 
#, c-format msgid "Sequence token parsing: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:420 #, c-format msgid "Matching tokens expression: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:424 #, fuzzy msgid "Failed to parse expression" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/shc/legacy/auth_otokens.cpp:435 #, c-format msgid "%s: " msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:441 #, fuzzy, c-format msgid " %s" msgstr "Forrás: %s" #: src/hed/shc/legacy/auth_otokens.cpp:446 msgid "Expression matched" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:451 #, fuzzy, c-format msgid "Failed to evaluate expression: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/shc/legacy/auth_otokens.cpp:454 msgid "Expression failed to matched" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:79 src/hed/shc/legacy/unixmap.cpp:216 #, c-format msgid "Plugin %s returned: %u" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:83 src/hed/shc/legacy/unixmap.cpp:220 #, c-format msgid "Plugin %s timeout after %u seconds" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:86 src/hed/shc/legacy/unixmap.cpp:223 #, c-format msgid "Plugin %s failed to start" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:88 src/hed/shc/legacy/unixmap.cpp:225 #, c-format msgid "Plugin %s printed: %s" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:89 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:226 #, c-format msgid "Plugin %s error: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:42 msgid "Missing VO in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:52 msgid "Missing role in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:57 msgid "Missing capabilities in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:62 msgid "Too many arguments in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:65 #, fuzzy, c-format msgid "Rule: vo: %s" msgstr "Kérés: %s" #: src/hed/shc/legacy/auth_voms.cpp:67 #, fuzzy, c-format msgid "Rule: role: %s" msgstr "modul neve: %s" #: src/hed/shc/legacy/auth_voms.cpp:68 #, c-format msgid "Rule: capabilities: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Match vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:78 #, c-format msgid "Matched: %s %s %s %s" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:70 #, c-format msgid "SimpleMap: acquired new unmap time of %u seconds" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:72 msgid "SimpleMap: wrong number in unmaptime command" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:85 src/hed/shc/legacy/simplemap.cpp:90 #, c-format msgid "SimpleMap: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:65 src/hed/shc/legacy/unixmap.cpp:70 msgid "Mapping policy option has empty value" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:80 #, fuzzy, c-format msgid "Unsupported mapping policy action: %s" msgstr "Nem támogatott url: %s" #: src/hed/shc/legacy/unixmap.cpp:91 #, fuzzy, c-format msgid "Unsupported mapping policy option: %s" msgstr "Nem támogatott url: %s" #: src/hed/shc/legacy/unixmap.cpp:103 src/hed/shc/legacy/unixmap.cpp:108 msgid "User name mapping command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:116 #, c-format msgid "User name mapping has empty authgroup: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:147 #, c-format msgid "Unknown user name mapping rule %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:156 src/hed/shc/legacy/unixmap.cpp:161 #: src/hed/shc/legacy/unixmap.cpp:177 src/hed/shc/legacy/unixmap.cpp:183 msgid "Plugin (user mapping) command is 
empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:167 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:171 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:204 #, c-format msgid "Plugin %s returned no username" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:209 #, c-format msgid "Plugin %s returned too much: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:212 #, c-format msgid "Plugin %s returned no mapping" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:235 msgid "User subject match is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:239 #, c-format msgid "Mapfile at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:263 msgid "User pool mapping is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:268 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:273 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:291 #, c-format msgid "User name direct mapping is missing user name: %s." msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:65 msgid "OTokens: Attr: message" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:70 #, c-format msgid "OTokens: Attr: %s = %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:75 #, c-format msgid "OTokens: Attr: token: %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:78 #, c-format msgid "OTokens: Attr: token: bearer: %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:193 msgid "OTokens: Handle" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:195 msgid "OTokens: Handle: message" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:198 #, fuzzy msgid "Failed to create OTokens security attributes" msgstr "Nem sikerült elküldeni a kérést" #: src/hed/shc/otokens/OTokensSH.cpp:202 msgid "OTokens: Handle: token was not present" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:206 #, c-format msgid "OTokens: Handle: attributes created: subject = %s" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:47 msgid "Creating a pdpservice client" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:81 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:153 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:186 msgid "Policy Decision Service invocation failed" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:156 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:189 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:32 msgid "There was no SOAP response" msgstr "Nincs SOAP-os válasz" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Authorized from remote pdp service" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:172 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:206 msgid "Unauthorized from remote pdp service" msgstr "" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:158 #: 
src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:171 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:185 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:189 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:219 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:222 msgid "Succeeded to authenticate SAMLToken" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 #, fuzzy, c-format msgid "No response from AA service %s" msgstr "Nincs válasz a voms szervertől" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 #, c-format msgid "SOAP Request to AA service %s failed" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:299 msgid "Cannot find content under response soap message" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Cannot find under response soap message:" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:320 msgid "The Response is not going to this end" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:327 msgid "The StatusCode is Success" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:333 msgid "Succeeded to verify the signature under " msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:336 msgid "Failed to verify the signature under " msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:347 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:357 msgid "SAML Token handler is not configured" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:28 #, c-format msgid "Access list location: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:38 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." 
msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:41 #, fuzzy, c-format msgid "Subject to match: %s" msgstr "Tárgy: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:44 #, fuzzy, c-format msgid "Policy subject: %s" msgstr "Tárgy: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:46 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:72 #, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:53 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:60 #, fuzzy, c-format msgid "Policy line: %s" msgstr "Proxy típusa: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:78 #, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 msgid "Can not dynamically produce Policy" msgstr "" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to 
authenticate X509 Token inside the incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, c-format msgid "Can not create function %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:87 msgid "Can not find XACMLPDPContext" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:135 msgid "Evaluator for XACMLPDP was not loaded" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:150 src/hed/shc/xacmlpdp/XACMLPDP.cpp:158 msgid "Failed to convert security information to XACML request" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:166 #, c-format msgid "XACML request: %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:178 msgid "Authorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "UnAuthorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "" #: src/libs/data-staging/DTR.cpp:81 src/libs/data-staging/DTR.cpp:85 #, c-format msgid "Could not handle endpoint %s" msgstr "" #: src/libs/data-staging/DTR.cpp:95 msgid "Source is the same as destination" msgstr "" #: src/libs/data-staging/DTR.cpp:175 #, fuzzy, c-format msgid "Invalid ID: %s" msgstr "Érvénytelen URL: %s" #: src/libs/data-staging/DTR.cpp:212 #, c-format msgid "%s->%s" msgstr "" #: src/libs/data-staging/DTR.cpp:320 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:335 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:338 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DTRList.cpp:216 #, c-format msgid "Boosting priority from %i to %i due to incoming higher priority DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, c-format msgid "Cleaning up after failure: deleting %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion 
timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:45 #, fuzzy msgid "No source defined" msgstr "Státusz lekérdezés sikertelen" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:49 #, fuzzy msgid "No destination defined" msgstr "Nem sikerült feloldani a célállomást" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:157 #, c-format msgid "Bad checksum format %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:196 #, fuzzy, c-format msgid "Failed to run command: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:235 #, c-format msgid "DataDelivery: %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:247 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:266 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:72 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "Connecting to Delivery service at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:101 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:107 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:185 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:251 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:337 #, c-format msgid "" "Request:\n" "%s" msgstr "" "Kérés:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:113 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:343 #, c-format msgid "Could not connect to service %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:121 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:351 #, fuzzy, c-format msgid "No SOAP response from Delivery service %s" msgstr "Nincs válasz a voms szervertÅ‘l" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:126 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:204 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:278 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:357 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Válasz:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, fuzzy, c-format msgid "Failed to start transfer request: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:142 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:150 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:155 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:192 #, fuzzy, c-format msgid "Failed to send cancel request: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:199 #, fuzzy msgid "Failed to cancel: No SOAP response" msgstr "Nincs SOAP-os válasz" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:213 #, fuzzy, c-format msgid "Failed to cancel transfer request: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:220 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:301 #, c-format msgid "Bad 
format in XML response: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:227 #, fuzzy, c-format msgid "Failed to cancel: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:271 #, fuzzy msgid "No SOAP response from delivery service" msgstr "Nincs válasz a voms szervertÅ‘l" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:292 #, fuzzy, c-format msgid "Failed to query state: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:366 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:374 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:382 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:390 #, c-format msgid "Dir %s allowed at service %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:484 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:498 #, fuzzy msgid "Failed locating credentials" msgstr "Nem sikerült listázni a meta adatokat" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:503 #, fuzzy msgid "Failed to initiate client connection" msgstr "Nem sikerült betölteni a konfigurációt" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:509 msgid "Client connection has no entry point" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:518 msgid "Initiating delegation procedure" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:520 msgid "Failed to initiate delegation credentials" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:97 #, c-format msgid "%5u s: %10.1f kB %8.1f kB/s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:156 msgid "Unexpected arguments" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:159 msgid "Source URL missing" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:162 #, fuzzy msgid "Destination URL missing" msgstr "Célállomás: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:166 #, fuzzy, c-format msgid "Source URL not valid: %s" msgstr "A lekérdezés nem XML helyes" #: src/libs/data-staging/DataStagingDelivery.cpp:170 #, fuzzy, c-format msgid "Destination URL not valid: %s" msgstr "Célállomás: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #, c-format msgid "Unknown transfer option: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:272 #, c-format msgid "Source URL not supported: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:277 #: src/libs/data-staging/DataStagingDelivery.cpp:299 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:294 #, fuzzy, c-format msgid "Destination URL not supported: %s" msgstr "A fileset regisztcáció nem támogatott még" #: src/libs/data-staging/DataStagingDelivery.cpp:348 #, c-format msgid "Will calculate %s checksum" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:359 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:572 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:582 #, fuzzy, c-format msgid "Failed cleaning up destination %s" msgstr "Nem sikerült feloldani a célállomást" #: 
#: src/libs/data-staging/Processor.cpp:49
#: src/services/candypond/CandyPond.cpp:117
msgid "Error creating cache"
msgstr ""

#: src/libs/data-staging/Processor.cpp:73
#, c-format
msgid "Forcing re-download of file %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:90
#, c-format
msgid "Will wait around %is"
msgstr ""

#: src/libs/data-staging/Processor.cpp:109
#, c-format
msgid "Force-checking source of cache file %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:112
#, c-format
msgid "Source check requested but failed: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:132
msgid "Permission checking failed, will try downloading without using cache"
msgstr ""

#: src/libs/data-staging/Processor.cpp:162
#, c-format
msgid "Will download to cache file %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:183
msgid "Looking up source replicas"
msgstr ""

#: src/libs/data-staging/Processor.cpp:205
#: src/libs/data-staging/Processor.cpp:432
#, fuzzy
msgid "Resolving destination replicas"
msgstr "Nem sikerült feloldani a célállomást"

#: src/libs/data-staging/Processor.cpp:222
msgid "No locations for destination different from source found"
msgstr ""

#: src/libs/data-staging/Processor.cpp:233
msgid "Pre-registering destination in index service"
msgstr ""

#: src/libs/data-staging/Processor.cpp:259
msgid "Resolving source replicas in bulk"
msgstr ""

#: src/libs/data-staging/Processor.cpp:273
#, c-format
msgid "No replicas found for %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:293
#, c-format
msgid "Checking %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:302
#: src/libs/data-staging/Processor.cpp:360
msgid "Metadata of replica and index service differ"
msgstr ""

#: src/libs/data-staging/Processor.cpp:310
#, c-format
msgid "Failed checking source replica %s: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:336
msgid "Querying source replicas in bulk"
msgstr ""

#: src/libs/data-staging/Processor.cpp:348
#, c-format
msgid "Failed checking source replica: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:354
msgid "Failed checking source replica"
msgstr ""

#: src/libs/data-staging/Processor.cpp:391
msgid "Overwrite requested - will pre-clean destination"
msgstr ""

#: src/libs/data-staging/Processor.cpp:400
msgid "Finding existing destination replicas"
msgstr ""

#: src/libs/data-staging/Processor.cpp:412
#, c-format
msgid "Failed to delete replica %s: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:426
#, fuzzy, c-format
msgid "Unregistering %s"
msgstr "Figyelmen kívül hagyás: %s"

#: src/libs/data-staging/Processor.cpp:437
#, fuzzy
msgid "Pre-registering destination"
msgstr "Nem sikerült feloldani a célállomást"

#: src/libs/data-staging/Processor.cpp:443
#, c-format
msgid "Failed to pre-clean destination: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:452
#, fuzzy
msgid "Destination already exists"
msgstr "Célállomás: %s"

#: src/libs/data-staging/Processor.cpp:476
msgid "Preparing to stage source"
msgstr ""

#: src/libs/data-staging/Processor.cpp:489
#, c-format
msgid "Source is not ready, will wait %u seconds"
msgstr ""

#: src/libs/data-staging/Processor.cpp:495
msgid "No physical files found for source"
msgstr ""

#: src/libs/data-staging/Processor.cpp:513
#, fuzzy
msgid "Preparing to stage destination"
msgstr "Nem sikerült feloldani a célállomást"

#: src/libs/data-staging/Processor.cpp:526
#, c-format
msgid "Destination is not ready, will wait %u seconds"
msgstr ""

#: src/libs/data-staging/Processor.cpp:532
msgid "No physical files found for destination"
msgstr ""
#: src/libs/data-staging/Processor.cpp:558
msgid "Releasing source"
msgstr ""

#: src/libs/data-staging/Processor.cpp:562
#, c-format
msgid "There was a problem during post-transfer source handling: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:567
#, fuzzy
msgid "Releasing destination"
msgstr "Nem sikerült feloldani a célállomást"

#: src/libs/data-staging/Processor.cpp:571
#, c-format
msgid ""
"There was a problem during post-transfer destination handling after error: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:575
#, c-format
msgid "Error with post-transfer destination handling: %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:597
#, c-format
msgid "Finalising current replica %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:617
msgid "Removing pre-registered destination in index service"
msgstr ""

#: src/libs/data-staging/Processor.cpp:620
#, c-format
msgid ""
"Failed to unregister pre-registered destination %s: %s. You may need to "
"unregister it manually"
msgstr ""

#: src/libs/data-staging/Processor.cpp:626
msgid "Registering destination replica"
msgstr ""

#: src/libs/data-staging/Processor.cpp:629
#, fuzzy, c-format
msgid "Failed to register destination replica: %s"
msgstr "Nem támogatott url: %s"

#: src/libs/data-staging/Processor.cpp:632
#, c-format
msgid ""
"Failed to unregister pre-registered destination %s. You may need to "
"unregister it manually"
msgstr ""

#: src/libs/data-staging/Processor.cpp:662
msgid "Error creating cache. Stale locks may remain."
msgstr ""

#: src/libs/data-staging/Processor.cpp:695
#, c-format
msgid "Linking/copying cached file to %s"
msgstr ""

#: src/libs/data-staging/Processor.cpp:716
#, fuzzy, c-format
msgid "Failed linking cache file to %s"
msgstr "Nem sikerült listázni a fájlokat"

#: src/libs/data-staging/Processor.cpp:720
#, c-format
msgid "Error linking cache file to %s."
msgstr ""

#: src/libs/data-staging/Processor.cpp:741
#: src/libs/data-staging/Processor.cpp:748
msgid "Adding to bulk request"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:174
#: src/libs/data-staging/Scheduler.cpp:181
#, fuzzy
msgid "source"
msgstr "Forrás: %s"

#: src/libs/data-staging/Scheduler.cpp:174
#: src/libs/data-staging/Scheduler.cpp:181
#, fuzzy
msgid "destination"
msgstr "Célállomás: %s"

#: src/libs/data-staging/Scheduler.cpp:174
#, c-format
msgid "Using next %s replica"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:181
#, c-format
msgid "No more %s replicas"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:183
msgid "Will clean up pre-registered destination"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:187
msgid "Will release cache locks"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:190
msgid "Moving to end of data staging"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:199
#, c-format
msgid "Source is mapped to %s"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:203
msgid "Cannot link to source which can be modified, will copy instead"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:212
msgid "Cannot link to a remote destination. Will not use mapped URL"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:215
msgid "Linking mapped file"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:222
#, c-format
msgid "Failed to create link: %s. Will not use mapped URL"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:247
#, c-format
msgid ""
"Scheduler received new DTR %s with source: %s, destination: %s, assigned to "
"transfer share %s with priority %d"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:255
msgid ""
"File is not cacheable, was requested not to be cached or no cache available, "
"skipping cache check"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:261
msgid "File is cacheable, will check cache"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:264
#: src/libs/data-staging/Scheduler.cpp:289
#, c-format
msgid "File is currently being cached, will wait %is"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:283
msgid "Timed out while waiting for cache lock"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:293
msgid "Checking cache again"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:313
#, fuzzy
msgid "Destination file is in cache"
msgstr "Célállomás: %s"

#: src/libs/data-staging/Scheduler.cpp:317
msgid "Source and/or destination is index service, will resolve replicas"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:320
msgid ""
"Neither source nor destination are index services, will skip resolving "
"replicas"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:331
msgid "Problem with index service, will release cache lock"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:335
msgid "Problem with index service, will proceed to end of data staging"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:345
msgid "Checking source file is present"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:353
msgid "Error with source file, moving to next replica"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:375
#, c-format
msgid "Replica %s has long latency, trying next replica"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:377
#, c-format
msgid "No more replicas, will use %s"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:380
#, c-format
msgid "Checking replica %s"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:392
#, fuzzy
msgid "Pre-clean failed"
msgstr "Fájl feltöltve %s"

#: src/libs/data-staging/Scheduler.cpp:397
msgid "Pre-clean failed, will still try to copy"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:405
#, fuzzy
msgid "Source or destination requires staging"
msgstr "A feladat megszakítása sikertelen"

#: src/libs/data-staging/Scheduler.cpp:409
msgid "No need to stage source or destination, skipping staging"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:439
msgid "Staging request timed out, will release request"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:443
msgid "Querying status of staging request"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:452
#, fuzzy
msgid "Releasing requests"
msgstr "Nem sikerült elküldeni a kérést"

#: src/libs/data-staging/Scheduler.cpp:477
msgid "DTR is ready for transfer, moving to delivery queue"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:492
#, c-format
msgid "Transfer failed: %s"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:502
msgid "Releasing request(s) made during staging"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:505
msgid "Neither source nor destination were staged, skipping releasing requests"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:526
msgid "Trying next replica"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:531
#, fuzzy
msgid "unregister"
msgstr "Figyelmen kívül hagyás: %s"

#: src/libs/data-staging/Scheduler.cpp:531
#, fuzzy
msgid "register"
msgstr "Figyelmen kívül hagyás: %s"

#: src/libs/data-staging/Scheduler.cpp:530
#, c-format
msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:534 msgid "Destination is not index service, skipping replica registration" msgstr "" #: src/libs/data-staging/Scheduler.cpp:547 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:556 msgid "Will process cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:560 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:574 msgid "Cancellation complete" msgstr "" #: src/libs/data-staging/Scheduler.cpp:588 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:594 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:603 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:621 msgid "Proxy has expired" msgstr "" #: src/libs/data-staging/Scheduler.cpp:632 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:648 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:650 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:656 msgid "Finished successfully" msgstr "" #: src/libs/data-staging/Scheduler.cpp:666 msgid "Returning to generator" msgstr "" #: src/libs/data-staging/Scheduler.cpp:840 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:894 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:902 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:915 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:931 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:958 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:968 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1172 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1182 msgid "Processing thread timed out. 
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1250
#, fuzzy
msgid "Will use bulk request"
msgstr "Nem sikerült elküldeni a kérést"

#: src/libs/data-staging/Scheduler.cpp:1272
msgid "No delivery endpoints available, will try later"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1291
msgid "Scheduler received NULL DTR"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1301
msgid "Scheduler received invalid DTR"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1390
msgid "Scheduler starting up"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1391
msgid "Scheduler configuration:"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1392
#, c-format
msgid " Pre-processor slots: %u"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1393
#, c-format
msgid " Delivery slots: %u"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1394
#, c-format
msgid " Post-processor slots: %u"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1395
#, c-format
msgid " Emergency slots: %u"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1396
#, c-format
msgid " Prepared slots: %u"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1397
#, c-format
msgid ""
" Shares configuration:\n"
"%s"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1400
msgid " Delivery service: LOCAL"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1401
#, c-format
msgid " Delivery service: %s"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1406
msgid "Failed to create DTR dump thread"
msgstr ""

#: src/libs/data-staging/Scheduler.cpp:1423
#: src/services/data-staging/DataDeliveryService.cpp:531
#, c-format
msgid "DTR %s cancelled"
msgstr ""

#: src/libs/data-staging/examples/Generator.cpp:15
msgid "Shutting down scheduler"
msgstr ""

#: src/libs/data-staging/examples/Generator.cpp:17
msgid "Scheduler stopped, exiting"
msgstr ""

#: src/libs/data-staging/examples/Generator.cpp:23
#, c-format
msgid "Received DTR %s back from scheduler in state %s"
msgstr ""

#: src/libs/data-staging/examples/Generator.cpp:30
#, fuzzy
msgid "Generator started"
msgstr "A feltöltő elindult"

#: src/libs/data-staging/examples/Generator.cpp:31
msgid "Starting DTR threads"
msgstr ""

#: src/libs/data-staging/examples/Generator.cpp:44
#, fuzzy
msgid "No valid credentials found, exiting"
msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány"

#: src/libs/data-staging/examples/Generator.cpp:55
#, fuzzy, c-format
msgid "Problem creating dtr (source %s, destination %s)"
msgstr "Nem sikerült feloldani a célállomást"

#: src/services/a-rex/arex.cpp:340 src/services/candypond/CandyPond.cpp:569
#: src/services/data-staging/DataDeliveryService.cpp:705
#, c-format
msgid "SOAP operation is not supported: %s"
msgstr ""

#: src/services/a-rex/arex.cpp:358 src/services/a-rex/arex.cpp:403
#, fuzzy, c-format
msgid "Security Handlers processing failed: %s"
msgstr "VOMS attribútumok értelmezése sikertelen"

#: src/services/a-rex/arex.cpp:381
msgid ""
"Can't obtain configuration. Public information is disallowed for this user."
msgstr ""

#: src/services/a-rex/arex.cpp:388
msgid "Can't obtain configuration. Only public information is provided."
msgstr "" #: src/services/a-rex/arex.cpp:416 src/services/a-rex/rest/rest.cpp:740 #, fuzzy, c-format msgid "Connection from %s: %s" msgstr "Funkció: %s" #: src/services/a-rex/arex.cpp:419 src/services/a-rex/rest/rest.cpp:744 #, c-format msgid "process: method: %s" msgstr "" #: src/services/a-rex/arex.cpp:420 src/services/a-rex/rest/rest.cpp:745 #, c-format msgid "process: endpoint: %s" msgstr "" #: src/services/a-rex/arex.cpp:445 #, c-format msgid "process: id: %s" msgstr "" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "process: subop: %s" msgstr "" #: src/services/a-rex/arex.cpp:453 #, c-format msgid "process: subpath: %s" msgstr "" #: src/services/a-rex/arex.cpp:491 src/services/candypond/CandyPond.cpp:543 #: src/services/data-staging/DataDeliveryService.cpp:665 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "" #: src/services/a-rex/arex.cpp:496 src/services/candypond/CandyPond.cpp:548 #: src/services/data-staging/DataDeliveryService.cpp:670 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "" #: src/services/a-rex/arex.cpp:499 src/services/candypond/CandyPond.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:673 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "" #: src/services/a-rex/arex.cpp:526 #, fuzzy msgid "POST request on special path is not supported" msgstr "A fileset regisztcáció nem támogatott még" #: src/services/a-rex/arex.cpp:531 msgid "process: factory endpoint" msgstr "" #: src/services/a-rex/arex.cpp:575 src/services/candypond/CandyPond.cpp:580 #: src/services/data-staging/DataDeliveryService.cpp:716 #: src/tests/echo/echo.cpp:158 #, c-format msgid "process: response=%s" msgstr "" #: src/services/a-rex/arex.cpp:580 msgid "Per-job POST/SOAP requests are not supported" msgstr "" #: src/services/a-rex/arex.cpp:589 msgid "process: GET" msgstr "" #: src/services/a-rex/arex.cpp:590 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:623 msgid "process: HEAD" msgstr "" #: src/services/a-rex/arex.cpp:624 #, c-format msgid "HEAD: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:657 msgid "process: PUT" msgstr "" #: src/services/a-rex/arex.cpp:690 msgid "process: DELETE" msgstr "" #: src/services/a-rex/arex.cpp:723 #, c-format msgid "process: method %s is not supported" msgstr "" #: src/services/a-rex/arex.cpp:726 msgid "process: method is not defined" msgstr "" #: src/services/a-rex/arex.cpp:836 msgid "Failed to run Grid Manager thread" msgstr "" #: src/services/a-rex/arex.cpp:889 #, c-format msgid "Failed to process configuration in %s" msgstr "" #: src/services/a-rex/arex.cpp:894 msgid "No control directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:898 msgid "No session directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:902 msgid "No LRMS set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:961 #, c-format msgid "Failed to create control directory %s" msgstr "" #: src/services/a-rex/arex.cpp:965 #, fuzzy, c-format msgid "Failed to update control directory %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/arex.cpp:972 #, fuzzy msgid "Failed to start GM threads" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/arex.cpp:1008 #, c-format msgid "Created entry for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1010 #, fuzzy, c-format msgid "Failed to create entry for JWT issuer %s" msgstr "privát kulcs elérési útvonala" #: 
#: src/services/a-rex/arex.cpp:1013
#, c-format
msgid "Empty data for JWT issuer %s"
msgstr ""

#: src/services/a-rex/arex.cpp:1016
#, fuzzy, c-format
msgid "Failed to read data for JWT issuer %s"
msgstr "Nem sikerült listázni a meta adatokat"

#: src/services/a-rex/authop.cpp:26
#, fuzzy
msgid "CheckOperationAllowed: missing configuration"
msgstr "voms szerver fájljának az elérési útvonala"

#: src/services/a-rex/authop.cpp:80
msgid "CheckOperationAllowed: allowed due to missing configuration scopes"
msgstr ""

#: src/services/a-rex/authop.cpp:83
#, c-format
msgid "CheckOperationAllowed: token scopes: %s"
msgstr ""

#: src/services/a-rex/authop.cpp:84
#, c-format
msgid "CheckOperationAllowed: configuration scopes: %s"
msgstr ""

#: src/services/a-rex/authop.cpp:87
msgid "CheckOperationAllowed: allowed due to matching scopes"
msgstr ""

#: src/services/a-rex/authop.cpp:91
msgid "CheckOperationAllowed: token scopes do not match required scopes"
msgstr ""

#: src/services/a-rex/authop.cpp:97
msgid "CheckOperationAllowed: allowed for TLS connection"
msgstr ""

#: src/services/a-rex/authop.cpp:101
msgid "CheckOperationAllowed: no supported identity found"
msgstr ""

#: src/services/a-rex/cachecheck.cpp:37
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:710
#, c-format
msgid "Error with cache configuration: %s"
msgstr ""

#: src/services/a-rex/cachecheck.cpp:53
#: src/services/candypond/CandyPond.cpp:318
msgid "Error with cache configuration"
msgstr ""

#: src/services/a-rex/cachecheck.cpp:78
#: src/services/candypond/CandyPond.cpp:146
#: src/services/candypond/CandyPond.cpp:343
#, c-format
msgid "Looking up URL %s"
msgstr ""

#: src/services/a-rex/cachecheck.cpp:80
#: src/services/candypond/CandyPond.cpp:155
#, fuzzy, c-format
msgid "Cache file is %s"
msgstr "Fájl feltöltve %s"

#: src/services/a-rex/change_activity_status.cpp:22
#: src/services/a-rex/put.cpp:163 src/services/a-rex/put.cpp:204
#, c-format
msgid "%s: there is no such job: %s"
msgstr ""

#: src/services/a-rex/change_activity_status.cpp:30
#, c-format
msgid "%s: put log %s: there is no payload"
msgstr ""

#: src/services/a-rex/change_activity_status.cpp:36
#, c-format
msgid "%s: put log %s: unrecognized payload"
msgstr ""

#: src/services/a-rex/change_activity_status.cpp:75
#, fuzzy
msgid "A-REX REST: Failed to resume job"
msgstr "Feladat újraküldésének megkísérlése ide: %s"

#: src/services/a-rex/change_activity_status.cpp:79
#, c-format
msgid "A-REX REST: State change not allowed: from %s to %s"
msgstr ""

#: src/services/a-rex/create_activity.cpp:24
msgid "NEW: put new job: there is no payload"
msgstr ""

#: src/services/a-rex/create_activity.cpp:28
msgid "NEW: put new job: max jobs total limit reached"
msgstr ""

#: src/services/a-rex/delegation/DelegationStore.cpp:47
msgid "Wiping and re-creating whole storage"
msgstr ""

#: src/services/a-rex/delegation/DelegationStore.cpp:207
#: src/services/a-rex/delegation/DelegationStore.cpp:309
#, c-format
msgid "DelegationStore: TouchConsumer failed to create file %s"
msgstr ""

#: src/services/a-rex/delegation/DelegationStore.cpp:269
msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator"
msgstr ""

#: src/services/a-rex/delegation/DelegationStore.cpp:289
#, c-format
msgid ""
"DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - "
"%s"
msgstr ""

#: src/services/a-rex/get.cpp:172 src/services/a-rex/get.cpp:227
#: src/services/a-rex/get.cpp:313
#, c-format
msgid "Get: there is no job %s - %s"
msgstr ""

#: src/services/a-rex/get.cpp:380
#, c-format
msgid "Head: there is no job %s - %s"
msgstr ""

#: src/services/a-rex/get.cpp:436
#, fuzzy
msgid "Failed to extract credential information"
msgstr "Nem sikerült betölteni a konfigurációt"

#: src/services/a-rex/get.cpp:439
#, c-format
msgid "Checking cache permissions: DN: %s"
msgstr ""

#: src/services/a-rex/get.cpp:440
#, c-format
msgid "Checking cache permissions: VO: %s"
msgstr ""

#: src/services/a-rex/get.cpp:442
#, c-format
msgid "Checking cache permissions: VOMS attr: %s"
msgstr ""

#: src/services/a-rex/get.cpp:452
#, c-format
msgid "Cache access allowed to %s by DN %s"
msgstr ""

#: src/services/a-rex/get.cpp:455
#, c-format
msgid "DN %s doesn't match %s"
msgstr ""

#: src/services/a-rex/get.cpp:458
#, c-format
msgid "Cache access allowed to %s by VO %s"
msgstr ""

#: src/services/a-rex/get.cpp:461
#, c-format
msgid "VO %s doesn't match %s"
msgstr ""

#: src/services/a-rex/get.cpp:467 src/services/a-rex/get.cpp:486
#, c-format
msgid "Bad credential value %s in cache access rules"
msgstr ""

#: src/services/a-rex/get.cpp:475 src/services/a-rex/get.cpp:494
#, c-format
msgid "VOMS attr %s matches %s"
msgstr ""

#: src/services/a-rex/get.cpp:476
#, c-format
msgid "Cache access allowed to %s by VO %s and role %s"
msgstr ""

#: src/services/a-rex/get.cpp:479 src/services/a-rex/get.cpp:498
#, c-format
msgid "VOMS attr %s doesn't match %s"
msgstr ""

#: src/services/a-rex/get.cpp:495
#, c-format
msgid "Cache access allowed to %s by VO %s and group %s"
msgstr ""

#: src/services/a-rex/get.cpp:501
#, c-format
msgid "Unknown credential type %s for URL pattern %s"
msgstr ""

#: src/services/a-rex/get.cpp:507
#, c-format
msgid "No match found in cache access rules for %s"
msgstr ""

#: src/services/a-rex/get.cpp:517
#, c-format
msgid "Get from cache: Looking in cache for %s"
msgstr ""

#: src/services/a-rex/get.cpp:520
#, fuzzy, c-format
msgid "Get from cache: Invalid URL %s"
msgstr "Érvénytelen URL: %s"

#: src/services/a-rex/get.cpp:537
msgid "Get from cache: Error in cache configuration"
msgstr ""

#: src/services/a-rex/get.cpp:546
msgid "Get from cache: File not in cache"
msgstr ""

#: src/services/a-rex/get.cpp:549
#, c-format
msgid "Get from cache: could not access cached file: %s"
msgstr ""

#: src/services/a-rex/get.cpp:559
msgid "Get from cache: Cached file is locked"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:98
#, c-format
msgid ""
"Cannot create directories for log file %s. Messages will be logged to this "
"log"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:104
#, c-format
msgid ""
"Cannot open cache log file %s: %s. Cache cleaning messages will be logged to "
"this log"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:114
msgid "Failed to start cache clean script"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:115
msgid "Cache cleaning script failed"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:183
#, c-format
msgid "External request for attention %s"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:201
#, fuzzy, c-format
msgid "Failed to open heartbeat file %s"
msgstr "Nem sikerült listázni a meta adatokat"

#: src/services/a-rex/grid-manager/GridManager.cpp:223
msgid "Starting jobs processing thread"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:224
#, c-format
msgid "Used configuration file %s"
msgstr ""

#: src/services/a-rex/grid-manager/GridManager.cpp:232
#, c-format
msgid ""
"Error initiating delegation database in %s. Maybe permissions are not "
"suitable. Returned error is: %s."
msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:244 msgid "Failed to start new thread: cache won't be cleaned" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:251 msgid "Failed to activate Jobs Processing object, exiting Grid Manager thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:260 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:263 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:270 #, fuzzy msgid "Failed to start new thread for monitoring job requests" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/GridManager.cpp:276 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:279 msgid "Starting data staging threads" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:283 msgid "Starting jobs' monitoring" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:291 #, c-format msgid "" "SSHFS mount point of session directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:295 #, c-format msgid "" "SSHFS mount point of runtime directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:300 #, c-format msgid "" "SSHFS mount point of cache directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:354 msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:368 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:371 msgid "Stopping jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:373 msgid "Exiting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:391 msgid "Requesting to stop job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:399 msgid "Waiting for main job processing thread to exit" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:401 msgid "Stopped job processing" msgstr "" #: src/services/a-rex/grid-manager/accounting/AAR.cpp:73 msgid "Cannot find information abouto job submission endpoint" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:58 #, fuzzy, c-format msgid "Failed to read database schema file at %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:68 msgid "Accounting database initialized successfully" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:70 msgid "Accounting database connection has been established" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:80 #, c-format msgid "%s. SQLite database error: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:82 #, c-format msgid "SQLite database error: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:110 #, c-format msgid "Directory %s to store accounting database has been created." 
msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:112 #, c-format msgid "" "Accounting database cannot be created. Faile to create parent directory %s." msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:116 #, c-format msgid "Accounting database cannot be created: %s is not a directory" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:123 #, fuzzy msgid "Failed to initialize accounting database" msgstr "Nem sikerült betölteni a konfigurációt" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:130 #, c-format msgid "Accounting database file (%s) is not a regular file" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:136 msgid "Error opening accounting database" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:154 msgid "Closing connection to SQLite accounting database" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:243 #, c-format msgid "Failed to fetch data from %s accounting database table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:260 #, c-format msgid "Failed to add '%s' into the accounting database %s table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:327 msgid "Failed to fetch data from accounting database Endpoints table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:344 #, c-format msgid "" "Failed to add '%s' URL (interface type %s) into the accounting database " "Endpoints table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:370 #, fuzzy, c-format msgid "Failed to query AAR database ID for job %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:431 #, c-format msgid "Failed to insert AAR into the database for job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:432 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:481 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:512 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:528 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:544 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:565 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:581 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:596 #, c-format msgid "SQL statement used: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:437 #, fuzzy, c-format msgid "Failed to write authtoken attributes for job %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:441 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:498 #, fuzzy, c-format msgid "Failed to write event records for job %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:452 #, c-format msgid "" "Cannot to update AAR. Cannot find registered AAR for job %s in accounting " "database." 
msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:480 #, fuzzy, c-format msgid "Failed to update AAR in the database for job %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:486 #, fuzzy, c-format msgid "Failed to write RTEs information for the job %s" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:490 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:494 #, fuzzy, c-format msgid "Failed to write data transfers information for the job %s" msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:590 #, fuzzy, c-format msgid "Unable to add event: cannot find AAR for job %s in accounting database." msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:73 #, fuzzy, c-format msgid "Unknown option %s" msgstr "Célállomás: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "Path to user's proxy file should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:98 msgid "Path to .local job status file is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:106 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:109 msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:118 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:126 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:135 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:148 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:151 #, c-format msgid "interface is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "localid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:161 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:164 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:166 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." 
msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:174 #, c-format msgid "Job timestamp successfully parsed as %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:178 msgid "Can not read information from the local job status file" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:194 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly. Please submit the bug to bugzilla." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:204 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:217 #, c-format msgid "Found VOMS AC attribute: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:230 #, fuzzy msgid "VOMS AC attribute is a tag" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 msgid "Skipping policyAuthority VOMS AC attribute" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:241 #, fuzzy msgid "VOMS AC attribute is the FQAN" msgstr "VOMS attribútumok értelmezése sikertelen" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:249 msgid "No FQAN found. Using None as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:263 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:268 #, c-format msgid "Writing the info to the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:276 #, fuzzy, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:36 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:40 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:44 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:58 #, c-format msgid "Wrong option in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:69 #, fuzzy, c-format msgid "Can't read configuration file at %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:79 #, fuzzy, c-format msgid "Can't recognize type of configuration file at %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:82 msgid "Could not determine configuration type or configuration is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:163 #, fuzzy msgid "lrms is empty" msgstr "Túl sok paraméter" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:196 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:205 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:214 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:223 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:232 msgid "Missing number in maxjobs" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:199 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:208 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:217 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:226 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:235 #, c-format msgid "Wrong 
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:245
#, c-format
msgid "Wrong number in wakeupperiod: %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:251
#, fuzzy
msgid "mail parameter is empty"
msgstr "Túl sok paraméter"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:257
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:261
msgid "Wrong number in defaultttl command"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:267
msgid "Wrong number in maxrerun command"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:274
msgid "State name for plugin is missing"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:278
msgid "Options for plugin are missing"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:281
#, c-format
msgid "Failed to register plugin for state %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:287
msgid "Session root directory is missing"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:290
msgid "Junk in sessiondir command"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:302
msgid "Missing directory in controldir command"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:307
msgid ""
"'control' configuration option is no longer supported, please use "
"'controldir' instead"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:312
msgid "User for helper program is missing"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:315
msgid "Only user '.' for helper program is supported"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:318
msgid "Helper program is missing"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:339
msgid "Wrong option in fixdirectories"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366
msgid "Wrong option in delegationdb"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:375
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608
#, fuzzy
msgid "forcedefaultvoms parameter is empty"
msgstr "Túl sok paraméter"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:486
msgid "Wrong number in maxjobdesc command"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:535
msgid "Missing file name in [arex/jura] logfile"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:546
#, c-format
msgid "Wrong number in urdelivery_frequency: %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:601
msgid "No queue name given in queue block name"
msgstr ""

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:617
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652
#, fuzzy
msgid "advertisedvo parameter is empty"
msgstr "Túl sok paraméter"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:117
#, c-format
msgid "\tSession root dir : %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:118
#, c-format
msgid "\tControl dir : %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:119
#, c-format
msgid "\tdefault LRMS : %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120
#, c-format
msgid "\tdefault queue : %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:121
#, c-format
msgid "\tdefault ttl : %u"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126
msgid "No valid caches found in configuration, caching is disabled"
msgstr ""
#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131
#, c-format
msgid "\tCache : %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:133
#, c-format
msgid "\tCache link dir : %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:136
#, c-format
msgid "\tCache (read-only): %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:138
msgid "\tCache cleaning enabled"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:139
msgid "\tCache cleaning disabled"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:256
msgid "Starting controldir update tool."
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:258
#, fuzzy
msgid "Failed to start controldir update tool."
msgstr "Nem sikerült elküldeni a kérést"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:261
#, c-format
msgid "Failed to run controldir update tool. Exit code: %i"
msgstr ""

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:381
msgid ""
"Globus location variable substitution is not supported anymore. Please "
"specify path directly."
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:35
msgid "Can't read configuration file"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:41
#: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:29
msgid "Can't recognize type of configuration file"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:47
msgid "Configuration error"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:77
msgid "Bad number in maxdelivery"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:83
msgid "Bad number in maxemergency"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:89
msgid "Bad number in maxprocessor"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95
msgid "Bad number in maxprepared"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101
msgid "Bad number in maxtransfertries"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:112
msgid "Bad number in speedcontrol"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:123
#, c-format
msgid "Bad number in definedshare %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:132
#, c-format
msgid "Bad URL in deliveryservice: %s"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:143
msgid "Bad number in remotesizelimit"
msgstr ""

#: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:168
msgid "Bad value for loglevel"
msgstr ""

#: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:24
msgid "Can't open configuration file"
msgstr ""

#: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:45
msgid "Not enough parameters in copyurl"
msgstr ""

#: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:54
msgid "Not enough parameters in linkurl"
msgstr ""

#: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:185
#, c-format
msgid "Wrong directory in %s"
msgstr ""

#: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:104
#, c-format
msgid "Failed setting file owner: %s"
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:36
#, fuzzy, c-format
msgid "Could not read data staging configuration from %s"
msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:44
#, c-format
msgid "Can't read transfer states from %s. Perhaps A-REX is not running?"
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:100 msgid "gm-jobs displays information on current jobs in the system." msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:105 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:110 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:115 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "dir" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:120 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:125 msgid "do not print list of jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:130 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:135 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:140 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 #, fuzzy msgid "dn" msgstr "n" #: src/services/a-rex/grid-manager/gm_jobs.cpp:145 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 #: src/services/a-rex/grid-manager/gm_kick.cpp:30 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:150 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:155 msgid "request to clean job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:160 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:165 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:170 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:175 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:180 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "job id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:185 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:186 #, fuzzy msgid "file name" msgstr "fájlnév" #: src/services/a-rex/grid-manager/gm_jobs.cpp:209 #, fuzzy, c-format msgid "Using configuration at %s" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:232 #, fuzzy, c-format msgid "Failed to open output file '%s'" msgstr "Nem sikerült listázni a fájlokat" #: src/services/a-rex/grid-manager/gm_jobs.cpp:241 msgid "Looking for current jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:278 #, c-format msgid "Job: %s : ERROR : Unrecognizable state" msgstr "" #: 
#: src/services/a-rex/grid-manager/gm_jobs.cpp:287
#, c-format
msgid "Job: %s : ERROR : No local information."
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:461
#, c-format
msgid "Job: %s : ERROR : Failed to put cancel mark"
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:465
#, c-format
msgid "Job: %s : Cancel request put but failed to communicate to service"
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:467
#, c-format
msgid "Job: %s : Cancel request put and communicated to service"
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:478
#, c-format
msgid "Job: %s : ERROR : Failed to put clean mark"
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:482
#, c-format
msgid "Job: %s : Clean request put but failed to communicate to service"
msgstr ""

#: src/services/a-rex/grid-manager/gm_jobs.cpp:484
#, c-format
msgid "Job: %s : Clean request put and communicated to service"
msgstr ""

#: src/services/a-rex/grid-manager/gm_kick.cpp:18
msgid ""
"gm-kick wakes up the A-REX corresponding to the given control directory. If "
"no directory is given it uses the control directory found in the "
"configuration file."
msgstr ""

#: src/services/a-rex/grid-manager/gm_kick.cpp:29
msgid "inform about changes in particular job (can be used multiple times)"
msgstr ""

#: src/services/a-rex/grid-manager/inputcheck.cpp:39
#, c-format
msgid "Failed to acquire source: %s"
msgstr ""

#: src/services/a-rex/grid-manager/inputcheck.cpp:44
#, c-format
msgid "Failed to resolve %s"
msgstr ""

#: src/services/a-rex/grid-manager/inputcheck.cpp:61
#, c-format
msgid "Failed to check %s"
msgstr ""

#: src/services/a-rex/grid-manager/inputcheck.cpp:75
msgid "job_description_file [proxy_file]"
msgstr ""

#: src/services/a-rex/grid-manager/inputcheck.cpp:76
msgid ""
"inputcheck checks that input files specified in the job description are "
"available and accessible using the credentials in the given proxy file."
msgstr ""

#: src/services/a-rex/grid-manager/inputcheck.cpp:88
msgid "Wrong number of arguments given"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:75
#, c-format
msgid ""
"DTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobs"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:89
#, c-format
msgid "%s: Job cancel request from DTR generator to scheduler"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:94
#, c-format
msgid "%s: Returning canceled job from DTR generator"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:135
#, c-format
msgid "%s: Re-requesting attention from DTR generator"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:145
#, c-format
msgid "DTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobs"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:164
msgid "Exiting Generator thread"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:236
msgid "Shutting down data staging threads"
msgstr ""

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:246
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:259
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:287
msgid "DTRGenerator is not running!"
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:249 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:263 msgid "DTRGenerator was sent null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:272 #, c-format msgid "%s: Received job in DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:275 #, c-format msgid "%s: Failed to receive job in DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:282 msgid "DTRGenerator got request to cancel null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:297 msgid "DTRGenerator is queried about null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:327 msgid "DTRGenerator is asked about null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:355 msgid "DTRGenerator is requested to remove null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:362 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:370 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:378 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:389 #, fuzzy, c-format msgid "%s: Invalid DTR" msgstr "Érvénytelen URL: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:406 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:410 #, c-format msgid "%s: Received DTR belongs to inactive job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:427 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1065 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:474 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:532 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:646 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:856 #, c-format msgid "%s: Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:436 #, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:442 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:452 #, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:462 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:727 #, c-format msgid "%s: Failed to read list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:476 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:617 #, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:478 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:482 #, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:486 #, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #, c-format msgid "%s: Failed to write list of output files" msgstr "" #: 
src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:506 #, c-format msgid "%s: Failed to write list of output status files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:518 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:739 #, c-format msgid "%s: Failed to read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:537 #, fuzzy, c-format msgid "%s: Failed to write list of input files" msgstr "Nem sikerült listázni a fájlokat" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:549 #, c-format msgid "%s: Received DTR with two remote endpoints!" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:561 #: src/services/candypond/CandyPondGenerator.cpp:105 #, fuzzy, c-format msgid "No active job id %s" msgstr "Feladat migrálásra került ezzel az azonosítóval: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:605 #, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:631 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:650 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:777 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:909 #, c-format msgid "%s: Failed to clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:641 #, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 #, fuzzy msgid "uploads" msgstr "Feltöltve %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "downloads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "cancelled" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:662 #, c-format msgid "%s: All %s %s successfully" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:666 #, c-format msgid "%s: Some %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:670 #, c-format msgid "%s: Requesting attention from DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:681 msgid "DTRGenerator is requested to process null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "download" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 #, fuzzy msgid "upload" msgstr "Feltöltve %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:686 #, c-format msgid "%s: Received data staging request to %s files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:748 #, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:801 #, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:803 #, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:834 #, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:852 #, c-format msgid "%s: Adding new output file %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:875 #, 
c-format msgid "%s: Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:888 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:920 #, c-format msgid "%s: Received job in a bad state: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:928 #, c-format msgid "%s: Session directory processing takes too long - %u.%06u seconds" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:976 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1071 #, c-format msgid "%s: Failed writing local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1089 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1096 msgid "DTRGenerator is asked to check files for null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1116 #, fuzzy, c-format msgid "%s: Can't read list of input files" msgstr "Nem tudom olvasni a célállomásokat a fájlból: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1131 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1136 #, fuzzy, c-format msgid "%s: User has uploaded file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1143 #, fuzzy, c-format msgid "%s: Failed writing changed input file." msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1147 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, fuzzy, c-format msgid "%s: User has NOT uploaded file %s" msgstr "Fájl feltöltve %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1165 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1221 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1247 #, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1228 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1242 #, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1237 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1259 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1275 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1281 #, c-format msgid "%s: Failed to open file %s for reading" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1289 #, c-format msgid "%s: Error accessing file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1301 #, c-format msgid "%s: Error reading file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1316 #, c-format msgid "%s: File %s has wrong checksum: %llu. 
Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1322 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1334 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1341 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1350 msgid "DTRGenerator is requested to clean links for null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1366 #, c-format msgid "%s: Cache cleaning takes too long - %u.%06u seconds" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:108 #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:190 #, c-format msgid "%s: Job monitoring counter is broken" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:115 #, c-format msgid "%s: Job monitoring is unintentionally lost" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:124 #, c-format msgid "%s: Job monitoring stop success" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:129 #, c-format msgid "" "%s: Job monitoring stop requested with %u active references and %s queue " "associated" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:131 #, c-format msgid "%s: Job monitoring stop requested with %u active references" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:195 #, c-format msgid "%s: Job monitoring is lost due to removal from queue" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:278 #, c-format msgid "%s: PushSorted failed to find job where expected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:161 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:255 #, c-format msgid "Bad name for stdout: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:263 #, c-format msgid "Bad name for stderr: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:326 #, c-format msgid "Bad name for runtime environment: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:371 msgid "Job description file could not be read." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:422 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:436 #, c-format msgid "Bad name for executable: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:89 #, fuzzy msgid "Failed to start data staging threads" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:190 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:195 #, c-format msgid "%s: unexpected failed job add request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:206 #, c-format msgid "%s: unexpected job add request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:259 #, c-format msgid "%s: job for attention" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:269 msgid "all for attention" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:286 #, c-format msgid "%s: job found while scanning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:314 #, c-format msgid "%s: job will wait for external process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: job assigned for slow polling" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, c-format msgid "%s: job being processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:384 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:386 #, c-format msgid "%s: %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:398 #, fuzzy, c-format msgid "%s: Failed storing failure reason: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, fuzzy, c-format msgid "%s: Failed reading job description: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:416 #, fuzzy, c-format msgid "%s: Failed parsing job request." msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:466 #, c-format msgid "%s: Failed writing list of output files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:492 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:506 #, c-format msgid "%s: Failed writing local information: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:538 #, c-format msgid "%s: Failed creating grami file" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:542 #, c-format msgid "%s: Failed setting executable permissions" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:557 #, c-format msgid "%s: Failed running submission process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:562 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:669 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:578 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:585 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:594 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:599 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:620 #, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:626 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:654 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:656 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:664 #, c-format msgid "%s: Failed running cancellation process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:683 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: Job cancellation takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:699 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:705 #, c-format msgid "%s: Failed to cancel running job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:724 #, c-format msgid "%s: State: %s: data staging finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:759 #, c-format msgid "%s: State: %s: still in data staging" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:772 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:782 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:803 #, fuzzy, c-format msgid "%s: Reprocessing job description failed" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:810 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:814 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:898 #, c-format msgid "%s: Reading status of new job failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:911 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:913 #, c-format msgid "%s: Processing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:952 #, fuzzy, c-format msgid "%s: new job is accepted" msgstr "Feladat elküldve ezzel az azonítóval: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:964 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:969 #, c-format msgid "%s: old job is accepted" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:980 #, c-format msgid "%s: State: ACCEPTED" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:986 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1009 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1015 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1031 #, c-format msgid "%s: State: PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1038 #, c-format msgid "%s: Failed obtaining local job information." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1091 #, c-format msgid "%s: State: SUBMIT" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1111 #, c-format msgid "%s: State: CANCELING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1131 #, c-format msgid "%s: State: INLRMS" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: INLRMS - checking for pending(%u) and mark" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1138 #, c-format msgid "%s: State: INLRMS - checking for not pending" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1140 #, c-format msgid "%s: Job finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1144 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1157 #, c-format msgid "%s: State: INLRMS - no mark found" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1169 #, c-format msgid "%s: State: FINISHING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1190 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1207 #, c-format msgid "%s: restarted PREPARING job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1223 #, c-format msgid "%s: restarted INLRMS job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1232 #, c-format msgid "%s: restarted FINISHING job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1237 #, c-format msgid "%s: Can't rerun on request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1239 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1250 #, c-format msgid "%s: Job is too old - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1295 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1313 #, c-format msgid "%s: Canceling job because of user request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1327 #, c-format msgid "%s: Failed to turn job into failed during cancel processing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1359 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1367 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1373 #, c-format msgid "%s: Plugin execution failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1480 #, c-format msgid "%s: State: %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1529 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1556 #, c-format msgid "%s: Delete request due to internal problems" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1591 #, c-format msgid "%s: Job failure detected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1651 #, fuzzy, c-format msgid "Failed to move file %s to %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1659 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1769 #, c-format msgid "Failed reading control directory: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1729 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2043 #, c-format msgid "Helper process start failed: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2050 #, c-format msgid "Stopping helper process %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:61 #, fuzzy, c-format msgid "Error with hearbeatfile: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:73 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:139 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:136 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:107 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:186 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:178 msgid "" "gmetric_bin_path empty in arc.conf (should never happen the default value " "should be used)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:114 #, fuzzy msgid ": Accounting records reporter tool is not specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for accounting reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting accounting reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:176 msgid ": Failure creating accounting database connection" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:202 #, c-format msgid ": writing accounting record took %llu ms" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:74 #, c-format msgid "Session dir '%s' contains user specific substitutions - skipping it" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:86 #, c-format msgid "Sessiondir %s: Free space %f GB" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:94 #, fuzzy msgid "No session directories found in configuration." msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:125 msgid "No cachedirs found/configured for calculation of free space." 
msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:34 #, c-format msgid "%s: Job's helper exited" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:56 #, c-format msgid "%s: Failure creating slot for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:120 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:73 #, c-format msgid "%s: Failure starting child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:62 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:78 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:47 #, fuzzy msgid "[job description input]" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:48 msgid "" "Tool for writing the grami file representation of a job description file." msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:52 msgid "Name of grami file" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:57 #, fuzzy msgid "Configuration file to load" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:58 msgid "arc.conf" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:62 msgid "Session directory to use" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:63 #, fuzzy msgid "directory" msgstr "könyvtár" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:79 #, fuzzy msgid "No job description file name provided." msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:85 #, fuzzy, c-format msgid "Unable to parse job description input: %s" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:91 #, fuzzy msgid "Unable to load ARC configuration file." 
msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:111 #, fuzzy, c-format msgid "Unable to write grami file: %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:117 #, fuzzy, c-format msgid "Unable to write 'output' file: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/information_collector.cpp:53 #, c-format msgid "Resource information provider: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:56 #, fuzzy msgid "Resource information provider failed to start" msgstr "Státusz lekérdezés sikertelen" #: src/services/a-rex/information_collector.cpp:59 #, fuzzy msgid "Resource information provider failed to run" msgstr "Státusz lekérdezés sikertelen" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:65 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:71 msgid "No new informational document assigned" msgstr "Nem jött létre új információs dokumentum" #: src/services/a-rex/information_collector.cpp:73 #, c-format msgid "Obtained XML: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:87 msgid "Informational document is empty" msgstr "" #: src/services/a-rex/information_collector.cpp:212 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:215 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:221 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:230 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:242 #, fuzzy msgid "OptimizedInformationContainer failed to rename temporary file" msgstr "" "Proxy készítés sikertelen: Nem sikerült leellenÅ‘rizni a publikus kulcsot" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:38 msgid "Default INTERNAL client constructor" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:41 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:61 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:83 #, fuzzy msgid "Failed to load grid-manager configfile" msgstr "privát kulcs elérési útvonala" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:46 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:66 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:88 #, fuzzy msgid "Failed to set INTERNAL endpoint" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:131 #, fuzzy msgid "Failed to identify grid-manager config file" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:150 #, fuzzy, c-format msgid "Failed to run configuration parser at %s." msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:154 #, c-format msgid "Parser failed with error code %i." msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:160 #, c-format msgid "No pid file is found at '%s'. Probably A-REX is not running." 
msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:175 #, fuzzy, c-format msgid "Failed to load grid-manager config file from %s" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:266 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:372 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:405 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:451 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:505 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:557 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:575 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:625 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:655 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:673 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:691 msgid "INTERNALClient is not initialized" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:456 msgid "Submitting job " msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:517 #, fuzzy, c-format msgid "Failed to copy input file: %s to path: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:523 #, fuzzy, c-format msgid "Failed to set permissions on: %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:51 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:92 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:119 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:145 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:184 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:246 #, fuzzy msgid "Failed to load grid-manager config file" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:191 #, c-format msgid "Job %s does not report a resumable state" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:196 #, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:205 msgid "Job resuming successful" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:251 #, c-format msgid "Failed retrieving information for job: %s" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:324 msgid "Retrieving job description of INTERNAL jobs is not supported" msgstr "" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:67 #, c-format msgid "Listing localjobs succeeded, %d localjobs found" msgstr "" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:83 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface " "(%s)." 
msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:38 msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:45 #, c-format msgid "Failed to delegate credentials to server - %s" msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:84 #, fuzzy msgid "Failed preparing job description" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:127 #, fuzzy msgid "Failed submitting job description" msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #: src/services/a-rex/job.cpp:78 #, c-format msgid "Using cached local account '%s'" msgstr "" #: src/services/a-rex/job.cpp:89 msgid "Will not map to 'root' account by default" msgstr "" #: src/services/a-rex/job.cpp:102 msgid "No local account name specified" msgstr "" #: src/services/a-rex/job.cpp:105 #, c-format msgid "Using local account '%s'" msgstr "" #: src/services/a-rex/job.cpp:109 msgid "TLS provides no identity, going for OTokens" msgstr "" #: src/services/a-rex/job.cpp:168 #, fuzzy msgid "Failed to acquire A-REX's configuration" msgstr "voms szerver fájljának az elérési útvonala" #: src/services/a-rex/job.cpp:240 #, c-format msgid "Cannot handle local user %s" msgstr "" #: src/services/a-rex/job.cpp:288 #, c-format msgid "%s: Failed to parse user policy" msgstr "" #: src/services/a-rex/job.cpp:293 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "" #: src/services/a-rex/job.cpp:398 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "" #: src/services/a-rex/job.cpp:738 src/services/a-rex/job.cpp:756 #, c-format msgid "Credential expires at %s" msgstr "" #: src/services/a-rex/job.cpp:740 src/services/a-rex/job.cpp:758 #, c-format msgid "Credential handling exception: %s" msgstr "" #: src/services/a-rex/job.cpp:924 #, c-format msgid "Failed to run external plugin: %s" msgstr "" #: src/services/a-rex/job.cpp:928 #, c-format msgid "Plugin response: %s" msgstr "" #: src/services/a-rex/job.cpp:1138 #, fuzzy, c-format msgid "Failed to create job in %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/a-rex/job.cpp:1147 #, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "" #: src/services/a-rex/job.cpp:1397 msgid "No non-draining session dirs available" msgstr "" #: src/services/a-rex/put.cpp:150 #, c-format msgid "%s: put file %s: there is no payload" msgstr "" #: src/services/a-rex/put.cpp:156 #, c-format msgid "%s: put file %s: unrecognized payload" msgstr "" #: src/services/a-rex/put.cpp:172 src/services/a-rex/rest/rest.cpp:2050 #, fuzzy, c-format msgid "%s: put file %s: failed to create file: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/put.cpp:188 #, c-format msgid "%s: put file %s: %s" msgstr "" #: src/services/a-rex/put.cpp:210 #, fuzzy, c-format msgid "%s: delete file %s: failed to obtain file path: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/put.cpp:221 #, fuzzy, c-format msgid "%s: delete file %s: failed to open file/dir: %s" msgstr "Nem sikerült listázni a meta adatokat" #: src/services/a-rex/rest/rest.cpp:749 #, c-format msgid "REST: process %s at %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:797 src/services/a-rex/rest/rest.cpp:813 #: src/services/a-rex/rest/rest.cpp:1094 src/services/a-rex/rest/rest.cpp:1185 #: src/services/a-rex/rest/rest.cpp:1549 src/services/a-rex/rest/rest.cpp:2161 #, 
c-format msgid "process: method %s is not supported for subpath %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:819 #, c-format msgid "process: schema %s is not supported for subpath %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1182 src/services/a-rex/rest/rest.cpp:1546 #, fuzzy, c-format msgid "process: action %s is not supported for subpath %s" msgstr "A fileset regisztcáció nem támogatott még" #: src/services/a-rex/rest/rest.cpp:1558 src/services/a-rex/rest/rest.cpp:1627 #: src/services/a-rex/rest/rest.cpp:1987 src/services/a-rex/rest/rest.cpp:2150 #, c-format msgid "REST:GET job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1674 src/services/a-rex/rest/rest.cpp:1682 #, c-format msgid "REST:KILL job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1699 src/services/a-rex/rest/rest.cpp:1707 #, c-format msgid "REST:CLEAN job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1724 src/services/a-rex/rest/rest.cpp:1732 #: src/services/a-rex/rest/rest.cpp:1749 #, c-format msgid "REST:RESTART job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:2040 #, c-format msgid "REST:PUT job %s: file %s: there is no payload" msgstr "" #: src/services/a-rex/rest/rest.cpp:2063 #, c-format msgid "HTTP:PUT %s: put file %s: %s" msgstr "" #: src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:20 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" #: src/services/candypond/CandyPond.cpp:52 msgid "No A-REX config file found in candypond configuration" msgstr "" #: src/services/candypond/CandyPond.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "" #: src/services/candypond/CandyPond.cpp:60 #, c-format msgid "Failed to process A-REX configuration in %s" msgstr "" #: src/services/candypond/CandyPond.cpp:65 msgid "No caches defined in configuration" msgstr "" #: src/services/candypond/CandyPond.cpp:140 #: src/services/candypond/CandyPond.cpp:347 #, c-format msgid "Can't handle URL %s" msgstr "" #: src/services/candypond/CandyPond.cpp:150 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/candypond/CandyPond.cpp:162 #, c-format msgid "Problem accessing cache file %s: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:210 #: src/services/candypond/CandyPond.cpp:474 msgid "No job ID supplied" msgstr "" #: src/services/candypond/CandyPond.cpp:219 #, c-format msgid "Bad number in priority element: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:228 msgid "No username supplied" msgstr "" #: src/services/candypond/CandyPond.cpp:235 #, c-format 
msgid "Supplied username %s does not match mapped username %s" msgstr "" #: src/services/candypond/CandyPond.cpp:249 msgid "No session directory found" msgstr "" #: src/services/candypond/CandyPond.cpp:253 #, c-format msgid "Using session dir %s" msgstr "" #: src/services/candypond/CandyPond.cpp:257 #, fuzzy, c-format msgid "Failed to stat session dir %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/candypond/CandyPond.cpp:262 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/candypond/CandyPond.cpp:289 #, fuzzy, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "Nem sikerült elküldeni a kérést" #: src/services/candypond/CandyPond.cpp:307 #, c-format msgid "DN is %s" msgstr "" #: src/services/candypond/CandyPond.cpp:385 #, c-format msgid "Permission checking passed for url %s" msgstr "" #: src/services/candypond/CandyPond.cpp:410 #: src/services/candypond/CandyPondGenerator.cpp:135 #, c-format msgid "Failed to move %s to %s: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:441 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/candypond/CandyPond.cpp:443 #, c-format msgid "Failed to start new DTR for %s" msgstr "" #: src/services/candypond/CandyPond.cpp:487 #, c-format msgid "Job %s: all files downloaded successfully" msgstr "" #: src/services/candypond/CandyPond.cpp:494 #, c-format msgid "Job %s: Some downloads failed" msgstr "" #: src/services/candypond/CandyPond.cpp:499 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/candypond/CandyPond.cpp:511 msgid "CandyPond: Unauthorized" msgstr "" #: src/services/candypond/CandyPond.cpp:520 msgid "No local user mapping found" msgstr "" #: src/services/candypond/CandyPond.cpp:527 #: src/services/data-staging/DataDeliveryService.cpp:649 #, fuzzy, c-format msgid "Identity is %s" msgstr "Azonosító: %s" #: src/services/candypond/CandyPond.cpp:585 #: src/services/data-staging/DataDeliveryService.cpp:721 msgid "Security Handlers processing failed" msgstr "" #: src/services/candypond/CandyPond.cpp:592 msgid "Only POST is supported in CandyPond" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:124 #, c-format msgid "Could not determine session directory from filename %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:164 #, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:206 #, c-format msgid "DTRs still running for job %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:215 #, c-format msgid "All DTRs finished for job %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:222 #, c-format msgid "Job %s not found" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:66 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:70 #, c-format msgid "Archiving DTR %s, state %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "No delegation token in request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:184 msgid "Failed to accept delegation" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:214 #: src/services/data-staging/DataDeliveryService.cpp:221 #, fuzzy msgid "ErrorDescription" msgstr "Feladat leírás: %s" #: src/services/data-staging/DataDeliveryService.cpp:226 #, c-format 
msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:241 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:258 #, c-format msgid "Storing temp proxy at %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:266 #, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:273 #, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:302 #, fuzzy msgid "Invalid DTR" msgstr "Érvénytelen URL: %s" #: src/services/data-staging/DataDeliveryService.cpp:306 #, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:407 #, c-format msgid "No such DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:425 #, c-format msgid "DTR %s failed: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:436 #, c-format msgid "DTR %s finished successfully" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:446 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:506 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:516 #, c-format msgid "DTR %s was already cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:525 #, fuzzy, c-format msgid "DTR %s could not be cancelled" msgstr "A feladatot nem sikerült megölni vagy letörölni" #: src/services/data-staging/DataDeliveryService.cpp:569 #, fuzzy, c-format msgid "Failed to get load average: %s" msgstr "privát kulcs elérési útvonala" #: src/services/data-staging/DataDeliveryService.cpp:593 msgid "Invalid configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:597 #, fuzzy msgid "Invalid configuration - no transfer dirs specified" msgstr "Nincs megadva feladat leírás bemeneti adatként" #: src/services/data-staging/DataDeliveryService.cpp:608 msgid "Failed to start archival thread" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:633 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:642 msgid "Unauthorized" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:728 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:82 msgid "EchoService (python) 'Process' called" msgstr "" #: src/services/examples/echo_python/EchoService.py:86 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) 
= %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:87 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:88 #, python-format msgid "EchoService (python) got: %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:93 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:99 #: src/services/examples/echo_python/EchoService.py:171 #, fuzzy, python-format msgid "outpayload %s" msgstr "Feltöltve %s" #: src/services/examples/echo_python/EchoService.py:128 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:131 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:137 #: src/services/examples/echo_python/EchoService.py:155 #, fuzzy, python-format msgid "new_payload %s" msgstr "Feltöltve %s" #: src/services/examples/echo_python/EchoService.py:149 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:165 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:167 msgid "Waiting ends." msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:103 #, c-format msgid "Loading %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Initialized %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:142 msgid "Invalid class name" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:147 #, c-format msgid "class name: %s" msgstr "osztály neve: %s" #: src/services/wrappers/python/pythonwrapper.cpp:148 #, c-format msgid "module name: %s" msgstr "modul neve: %s" #: src/services/wrappers/python/pythonwrapper.cpp:205 msgid "Cannot find ARC Config class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:212 msgid "Config class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:220 msgid "Cannot get dictionary of module" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:229 msgid "Cannot find service class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:238 msgid "Cannot create config argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:245 msgid "Cannot convert config to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:268 #, c-format msgid "%s is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:274 msgid "Message class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:280 msgid "Python Wrapper constructor succeeded" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:295 #, c-format msgid "Python Wrapper destructor (%d)" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:328 msgid "Python interpreter locked" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:332 msgid "Python interpreter released" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:403 msgid "Python wrapper process called" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:412 msgid "Failed to create input SOAP container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:422 msgid "Cannot create inmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:436 msgid "Cannot find ARC Message class" msgstr "" #: 
src/services/wrappers/python/pythonwrapper.cpp:442 msgid "Cannot convert inmsg to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:451 msgid "Failed to create SOAP containers" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:457 msgid "Cannot create outmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:463 msgid "Cannot convert outmsg to Python object" msgstr "" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:12 msgid "Creating a soap client" msgstr "" #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:61 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:22 msgid "Creating and sending request" msgstr "" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 #: src/tests/echo/test_clientinterface.py:30 msgid "SOAP invocation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invocation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invocation failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:77 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:52 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:57 #: src/tests/delegation/test_delegation_client.cpp:89 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:84 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "" #: src/tests/count/count.cpp:58 
msgid "Input is not SOAP" msgstr "" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "" #: src/tests/count/test_client.cpp:50 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "" #: src/tests/count/test_client.cpp:54 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "" #: src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "" #: src/tests/count/test_client.cpp:84 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "" #: src/tests/count/test_client.cpp:90 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "" #: src/tests/count/test_client.cpp:97 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "" #: src/tests/count/test_service.cpp:22 src/tests/echo/test.cpp:23 #: src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "" #: src/tests/count/test_service.cpp:25 src/tests/echo/test.cpp:26 #: src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "" #: src/tests/count/test_service.cpp:30 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "" #: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr "" #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" msgstr "" #, fuzzy #~ msgid "No jobs to resubmit with the specified status" #~ msgstr "Nincs megadva feladat leírás bemeneti adatként" #, fuzzy, c-format #~ msgid "Cannot write jobids to file (%s)" #~ msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #, fuzzy #~ msgid "Job resubmission summary:" #~ msgstr "Job küldési összefoglaló" #, fuzzy, c-format #~ msgid "%d of %d jobs were resubmitted" #~ msgstr "%d %d feladatból elküldve" #, fuzzy, c-format #~ msgid "The following %d were not resubmitted" #~ msgstr "%d nem lett elküldve" #, fuzzy #~ msgid "Test aborted because no resource returned any information" #~ msgstr "" #~ "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " #~ "információt magáról" #, fuzzy #~ msgid "interfacename" #~ msgstr "Felhasználó oldali hiba" #~ msgid "force migration, ignore kill failure" #~ msgstr "migráció kikényszerítése, megölési hiba figyelmen kívül hagyása" #, fuzzy #~ msgid "resubmit to the same resource" #~ msgstr "újraküldés ugyanarra a klaszterre" #, fuzzy #~ msgid "do not resubmit to the same resource" #~ msgstr "újraküldés ugyanarra a klaszterre" #, fuzzy, c-format #~ msgid "OpenSSL error -- %s" #~ msgstr "OpenSSL hiba -- %s" #, c-format #~ msgid "Library : %s" #~ msgstr "Könyvtár : %s" #, c-format #~ msgid "Function : %s" #~ msgstr "Funkció: %s" #, c-format #~ msgid "Reason : %s" #~ msgstr "Indok : %s" #~ msgid "User interface error" #~ msgstr "Felhasználó oldali hiba" #~ msgid "Aborted!" #~ msgstr "Megszakítva!" 
#, fuzzy #~ msgid "username to MyProxy server" #~ msgstr "myproxy szerverhez szükséges felhasználónév" #, fuzzy, c-format #~ msgid "There are %d commands to the same VOMS server %s" #~ msgstr "%d számú parancs van ugyanahoz a voms szerverhez: %s" #, fuzzy, c-format #~ msgid "Try to get attribute from VOMS server with order: %s" #~ msgstr "Attribútumok lekérdezés a voms szervertÅ‘l ebben a sorrendben: %s" #, fuzzy #~ msgid "No HTTP response from VOMS server" #~ msgstr "Nincs válasz a voms szervertÅ‘l" #, fuzzy, c-format #~ msgid "Returned message from VOMS server: %s" #~ msgstr "Ez a válasz érkezett a voms szervertÅ‘l: %s" #, fuzzy #~ msgid "No stream response from VOMS server" #~ msgstr "Nincs válasz a voms szervertÅ‘l" #, fuzzy, c-format #~ msgid "Returned message from VOMS server %s is: %s\n" #~ msgstr "Ez a válasz érkezett a voms szervertÅ‘l: %s" #, fuzzy, c-format #~ msgid "Cannot get VOMS server %s information from the vomses files" #~ msgstr "Nem sikerült elérnem a voms szervert %s, ezen fájl alapján: %s" #~ msgid "No stream response" #~ msgstr "Nincs válasz" #, c-format #~ msgid "Returned msg from myproxy server: %s %d" #~ msgstr "Ezt a választ kaptam a myproxy szervertÅ‘l: %s %d" #, c-format #~ msgid "There are %d certificates in the returned msg" #~ msgstr "%d darab publikus tanúsítvány van a válasz üzenetben" #~ msgid "Delegate proxy failed" #~ msgstr "Proxy delegáció sikertelen" #, c-format #~ msgid "Returned msg from voms server: %s " #~ msgstr "Ezt a választ kaptam a voms szervertÅ‘l: %s" #, fuzzy, c-format #~ msgid "Unable to copy %s" #~ msgstr "Nem sikerült betölteni a %s bróker modult" #, fuzzy, c-format #~ msgid "Unable to list content of %s" #~ msgstr "Nem sikerült betölteni a %s bróker modult" #, fuzzy, c-format #~ msgid "Unable to remove file %s" #~ msgstr "Nem sikerült listázni a meta adatokat" #~ msgid "path to config file" #~ msgstr "a konfigurációs fájl elérési útvonala" #~ msgid "[-]name" #~ msgstr "[-]név" #~ msgid "Missing URL" #~ msgstr "Hiányzik az URL" #, fuzzy #~ msgid "Query is not a valid XML" #~ msgstr "A lekérdezés nem XML helyes" #~ msgid "Failed to send request" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy, c-format #~ msgid "SendCommand: Response: %s" #~ msgstr "Válasz: %s" #, fuzzy, c-format #~ msgid "invalid jobID: %s" #~ msgstr "Érvénytelen URL: %s" #, c-format #~ msgid "Job description: %s" #~ msgstr "Feladat leírás: %s" #, fuzzy, c-format #~ msgid "Invalid JobDescription: %s" #~ msgstr "Érvénytelen feladat leírás:" #, fuzzy #~ msgid "Failed to prepare job description." #~ msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #, fuzzy #~ msgid "Failed to prepare job description to target resources." #~ msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #, fuzzy #~ msgid "Failed locating credentials." #~ msgstr "Nem sikerült listázni a meta adatokat" #, fuzzy, c-format #~ msgid "Failed to cancel job: %s" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy #~ msgid "Creating and sending request to resume a job" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy #~ msgid "Creating and sending request to list jobs" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Failed resuming job: %s" #~ msgstr "Nem sikerült listázni a fájlokat" #, fuzzy, c-format #~ msgid "Unable to submit job. Job description is not valid in the %s format" #~ msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." 
#, fuzzy, c-format #~ msgid "Creating and sending job submit request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending service information request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending job clean request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending job suspend request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending job restart request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending job notify request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending notify request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy, c-format #~ msgid "Creating and sending job list request to %s" #~ msgstr "SOAP kérés készítése és küldése" #, fuzzy #~ msgid "Unable to submit job. Job description is not valid XML" #~ msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #, fuzzy #~ msgid "Failed to notify service" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy #~ msgid "Failed preparing job description to target resources" #~ msgstr "Nem tudom kiíratni a feladat leírást: Nem található várakozó sor." #, fuzzy, c-format #~ msgid "Failed to submit job description: %s" #~ msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #, fuzzy #~ msgid "Obtaining status failed" #~ msgstr "Státusz lekérdezés sikertelen" #, fuzzy #~ msgid "Obtaining information failed" #~ msgstr "verzió információ kiírása" #, fuzzy, c-format #~ msgid "Failed to obtain resource description: %s" #~ msgstr "Nem tudom megnyitni a feladat leíró fájlt: %s" #, fuzzy #~ msgid "Resource description is empty" #~ msgstr "Feladat leírás: %s" #, fuzzy #~ msgid "All queries failed" #~ msgstr "Státusz lekérdezés sikertelen" #, fuzzy, c-format #~ msgid "Unsupported command: %s" #~ msgstr "Nem támogatott url: %s" #~ msgid "A status request failed" #~ msgstr "Státusz lekérdezés sikertelen" #~ msgid "A status request succeed" #~ msgstr "Státusz lekérdezés sikeres" #~ msgid "A job termination request failed" #~ msgstr "A feladat megszakítása sikertelen" #~ msgid "A job termination request succeed" #~ msgstr "A feladat megszakítása sikeres" #, fuzzy, c-format #~ msgid "Failed to query ACIX: %s" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy, c-format #~ msgid "PDPD location: %s" #~ msgstr "Célállomás: %s" #, fuzzy, c-format #~ msgid "There was no SOAP response return from PDP server: %s" #~ msgstr "Nincs SOAP-os válasz" #, fuzzy, c-format #~ msgid "XACML authorisation request: %s" #~ msgstr "A feladat megszakítása sikeres" #, fuzzy, c-format #~ msgid "PEPD location: %s" #~ msgstr "Célállomás: %s" #, fuzzy #~ msgid "Unable to create temporary directory" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy, c-format #~ msgid "Job resubmission failed: Unable to load broker (%s)" #~ msgstr "A feladat küldés meghiusúlt, mert nincs több szabad várakozó sor." 
#, fuzzy #~ msgid "" #~ "Job resubmission aborted because no resource returned any information" #~ msgstr "" #~ "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " #~ "információt magáról" #, fuzzy #~ msgid "Job migration aborted, no resource returned any information" #~ msgstr "" #~ "Feladat küldés sikertelen, mert egyetlen klaszter sem adott vissza " #~ "információt magáról" #, fuzzy, c-format #~ msgid "Job migration aborted, unable to load broker (%s)" #~ msgstr "Nem sikerült betölteni a %s bróker modult" #, fuzzy, c-format #~ msgid "Job migration failed for job (%s), no applicable targets" #~ msgstr "A feladat küldés meghiusúlt, mert nincs több szabad várakozó sor." #, fuzzy #~ msgid "Failed to sign proxy" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy #~ msgid "Failed to generate X509 request with NSS" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy, c-format #~ msgid "Failed processing user mapping command: unixgroupmap %s" #~ msgstr "voms szerver fájljának az elérési útvonala" #, fuzzy, c-format #~ msgid "CA certificates directory %s does not exist" #~ msgstr "Az XML konfigurációs fájl: %s nem létezik" #, fuzzy, c-format #~ msgid "Can't interpret configuration file %s as XML" #~ msgstr "voms szerver fájljának az elérési útvonala" #, fuzzy, c-format #~ msgid "Failed to lock delegated credentials: %s" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy, c-format #~ msgid "year: %s" #~ msgstr "Név: %s" #, fuzzy, c-format #~ msgid "moth: %s" #~ msgstr "Proxy elérési útvonal: %s" #, fuzzy, c-format #~ msgid "queue: %s" #~ msgstr "Kérés: %s" #, fuzzy, c-format #~ msgid "query: %s" #~ msgstr "Kérés: %s" #, fuzzy, c-format #~ msgid "Failed to parse remote addres %s" #~ msgstr "Nem sikerült elküldeni a kérést" #, fuzzy, c-format #~ msgid "failed while processing configuration command: %s %s" #~ msgstr "voms szerver fájljának az elérési útvonala" #, fuzzy #~ msgid "failed to initialize environment variables" #~ msgstr "Nem sikerült betölteni a konfigurációt" #, fuzzy, c-format #~ msgid "Select failed: %s" #~ msgstr "Fájl feltöltve %s" #~ msgid "use GSI proxy (RFC 3820 compliant proxy is default)" #~ msgstr "" #~ "GSI proxy használata (RFC 3820-nak megfelelÅ‘ proxy, ez az alapbeállítás)" #, fuzzy #~ msgid "Unable to copy %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #, fuzzy #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #, fuzzy #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #, fuzzy #~ msgid "Unable to remove file %s: No valid credentials found" #~ msgstr "Nem tudom másolni a %s fájlt: Nem érvényes tanúsítvány" #~ msgid "Uploaded file %s" #~ msgstr "Fájl feltöltve %s" #~ msgid "Uploader started" #~ msgstr "A feltöltÅ‘ elindult" #~ msgid "Uploaded %s" #~ msgstr "Feltöltve %s" #, fuzzy #~ msgid "Failed writing output status file" #~ msgstr "Nem sikerült listázni a meta adatokat" #~ msgid "explicitly select or reject a specific cluster" #~ msgstr "egy klaszter egyértelmű kiválasztása vagy tiltása" #~ msgid "explicitly select or reject an index server" #~ msgstr "egy index szerver egyértelmű kiválasztása vagy tiltása" #~ msgid "" #~ "The arcmigrate command is used for migrating queued jobs to another " #~ "cluster.\n" #~ "Note that migration is only supported between ARC1 clusters." 
#~ msgstr "" #~ "Az arcmigrate paraccsot arra lehet használni, hogy egy várakozó sorban\n" #~ "lévÅ‘ feladatot átmozgassunk egy másik klaszterre. Jelenleg csak az ARC1-" #~ "es\n" #~ "klaszterek esetén lehet csak használni" #~ msgid "explicitly select or reject a cluster to migrate to" #~ msgstr "egy klaszter egyértelmű kiválasztása vagy tiltása migráció esetére" #~ msgid "select broker method (Random (default), FastestQueue, or custom)" #~ msgstr "" #~ "bróker kiválasztása (Random (alapbeállítás), FastestQueue vagy saját)" #~ msgid "file where the jobs will be stored" #~ msgstr "azon fájl, ahol a feladat azonosítók tárolásra kerülnek" #~ msgid "explicitly select or reject a specific cluster for the new job" #~ msgstr "" #~ "egy klaszter egyértelmű kiválasztása vagy tiltása új feladat számára" #~ msgid "No jobs to resubmit" #~ msgstr "Nem sikerült újraküldeni a feladatot" #~ msgid "Submission to %s failed, trying next target" #~ msgstr "" #~ "Feladat küldés erre a klaszterre nem sikerült: %s, megpróbálom a " #~ "következÅ‘t" #~ msgid "Job resubmitted with new jobid: %s" #~ msgstr "Feladat újraküldve ezzel az azonosítóval: %s" #~ msgid "service_url request_file" #~ msgstr "szolgáltatás_url kérési_fájl" #~ msgid "url of the policy decision service" #~ msgstr "az eljárásmódot eldöntÅ‘ szolgáltatás url-je" #~ msgid "URL of SLCS service" #~ msgstr "SLCS szolgáltatás URL-je" #~ msgid "Identity provider name" #~ msgstr "Azonító szolgáltatás neve" #~ msgid "User account to identity provider" #~ msgstr "Felhasználói név az azonosító szolgáltató részére" #~ msgid "Password for user account to identity provider" #~ msgstr "" #~ "A felhasználói névhez tartozó jelszó az azonosító szolgáltató részére" #~ msgid "Key size of the private key (512, 1024, 2048)" #~ msgstr "A privát kulcs mérete (512, 1024, 2048)" #~ msgid "Private key passphrase" #~ msgstr "Privát kulcs jelszava" #~ msgid "passphrase" #~ msgstr "jelszó" #~ msgid "period" #~ msgstr "periódus" #~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Az aktuális átvitel MEGSZAKADT: %s - %s" nordugrid-arc-7.1.1/po/PaxHeaders/remove-potcdate.sin0000644000000000000000000000013215067751332017576 xustar0030 mtime=1759498970.359031736 30 atime=1759499032.606477393 30 ctime=1759499034.615747254 nordugrid-arc-7.1.1/po/remove-potcdate.sin0000644000175000002070000000066015067751332021502 0ustar00mockbuildmock00000000000000# Sed script that remove the POT-Creation-Date line in the header entry # from a POT file. # # The distinction between the first and the following occurrences of the # pattern is achieved by looking at the hold space. /^"POT-Creation-Date: .*"$/{ x # Test if the hold space is empty. s/P/P/ ta # Yes it was empty. First occurrence. Remove the line. g d bb :a # The hold space was nonempty. Following occurrences. Do nothing. 
x :b } nordugrid-arc-7.1.1/po/PaxHeaders/stamp-po0000644000000000000000000000013015067751432015447 xustar0029 mtime=1759499034.60950783 29 atime=1759499034.60950783 30 ctime=1759499034.643145017 nordugrid-arc-7.1.1/po/stamp-po0000644000175000002070000000001215067751432017344 0ustar00mockbuildmock00000000000000timestamp nordugrid-arc-7.1.1/po/PaxHeaders/boldquot.sed0000644000000000000000000000013215067751332016313 xustar0030 mtime=1759498970.294147862 30 atime=1759498970.293530545 30 ctime=1759499034.618138514 nordugrid-arc-7.1.1/po/boldquot.sed0000644000175000002070000000033115067751332020212 0ustar00mockbuildmock00000000000000s/"\([^"]*\)"/“\1”/g s/`\([^`']*\)'/‘\1’/g s/ '\([^`']*\)' / ‘\1’ /g s/ '\([^`']*\)'$/ ‘\1’/g s/^'\([^`']*\)' /‘\1’ /g s/“”/""/g s/“/“/g s/”/”/g s/‘/‘/g s/’/’/g nordugrid-arc-7.1.1/po/PaxHeaders/LINGUAS0000644000000000000000000000013215067751327015016 xustar0030 mtime=1759498967.643490278 30 atime=1759498967.808492785 30 ctime=1759499034.644597183 nordugrid-arc-7.1.1/po/LINGUAS0000644000175000002070000000001415067751327016713 0ustar00mockbuildmock00000000000000ru sv de hu nordugrid-arc-7.1.1/po/PaxHeaders/de.po0000644000000000000000000000013215067751432014716 xustar0030 mtime=1759499034.053499381 30 atime=1759499034.456505505 30 ctime=1759499034.631299907 nordugrid-arc-7.1.1/po/de.po0000644000175000002070000237161715067751432016631 0ustar00mockbuildmock00000000000000# translation of Arc.po to Russian # Oxana Smirnova , 2007. # Translation file for the Advanced Resource Connector (Arc) msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2025-10-03 15:43+0200\n" "PO-Revision-Date: 2010-02-25 19:18+0100\n" "Last-Translator: Steffen Möller \n" "Language-Team: German\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: KBabel 1.11.4\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" "X-Poedit-Language: Russian\n" "X-Poedit-KeywordsList: msg:2;IString:1;istring:1\n" "X-Poedit-Basepath: /home/oxana/CVSROOT/ARC1\n" "X-Poedit-SearchPath-0: src\n" #: src/clients/compute/arccat.cpp:38 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresume.cpp:32 #: src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "[Job ...]" #: src/clients/compute/arccat.cpp:39 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job."
msgstr "" "Эта команда предназначена Ð´Ð»Ñ Ð²Ñ‹Ð²Ð¾Ð´Ð° на Ñкран Ñообщений Ñтандартного\n" "выхода, Ñтандартной ошибки или ошибок ÑиÑтемы при иÑполнении задачи" #: src/clients/compute/arccat.cpp:46 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresume.cpp:37 src/clients/compute/arcstat.cpp:42 #: src/clients/compute/arcsub.cpp:53 src/clients/compute/arcsync.cpp:147 #: src/clients/compute/arctest.cpp:67 src/clients/credentials/arcproxy.cpp:484 #: src/clients/data/arccp.cpp:652 src/clients/data/arcls.cpp:371 #: src/clients/data/arcmkdir.cpp:149 src/clients/data/arcrename.cpp:160 #: src/clients/data/arcrm.cpp:174 src/hed/daemon/unix/main_unix.cpp:345 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1265 #: src/hed/libs/data/DataExternalHelper.cpp:358 #, c-format msgid "%s version %s" msgstr "%s version %s" #: src/clients/compute/arccat.cpp:55 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresume.cpp:46 src/clients/compute/arcstat.cpp:51 #: src/clients/compute/arcsub.cpp:62 src/clients/compute/arcsync.cpp:156 #: src/clients/compute/arctest.cpp:89 src/clients/credentials/arcproxy.cpp:492 #: src/clients/data/arccp.cpp:659 src/clients/data/arcls.cpp:379 #: src/clients/data/arcmkdir.cpp:157 src/clients/data/arcrename.cpp:168 #: src/clients/data/arcrm.cpp:183 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:192 #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, fuzzy, c-format msgid "Running command: %s" msgstr "Kommando: %s" #: src/clients/compute/arccat.cpp:66 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresume.cpp:50 src/clients/compute/arcstat.cpp:62 #: src/clients/compute/arcsub.cpp:66 src/clients/compute/arcsync.cpp:167 #: src/clients/compute/arctest.cpp:93 src/clients/data/arccp.cpp:682 #: src/clients/data/arcls.cpp:401 src/clients/data/arcmkdir.cpp:179 #: src/clients/data/arcrename.cpp:190 src/clients/data/arcrm.cpp:205 #, fuzzy msgid "Failed configuration initialization" msgstr "Fehler bei Initialisierung der Konfiguration" #: src/clients/compute/arccat.cpp:78 src/clients/compute/arcclean.cpp:73 #: src/clients/compute/arcget.cpp:87 src/clients/compute/arckill.cpp:72 #: src/clients/compute/arcrenew.cpp:69 src/clients/compute/arcresume.cpp:69 #: src/clients/compute/arcstat.cpp:74 #, fuzzy, c-format msgid "Cannot read specified jobid file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/compute/arccat.cpp:108 src/clients/compute/arcclean.cpp:103 #: src/clients/compute/arcget.cpp:117 src/clients/compute/arckill.cpp:102 #: src/clients/compute/arcrenew.cpp:99 src/clients/compute/arcresume.cpp:99 #: src/clients/compute/arcstat.cpp:127 msgid "No jobs given" msgstr "Keine Jobs angegeben" #: src/clients/compute/arccat.cpp:121 src/clients/compute/arcclean.cpp:116 #: src/clients/compute/arcget.cpp:130 src/clients/compute/arckill.cpp:115 #: src/clients/compute/arcrenew.cpp:112 src/clients/compute/arcresume.cpp:112 #: src/clients/compute/arcstat.cpp:139 #, fuzzy, c-format msgid "Job list file (%s) doesn't exist" msgstr "Lock-Datei %s existiert 
nicht" #: src/clients/compute/arccat.cpp:128 src/clients/compute/arcclean.cpp:123 #: src/clients/compute/arcget.cpp:137 src/clients/compute/arckill.cpp:122 #: src/clients/compute/arcrenew.cpp:119 src/clients/compute/arcresume.cpp:119 #: src/clients/compute/arcstat.cpp:146 src/clients/compute/arctest.cpp:296 #, fuzzy, c-format msgid "Unable to read job information from file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arccat.cpp:137 src/clients/compute/arcclean.cpp:131 #: src/clients/compute/arcget.cpp:145 src/clients/compute/arckill.cpp:130 #: src/clients/compute/arcrenew.cpp:128 src/clients/compute/arcresume.cpp:128 #: src/clients/compute/arcstat.cpp:155 #, fuzzy, c-format msgid "Warning: Job not found in job list: %s" msgstr "Kann Job ID nicht finden: %s" #: src/clients/compute/arccat.cpp:150 src/clients/compute/arcclean.cpp:186 #: src/clients/compute/arcget.cpp:158 src/clients/compute/arckill.cpp:142 #: src/clients/compute/arcrenew.cpp:140 src/clients/compute/arcresume.cpp:140 #, fuzzy msgid "No jobs" msgstr "NO Job" #: src/clients/compute/arccat.cpp:165 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "" #: src/clients/compute/arccat.cpp:166 src/clients/compute/arccat.cpp:172 #, fuzzy, c-format msgid "Cannot create output of %s for any jobs" msgstr "Kann Verzeichnis \"%s\" für cache nicht anlegen" #: src/clients/compute/arccat.cpp:173 #, fuzzy, c-format msgid "Invalid destination URL %s" msgstr "Ungültige URL: %s" #: src/clients/compute/arccat.cpp:191 #, c-format msgid "Job deleted: %s" msgstr "" #: src/clients/compute/arccat.cpp:201 #, c-format msgid "Job has not started yet: %s" msgstr "" #: src/clients/compute/arccat.cpp:242 #, fuzzy, c-format msgid "Cannot determine the %s location: %s" msgstr "Kann Funktion %s nicht anlegen" #: src/clients/compute/arccat.cpp:247 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:260 #, c-format msgid "Catting %s for job %s" msgstr "" #: src/clients/compute/arcclean.cpp:35 #, fuzzy msgid "The arcclean command removes a job from the computing resource." msgstr "Das arcclean Kommando entfernt einen Job von einem entfernten Cluster." #: src/clients/compute/arcclean.cpp:155 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:158 #, fuzzy msgid "Are you sure you want to clean jobs missing information?" msgstr "Soll die lokale job list wirklich synchronisiert werden?" #: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "y" msgstr "j" #: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "n" msgstr "n" #: src/clients/compute/arcclean.cpp:164 #, fuzzy msgid "Jobs missing information will not be cleaned!" 
msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/compute/arcclean.cpp:180 src/clients/compute/arctest.cpp:300 #, fuzzy, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arcclean.cpp:181 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:190 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "Mit arcget erhält man die Ergebnisse eines Jobs." #: src/clients/compute/arcget.cpp:75 #, fuzzy, c-format msgid "Job download directory from user configuration file: %s" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/clients/compute/arcget.cpp:78 #, fuzzy msgid "Job download directory will be created in present working directory." msgstr "" "Download-Verzeichnis (das Job-Verzeichnis wird in diesem Verzeichnis " "abgelegt)" #: src/clients/compute/arcget.cpp:82 #, fuzzy, c-format msgid "Job download directory: %s" msgstr "Fehler beim Öffnen von Verzeichs: %s" #: src/clients/compute/arcget.cpp:168 #, fuzzy, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/clients/compute/arcget.cpp:178 #, c-format msgid "Results stored at: %s" msgstr "" #: src/clients/compute/arcget.cpp:190 src/clients/compute/arckill.cpp:158 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:191 src/clients/compute/arcget.cpp:198 #: src/clients/compute/arckill.cpp:159 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:197 src/clients/compute/arckill.cpp:165 #, fuzzy, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/clients/compute/arcget.cpp:202 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:206 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 #, fuzzy msgid "[resource ...]" msgstr "[Job ...]" #: src/clients/compute/arcinfo.cpp:35 #, fuzzy msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "Mit arcinfo wird der Zustand von Clustern auf dem Grid bestimmt." #: src/clients/compute/arcinfo.cpp:141 msgid "Information endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:152 #, fuzzy msgid "Submission endpoint" msgstr "Submission ergab Fehler: %s" #: src/clients/compute/arcinfo.cpp:154 #, fuzzy msgid "status" msgstr "statusstr" #: src/clients/compute/arcinfo.cpp:156 #, fuzzy msgid "interface" msgstr "Benutzungsschnittstellenfehler" #: src/clients/compute/arcinfo.cpp:175 #, fuzzy msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/compute/arcinfo.cpp:188 #, fuzzy msgid "ERROR: Failed to retrieve information" msgstr "Konnte Job Status Information nicht beziehen." #: src/clients/compute/arcinfo.cpp:190 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." msgstr "Mit arckill lassen sich laufenden Prozesse beenden." 
#: src/clients/compute/arckill.cpp:166 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:169 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:171 #, fuzzy, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "Job resumed erfolgreich" #: src/clients/compute/arcrenew.cpp:146 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresume.cpp:146 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "" #: src/clients/compute/arcstat.cpp:35 #, fuzzy msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "Эта команда используется для вывода информации о состоянии\n" "задач, отправленных на Грид, и о состоянии вычислительных\n" "ресурсов Грид " #: src/clients/compute/arcstat.cpp:101 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "" #: src/clients/compute/arcstat.cpp:171 #, fuzzy msgid "No jobs found, try later" msgstr "Keine Jobs zu bearbeiten" #: src/clients/compute/arcstat.cpp:215 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:45 msgid "[filename ...]" msgstr "[dateiname ...]" #: src/clients/compute/arcsub.cpp:46 #, fuzzy msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." msgstr "Mit dem arcsub Kommando werden Jobs den entfernten Clustern zugewiesen" #: src/clients/compute/arcsub.cpp:97 #, fuzzy msgid "No job description input specified" msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/clients/compute/arcsub.cpp:110 #, c-format msgid "Can not open job description file: %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/arcsub.cpp:138 src/clients/compute/arcsub.cpp:166 msgid "Invalid JobDescription:" msgstr "Ungültige JobDescription:" #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:250 msgid "" "Cannot adapt job description to the submission target when information " "discovery is turned off" msgstr "" #: src/clients/compute/arcsync.cpp:66 src/clients/compute/arcsync.cpp:177 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, fuzzy, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arcsync.cpp:140 #, fuzzy msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given CEs or registry servers."
msgstr "" "Das Kommando synchronisierte Ihre lokale Jobliste mit der Information eines " "Clusters oder Index-Servers" #: src/clients/compute/arcsync.cpp:183 #, fuzzy, c-format msgid "Warning: Unable to read local list of jobs from file (%s)" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/clients/compute/arcsync.cpp:188 #, fuzzy, c-format msgid "Warning: Unable to truncate local list of jobs in file (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arcsync.cpp:194 #, c-format msgid "Warning: Unable to create job list file (%s), jobs list is destroyed" msgstr "" #: src/clients/compute/arcsync.cpp:198 #, fuzzy, c-format msgid "" "Warning: Failed to write local list of jobs into file (%s), jobs list is " "destroyed" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/arcsync.cpp:231 #, fuzzy msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" "Synchronisiere lokale Liste aktiver Jobs mit der Information im MDS. Dies " "mag\n" "zu Inkonsistenzen führen. Gerade erst hochgeladene Jobs sind vielleicht " "noch\n" "nicht dem MDB bekannt, während für die Löschung ausgewählte Jobs noch ange-\n" "zeigt werden." #: src/clients/compute/arcsync.cpp:236 msgid "Are you sure you want to synchronize your local job list?" msgstr "Soll die lokale job list wirklich synchronisiert werden?" #: src/clients/compute/arcsync.cpp:241 msgid "Cancelling synchronization request" msgstr "Abbruch der Synchronisationsanfrage" #: src/clients/compute/arcsync.cpp:251 msgid "" "No services specified. Please configure default services in the client " "configuration, or specify a cluster or registry (-C or -Y options, see " "arcsync -h)." msgstr "" #: src/clients/compute/arctest.cpp:60 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:61 #, fuzzy msgid "The arctest command is used for testing clusters as resources." msgstr "Mit arcget erhält man die Ergebnisse eines Jobs." #: src/clients/compute/arctest.cpp:73 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:80 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." 
msgstr "" #: src/clients/compute/arctest.cpp:118 #, fuzzy msgid "Certificate information:" msgstr "Ungültige Authentisierungs-Information" #: src/clients/compute/arctest.cpp:122 #, fuzzy msgid "No user-certificate found" msgstr "Pfad zu Zertifikat-Datei" #: src/clients/compute/arctest.cpp:125 #, fuzzy, c-format msgid "Certificate: %s" msgstr "Voreinstellung: %s" #: src/clients/compute/arctest.cpp:127 #, fuzzy, c-format msgid "Subject name: %s" msgstr "Subjekt: %s" #: src/clients/compute/arctest.cpp:128 #, fuzzy, c-format msgid "Valid until: %s" msgstr "Ungültige url: %s" #: src/clients/compute/arctest.cpp:132 #, fuzzy msgid "Unable to determine certificate information" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/compute/arctest.cpp:136 #, fuzzy msgid "Proxy certificate information:" msgstr "Ungültige Authentisierungs-Information" #: src/clients/compute/arctest.cpp:138 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:141 #, fuzzy, c-format msgid "Proxy: %s" msgstr "Proxy Pfad: %s" #: src/clients/compute/arctest.cpp:142 #, fuzzy, c-format msgid "Proxy-subject: %s" msgstr "Subjekt: %s" #: src/clients/compute/arctest.cpp:144 #, fuzzy msgid "Valid for: Proxy expired" msgstr "Zeit verbleibend für Proxy: Proxy abgelaufen" #: src/clients/compute/arctest.cpp:146 #, fuzzy msgid "Valid for: Proxy not valid" msgstr "Zeit verbleibend für Proxy: Proxy ungültig" #: src/clients/compute/arctest.cpp:148 #, fuzzy, c-format msgid "Valid for: %s" msgstr "Ungültige url: %s" #: src/clients/compute/arctest.cpp:153 #, c-format msgid "Certificate issuer: %s" msgstr "" #: src/clients/compute/arctest.cpp:157 #, fuzzy msgid "CA-certificates installed:" msgstr "Pfad zu Zertifikat-Datei" #: src/clients/compute/arctest.cpp:179 msgid "Unable to detect if issuer certificate is installed." msgstr "" #: src/clients/compute/arctest.cpp:182 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:196 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:267 #, fuzzy, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/arctest.cpp:268 #, fuzzy, c-format msgid "Test submitted with jobid: %s" msgstr "Job hochgeladen mit Job ID: %s" #: src/clients/compute/arctest.cpp:283 #, fuzzy, c-format msgid "Computing service: %s" msgstr "Delegation service: %s" #: src/clients/compute/arctest.cpp:289 #, fuzzy msgid "Test failed, no more possible targets" msgstr "Hochladen des Jobs schlug fehl, keine weiteren Ziele verfügbar" #: src/clients/compute/arctest.cpp:302 src/clients/compute/submit.cpp:49 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arctest.cpp:315 src/clients/compute/submit.cpp:159 #, fuzzy, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/clients/compute/arctest.cpp:325 src/clients/compute/submit.cpp:175 #, fuzzy, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "Eine fehler geschah während des Generieres der Job Beschreibung." 
#: src/clients/compute/arctest.cpp:329 src/clients/compute/submit.cpp:179 #, fuzzy, c-format msgid "Job description to be sent to %s:" msgstr "Zu sendende Job-Beschreibung : %s" #: src/clients/compute/submit.cpp:34 #, c-format msgid "Job submitted with jobid: %s" msgstr "Job hochgeladen mit Job ID: %s" #: src/clients/compute/submit.cpp:40 #, fuzzy, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/compute/submit.cpp:45 #, c-format msgid "Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/submit.cpp:47 #, fuzzy, c-format msgid "Failed to write job information to database (%s)" msgstr "Konnte job information nicht beziehen für job: %s" #: src/clients/compute/submit.cpp:51 #, c-format msgid "Record about new job successfully added to the database (%s)" msgstr "" #: src/clients/compute/submit.cpp:57 msgid "Job submission summary:" msgstr "Job Hochladen Zusammenfassung:" #: src/clients/compute/submit.cpp:59 #, fuzzy, c-format msgid "%d of %d jobs were submitted" msgstr "%d von %s Jobs wurden hochgeladen" #: src/clients/compute/submit.cpp:61 #, fuzzy msgid "The following jobs were not submitted:" msgstr "Die folgenden %d wurden nicht hochgeladen" #: src/clients/compute/submit.cpp:65 msgid "Job nr." msgstr "" #: src/clients/compute/submit.cpp:75 #, fuzzy, c-format msgid "ERROR: Unable to load broker %s" msgstr "Konnter Broker %s nicht laden" #: src/clients/compute/submit.cpp:79 #, fuzzy msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "Hochladen des Jobs abgebrochen, da keine Cluster entsprechende Informationen " "anboten" #: src/clients/compute/submit.cpp:83 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/submit.cpp:100 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/submit.cpp:129 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" #: src/clients/compute/submit.cpp:130 #, fuzzy msgid "Original job description is listed below:" msgstr "" " -o, -stdout вывести файл стандартого выхода задачи (по\n" " умолчанию)" #: src/clients/compute/submit.cpp:142 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/submit.cpp:197 #, fuzzy msgid "" "Unable to prepare job description according to needs of the target resource."
msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/clients/compute/submit.cpp:281 src/clients/compute/submit.cpp:311 #, c-format msgid "Service endpoint %s (type %s) added to the list for resource discovery" msgstr "" #: src/clients/compute/submit.cpp:291 msgid "" "There are no endpoints in registry that match requested info endpoint type" msgstr "" #: src/clients/compute/submit.cpp:332 #, c-format msgid "Service endpoint %s (type %s) added to the list for direct submission" msgstr "" #: src/clients/compute/submit.cpp:340 msgid "" "There are no endpoints in registry that match requested submission endpoint " "type" msgstr "" #: src/clients/compute/utils.cpp:111 #, c-format msgid "Types of execution services that %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:114 #, c-format msgid "Types of registry services that %s is able to collect information from:" msgstr "" #: src/clients/compute/utils.cpp:117 #, c-format msgid "" "Types of local information services that %s is able to collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:120 #, c-format msgid "" "Types of local information services that %s is able to collect job " "information from:" msgstr "" #: src/clients/compute/utils.cpp:123 #, c-format msgid "Types of services that %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:126 #, fuzzy, c-format msgid "Job description languages supported by %s:" msgstr "Zu sendende Job-Beschreibung : %s" #: src/clients/compute/utils.cpp:129 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:152 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:162 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:167 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:179 src/clients/data/utils.cpp:28 msgid "" "Cannot find any token. Please run 'oidc-token' or use similar\n" " utility to obtain authentication token!" msgstr "" #: src/clients/compute/utils.cpp:308 #, fuzzy, c-format msgid "Unsupported submission endpoint type: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/clients/compute/utils.cpp:327 msgid "" "Requested to skip resource discovery. Will try direct submission to arcrest " "endpoint type." 
msgstr "" #: src/clients/compute/utils.cpp:332 #, fuzzy, c-format msgid "Unsupported information endpoint type: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/clients/compute/utils.cpp:385 #, fuzzy msgid "Other actions" msgstr "SASL Interaktion" #: src/clients/compute/utils.cpp:386 #, fuzzy msgid "Brokering and filtering" msgstr "Zeichenkette" #: src/clients/compute/utils.cpp:387 msgid "Output format modifiers" msgstr "" #: src/clients/compute/utils.cpp:388 msgid "Behaviour tuning" msgstr "" #: src/clients/compute/utils.cpp:389 msgid "Target endpoint selection" msgstr "" #: src/clients/compute/utils.cpp:393 msgid "computing element hostname or a complete endpoint URL" msgstr "" #: src/clients/compute/utils.cpp:394 src/clients/compute/utils.cpp:404 msgid "ce" msgstr "" #: src/clients/compute/utils.cpp:398 msgid "registry service URL with optional specification of protocol" msgstr "" #: src/clients/compute/utils.cpp:399 msgid "registry" msgstr "" #: src/clients/compute/utils.cpp:403 msgid "only select jobs that were submitted to this computing element" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "" "require the specified endpoint type for job submission.\n" "\tAllowed values are: arcrest and internal." msgstr "" #: src/clients/compute/utils.cpp:412 src/clients/compute/utils.cpp:426 #: src/clients/compute/utils.cpp:434 msgid "type" msgstr "" #: src/clients/compute/utils.cpp:418 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:419 src/clients/compute/utils.cpp:603 #: src/clients/data/arccp.cpp:583 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:423 msgid "" "require information query using the specified information endpoint type.\n" "\tSpecial value 'NONE' will disable all resource information queries and the " "following brokering.\n" "\tAllowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal." msgstr "" #: src/clients/compute/utils.cpp:432 msgid "" "only get information about executon targets that support this job submission " "endpoint type.\n" "\tAllowed values are: arcrest and internal." 
msgstr "" #: src/clients/compute/utils.cpp:440 msgid "keep the files on the server (do not clean)" msgstr "behalte die Dateien auf dem Server (dort nicht löschen)" #: src/clients/compute/utils.cpp:446 msgid "do not ask for verification" msgstr "frage nicht nach Verifikation" #: src/clients/compute/utils.cpp:450 #, fuzzy msgid "truncate the joblist before synchronizing" msgstr "kürze Jobliste vor Synchronisation" #: src/clients/compute/utils.cpp:454 msgid "do not collect information, only convert jobs storage format" msgstr "" #: src/clients/compute/utils.cpp:460 src/clients/data/arcls.cpp:277 msgid "long format (more information)" msgstr "ausführliche Ausgabe" #: src/clients/compute/utils.cpp:466 msgid "show the stdout of the job (default)" msgstr "Zeige stdout des Jobs (Voreinstellung)" #: src/clients/compute/utils.cpp:470 msgid "show the stderr of the job" msgstr "zeige stderr des Jobs" #: src/clients/compute/utils.cpp:474 #, fuzzy msgid "show the CE's error log of the job" msgstr "zeige den error log des Grid Manager für diesen Job" #: src/clients/compute/utils.cpp:478 msgid "show the specified file from job's session directory" msgstr "" #: src/clients/compute/utils.cpp:479 #, fuzzy msgid "filepath" msgstr "Pfad" #: src/clients/compute/utils.cpp:485 msgid "" "download directory (the job directory will be created in this directory)" msgstr "" "Download-Verzeichnis (das Job-Verzeichnis wird in diesem Verzeichnis " "abgelegt)" #: src/clients/compute/utils.cpp:487 msgid "dirname" msgstr "Verzeichnisname" #: src/clients/compute/utils.cpp:491 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:496 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:502 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:507 src/clients/compute/utils.cpp:510 msgid "order" msgstr "" #: src/clients/compute/utils.cpp:509 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:513 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:517 msgid "show status information in JSON format" msgstr "" #: src/clients/compute/utils.cpp:523 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "entferne Job aus lokaler Liste selbst wenn der Job dem Infosys nicht bekannt " "ist" #: src/clients/compute/utils.cpp:530 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:531 src/clients/compute/utils.cpp:535 #, fuzzy msgid "int" msgstr "Minuten" #: src/clients/compute/utils.cpp:534 msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:541 msgid "only select jobs whose status is statusstr" msgstr "Selektiere Jobs mit Status statusstr" #: src/clients/compute/utils.cpp:542 msgid "statusstr" msgstr "statusstr" #: src/clients/compute/utils.cpp:546 msgid "all jobs" msgstr "alle Jobs" #: src/clients/compute/utils.cpp:552 msgid "jobdescription string describing the job to be submitted" msgstr "Zeichenkette mit Job-Beschreibung wird hochgeladen" #: src/clients/compute/utils.cpp:554 src/clients/compute/utils.cpp:560 #: src/clients/credentials/arcproxy.cpp:353 #: src/clients/credentials/arcproxy.cpp:360 #: 
src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxy.cpp:386 #: src/clients/credentials/arcproxy.cpp:404 #: src/clients/credentials/arcproxy.cpp:408 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:432 #: src/clients/credentials/arcproxy.cpp:436 msgid "string" msgstr "Zeichenkette" #: src/clients/compute/utils.cpp:558 msgid "jobdescription file describing the job to be submitted" msgstr "Datei mit Job-Beschreibung wird hochgeladen" #: src/clients/compute/utils.cpp:566 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" #: src/clients/compute/utils.cpp:567 msgid "broker" msgstr "Broker" #: src/clients/compute/utils.cpp:570 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "" #: src/clients/compute/utils.cpp:571 src/clients/compute/utils.cpp:598 #: src/clients/compute/utils.cpp:625 src/clients/compute/utils.cpp:633 #: src/clients/credentials/arcproxy.cpp:445 src/clients/data/arccp.cpp:603 #: src/clients/data/arcls.cpp:322 src/clients/data/arcmkdir.cpp:100 #: src/clients/data/arcrename.cpp:111 src/clients/data/arcrm.cpp:125 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:53 msgid "filename" msgstr "Dateiname" #: src/clients/compute/utils.cpp:575 msgid "do not perform any delegation for submitted jobs" msgstr "" #: src/clients/compute/utils.cpp:579 msgid "perform X.509 delegation for submitted jobs" msgstr "" #: src/clients/compute/utils.cpp:583 msgid "perform token delegation for submitted jobs" msgstr "" #: src/clients/compute/utils.cpp:587 msgid "" "request at most this number of job instances submitted in single submit " "request" msgstr "" #: src/clients/compute/utils.cpp:591 msgid "" "request at least this number of job instances submitted in single submit " "request" msgstr "" #: src/clients/compute/utils.cpp:597 #, fuzzy msgid "a file containing a list of jobIDs" msgstr "Datei mit Liste aller Jobs" #: src/clients/compute/utils.cpp:602 msgid "skip jobs that are on a computing element with a given URL" msgstr "" #: src/clients/compute/utils.cpp:608 msgid "submit jobs as dry run (no submission to batch system)" msgstr "" #: src/clients/compute/utils.cpp:612 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" #: src/clients/compute/utils.cpp:618 msgid "prints info about installed user- and CA-certificates" msgstr "" #: src/clients/compute/utils.cpp:619 src/clients/credentials/arcproxy.cpp:469 #: src/clients/data/arccp.cpp:637 src/clients/data/arcls.cpp:356 #: src/clients/data/arcmkdir.cpp:134 src/clients/data/arcrename.cpp:145 #: src/clients/data/arcrm.cpp:159 msgid "allow TLS connection which failed verification" msgstr "" #: src/clients/compute/utils.cpp:624 #, fuzzy, c-format msgid "the file storing information about active jobs (default %s)" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/compute/utils.cpp:632 src/clients/credentials/arcproxy.cpp:444 #: src/clients/data/arccp.cpp:602 src/clients/data/arcls.cpp:321 #: src/clients/data/arcmkdir.cpp:99 src/clients/data/arcrename.cpp:110 #: src/clients/data/arcrm.cpp:124 msgid "configuration file (default ~/.arc/client.conf)" msgstr "Konfigurationsdatei (Vorteinstellung ~/.arc/client.conf)" #: src/clients/compute/utils.cpp:635 src/clients/credentials/arcproxy.cpp:439 #: src/clients/data/arccp.cpp:597 src/clients/data/arcls.cpp:316 #: src/clients/data/arcmkdir.cpp:94 src/clients/data/arcrename.cpp:105 #: src/clients/data/arcrm.cpp:119 msgid 
"timeout in seconds (default 20)" msgstr "Zeitüberschreitung nach Sekunden (Voreinstellung 20)" #: src/clients/compute/utils.cpp:636 src/clients/credentials/arcproxy.cpp:440 #: src/clients/data/arccp.cpp:598 src/clients/data/arcls.cpp:317 #: src/clients/data/arcmkdir.cpp:95 src/clients/data/arcrename.cpp:106 #: src/clients/data/arcrm.cpp:120 msgid "seconds" msgstr "Sekunden" #: src/clients/compute/utils.cpp:639 msgid "list the available plugins" msgstr "" #: src/clients/compute/utils.cpp:643 src/clients/credentials/arcproxy.cpp:449 #: src/clients/data/arccp.cpp:642 src/clients/data/arcls.cpp:361 #: src/clients/data/arcmkdir.cpp:139 src/clients/data/arcrename.cpp:150 #: src/clients/data/arcrm.cpp:164 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:190 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:67 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" #: src/clients/compute/utils.cpp:644 src/clients/credentials/arcproxy.cpp:450 #: src/clients/data/arccp.cpp:643 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:140 src/clients/data/arcrename.cpp:151 #: src/clients/data/arcrm.cpp:165 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:191 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:68 #, fuzzy msgid "debuglevel" msgstr "debuglevel" #: src/clients/compute/utils.cpp:646 src/clients/credentials/arcproxy.cpp:473 #: src/clients/data/arccp.cpp:646 src/clients/data/arcls.cpp:365 #: src/clients/data/arcmkdir.cpp:143 src/clients/data/arcrename.cpp:154 #: src/clients/data/arcrm.cpp:168 msgid "print version information" msgstr "Angabe des aktuellen Versionsbezeichners" #: src/clients/compute/utils.cpp:652 src/clients/data/arccp.cpp:607 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:104 #: src/clients/data/arcrename.cpp:115 src/clients/data/arcrm.cpp:129 msgid "do not perform any authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:656 src/clients/data/arccp.cpp:612 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:109 #: src/clients/data/arcrename.cpp:120 src/clients/data/arcrm.cpp:134 msgid "perform X.509 authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:660 src/clients/data/arccp.cpp:617 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:114 #: src/clients/data/arcrename.cpp:125 src/clients/data/arcrm.cpp:139 msgid "perform token authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:664 src/clients/credentials/arcproxy.cpp:454 #: src/clients/data/arccp.cpp:622 src/clients/data/arcls.cpp:341 #: src/clients/data/arcmkdir.cpp:119 src/clients/data/arcrename.cpp:130 #: src/clients/data/arcrm.cpp:144 msgid "force using CA certificates configuration provided by OpenSSL" msgstr "" #: src/clients/compute/utils.cpp:668 src/clients/credentials/arcproxy.cpp:459 #: src/clients/data/arccp.cpp:627 src/clients/data/arcls.cpp:346 #: src/clients/data/arcmkdir.cpp:124 src/clients/data/arcrename.cpp:135 #: src/clients/data/arcrm.cpp:149 msgid "" "force using CA certificates configuration for Grid services (typically IGTF)" msgstr "" #: src/clients/compute/utils.cpp:672 src/clients/credentials/arcproxy.cpp:464 msgid "" "force using CA certificates configuration for Grid services (typically 
IGTF) " "and one provided by OpenSSL" msgstr "" #: src/clients/compute/utils.cpp:681 src/clients/compute/utils.cpp:688 #: src/clients/compute/utils.cpp:695 #, fuzzy msgid "Conflicting delegation types specified." msgstr "Delegation Authorisierung fehlgeschlagen" #: src/clients/compute/utils.cpp:727 src/clients/compute/utils.cpp:734 #: src/clients/compute/utils.cpp:741 src/clients/data/utils.cpp:41 #: src/clients/data/utils.cpp:48 src/clients/data/utils.cpp:55 #, fuzzy msgid "Conflicting authentication types specified." msgstr "Delegation Authorisierung fehlgeschlagen" #: src/clients/credentials/arcproxy.cpp:151 #, fuzzy, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "Es sind %d Zertifikate in der zurückgelieferten Nachricht." #: src/clients/credentials/arcproxy.cpp:167 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "" #: src/clients/credentials/arcproxy.cpp:176 #, c-format msgid " expiration time: %s " msgstr "" #: src/clients/credentials/arcproxy.cpp:180 #, fuzzy, c-format msgid " certificate dn: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/clients/credentials/arcproxy.cpp:181 #, fuzzy, c-format msgid " issuer dn: %s" msgstr " base dn: %s" #: src/clients/credentials/arcproxy.cpp:182 #, c-format msgid " serial number: %d" msgstr "" #: src/clients/credentials/arcproxy.cpp:186 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:251 #, fuzzy msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" "Команда arcproxy Ñоздаёт доверенноÑти из пары закрытый/открытый ключ\n" "Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ð° Гриде" #: src/clients/credentials/arcproxy.cpp:253 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start\n" " from now)\n" "\n" " validityEnd=time\n" "\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and\n" " validityEnd not specified, the default is 12 hours for local proxy, and\n" " 168 hours for delegated proxy on myproxy server)\n" "\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, " "the\n" " default is the minimum value of 12 hours and validityPeriod)\n" "\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value\n" " of 12 hours and validityPeriod (which is lifetime of the delegated proxy " "on\n" " myproxy server))\n" "\n" " proxyPolicy=policy content\n" "\n" " proxyPolicyFile=policy file\n" "\n" " keybits=number - length of the key to generate. Default is 2048 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" "\n" " signingAlgorithm=name - signing algorithm to use for signing public key " "of\n" " proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256,\n" " sha384, sha512 and inherit (use algorithm of signing certificate). " "Default\n" " is inherit. 
With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" "\n" " identity - identity subject name of proxy certificate.\n" "\n" " issuer - issuer subject name of proxy certificate.\n" "\n" " ca - subject name of CA which issued initial certificate.\n" "\n" " path - file system path to file containing proxy.\n" "\n" " type - type of proxy certificate.\n" "\n" " validityStart - timestamp when proxy validity starts.\n" "\n" " validityEnd - timestamp when proxy validity ends.\n" "\n" " validityPeriod - duration of proxy validity in seconds.\n" "\n" " validityLeft - duration of proxy validity left in seconds.\n" "\n" " vomsVO - VO name represented by VOMS attribute\n" "\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" "\n" " vomsIssuer - subject of service which issued VOMS certificate\n" "\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" "\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" "\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" "\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" "\n" " proxyPolicy\n" "\n" " keybits - size of proxy certificate key in bits.\n" "\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" "\n" " myproxy - for accessing credentials at MyProxy service\n" "\n" " myproxynew - for creating credentials at MyProxy service\n" "\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" "\n" " int - interactively request password from console\n" "\n" " stdin - read password from standard input delimited by newline\n" "\n" " file:filename - read password from file named filename\n" "\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:315 #, fuzzy msgid "path to the proxy file" msgstr "Pfad zu Proxy-Datei" #: src/clients/credentials/arcproxy.cpp:316 #: src/clients/credentials/arcproxy.cpp:320 #: src/clients/credentials/arcproxy.cpp:324 #: src/clients/credentials/arcproxy.cpp:328 #: src/clients/credentials/arcproxy.cpp:332 #: src/clients/credentials/arcproxy.cpp:336 src/clients/data/arccp.cpp:560 msgid "path" msgstr "Pfad" #: src/clients/credentials/arcproxy.cpp:319 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formatted" msgstr "" #: src/clients/credentials/arcproxy.cpp:323 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:327 #, fuzzy msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "" "путь к каталогу Ñ Ð´Ð¾Ð²ÐµÑ€Ñемыми Ñертификатами, иÑпользуетÑÑ\n" " только клиентом VOMS" #: src/clients/credentials/arcproxy.cpp:331 #, fuzzy msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "" "путь к каталогу Ñ Ð´Ð¾Ð²ÐµÑ€Ñемыми Ñертификатами, иÑпользуетÑÑ\n" " только клиентом VOMS" #: src/clients/credentials/arcproxy.cpp:335 #, fuzzy msgid "path to the VOMS server configuration file" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:339 #, fuzzy msgid "" "voms<:command>. Specify VOMS server\n" " More than one VOMS server can be specified like this:\n" " --voms VOa:command1 --voms VOb:command2.\n" " :command is optional, and is used to ask for specific attributes (e.g: " "roles)\n" " command options are:\n" "\n" " all --- put all of this DN's attributes into AC;\n" "\n" " list --- list all of the DN's attribute, will not create AC " "extension;\n" "\n" " /Role=yourRole --- specify the role, if this DN\n" " has such a role, the role will be put into AC;\n" "\n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN\n" " has such a role, the role will be put into AC.\n" "\n" " If this option is not specified values from configuration " "files are used.\n" " To avoid anything to be used specify -S with empty value.\n" msgstr "" "voms<:инÑтрукциÑ>. ОпиÑание Ñервера VOMS (неÑколько Ñерверов задаютÑÑ\n" " Ñледующим образом: --voms VOa:инÑтрукциÑ1 --voms VOb:" "инÑтрукциÑ2).\n" " <:инÑтрукциÑ> не обÑзательна и Ñлужит Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа " "дополнительных\n" " атрибутов (например, ролей)\n" " ИнÑтрукции:\n" " all --- добавить вÑе атрибуты, доÑтупные данному " "пользователю;\n" " list --- перечиÑлить вÑе атрибуты, доÑтупные данному " "пользователю,\n" " без ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ AC; \n" " /Role=вашаРоль --- указать желаемую роль; еÑли данный " "пользователь\n" " может играть такую роль, она будет " "добавлена;\n" " /voname/groupname/Role=вашаРоль --- указать ВО, группу и роль; " "еÑли\n" " данный пользователь может играть такую " "роль, она\n" " будет добавлена.\n" #: src/clients/credentials/arcproxy.cpp:356 msgid "" "group<:role>. 
Specify ordering of attributes\n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester\n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester\n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" #: src/clients/credentials/arcproxy.cpp:363 msgid "use GSI communication protocol for contacting VOMS services" msgstr "" #: src/clients/credentials/arcproxy.cpp:366 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access\n" " Note: for RESTful access, the 'list' command and multiple VOMS " "servers are not supported\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:370 msgid "" "use old communication protocol for contacting VOMS services instead of " "RESTful access\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:373 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" #: src/clients/credentials/arcproxy.cpp:376 msgid "print all information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:379 msgid "print selected information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:382 msgid "remove proxy" msgstr "" #: src/clients/credentials/arcproxy.cpp:385 msgid "" "username to MyProxy server (if missing, the subject of the user certificate " "is used)" msgstr "" #: src/clients/credentials/arcproxy.cpp:390 msgid "" "don't prompt for a credential passphrase when retrieving a credential from " "a MyProxy server.\n" " The precondition of this choice is that the credential was PUT onto\n" " the MyProxy server without a passphrase by using the\n" " -R (--retrievable_by_cert) option.\n" " This option is specific to the GET command when contacting a MyProxy\n" " server." msgstr "" #: src/clients/credentials/arcproxy.cpp:401 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific to the PUT command when contacting a MyProxy\n" " server." msgstr "" #: src/clients/credentials/arcproxy.cpp:407 #, fuzzy msgid "hostname[:port] of MyProxy server" msgstr "Hostname[:Port] des MyProxy-Servers" #: src/clients/credentials/arcproxy.cpp:412 #, fuzzy msgid "" "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put delegated credentials to the MyProxy server;\n" "\n" " GET -- get delegated credentials from the MyProxy server;\n" "\n" " INFO -- get and present information about credentials stored " "at the MyProxy server;\n" "\n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server;\n" "\n" " DESTROY -- wipe off credentials stored at the MyProxy server;\n" "\n" " Local credentials (certificate and key) are not necessary " "except in case of PUT.\n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for the GET command if VOMS " "attributes\n" " are required to be included in the proxy.\n" msgstr "" "инструкция серверу MyProxy.
Возможны две инструкции: PUT и GET:\n" " PUT/put -- сохранить делегированный сертификат на сервере " "MyProxy;\n" " GET/get -- получить делегированный сертификат с сервера " "MyProxy,\n" " в этом случае не требуются личные сертификаты и " "ключи.\n" " Инструкции MyProxy и VOMS могут использоваться одновременно.\n" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "use NSS credential database in default Mozilla profiles, including Firefox, " "Seamonkey and Thunderbird." msgstr "" #: src/clients/credentials/arcproxy.cpp:431 #, fuzzy msgid "proxy constraints" msgstr "Proxy constraints" #: src/clients/credentials/arcproxy.cpp:435 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:479 msgid "" "RESTful and old VOMS communication protocols can't be requested " "simultaneously." msgstr "" #: src/clients/credentials/arcproxy.cpp:509 #: src/clients/credentials/arcproxy.cpp:1220 #, fuzzy msgid "Failed configuration initialization." msgstr "Fehler bei Initialisierung der Konfiguration" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:545 #: src/clients/credentials/arcproxy.cpp:557 msgid "You may try to increase verbosity to get more information." msgstr "" #: src/clients/credentials/arcproxy.cpp:553 #, fuzzy msgid "Failed to find CA certificates" msgstr "Konnte CA-Zertifikate nicht finden" #: src/clients/credentials/arcproxy.cpp:554 #, fuzzy msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:558 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" #: src/clients/credentials/arcproxy.cpp:570 msgid "" "$X509_VOMS_FILE and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also no vomses location information in the user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:615 msgid "Wrong number of arguments!" msgstr "" #: src/clients/credentials/arcproxy.cpp:623 #: src/clients/credentials/arcproxy.cpp:647 #: src/clients/credentials/arcproxy.cpp:780 #, fuzzy msgid "" "Cannot find the path of the proxy file, please set environment variable " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" "Kann den Pfad zur Proxy-Datei nicht ermitteln. Bitte setzen Sie die " "Umgebungsvariable X509_USER_PROXY oder proxypath in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:630 #, fuzzy, c-format msgid "Cannot remove proxy file at %s" msgstr "Kann Proxy-Datei bei %s nicht entfernen" #: src/clients/credentials/arcproxy.cpp:632 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:641 msgid "Bearer token is available.
It is preferred for job submission." msgstr "" #: src/clients/credentials/arcproxy.cpp:653 #: src/clients/credentials/arcproxy.cpp:786 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" "Kann Datei nicht bei %s finden, um den Proxy zu erhalten. Bitte stellen Sie " "sicher, dass diese Datei existiert." #: src/clients/credentials/arcproxy.cpp:659 #: src/clients/credentials/arcproxy.cpp:792 #, fuzzy, c-format msgid "Cannot process proxy file at %s." msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/credentials/arcproxy.cpp:662 #, c-format msgid "Subject: %s" msgstr "Subjekt: %s" #: src/clients/credentials/arcproxy.cpp:663 #, fuzzy, c-format msgid "Issuer: %s" msgstr "Anfrage: %s" #: src/clients/credentials/arcproxy.cpp:664 #, c-format msgid "Identity: %s" msgstr "Identität: %s" #: src/clients/credentials/arcproxy.cpp:666 #, fuzzy msgid "Time left for proxy: Proxy expired" msgstr "Zeit verbleibend für Proxy: Proxy abgelaufen" #: src/clients/credentials/arcproxy.cpp:668 #, fuzzy msgid "Time left for proxy: Proxy not valid yet" msgstr "Zeit verbleibend für Proxy: Proxy ungültig" #: src/clients/credentials/arcproxy.cpp:670 #, fuzzy, c-format msgid "Time left for proxy: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/clients/credentials/arcproxy.cpp:671 #, c-format msgid "Proxy path: %s" msgstr "Proxy Pfad: %s" #: src/clients/credentials/arcproxy.cpp:672 #, c-format msgid "Proxy type: %s" msgstr "Proxy Typ: %s" #: src/clients/credentials/arcproxy.cpp:673 #, fuzzy, c-format msgid "Proxy key length: %i" msgstr "Proxy Pfad: %s" #: src/clients/credentials/arcproxy.cpp:674 #, fuzzy, c-format msgid "Proxy signature: %s" msgstr "Nach Signatur: %s" #: src/clients/credentials/arcproxy.cpp:683 #, fuzzy msgid "AC extension information for VO " msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/credentials/arcproxy.cpp:686 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:699 msgid "AC is invalid: " msgstr "" #: src/clients/credentials/arcproxy.cpp:729 #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:222 #, c-format msgid "Malformed VOMS AC attribute %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:760 #, fuzzy msgid "Time left for AC: AC is not valid yet" msgstr "Zeit verbleibend für Proxy: Proxy ungültig" #: src/clients/credentials/arcproxy.cpp:762 #, fuzzy msgid "Time left for AC: AC has expired" msgstr "Zeit verbleibend für Proxy: Proxy abgelaufen" #: src/clients/credentials/arcproxy.cpp:764 #, fuzzy, c-format msgid "Time left for AC: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/clients/credentials/arcproxy.cpp:871 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:883 #, fuzzy msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:887 #, fuzzy msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" "Kann den Pfad zum CA Zertifikat-Verzeichnis nicht ermitteln. 
Bitte setzen " "Sie die Umgebungsvariable X509_CERT_DIR oder den Eintrag zu " "cacertificatesdirectory in der Konfigurationsdatei" #: src/clients/credentials/arcproxy.cpp:911 #, c-format msgid "" "Cannot parse password source expression %s; it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:928 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key', 'myproxy', 'myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:943 #, c-format msgid "" "Cannot parse password source %s; it must be of source_type or source_type:" "data format. Supported source types are int, stdin, stream, file." msgstr "" #: src/clients/credentials/arcproxy.cpp:957 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:962 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int, stdin, " "stream, file." msgstr "" #: src/clients/credentials/arcproxy.cpp:1001 msgid "The start, end and period can't be set simultaneously" msgstr "" #: src/clients/credentials/arcproxy.cpp:1007 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1014 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1021 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1030 #, c-format msgid "The end time that you set: %s is before start time: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1041 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1044 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1054 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1072 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1087 #, c-format msgid "The keybits constraint is wrong: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1101 msgid "The NSS database cannot be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1110 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1112 #, c-format msgid "Number %d is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1114 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1130 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1201 #, fuzzy, c-format msgid "Certificate to use is: %s" msgstr "Zu verwendendes Zertifikat: %s" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxy.cpp:1366 msgid "Proxy generation succeeded" msgstr "Proxy erfolgreich angelegt" #: src/clients/credentials/arcproxy.cpp:1253 #: src/clients/credentials/arcproxy.cpp:1367 #, c-format msgid "Your proxy is valid until: %s" msgstr "Ihr Proxy ist gültig bis: %s" #: src/clients/credentials/arcproxy.cpp:1272 msgid "" "The old GSI proxies are not supported anymore. Please do not use the -O/--" "old option."
msgstr "" #: src/clients/credentials/arcproxy.cpp:1291 src/hed/mcc/tls/MCCTLS.cpp:182 #: src/hed/mcc/tls/MCCTLS.cpp:215 src/hed/mcc/tls/MCCTLS.cpp:241 msgid "VOMS attribute parsing failed" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/clients/credentials/arcproxy.cpp:1293 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1314 msgid "Proxy generation failed: No valid certificate found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1319 msgid "Proxy generation failed: No valid private key found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1323 #, c-format msgid "Your identity: %s" msgstr "Ihre Identität: %s" #: src/clients/credentials/arcproxy.cpp:1325 msgid "Proxy generation failed: Certificate has expired." msgstr "" #: src/clients/credentials/arcproxy.cpp:1329 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "" #: src/clients/credentials/arcproxy.cpp:1340 #, fuzzy msgid "Proxy generation failed: Failed to create temporary file." msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/clients/credentials/arcproxy.cpp:1348 msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:100 msgid "Succeeded to get info from MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:144 msgid "Succeeded to change password on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:185 msgid "Succeeded to destroy credential on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:241 #, fuzzy, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "Zurückerhaltene Nachricht von myproxy Server: %s" #: src/clients/credentials/arcproxy_myproxy.cpp:294 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_proxy.cpp:93 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:63 #, fuzzy msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/clients/credentials/arcproxy_voms.cpp:75 #, fuzzy, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/clients/credentials/arcproxy_voms.cpp:93 #, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." 
msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, fuzzy, c-format msgid "No valid response from VOMS server: %s" msgstr "Frühe Antwort vom Server" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:167 #, fuzzy, c-format msgid "Failed to parse VOMS command: %s" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but none of " "them can be reached, or can return a valid message." msgstr "" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:315 #, c-format msgid "Current transfer FAILED: %s" msgstr "Aktueller Transfer SCHLUG FEHL: %s" #: src/clients/data/arccp.cpp:81 src/clients/data/arccp.cpp:119 #: src/clients/data/arccp.cpp:317 src/clients/data/arcls.cpp:214 #: src/clients/data/arcmkdir.cpp:62 src/clients/data/arcrename.cpp:78 #: src/clients/data/arcrm.cpp:83 msgid "This seems like a temporary error, please try again later" msgstr "" "Dies scheint ein vorübergehender Fehler zu sein, bitte später nochmal " "probieren" #: src/clients/data/arccp.cpp:96 src/clients/data/arccp.cpp:100 #: src/clients/data/arccp.cpp:133 src/clients/data/arccp.cpp:137 #: src/clients/data/arccp.cpp:343 src/clients/data/arccp.cpp:348 #: src/clients/data/arcls.cpp:125 src/clients/data/arcmkdir.cpp:30 #: src/clients/data/arcrename.cpp:31 src/clients/data/arcrename.cpp:35 #: src/clients/data/arcrm.cpp:38 #, c-format msgid "Invalid URL: %s" msgstr "Ungültige URL: %s" #: src/clients/data/arccp.cpp:112 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:114 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:117 #, c-format msgid "Transfer FAILED: %s" msgstr "Transfer FEHLER: %s" #: src/clients/data/arccp.cpp:145 src/clients/data/arccp.cpp:171 #: src/clients/data/arccp.cpp:359 src/clients/data/arccp.cpp:387 #, c-format msgid "Can't read list of sources from file %s" msgstr "" #: src/clients/data/arccp.cpp:150 src/clients/data/arccp.cpp:186 #: src/clients/data/arccp.cpp:364 src/clients/data/arccp.cpp:403 #, c-format msgid "Can't read list of destinations from file %s" msgstr "" #: src/clients/data/arccp.cpp:155 src/clients/data/arccp.cpp:370 msgid "Numbers of sources and destinations do not match" msgstr "" #: src/clients/data/arccp.cpp:200 msgid "Fileset registration is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:206 src/clients/data/arccp.cpp:279 #: src/clients/data/arccp.cpp:441 #, c-format msgid "Unsupported source url: %s" msgstr "Nicht unterstützte URL für Quelle: %s" #: src/clients/data/arccp.cpp:210 src/clients/data/arccp.cpp:283 #, c-format msgid "Unsupported destination url: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/clients/data/arccp.cpp:217 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" #: src/clients/data/arccp.cpp:227 #, fuzzy, c-format msgid "Could not obtain information about 
source: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/clients/data/arccp.cpp:234 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" #: src/clients/data/arccp.cpp:246 msgid "Failed to accept new file/destination" msgstr "" #: src/clients/data/arccp.cpp:252 src/clients/data/arccp.cpp:258 #, fuzzy, c-format msgid "Failed to register new file/destination: %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/clients/data/arccp.cpp:421 msgid "Fileset copy to single object is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:431 msgid "Can't extract object's name from source url" msgstr "" #: src/clients/data/arccp.cpp:450 #, fuzzy, c-format msgid "%s. Cannot copy fileset" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/clients/data/arccp.cpp:460 src/hed/libs/compute/ExecutionTarget.cpp:256 #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Name: %s" msgstr "Name %s" #: src/clients/data/arccp.cpp:463 #, c-format msgid "Source: %s" msgstr "Quelle: %s" #: src/clients/data/arccp.cpp:464 #, c-format msgid "Destination: %s" msgstr "Ziel: %s" #: src/clients/data/arccp.cpp:470 msgid "Current transfer complete" msgstr "Aktueller Transfer vollständig" #: src/clients/data/arccp.cpp:473 msgid "Some transfers failed" msgstr "Einige Transfers schlugen fehl" #: src/clients/data/arccp.cpp:483 #, c-format msgid "Directory: %s" msgstr "Verzeichnis: %s" #: src/clients/data/arccp.cpp:503 msgid "Transfer complete" msgstr "Transfer vollständig" #: src/clients/data/arccp.cpp:522 msgid "source destination" msgstr "Quelle Ziel" #: src/clients/data/arccp.cpp:523 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" "Mit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert." #: src/clients/data/arccp.cpp:528 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" #: src/clients/data/arccp.cpp:534 msgid "do not try to force passive transfer" msgstr "versuche nicht, passiven Transfer zu erzwigen" #: src/clients/data/arccp.cpp:539 #, fuzzy msgid "force overwrite of existing destination" msgstr "Probleme bei Auflösen von Zieladresse" #: src/clients/data/arccp.cpp:543 msgid "show progress indicator" msgstr "zeige Fortschrittsanzeige" #: src/clients/data/arccp.cpp:548 #, fuzzy msgid "" "do not transfer, but register source into destination. destination must be a " "meta-url." 
msgstr "" "transferiere Datei nicht, registriere sie nur - Zeil muss eine nicht-" "existierende Meta-URL sein" #: src/clients/data/arccp.cpp:554 msgid "use secure transfer (insecure by default)" msgstr "Nutze sicheren Transfer (unsicher ist Voreinstellung)" #: src/clients/data/arccp.cpp:559 msgid "path to local cache (use to put file into cache)" msgstr "" #: src/clients/data/arccp.cpp:564 src/clients/data/arcls.cpp:290 #, fuzzy msgid "operate recursively" msgstr "arbeite rekursiv bis zu einer festgelegten Tiefe" #: src/clients/data/arccp.cpp:569 src/clients/data/arcls.cpp:295 msgid "operate recursively up to specified level" msgstr "arbeite rekursiv bis zu einer festgelegten Tiefe" #: src/clients/data/arccp.cpp:570 src/clients/data/arcls.cpp:296 msgid "level" msgstr "Tiefe" #: src/clients/data/arccp.cpp:574 msgid "number of retries before failing file transfer" msgstr "Anzahl von Wiederholungen bis zu einem Abbruch der Dateiübertragung" #: src/clients/data/arccp.cpp:575 msgid "number" msgstr "Nummer" #: src/clients/data/arccp.cpp:579 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." msgstr "" #: src/clients/data/arccp.cpp:587 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:312 #: src/clients/data/arcmkdir.cpp:90 src/clients/data/arcrename.cpp:101 #: src/clients/data/arcrm.cpp:115 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:632 src/clients/data/arcls.cpp:351 #: src/clients/data/arcmkdir.cpp:129 src/clients/data/arcrename.cpp:140 #: src/clients/data/arcrm.cpp:154 msgid "" "force using both CA certificates configuration for Grid services (typically " "IGTF) and those provided by OpenSSL" msgstr "" #: src/clients/data/arccp.cpp:667 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:176 #: src/clients/data/arcrm.cpp:191 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:715 src/clients/data/arcls.cpp:435 #: src/clients/data/arcmkdir.cpp:212 src/clients/data/arcrename.cpp:222 #: src/clients/data/arcrm.cpp:239 msgid "Wrong number of parameters specified" msgstr "Falsche Anzahl an Parametern übertragen" #: src/clients/data/arccp.cpp:720 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "" #: src/clients/data/arcls.cpp:131 src/clients/data/arcmkdir.cpp:36 #: src/clients/data/arcrm.cpp:45 #, c-format msgid "Can't read list of locations from file %s" msgstr "" #: src/clients/data/arcls.cpp:146 src/clients/data/arcmkdir.cpp:51 #: src/clients/data/arcrename.cpp:63 #, fuzzy msgid "Unsupported URL given" msgstr "Nicht-unterstützte URL angegeben" #: src/clients/data/arcls.cpp:217 msgid "Warning: Failed listing files but some information is obtained" msgstr "" #: src/clients/data/arcls.cpp:271 src/clients/data/arcmkdir.cpp:79 msgid "url" msgstr "URL" #: src/clients/data/arcls.cpp:272 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." 
msgstr "" "Mit arcls werden Verzeichniss auf grid storage Elementen und Datei Index " "Katalogen angegeben" #: src/clients/data/arcls.cpp:281 msgid "show URLs of file locations" msgstr "zeige URLs von Datei-Lokalisationen" #: src/clients/data/arcls.cpp:285 msgid "display all available metadata" msgstr "zeige alle verfügbare Metadaten" #: src/clients/data/arcls.cpp:299 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:303 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:307 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:440 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:445 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:80 #, fuzzy msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" "Mit arcls werden Verzeichniss auf grid storage Elementen und Datei Index " "Katalogen angegeben" #: src/clients/data/arcmkdir.cpp:85 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:43 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:53 #, fuzzy msgid "Cannot rename to or from root directory" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/clients/data/arcrename.cpp:57 #, fuzzy msgid "Cannot rename to the same URL" msgstr "Kann doc Argument nicht anlegen" #: src/clients/data/arcrename.cpp:95 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:96 #, fuzzy msgid "The arcrename command renames files on grid storage elements." msgstr "" "Mit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert." #: src/clients/data/arcrm.cpp:60 #, fuzzy, c-format msgid "Unsupported URL given: %s" msgstr "Nicht-unterstützte URL angegeben" #: src/clients/data/arcrm.cpp:103 #, fuzzy msgid "url [url ...]" msgstr "[Cluster ...]" #: src/clients/data/arcrm.cpp:104 #, fuzzy msgid "The arcrm command deletes files on grid storage elements." msgstr "" "Mit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert." #: src/clients/data/arcrm.cpp:109 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" #: src/clients/data/utils.cpp:18 msgid "Proxy expired. Please run 'arcproxy'!" msgstr "" #: src/clients/data/utils.cpp:81 src/clients/data/utils.cpp:90 #, fuzzy, c-format msgid "Unable to handle %s" msgstr "Fehler bei Erstellen von GUID in RLS: %s" #: src/clients/data/utils.cpp:82 src/clients/data/utils.cpp:91 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/utils.cpp:88 #, fuzzy msgid "Proxy expired" msgstr "Proxy store:" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:60 #, fuzzy msgid "Cannot create resolver from /etc/resolv.conf" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." 
msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:124 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:129 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:134 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:138 #, fuzzy, c-format msgid "Found service endpoint %s (type %s)" msgstr "Fand %u execution services des index service %s" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:157 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:229 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:149 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:241 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:161 #, fuzzy, c-format msgid "Job %s failed to renew delegation %s." msgstr "Initiierung der Delegation fehlgeschlagen" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:314 #, fuzzy, c-format msgid "Failed to process jobs - error response: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:316 #, fuzzy, c-format msgid "Failed to process jobs - wrong response: %u" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:318 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:327 #, fuzzy, c-format msgid "Content: %s" msgstr "Anfrage: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:321 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:333 #, fuzzy, c-format msgid "Failed to process job: %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:331 #, fuzzy msgid "Failed to process jobs - failed to parse response" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:344 #, fuzzy, c-format msgid "No response returned: %s" msgstr "Keine Antwort von %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:368 #, fuzzy, c-format msgid "Failed to process job: %s - %s %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:455 #, fuzzy, c-format msgid "Failed retrieving job description for job: %s" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/ARCREST/JobListRetrieverPluginREST.cpp:29 msgid "Collecting Job (A-REX REST jobs) information." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:49 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:80 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:115 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:149 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:189 #, fuzzy msgid "Failed to communicate to delegation endpoint." 
msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:54 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:85 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:120 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:154 #, c-format msgid "Unexpected response code from delegation endpoint - %u" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:56 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:87 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:122 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:156 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:399 #: src/hed/dmc/gridftp/Lister.cpp:223 src/hed/dmc/gridftp/Lister.cpp:243 #: src/hed/dmc/gridftp/Lister.cpp:468 src/hed/dmc/gridftp/Lister.cpp:475 #: src/hed/dmc/gridftp/Lister.cpp:497 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:164 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:197 #, c-format msgid "Response: %s" msgstr "Antwort: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:64 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:136 #, fuzzy, c-format msgid "Unexpected delegation location from delegation endpoint - %s." msgstr "" "Kann delegation credential nicht erhalten: %s von delegation service: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:92 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:127 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:161 msgid "Missing response from delegation endpoint." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:193 #, c-format msgid "Unexpected response code from delegation endpoint: %u, %s." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:235 #, fuzzy, c-format msgid "Failed to submit all jobs: %s %s" msgstr "Konnte job nicht starten" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:249 msgid "Failed uploading local input files" msgstr "Konnte lokale Inputdateien nicht hochladen" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:304 #, fuzzy msgid "Failed to prepare job description" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:313 #, fuzzy, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:320 msgid "" "Can't submit multiple instances for multiple job descriptions. Not " "implemented yet." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:331 #, fuzzy msgid "Unable to submit jobs. Failed to delegate X.509 credentials." msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:338 #, fuzzy msgid "Unable to submit jobs. Failed to delegate token." msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:348 #, fuzzy msgid "Unable to submit job. Failed to assign delegation to job description." msgstr "Submit: Fehler bei Senden von Job Beschreibung" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:388 #, fuzzy msgid "Failed to submit all jobs." msgstr "Konnte job nicht starten" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:398 #, fuzzy, c-format msgid "Failed to submit all jobs: %u %s" msgstr "Konnte job nicht starten" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:411 #, fuzzy, c-format msgid "Failed to submit all jobs: %s" msgstr "Konnte job nicht starten" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:27 msgid "Querying WSRF GLUE2 computing REST endpoint." 
msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:60 #, c-format msgid "CONTENT %u: %s" msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:64 #, fuzzy msgid "Response is not XML" msgstr "Antwort: %s" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:69 #, c-format msgid "Parsed domains: %u" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" "Ziel %s entfernt durch FastestQueueBroker, die Anzahl wartender Jobs wird " "nicht genannt" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" "Ziel %s entfernt durch FastestQueueBroker, die Anzahl vorhandener slots wird " "nicht genannt" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" "Ziel %s entfernt durch FastestQueueBroker, die Anzahl freier slots wird " "nicht genannt" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:74 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:94 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:104 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:113 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:125 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:363 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:410 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:453 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:472 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 msgid "[ADLParser] AccessControl isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:513 msgid "[ADLParser] CredentialService must contain valid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:542 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:545 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:603 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:608 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:614 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:631 msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:645 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:653 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:660 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:667 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:696 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:716 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:730 msgid "[ADLParser] Benchmark is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:738 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:755 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:775 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:786 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:808 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:814 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:827 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:852 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." 
msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 #, fuzzy msgid "End of comment not found" msgstr "clientxrsl nicht gefunden" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 #, fuzzy msgid "Broken string" msgstr "Zeichenkette" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 #, fuzzy msgid "Relation operator expected" msgstr "Sofortige Vervollständigung: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must be specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 #, fuzzy msgid "No RSL content in job description found" msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:295 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:313 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:331 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:349 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:317 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:322 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:338 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:356 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:360 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:492 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1375 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:557 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:562 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:587 #, fuzzy, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:596 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, fuzzy, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "Ungültige URL Option: %s" #: 
src/hed/acc/JobDescriptionParser/XRSLParser.cpp:677 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:682 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 #, fuzzy, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:704 #, fuzzy, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:714 #, fuzzy, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Ungültige URL Option: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:746 #, c-format msgid "" "Invalid comparison operator '%s' used at 'delegationid' attribute, only \"=" "\" is allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:764 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:770 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:927 #, c-format msgid "Value of attribute '%s' expected not to be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1036 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1050 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1066 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1074 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1077 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1125 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1158 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1201 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1231 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1277 #, c-format msgid "Invalid action value %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1367 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1371 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1385 #, fuzzy, c-format msgid "Wrong language requested: %s" msgstr "*** Client Anfrage: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1722 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." 
msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:92 msgid "Failed to initialize main Python thread" msgstr "Fehler bei Initialisierung des main Python Threads" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:97 msgid "Main Python thread was not initialized" msgstr "Main Python Thread wurde nicht initialisiert" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, fuzzy, c-format msgid "Loading Python broker (%i)" msgstr "Lade python broker (%i)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:134 #, fuzzy msgid "Main Python thread is not initialized" msgstr "Main python thread wurde nicht initialisiert" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 #, fuzzy msgid "PythonBroker init" msgstr "PythonBroker init" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, fuzzy, c-format msgid "Class name: %s" msgstr "Klassenname: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, fuzzy, c-format msgid "Module name: %s" msgstr "Modulname: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:178 #, fuzzy msgid "Cannot convert ARC module name to Python string" msgstr "Kann Modul name nicht zu Python Zeichenkette konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:186 #, fuzzy msgid "Cannot import ARC module" msgstr "Kann Modul nicht importieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:196 #: src/services/wrappers/python/pythonwrapper.cpp:429 #, fuzzy msgid "Cannot get dictionary of ARC module" msgstr "Kann auf Wörterbuch des arc Moduls nicht zugreifen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 #, fuzzy msgid "Cannot find ARC UserConfig class" msgstr "Kann UserConfig Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 #, fuzzy msgid "UserConfig class is not an object" msgstr "UserConfig Klasse ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 #, fuzzy msgid "Cannot find ARC JobDescription class" msgstr "Kann arc JobDescription Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 #, fuzzy msgid "JobDescription class is not an object" msgstr "JobDescription Klasse ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 #, fuzzy msgid "Cannot find ARC ExecutionTarget class" msgstr "Kann arc ExecutionTarget Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 #, fuzzy msgid "ExecutionTarget class is not an object" msgstr "ExecutionTarget Klasse ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:157 msgid "Cannot convert module name to Python string" msgstr "Kann Modul name nicht zu Python Zeichenkette konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:164 msgid "Cannot import module" msgstr "Kann Modul nicht importieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 #, fuzzy msgid "Cannot get dictionary of custom broker module" 
msgstr "Kann auf Wörterbuch von custom broker Modul nicht zugreifen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 #, fuzzy msgid "Cannot find custom broker class" msgstr "Kann custom broker Klasse nicht finden" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, fuzzy, c-format msgid "%s class is not an object" msgstr "Klasse %s ist kein Objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 #, fuzzy msgid "Cannot create UserConfig argument" msgstr "Kann UserConfig Argument nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 #, fuzzy msgid "Cannot convert UserConfig to Python object" msgstr "Kann UserConfig nicht zu python Objekt konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:253 msgid "Cannot create argument of the constructor" msgstr "Kann Argument für den Konstruktor nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:261 #, fuzzy msgid "Cannot create instance of Python class" msgstr "Kann Instanz von Python Klasse nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, fuzzy, c-format msgid "Python broker constructor called (%d)" msgstr "Python broker Kontruktor aufgerufen (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, fuzzy, c-format msgid "Python broker destructor called (%d)" msgstr "Python broker Destruktor aufgerufen (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 #, fuzzy msgid "Cannot create ExecutionTarget argument" msgstr "Kann ExecutionTarget Argument nicht anlegen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, fuzzy, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "Kann ExecutionTarget nicht zu Python Objekt konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 #, fuzzy msgid "Cannot create JobDescription argument" msgstr "Kann JobDescription Argument nicht anlegen." 
#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 #, fuzzy msgid "Cannot convert JobDescription to python object" msgstr "Kann JobDescription nicht zu Python Objekt konvertieren" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/daemon/unix/daemon.cpp:84 #, c-format msgid "Daemonization fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:95 msgid "Watchdog (re)starting application" msgstr "" #: src/hed/daemon/unix/daemon.cpp:100 #, fuzzy, c-format msgid "Watchdog fork failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/daemon/unix/daemon.cpp:110 msgid "Watchdog starting monitoring" msgstr "" #: src/hed/daemon/unix/daemon.cpp:136 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:138 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application exit" msgstr "" #: src/hed/daemon/unix/daemon.cpp:149 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:156 msgid "Watchdog detected application timeout or error - killing process" msgstr "" #: src/hed/daemon/unix/daemon.cpp:167 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" #: src/hed/daemon/unix/daemon.cpp:179 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" #: src/hed/daemon/unix/daemon.cpp:200 msgid "Shutdown daemon" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:47 msgid "shutdown" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:50 msgid "exit" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:88 msgid "No server config part of config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:163 #, c-format msgid "Unknown log level %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:173 #, c-format msgid "Failed to open log file: %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:205 msgid "Start foreground" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:254 #, c-format msgid "XML config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:258 src/hed/daemon/unix/main_unix.cpp:273 #, c-format msgid "Failed to load service configuration from file %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:264 #, c-format msgid "INI config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:269 src/hed/daemon/unix/main_unix.cpp:291 msgid "Error evaluating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:285 #, fuzzy msgid "Error loading generated configuration" msgstr "Fehler beim Laden der generierten Konfiguration" #: src/hed/daemon/unix/main_unix.cpp:296 msgid "Failed to load service configuration from any default config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:357 msgid "Schema validation error" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:372 msgid "Configuration root element is not " msgstr "" #: src/hed/daemon/unix/main_unix.cpp:388 #, c-format msgid "Cannot switch to group (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:398 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:403 #, c-format msgid "Cannot switch to user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:421 #, fuzzy msgid "Failed to load service side MCCs" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/daemon/unix/main_unix.cpp:423
src/tests/count/test_service.cpp:29 #: src/tests/echo/test.cpp:30 src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:430 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:87 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:94 #, fuzzy, c-format msgid "Failed to open stdio channel %s" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/dmc/file/DataPointFile.cpp:95 #, fuzzy, c-format msgid "Failed to open stdio channel %d" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/dmc/file/DataPointFile.cpp:335 #, fuzzy, c-format msgid "fsync of file %s failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:348 #, fuzzy, c-format msgid "closing file %s failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/file/DataPointFile.cpp:367 #, c-format msgid "File is not accessible: %s" msgstr "Datei ist nicht zugreifbar: %s" #: src/hed/dmc/file/DataPointFile.cpp:373 #: src/hed/dmc/file/DataPointFile.cpp:458 #, fuzzy, c-format msgid "Can't stat file: %s: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/file/DataPointFile.cpp:419 #: src/hed/dmc/file/DataPointFile.cpp:425 #, fuzzy, c-format msgid "Can't stat stdio channel %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/file/DataPointFile.cpp:473 #, fuzzy, c-format msgid "%s is not a directory" msgstr "Klasse %s ist kein Objekt" #: src/hed/dmc/file/DataPointFile.cpp:488 #, c-format msgid "Failed to read object %s: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:501 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:534 #, fuzzy, c-format msgid "File is not accessible %s: %s" msgstr "Datei ist nicht zugreifbar: %s" #: src/hed/dmc/file/DataPointFile.cpp:507 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:540 #, fuzzy, c-format msgid "Can't delete directory %s: %s" msgstr "Kann Verzeichnis nicht löschen: %s - %s" #: src/hed/dmc/file/DataPointFile.cpp:514 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:547 #, fuzzy, c-format msgid "Can't delete file %s: %s" msgstr "Kann Datei nicht löschen: %s - %s" #: src/hed/dmc/file/DataPointFile.cpp:524 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:315 #: src/hed/dmc/http/DataPointHTTP.cpp:1658 #: src/hed/dmc/http/DataPointHTTP.cpp:1676 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1466 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:562 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:582 #, c-format msgid "Creating directory %s" msgstr "Lege Verzeichnis %s an" #: src/hed/dmc/file/DataPointFile.cpp:532 src/hed/dmc/srm/DataPointSRM.cpp:168 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:596 #, fuzzy, c-format msgid "Renaming %s to %s" msgstr "Benenne %s in %s um" #: src/hed/dmc/file/DataPointFile.cpp:534 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:605 #, fuzzy, c-format msgid "Can't rename file %s: %s" msgstr "Kann Datei nicht löschen: %s - %s" #: src/hed/dmc/file/DataPointFile.cpp:562 #, fuzzy, c-format msgid "Failed to open %s for reading: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/dmc/file/DataPointFile.cpp:577 #: src/hed/dmc/file/DataPointFile.cpp:712 #, fuzzy, c-format msgid "Failed to switch user id to %d/%d" msgstr "Fehler bei Senden zu %d von %s" #: src/hed/dmc/file/DataPointFile.cpp:583 #, fuzzy, c-format msgid
"Failed to create/open file %s: %s" msgstr "Fehler bei Anlegen/Öffnen von Datei %s (%d)" #: src/hed/dmc/file/DataPointFile.cpp:599 #, fuzzy msgid "Failed to create thread" msgstr "Fehler bei Anlegen von ldap bind thread (%s)" #: src/hed/dmc/file/DataPointFile.cpp:679 #, c-format msgid "Invalid url: %s" msgstr "Ungültige url: %s" #: src/hed/dmc/file/DataPointFile.cpp:688 src/hed/libs/data/FileCache.cpp:480 #, fuzzy, c-format msgid "Failed to create directory %s: %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/dmc/file/DataPointFile.cpp:701 #: src/hed/dmc/file/DataPointFile.cpp:720 #, fuzzy, c-format msgid "Failed to create file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:732 #, c-format msgid "setting file %s to size %llu" msgstr "Setze Datei %s zu Größe %llu" #: src/hed/dmc/file/DataPointFile.cpp:755 #, fuzzy, c-format msgid "Failed to preallocate space for %s" msgstr "Fehler bei Reservieren von Platz" #: src/hed/dmc/file/DataPointFile.cpp:794 src/hed/libs/data/FileCache.cpp:854 #, fuzzy, c-format msgid "Failed to clean up file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:808 #, fuzzy, c-format msgid "Error during file validation. Can't stat file %s: %s" msgstr "Fehler bei Lesen von gültiger und existierender Lock-Datei %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:812 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, fuzzy, c-format msgid "Using proxy %s" msgstr "Nutze space token %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, fuzzy, c-format msgid "Using key %s" msgstr "Nutze space token %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, fuzzy, c-format msgid "Using cert %s" msgstr "Nutze space token %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "Locations fehlen in destination LFC URL" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "Doppelte replica gefunden in LFC: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "Füge location hinzu: %s - %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, fuzzy, c-format msgid "Add location: url: %s" msgstr "Füge location hinzu: url: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, fuzzy, c-format msgid "Add location: metadata: %s" msgstr "Füge location hinzu: Metadaten: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, fuzzy, c-format msgid "gfal_open failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, fuzzy, c-format msgid "gfal_close failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, fuzzy, c-format msgid "gfal_read failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 #, fuzzy msgid "StopReading starts waiting for transfer_condition." 
msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 #, fuzzy msgid "StopReading finished waiting for transfer_condition." msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:68 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:73 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:44 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:49 #, fuzzy, c-format msgid "No locations defined for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, fuzzy, c-format msgid "Failed to set LFC replicas: %s" msgstr "Fehler beim Entfernen von LFC Verzeichnis: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, fuzzy, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "start_writing_ftp: mkdir fehlgeschlagen - versuche weiter zu schreiben" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, fuzzy, c-format msgid "gfal_write failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:405 #, fuzzy msgid "StopWriting starts waiting for transfer_condition." msgstr "StopWriting: Abbruch der Verbindung" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:407 #, fuzzy msgid "StopWriting finished waiting for transfer_condition." msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, fuzzy, c-format msgid "gfal_stat failed: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, fuzzy, c-format msgid "gfal_opendir failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, fuzzy, c-format msgid "gfal_closedir failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, fuzzy, c-format msgid "gfal_rmdir failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, fuzzy, c-format msgid "gfal_unlink failed: %s" msgstr "globus_io_cancel ist fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, fuzzy, c-format msgid "gfal_mkdir failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, fuzzy, c-format msgid "gfal_rename failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, fuzzy, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, fuzzy, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "Connect: Konnte init handle nicht initialisieren: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, fuzzy, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, fuzzy, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, fuzzy, c-format msgid 
"Failed to set overwrite option in GFAL2: %s" msgstr "Fehler beim Entfernen der location vom LFC: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 #, fuzzy msgid "Transfer failed" msgstr "Einige Transfers schlugen fehl" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 #, fuzzy msgid "Transfer succeeded" msgstr "Transfer vollständig" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:56 msgid "ftp_complete_callback: success" msgstr "ftp_complete_callback: erfolgreich" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:62 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "ftp_complete_callback: Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:78 #, fuzzy msgid "ftp_check_callback" msgstr "ftp_check_callback" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:80 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:108 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:285 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:321 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:731 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:764 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:801 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:932 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:996 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1006 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1014 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1022 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1030 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1036 #, c-format msgid "Globus error: %s" msgstr "Globus Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:91 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:107 #, fuzzy msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "Registrierung von Globus FTP buffer fehlgeschlagen - breche check ab" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "check_ftp: globus_ftp_client_size fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "check_ftp: Zeitüberschreitung bei Warten für Größe" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "check_ftp: konnten Dateigröße nicht bestimmen" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, fuzzy, c-format msgid "check_ftp: obtained size: %lli" msgstr "Check: erhielt Größe: %lli" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "check_ftp: globus_ftp_client_modification_time fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "check_ftp: Zeitüberschreitung bei Warten auf modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "check_ftp: konnte Modification time von Datei nicht erhalten" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, fuzzy, c-format msgid "check_ftp: obtained modification date: %s" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:147 msgid "check_ftp: globus_ftp_client_get failed" msgstr "check_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:154 msgid "check_ftp: globus_ftp_client_register_read" msgstr "check_ftp: globus_ftp_client_register_read" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:166 #, fuzzy msgid "check_ftp: timeout waiting for partial get" msgstr "check_ftp: Zeitüberschreitung beim Warten auf partial get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:193 #, fuzzy, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "Löschen von Datei schlug fehl, versuche als Verzeichnis zu löschen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:204 #, fuzzy msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "delete_ftp: globus_ftp_client_delete fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:210 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:232 #, fuzzy msgid "delete_ftp: timeout waiting for delete" msgstr "list_files_ftp: Zeitüberschreitung bei Warten auf Dateigröße " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:226 #, fuzzy msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "delete_ftp: globus_ftp_client_delete fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:280 #, fuzzy, c-format msgid "mkdir_ftp: making %s" msgstr "mkdir_ftp: erstelle %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:289 #, fuzzy msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "mkdir_ftp: Zeitüberschreitung bei mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:325 #, fuzzy msgid "Timeout waiting for mkdir" msgstr "mkdir_ftp: Zeitüberschreitung bei mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:348 #, fuzzy msgid "start_reading_ftp" msgstr "start_reading_ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:352 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "start_reading_ftp: globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 #:
src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:366 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "start_reading_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "start_reading_ftp: globus_thread_create fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "stop_reading_ftp: Abbruch der Verbindung" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, fuzzy, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, fuzzy, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "stop_reading_ftp: verlasse: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "ftp_read_thread: beziehe und registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, fuzzy, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:382 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "ftp_read_thread: Globus Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "ftp_read_thread: zu viele Registrierungsfehler - Abbruch: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, fuzzy, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" "ftp_read_thread: Fehler bei Registrieren von Globus Puffer - verschoben auf " "später: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:398 msgid "ftp_read_thread: waiting for eof" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:402 #, fuzzy msgid "ftp_read_thread: waiting for buffers released" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:410 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:664 #, fuzzy msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" "ftp_read_thread: Fehler bei Registrieren von Globus Puffer - verschoben auf " "später: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:417 #, fuzzy msgid "ftp_read_thread: exiting" msgstr "ftp_read_thread: Beenden" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:438 #, fuzzy, c-format msgid "ftp_read_callback: failure: %s" msgstr "ftp_read_callback: Fehler" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "ftp_read_callback: Erfolg" #:
src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 #, fuzzy msgid "Failed to get ftp file" msgstr "Fehler bei Bezug von FTP Datei" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:519 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:708 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:129 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:169 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1214 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1248 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1430 #: src/hed/libs/common/Thread.cpp:240 src/hed/libs/common/Thread.cpp:243 #: src/hed/libs/credential/Credential.cpp:1076 #: src/hed/libs/data/DataPointDelegate.cpp:628 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:66 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:82 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:98 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:117 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:127 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:135 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:144 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:69 src/hed/shc/arcpdp/ArcPDP.cpp:234 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:305 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:258 #: src/libs/data-staging/Scheduler.cpp:117 #: src/services/a-rex/delegation/DelegationStore.cpp:36 #: src/services/a-rex/delegation/DelegationStore.cpp:41 #: src/services/a-rex/delegation/DelegationStore.cpp:46 #: src/services/a-rex/delegation/DelegationStore.cpp:75 #: src/services/a-rex/delegation/DelegationStore.cpp:81 #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:233 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:408 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:395 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:435 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:487 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:602 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:636 #, c-format msgid "%s" msgstr "%s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:545 #, fuzzy msgid "start_writing_ftp: mkdir" msgstr "start_writing_ftp: mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:547 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "start_writing_ftp: mkdir fehlgeschlagen - versuche weiter zu schreiben" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:549 #, fuzzy msgid "start_writing_ftp: put" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:563 msgid "start_writing_ftp: put failed" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "start_writing_ftp: globus_thread_create fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 #: src/hed/libs/data/DataPointDelegate.cpp:307 #, fuzzy msgid "StopWriting: aborting connection" msgstr "StopWriting: Abbruch der Verbindung" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #: src/hed/dmc/http/DataPointHTTP.cpp:982 #: src/hed/libs/data/DataPointDelegate.cpp:321 #, fuzzy, c-format msgid "StopWriting: Calculated checksum %s" msgstr "Errechnete checksum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #: src/hed/dmc/http/DataPointHTTP.cpp:986 #:
src/hed/libs/data/DataPointDelegate.cpp:325 #, fuzzy, c-format msgid "StopWriting: looking for checksum of %s" msgstr "list_files_ftp: Suche nach Größe von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:800 #, fuzzy msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "list_files_ftp: globus_ftp_client_size fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:804 #, fuzzy msgid "list_files_ftp: timeout waiting for cksum" msgstr "list_files_ftp: Zeitüberschreitung bei Warten auf Dateigröße " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 #, fuzzy msgid "list_files_ftp: no checksum information possible" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:817 #, fuzzy, c-format msgid "list_files_ftp: checksum %s" msgstr "meta_get_data: checksum: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 #: src/hed/dmc/http/DataPointHTTP.cpp:995 #: src/hed/libs/data/DataPointDelegate.cpp:332 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #: src/hed/dmc/http/DataPointHTTP.cpp:997 #: src/hed/libs/data/DataPointDelegate.cpp:334 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #: src/hed/dmc/http/DataPointHTTP.cpp:999 #: src/hed/libs/data/DataPointDelegate.cpp:337 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "ftp_write_thread: Beziehe und Registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 #, fuzzy msgid "ftp_write_thread: data callback failed - aborting" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 #, fuzzy msgid "ftp_write_thread: waiting for eof" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:662 #, fuzzy msgid "ftp_write_thread: waiting for buffers released" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 #, fuzzy msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "ftp_write_thread: Beziehe und Registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:669 #, fuzzy msgid "ftp_write_thread: exiting" msgstr "ftp_read_thread: Beenden" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:688 #, fuzzy, c-format msgid "ftp_write_callback: failure: %s" msgstr "ftp_write_callback: Fehler" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:690 #, fuzzy, c-format msgid "ftp_write_callback: success %s" msgstr "ftp_write_callback: Erfolg" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:706 msgid "Failed to store ftp file" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:711 #, fuzzy msgid "ftp_put_complete_callback: success" msgstr "ftp_complete_callback: erfolgreich" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:725 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "list_files_ftp: Suche nach Größe von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:729 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "list_files_ftp: globus_ftp_client_size fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:735 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:736 msgid "list_files_ftp: timeout waiting for size" msgstr "list_files_ftp: Zeitüberschreitung bei Warten auf Dateigröße " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:742 msgid "list_files_ftp: failed to get file's size" msgstr "list_files_ftp: Fehler bei Bezug von Dateigröße" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:755 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:761 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "list_files_ftp: globus_ftp_client_modification_time fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:768 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "" "list_files_ftp: Zeitüberschreitung bei Warten auf Zeitpunkt der letzten " "Dateiänderung " #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:776 msgid "list_files_ftp: failed to get file's modification time" msgstr "" "list_files_ftp: Fehler bei Bezug von Zeitpunkt der letzten Dateiänderung" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:790 #, fuzzy, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "list_files_ftp: Suche nach Größe von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:830 #, fuzzy, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:835 #, fuzzy msgid "No results returned from stat" msgstr "Keine Antwort von %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:841 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:854 #, c-format msgid "Unexpected path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:887 #, fuzzy, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:930 #, fuzzy msgid "Rename: globus_ftp_client_move failed" msgstr "check_ftp: globus_ftp_client_get fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:936 #, fuzzy msgid "Rename: timeout waiting for operation to complete" msgstr "check_ftp: Zeitüberschreitung bei Warten auf modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:995 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "init_handle: globus_ftp_client_handleattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1004 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "init_handle: globus_ftp_client_handleattr_set_gridftp2 fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1013 #, fuzzy msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "init_handle: globus_ftp_client_handle_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1020 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "init_handle: globus_ftp_client_operationattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1028 #, fuzzy msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "init_handle: globus_ftp_client_operationattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1034 #, fuzzy msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "init_handle: globus_ftp_client_operationattr_init fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1086 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1115 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "globus_ftp_client_operationattr_set_authorization: Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1114 #, fuzzy msgid "Failed to set credentials for GridFTP transfer" msgstr "Fehler bei Setzen von Credentials für GridFTP transfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1120 msgid "Using secure data transfer" msgstr "Nutze sicheren Datentransfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1125 msgid "Using insecure data transfer" msgstr "Nutze unsicheren Datentransfer" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1152 #, fuzzy msgid "~DataPoint: destroy ftp_handle" msgstr "DataPoint::deinit_handle: destroy ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1155 #, fuzzy msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "DataPoint::deinit_handle: destroy ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1173 #, fuzzy msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "DataPoint::deinit_handle: destroy 
ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" "Fehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im " "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte " "die Entwickler informieren." #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:390 #, fuzzy msgid "ftp_read_thread: failed to register buffers" msgstr "ftp_read_thread: beziehe und registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:405 #, fuzzy msgid "ftp_read_thread: failed to release buffers" msgstr "" "ftp_read_thread: Fehler bei Registrieren von Globus Puffer - verschoben auf " "später: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:443 #, c-format msgid "ftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%u" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:457 #, fuzzy, c-format msgid "ftp_read_callback: delayed data chunk: %llu %llu" msgstr "ftp_read_callback: Fehler" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:464 #, c-format msgid "ftp_read_callback: unexpected data out of order: %llu != %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:471 msgid "ftp_read_callback: too many unexpected out of order chunks" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:492 #, fuzzy, c-format msgid "ftp_read_callback: Globus error: %s" msgstr "ftp_complete_callback: Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:517 #, fuzzy msgid "ftp_get_complete_callback: Failed to get ftp file" msgstr "ftp_complete_callback: Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:522 #, fuzzy msgid "ftp_get_complete_callback: success" msgstr "ftp_complete_callback: erfolgreich" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:577 #, fuzzy msgid "start_writing_ftp: waiting for data tag" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:580 #, fuzzy msgid "start_writing_ftp: failed to read data tag" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:585 #, fuzzy msgid "start_writing_ftp: waiting for data chunk" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:587 #, fuzzy msgid "start_writing_ftp: failed to read data chunk" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:598 #, c-format msgid "ftp_write_thread: data out of order in stream mode: %llu != %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:605 #, fuzzy msgid "ftp_write_thread: too many out of order chunks in stream mode" msgstr "ftp_write_thread: Beziehe und Registriere Puffer" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:610 #, fuzzy, c-format msgid "start_writing_ftp: data chunk: %llu %llu" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:616 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:642 #, fuzzy, c-format msgid "ftp_write_thread: Globus error: %s" msgstr "ftp_read_thread: Globus Fehler: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:635 #, fuzzy, c-format msgid "start_writing_ftp: delayed data chunk: %llu %llu" msgstr "start_reading_ftp: erzielte Größe: %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:654 #, fuzzy msgid "start_writing_ftp: waiting for some buffers sent" msgstr "start_reading_ftp: 
Zeitüberschreitung bei Warten auf Dateigröße" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:660 #, fuzzy msgid "ftp_write_thread: waiting for transfer complete" msgstr "ftp_read_thread: warte auf EOF" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:811 #, fuzzy msgid "list_files_ftp: no checksum information supported" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:813 #, fuzzy msgid "list_files_ftp: no checksum information returned" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:908 msgid "Too many failures to obtain checksum - giving up" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1268 msgid "Expecting Command and URL provided" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1275 #: src/hed/libs/data/DataExternalHelper.cpp:376 #, fuzzy msgid "Expecting Command among arguments" msgstr "Kann doc Argument nicht anlegen" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1279 #: src/hed/libs/data/DataExternalHelper.cpp:380 #, fuzzy msgid "Expecting URL among arguments" msgstr "Kann doc Argument nicht anlegen" #: src/hed/dmc/gridftp/Lister.cpp:221 src/hed/dmc/gridftp/Lister.cpp:289 #: src/hed/dmc/gridftp/Lister.cpp:384 src/hed/dmc/gridftp/Lister.cpp:767 #: src/hed/dmc/gridftp/Lister.cpp:812 #, c-format msgid "Failure: %s" msgstr "Fehler: %s" #: src/hed/dmc/gridftp/Lister.cpp:288 #, fuzzy msgid "Error getting list of files (in list)" msgstr "Fehler bei Bezug von Dateiliste (in Liste)" #: src/hed/dmc/gridftp/Lister.cpp:290 #, fuzzy msgid "Assuming - file not found" msgstr "Vermuting - Datei nicht gefunden" #: src/hed/dmc/gridftp/Lister.cpp:307 #, fuzzy, c-format msgid "list record: %s" msgstr "Listen-Eintrag: %s" #: src/hed/dmc/gridftp/Lister.cpp:362 msgid "Failed reading list of files" msgstr "Fehler bei Lesen von Dateiliste" #: src/hed/dmc/gridftp/Lister.cpp:398 msgid "Failed reading data" msgstr "Fehler bei Lesen von Daten" #: src/hed/dmc/gridftp/Lister.cpp:426 #, c-format msgid "Command: %s" msgstr "Kommando: %s" #: src/hed/dmc/gridftp/Lister.cpp:430 src/hed/dmc/gridftp/Lister.cpp:471 #: src/hed/mcc/http/PayloadHTTP.cpp:1010 msgid "Memory allocation error" msgstr "Speicherallokationsfehler" #: src/hed/dmc/gridftp/Lister.cpp:438 #, c-format msgid "%s failed" msgstr "%s fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:442 #, fuzzy msgid "Command is being sent" msgstr "Kommando wird gesendet" #: src/hed/dmc/gridftp/Lister.cpp:447 msgid "Waiting for response" msgstr "Warte vor Antwort" #: src/hed/dmc/gridftp/Lister.cpp:452 #, fuzzy msgid "Callback got failure" msgstr "Callback erhielt Fehler" #: src/hed/dmc/gridftp/Lister.cpp:538 #, fuzzy msgid "Failed in globus_cond_init" msgstr "Fehler bei Initialisierung der condition" #: src/hed/dmc/gridftp/Lister.cpp:542 #, fuzzy msgid "Failed in globus_mutex_init" msgstr "Fehler bei Initialisierung des Mutex" #: src/hed/dmc/gridftp/Lister.cpp:549 #, fuzzy msgid "Failed allocating memory for handle" msgstr "Fehler bei Reservieren des Speichers für handle" #: src/hed/dmc/gridftp/Lister.cpp:554 #, fuzzy msgid "Failed in globus_ftp_control_handle_init" msgstr "Memory leak (globus_ftp_control_handle_t)" #: src/hed/dmc/gridftp/Lister.cpp:562 #, fuzzy msgid "Failed to enable IPv6" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/dmc/gridftp/Lister.cpp:573 msgid "Closing connection" msgstr "Schließe Verbindung" #: src/hed/dmc/gridftp/Lister.cpp:580 
src/hed/dmc/gridftp/Lister.cpp:595 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:605 msgid "Closed successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/hed/dmc/gridftp/Lister.cpp:607 #, fuzzy msgid "Closing may have failed" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/gridftp/Lister.cpp:634 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:639 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:645 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:661 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:684 #, fuzzy, c-format msgid "EPSV failed: %s" msgstr "EPSV fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:688 #, fuzzy msgid "EPSV failed" msgstr "EPSV fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:695 #, c-format msgid "PASV failed: %s" msgstr "PASV fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:699 msgid "PASV failed" msgstr "PASV fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:765 #, fuzzy msgid "Failed to apply local address to data connection" msgstr "Fehler bei Schließen von Verbindung 1" #: src/hed/dmc/gridftp/Lister.cpp:783 #, fuzzy msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "Kann Host und/oder Port nicht aus Antwort zu EPSV/PASV herauslesen" #: src/hed/dmc/gridftp/Lister.cpp:788 #, fuzzy, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "Datenkanal: %d.%d.%d.%d:%d" #: src/hed/dmc/gridftp/Lister.cpp:806 #, fuzzy, c-format msgid "Data channel: [%s]:%d" msgstr "Datenkanal: [%s]:%d" #: src/hed/dmc/gridftp/Lister.cpp:810 #, fuzzy msgid "Obtained host and address are not acceptable" msgstr "Erhaltener host und Adresse sind nicht akzeptabel" #: src/hed/dmc/gridftp/Lister.cpp:820 msgid "Failed to open data channel" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/dmc/gridftp/Lister.cpp:838 #, c-format msgid "Unsupported protocol in url %s" msgstr "Nicht unterstütztes Protokoll in URL %s" #: src/hed/dmc/gridftp/Lister.cpp:850 msgid "Reusing connection" msgstr "Wiederholte Nutzung von Verbindung" #: src/hed/dmc/gridftp/Lister.cpp:874 #, c-format msgid "Failed connecting to server %s:%d" msgstr "Fehler bei Verbinden zu Server %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:880 #, c-format msgid "Failed to connect to server %s:%d" msgstr "Fehler bei Verbinden zu Server %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:896 #, fuzzy msgid "Missing authentication information" msgstr "Fehlende Authentisierungs-Information" #: src/hed/dmc/gridftp/Lister.cpp:905 src/hed/dmc/gridftp/Lister.cpp:919 #, fuzzy, c-format msgid "Bad authentication information: %s" msgstr "Ungültige Authentisierungs-Information: %s" #: src/hed/dmc/gridftp/Lister.cpp:928 src/hed/dmc/gridftp/Lister.cpp:943 #, fuzzy, c-format msgid "Failed authenticating: %s" msgstr "Fehler bei Authentisieren: %s" #: src/hed/dmc/gridftp/Lister.cpp:935 msgid "Failed authenticating" msgstr "Fehler bei Authentisieren" #: src/hed/dmc/gridftp/Lister.cpp:970 src/hed/dmc/gridftp/Lister.cpp:1126 #, c-format msgid "DCAU failed: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:974 src/hed/dmc/gridftp/Lister.cpp:1131 msgid "DCAU failed" msgstr "DCAU fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:994 #, fuzzy msgid "MLST is not supported - trying LIST" msgstr "MLST ist nicht unterstützt - versuche LIST" #: 
src/hed/dmc/gridftp/Lister.cpp:1010 #, fuzzy, c-format msgid "Immediate completion expected: %s" msgstr "Sofortige Vervollständigung erwartet: %s" #: src/hed/dmc/gridftp/Lister.cpp:1014 #, fuzzy msgid "Immediate completion expected" msgstr "Sofortige Vervollständigung erwartet" #: src/hed/dmc/gridftp/Lister.cpp:1027 #, fuzzy, c-format msgid "Missing information in reply: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/hed/dmc/gridftp/Lister.cpp:1061 #, c-format msgid "Missing final reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1085 #, fuzzy, c-format msgid "Unexpected immediate completion: %s" msgstr "Unerwartete sofortige Vervollständigung: %s" #: src/hed/dmc/gridftp/Lister.cpp:1097 #, fuzzy, c-format msgid "LIST/MLST failed: %s" msgstr "LIST/MLST fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:1102 #, fuzzy msgid "LIST/MLST failed" msgstr "LIST/MLST fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:1152 msgid "MLSD is not supported - trying NLST" msgstr "MLSD ist nicht unterstützt - versuche NLST" #: src/hed/dmc/gridftp/Lister.cpp:1166 #, fuzzy, c-format msgid "Immediate completion: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/gridftp/Lister.cpp:1174 #, c-format msgid "NLST/MLSD failed: %s" msgstr "NLST/MLSD fehlgeschlagen: %s" #: src/hed/dmc/gridftp/Lister.cpp:1180 msgid "NLST/MLSD failed" msgstr "NLST/MLSD fehlgeschlagen" #: src/hed/dmc/gridftp/Lister.cpp:1201 #, c-format msgid "Data transfer aborted: %s" msgstr "Datentransfer abgebrochen: %s" #: src/hed/dmc/gridftp/Lister.cpp:1206 msgid "Data transfer aborted" msgstr "Datentransfer abgebrochen" #: src/hed/dmc/gridftp/Lister.cpp:1218 msgid "Failed to transfer data" msgstr "Fehler bei Transfer von Daten" #: src/hed/dmc/http/DataPointHTTP.cpp:409 #: src/hed/dmc/http/DataPointHTTP.cpp:597 #: src/hed/dmc/http/DataPointHTTP.cpp:691 #: src/hed/dmc/http/DataPointHTTP.cpp:1137 #: src/hed/dmc/http/DataPointHTTP.cpp:1282 #: src/hed/dmc/http/DataPointHTTP.cpp:1431 #, fuzzy, c-format msgid "Redirecting to %s" msgstr "Weiterleitung zu neuer URL: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:461 #, fuzzy, c-format msgid "PROPFIND response: %s" msgstr "Keine SOAP Antwort" #: src/hed/dmc/http/DataPointHTTP.cpp:515 #, fuzzy, c-format msgid "Using checksum %s" msgstr "Errechnete checksum: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:523 #, c-format msgid "No matching checksum type, using first in list %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:616 #: src/hed/dmc/http/DataPointHTTP.cpp:710 #, fuzzy msgid "No information returned by PROPFIND" msgstr "A-REX lieferte keinen Job Identifikator zurück" #: src/hed/dmc/http/DataPointHTTP.cpp:767 #, fuzzy, c-format msgid "Stat: obtained size %llu" msgstr "StartReading: erhielt Größe: %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:771 #, fuzzy, c-format msgid "Stat: obtained modification time %s" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:775 #, fuzzy, c-format msgid "Stat: obtained checksum %s" msgstr "Check: erhielt checksum: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:991 #, fuzzy, c-format msgid "Could not find checksum: %s" msgstr "Check: erhielt checksum: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:993 #, c-format msgid "Checksum of %s is not available" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1037 #, fuzzy, c-format msgid "Check: obtained size %llu" msgstr "Check: erhielt Größe: %lli" #: src/hed/dmc/http/DataPointHTTP.cpp:1039 #, fuzzy, c-format msgid "Check: obtained modification time %s" msgstr "Check: erhielt Erstelldatum: %s" #: 
src/hed/dmc/http/DataPointHTTP.cpp:1154 #: src/hed/dmc/http/DataPointHTTP.cpp:1302 #, fuzzy, c-format msgid "HTTP failure %u - %s" msgstr "Proxy Pfad: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1459 #, fuzzy, c-format msgid "Failed to create %s, trying to create parent directories" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/dmc/http/DataPointHTTP.cpp:1648 #, fuzzy, c-format msgid "Error creating directory: %s" msgstr "Fehler bei Anlegen von Verzeichnis %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:27 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:40 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:43 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:105 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:108 msgid "Failed to extract VOMS nickname from proxy" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:110 #, fuzzy, c-format msgid "Using Rucio account %s" msgstr "Nutze space token %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:123 #, fuzzy, c-format msgid "Strange path in Rucio URL: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/dmc/rucio/DataPointRucio.cpp:133 src/hed/libs/common/FileLock.cpp:42 #, fuzzy msgid "Cannot determine hostname from gethostname()" msgstr "Kann hostname von gethostname() nicht ermitteln" #: src/hed/dmc/rucio/DataPointRucio.cpp:171 #, c-format msgid "Bad path for %s: Format should be /replicas//" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:190 #, fuzzy, c-format msgid "Failed to query parent DIDs: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:195 #, fuzzy, c-format msgid "Failed to parse Rucio info: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:242 #: src/hed/dmc/rucio/DataPointRucio.cpp:522 #, fuzzy, c-format msgid "No locations found for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:333 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:383 #, fuzzy, c-format msgid "Rucio returned %s" msgstr "unbekannter return code %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:452 #: src/hed/dmc/rucio/DataPointRucio.cpp:543 #, fuzzy, c-format msgid "Failed to parse Rucio response: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:457 #: src/hed/dmc/rucio/DataPointRucio.cpp:548 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:462 #, fuzzy, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:467 #, fuzzy, c-format msgid "No pfns returned in Rucio response: %s" msgstr "Sofortige Vervollständigung: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:477 #, fuzzy, c-format msgid "Cannot determine replica type for %s" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/dmc/rucio/DataPointRucio.cpp:479 #, c-format msgid "%s: replica type %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:482 #, fuzzy, c-format msgid "Skipping %s replica %s" msgstr "Suche nach Existenz von %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:498 #, fuzzy, c-format msgid "Error extracting RSE for %s" msgstr "Fehler bei 
Anlegen von LFC Eintrag: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:508 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:511 #, c-format msgid "%s: size %llu" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:515 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:518 #, fuzzy, c-format msgid "%s: checksum %s" msgstr "Errechneted checksum: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:553 #, fuzzy, c-format msgid "Parent dataset: %s" msgstr "Identität: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:575 #, fuzzy, c-format msgid "Could not find matching RSE to %s" msgstr "konnte Start von clientxrsl nicht finden" #: src/hed/dmc/rucio/DataPointRucio.cpp:617 #, fuzzy, c-format msgid "Sending Rucio trace: %s" msgstr "Nutze space token %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:620 #, fuzzy, c-format msgid "Failed to send traces to Rucio: %s" msgstr "Kann Kanal stdout nicht nutzen" #: src/hed/dmc/s3/DataPointS3.cpp:269 #, fuzzy, c-format msgid "Initializing S3 connection to %s" msgstr "LDAPQuery: Initialisiere Verbindung zu %s:%d" #: src/hed/dmc/s3/DataPointS3.cpp:274 #, fuzzy, c-format msgid "Failed to initialize S3 to %s: %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/hed/dmc/s3/DataPointS3.cpp:470 src/hed/dmc/s3/DataPointS3.cpp:592 #, fuzzy, c-format msgid "Failed to read object %s: %s; %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/dmc/s3/DataPointS3.cpp:669 #, fuzzy, c-format msgid "Failed to write object %s: %s; %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:56 #, fuzzy, c-format msgid "TURL %s cannot be handled" msgstr "PDP: %s kann nicht geladen werden" #: src/hed/dmc/srm/DataPointSRM.cpp:83 #, c-format msgid "Check: looking for metadata: %s" msgstr "Check: looking für Metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:94 #, c-format msgid "Check: obtained size: %lli" msgstr "Check: erhielt Größe: %lli" #: src/hed/dmc/srm/DataPointSRM.cpp:100 #, c-format msgid "Check: obtained checksum: %s" msgstr "Check: erhielt checksum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:104 #, fuzzy, c-format msgid "Check: obtained modification date: %s" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:108 #, fuzzy msgid "Check: obtained access latency: low (ONLINE)" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:112 #, fuzzy msgid "Check: obtained access latency: high (NEARLINE)" msgstr "Check: erhielt Erstelldatum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:131 #, fuzzy, c-format msgid "Remove: deleting: %s" msgstr "remove_srm: lösche: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:149 #, fuzzy, c-format msgid "Creating directory: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/dmc/srm/DataPointSRM.cpp:197 src/hed/dmc/srm/DataPointSRM.cpp:246 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:217 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:226 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:231 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:237 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:265 src/hed/dmc/srm/DataPointSRM.cpp:408 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, fuzzy, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "%s: Datei Anfrage %s in SRM queue. Schlage für %i Sekunden" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:465 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "Überprüfen der URL zurückgegeben von SRM: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "SRM gab keine nützliche Transfer URLs: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "StartReading" #: src/hed/dmc/srm/DataPointSRM.cpp:318 #, fuzzy msgid "StartReading: File was not prepared properly" msgstr "AREXClient wurde nicht richtig angelegt." #: src/hed/dmc/srm/DataPointSRM.cpp:328 src/hed/dmc/srm/DataPointSRM.cpp:507 #, fuzzy, c-format msgid "Redirecting to new URL: %s" msgstr "Weiterleitung zu neuer URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:389 msgid "Calling PrepareWriting when request was already prepared!" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:418 #, fuzzy msgid "No space token specified" msgstr "Kein space token angegeben" #: src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "Warnung: Nutze SRM Protokol v1 das keine space tokens unterstützt" #: src/hed/dmc/srm/DataPointSRM.cpp:427 #, fuzzy, c-format msgid "Using space token description %s" msgstr "Nutze space token Beschreibugn %s" #: src/hed/dmc/srm/DataPointSRM.cpp:433 #, fuzzy, c-format msgid "Error looking up space tokens matching description %s" msgstr "" "Warnung: Fehler beim Nachschlagen von space tokens, entsprechend der " "Beschreibung %s. Kopiere ohne Nutzung der Token" #: src/hed/dmc/srm/DataPointSRM.cpp:437 #, fuzzy, c-format msgid "No space tokens found matching description %s" msgstr "Nutze space token Beschreibugn %s" #: src/hed/dmc/srm/DataPointSRM.cpp:442 #, c-format msgid "Using space token %s" msgstr "Nutze space token %s" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, fuzzy, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "%s: Datei Anfrage %s in SRM queue. Schlage für %i Sekunden" #: src/hed/dmc/srm/DataPointSRM.cpp:487 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:495 msgid "StartWriting" msgstr "StartWriting" #: src/hed/dmc/srm/DataPointSRM.cpp:497 #, fuzzy msgid "StartWriting: File was not prepared properly" msgstr "AREXClient wurde nicht richtig angelegt." 
#: src/hed/dmc/srm/DataPointSRM.cpp:556 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "FinishWriting: suche nach Metadaten: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:571 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "FinishWriting: erhielt checksum: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:574 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:577 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:580 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:581 src/hed/dmc/srm/DataPointSRM.cpp:582 msgid "No checksum information from server" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:583 src/hed/dmc/srm/DataPointSRM.cpp:584 msgid "No checksum verification possible" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:590 msgid "Failed to release completed request" msgstr "Fehler beim Freigeben der abgeschlossenen Anfrage" #: src/hed/dmc/srm/DataPointSRM.cpp:633 src/hed/dmc/srm/DataPointSRM.cpp:700 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "ListFiles: suche nach Metadaten: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:818 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:51 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:90 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:142 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:181 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:221 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:259 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:303 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:365 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:438 msgid "SRM did not return any information" msgstr "SRM lieferte keine Information zurück" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:316 #, c-format msgid "File could not be moved to Running state: %s" msgstr "Datei konnte nicht in den Running-Zustand versetzt werden: %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:372 msgid "SRM did not return any useful information" msgstr "SRM lieferte keinerlei gebrauchbare Information" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:450 msgid "File could not be moved to Done state" msgstr "Datei konnte nicht in den Done-Zustand versetzt werden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:88 msgid "Could not determine version of server" msgstr "Konnte Version des Servers nicht bestimmen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:94 #, c-format msgid "Server SRM version: %s" msgstr "Server SRM-Version: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:99 #, c-format msgid "Server implementation: %s" msgstr "Server-Implementierung: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:136 #, c-format msgid "Adding space token %s" msgstr "Füge space token %s hinzu" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:163 msgid "No request tokens found" msgstr "Keine Anfrage-Token gefunden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:176 #, c-format msgid "Adding request token %s" msgstr "Füge Anfrage-Token %s hinzu" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:237 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:642 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:828 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1385 #, c-format msgid "%s: File
request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Datei-Anfrage %s in SRM-Queue. Schlafe für %i Sekunden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:275 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:327 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:698 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:764 #, c-format msgid "File is ready! TURL is %s" msgstr "Datei ist bereit! TURL ist %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:359 #, c-format msgid "Setting userRequestDescription to %s" msgstr "Setze userRequestDescription auf %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:414 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Bring-online-Anfrage %s in SRM-Queue. Schlafe für %i Sekunden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:457 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1160 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1194 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1228 msgid "No request token specified!" msgstr "Kein Anfrage-Token spezifiziert!" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:524 msgid "Request is reported as ABORTED, but all files are done" msgstr "" "Anfrage wurde berichtet als ABORTED (abgebrochen), aber alle Dateien wurden " "bearbeitet" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:530 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" "Anfrage wurde berichtet als ABORTED (abgebrochen), denn sie wurde abgebrochen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:536 #, c-format msgid "Request is reported as ABORTED. Reason: %s" msgstr "Anfrage wurde berichtet als ABORTED (abgebrochen). Grund: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:673 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:745 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "Pfad %s ist ungültig, lege benötigte Verzeichnisse an" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:678 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:750 #, c-format msgid "Error creating required directories for %s" msgstr "Fehler beim Anlegen benötigter Verzeichnisse für %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:851 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:899 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" "Verzeichnis ist zu groß, um es in einem Aufruf zu listen, werde Aufruf " "mehrfach ausführen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:936 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:942 #: src/hed/shc/legacy/auth_otokens.cpp:437 #, c-format msgid "%s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:975 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" "Verzeichnis enthält mehr als %i Dateien, werde Aufruf mehrfach ausführen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1185 #, c-format msgid "Files associated with request token %s released successfully" msgstr "Dateien assoziiert mit Anfrage-Token %s erfolgreich freigegeben" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1219 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "Put für Dateien assoziiert mit Anfrage-Token %s erfolgreich abgeschlossen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1254 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr
"Dateien assoziiert mit Anfrage Token %s erfolgreich abgebrochen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1271 #, fuzzy, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "Konnte Metadaen für Datei %s nicht finden" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1277 msgid "Type is file, calling srmRm" msgstr "Typ ist Datei, rufe srmRm auf" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 #, fuzzy msgid "Type is dir, calling srmRmDir" msgstr "Typ ist Datei, rufe srmRmDir auf" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "File type is not available, attempting file delete" msgstr "Dateitype ist nicht verfügbar, versuche Datei zu löschen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1288 msgid "File delete failed, attempting directory delete" msgstr "Löschen von Datei schlug fehl, versuche als Verzeichnis zu löschen" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1313 #, fuzzy, c-format msgid "File %s removed successfully" msgstr "Datei %s erfolgreich entfernt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1340 #, fuzzy, c-format msgid "Directory %s removed successfully" msgstr "Verzeichnis %s erfolgreich entfernt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1455 #, fuzzy, c-format msgid "Checking for existence of %s" msgstr "Suche nache Existenz von %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1458 #, fuzzy, c-format msgid "File already exists: %s" msgstr "LFN existiert bereits in LFC" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1495 #, fuzzy, c-format msgid "Error creating directory %s: %s" msgstr "Fehler bei Anlegen von Verzeichnis %s: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, fuzzy, c-format msgid "Storing port %i for %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, fuzzy, c-format msgid "No port succeeded for %s" msgstr "Keine locations gefunden für %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, fuzzy, c-format msgid "SOAP request: %s" msgstr "XACML Anfrage: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, fuzzy, c-format msgid "SOAP fault: %s" msgstr "Voreinstellung: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 #, fuzzy msgid "Reconnecting" msgstr "Wiederholte Nutzung von Verbindung" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, fuzzy, c-format msgid "SRM Client status: %s" msgstr "*** Client Anfrage: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "Keine SOAP Antwort" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #, fuzzy, c-format msgid "SOAP response: %s" msgstr "Keine SOAP Antwort" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:75 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:161 #, fuzzy, c-format msgid "Failed to acquire lock on file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:80 #, fuzzy, c-format msgid "Error reading info from file %s:%s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:94 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:186 #, fuzzy, c-format msgid "Bad or old format detected in file %s, in line %s" 
msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:99 #, fuzzy, c-format msgid "Cannot convert string %s to int in line %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:202 #, fuzzy, c-format msgid "Error writing srm info file %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:81 #, fuzzy msgid "" "Missing reference to factory and/or module. It is unsafe to use Xrootd in " "non-persistent mode - Xrootd code is disabled. Report to developers." msgstr "" "Fehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im " "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte " "die Entwickler informieren." #: src/hed/dmc/xrootd/DataPointXrootd.cpp:120 #, c-format msgid "Could not handle checksum %s: skip checksum check" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:126 #, fuzzy, c-format msgid "Failed to create xrootd copy job: %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:143 #, fuzzy, c-format msgid "Failed to copy %s: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:194 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:196 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:227 #, fuzzy, c-format msgid "Could not open file %s for reading: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:242 #, fuzzy, c-format msgid "Unable to find file size of %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:306 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:329 #, fuzzy, c-format msgid "xrootd write failed: %s" msgstr "SendData: Schreiben von Daten schlug fehl: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:338 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, fuzzy, c-format msgid "xrootd close failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:361 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:374 #, fuzzy, c-format msgid "xrootd open failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:388 #, fuzzy, c-format msgid "close failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:430 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:449 #, fuzzy, c-format msgid "Could not stat file %s: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:454 msgid "Not getting checksum of zip constituent" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:458 #, fuzzy, c-format msgid "Could not get checksum of %s: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:462 #, fuzzy, c-format msgid "Checksum %s" msgstr "Errechneted checksum: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:500 #, fuzzy, c-format msgid "Failed to open directory %s: %s" msgstr "Fehler beim Entfernen von LFC Verzeichnis: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:518 #, fuzzy, c-format msgid "Error while reading dir %s: %s" msgstr 
"Fehler beim Listen von Verzeichnis: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:568 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:586 #, fuzzy, c-format msgid "Error creating required dirs: %s" msgstr "Fehler bei Anlegen benötigter Verzeichnisse: %s" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "PDP: %s kann nicht geladen werden" #: src/hed/identitymap/IdentityMap.cpp:219 src/hed/shc/legacy/LegacyMap.cpp:221 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "Grid Identität wird zugewiesen zu lokaler Identität '%s'" #: src/hed/libs/common/ArcLocation.cpp:129 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "Stunden" msgstr[1] "Stunde" msgstr[2] "Stunden" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "Minuten" msgstr[1] "Minute" msgstr[2] "Minuten" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "Sekunden" msgstr[1] "Sekunde" msgstr[2] "Sekunden" #: src/hed/libs/common/FileLock.cpp:92 #, fuzzy, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "EACCESS Fehler bei Öffnen von Lock-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:96 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "Fehler bei Öffnen von Lock-Datei %s in initialer Überprüfung: %s" #: src/hed/libs/common/FileLock.cpp:103 #, fuzzy, c-format msgid "Error creating temporary file %s: %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:113 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:124 #, fuzzy, c-format msgid "Could not create lock file %s as it already exists" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/libs/common/FileLock.cpp:128 #, fuzzy, c-format msgid "Error creating lock file %s: %s" msgstr "Fehler bei Lesen von Lock-datei %s. 
%s" #: src/hed/libs/common/FileLock.cpp:133 #, fuzzy, c-format msgid "Error writing to lock file %s: %s" msgstr "Fehler beim Schreiben zu tmp lock Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:141 #, fuzzy, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "Fehler bei Umbenennen von temporärer Datei %s zu Lock_datei %s: %s" #: src/hed/libs/common/FileLock.cpp:150 #, fuzzy, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" "Fehler bei Umbenennen von Lock-Datei, obwohl rename() keinen Fehler " "zurücklieferte" #: src/hed/libs/common/FileLock.cpp:158 #, fuzzy, c-format msgid "%li seconds since lock file %s was created" msgstr "%li Sekunden seit Lock-Datei engelegt wurde" #: src/hed/libs/common/FileLock.cpp:161 #, fuzzy, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "Zeitüberschreitung, werde lock-Datei entfernen" #: src/hed/libs/common/FileLock.cpp:165 #, fuzzy, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:178 #, fuzzy, c-format msgid "This process already owns the lock on %s" msgstr "Warnung: Diesem Prozess gehört der Lock bereits" #: src/hed/libs/common/FileLock.cpp:182 #, fuzzy, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" "Der Prozesse, dem der Lock gehört, läuft nicht mehr. Der Lock wird entfernt." #: src/hed/libs/common/FileLock.cpp:184 #, fuzzy, c-format msgid "Failed to remove file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:193 #, fuzzy, c-format msgid "The file %s is currently locked with a valid lock" msgstr "Die Datei ist derzeit gelockt mit einem gültigen Lock" #: src/hed/libs/common/FileLock.cpp:210 #, fuzzy, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "Fehler bei Unlock von Datei mit Lock %s: %s" #: src/hed/libs/common/FileLock.cpp:222 #, fuzzy, c-format msgid "Lock file %s doesn't exist" msgstr "Lock-Datei %s existiert nicht" #: src/hed/libs/common/FileLock.cpp:224 #, fuzzy, c-format msgid "Error listing lock file %s: %s" msgstr "Fehler bei Listing von Lock-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:230 #, fuzzy, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" "Ein anderer Prozess besitzt das Lock auf Datei %s. Muss zurück zu Start()" #: src/hed/libs/common/FileLock.cpp:236 #, fuzzy, c-format msgid "Error reading lock file %s: %s" msgstr "Fehler bei Lesen von Lock-datei %s. %s" #: src/hed/libs/common/FileLock.cpp:240 #, fuzzy, c-format msgid "Error with formatting in lock file %s" msgstr "Fehler bei Formatieren von Lock-Datei %s: %s" #: src/hed/libs/common/FileLock.cpp:250 #, fuzzy, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "Lock gehört einem anderen host" #: src/hed/libs/common/FileLock.cpp:259 #, fuzzy, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/libs/common/FileLock.cpp:262 #, fuzzy, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "" "Ein anderer Prozess besitzt das Lock auf Datei %s. 
#: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "" #: src/hed/libs/common/Logger.cpp:58 #, c-format msgid "Invalid log level. Using default %s." msgstr "Ungültiges Log-Level. Nutze Voreinstellung %s." #: src/hed/libs/common/Logger.cpp:123 #, c-format msgid "Invalid old log level. Using default %s." msgstr "Ungültiges altes Log-Level. Nutze Voreinstellung %s." #: src/hed/libs/common/OptionParser.cpp:106 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "" #: src/hed/libs/common/OptionParser.cpp:309 #: src/hed/libs/common/OptionParser.cpp:442 #, c-format msgid "Options Group %s:" msgstr "Optionsgruppe %s:" #: src/hed/libs/common/OptionParser.cpp:311 #: src/hed/libs/common/OptionParser.cpp:445 #, c-format msgid "%s:" msgstr "%s:" #: src/hed/libs/common/OptionParser.cpp:313 #, c-format msgid "Show %s help options" msgstr "" #: src/hed/libs/common/OptionParser.cpp:348 msgid "Use -? to get usage description" msgstr "Nutze -? für eine Beschreibung der Verwendung" #: src/hed/libs/common/OptionParser.cpp:425 msgid "Usage:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:428 msgid "OPTION..." msgstr "" #: src/hed/libs/common/OptionParser.cpp:434 msgid "Help Options:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:435 msgid "Show help options" msgstr "" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"."
msgstr "" #: src/hed/libs/common/Run_unix.cpp:225 msgid "Child monitoring signal detected" msgstr "" #: src/hed/libs/common/Run_unix.cpp:230 #, fuzzy, c-format msgid "Child monitoring error: %i" msgstr "Cthread_init() Fehler: %s" #: src/hed/libs/common/Run_unix.cpp:243 msgid "Child monitoring kick detected" msgstr "" #: src/hed/libs/common/Run_unix.cpp:246 msgid "Child monitoring internal communication error" msgstr "" #: src/hed/libs/common/Run_unix.cpp:258 msgid "Child monitoring stdout is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:268 msgid "Child monitoring stderr is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:278 msgid "Child monitoring stdin is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:296 #, c-format msgid "Child monitoring child %d exited" msgstr "" #: src/hed/libs/common/Run_unix.cpp:300 #, c-format msgid "Child monitoring lost child %d (%d)" msgstr "" #: src/hed/libs/common/Run_unix.cpp:321 #, c-format msgid "Child monitoring drops abandoned child %d (%d)" msgstr "" #: src/hed/libs/common/Run_unix.cpp:484 msgid "Child was already started" msgstr "" #: src/hed/libs/common/Run_unix.cpp:488 msgid "No arguments are assigned for external process" msgstr "" #: src/hed/libs/common/Run_unix.cpp:621 #, c-format msgid "Excepton while trying to start external process: %s" msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:256 msgid "Maximum number of threads running - putting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:304 #, fuzzy, c-format msgid "Thread exited with Glib error: %s" msgstr "ftp_read_thread: Globus Fehler: %s" #: src/hed/libs/common/Thread.cpp:306 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:137 #, c-format msgid "URL is not valid: %s" msgstr "" #: src/hed/libs/common/URL.cpp:188 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "" #: src/hed/libs/common/URL.cpp:193 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "" #: src/hed/libs/common/URL.cpp:282 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "" #: src/hed/libs/common/URL.cpp:298 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" #: src/hed/libs/common/URL.cpp:306 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" #: src/hed/libs/common/URL.cpp:322 #, fuzzy, c-format msgid "Invalid port number in %s" msgstr "Ungültige url: %s" #: src/hed/libs/common/URL.cpp:455 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "" #: src/hed/libs/common/URL.cpp:618 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:717 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1186 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "" #: src/hed/libs/common/URL.cpp:1191 #, c-format msgid "URL protocol is not urllist: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:38 src/hed/libs/common/UserConfig.cpp:831 #: src/hed/libs/common/UserConfig.cpp:840 #: src/hed/libs/common/UserConfig.cpp:846 #: src/hed/libs/common/UserConfig.cpp:872 #: src/hed/libs/common/UserConfig.cpp:884 #: src/hed/libs/common/UserConfig.cpp:896 #: src/hed/libs/common/UserConfig.cpp:916 #, c-format msgid 
"Multiple %s attributes in configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:139 #, fuzzy, c-format msgid "Wrong ownership of certificate file: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:141 #, fuzzy, c-format msgid "Wrong permissions of certificate file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/common/UserConfig.cpp:143 #, c-format msgid "Can not access certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:150 #, fuzzy, c-format msgid "Wrong ownership of key file: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:152 #, fuzzy, c-format msgid "Wrong permissions of key file: %s" msgstr "Fehler bei Änderung von Zugriffsrechten auf Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:154 #, c-format msgid "Can not access key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:161 #, fuzzy, c-format msgid "Wrong ownership of proxy file: %s" msgstr "Fehler bei Ändern des Besitzers der destination Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:163 #, fuzzy, c-format msgid "Wrong permissions of proxy file: %s" msgstr "Fehler bei Änderung von Zugriffsrechten auf Datei %s: %s" #: src/hed/libs/common/UserConfig.cpp:165 #, c-format msgid "Can not access proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:176 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:178 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:277 #: src/hed/libs/common/UserConfig.cpp:281 #: src/hed/libs/common/UserConfig.cpp:328 #: src/hed/libs/common/UserConfig.cpp:332 #, c-format msgid "System configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:285 #: src/hed/libs/common/UserConfig.cpp:336 #, fuzzy, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/common/UserConfig.cpp:287 #: src/hed/libs/common/UserConfig.cpp:338 #, c-format msgid "System configuration file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:293 #: src/hed/libs/common/UserConfig.cpp:305 #: src/hed/libs/common/UserConfig.cpp:344 #: src/hed/libs/common/UserConfig.cpp:356 #, c-format msgid "User configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:298 #: src/hed/libs/common/UserConfig.cpp:349 msgid "No configuration file could be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:301 #: src/hed/libs/common/UserConfig.cpp:352 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:438 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:450 #, c-format msgid "" "Unsupported job list type '%s', using 'SQLITE'. Supported types are: SQLITE, " "XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:511 msgid "Loading OToken failed - ignoring its presence" msgstr "" #: src/hed/libs/common/UserConfig.cpp:652 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:654 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or " "'%s' attributes in the client configuration file (e.g. 
'%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:672 #: src/hed/libs/common/UserConfig.cpp:682 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." msgstr "" #: src/hed/libs/common/UserConfig.cpp:708 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:730 #, c-format msgid "Using proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:733 #, c-format msgid "Using certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:734 #, c-format msgid "Using key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:738 #, c-format msgid "Using CA certificate directory: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:742 #, fuzzy msgid "Using OToken" msgstr "Nutze space token %s" #: src/hed/libs/common/UserConfig.cpp:755 #: src/hed/libs/common/UserConfig.cpp:761 #, fuzzy, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "Lege Verzeichnis %s an" #: src/hed/libs/common/UserConfig.cpp:767 #, fuzzy, c-format msgid "Can not access VOMS file/directory: %s." msgstr "Lege Verzeichnis %s an" #: src/hed/libs/common/UserConfig.cpp:781 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:794 #, c-format msgid "Loading configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:828 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:853 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:869 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:891 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:937 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:982 #, c-format msgid "Unknown section %s, ignoring it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:986 #, c-format msgid "Configuration (%s) loaded" msgstr "" #: src/hed/libs/common/UserConfig.cpp:989 #, c-format msgid "Could not load configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1086 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1099 #, c-format msgid "Unable to create %s directory." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:1108 #, c-format msgid "Configuration example file created (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1110 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1115 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1120 #, c-format msgid "Example configuration (%s) not created." msgstr "" #: src/hed/libs/common/UserConfig.cpp:1125 #, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "" #: src/hed/libs/common/UserConfig.cpp:1143 #, c-format msgid "%s directory created" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1145 #: src/hed/libs/common/UserConfig.cpp:1172 src/hed/libs/data/DataMover.cpp:703 #, fuzzy, c-format msgid "Failed to create directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:312 msgid "Succeeded to verify the signature under " msgstr "Erfolgreiche Verifikation der Signatur unter " #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "Failed to verify the signature under " msgstr "Fehler bei der Überprüfung der Signatur unter " #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: 
src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:56 msgid "Creating delegation credential to ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:66 #: src/hed/libs/communication/ClientX509Delegation.cpp:269 msgid "DelegateCredentialsInit failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:70 #: src/hed/libs/communication/ClientX509Delegation.cpp:124 #: src/hed/libs/communication/ClientX509Delegation.cpp:159 #: src/hed/libs/communication/ClientX509Delegation.cpp:214 #: src/hed/libs/communication/ClientX509Delegation.cpp:273 msgid "There is no SOAP response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:75 msgid "There is no X509 request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:80 msgid "There is no Format request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:88 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:101 #: src/hed/libs/communication/ClientX509Delegation.cpp:189 msgid "DelegateProxy failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:120 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:128 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:136 #: src/hed/libs/communication/ClientX509Delegation.cpp:164 #: src/hed/libs/communication/ClientX509Delegation.cpp:219 #: src/hed/libs/communication/ClientX509Delegation.cpp:304 msgid "There is no SOAP connection chain configured" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:142 msgid "Creating delegation to CREAM delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:155 msgid "Delegation getProxyReq request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:175 msgid "Creating delegation to CREAM delegation service failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:210 msgid "Delegation putProxy request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:224 msgid "Creating delegation to CREAM delegation failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:239 msgid "Getting delegation credential from ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:278 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:283 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:291 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:300 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" #: src/hed/libs/compute/Broker.cpp:54 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:64 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:153 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "" "Matchmaking, ComputingShareName of ExecutionTarget (%s) is not defined, but " "requested queue is (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "" "Matchmaking, ComputingShare (%s) does not match requested queue (%s): " "skipping" msgstr "" #: src/hed/libs/compute/Broker.cpp:184 #, c-format msgid "Matchmaking, ComputingShare (%s) matches requested queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:192 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:197 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "" #: src/hed/libs/compute/Broker.cpp:203 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK or WARNING (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:208 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:215 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: " "%s" msgstr "" #: src/hed/libs/compute/Broker.cpp:220 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:246 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:275 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:287 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:302 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:309 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:314 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:320 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:325 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:333 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:339 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:344 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:352 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:357 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:365 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:370 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:378 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:383 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:391 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:396 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:405 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:409 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:417 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:424 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:430 src/hed/libs/compute/Broker.cpp:451 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:438 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:445 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:459 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:464 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:472 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:478 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:484 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:492 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:497 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:505 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:512 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:535 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:552 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:588 #, fuzzy msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "Kann ExecutionTarget nicht zu Python Objekt konvertieren" #: src/hed/libs/compute/Broker.cpp:612 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, fuzzy, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Konnter Broker %s nicht laden" #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "Broker %s geladen" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:27 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:31 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:38 #, c-format msgid "Uniq is adding service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:61 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:64 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:67 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, fuzzy, c-format msgid "Failed to start querying the endpoint on %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, fuzzy, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "" "Ошибка при попытке открыть файл:\n" " %1" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:98 #: src/hed/libs/compute/JobControllerPlugin.cpp:107 #: src/hed/libs/compute/SubmitterPlugin.cpp:167 #: src/hed/libs/compute/SubmitterPlugin.cpp:177 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, fuzzy, c-format msgid "%s plugin \"%s\" not found." msgstr "clientxrsl nicht gefunden" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, fuzzy, c-format msgid "%s %s could not be created." msgstr "Der Job Status konnte nicht ermittelt werden" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, c-format msgid "Loaded %s %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:132 #, c-format msgid "" "Computing endpoint %s (type %s) added to the list for submission brokering" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, fuzzy, c-format msgid "Address: %s" msgstr "Antwort: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, fuzzy, c-format msgid "Place: %s" msgstr "Name %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, fuzzy, c-format msgid "Country: %s" msgstr "Anfrage: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, fuzzy, c-format msgid "Postal code: %s" msgstr "Listen-Eintrag: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:243 #, fuzzy, c-format msgid "Latitude: %f" msgstr "Fehler: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:244 #, c-format msgid "Longitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:250 #, fuzzy, c-format msgid "Owner: %s" msgstr "Anfrage: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:257 #, fuzzy, c-format msgid "ID: %s" msgstr "ID: " #: src/hed/libs/compute/ExecutionTarget.cpp:258 #, fuzzy, c-format msgid "Type: %s" msgstr "Proxy Typ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:263 #, fuzzy, c-format msgid "URL: %s" msgstr "HER: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, fuzzy, c-format msgid "Interface: %s" msgstr "Quelle: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:266 #, fuzzy msgid "Interface versions:" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/compute/ExecutionTarget.cpp:271 msgid "Interface extensions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:276 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, c-format msgid "Technology: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:282 #, fuzzy msgid "Supported Profiles:" msgstr "Nicht-unterstützte URL angegeben" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, fuzzy, c-format msgid "Implementor: %s" msgstr "Server Implementation: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, fuzzy, c-format msgid "Implementation name: %s" msgstr "Server Implementation: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, fuzzy, c-format msgid "Quality level: %s" msgstr "Policy Zeile: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, fuzzy, c-format msgid "Health state: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, fuzzy, c-format msgid "Health state info: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:291 #, fuzzy, c-format msgid "Serving state: %s" msgstr "Start start" #: src/hed/libs/compute/ExecutionTarget.cpp:292 #, fuzzy, c-format msgid "Issuer CA: %s" msgstr "Anfrage: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:294 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Downtime starts: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:299 #, c-format msgid "Downtime ends: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, fuzzy, c-format msgid "Staging: %s" msgstr "Kontaktiere %s" #: src/hed/libs/compute/ExecutionTarget.cpp:302 #, fuzzy msgid "Job descriptions:" msgstr "Job Beschreibung: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:314 #, fuzzy, c-format msgid "Scheme: %s" msgstr "Quelle: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:317 #, fuzzy, c-format msgid "Rule: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, fuzzy, c-format msgid "Mapping 
queue: %s" msgstr "" "\n" " СоответÑтвие раздел-Ñегмент:\n" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Max wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Max total wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Min wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, fuzzy, c-format msgid "Default wall-time: %s" msgstr "Voreinstellung: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Max CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Min CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, fuzzy, c-format msgid "Default CPU time: %s" msgstr "Voreinstellung: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, fuzzy, c-format msgid "Max running jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, fuzzy, c-format msgid "Max waiting jobs: %i" msgstr "Herunterladen des Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max user running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max slots per job: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Max stage in streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max stage out streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, fuzzy, c-format msgid "Scheduling policy: %s" msgstr "ARC delegation policy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Max virtual memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:348 #, c-format msgid "Max disk space: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:349 #, fuzzy, c-format msgid "Default Storage Service: %s" msgstr "Delegation service: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:350 msgid "Supports preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:351 msgid "Doesn't support preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, fuzzy, c-format msgid "Total jobs: %i" msgstr "alle Jobs" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, fuzzy, c-format msgid "Running jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, fuzzy, c-format msgid "Local running jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, fuzzy, c-format msgid "Waiting jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, fuzzy, c-format msgid "Local waiting jobs: %i" msgstr "Herunterladen des Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Local suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, fuzzy, c-format msgid "Staging jobs: %i" msgstr "Aufräumen von Job: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, fuzzy, c-format msgid "Estimated average waiting time: 
%s" msgstr "start_reading_ftp: angegelegt um: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:362 #, fuzzy, c-format msgid "Estimated worst waiting time: %s" msgstr "start_reading_ftp: angegelegt um: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:363 #, c-format msgid "Free slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:365 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:368 #, c-format msgid " %s: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:369 #, c-format msgid " unspecified: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Used slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:373 #, fuzzy, c-format msgid "Requested slots: %i" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:374 #, fuzzy, c-format msgid "Reservation policy: %s" msgstr "ARC delegation policy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:381 #, fuzzy, c-format msgid "Resource manager: %s" msgstr "Modulname: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, fuzzy, c-format msgid " (%s)" msgstr "%s (%s)" #: src/hed/libs/compute/ExecutionTarget.cpp:387 #, c-format msgid "Total physical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:388 #, c-format msgid "Total logical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:389 #, c-format msgid "Total slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Supports advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Doesn't support advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:392 msgid "Supports bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Doesn't support bulk Submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:394 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:395 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:397 #, fuzzy msgid "Network information:" msgstr "Angabe des aktuellen Versionsbezeichners" #: src/hed/libs/compute/ExecutionTarget.cpp:402 msgid "Working area is shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:403 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Working area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:405 #, c-format msgid "Working area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:406 #, c-format msgid "Working area life time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:407 #, c-format msgid "Cache area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:408 #, c-format msgid "Cache area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:414 #, fuzzy, c-format msgid "Platform: %s" msgstr "ProxyStore: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment supports inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment does not support inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:417 msgid "Execution environment supports outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:418 msgid "Execution environment does not support outbound connections" msgstr "" #: 
src/hed/libs/compute/ExecutionTarget.cpp:419 msgid "Execution environment is a virtual machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:420 msgid "Execution environment is a physical machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, fuzzy, c-format msgid "CPU vendor: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, fuzzy, c-format msgid "CPU model: %s" msgstr "Policy Zeile: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, fuzzy, c-format msgid "CPU version: %s" msgstr "%s version %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:425 #, c-format msgid "Main memory size: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:426 #, fuzzy, c-format msgid "OS family: %s" msgstr "PASV fehlgeschlagen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:427 #, fuzzy, c-format msgid "OS name: %s" msgstr "Name %s" #: src/hed/libs/compute/ExecutionTarget.cpp:428 #, fuzzy, c-format msgid "OS version: %s" msgstr "%s version %s" #: src/hed/libs/compute/ExecutionTarget.cpp:435 #, fuzzy msgid "Computing service:" msgstr "Delegation service: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:459 #, c-format msgid "%d Endpoints" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:464 #, fuzzy msgid "Endpoint Information:" msgstr "Angabe des aktuellen Versionsbezeichners" #: src/hed/libs/compute/ExecutionTarget.cpp:476 #, c-format msgid "%d Batch Systems" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:481 msgid "Batch System Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:487 #, fuzzy msgid "Installed application environments:" msgstr "Initialisierte replication Umgebung" #: src/hed/libs/compute/ExecutionTarget.cpp:500 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:505 #, fuzzy msgid "Share Information:" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:515 #, fuzzy msgid "Mapping policy:" msgstr "ARC delegation policy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #, c-format msgid " Computing endpoint URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:535 #, fuzzy, c-format msgid " Computing endpoint interface name: %s" msgstr "Erstelle Client Schnittstelle" #: src/hed/libs/compute/ExecutionTarget.cpp:537 #: src/hed/libs/compute/Job.cpp:579 #, c-format msgid " Queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:540 #, fuzzy, c-format msgid " Mapping queue: %s" msgstr " Mapping-Queue: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:543 #, fuzzy, c-format msgid " Health state: %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:548 msgid "Service information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:553 msgid " Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:560 #, fuzzy msgid "Batch system information:" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/compute/ExecutionTarget.cpp:563 msgid "Queue information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:570 #, fuzzy msgid " Benchmark information:" msgstr "Ungültige 
Authentisierungs-Information" #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "Der Service gibt seinen Typ nicht an." #: src/hed/libs/compute/GLUE2.cpp:58 #, fuzzy msgid "The ComputingService doesn't advertise its Quality Level." msgstr "Der Service gibt seinen Quality Level nicht an." #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "Der Service gibt keinen Health State an." #: src/hed/libs/compute/GLUE2.cpp:117 #, fuzzy msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "Der Service gibt seinen Quality Level nicht an." #: src/hed/libs/compute/GLUE2.cpp:128 #, fuzzy msgid "The ComputingService doesn't advertise its Interface." msgstr "Der Service gibt seine Interface nicht an." #: src/hed/libs/compute/GLUE2.cpp:160 #, fuzzy msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "Der Servcice gibt seinen Serving State nicht an." #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/libs/compute/GLUE2.cpp:420 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" "Konnte benchmark XML nicht parsen:\n" "%s" #: src/hed/libs/compute/Job.cpp:328 #, fuzzy msgid "Unable to detect format of job record." msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:549 #, c-format msgid "Job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:551 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:552 #, fuzzy, c-format msgid " State: %s" msgstr "Name %s" #: src/hed/libs/compute/Job.cpp:555 #, c-format msgid " Specific state: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:559 src/hed/libs/compute/Job.cpp:583 #, c-format msgid " Waiting Position: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:563 #, c-format msgid " Exit Code: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:567 #, fuzzy, c-format msgid " Job Error: %s" msgstr "Error: %s" #: src/hed/libs/compute/Job.cpp:572 #, c-format msgid " Owner: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:576 #, c-format msgid " Other Messages: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:581 #, fuzzy, c-format msgid " Requested Slots: %d" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/compute/Job.cpp:586 #, c-format msgid " Stdin: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:588 #, c-format msgid " Stdout: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:590 #, c-format msgid " Stderr: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:592 #, fuzzy, c-format msgid " Computing Service Log Directory: %s" msgstr "Fehler beim listen von Datei oder Verzeichnis: %s" #: src/hed/libs/compute/Job.cpp:595 #, c-format msgid " Submitted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:598 #, c-format msgid " End Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:601 #, c-format msgid " Submitted from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:604 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:607 #, c-format msgid " Requested CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s (%s per slot)" msgstr "" #: src/hed/libs/compute/Job.cpp:616 #, c-format msgid " 
Used CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Used Wall Time: %s (%s per slot)" msgstr "" #: src/hed/libs/compute/Job.cpp:626 #, c-format msgid " Used Wall Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Used Memory: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:635 #, c-format msgid " Results were deleted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:636 #, c-format msgid " Results must be retrieved before: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:640 #, c-format msgid " Proxy valid until: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:644 #, c-format msgid " Entry valid from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Entry valid for: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:651 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:659 #, fuzzy, c-format msgid " ID on service: %s" msgstr "Delegation service: %s" #: src/hed/libs/compute/Job.cpp:660 #, c-format msgid " Service information URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:661 #, c-format msgid " Job status URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:662 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:663 #, fuzzy, c-format msgid " Stagein directory URL: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/libs/compute/Job.cpp:664 #, fuzzy, c-format msgid " Stageout directory URL: %s" msgstr "Fehler beim Listen von Verzeichnis: %s" #: src/hed/libs/compute/Job.cpp:665 #, fuzzy, c-format msgid " Session directory URL: %s" msgstr "Lege Verzeichnis %s an" #: src/hed/libs/compute/Job.cpp:667 #, fuzzy msgid " Delegation IDs:" msgstr "Delegation ID: %s" #: src/hed/libs/compute/Job.cpp:849 #, c-format msgid "Unable to handle job (%s), no interface specified." msgstr "" #: src/hed/libs/compute/Job.cpp:854 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:876 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:881 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:885 #, fuzzy, c-format msgid "Downloading job: %s" msgstr "Herunterladen des Job: %s" #: src/hed/libs/compute/Job.cpp:891 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:897 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of log " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:903 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:911 #, c-format msgid "%s directory exist! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:923 #, fuzzy, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:944 #, fuzzy, c-format msgid "Unable to retrieve list of log files to download for job %s" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:963 #, fuzzy, c-format msgid "No files to retrieve for job %s" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/Job.cpp:969 #, fuzzy, c-format msgid "Failed to create directory %s! Skipping job." 
msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/compute/Job.cpp:986 #, fuzzy, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "Fahler bei Herunterladen %s zu %s" #: src/hed/libs/compute/Job.cpp:992 #, fuzzy, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/compute/Job.cpp:999 #, fuzzy, c-format msgid "Failed downloading %s to %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/hed/libs/compute/Job.cpp:1012 #, fuzzy, c-format msgid "Unable to initialize handler for %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/compute/Job.cpp:1017 #, c-format msgid "Unable to list files at %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1060 msgid "Now copying (from -> to)" msgstr "" #: src/hed/libs/compute/Job.cpp:1061 #, c-format msgid " %s -> %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1076 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1087 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1109 #, c-format msgid "File download failed: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1148 src/hed/libs/compute/Job.cpp:1177 #: src/hed/libs/compute/Job.cpp:1209 src/hed/libs/compute/Job.cpp:1242 #, fuzzy, c-format msgid "Waiting for lock on file %s" msgstr "Warte vor Antwort" #: src/hed/libs/compute/JobControllerPlugin.cpp:99 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:108 #, fuzzy, c-format msgid "JobControllerPlugin %s could not be created" msgstr "Keine Job controller Plugins geladen" #: src/hed/libs/compute/JobControllerPlugin.cpp:113 #, fuzzy, c-format msgid "Loaded JobControllerPlugin %s" msgstr "Keine Job controller Plugins geladen" #: src/hed/libs/compute/JobDescription.cpp:26 #, c-format msgid ": %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:28 #, c-format msgid ": %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:144 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:154 #, fuzzy, c-format msgid " Annotation: %s" msgstr "Ziel: %s" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:166 #, c-format msgid " Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:177 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:180 #, c-format msgid " RemoteLogging: %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:188 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:189 #, c-format msgid " Environment: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #, c-format msgid " PreExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:205 #: src/hed/libs/compute/JobDescription.cpp:223 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:208 #: src/hed/libs/compute/JobDescription.cpp:226 msgid " No exit code for successful execution specified." 
msgstr "" #: src/hed/libs/compute/JobDescription.cpp:220 #, c-format msgid " PostExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:236 #, fuzzy, c-format msgid " Access control: %s" msgstr "Zugriffslist location: %s" #: src/hed/libs/compute/JobDescription.cpp:240 #, fuzzy, c-format msgid " Processing start time: %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/hed/libs/compute/JobDescription.cpp:243 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:257 #, fuzzy, c-format msgid " Credential service: %s" msgstr "Delegation service: %s" #: src/hed/libs/compute/JobDescription.cpp:267 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:285 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:301 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:304 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:314 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:317 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:322 msgid " Run time environment requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:334 msgid " Inputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:335 #: src/hed/libs/compute/JobDescription.cpp:357 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:337 msgid " Is executable: true" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:341 #, fuzzy, c-format msgid " Sources: %s" msgstr "Quelle: %s" #: src/hed/libs/compute/JobDescription.cpp:343 #, fuzzy, c-format msgid " Sources.DelegationID: %s" msgstr "Delegation ID: %s" #: src/hed/libs/compute/JobDescription.cpp:347 #, c-format msgid " Sources.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:356 msgid " Outputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:362 #, fuzzy, c-format msgid " Targets.DelegationID: %s" msgstr "Delegation ID: %s" #: src/hed/libs/compute/JobDescription.cpp:366 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:373 #, fuzzy, c-format msgid " DelegationID element: %s" msgstr "Delegation ID: %s" #: src/hed/libs/compute/JobDescription.cpp:380 #, fuzzy, c-format msgid " Other attributes: [%s], %s" msgstr "Attribut: %s - %s" #: src/hed/libs/compute/JobDescription.cpp:446 msgid "Empty job description source string" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:479 #, fuzzy msgid "No job description parsers available" msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/hed/libs/compute/JobDescription.cpp:481 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:489 #, fuzzy, c-format msgid "%s parsing error" msgstr "Fataler Fehler: %s" #: src/hed/libs/compute/JobDescription.cpp:505 #, fuzzy msgid "No job description parser was able to interpret job description" msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/libs/compute/JobDescription.cpp:515 msgid "" "Job description language is not specified, unable to output description." 
msgstr "" #: src/hed/libs/compute/JobDescription.cpp:527 #, fuzzy, c-format msgid "Generating %s job description output" msgstr "Eine fehler geschah während des Generieres der Job Beschreibung." #: src/hed/libs/compute/JobDescription.cpp:543 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:556 #, fuzzy, c-format msgid "Two input files have identical name '%s'." msgstr "Zwei Dateien haben identische Namen: '%s'." #: src/hed/libs/compute/JobDescription.cpp:575 #: src/hed/libs/compute/JobDescription.cpp:588 #, fuzzy, c-format msgid "Cannot stat local input file '%s'" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/hed/libs/compute/JobDescription.cpp:608 #, fuzzy, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "Konnte lokale Inputdateien nicht hochladen" #: src/hed/libs/compute/JobDescription.cpp:650 #, fuzzy msgid "Unable to select runtime environment" msgstr "Kann run time environment nicht auswählen." #: src/hed/libs/compute/JobDescription.cpp:657 #, fuzzy msgid "Unable to select middleware" msgstr "Kann middleware nicht auswählen." #: src/hed/libs/compute/JobDescription.cpp:664 #, fuzzy msgid "Unable to select operating system." msgstr "Kann Operating System nciht auswählen." #: src/hed/libs/compute/JobDescription.cpp:683 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:695 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:699 #, fuzzy, c-format msgid "No jobdescription resulted at %d test" msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, fuzzy, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "JobDescription Klasse ist kein Objekt" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, fuzzy, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "Gültige JobDescription gefunden" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:125 #, fuzzy, c-format msgid "Unable to create data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:133 #, fuzzy, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:142 #, fuzzy, c-format msgid "Unable to create jobs_new table in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #, fuzzy, c-format msgid "Unable to transfer from jobs to jobs_new in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:154 #, fuzzy, c-format msgid "Unable to drop jobs in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:160 #, fuzzy, c-format msgid "Unable to rename jobs table in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:170 #, fuzzy, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Konnte Job Status-Informationen nicht erhalten." 
#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:178 #, fuzzy, c-format msgid "Failed checking database (%s)" msgstr "Löschen fehlgeschlagen von job: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:180 #, fuzzy, c-format msgid "Job database connection established successfully (%s)" msgstr "erfolgreich angelegt, ID: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:202 #, fuzzy, c-format msgid "Error from SQLite: %s: %s" msgstr "Fehler bei Suche nach LFN anhand guid %s: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:205 #, fuzzy, c-format msgid "Error from SQLite: %s" msgstr "Error: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:226 #: src/hed/libs/compute/JobInformationStorageXML.cpp:36 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:230 #: src/hed/libs/compute/JobInformationStorageXML.cpp:40 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:237 #: src/hed/libs/compute/JobInformationStorageXML.cpp:47 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:367 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:374 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:381 #, fuzzy, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:627 #: src/hed/libs/compute/JobInformationStorageXML.cpp:146 #, fuzzy, c-format msgid "Unable to truncate job database (%s)" msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:660 #, fuzzy, c-format msgid "Unable to determine error (%d)" msgstr "Konnte Broker %s nicht laden" #: src/hed/libs/compute/JobInformationStorageXML.cpp:60 #: src/hed/libs/compute/JobInformationStorageXML.cpp:232 #: src/hed/libs/compute/JobInformationStorageXML.cpp:273 #, fuzzy, c-format msgid "Waiting for lock on job list file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:171 #, c-format msgid "Will remove %s on service %s." 
msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:40 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:45 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:50 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:55 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:60 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:69 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:76 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "" #: src/hed/libs/compute/Software.cpp:199 src/hed/libs/compute/Software.cpp:210 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:205 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:214 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:219 msgid "All software requirements satisfied." msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, fuzzy, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Versuche den Job erneut hochzuladen zu %s" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 #, fuzzy msgid "Trying all available interfaces" msgstr "Erstelle Client Schnitstelle" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:63 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:72 #, fuzzy, c-format msgid "Failed reading file %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:86 #, fuzzy, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:178 #, fuzzy, c-format msgid "SubmitterPlugin %s could not be created" msgstr "Der Job Status konnte nicht ermittelt werden" #: src/hed/libs/compute/SubmitterPlugin.cpp:183 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 #, fuzzy msgid "Invalid job description" msgstr "Ungültige JobDescription:" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 #, fuzzy msgid "Failed to submit job" msgstr "Konnte job nicht starten" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, fuzzy, c-format msgid "Failed to write to local job list %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in ADL or XRSL format." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "define the requested format (nordugrid:xrsl, emies:adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 #, fuzzy msgid "show the original job description" msgstr "" " -o, -stdout вывеÑти файл Ñтандартого выхода задачи (по\n" " умолчанию)" #: src/hed/libs/compute/test_jobdescription.cpp:43 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:72 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 #, fuzzy msgid " [ JobDescription tester ] " msgstr "Zu sendende Job-Beschreibung : %s" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 #, fuzzy msgid "Unable to parse." 
msgstr "Konnter Broker %s nicht laden" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/CertUtil.cpp:127 #, c-format msgid "Error number in store context: %i" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:128 msgid "Self-signed certificate" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:131 #, fuzzy, c-format msgid "The certificate with subject %s is not valid" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/credential/CertUtil.cpp:134 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:137 #, c-format msgid "Certificate with subject %s has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:140 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:142 #, c-format msgid "Certificate verification error: %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:154 msgid "Can not get the certificate type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:194 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:207 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:214 msgid "The available CRL is not yet valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:223 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:229 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:252 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:270 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:279 msgid "Can't allocate memory for CA policy path" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:325 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:339 #: src/hed/libs/credential/Credential.cpp:1727 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:385 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:428 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:432 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:464 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:48 #, fuzzy, c-format msgid "OpenSSL error string: %s" msgstr "Fehler bei Traversieren: %s" #: src/hed/libs/credential/Credential.cpp:169 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:183 #, fuzzy msgid "Can't reset the input" msgstr "Kann Python Liste nicht anlegen" #: src/hed/libs/credential/Credential.cpp:208 #: src/hed/libs/credential/Credential.cpp:244 msgid "Can't get the first byte of input BIO 
to get its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:220 #, fuzzy msgid "Can not read certificate/key string" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/Credential.cpp:433 #, c-format msgid "Can not find certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:438 #, c-format msgid "Can not read certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:476 msgid "Can not read certificate string" msgstr "" #: src/hed/libs/credential/Credential.cpp:496 msgid "Certificate format is PEM" msgstr "" #: src/hed/libs/credential/Credential.cpp:523 msgid "Certificate format is DER" msgstr "" #: src/hed/libs/credential/Credential.cpp:552 msgid "Certificate format is PKCS" msgstr "" #: src/hed/libs/credential/Credential.cpp:578 msgid "Certificate format is unknown" msgstr "" #: src/hed/libs/credential/Credential.cpp:586 #, c-format msgid "Can not find key file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:591 #, fuzzy, c-format msgid "Can not open key file %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/credential/Credential.cpp:610 msgid "Can not read key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:673 #: src/hed/libs/credential/VOMSUtil.cpp:210 msgid "Failed to lock arccredential library in memory" msgstr "" #: src/hed/libs/credential/Credential.cpp:685 msgid "Certificate verification succeeded" msgstr "" #: src/hed/libs/credential/Credential.cpp:689 msgid "Certificate verification failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:702 #: src/hed/libs/credential/Credential.cpp:722 #: src/hed/libs/credential/Credential.cpp:742 #: src/hed/libs/credential/Credential.cpp:1024 #: src/hed/libs/credential/Credential.cpp:2398 #: src/hed/libs/credential/Credential.cpp:2428 #, fuzzy msgid "Failed to initialize extensions member for Credential" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/libs/credential/Credential.cpp:787 #, fuzzy, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/libs/credential/Credential.cpp:799 #, fuzzy, c-format msgid "Unsupported proxy version is requested - %s" msgstr "Nicht-unterstzütztes Protrokoll in URL %s" #: src/hed/libs/credential/Credential.cpp:810 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:857 #, c-format msgid "Error: can't open policy file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:870 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "" #: src/hed/libs/credential/Credential.cpp:929 #: src/hed/libs/credential/Credential.cpp:962 #: src/hed/libs/credential/Credential.cpp:1029 msgid "Certificate/Proxy path is empty" msgstr "" #: src/hed/libs/credential/Credential.cpp:1087 #: src/hed/libs/credential/Credential.cpp:2937 msgid "Failed to duplicate extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1091 #, fuzzy msgid "Failed to add extension into credential extensions" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/libs/credential/Credential.cpp:1104 #, fuzzy msgid "Certificate information collection failed" msgstr "Ungültige Authentisierungs-Information" #: src/hed/libs/credential/Credential.cpp:1143 #: src/hed/libs/credential/Credential.cpp:1148 msgid "Can not convert string into ASN1_OBJECT" msgstr "" #: src/hed/libs/credential/Credential.cpp:1155 msgid 
"Can not create ASN1_OCTET_STRING" msgstr "" #: src/hed/libs/credential/Credential.cpp:1164 #, fuzzy msgid "Can not allocate memory for extension for proxy certificate" msgstr "" "Der Transfer des signierten delegation certificate zu Service schlug fehl" #: src/hed/libs/credential/Credential.cpp:1174 msgid "Can not create extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1210 #: src/hed/libs/credential/Credential.cpp:1378 msgid "BN_set_word failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1219 #: src/hed/libs/credential/Credential.cpp:1387 msgid "RSA_generate_key_ex failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1228 #: src/hed/libs/credential/Credential.cpp:1395 msgid "BN_new || RSA_new failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1239 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1244 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1247 msgid "Generate new X509 request!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1252 msgid "Setting subject name!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1474 msgid "PEM_write_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1290 #: src/hed/libs/credential/Credential.cpp:1331 #: src/hed/libs/credential/Credential.cpp:1506 #: src/hed/libs/credential/Credential.cpp:1526 msgid "Can not create BIO for request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1308 msgid "Failed to write request into string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1335 #: src/hed/libs/credential/Credential.cpp:1340 #: src/hed/libs/credential/Credential.cpp:1530 msgid "Can not set writable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1346 #: src/hed/libs/credential/Credential.cpp:1535 msgid "Wrote request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1348 #: src/hed/libs/credential/Credential.cpp:1538 msgid "Failed to write request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1368 msgid "The credential's private key has already been initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1416 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1426 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1444 #: src/hed/libs/credential/Credential.cpp:1451 #: src/hed/libs/credential/Credential.cpp:2029 #: src/hed/libs/credential/Credential.cpp:2037 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1481 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1491 msgid "Can not generate X509 request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1493 msgid "Can not set private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1591 msgid "Failed to get private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1610 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1618 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1625 msgid "Failed to get public key" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:1663 #, c-format msgid "Certiticate chain number %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1691 msgid "NULL BIO passed to InquireRequest" msgstr "" #: src/hed/libs/credential/Credential.cpp:1694 msgid "PEM_read_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1698 msgid "d2i_X509_REQ_bio failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1720 msgid "Missing data in DER encoded PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1732 msgid "Can not create PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1742 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1746 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1762 #, c-format msgid "Cert Type: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1775 #: src/hed/libs/credential/Credential.cpp:1794 msgid "Can not create BIO for parsing request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1780 msgid "Read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1783 msgid "Failed to read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1798 msgid "Can not set readable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1803 msgid "Read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Failed to read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1846 msgid "Can not convert private key to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2010 msgid "Credential is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2016 #, fuzzy msgid "Failed to duplicate X509 structure" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/Credential.cpp:2021 msgid "Failed to initialize X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2044 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2048 #: src/hed/libs/credential/Credential.cpp:2096 msgid "Can not add X509 extension to proxy cert" msgstr "" #: src/hed/libs/credential/Credential.cpp:2064 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2076 #: src/hed/libs/credential/Credential.cpp:2085 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2092 #, fuzzy msgid "Can not create extension for keyUsage" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/libs/credential/Credential.cpp:2105 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2110 msgid "Can not copy extended KeyUsage extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2115 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2125 msgid "Can not compute digest of public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2136 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2142 msgid "Can not create name entry CN for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2147 msgid "Can not set CN in 
proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2155 msgid "Can not set issuer's subject for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2160 msgid "Can not set version number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2168 msgid "Can not set serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2174 #, fuzzy msgid "Can not duplicate serial number for proxy certificate" msgstr "" "Der Transfer des signierten delegation certificate zu Service schlug fehl" #: src/hed/libs/credential/Credential.cpp:2180 msgid "Can not set the lifetime for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2184 msgid "Can not set pubkey for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2200 #: src/hed/libs/credential/Credential.cpp:2827 msgid "The credential to be signed is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #: src/hed/libs/credential/Credential.cpp:2831 msgid "The credential to be signed contains no request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2208 #: src/hed/libs/credential/Credential.cpp:2835 msgid "The BIO for output is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2222 #: src/hed/libs/credential/Credential.cpp:2842 msgid "Error when extracting public key from request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2227 #: src/hed/libs/credential/Credential.cpp:2846 msgid "Failed to verify the request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2231 msgid "Failed to add issuer's extension into proxy" msgstr "" #: src/hed/libs/credential/Credential.cpp:2255 msgid "Failed to find extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2267 msgid "Can not get the issuer's private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2274 #: src/hed/libs/credential/Credential.cpp:2878 msgid "There is no digest in issuer's private key object" msgstr "" #: src/hed/libs/credential/Credential.cpp:2279 #: src/hed/libs/credential/Credential.cpp:2882 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2290 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" #: src/hed/libs/credential/Credential.cpp:2296 msgid "Failed to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2298 msgid "Succeeded to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2303 msgid "Failed to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2305 msgid "Succeeded to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2310 #: src/hed/libs/credential/Credential.cpp:2319 msgid "Output the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2313 msgid "Can not convert signed proxy cert into PEM format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2322 msgid "Can not convert signed proxy cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2338 #: src/hed/libs/credential/Credential.cpp:2361 msgid "Can not create BIO for signed proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2365 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2370 msgid "Wrote signed proxy certificate into a file" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:2373 msgid "Failed to write signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2408 #: src/hed/libs/credential/Credential.cpp:2447 #, c-format msgid "ERROR: %s" msgstr "FEHLER: %s" #: src/hed/libs/credential/Credential.cpp:2455 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2500 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2505 msgid "error converting number from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2532 msgid "file name too long" msgstr "" #: src/hed/libs/credential/Credential.cpp:2555 msgid "error converting serial to ASN.1 format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2588 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2593 msgid "add_word failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2598 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Error initialising X509 store" msgstr "" #: src/hed/libs/credential/Credential.cpp:2625 msgid "Out of memory when generating random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2637 msgid "CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2661 #, c-format msgid "Failed to load extension section: %s" msgstr "Fehler bei Laden von Extension-Sektion: %s" #: src/hed/libs/credential/Credential.cpp:2698 msgid "malloc error" msgstr "" #: src/hed/libs/credential/Credential.cpp:2702 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2718 #: src/hed/libs/credential/Credential.cpp:2739 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2730 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2767 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2771 #, c-format msgid "No value provided for Subject Attribute %s, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2812 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2822 msgid "The private key for signing is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2901 #, c-format msgid "Error when loading the extension config file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2905 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2953 msgid "Can not sign an EEC" msgstr "" #: src/hed/libs/credential/Credential.cpp:2957 msgid "Output EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2960 msgid "Can not convert signed EEC cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2974 #: src/hed/libs/credential/Credential.cpp:2993 msgid "Can not create BIO for signed EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2997 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:3002 msgid "Wrote signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:3005 msgid "Failed to write signed EEC 
certificate into a file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:143 #, fuzzy msgid "Error writing raw certificate" msgstr "Fehler beim Listen der replicas: %s" #: src/hed/libs/credential/NSSUtil.cpp:222 #, fuzzy msgid "Failed to add RFC proxy OID" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:225 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:231 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:234 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:240 #, fuzzy msgid "Failed to add inheritAll OID" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:243 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:249 #, fuzzy msgid "Failed to add Independent OID" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:252 #, fuzzy, c-format msgid "Succeeded to add Independent OID, tag %d is returned" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:258 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:261 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:290 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:301 #, fuzzy msgid "Succeeded to initialize NSS" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:323 #, fuzzy, c-format msgid "Failed to read attribute %x from private key." 
msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:375 #, fuzzy msgid "Succeeded to get credential" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:376 #, fuzzy msgid "Failed to get credential" msgstr "Fehler bei Bezug von FTP Datei" #: src/hed/libs/credential/NSSUtil.cpp:438 #, fuzzy msgid "p12 file is empty" msgstr "Policy is leer" #: src/hed/libs/credential/NSSUtil.cpp:448 #, fuzzy msgid "Unable to write to p12 file" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/libs/credential/NSSUtil.cpp:464 #, fuzzy msgid "Failed to open p12 file" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/libs/credential/NSSUtil.cpp:492 #, fuzzy msgid "Failed to allocate p12 context" msgstr "Fehler bei Reservieren von Platz" #: src/hed/libs/credential/NSSUtil.cpp:1200 #, fuzzy msgid "Failed to find issuer certificate for proxy certificate" msgstr "" "Der Transfer des signierten delegation certificate zu Service schlug fehl" #: src/hed/libs/credential/NSSUtil.cpp:1351 #, fuzzy, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:1357 #, fuzzy, c-format msgid "Failed to find certificates by nickname: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1375 #: src/hed/libs/credential/NSSUtil.cpp:1411 #, fuzzy msgid "Certificate does not have a slot" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/credential/NSSUtil.cpp:1381 #, fuzzy msgid "Failed to create export context" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/libs/credential/NSSUtil.cpp:1396 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1403 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1424 #, fuzzy msgid "Failed to create key or certificate safe" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1440 #, fuzzy msgid "Failed to add certificate and key" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1449 #, fuzzy, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:1454 #, fuzzy msgid "Failed to encode PKCS12" msgstr "Fehler beim Senden von body" #: src/hed/libs/credential/NSSUtil.cpp:1457 msgid "Succeeded to export PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1485 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1491 #, fuzzy msgid "Failed to delete certificate" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1505 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1510 #: src/hed/libs/credential/NSSUtil.cpp:2939 #: src/hed/libs/credential/NSSUtil.cpp:2956 #, fuzzy, c-format msgid "Failed to authenticate to token %s" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:1517 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1550 #, fuzzy msgid "Failed to delete private key and certificate" msgstr "Fehler bei Lesen von 
privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1560 #, fuzzy msgid "Failed to delete private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, fuzzy, c-format msgid "Can not find key with name: %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/credential/NSSUtil.cpp:1599 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1601 #, fuzzy msgid "Can not read PEM private key: failed to decrypt" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1603 #: src/hed/libs/credential/NSSUtil.cpp:1605 #, fuzzy msgid "Can not read PEM private key: failed to obtain password" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1606 #, fuzzy msgid "Can not read PEM private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1613 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1650 #, fuzzy msgid "Failed to load private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Succeeded to load PrivateKeyInfo" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1654 #, fuzzy msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "Fehler bei Konvertieren von security information für ARC policy" #: src/hed/libs/credential/NSSUtil.cpp:1655 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1692 #, fuzzy msgid "Failed to import private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1695 #, fuzzy msgid "Succeeded to import private key" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:1708 #: src/hed/libs/credential/NSSUtil.cpp:1750 #: src/hed/libs/credential/NSSUtil.cpp:2889 #, fuzzy msgid "Failed to authenticate to key database" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/libs/credential/NSSUtil.cpp:1717 #, fuzzy msgid "Succeeded to generate public/private key pair" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:1719 #, fuzzy msgid "Failed to generate public/private key pair" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1724 #, fuzzy msgid "Failed to export private key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1791 #, fuzzy msgid "Failed to create subject name" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:1807 #, fuzzy msgid "Failed to create certificate request" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:1820 #, fuzzy msgid "Failed to call PORT_NewArena" msgstr "Fehler beim Reservieren von Speicher" #: src/hed/libs/credential/NSSUtil.cpp:1828 #, fuzzy msgid "Failed to encode the certificate request with DER format" msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1835 msgid "Unknown key or hash type" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1841 #, fuzzy msgid "Failed to sign the certificate request" msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1857 #, fuzzy msgid 
"Failed to output the certificate request as ASCII format" msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1866 #, fuzzy msgid "Failed to output the certificate request as DER format" msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:1875 #, fuzzy, c-format msgid "Succeeded to output the certificate request into %s" msgstr "Erfolgreiche Authentifikation des UsernameToken" #: src/hed/libs/credential/NSSUtil.cpp:1914 #: src/hed/libs/credential/NSSUtil.cpp:1951 #, fuzzy msgid "Failed to read data from input file" msgstr "Konnte Metadaen für Datei %s nicht finden" #: src/hed/libs/credential/NSSUtil.cpp:1930 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1941 #, fuzzy msgid "Failed to convert ASCII to DER" msgstr "Fehler beim Verbinden zu RLS server: %s" #: src/hed/libs/credential/NSSUtil.cpp:1992 msgid "Certificate request is invalid" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2212 #, fuzzy, c-format msgid "The policy language: %s is not supported" msgstr "Der Erhalt von BES Jobs wird nicht unterstützt" #: src/hed/libs/credential/NSSUtil.cpp:2220 #: src/hed/libs/credential/NSSUtil.cpp:2245 #: src/hed/libs/credential/NSSUtil.cpp:2268 #: src/hed/libs/credential/NSSUtil.cpp:2290 #, fuzzy msgid "Failed to new arena" msgstr "Fehler bei Transfer von Daten" #: src/hed/libs/credential/NSSUtil.cpp:2229 #: src/hed/libs/credential/NSSUtil.cpp:2254 #, fuzzy msgid "Failed to create path length" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/libs/credential/NSSUtil.cpp:2232 #: src/hed/libs/credential/NSSUtil.cpp:2257 #: src/hed/libs/credential/NSSUtil.cpp:2277 #: src/hed/libs/credential/NSSUtil.cpp:2299 #, fuzzy msgid "Failed to create policy language" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/libs/credential/NSSUtil.cpp:2700 #, fuzzy, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2707 #, fuzzy, c-format msgid "Can not find certificate with name %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2739 #, fuzzy msgid "Can not allocate memory" msgstr "Fehler beim Reservieren von Speicher" #: src/hed/libs/credential/NSSUtil.cpp:2747 #, fuzzy, c-format msgid "Proxy subject: %s" msgstr "Subjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:2764 #, fuzzy msgid "Failed to start certificate extension" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2769 #, fuzzy msgid "Failed to add key usage extension" msgstr "Fehler beim Lesen von SSL Token während Authentifizierung" #: src/hed/libs/credential/NSSUtil.cpp:2774 #, fuzzy msgid "Failed to add proxy certificate information extension" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2778 #, fuzzy msgid "Failed to add voms AC extension" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/credential/NSSUtil.cpp:2798 #, fuzzy msgid "Failed to retrieve private key for issuer" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Unknown key or hash type of issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2811 #, fuzzy msgid "Failed to set signature algorithm ID" msgstr "Fehler bei der Überprüfung der Signatur unter " #: src/hed/libs/credential/NSSUtil.cpp:2823 #, fuzzy msgid 
"Failed to encode certificate" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2829 #, fuzzy msgid "Failed to allocate item for certificate data" msgstr "Fehler beim Reservieren von Speicher" #: src/hed/libs/credential/NSSUtil.cpp:2835 #, fuzzy msgid "Failed to sign encoded certificate data" msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #: src/hed/libs/credential/NSSUtil.cpp:2844 #, fuzzy, c-format msgid "Failed to open file %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:2855 #, fuzzy, c-format msgid "Succeeded to output certificate to %s" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:2896 #, fuzzy, c-format msgid "Failed to open input certificate file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2913 #, fuzzy msgid "Failed to read input certificate file" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2918 #, fuzzy msgid "Failed to get certificate from certificate file" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2925 #, fuzzy msgid "Failed to allocate certificate trust" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2930 #, fuzzy msgid "Failed to decode trust string" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/credential/NSSUtil.cpp:2944 #: src/hed/libs/credential/NSSUtil.cpp:2961 #, fuzzy msgid "Failed to add certificate to token or database" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2947 #: src/hed/libs/credential/NSSUtil.cpp:2950 #, fuzzy msgid "Succeeded to import certificate" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/libs/credential/NSSUtil.cpp:2964 #: src/hed/libs/credential/NSSUtil.cpp:2967 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2994 #, fuzzy, c-format msgid "Failed to import private key from file: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/credential/NSSUtil.cpp:2996 #, fuzzy, c-format msgid "Failed to import certificate from file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/credential/VOMSConfig.cpp:147 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:163 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:186 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:193 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:137 #, fuzzy, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/libs/credential/VOMSUtil.cpp:144 #, fuzzy, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/libs/credential/VOMSUtil.cpp:302 #: src/hed/libs/credential/VOMSUtil.cpp:571 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:340 #: src/hed/libs/credential/VOMSUtil.cpp:619 #, c-format msgid "VOMS: create attribute: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:917 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:925 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:951 msgid "VOMS: Can not parse AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:981 msgid "" "VOMS: CA directory or CA file must be provided or default setting enabled" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1052 msgid "VOMS: failed to verify AC signature" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1108 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1122 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1157 #, fuzzy, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/credential/VOMSUtil.cpp:1163 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1215 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1248 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1268 #, fuzzy msgid "VOMS: AC signature verification failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/hed/libs/credential/VOMSUtil.cpp:1277 msgid "VOMS: unable to verify certificate chain" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1283 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1306 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1332 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1358 #: src/hed/libs/credential/VOMSUtil.cpp:1427 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1365 #: src/hed/libs/credential/VOMSUtil.cpp:1434 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1375 #: src/hed/libs/credential/VOMSUtil.cpp:1450 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1391 #: src/hed/libs/credential/VOMSUtil.cpp:1467 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1400 #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1443 msgid "VOMS: failed to access IETFATTR attribute" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1538 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1556 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1562 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1602 #: src/hed/libs/credential/VOMSUtil.cpp:1721 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1636 #: src/hed/libs/credential/VOMSUtil.cpp:1757 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1641 #: src/hed/libs/credential/VOMSUtil.cpp:1762 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1656 #: src/hed/libs/credential/VOMSUtil.cpp:1777 msgid "VOMS: failed to parse attributes from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1700 #: src/hed/libs/credential/VOMSUtil.cpp:1829 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1861 #: src/hed/libs/credential/VOMSUtil.cpp:2029 #: src/hed/libs/credential/VOMSUtil.cpp:2037 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1878 #: src/hed/libs/credential/VOMSUtil.cpp:2054 msgid "VOMS: unsupported time format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1884 #: src/hed/libs/credential/VOMSUtil.cpp:2060 msgid "VOMS: AC is not yet valid" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1891 #: src/hed/libs/credential/VOMSUtil.cpp:2067 msgid "VOMS: AC has 
expired" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1906 #: src/hed/libs/credential/VOMSUtil.cpp:2080 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1911 #: src/hed/libs/credential/VOMSUtil.cpp:2085 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1912 #: src/hed/libs/credential/VOMSUtil.cpp:2086 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1915 #: src/hed/libs/credential/VOMSUtil.cpp:2089 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number as the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1924 #: src/hed/libs/credential/VOMSUtil.cpp:2098 msgid "VOMS: the holder information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1946 #: src/hed/libs/credential/VOMSUtil.cpp:2120 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1947 #: src/hed/libs/credential/VOMSUtil.cpp:2121 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1948 #: src/hed/libs/credential/VOMSUtil.cpp:2122 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1955 #: src/hed/libs/credential/VOMSUtil.cpp:2129 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1967 #: src/hed/libs/credential/VOMSUtil.cpp:1974 #: src/hed/libs/credential/VOMSUtil.cpp:2141 #: src/hed/libs/credential/VOMSUtil.cpp:2148 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1987 #: src/hed/libs/credential/VOMSUtil.cpp:2160 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1997 #: src/hed/libs/credential/VOMSUtil.cpp:2169 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2005 #: src/hed/libs/credential/VOMSUtil.cpp:2177 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2013 #: src/hed/libs/credential/VOMSUtil.cpp:2185 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2221 #: src/hed/libs/credential/VOMSUtil.cpp:2233 #: src/hed/libs/credential/VOMSUtil.cpp:2247 #: src/hed/libs/credential/VOMSUtil.cpp:2259 #: src/hed/libs/credential/VOMSUtil.cpp:2282 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2273 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2292 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2298 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:126 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #: 
src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, fuzzy, c-format msgid "MyProxy failure: %s" msgstr "Proxy Pfad: %s" #: src/hed/libs/crypto/OpenSSL.cpp:64 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "SSL Fehler: %d - %s:%s:%s" #: src/hed/libs/crypto/OpenSSL.cpp:78 #, fuzzy msgid "Failed to lock arccrypto library in memory" msgstr "Fehler bei Lock von arccrypto Bibliothek in Speicher" #: src/hed/libs/crypto/OpenSSL.cpp:81 msgid "Failed to initialize OpenSSL library" msgstr "Fehler bei Initialisierung von OpenSSL Bibliothek" #: src/hed/libs/data/DataExternalHelper.cpp:157 #, fuzzy msgid "failed to read data tag" msgstr "Fehler bei Lesen von Daten" #: src/hed/libs/data/DataExternalHelper.cpp:161 msgid "waiting for data chunk" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:163 #, fuzzy msgid "failed to read data chunk" msgstr "Fehler bei Öffnen von Datenkanal" #: src/hed/libs/data/DataExternalHelper.cpp:171 #, c-format msgid "data chunk: %llu %llu" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:242 #, c-format msgid "DataMove::Transfer: using supplied checksum %s" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:361 msgid "Expecting Module, Command and URL provided" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:368 msgid "Expecting Command module path among arguments" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:372 msgid "Expecting Command module name among arguments" msgstr "" #: src/hed/libs/data/DataMover.cpp:126 msgid "No locations found - probably no more physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:132 src/hed/libs/data/FileCache.cpp:550 #: src/libs/data-staging/Processor.cpp:394 #: src/libs/data-staging/Processor.cpp:408 #, c-format msgid "Removing %s" msgstr "Entferne %s" #: src/hed/libs/data/DataMover.cpp:145 msgid "This instance was already deleted" msgstr "" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete physical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:162 #, c-format msgid "Removing metadata in %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:166 msgid "Failed to delete meta-information" msgstr "" #: src/hed/libs/data/DataMover.cpp:180 msgid "Failed to remove all physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:184 #, c-format msgid "Removing logical file from metadata %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:187 msgid "Failed to delete logical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:194 msgid "Failed to remove instance" msgstr "" #: src/hed/libs/data/DataMover.cpp:243 msgid "DataMover::Transfer : starting new thread" msgstr "" #: src/hed/libs/data/DataMover.cpp:271 #, c-format msgid "Transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:273 msgid "Not valid source" msgstr "" #: src/hed/libs/data/DataMover.cpp:278 msgid "Not valid destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:300 src/services/candypond/CandyPond.cpp:304 #, c-format msgid "Couldn't handle certificate: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:309 src/hed/libs/data/DataMover.cpp:614 #: src/libs/data-staging/Processor.cpp:123 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "" #: src/hed/libs/data/DataMover.cpp:313 src/hed/libs/data/DataMover.cpp:633 #: src/hed/libs/data/DataMover.cpp:691 src/libs/data-staging/Processor.cpp:142 msgid "Permission checking passed" 
msgstr "" #: src/hed/libs/data/DataMover.cpp:314 src/hed/libs/data/DataMover.cpp:652 #: src/hed/libs/data/DataMover.cpp:1180 msgid "Linking/copying cached file" msgstr "" #: src/hed/libs/data/DataMover.cpp:338 #, c-format msgid "No locations for source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:342 #, c-format msgid "Failed to resolve source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:356 src/hed/libs/data/DataMover.cpp:431 #, c-format msgid "No locations for destination found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:361 src/hed/libs/data/DataMover.cpp:435 #, c-format msgid "Failed to resolve destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:378 #, c-format msgid "No locations for destination different from source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:400 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:412 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "" #: src/hed/libs/data/DataMover.cpp:416 #, c-format msgid "Failed to delete %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:447 #, c-format msgid "Deleted but still have locations at %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:459 msgid "DataMover: cycle" msgstr "" #: src/hed/libs/data/DataMover.cpp:461 msgid "DataMover: no retries requested - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:466 msgid "DataMover: source out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:468 msgid "DataMover: destination out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:476 #, c-format msgid "Real transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:502 #, c-format msgid "Creating buffer: %lli x %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:518 #, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:523 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:547 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:552 msgid "Buffer creation failed !" 
msgstr "" #: src/hed/libs/data/DataMover.cpp:575 #, c-format msgid "URL is mapped to: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:603 src/hed/libs/data/DataMover.cpp:661 #: src/libs/data-staging/Processor.cpp:78 msgid "Cached file is locked - should retry" msgstr "" #: src/hed/libs/data/DataMover.cpp:608 src/libs/data-staging/Processor.cpp:96 msgid "Failed to initiate cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:625 src/services/candypond/CandyPond.cpp:379 #, c-format msgid "Permission checking failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:627 src/hed/libs/data/DataMover.cpp:685 #: src/hed/libs/data/DataMover.cpp:705 src/hed/libs/data/DataMover.cpp:716 msgid "source.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:641 src/libs/data-staging/Processor.cpp:147 #, fuzzy, c-format msgid "Source modification date: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/hed/libs/data/DataMover.cpp:642 src/libs/data-staging/Processor.cpp:148 #, c-format msgid "Cache creation date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:648 src/libs/data-staging/Processor.cpp:153 msgid "Cached file is outdated, will re-download" msgstr "" #: src/hed/libs/data/DataMover.cpp:651 src/libs/data-staging/Processor.cpp:158 msgid "Cached copy is still valid" msgstr "" #: src/hed/libs/data/DataMover.cpp:678 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" #: src/hed/libs/data/DataMover.cpp:682 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:693 msgid "Linking local file" msgstr "" #: src/hed/libs/data/DataMover.cpp:713 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:722 #, fuzzy, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "Fehler bei Ändernn des Owner von hard link zu %i: %s" #: src/hed/libs/data/DataMover.cpp:733 #, c-format msgid "cache file: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:759 #, fuzzy, c-format msgid "Failed to stat source %s" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/libs/data/DataMover.cpp:761 src/hed/libs/data/DataMover.cpp:776 #: src/hed/libs/data/DataMover.cpp:808 src/hed/libs/data/DataMover.cpp:828 #: src/hed/libs/data/DataMover.cpp:851 src/hed/libs/data/DataMover.cpp:869 #: src/hed/libs/data/DataMover.cpp:1028 src/hed/libs/data/DataMover.cpp:1061 #: src/hed/libs/data/DataMover.cpp:1072 src/hed/libs/data/DataMover.cpp:1146 msgid "(Re)Trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:772 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:786 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:790 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:802 src/hed/libs/data/DataMover.cpp:823 #: src/libs/data-staging/DataStagingDelivery.cpp:376 #: src/libs/data-staging/DataStagingDelivery.cpp:399 #, fuzzy, c-format msgid "Using internal transfer method of %s" msgstr "Nutze unsicheren Datentransfer" #: src/hed/libs/data/DataMover.cpp:815 src/hed/libs/data/DataMover.cpp:833 #: src/libs/data-staging/DataStagingDelivery.cpp:392 #: src/libs/data-staging/DataStagingDelivery.cpp:413 #, c-format msgid "Internal transfer method is not supported for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:840 #, fuzzy msgid "Using buffered transfer 
method" msgstr "Nutze sicheren Datentransfer" #: src/hed/libs/data/DataMover.cpp:844 #, fuzzy, c-format msgid "Failed to prepare source: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/data/DataMover.cpp:859 #, c-format msgid "Failed to start reading from source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:879 #, fuzzy msgid "Metadata of source and destination are different" msgstr "" "Файл Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ñовпадает Ñ Ð¸Ñходным.\n" "%1" #: src/hed/libs/data/DataMover.cpp:899 #, c-format msgid "Failed to preregister destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:904 src/hed/libs/data/DataMover.cpp:1170 msgid "destination.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:915 #, fuzzy, c-format msgid "Failed to prepare destination: %s" msgstr "Fehler bei Anlegen von soft link: %s" #: src/hed/libs/data/DataMover.cpp:922 src/hed/libs/data/DataMover.cpp:945 #: src/hed/libs/data/DataMover.cpp:1167 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:926 src/hed/libs/data/DataMover.cpp:948 #: src/hed/libs/data/DataMover.cpp:1037 src/hed/libs/data/DataMover.cpp:1053 #: src/hed/libs/data/DataMover.cpp:1078 src/hed/libs/data/DataMover.cpp:1123 msgid "(Re)Trying next destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:937 #, c-format msgid "Failed to start writing to destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:960 msgid "Failed to start writing to cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:968 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1192 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:975 msgid "Waiting for buffer" msgstr "" #: src/hed/libs/data/DataMover.cpp:982 #, fuzzy, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/data/DataMover.cpp:987 #, c-format msgid "buffer: read EOF : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:988 #, fuzzy, c-format msgid "buffer: write EOF: %s" msgstr "Globus error (Schreiben): %s" #: src/hed/libs/data/DataMover.cpp:989 #, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:990 msgid "Closing read channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:997 msgid "Closing write channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:1005 #, fuzzy msgid "Failed to complete writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataMover.cpp:1019 #, fuzzy msgid "Transfer cancelled successfully" msgstr "Job erfolgreich abgebrochen" #: src/hed/libs/data/DataMover.cpp:1066 msgid "Cause of failure unclear - choosing randomly" msgstr "" #: src/hed/libs/data/DataMover.cpp:1110 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" #: src/hed/libs/data/DataMover.cpp:1116 msgid "" "Failed to unregister preregistered lfn, You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:1120 #, fuzzy msgid "Failed to delete destination, retry may fail" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/hed/libs/data/DataMover.cpp:1130 #, fuzzy msgid "Cannot compare empty checksum" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/hed/libs/data/DataMover.cpp:1137 #: 
src/libs/data-staging/DataStagingDelivery.cpp:570 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" #: src/hed/libs/data/DataMover.cpp:1139 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:1151 #: src/libs/data-staging/DataStagingDelivery.cpp:586 #, c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1157 #: src/libs/data-staging/DataStagingDelivery.cpp:589 msgid "Checksum not computed" msgstr "" #: src/hed/libs/data/DataMover.cpp:1163 #, c-format msgid "Failed to postregister destination %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:90 #, c-format msgid "Invalid URL option: %s" msgstr "Ungültige URL-Option: %s" #: src/hed/libs/data/DataPoint.cpp:251 msgid "Checksum types of index and replica are different, skipping comparison" msgstr "" #: src/hed/libs/data/DataPoint.cpp:278 #, c-format msgid "Skipping invalid URL option %s" msgstr "Überspringe ungültige URL-Option %s" #: src/hed/libs/data/DataPoint.cpp:293 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ." msgstr "" #: src/hed/libs/data/DataPoint.cpp:311 #, c-format msgid "Failed to load plugin for URL %s" msgstr "Fehler bei Laden von Plugin für URL %s" #: src/hed/libs/data/DataPointDelegate.cpp:75 #: src/hed/libs/data/DataPointDelegate.cpp:76 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2032 #, c-format msgid "Starting helper process: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:180 #, fuzzy msgid "start_reading" msgstr "start_reading_ftp" #: src/hed/libs/data/DataPointDelegate.cpp:189 #, fuzzy msgid "start_reading: helper start failed" msgstr "start_reading_ftp: globus_thread_create fehlgeschlagen" #: src/hed/libs/data/DataPointDelegate.cpp:197 #, fuzzy msgid "start_reading: thread create failed" msgstr "start_reading_ftp: globus_thread_create fehlgeschlagen" #: src/hed/libs/data/DataPointDelegate.cpp:213 #, fuzzy msgid "StopReading: aborting connection" msgstr "StopWriting: Abbruch der Verbindung" #: src/hed/libs/data/DataPointDelegate.cpp:218 #, fuzzy msgid "stop_reading: waiting for transfer to finish" msgstr "stop_reading_ftp: warte auf Beenden von Transfer" #: src/hed/libs/data/DataPointDelegate.cpp:221 #, fuzzy, c-format msgid "stop_reading: exiting: %s" msgstr "stop_reading_ftp: verlasse: %s" #: src/hed/libs/data/DataPointDelegate.cpp:231 #, fuzzy msgid "read_thread: get and register buffers" msgstr "ftp_read_thread: beziehe und registriere Puffer" #: src/hed/libs/data/DataPointDelegate.cpp:239 #, fuzzy, c-format msgid "read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/libs/data/DataPointDelegate.cpp:247 #, fuzzy, c-format msgid "read_thread: non-data tag '%c' from external process - leaving: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/libs/data/DataPointDelegate.cpp:256 #, fuzzy, c-format msgid "read_thread: data read error from external process - aborting: %s" msgstr "ftp_read_thread: for_read fehlgeschlagen - Abbruch: %s" #: src/hed/libs/data/DataPointDelegate.cpp:264 #, fuzzy msgid "read_thread: exiting" msgstr "ftp_read_thread: Beenden" #: 
src/hed/libs/data/DataPointDelegate.cpp:285 #, fuzzy msgid "start_writing_ftp: helper start failed" msgstr "start_writing_ftp: put fehlgeschlagen" #: src/hed/libs/data/DataPointDelegate.cpp:293 #, fuzzy msgid "start_writing_ftp: thread create failed" msgstr "start_writitng_ftp: globus_thread_create failed" #: src/hed/libs/data/DataPointDelegate.cpp:343 #, fuzzy msgid "No checksum information possible" msgstr "list_files_ftp: Bezug von Zeitpunkt der letzten Dateiänderung von %s" #: src/hed/libs/data/DataPointDelegate.cpp:359 #, fuzzy msgid "write_thread: get and pass buffers" msgstr "ftp_write_thread: Beziehe und Registriere Puffer" #: src/hed/libs/data/DataPointDelegate.cpp:366 #, fuzzy msgid "write_thread: for_write failed - aborting" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/libs/data/DataPointDelegate.cpp:370 #, fuzzy msgid "write_thread: for_write eof" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/libs/data/DataPointDelegate.cpp:384 #, fuzzy msgid "write_thread: out failed - aborting" msgstr "ftp_write_thread: for_write fehlgeschlagen - Abbruch" #: src/hed/libs/data/DataPointDelegate.cpp:392 #, fuzzy msgid "write_thread: exiting" msgstr "ftp_read_thread: Beenden" #: src/hed/libs/data/DataPointIndex.cpp:91 #, fuzzy, c-format msgid "Can't handle location %s" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, fuzzy, c-format msgid "Excluding replica %s matching pattern !%s" msgstr "Replica %s existiert bereits für LFN %s" #: src/hed/libs/data/DataPointIndex.cpp:229 #, fuzzy, c-format msgid "Replica %s matches host pattern %s" msgstr "Replica %s existiert bereits für LFN %s" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "Operation erfolgreich abgeschlossen" #: src/hed/libs/data/DataStatus.cpp:13 #, fuzzy msgid "Source is invalid URL" msgstr "Quelle muss LFN enthalten" #: src/hed/libs/data/DataStatus.cpp:14 #, fuzzy msgid "Destination is invalid URL" msgstr "Destination muss LFN enthalten" #: src/hed/libs/data/DataStatus.cpp:15 #, fuzzy msgid "Resolving of index service for source failed" msgstr "Auflösen von index service URL für Quelle schlug fehl" #: src/hed/libs/data/DataStatus.cpp:16 #, fuzzy msgid "Resolving of index service for destination failed" msgstr "Auflösen von index service URL für Ziel schlug fehl" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "Kann nicht von Quelle lesen" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "Kann nicht zu Ziel schreiben" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "Fehler bei Lesen von Quelle" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataStatus.cpp:21 #, fuzzy msgid "Failed while transferring data" msgstr "Fehler bei 
Transfer von Daten" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "Fehler bei Abschluß des Lesens von Quelle" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:26 #, fuzzy msgid "Unregistering from index service failed" msgstr "Keine Antwort von AA service %s schlug fehl" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" #: src/hed/libs/data/DataStatus.cpp:29 #, fuzzy msgid "Delete error" msgstr "Löschte %s" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "" #: src/hed/libs/data/DataStatus.cpp:34 #, fuzzy msgid "Already reading from source" msgstr "Fehler bei Lesen von Quelle" #: src/hed/libs/data/DataStatus.cpp:35 #, fuzzy msgid "Already writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataStatus.cpp:36 #, fuzzy msgid "Read access check failed" msgstr "Lese Archiv Datei %s" #: src/hed/libs/data/DataStatus.cpp:37 #, fuzzy msgid "Directory listing failed" msgstr "Fehler beim Auflisten von Dateien" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 #, fuzzy msgid "Failed to obtain information about file" msgstr "Konnte Listing nicht via FTP beziehen: %s" #: src/hed/libs/data/DataStatus.cpp:40 #, fuzzy msgid "No such file or directory" msgstr "Fehler beim listen von Datei oder Verzeichnis: %s" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 #, fuzzy msgid "Failed to prepare source" msgstr "Fehler bei Reservieren von Platz" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:47 #, fuzzy msgid "Failed to prepare destination" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 #, fuzzy msgid "Failed to finalize reading from source" msgstr "Fehler bei Lesen von Quelle" #: src/hed/libs/data/DataStatus.cpp:50 #, fuzzy msgid "Failed to finalize writing to destination" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/libs/data/DataStatus.cpp:51 #, fuzzy msgid "Failed to create directory" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/data/DataStatus.cpp:52 #, fuzzy msgid "Failed to rename URL" msgstr "Fehler bei Erstellen von GUID in RLS: %s" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "" #: 
src/hed/libs/data/DataStatus.cpp:54 #, fuzzy msgid "Operation cancelled successfully" msgstr "Operation erfolgreich abgeschlossen" #: src/hed/libs/data/DataStatus.cpp:55 #, fuzzy msgid "Generic error" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:61 #, fuzzy msgid "Transfer timed out" msgstr "Transfer vollständig" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 #, fuzzy msgid "Temporary service error" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/data/DataStatus.cpp:66 #, fuzzy msgid "Permanent service error" msgstr "Benutzungsschnittstellenfehler" #: src/hed/libs/data/DataStatus.cpp:67 #, fuzzy msgid "Error switching uid" msgstr "Fehler bei Importieren" #: src/hed/libs/data/DataStatus.cpp:68 #, fuzzy msgid "Request timed out" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/libs/data/FileCache.cpp:109 #, fuzzy msgid "No cache directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/hed/libs/data/FileCache.cpp:126 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:135 #, fuzzy msgid "No draining cache directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/hed/libs/data/FileCache.cpp:153 #, fuzzy msgid "No read-only cache directory specified" msgstr "Kein Cache-Verzeichnis angegeben" #: src/hed/libs/data/FileCache.cpp:182 #, fuzzy, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:192 #, fuzzy, c-format msgid "Failed to create any cache directories for %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/hed/libs/data/FileCache.cpp:199 #, fuzzy, c-format msgid "Failed to change permissions on %s: %s" msgstr "Konnte Zugriffsrechte von hard link nicht ändern zu 0644: %s" #: src/hed/libs/data/FileCache.cpp:211 #, fuzzy, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:214 #, fuzzy, c-format msgid "Failed to release lock on file %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/libs/data/FileCache.cpp:232 #, fuzzy, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "" "Warnung: Fehler bei Nachschlagen von Attributen von gecachter Datei: %s" #: src/hed/libs/data/FileCache.cpp:238 #, fuzzy, c-format msgid "Failed to obtain lock on cache file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:247 src/hed/libs/data/FileCache.cpp:307 #, fuzzy, c-format msgid "Error removing cache file %s: %s" msgstr "Fehler bei Entfernen von Cache-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:249 src/hed/libs/data/FileCache.cpp:260 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:279 src/hed/libs/data/FileCache.cpp:313 #, fuzzy, c-format msgid "Failed to unlock file %s: %s. 
Manual intervention may be required" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:296 #, c-format msgid "Invalid lock on file %s" msgstr "Ungültiger Lock auf Datei %s" #: src/hed/libs/data/FileCache.cpp:302 #, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Fehler bei Entfernen von .meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:367 #, c-format msgid "Cache not found for file %s" msgstr "Kein Cache für Datei %s gefunden" #: src/hed/libs/data/FileCache.cpp:377 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:382 src/hed/libs/data/FileCache.cpp:687 #, c-format msgid "Cache file %s does not exist" msgstr "Cache-Datei %s existiert nicht" #: src/hed/libs/data/FileCache.cpp:387 src/hed/libs/data/FileCache.cpp:689 #, c-format msgid "Error accessing cache file %s: %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:393 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "Kann Verzeichnis %s nicht anlegen für Job-spezifische hard links" #: src/hed/libs/data/FileCache.cpp:398 #, c-format msgid "Cannot change permission of %s: %s " msgstr "Kann Zugriffsrechte von %s nicht ändern: %s " #: src/hed/libs/data/FileCache.cpp:402 #, c-format msgid "Cannot change owner of %s: %s " msgstr "Kann Owner von %s nicht ändern: %s " #: src/hed/libs/data/FileCache.cpp:416 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/hed/libs/data/FileCache.cpp:420 src/hed/libs/data/FileCache.cpp:431 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "Fehler bei Anlegen von hard link von %s zu %s: %s" #: src/hed/libs/data/FileCache.cpp:426 #, c-format msgid "Cache file %s not found" msgstr "Cache-Datei %s nicht gefunden" #: src/hed/libs/data/FileCache.cpp:441 #, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "Fehler bei Ändern von Zugriffsrechten oder Owner von hard link %s: %s" #: src/hed/libs/data/FileCache.cpp:449 #, c-format msgid "Failed to release lock on cache file %s" msgstr "Fehler bei Freigabe des Locks auf Cache-Datei %s" #: src/hed/libs/data/FileCache.cpp:460 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:465 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:470 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:488 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "Fehler bei Kopieren von Datei %s zu %s: %s" #: src/hed/libs/data/FileCache.cpp:494 #, c-format msgid "Failed to set executable bit on file %s" msgstr "Konnte executable bit auf Datei %s nicht setzen" #: src/hed/libs/data/FileCache.cpp:499 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "Konnte executable bit auf Datei %s nicht setzen: %s" #: src/hed/libs/data/FileCache.cpp:513 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "Fehler bei Entfernen von symbolischem Link %s: %s" #: src/hed/libs/data/FileCache.cpp:517 src/hed/libs/data/FileCache.cpp:522 #, fuzzy, c-format msgid 
"Failed to create symbolic link from %s to %s: %s" msgstr "Fehler bei Anlegen von hard link von %s zu %s: %s" #: src/hed/libs/data/FileCache.cpp:552 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "Fehler bei Entfernen von cache per-job Verzeichnis %s: %s" #: src/hed/libs/data/FileCache.cpp:571 src/hed/libs/data/FileCache.cpp:639 #, fuzzy, c-format msgid "Error reading meta file %s: %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:576 src/hed/libs/data/FileCache.cpp:644 #, fuzzy, c-format msgid "Error opening meta file %s" msgstr "Fehler bei Öffnen von Meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:581 src/hed/libs/data/FileCache.cpp:648 #, fuzzy, c-format msgid "meta file %s is empty" msgstr "Anfrage ist leer" #: src/hed/libs/data/FileCache.cpp:591 #, fuzzy, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" "Fehler: Datei %s wird bereits gecacht bei %s unter anderer URL: %s - werde " "DN nicht zu cached list hinzufügen" #: src/hed/libs/data/FileCache.cpp:602 #, fuzzy, c-format msgid "Bad format detected in file %s, in line %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/hed/libs/data/FileCache.cpp:618 #, fuzzy, c-format msgid "Could not acquire lock on meta file %s" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/hed/libs/data/FileCache.cpp:622 #, fuzzy, c-format msgid "Error opening meta file for writing %s" msgstr "Fehler bei Öffnen von Meta-Datei zum schreiben %s: %s" #: src/hed/libs/data/FileCache.cpp:658 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "DN %s wird gecacht und ist gültig bis %s für Datei %s" #: src/hed/libs/data/FileCache.cpp:662 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "DN %s wird gecacht aber ist abgelaufen für URL %s" #: src/hed/libs/data/FileCache.cpp:713 #, fuzzy, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:718 #, fuzzy, c-format msgid "Failed to create cache meta file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/data/FileCache.cpp:733 #, fuzzy, c-format msgid "Failed to read cache meta file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/data/FileCache.cpp:738 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:743 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:747 #, fuzzy, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" "Fehler: Datei %s wird bereits gecacht bei %s unter einer anderen URL: %s - " "diese Datei wird nicht gecacht" #: src/hed/libs/data/FileCache.cpp:757 #, fuzzy, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "Fehler bei Nachschlagen von Attributen von Meta-Datei %s: %s" #: src/hed/libs/data/FileCache.cpp:828 #, fuzzy, c-format msgid "Using cache %s" msgstr "Nutze space token %s" #: src/hed/libs/data/FileCache.cpp:842 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:79 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:112 #, fuzzy, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "Fehler bei Öffnen von Meta-Datei zum schreiben %s: %s" #: src/hed/libs/data/FileCache.cpp:848 #: 
src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:118 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: src/hed/libs/data/URLMap.cpp:33 #, fuzzy, c-format msgid "Can't use URL %s" msgstr "Kann URL %s nicht nutzen" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "" #: src/hed/libs/data/URLMap.cpp:49 #, fuzzy, c-format msgid "Mapping %s to %s" msgstr "Bilde %s auf %s ab" #: src/hed/libs/data/examples/simple_copy.cpp:17 #, fuzzy msgid "Usage: copy source destination" msgstr "Aufruf: copy Quelle Ziel" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, fuzzy, c-format msgid "Copy failed: %s" msgstr "Kopieren fehlgeschlagen: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, fuzzy, c-format msgid "Failed to read proxy file: %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, fuzzy, c-format msgid "Failed to read certificate file: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, fuzzy, c-format msgid "Failed to read private key file: %s" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:" "%s" msgstr "" "Fehler bei Konvertieren von GSI Credential zu GSS Credential (major: %d, " "minor: %d):%s:%s" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, fuzzy, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "Fehler bei Freigabe von GSS Credential (major: %d, minor: %d):%s:%s" #: src/hed/libs/loader/ModuleManager.cpp:30 msgid "Module Manager Init" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:73 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." 
msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:207 #, c-format msgid "Found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:214 #, c-format msgid "Could not locate module %s in following paths:" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:218 #, fuzzy, c-format msgid "\t%s" msgstr "%s" #: src/hed/libs/loader/ModuleManager.cpp:232 #, c-format msgid "Loaded %s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:276 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:312 #: src/hed/libs/loader/ModuleManager.cpp:325 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:316 #, c-format msgid "Not found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:330 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:364 src/hed/libs/loader/Plugin.cpp:557 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:372 src/hed/libs/loader/Plugin.cpp:567 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:378 src/hed/libs/loader/Plugin.cpp:480 #: src/hed/libs/loader/Plugin.cpp:572 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:395 src/hed/libs/loader/Plugin.cpp:490 #: src/hed/libs/loader/Plugin.cpp:598 #, c-format msgid "Module %s failed to reload (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:417 #, c-format msgid "Module %s contains no plugin %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:462 #, c-format msgid "Could not find loadable module descriptor by name %s or kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:467 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:474 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:503 #, c-format msgid "Module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:588 #, c-format msgid "Module %s does not contain plugin(s) of specified kind(s)" msgstr "" #: src/hed/libs/message/MCC.cpp:76 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "" #: src/hed/libs/message/MCC.cpp:85 #, fuzzy, c-format msgid "Security processing/check failed: %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/hed/libs/message/MCC.cpp:90 msgid "Security processing/check passed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:17 #, fuzzy msgid "Chain(s) configuration failed" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/libs/message/MCCLoader.cpp:134 msgid "SecHandler configuration is not defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:157 msgid "SecHandler has no configuration" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:163 msgid "SecHandler has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:173 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:177 #, c-format msgid "SecHandler: %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:189 msgid "Component has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:194 msgid "Component has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:203 #, c-format msgid "Component %s(%s) could not be created" 
msgstr "" #: src/hed/libs/message/MCCLoader.cpp:229 #, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:290 #, c-format msgid "Loaded MCC %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:308 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:318 #, c-format msgid "Loaded Plexer %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:326 msgid "Service has no Name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:332 msgid "Service has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:341 #, c-format msgid "Service %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:348 #, c-format msgid "Loaded Service %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:390 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:401 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:410 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:415 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:434 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:445 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:454 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:460 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "" #: src/hed/libs/message/Service.cpp:35 #, fuzzy, c-format msgid "Security processing/check for '%s' failed: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/hed/libs/message/Service.cpp:41 #, c-format msgid "Security processing/check for '%s' passed" msgstr "" #: src/hed/libs/otokens/jwse.cpp:55 #, c-format msgid "JWSE::Input: token: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:75 #, c-format msgid "JWSE::Input: header: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:101 #, c-format msgid "JWSE::Input: JWS content: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:111 msgid "JWSE::Input: JWS: token too young" msgstr "" #: src/hed/libs/otokens/jwse.cpp:120 msgid "JWSE::Input: JWS: token too old" msgstr "" #: src/hed/libs/otokens/jwse.cpp:131 #, c-format msgid "JWSE::Input: JWS: signature algorithm: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:174 #, c-format msgid "JWSE::Input: JWS: signature algorithn not supported: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:192 #, fuzzy msgid "JWSE::Input: JWS: signature verification failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/hed/libs/otokens/jwse.cpp:198 msgid "JWSE::Input: JWE: not supported yet" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:21 msgid "JWSE::VerifyECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:25 msgid "JWSE::VerifyECDSA: wrong signature size" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:34 msgid "JWSE::VerifyECDSA: failed to create ECDSA signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:41 msgid 
"JWSE::VerifyECDSA: failed to parse signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:47 #, c-format msgid "JWSE::VerifyECDSA: failed to assign ECDSA signature: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:56 #, fuzzy msgid "JWSE::VerifyECDSA: failed to create EVP context" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/libs/otokens/jwse_ecdsa.cpp:61 #, c-format msgid "JWSE::VerifyECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:66 #, c-format msgid "JWSE::VerifyECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:72 #, c-format msgid "JWSE::VerifyECDSA: failed to add message to hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:80 #, c-format msgid "JWSE::VerifyECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:87 #, c-format msgid "JWSE::VerifyECDSA: failed to verify: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:96 msgid "JWSE::SignECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:104 #, fuzzy msgid "JWSE::SignECDSA: failed to create EVP context" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/hed/libs/otokens/jwse_ecdsa.cpp:109 #, c-format msgid "JWSE::SignECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:114 #, c-format msgid "JWSE::SignECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:120 #, fuzzy, c-format msgid "JWSE::SignECDSA: failed to add message to hash: %i" msgstr "Warnung: Fehler bei Hinzufügen eines Attributs zu RLS: %s" #: src/hed/libs/otokens/jwse_ecdsa.cpp:128 #, c-format msgid "JWSE::SignECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:135 msgid "JWSE::SignECDSA: failed to create ECDSA signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:143 msgid "JWSE::SignECDSA: failed to parse signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:150 #, c-format msgid "JWSE::SignECDSA: wrong signature size: %i + %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:156 msgid "JWSE::SignECDSA: wrong signature size written" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:273 msgid "JWSE::ExtractPublicKey: x5c key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:281 msgid "JWSE::ExtractPublicKey: jwk key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:288 msgid "JWSE::ExtractPublicKey: external jwk key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:315 #, c-format msgid "JWSE::ExtractPublicKey: deleting outdated info: %s" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:344 #, c-format msgid "JWSE::ExtractPublicKey: fetching jws key from %s" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:372 msgid "JWSE::ExtractPublicKey: no supported key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:375 msgid "JWSE::ExtractPublicKey: key parsing error" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:40 #: src/hed/libs/otokens/openid_metadata.cpp:45 #, fuzzy, c-format msgid "Input: metadata: %s" msgstr "Füge location hinzu: Metadaten: %s" #: src/hed/libs/otokens/openid_metadata.cpp:438 #, fuzzy, c-format msgid "Fetch: response code: %u %s" msgstr "Erhalte Antwort: %s" #: src/hed/libs/otokens/openid_metadata.cpp:440 #, fuzzy, c-format msgid "Fetch: response body: %s" msgstr "Erhalte Antwort: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:141 #, fuzzy, c-format msgid "Can not load ARC evaluator object: %s" msgstr "Kann PolicyStore Objekt nicht anlegen" #: 
src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:192 #, c-format msgid "Can not load ARC request object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:233 #, c-format msgid "Can not load policy object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:281 msgid "Can not load policy object" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:329 msgid "Can not load request object" msgstr "" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:189 #, c-format msgid "HTTP Error: %d %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:270 msgid "Cannot create http payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:353 msgid "No next element in the chain" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:362 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:371 msgid "next element of the chain returned no payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:383 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:465 msgid "Error to flush output payload" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, fuzzy, c-format msgid "< %s" msgstr "%s" #: src/hed/mcc/http/PayloadHTTP.cpp:576 #, fuzzy msgid "Failed to parse HTTP header" msgstr "Fehler bei Parsen von HTTP header" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:969 #, fuzzy, c-format msgid "> %s" msgstr "%s" #: src/hed/mcc/http/PayloadHTTP.cpp:994 #, fuzzy msgid "Failed to write header to output stream" msgstr "Fehler bei Schreiben von header zu Ausgabe-Stream" #: src/hed/mcc/http/PayloadHTTP.cpp:1019 src/hed/mcc/http/PayloadHTTP.cpp:1025 #: src/hed/mcc/http/PayloadHTTP.cpp:1031 src/hed/mcc/http/PayloadHTTP.cpp:1041 #: src/hed/mcc/http/PayloadHTTP.cpp:1053 src/hed/mcc/http/PayloadHTTP.cpp:1058 #: src/hed/mcc/http/PayloadHTTP.cpp:1063 src/hed/mcc/http/PayloadHTTP.cpp:1071 #: src/hed/mcc/http/PayloadHTTP.cpp:1078 #, fuzzy msgid "Failed to write body to output stream" msgstr "Fehler bei Schreiben von body zu Ausgabe-Stream" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" 
msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:238 src/hed/mcc/soap/MCCSOAP.cpp:252 #: src/hed/mcc/soap/MCCSOAP.cpp:282 msgid "empty next chain element" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:298 msgid "next element of the chain returned empty payload" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:223 msgid "empty input payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:233 #, c-format msgid "MIME is not suitable for SOAP: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:247 msgid "incoming message is not SOAP" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:274 #, c-format msgid "Security check failed in SOAP MCC for incoming message: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:290 #, c-format msgid "next element of the chain returned error status: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:309 msgid "next element of the chain returned unknown payload - passing through" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:314 src/hed/mcc/soap/MCCSOAP.cpp:330 #, c-format msgid "Security check failed in SOAP MCC for outgoing message: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:384 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:437 msgid "Security check failed in SOAP MCC for incoming message" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:82 msgid "Missing Port in Listen element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:91 msgid "Version in Listen element can't be recognized" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:100 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:102 #, fuzzy, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:109 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:111 #, fuzzy, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "" "Ошибка при попытке открыть файл:\n" " %1" #: src/hed/mcc/tcp/MCCTCP.cpp:117 #, fuzzy, c-format msgid "Failed to create socket for listening at TCP port %s(%s): %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:119 #, fuzzy, c-format msgid "Failed to create socket for listening at %s:%s(%s): %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:134 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:136 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:144 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, fuzzy, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:161 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:163 #, fuzzy, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "Fehler bei Schreiben zu Datein 
%s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:180 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:182 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:189 #, fuzzy, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:191 #, fuzzy, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/hed/mcc/tcp/MCCTCP.cpp:197 msgid "No listening ports initiated" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "dropped" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "put on hold" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:212 msgid "Failed to start thread for listening" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:245 msgid "Failed to start thread for communication" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:271 msgid "Failed while waiting for connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:293 msgid "Failed to accept connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:302 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:309 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:548 msgid "next chain element called" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:563 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:571 src/hed/mcc/tcp/MCCTCP.cpp:670 #: src/hed/mcc/tls/MCCTLS.cpp:561 msgid "Failed to send content of buffer" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:583 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:585 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:606 msgid "No Connect element specified" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "Missing Port in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:618 msgid "Missing Host in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:646 #, fuzzy msgid "TCP client process called" msgstr "konnte Nachricht nicht verarbeiten" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:65 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:81 #, c-format msgid "Failed to resolve %s (%s)" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:91 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:95 #, fuzzy, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:107 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:132 #, fuzzy, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "Fehler bei Schreiben zu Ziel" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:198 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" 
#: src/hed/mcc/tls/ConfigTLSMCC.cpp:201 msgid "Using CA default location" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:210 #, fuzzy, c-format msgid "Using CA file: %s" msgstr "Nutze space token %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:212 #, fuzzy, c-format msgid "Using CA dir: %s" msgstr "Nutze space token %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:289 #, fuzzy, c-format msgid "Using DH parameters from file: %s" msgstr "Fehler bei Ablage von FTP Datei" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:292 #, fuzzy msgid "Failed to open file with DH parameters for reading" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:297 #, fuzzy msgid "Failed to read file with DH parameters" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:300 #, fuzzy msgid "Failed to apply DH parameters" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:302 msgid "DH parameters applied" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:316 #, fuzzy, c-format msgid "Using curve with NID: %u" msgstr "Nutze space token %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:319 #, fuzzy msgid "Failed to generate EC key" msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:322 msgid "Failed to apply ECDH parameters" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:324 msgid "ECDH parameters applied" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:330 #, fuzzy, c-format msgid "Using cipher list: %s" msgstr "Nutze space token %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:354 #, c-format msgid "Using protocol options: 0x%x" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:167 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:121 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:126 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:131 msgid "Missing CA subject in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:141 msgid "Negative rights are not supported in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:145 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:150 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:155 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:172 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " 
"supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:177 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:183 msgid "Missing condition subjects in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:265 msgid "Unknown element in Globus signing policy" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:237 #, fuzzy msgid "Critical VOMS attribute processing failed" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/hed/mcc/tls/MCCTLS.cpp:245 #, fuzzy msgid "VOMS attribute validation failed" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/hed/mcc/tls/MCCTLS.cpp:247 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:439 src/hed/mcc/tls/MCCTLS.cpp:578 #: src/hed/mcc/tls/MCCTLS.cpp:597 #, fuzzy, c-format msgid "Failed to establish connection: %s" msgstr "Fehler bei Schließen von Verbindung 2" #: src/hed/mcc/tls/MCCTLS.cpp:458 src/hed/mcc/tls/MCCTLS.cpp:540 #, c-format msgid "Peer name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:460 src/hed/mcc/tls/MCCTLS.cpp:542 #, c-format msgid "Identity name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:462 src/hed/mcc/tls/MCCTLS.cpp:544 #, c-format msgid "CA name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:469 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:477 msgid "Security check failed in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:550 msgid "Security check failed for outgoing TLS message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:582 msgid "Security check failed for incoming TLS message" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:64 #, c-format msgid "Ignoring verification error due to insecure connection allowed: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:79 #, fuzzy msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "Fehler beim Reservieren von Speicher" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:83 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:85 msgid "" "Skipping additional policy matching due to insecure connections allowed." 
msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:109 #, c-format msgid "Certificate %s already expired" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:117 #, c-format msgid "Certificate %s will expire in %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:138 msgid "Failed to store application data" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:166 msgid "Failed to retrieve application data from OpenSSL" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:238 src/hed/mcc/tls/PayloadTLSMCC.cpp:338 msgid "Can not create the SSL Context object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:251 src/hed/mcc/tls/PayloadTLSMCC.cpp:358 msgid "Can't set OpenSSL verify flags" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:372 msgid "Can not create the SSL object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:280 #, fuzzy msgid "Faile to assign hostname extension" msgstr "Fehler beim Lesen von SSL Token während Authentifizierung" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:294 msgid "Failed to establish SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:298 src/hed/mcc/tls/PayloadTLSMCC.cpp:388 #, fuzzy, c-format msgid "Using cipher: %s" msgstr "Nutze space token %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:384 msgid "Failed to accept SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:446 #, fuzzy, c-format msgid "Failed to shut down SSL: %s" msgstr "Fehler bei Erstellen von GUID in RLS: %s" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" "ArcAuthZ: Fehler bei Initiierung wenigstens einer PDP - diese Instanz wird " "nicht funktional sein" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 #, fuzzy msgid "PDP: missing name attribute" msgstr "PDP: %s Name ist Doublette" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, fuzzy, c-format msgid "PDP: %s (%s)" msgstr "PDP: %s (%d)" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, fuzzy, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "PDP: %s kann nicht geladen werden" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, fuzzy, c-format msgid "There are %d RequestItems" msgstr "Es gibt %d RequestItems" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "Konnte classname für FunctionFactory nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "Konnte classname für AttributeFactory nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" "Konnte classname für CombiningAlgorithmFactory nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "Konnte classname für Request nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "Kann AttributeFactory nicht 
dynamisch anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "Kann FnFactory nicht dynamisch anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "Kann AlgFactory nicht dynamisch anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 #, fuzzy msgid "Can not create PolicyStore object" msgstr "Kann PolicyStore Objekt nicht anlegen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 #, fuzzy msgid "Can not dynamically produce Request" msgstr "Kann Anfrage nicht dynamisch produzieren" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:109 #, fuzzy msgid "Can not find ArcPDPContext" msgstr "Kann ArcPDPContext nicht finden" #: src/hed/shc/arcpdp/ArcPDP.cpp:138 src/hed/shc/xacmlpdp/XACMLPDP.cpp:116 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "Evaluator unterstützt ladare Combining Algorithms nicht" #: src/hed/shc/arcpdp/ArcPDP.cpp:142 src/hed/shc/xacmlpdp/XACMLPDP.cpp:120 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "Evaluator unterstützt die angegebenen Combining Algorithms nicht - %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:154 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:83 #: src/hed/shc/gaclpdp/GACLPDP.cpp:117 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:132 msgid "Can not dynamically produce Evaluator" msgstr "Kann Evaluator nicht dynamisch produzieren" #: src/hed/shc/arcpdp/ArcPDP.cpp:157 msgid "Evaluator for ArcPDP was not loaded" msgstr "Evaluator für ArcPDP wurde nicht geladen" #: src/hed/shc/arcpdp/ArcPDP.cpp:164 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:56 #: src/hed/shc/gaclpdp/GACLPDP.cpp:127 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:88 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:142 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "Fehlendes security Objekt in Nachricht" #: src/hed/shc/arcpdp/ArcPDP.cpp:172 src/hed/shc/arcpdp/ArcPDP.cpp:180 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:136 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:142 #: src/hed/shc/gaclpdp/GACLPDP.cpp:135 src/hed/shc/gaclpdp/GACLPDP.cpp:143 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:96 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:104 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "Fehler bei Konvertierung von security information für ARC Anfrage" #: src/hed/shc/arcpdp/ArcPDP.cpp:188 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:149 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:112 #, c-format msgid "ARC Auth. request: %s" msgstr "ARC Auth. 
Anfrage: %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:191 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:152 #: src/hed/shc/gaclpdp/GACLPDP.cpp:154 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:115 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:169 #, fuzzy msgid "No requested security information was collected" msgstr "Keine security information erhalten" #: src/hed/shc/arcpdp/ArcPDP.cpp:198 msgid "Not authorized by arc.pdp - failed to get response from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:244 #, fuzzy msgid "Authorized by arc.pdp" msgstr "Authorisiert von arc.pdp" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 #, fuzzy msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" "UnAuthorisiert von arc.pdp; einige der ReqestItems genügen nicht der Policy" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "Policy is leer" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, fuzzy, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "PolicyId: %s Alg in dieser Policy ist:-- %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:74 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:128 #, fuzzy msgid "No delegation policies in this context and message - passing through" msgstr "" "Keine delegation policies in diesem context und message - durchgelassen" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:94 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:108 msgid "Failed to convert security information to ARC policy" msgstr "Fehler bei Konvertieren von security information für ARC policy" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:115 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:122 #, c-format msgid "ARC delegation policy: %s" msgstr "ARC delegation policy: %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:160 msgid "No authorization response was returned" msgstr "Es wurde keine authorization response erwidert" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:163 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "Es gibt %d Anfragen, die wenigstens einer Policy Anfrage genügt" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:182 #, fuzzy msgid "Delegation authorization passed" msgstr "Delegations Authorisation zugelassen" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:184 msgid "Delegation authorization failed" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" "Fehlendes CertificatePath Element oder ProxyPath Element, oder " " fehlt" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" "Fehlendes oder leeres KeyPath Element, oder fehlt" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "Fehlendes oder leeres CertificatePath oder CACertificatesDir Element" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "Delegation role nicht unterstützt: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "Delegation Typ nicht unterstützt: %s" #: 
src/hed/shc/delegationsh/DelegationSH.cpp:115 #, fuzzy msgid "Failed to acquire delegation context" msgstr "Konnte delegation context nicht erhalten" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "Kann delegation context nicht anlegen" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 #, fuzzy msgid "Delegation handler with delegatee role starts to process" msgstr "Delegation handler mit delegatee Rolle beginnt Verarbeitung" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:478 src/services/candypond/CandyPond.cpp:526 #: src/services/data-staging/DataDeliveryService.cpp:648 #, fuzzy msgid "process: POST" msgstr "Prozess: POST" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:485 src/services/candypond/CandyPond.cpp:535 #: src/services/data-staging/DataDeliveryService.cpp:657 #: src/services/wrappers/python/pythonwrapper.cpp:416 msgid "input is not SOAP" msgstr "Eingabe ist kein SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "Delegation service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, fuzzy, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" "Kann delegation credential nicht erhalten: %s von delegation service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, fuzzy, c-format msgid "Delegated credential identity: %s" msgstr "Delegated credential Identität: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" "Das vom delegation service erhaltene credential ist abgelegt unter Pfad: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 #, fuzzy msgid "The endpoint of delegation service should be configured" msgstr "Der Endpunkt des delegation service sollte konfiguriert sein" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 #, fuzzy msgid "Delegation handler with delegatee role ends" msgstr "Delegation handler mit delegatee Rolle endet" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 #, fuzzy msgid "Delegation handler with delegator role starts to process" msgstr "Delegation handler mit delegator Rolle beginnt Verarbeitung" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, fuzzy, c-format msgid "The delegated credential got from path: %s" msgstr "Das delegated credential erhalten von Pfad: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, fuzzy, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "Kann delegation credential nicht anlegen für delegation service: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 msgid "output is not SOAP" msgstr "Ausgabe ist nicht SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, fuzzy, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" "Senden von DelegationService erfolgreich: %s und DelegationID: %s Info an " "peer service" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:230 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 #, fuzzy msgid "Incoming Message is 
not SOAP" msgstr "Einkommende Nachricht ist nicht SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:353 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "Ausgehende Nachricht ist kein SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 #, fuzzy msgid "Delegation handler is not configured" msgstr "Delegation handler wurde nicht konfiguriert" #: src/hed/shc/gaclpdp/GACLPDP.cpp:120 msgid "Evaluator for GACLPDP was not loaded" msgstr "Evaluator für GACLPDP wurde nicht geladen" #: src/hed/shc/gaclpdp/GACLPDP.cpp:151 #, fuzzy, c-format msgid "GACL Auth. request: %s" msgstr "GACL Auth. Anfrage. %s" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 #, fuzzy msgid "Policy is not gacl" msgstr "Policy ist nicht gacl" #: src/hed/shc/legacy/ConfigParser.cpp:13 #, fuzzy msgid "Configuration file not specified" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:28 #: src/hed/shc/legacy/ConfigParser.cpp:33 #, fuzzy msgid "Configuration file can not be read" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/hed/shc/legacy/ConfigParser.cpp:43 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:47 #, c-format msgid "Configuration file is broken - block name does not end with ]: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:39 src/hed/shc/legacy/LegacyPDP.cpp:119 msgid "Configuration file not specified in ConfigBlock" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:48 src/hed/shc/legacy/LegacyPDP.cpp:128 #, fuzzy msgid "BlockName is empty" msgstr "Policy is leer" #: src/hed/shc/legacy/LegacyMap.cpp:108 #, fuzzy, c-format msgid "Failed processing user mapping command: %s %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacyMap.cpp:114 #, fuzzy, c-format msgid "Failed to change mapping stack processing policy in: %s = %s" msgstr "Konnte Zugriffsrechte von hard link nicht ändern zu 0644: %s" #: src/hed/shc/legacy/LegacyMap.cpp:179 msgid "LegacyMap: no configurations blocks defined" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:201 src/hed/shc/legacy/LegacyPDP.cpp:255 #, c-format msgid "" "LegacyPDP: there is no %s Sec Attribute defined. Probably ARC Legacy Sec " "Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:206 src/hed/shc/legacy/LegacyPDP.cpp:260 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." 
msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:138 #, fuzzy, c-format msgid "Failed to parse configuration file %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacyPDP.cpp:144 #, fuzzy, c-format msgid "Block %s not found in configuration file %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/hed/shc/legacy/LegacySecHandler.cpp:40 #: src/hed/shc/legacy/LegacySecHandler.cpp:118 msgid "LegacySecHandler: configuration file not specified" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:149 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, fuzzy, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" "Fehler bei Konvertieren von GSI Credential zu GCC Credential (major: %d, " "minor: %d)%s" #: src/hed/shc/legacy/arc_lcas.cpp:174 src/hed/shc/legacy/arc_lcmaps.cpp:188 #, fuzzy msgid "Missing subject name" msgstr "Fehlendes security Objekt in Nachricht" #: src/hed/shc/legacy/arc_lcas.cpp:179 src/hed/shc/legacy/arc_lcmaps.cpp:193 #, fuzzy msgid "Missing path of credentials file" msgstr "Pfad zu verlangter Datei" #: src/hed/shc/legacy/arc_lcas.cpp:185 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:202 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:212 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:222 #, fuzzy msgid "Failed to initialize LCAS" msgstr "Fehler bei Initialisierung von OpenSSL Bibliothek" #: src/hed/shc/legacy/arc_lcas.cpp:237 #, fuzzy msgid "Failed to terminate LCAS" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 #, fuzzy msgid "Can't read policy names" msgstr "Kann nicht von Quelle lesen" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 #, fuzzy msgid "Failed to initialize LCMAPS" msgstr "Fehler bei Initialisierung von OpenSSL Bibliothek" #: src/hed/shc/legacy/arc_lcmaps.cpp:293 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 #, fuzzy msgid "LCMAPS did not return any GID" msgstr "SRM lieferte keine Information zurück" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 #, fuzzy msgid "LCMAPS did not return any UID" msgstr "SRM lieferte keine Information zurück" #: src/hed/shc/legacy/arc_lcmaps.cpp:314 #, fuzzy msgid "Failed to terminate LCMAPS" msgstr "Fehler beim Authentifizieren: %s" #: src/hed/shc/legacy/auth.cpp:35 #, fuzzy, c-format msgid "Unexpected argument for 'all' rule - %s" msgstr "Kann doc Argument nicht anlegen" #: src/hed/shc/legacy/auth.cpp:340 #, fuzzy, c-format msgid "Credentials stored in temporary file %s" msgstr "Konnte nicht in temporäre Datei schreiben: %s" #: src/hed/shc/legacy/auth.cpp:349 #, fuzzy, c-format msgid "Assigned to authorization group %s" msgstr 
"Delegations Authorisation zugelassen" #: src/hed/shc/legacy/auth.cpp:354 #, fuzzy, c-format msgid "Assigned to userlist %s" msgstr "Delegations Authorisation zugelassen" #: src/hed/shc/legacy/auth_file.cpp:22 #, fuzzy, c-format msgid "Failed to read file %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/hed/shc/legacy/auth_otokens.cpp:33 #, fuzzy msgid "Missing subject in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_otokens.cpp:38 #, fuzzy msgid "Missing issuer in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_otokens.cpp:43 #, fuzzy msgid "Missing audience in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_otokens.cpp:48 #, fuzzy msgid "Missing scope in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_otokens.cpp:53 src/hed/shc/legacy/auth_voms.cpp:47 #, fuzzy msgid "Missing group in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_otokens.cpp:56 #, fuzzy, c-format msgid "Rule: subject: %s" msgstr "Subjekt: %s" #: src/hed/shc/legacy/auth_otokens.cpp:57 #, fuzzy, c-format msgid "Rule: issuer: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_otokens.cpp:58 #, fuzzy, c-format msgid "Rule: audience: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_otokens.cpp:59 #, fuzzy, c-format msgid "Rule: scope: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_otokens.cpp:60 src/hed/shc/legacy/auth_voms.cpp:66 #, c-format msgid "Rule: group: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:63 #, fuzzy, c-format msgid "Match issuer: %s" msgstr "Fataler Fehler: %s" #: src/hed/shc/legacy/auth_otokens.cpp:69 #, fuzzy, c-format msgid "Matched: %s %s %s" msgstr "" "cnd:\n" "%s ist ein %s" #: src/hed/shc/legacy/auth_otokens.cpp:83 src/hed/shc/legacy/auth_voms.cpp:93 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:176 #, c-format msgid "Evaluate operator =: left: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:177 #, c-format msgid "Evaluate operator =: right: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:182 #, c-format msgid "Evaluate operator =: left from context: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:239 #, c-format msgid "Operator token: %c" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:268 #, fuzzy, c-format msgid "String token: %s" msgstr "Start start" #: src/hed/shc/legacy/auth_otokens.cpp:296 #, fuzzy, c-format msgid "Quoted string token: %s" msgstr "Konnte GI token nicht wrappen: %s" #: src/hed/shc/legacy/auth_otokens.cpp:304 #, c-format msgid "Sequence token parsing: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:420 #, c-format msgid "Matching tokens expression: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:424 #, fuzzy msgid "Failed to parse expression" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/hed/shc/legacy/auth_otokens.cpp:435 #, c-format msgid "%s: " msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:441 #, fuzzy, c-format msgid " %s" msgstr " %s" #: src/hed/shc/legacy/auth_otokens.cpp:446 msgid "Expression matched" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:451 #, fuzzy, c-format msgid "Failed to evaluate expression: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/hed/shc/legacy/auth_otokens.cpp:454 msgid "Expression failed to matched" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:79 src/hed/shc/legacy/unixmap.cpp:216 #, c-format 
msgid "Plugin %s returned: %u" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:83 src/hed/shc/legacy/unixmap.cpp:220 #, fuzzy, c-format msgid "Plugin %s timeout after %u seconds" msgstr "Verbindung zu %s fehlgeschlagen nach %i Sekunden" #: src/hed/shc/legacy/auth_plugin.cpp:86 src/hed/shc/legacy/unixmap.cpp:223 #, c-format msgid "Plugin %s failed to start" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:88 src/hed/shc/legacy/unixmap.cpp:225 #, fuzzy, c-format msgid "Plugin %s printed: %s" msgstr "lfn: %s - pfn: %s" #: src/hed/shc/legacy/auth_plugin.cpp:89 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:226 #, fuzzy, c-format msgid "Plugin %s error: %s" msgstr "Globus Fehler: %s" #: src/hed/shc/legacy/auth_voms.cpp:42 #, fuzzy msgid "Missing VO in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:52 #, fuzzy msgid "Missing role in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:57 #, fuzzy msgid "Missing capabilities in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/legacy/auth_voms.cpp:62 #, fuzzy msgid "Too many arguments in configuration" msgstr "ausführliche Ausgabe" #: src/hed/shc/legacy/auth_voms.cpp:65 #, c-format msgid "Rule: vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:67 #, fuzzy, c-format msgid "Rule: role: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_voms.cpp:68 #, fuzzy, c-format msgid "Rule: capabilities: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Match vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:78 #, fuzzy, c-format msgid "Matched: %s %s %s %s" msgstr "" "cnd:\n" "%s ist ein %s" #: src/hed/shc/legacy/simplemap.cpp:70 #, c-format msgid "SimpleMap: acquired new unmap time of %u seconds" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:72 #, fuzzy msgid "SimpleMap: wrong number in unmaptime command" msgstr "Falsche Anzahl an Parametern übertragen" #: src/hed/shc/legacy/simplemap.cpp:85 src/hed/shc/legacy/simplemap.cpp:90 #, c-format msgid "SimpleMap: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:65 src/hed/shc/legacy/unixmap.cpp:70 msgid "Mapping policy option has empty value" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:80 #, fuzzy, c-format msgid "Unsupported mapping policy action: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/hed/shc/legacy/unixmap.cpp:91 #, fuzzy, c-format msgid "Unsupported mapping policy option: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/hed/shc/legacy/unixmap.cpp:103 src/hed/shc/legacy/unixmap.cpp:108 msgid "User name mapping command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:116 #, c-format msgid "User name mapping has empty authgroup: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:147 #, fuzzy, c-format msgid "Unknown user name mapping rule %s" msgstr "unbekannter return code %s" #: src/hed/shc/legacy/unixmap.cpp:156 src/hed/shc/legacy/unixmap.cpp:161 #: src/hed/shc/legacy/unixmap.cpp:177 src/hed/shc/legacy/unixmap.cpp:183 msgid "Plugin (user mapping) command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:167 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:171 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:204 #, fuzzy, c-format msgid "Plugin %s returned no username" msgstr "lfn: %s - pfn: %s" #: src/hed/shc/legacy/unixmap.cpp:209 #, c-format 
msgid "Plugin %s returned too much: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:212 #, fuzzy, c-format msgid "Plugin %s returned no mapping" msgstr "lfn: %s - pfn: %s" #: src/hed/shc/legacy/unixmap.cpp:235 msgid "User subject match is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:239 #, c-format msgid "Mapfile at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:263 msgid "User pool mapping is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:268 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:273 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:291 #, c-format msgid "User name direct mapping is missing user name: %s." msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:65 msgid "OTokens: Attr: message" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:70 #, c-format msgid "OTokens: Attr: %s = %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:75 #, c-format msgid "OTokens: Attr: token: %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:78 #, c-format msgid "OTokens: Attr: token: bearer: %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:193 msgid "OTokens: Handle" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:195 msgid "OTokens: Handle: message" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:198 #, fuzzy msgid "Failed to create OTokens security attributes" msgstr "Fehler bei Lesen von Objekt %s" #: src/hed/shc/otokens/OTokensSH.cpp:202 msgid "OTokens: Handle: token was not present" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:206 #, c-format msgid "OTokens: Handle: attributes created: subject = %s" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:47 msgid "Creating a pdpservice client" msgstr "Lege pdpservice client an" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:81 #, fuzzy msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "Arc policy can nicht mit SAML2.0 Profil von XACML geprüft werden" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:153 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:186 msgid "Policy Decision Service invocation failed" msgstr "Ausführen des Policy Decision Service schlug fehl" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:156 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:189 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:32 msgid "There was no SOAP response" msgstr "Keine SOAP response erhalten" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Authorized from remote pdp service" msgstr "Authorisiert durch remote pdp service" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:172 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:206 msgid "Unauthorized from remote pdp service" msgstr "Nicht authorisiert von entferntem PDP service" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 #, fuzzy msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "Kann SAMLAssertion SecAttr nicht erhalten von message context" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:158 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" 
msgstr "Fehlendes oder leeres CertificatePath Element" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "Fehlendes oder leeres KeyPath Element" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:171 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" "Sowohl CACertificatePath als auch CACertificatesDir Elemente sind fehlend " "oder leer" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:185 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" "Fehlendes oder leeres CertificatePath oder CACertificatesDir Element; werde " "nur die Signature überprüfen, die Nachricht jedoch nicht authentifizieren" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:189 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, fuzzy, c-format msgid "Processing type not supported: %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "Konnte SAML Token nicht aus eingehender SOAP herausparsen" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:219 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "Konnte SAML Token aus eingehender SOAP nicht authentifizieren" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:222 msgid "Succeeded to authenticate SAMLToken" msgstr "Erfolreiche Anthentifikation von SAMLTOken" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 #, fuzzy, c-format msgid "No response from AA service %s" msgstr "Keine Antwort von AA service %s schlug fehl" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 #, fuzzy, c-format msgid "SOAP Request to AA service %s failed" msgstr "SOAP Request zu AA service %s schlug fehl" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:299 msgid "Cannot find content under response soap message" msgstr "Kann Inhalt in SOAP-Antwort nicht finden" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Cannot find under response soap message:" msgstr "Kann in SOAP-Antwort nicht finden" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:320 msgid "The Response is not going to this end" msgstr "Die Antwort geht nicht bis zu diesem Ende" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:327 msgid "The StatusCode is Success" msgstr "Der StatusCode ist Success" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:333 msgid "Succeeded to verify the signature under " msgstr "Erfolgreiche Überprüfung der Signatur unter " #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:336 msgid "Failed to verify the signature under " msgstr "Fehler bei der Überprüfung der Signatur unter " #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:347 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "Konnte SAML Token für ausgehendes SOAP nicht generieren" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:357 #, fuzzy msgid "SAML Token handler is not configured" msgstr "SAML Token handler ist nicht konfiguriert" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:28 #, fuzzy, c-format msgid "Access list location: %s" msgstr "Zugriffslist location: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:38 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." 
msgstr "" "Keine Policy Datei oder DNs angegeben für simplelist.pdp, bitte setzen Sie " "ein location Attribut oder zumindest ein DN Element für den PDP Knoten in " "der Konfiguration" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:41 #, fuzzy, c-format msgid "Subject to match: %s" msgstr "Subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:44 #, fuzzy, c-format msgid "Policy subject: %s" msgstr "Subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:46 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:72 #, fuzzy, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "Authorisiert durch simplelist.pdp" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:53 #, fuzzy msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" "Die Policy Datei Konfiguration für simplelist.pdb existiert nicht, bitte " "überprüfen Sie das location Attribut für simplelist PDP node in der Serivice " "Konfiguration" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:60 #, fuzzy, c-format msgid "Policy line: %s" msgstr "Policy Zeile: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:78 #, fuzzy, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "Nicht authorisiert von simplelist.pdp" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "Starte Test" #: src/hed/shc/test.cpp:101 #, fuzzy msgid "Input request from a file: Request.xml" msgstr "Input request von einer Datei: Request.xml" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "Es gibt %d Subjekte, die wenigstens eine Policy erfüllen" #: src/hed/shc/test.cpp:121 #, fuzzy, c-format msgid "Attribute Value (1): %s" msgstr "Attribut Wert (1): %s" #: src/hed/shc/test.cpp:132 #, fuzzy msgid "Input request from code" msgstr "Eingabe-Aufforderung von code" #: src/hed/shc/test.cpp:211 #, fuzzy, c-format msgid "Attribute Value (2): %s" msgstr "Attributewert (2): %s" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 #, fuzzy msgid "Can not dynamically produce Policy" msgstr "Kann Policy nicht dynamisch produzieren" #: src/hed/shc/testinterface_arc.cpp:138 #, fuzzy, c-format msgid "Attribute Value inside Subject: %s" msgstr "Attributwert in Subjekt: %s" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "Die Anfrage hat die Policy Evaluierung bestanden" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "Fehlendes oder leeres PasswordSource Element" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "Passwort Kodierung nicht unterstützt: %s" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "Fehlendes oder leeres Username Element" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 #, fuzzy msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "Konnte SAML Token nicht aus eingehender SOAP herausparsen" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "Konnte Username Token nicht von eingehender SOAP 
Nachricht herauslesen" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" "Fehler bei der Authentifikation des Username Token in der einngehenden SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "Erfolgreiche Authentifikation des UsernameToken" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "Fehler bei Erstellen von Nutzernamen Token für ausgehende SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "Nutzernamen Token handler ist nicht konfiguriert" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "Fehler bei Parsen von X509 Token in eigehendem SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "Fehler bei Verifizieren von X509 Token in eigehendem SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "Fehler bei Authentifizieren von X509 Token in eigehendem SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "X509Token erfolgreich authentifiziert" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "Fehler bei Generieren von X509 Token für ausgehende SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "X509 Token handler ist nicht konfiguriert" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "Kann Funktion nicht anlegen: FunctionId existiert nicht" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, fuzzy, c-format msgid "Can not create function %s" msgstr "Kann Funktion %s nicht anlegen" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:87 #, fuzzy msgid "Can not find XACMLPDPContext" msgstr "Kann XACMLPDPContext nciht finden" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:135 msgid "Evaluator for XACMLPDP was not loaded" msgstr "Evaluator für XACMLPDP wurde nicht geladen" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:150 src/hed/shc/xacmlpdp/XACMLPDP.cpp:158 #, fuzzy msgid "Failed to convert security information to XACML request" msgstr "" "Fehler bei Konvertierung der security information zu einer XACML Anfrage" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:166 #, c-format msgid "XACML request: %s" msgstr "XACML Anfrage: %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:178 msgid "Authorized from xacml.pdp" msgstr "Authorisiert durch xaml.pdp" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 #, fuzzy msgid "UnAuthorized from xacml.pdp" msgstr "UnAuthorisiert durch xaml.pdp" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "Kann element mit passendem namespace nicht finden" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "Kein Ziel innerhalb der Policy vorhanden" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "Anfrage ist leer" #: 
src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "Kann element mit passendem namespace nicht finden" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "Ungültiger Effekt" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "Kein Ziel verfügbar in dieser Regel" #: src/libs/data-staging/DTR.cpp:81 src/libs/data-staging/DTR.cpp:85 #, fuzzy, c-format msgid "Could not handle endpoint %s" msgstr "konnte Ende von clientxrsl nicht finden" #: src/libs/data-staging/DTR.cpp:95 #, fuzzy msgid "Source is the same as destination" msgstr "Quelle Ziel" #: src/libs/data-staging/DTR.cpp:175 #, fuzzy, c-format msgid "Invalid ID: %s" msgstr "Ungültige URL: %s" #: src/libs/data-staging/DTR.cpp:212 #, fuzzy, c-format msgid "%s->%s" msgstr "%s (%s)" #: src/libs/data-staging/DTR.cpp:320 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:335 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:338 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DTRList.cpp:216 #, c-format msgid "Boosting priority from %i to %i due to incoming higher priority DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, fuzzy, c-format msgid "Cleaning up after failure: deleting %s" msgstr "Lege Verzeichnis %s an" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:45 #, fuzzy msgid "No source defined" msgstr "Anfrage %s schlug fehl" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:49 #, fuzzy msgid "No destination defined" msgstr "Beendigung des Jobs schlug fehl" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:157 #, fuzzy, c-format msgid "Bad checksum format %s" msgstr "Errechnete checksum: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:196 #, fuzzy, c-format msgid "Failed to run command: %s" msgstr "Fehler bei Anlegen von GSI Context: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:235 #, fuzzy, c-format msgid "DataDelivery: %s" msgstr "Fataler Fehler: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:247 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:266 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:72 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, fuzzy, c-format
msgid "Connecting to Delivery service at %s" msgstr "Kein Verbindungsaufbau zu Server: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:101 #, fuzzy, c-format msgid "Failed to set up credential delegation with %s" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:107 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:185 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:251 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:337 #, c-format msgid "" "Request:\n" "%s" msgstr "" "Anfrage:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:113 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:343 #, fuzzy, c-format msgid "Could not connect to service %s: %s" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:121 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:351 #, fuzzy, c-format msgid "No SOAP response from Delivery service %s" msgstr "Keine Antwort von AA service %s schlug fehl" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:126 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:204 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:278 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:357 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Antwort:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, fuzzy, c-format msgid "Failed to start transfer request: %s" msgstr "Fehler bei Transfer von Daten" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:142 #, fuzzy, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "Formatierungfehler in Dati %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:150 #, fuzzy, c-format msgid "Could not make new transfer request: %s: %s" msgstr "Konnte temporäre Datei nicht anlegen: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:155 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:192 #, fuzzy, c-format msgid "Failed to send cancel request: %s" msgstr "Kann Kanal stdout nicht nutzen" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:199 #, fuzzy msgid "Failed to cancel: No SOAP response" msgstr "Keine SOAP Antwort" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:213 #, fuzzy, c-format msgid "Failed to cancel transfer request: %s" msgstr "Fehler bei Transfer von Daten" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:220 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:301 #, fuzzy, c-format msgid "Bad format in XML response: %s" msgstr "Formatierungfehler in Dati %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:227 #, fuzzy, c-format msgid "Failed to cancel: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:271 #, fuzzy msgid "No SOAP response from delivery service" msgstr "Keine Antwort von Server erhalten" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:292 #, fuzzy, c-format msgid "Failed to query state: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:366 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:374 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:382 #, fuzzy, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "Konnte replica nicht finden: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:390 
#, fuzzy, c-format msgid "Dir %s allowed at service %s" msgstr "Delegation service: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:484 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:498 #, fuzzy msgid "Failed locating credentials" msgstr "Fehler beim Auflisten von Meta-Dateien" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:503 #, fuzzy msgid "Failed to initiate client connection" msgstr "Initiierung der Delegation fehlgeschlagen" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:509 msgid "Client connection has no entry point" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:518 msgid "Initiating delegation procedure" msgstr "Initialisierung der Delegations-Prozedur" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:520 msgid "Failed to initiate delegation credentials" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/libs/data-staging/DataStagingDelivery.cpp:97 #, c-format msgid "%5u s: %10.1f kB %8.1f kB/s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:156 #, fuzzy msgid "Unexpected arguments" msgstr "Kann doc Argument nicht anlegen" #: src/libs/data-staging/DataStagingDelivery.cpp:159 #, fuzzy msgid "Source URL missing" msgstr "ServiceURL fehlt" #: src/libs/data-staging/DataStagingDelivery.cpp:162 #, fuzzy msgid "Destination URL missing" msgstr "Ziel: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:166 #, c-format msgid "Source URL not valid: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:170 #, fuzzy, c-format msgid "Destination URL not valid: %s" msgstr "Ziel: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #, fuzzy, c-format msgid "Unknown transfer option: %s" msgstr "Datentransfer abgebrochen: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:272 #, fuzzy, c-format msgid "Source URL not supported: %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:277 #: src/libs/data-staging/DataStagingDelivery.cpp:299 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:294 #, fuzzy, c-format msgid "Destination URL not supported: %s" msgstr "Delegation role nicht unterstützt: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:348 #, fuzzy, c-format msgid "Will calculate %s checksum" msgstr "Errechneted checksum: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:359 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:572 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:582 #, fuzzy, c-format msgid "Failed cleaning up destination %s" msgstr "Fehler bei Schreiben zu Ziel" #: src/libs/data-staging/Processor.cpp:49 #: src/services/candypond/CandyPond.cpp:117 msgid "Error creating cache" msgstr "" #: src/libs/data-staging/Processor.cpp:73 #, c-format msgid "Forcing re-download of file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:90 #, c-format msgid "Will wait around %is" msgstr "" #: src/libs/data-staging/Processor.cpp:109 #, fuzzy, c-format msgid "Force-checking source of cache file %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/libs/data-staging/Processor.cpp:112 #, fuzzy, c-format msgid "Source check requested but failed: %s" msgstr "Anlegen von Socket schlug fehl: %s" #: src/libs/data-staging/Processor.cpp:132 msgid "Permission checking failed, will try downloading 
without using cache" msgstr "" #: src/libs/data-staging/Processor.cpp:162 #, fuzzy, c-format msgid "Will download to cache file %s" msgstr "Lese Archiv Datei %s" #: src/libs/data-staging/Processor.cpp:183 msgid "Looking up source replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:205 #: src/libs/data-staging/Processor.cpp:432 #, fuzzy msgid "Resolving destination replicas" msgstr "Probleme bei Auflösen von Zieladresse" #: src/libs/data-staging/Processor.cpp:222 #, fuzzy msgid "No locations for destination different from source found" msgstr "Keine locations gefunden für Ziel" #: src/libs/data-staging/Processor.cpp:233 #, fuzzy msgid "Pre-registering destination in index service" msgstr "Erstellen und senden einer Index Service Anfrage" #: src/libs/data-staging/Processor.cpp:259 msgid "Resolving source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:273 #, fuzzy, c-format msgid "No replicas found for %s" msgstr "Keine locations gefunden für %s" #: src/libs/data-staging/Processor.cpp:293 #, fuzzy, c-format msgid "Checking %s" msgstr "Herausforderung: %s" #: src/libs/data-staging/Processor.cpp:302 #: src/libs/data-staging/Processor.cpp:360 #, fuzzy msgid "Metadata of replica and index service differ" msgstr "" "Файл Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ñовпадает Ñ Ð¸Ñходным.\n" "%1" #: src/libs/data-staging/Processor.cpp:310 #, fuzzy, c-format msgid "Failed checking source replica %s: %s" msgstr "Fehler bei Verbinden zu server %s:%d" #: src/libs/data-staging/Processor.cpp:336 msgid "Querying source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:348 #, fuzzy, c-format msgid "Failed checking source replica: %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/libs/data-staging/Processor.cpp:354 #, fuzzy msgid "Failed checking source replica" msgstr "Fehler bei Lesen von Quelle" #: src/libs/data-staging/Processor.cpp:391 msgid "Overwrite requested - will pre-clean destination" msgstr "" #: src/libs/data-staging/Processor.cpp:400 #, fuzzy msgid "Finding existing destination replicas" msgstr "Fehler bei Schreiben zu Ziel" #: src/libs/data-staging/Processor.cpp:412 #, fuzzy, c-format msgid "Failed to delete replica %s: %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/libs/data-staging/Processor.cpp:426 #, fuzzy, c-format msgid "Unregistering %s" msgstr "Außer Acht lassend %s" #: src/libs/data-staging/Processor.cpp:437 #, fuzzy msgid "Pre-registering destination" msgstr "Probleme bei Auflösen von Zieladresse" #: src/libs/data-staging/Processor.cpp:443 #, fuzzy, c-format msgid "Failed to pre-clean destination: %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/libs/data-staging/Processor.cpp:452 #, fuzzy msgid "Destination already exists" msgstr "LFN existiert bereits in LFC" #: src/libs/data-staging/Processor.cpp:476 msgid "Preparing to stage source" msgstr "" #: src/libs/data-staging/Processor.cpp:489 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:495 #, fuzzy msgid "No physical files found for source" msgstr "Keine locations gefunden für %s" #: src/libs/data-staging/Processor.cpp:513 #, fuzzy msgid "Preparing to stage destination" msgstr "Kann nicht zu Ziel schreiben" #: src/libs/data-staging/Processor.cpp:526 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:532 #, fuzzy msgid "No physical files found for destination" msgstr "Keine locations gefunden für Ziel" #: src/libs/data-staging/Processor.cpp:558 msgid 
"Releasing source" msgstr "" #: src/libs/data-staging/Processor.cpp:562 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:567 #, fuzzy msgid "Releasing destination" msgstr "Probleme bei Auflösen von Zieladresse" #: src/libs/data-staging/Processor.cpp:571 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:575 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:597 #, fuzzy, c-format msgid "Finalising current replica %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/libs/data-staging/Processor.cpp:617 #, fuzzy msgid "Removing pre-registered destination in index service" msgstr "Keine execution services in index service registriert" #: src/libs/data-staging/Processor.cpp:620 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:626 msgid "Registering destination replica" msgstr "" #: src/libs/data-staging/Processor.cpp:629 #, fuzzy, c-format msgid "Failed to register destination replica: %s" msgstr "Fehler bei Ändern von owner des Zielverzeichnisses zu %i: %s" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:662 msgid "Error creating cache. Stale locks may remain." msgstr "" #: src/libs/data-staging/Processor.cpp:695 #, c-format msgid "Linking/copying cached file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:716 #, fuzzy, c-format msgid "Failed linking cache file to %s" msgstr "Fehler beim Auflisten von Dateien" #: src/libs/data-staging/Processor.cpp:720 #, fuzzy, c-format msgid "Error linking cache file to %s." msgstr "Fehler bei Entfernen von Cache-Datei %s: %s" #: src/libs/data-staging/Processor.cpp:741 #: src/libs/data-staging/Processor.cpp:748 #, fuzzy msgid "Adding to bulk request" msgstr "Füge Anfrage-Token %s hinzu" #: src/libs/data-staging/Scheduler.cpp:174 #: src/libs/data-staging/Scheduler.cpp:181 #, fuzzy msgid "source" msgstr "Quelle: %s" #: src/libs/data-staging/Scheduler.cpp:174 #: src/libs/data-staging/Scheduler.cpp:181 #, fuzzy msgid "destination" msgstr "Ziel: %s" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" #: src/libs/data-staging/Scheduler.cpp:212 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:215 msgid "Linking mapped file" msgstr "" #: src/libs/data-staging/Scheduler.cpp:222 #, fuzzy, c-format msgid "Failed to create link: %s. 
Will not use mapped URL" msgstr "Fehler bei Anlegen von soft link: %s" #: src/libs/data-staging/Scheduler.cpp:247 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" #: src/libs/data-staging/Scheduler.cpp:255 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" #: src/libs/data-staging/Scheduler.cpp:261 msgid "File is cacheable, will check cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:264 #: src/libs/data-staging/Scheduler.cpp:289 #, c-format msgid "File is currently being cached, will wait %is" msgstr "" #: src/libs/data-staging/Scheduler.cpp:283 #, fuzzy msgid "Timed out while waiting for cache lock" msgstr "Timeout beim Lesen des response header" #: src/libs/data-staging/Scheduler.cpp:293 msgid "Checking cache again" msgstr "" #: src/libs/data-staging/Scheduler.cpp:313 #, fuzzy msgid "Destination file is in cache" msgstr "Destination muss LFN enthalten" #: src/libs/data-staging/Scheduler.cpp:317 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:320 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:331 msgid "Problem with index service, will release cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:335 msgid "Problem with index service, will proceed to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:345 msgid "Checking source file is present" msgstr "" #: src/libs/data-staging/Scheduler.cpp:353 msgid "Error with source file, moving to next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:375 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:377 #, c-format msgid "No more replicas, will use %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:380 #, fuzzy, c-format msgid "Checking replica %s" msgstr "Suche nache Existenz von %s" #: src/libs/data-staging/Scheduler.cpp:392 #, fuzzy msgid "Pre-clean failed" msgstr "Delegation nicht erfolgreich: " #: src/libs/data-staging/Scheduler.cpp:397 msgid "Pre-clean failed, will still try to copy" msgstr "" #: src/libs/data-staging/Scheduler.cpp:405 #, fuzzy msgid "Source or destination requires staging" msgstr "Quelle Ziel" #: src/libs/data-staging/Scheduler.cpp:409 msgid "No need to stage source or destination, skipping staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:439 msgid "Staging request timed out, will release request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:443 #, fuzzy msgid "Querying status of staging request" msgstr "Erstellen und senden von Anfrage" #: src/libs/data-staging/Scheduler.cpp:452 #, fuzzy msgid "Releasing requests" msgstr "Verarbeite %s Anfrage" #: src/libs/data-staging/Scheduler.cpp:477 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "" #: src/libs/data-staging/Scheduler.cpp:492 #, fuzzy, c-format msgid "Transfer failed: %s" msgstr "Einige Transfers schlugen fehl" #: src/libs/data-staging/Scheduler.cpp:502 msgid "Releasing request(s) made during staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:505 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:526 msgid "Trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:531 #, fuzzy msgid "unregister" 
msgstr "Außer Acht lassend %s" #: src/libs/data-staging/Scheduler.cpp:531 #, fuzzy msgid "register" msgstr "Außer Acht lassend %s" #: src/libs/data-staging/Scheduler.cpp:530 #, c-format msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:534 msgid "Destination is not index service, skipping replica registration" msgstr "" #: src/libs/data-staging/Scheduler.cpp:547 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:556 msgid "Will process cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:560 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:574 #, fuzzy msgid "Cancellation complete" msgstr "Transfer vollständig" #: src/libs/data-staging/Scheduler.cpp:588 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:594 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:603 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:621 #, fuzzy msgid "Proxy has expired" msgstr "Proxy store:" #: src/libs/data-staging/Scheduler.cpp:632 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:648 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:650 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:656 #, fuzzy msgid "Finished successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/libs/data-staging/Scheduler.cpp:666 #, fuzzy msgid "Returning to generator" msgstr "Wiederholte Nutzung von Verbindung" #: src/libs/data-staging/Scheduler.cpp:840 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:894 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:902 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:915 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:931 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:958 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:968 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1172 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1182 msgid "Processing thread timed out. 
Restarting DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1250 msgid "Will use bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1272 msgid "No delivery endpoints available, will try later" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1291 msgid "Scheduler received NULL DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1301 msgid "Scheduler received invalid DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1390 #, fuzzy msgid "Scheduler starting up" msgstr "Konnte job nicht starten" #: src/libs/data-staging/Scheduler.cpp:1391 msgid "Scheduler configuration:" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1392 #, c-format msgid " Pre-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1393 #, c-format msgid " Delivery slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1394 #, c-format msgid " Post-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1395 #, c-format msgid " Emergency slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1396 #, c-format msgid " Prepared slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1397 #, fuzzy, c-format msgid "" " Shares configuration:\n" "%s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/libs/data-staging/Scheduler.cpp:1400 msgid " Delivery service: LOCAL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1401 #, fuzzy, c-format msgid " Delivery service: %s" msgstr "Delegation service: %s" #: src/libs/data-staging/Scheduler.cpp:1406 #, fuzzy msgid "Failed to create DTR dump thread" msgstr "Fehler bei Anlegen von ldap bind thread (%s)" #: src/libs/data-staging/Scheduler.cpp:1423 #: src/services/data-staging/DataDeliveryService.cpp:531 #, c-format msgid "DTR %s cancelled" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:30 #, fuzzy msgid "Generator started" msgstr "Start start" #: src/libs/data-staging/examples/Generator.cpp:31 #, fuzzy msgid "Starting DTR threads" msgstr "Starte Test" #: src/libs/data-staging/examples/Generator.cpp:44 msgid "No valid credentials found, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:55 #, fuzzy, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Probleme bei Auflösen von Zieladresse" #: src/services/a-rex/arex.cpp:340 src/services/candypond/CandyPond.cpp:569 #: src/services/data-staging/DataDeliveryService.cpp:705 #, c-format msgid "SOAP operation is not supported: %s" msgstr "" #: src/services/a-rex/arex.cpp:358 src/services/a-rex/arex.cpp:403 #, fuzzy, c-format msgid "Security Handlers processing failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/a-rex/arex.cpp:381 msgid "" "Can't obtain configuration. Public information is disallowed for this user." msgstr "" #: src/services/a-rex/arex.cpp:388 msgid "Can't obtain configuration. Only public information is provided." 
msgstr "" #: src/services/a-rex/arex.cpp:416 src/services/a-rex/rest/rest.cpp:740 #, fuzzy, c-format msgid "Connection from %s: %s" msgstr "Verbindung zu %s schlug fehl: %s" #: src/services/a-rex/arex.cpp:419 src/services/a-rex/rest/rest.cpp:744 #, c-format msgid "process: method: %s" msgstr "" #: src/services/a-rex/arex.cpp:420 src/services/a-rex/rest/rest.cpp:745 #, c-format msgid "process: endpoint: %s" msgstr "" #: src/services/a-rex/arex.cpp:445 #, c-format msgid "process: id: %s" msgstr "" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "process: subop: %s" msgstr "" #: src/services/a-rex/arex.cpp:453 #, c-format msgid "process: subpath: %s" msgstr "" #: src/services/a-rex/arex.cpp:491 src/services/candypond/CandyPond.cpp:543 #: src/services/data-staging/DataDeliveryService.cpp:665 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "" #: src/services/a-rex/arex.cpp:496 src/services/candypond/CandyPond.cpp:548 #: src/services/data-staging/DataDeliveryService.cpp:670 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "" #: src/services/a-rex/arex.cpp:499 src/services/candypond/CandyPond.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:673 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "" #: src/services/a-rex/arex.cpp:526 #, fuzzy msgid "POST request on special path is not supported" msgstr "Es wurde keine authorization response erwidert" #: src/services/a-rex/arex.cpp:531 msgid "process: factory endpoint" msgstr "" #: src/services/a-rex/arex.cpp:575 src/services/candypond/CandyPond.cpp:580 #: src/services/data-staging/DataDeliveryService.cpp:716 #: src/tests/echo/echo.cpp:158 #, c-format msgid "process: response=%s" msgstr "" #: src/services/a-rex/arex.cpp:580 msgid "Per-job POST/SOAP requests are not supported" msgstr "" #: src/services/a-rex/arex.cpp:589 msgid "process: GET" msgstr "" #: src/services/a-rex/arex.cpp:590 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:623 #, fuzzy msgid "process: HEAD" msgstr "Prozess: POST" #: src/services/a-rex/arex.cpp:624 #, c-format msgid "HEAD: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:657 msgid "process: PUT" msgstr "" #: src/services/a-rex/arex.cpp:690 #, fuzzy msgid "process: DELETE" msgstr "Prozess: POST" #: src/services/a-rex/arex.cpp:723 #, c-format msgid "process: method %s is not supported" msgstr "" #: src/services/a-rex/arex.cpp:726 msgid "process: method is not defined" msgstr "" #: src/services/a-rex/arex.cpp:836 #, fuzzy msgid "Failed to run Grid Manager thread" msgstr "Fehler bei Transfer von Daten" #: src/services/a-rex/arex.cpp:889 #, fuzzy, c-format msgid "Failed to process configuration in %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/arex.cpp:894 #, fuzzy msgid "No control directory set in configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/arex.cpp:898 #, fuzzy msgid "No session directory set in configuration" msgstr "Fehlendes oder leeres KeyPath Element" #: src/services/a-rex/arex.cpp:902 msgid "No LRMS set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:961 #, fuzzy, c-format msgid "Failed to create control directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/arex.cpp:965 #, fuzzy, c-format msgid "Failed to update control directory %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/arex.cpp:972 #, fuzzy msgid 
"Failed to start GM threads" msgstr "Fehler bei Ablage von FTP Datei" #: src/services/a-rex/arex.cpp:1008 #, c-format msgid "Created entry for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1010 #, fuzzy, c-format msgid "Failed to create entry for JWT issuer %s" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/arex.cpp:1013 #, c-format msgid "Empty data for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1016 #, fuzzy, c-format msgid "Failed to read data for JWT issuer %s" msgstr "Konnte Metadaen für Datei %s nicht finden" #: src/services/a-rex/authop.cpp:26 #, fuzzy msgid "CheckOperationAllowed: missing configuration" msgstr "Fehler bei Initialisierung der condition" #: src/services/a-rex/authop.cpp:80 msgid "CheckOperationAllowed: allowed due to missing configuration scopes" msgstr "" #: src/services/a-rex/authop.cpp:83 #, c-format msgid "CheckOperationAllowed: token scopes: %s" msgstr "" #: src/services/a-rex/authop.cpp:84 #, fuzzy, c-format msgid "CheckOperationAllowed: configuration scopes: %s" msgstr "Nicht unterstützte URL für Ziel: %s" #: src/services/a-rex/authop.cpp:87 msgid "CheckOperationAllowed: allowed due to matching scopes" msgstr "" #: src/services/a-rex/authop.cpp:91 msgid "CheckOperationAllowed: token scopes do not match required scopes" msgstr "" #: src/services/a-rex/authop.cpp:97 msgid "CheckOperationAllowed: allowed for TLS connection" msgstr "" #: src/services/a-rex/authop.cpp:101 msgid "CheckOperationAllowed: no supported identity found" msgstr "" #: src/services/a-rex/cachecheck.cpp:37 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:710 #, c-format msgid "Error with cache configuration: %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:53 #: src/services/candypond/CandyPond.cpp:318 msgid "Error with cache configuration" msgstr "" #: src/services/a-rex/cachecheck.cpp:78 #: src/services/candypond/CandyPond.cpp:146 #: src/services/candypond/CandyPond.cpp:343 #, c-format msgid "Looking up URL %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:80 #: src/services/candypond/CandyPond.cpp:155 #, fuzzy, c-format msgid "Cache file is %s" msgstr "Lese Archiv Datei %s" #: src/services/a-rex/change_activity_status.cpp:22 #: src/services/a-rex/put.cpp:163 src/services/a-rex/put.cpp:204 #, c-format msgid "%s: there is no such job: %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:30 #, c-format msgid "%s: put log %s: there is no payload" msgstr "" #: src/services/a-rex/change_activity_status.cpp:36 #, c-format msgid "%s: put log %s: unrecognized payload" msgstr "" #: src/services/a-rex/change_activity_status.cpp:75 #, fuzzy msgid "A-REX REST: Failed to resume job" msgstr "Konnte job nicht starten" #: src/services/a-rex/change_activity_status.cpp:79 #, fuzzy, c-format msgid "A-REX REST: State change not allowed: from %s to %s" msgstr "" "ChangeActivityStatus: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" "%s" #: src/services/a-rex/create_activity.cpp:24 msgid "NEW: put new job: there is no payload" msgstr "" #: src/services/a-rex/create_activity.cpp:28 msgid "NEW: put new job: max jobs total limit reached" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:47 msgid "Wiping and re-creating whole storage" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:207 #: src/services/a-rex/delegation/DelegationStore.cpp:309 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:269 msgid "DelegationStore: PeriodicCheckConsumers failed to 
resume iterator" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:289 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" #: src/services/a-rex/get.cpp:172 src/services/a-rex/get.cpp:227 #: src/services/a-rex/get.cpp:313 #, c-format msgid "Get: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:380 #, c-format msgid "Head: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:436 #, fuzzy msgid "Failed to extract credential information" msgstr "Fehler beim Verbindungen für Erneuerung von credentials" #: src/services/a-rex/get.cpp:439 #, fuzzy, c-format msgid "Checking cache permissions: DN: %s" msgstr "Check: looking für Metadata: %s" #: src/services/a-rex/get.cpp:440 #, fuzzy, c-format msgid "Checking cache permissions: VO: %s" msgstr "Check: looking für Metadata: %s" #: src/services/a-rex/get.cpp:442 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "" #: src/services/a-rex/get.cpp:452 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "" #: src/services/a-rex/get.cpp:455 #, c-format msgid "DN %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:458 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "" #: src/services/a-rex/get.cpp:461 #, c-format msgid "VO %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:467 src/services/a-rex/get.cpp:486 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "" #: src/services/a-rex/get.cpp:475 src/services/a-rex/get.cpp:494 #, c-format msgid "VOMS attr %s matches %s" msgstr "" #: src/services/a-rex/get.cpp:476 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "" #: src/services/a-rex/get.cpp:479 src/services/a-rex/get.cpp:498 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:495 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "" #: src/services/a-rex/get.cpp:501 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "" #: src/services/a-rex/get.cpp:507 #, c-format msgid "No match found in cache access rules for %s" msgstr "" #: src/services/a-rex/get.cpp:517 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "" #: src/services/a-rex/get.cpp:520 #, fuzzy, c-format msgid "Get from cache: Invalid URL %s" msgstr "Ungültige URL: %s" #: src/services/a-rex/get.cpp:537 #, fuzzy msgid "Get from cache: Error in cache configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/get.cpp:546 msgid "Get from cache: File not in cache" msgstr "" #: src/services/a-rex/get.cpp:549 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" #: src/services/a-rex/get.cpp:559 msgid "Get from cache: Cached file is locked" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. 
Cache cleaning messages will be logged to " "this log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:114 #, fuzzy msgid "Failed to start cache clean script" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/services/a-rex/grid-manager/GridManager.cpp:115 #, fuzzy msgid "Cache cleaning script failed" msgstr "Die Job Terminierungs-Anfrage schlug fehl" #: src/services/a-rex/grid-manager/GridManager.cpp:183 #, c-format msgid "External request for attention %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:201 #, fuzzy, c-format msgid "Failed to open heartbeat file %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/services/a-rex/grid-manager/GridManager.cpp:223 msgid "Starting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:224 #, c-format msgid "Used configuration file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:232 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:244 msgid "Failed to start new thread: cache won't be cleaned" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:251 #, fuzzy msgid "Failed to activate Jobs Processing object, exiting Grid Manager thread" msgstr "Fehler bei Transfer von Daten" #: src/services/a-rex/grid-manager/GridManager.cpp:260 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:263 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:270 #, fuzzy msgid "Failed to start new thread for monitoring job requests" msgstr "Fehler bei Transfer von Daten" #: src/services/a-rex/grid-manager/GridManager.cpp:276 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:279 msgid "Starting data staging threads" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:283 msgid "Starting jobs' monitoring" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:291 #, c-format msgid "" "SSHFS mount point of session directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:295 #, c-format msgid "" "SSHFS mount point of runtime directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:300 #, c-format msgid "" "SSHFS mount point of cache directory (%s) is broken - waiting for " "reconnect ..." 
msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:354 #, fuzzy msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "Fehler bei der Initialisierung der delegation credentials" #: src/services/a-rex/grid-manager/GridManager.cpp:368 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:371 msgid "Stopping jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:373 msgid "Exiting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:391 msgid "Requesting to stop job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:399 msgid "Waiting for main job processing thread to exit" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:401 msgid "Stopped job processing" msgstr "" #: src/services/a-rex/grid-manager/accounting/AAR.cpp:73 msgid "Cannot find information abouto job submission endpoint" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:58 #, fuzzy, c-format msgid "Failed to read database schema file at %s" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:68 #, fuzzy msgid "Accounting database initialized successfully" msgstr "erfolgreich angelegt, ID: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:70 msgid "Accounting database connection has been established" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:80 #, c-format msgid "%s. SQLite database error: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:82 #, c-format msgid "SQLite database error: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:110 #, c-format msgid "Directory %s to store accounting database has been created." msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:112 #, c-format msgid "" "Accounting database cannot be created. Faile to create parent directory %s." 
msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:116 #, c-format msgid "Accounting database cannot be created: %s is not a directory" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:123 #, fuzzy msgid "Failed to initialize accounting database" msgstr "Fehler bei Initialisierung des main Python Threads" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:130 #, c-format msgid "Accounting database file (%s) is not a regular file" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:136 #, fuzzy msgid "Error opening accounting database" msgstr "Fehler bei Öffnen von Meta-Datei zum schreiben %s: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:154 msgid "Closing connection to SQLite accounting database" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:243 #, fuzzy, c-format msgid "Failed to fetch data from %s accounting database table" msgstr "Konnte Metadaen für Datei %s nicht finden" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:260 #, fuzzy, c-format msgid "Failed to add '%s' into the accounting database %s table" msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:327 #, fuzzy msgid "Failed to fetch data from accounting database Endpoints table" msgstr "Konnte Metadaen für Datei %s nicht finden" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:344 #, c-format msgid "" "Failed to add '%s' URL (interface type %s) into the accounting database " "Endpoints table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:370 #, fuzzy, c-format msgid "Failed to query AAR database ID for job %s" msgstr "Fehler beim Verbinden zu RLS server: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:431 #, fuzzy, c-format msgid "Failed to insert AAR into the database for job %s" msgstr "Fehler bei Lesen von Objekt %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:432 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:481 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:512 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:528 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:544 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:565 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:581 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:596 #, c-format msgid "SQL statement used: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:437 #, fuzzy, c-format msgid "Failed to write authtoken attributes for job %s" msgstr "Fehler beim Authentifizieren: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:441 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:498 #, fuzzy, c-format msgid "Failed to write event records for job %s" msgstr "Fehler bei Schreiben zu Datein %s: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:452 #, c-format msgid "" "Cannot to update AAR. Cannot find registered AAR for job %s in accounting " "database." 
msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:480 #, fuzzy, c-format msgid "Failed to update AAR in the database for job %s" msgstr "Fahler bei Herunterladen %s zu %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:486 #, fuzzy, c-format msgid "Failed to write RTEs information for the job %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:490 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:494 #, fuzzy, c-format msgid "Failed to write data transfers information for the job %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:590 #, fuzzy, c-format msgid "Unable to add event: cannot find AAR for job %s in accounting database." msgstr "Konnte Job Status-Informationen nicht erhalten." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:73 #, fuzzy, c-format msgid "Unknown option %s" msgstr "Datentransfer abgebrochen: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "Path to user's proxy file should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:98 msgid "Path to .local job status file is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:106 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:109 #, fuzzy msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "Kann hostname von uname nciht ermitteln" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:118 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:126 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:135 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:148 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:151 #, c-format msgid "interface is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "localid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:161 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:164 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:166 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." 
msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:174 #, fuzzy, c-format msgid "Job timestamp successfully parsed as %s" msgstr "erfolgreich angelegt, ID: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:178 msgid "Can not read information from the local job status file" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:194 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly. Please submit the bug to bugzilla." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:204 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:217 #, fuzzy, c-format msgid "Found VOMS AC attribute: %s" msgstr " Attribute" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:230 #, fuzzy msgid "VOMS AC attribute is a tag" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 msgid "Skipping policyAuthority VOMS AC attribute" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:241 #, fuzzy msgid "VOMS AC attribute is the FQAN" msgstr "Konnte VOMS Attribut nicht herauslesen" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:249 msgid "No FQAN found. Using None as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:263 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:268 #, c-format msgid "Writing the info to the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:276 #, fuzzy, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:36 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:40 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:44 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:58 #, c-format msgid "Wrong option in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:69 #, fuzzy, c-format msgid "Can't read configuration file at %s" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:79 #, fuzzy, c-format msgid "Can't recognize type of configuration file at %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:82 msgid "Could not determine configuration type or configuration is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:163 #, fuzzy msgid "lrms is empty" msgstr "Policy is leer" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:196 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:205 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:214 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:223 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:232 #, fuzzy msgid "Missing number in maxjobs" msgstr "Fehlendes security Objekt in Nachricht" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:199 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:208 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:217 #: 
src/services/a-rex/grid-manager/conf/CoreConfig.cpp:226 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:235 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:245 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:251 msgid "mail parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:257 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:261 msgid "Wrong number in defaultttl command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:267 #, fuzzy msgid "Wrong number in maxrerun command" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:274 msgid "State name for plugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:278 msgid "Options for plugin are missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:281 #, c-format msgid "Failed to register plugin for state %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:287 msgid "Session root directory is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:290 msgid "Junk in sessiondir command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:302 #, fuzzy msgid "Missing directory in controldir command" msgstr "Fehlendes oder leeres KeyPath Element" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:307 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:312 msgid "User for helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:315 msgid "Only user '.' 
for helper program is supported" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:318 msgid "Helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:339 #, fuzzy msgid "Wrong option in fixdirectories" msgstr "Fehler beim Öffnen von Verzeichs: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 #, fuzzy msgid "Wrong option in delegationdb" msgstr "Fehler beim Öffnen von Verzeichs: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:375 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 msgid "forcedefaultvoms parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:486 #, fuzzy msgid "Wrong number in maxjobdesc command" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:535 msgid "Missing file name in [arex/jura] logfile" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:546 #, fuzzy, c-format msgid "Wrong number in urdelivery_frequency: %s" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:601 msgid "No queue name given in queue block name" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:617 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "advertisedvo parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:117 #, c-format msgid "\tSession root dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:118 #, c-format msgid "\tControl dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:119 #, c-format msgid "\tdefault LRMS : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tdefault queue : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:121 #, c-format msgid "\tdefault ttl : %u" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 msgid "No valid caches found in configuration, caching is disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 #, c-format msgid "\tCache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:133 #, c-format msgid "\tCache link dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:136 #, fuzzy, c-format msgid "\tCache (read-only): %s" msgstr "Fataler Fehler: %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:138 msgid "\tCache cleaning enabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:139 msgid "\tCache cleaning disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:256 msgid "Starting controldir update tool." msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:258 #, fuzzy msgid "Failed to start controldir update tool." msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:261 #, fuzzy, c-format msgid "Failed to run controldir update tool. Exit code: %i" msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:381 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." 
msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:35 msgid "Can't read configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:41 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:29 msgid "Can't recognize type of configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:47 #, fuzzy msgid "Configuration error" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:77 msgid "Bad number in maxdelivery" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:83 msgid "Bad number in maxemergency" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:89 msgid "Bad number in maxprocessor" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxprepared" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxtransfertries" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:112 msgid "Bad number in speedcontrol" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:123 #, c-format msgid "Bad number in definedshare %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:132 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:143 msgid "Bad number in remotesizelimit" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:168 msgid "Bad value for loglevel" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:24 msgid "Can't open configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:45 msgid "Not enough parameters in copyurl" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:54 msgid "Not enough parameters in linkurl" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:185 #, c-format msgid "Wrong directory in %s" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:104 #, c-format msgid "Failed setting file owner: %s" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:36 #, fuzzy, c-format msgid "Could not read data staging configuration from %s" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:44 #, c-format msgid "Can't read transfer states from %s. Perhaps A-REX is not running?" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:100 msgid "gm-jobs displays information on current jobs in the system." 
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:105 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:110 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:115 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "dir" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:120 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:125 msgid "do not print list of jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:130 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:135 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:140 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 #, fuzzy msgid "dn" msgstr "n" #: src/services/a-rex/grid-manager/gm_jobs.cpp:145 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 #: src/services/a-rex/grid-manager/gm_kick.cpp:30 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:150 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:155 #, fuzzy msgid "request to clean job(s) with specified ID(s)" msgstr "Keine Anfrage-Token spezifiziert!" 
#: src/services/a-rex/grid-manager/gm_jobs.cpp:160 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:165 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:170 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:175 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:180 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 #, fuzzy msgid "job id" msgstr "ungültige Job ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:185 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:186 #, fuzzy msgid "file name" msgstr "Dateiname" #: src/services/a-rex/grid-manager/gm_jobs.cpp:209 #, fuzzy, c-format msgid "Using configuration at %s" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/gm_jobs.cpp:232 #, fuzzy, c-format msgid "Failed to open output file '%s'" msgstr "Fehler bei Unlock von Datei %s: %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:241 msgid "Looking for current jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:278 #, c-format msgid "Job: %s : ERROR : Unrecognizable state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:287 #, fuzzy, c-format msgid "Job: %s : ERROR : No local information." msgstr "Konnte Job Status Information nicht beziehen." #: src/services/a-rex/grid-manager/gm_jobs.cpp:461 #, c-format msgid "Job: %s : ERROR : Failed to put cancel mark" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:465 #, c-format msgid "Job: %s : Cancel request put but failed to communicate to service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:467 #, c-format msgid "Job: %s : Cancel request put and communicated to service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:478 #, c-format msgid "Job: %s : ERROR : Failed to put clean mark" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:482 #, c-format msgid "Job: %s : Clean request put but failed to communicate to service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:484 #, c-format msgid "Job: %s : Clean request put and communicated to service" msgstr "" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control directory. If " "no directory is given it uses the control directory found in the " "configuration file." 
msgstr "" #: src/services/a-rex/grid-manager/gm_kick.cpp:29 msgid "inform about changes in particular job (can be used multiple times)" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, fuzzy, c-format msgid "Failed to acquire source: %s" msgstr "Fehler beim Authentifizieren: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, fuzzy, c-format msgid "Failed to resolve %s" msgstr "Fehler bei Lesen von Objekt %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, fuzzy, c-format msgid "Failed to check %s" msgstr "Fehler beim Authentifizieren: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 #, fuzzy msgid "Wrong number of arguments given" msgstr "Falsche Anzahl an Parametern übertragen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:75 #, c-format msgid "" "DTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:89 #, c-format msgid "%s: Job cancel request from DTR generator to scheduler" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:94 #, fuzzy, c-format msgid "%s: Returning canceled job from DTR generator" msgstr "Wiederholte Nutzung von Verbindung" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:135 #, c-format msgid "%s: Re-requesting attention from DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:145 #, c-format msgid "DTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:164 msgid "Exiting Generator thread" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:236 msgid "Shutting down data staging threads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:246 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:259 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:287 msgid "DTRGenerator is not running!" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:249 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:263 msgid "DTRGenerator was sent null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:272 #, fuzzy, c-format msgid "%s: Received job in DTR generator" msgstr "Resuming Job: %s in Zustand: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:275 #, fuzzy, c-format msgid "%s: Failed to receive job in DTR generator" msgstr "Konnte Job Status Information nicht beziehen." 
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:282 msgid "DTRGenerator got request to cancel null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:297 msgid "DTRGenerator is queried about null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:327 msgid "DTRGenerator is asked about null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:355 msgid "DTRGenerator is requested to remove null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:362 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:370 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:378 #, c-format msgid "%s: Trying to remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:389 #, fuzzy, c-format msgid "%s: Invalid DTR" msgstr "Ungültige URL: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:406 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:410 #, c-format msgid "%s: Received DTR belongs to inactive job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:427 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1065 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:474 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:532 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:646 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:856 #, c-format msgid "%s: Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:436 #, fuzzy, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:442 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:452 #, fuzzy, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:462 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:727 #, c-format msgid "%s: Failed to read list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:476 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:617 #, fuzzy, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:478 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:482 #, fuzzy, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:486 #, fuzzy, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #, fuzzy, c-format msgid "%s: Failed to write list of output files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:506 #, fuzzy, c-format msgid "%s: Failed to write list of output status files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:518 #:
src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:739 #, c-format msgid "%s: Failed to read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:537 #, fuzzy, c-format msgid "%s: Failed to write list of input files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:549 #, c-format msgid "%s: Received DTR with two remote endpoints!" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:561 #: src/services/candypond/CandyPondGenerator.cpp:105 #, fuzzy, c-format msgid "No active job id %s" msgstr "Kann Job ID nicht finden: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:605 #, fuzzy, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:631 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:650 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:777 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:909 #, fuzzy, c-format msgid "%s: Failed to clean up session dir" msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:641 #, fuzzy, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "uploads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "downloads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "cancelled" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:662 #, fuzzy, c-format msgid "%s: All %s %s successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:666 #, fuzzy, c-format msgid "%s: Some %s failed" msgstr "Anfrage %s schlug fehl" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:670 #, c-format msgid "%s: Requesting attention from DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:681 msgid "DTRGenerator is requested to process null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "download" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "upload" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:686 #, c-format msgid "%s: Received data staging request to %s files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:748 #, fuzzy, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:801 #, fuzzy, c-format msgid "%s: Reading output files from user-generated list in %s" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:803 #, fuzzy, c-format msgid "%s: Error reading user-generated output file list in %s" msgstr "" "\n" "%s: ошибка чтения входного файла '%s': %s\n" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:834 #, fuzzy, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "Fehler bei Lesen von Dateiliste" #:
src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:852 #, fuzzy, c-format msgid "%s: Adding new output file %s: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:875 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:888 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:920 #, fuzzy, c-format msgid "%s: Received job in a bad state: %s" msgstr "Resuming Job: %s in Zustand: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:928 #, c-format msgid "%s: Session directory processing takes too long - %u.%06u seconds" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:976 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1071 #, c-format msgid "%s: Failed writing local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1089 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1096 msgid "DTRGenerator is asked to check files for null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1116 #, fuzzy, c-format msgid "%s: Can't read list of input files" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1131 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1136 #, c-format msgid "%s: User has uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1143 #, fuzzy, c-format msgid "%s: Failed writing changed input file." msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1147 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, c-format msgid "%s: User has NOT uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1165 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1221 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1247 #, fuzzy, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1228 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1242 #, fuzzy, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "Formatierungsfehler erkannt in Datei %s, in Zeile %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1237 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1259 #, c-format msgid "%s: Invalid file: %s is too big." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1275 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1281 #, fuzzy, c-format msgid "%s: Failed to open file %s for reading" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1289 #, fuzzy, c-format msgid "%s: Error accessing file %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1301 #, fuzzy, c-format msgid "%s: Error reading file %s" msgstr "Fehler bei Lesen von Meta-Datei %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1316 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1322 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1334 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1341 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1350 msgid "DTRGenerator is requested to clean links for null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1366 #, c-format msgid "%s: Cache cleaning takes too long - %u.%06u seconds" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:108 #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:190 #, c-format msgid "%s: Job monitoring counter is broken" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:115 #, c-format msgid "%s: Job monitoring is unintentionally lost" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:124 #, c-format msgid "%s: Job monitoring stop success" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:129 #, c-format msgid "" "%s: Job monitoring stop requested with %u active references and %s queue " "associated" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:131 #, c-format msgid "%s: Job monitoring stop requested with %u active references" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:195 #, c-format msgid "%s: Job monitoring is lost due to removal from queue" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:278 #, c-format msgid "%s: PushSorted failed to find job where expected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:161 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:255 #, c-format msgid "Bad name for stdout: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:263 #, c-format msgid "Bad name for stderr: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:326 #, c-format msgid "Bad name for runtime environment: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:371 msgid "Job description file could not be read." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:422 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:436 #, c-format msgid "Bad name for executable: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:89 #, fuzzy msgid "Failed to start data staging threads" msgstr "Fehler bei Ablage von FTP Datei" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:190 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:195 #, fuzzy, c-format msgid "%s: unexpected failed job add request: %s" msgstr "Resuming Job: %s in Zustand: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:206 #, fuzzy, c-format msgid "%s: unexpected job add request: %s" msgstr "Resuming Job: %s in Zustand: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:259 #, c-format msgid "%s: job for attention" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:269 msgid "all for attention" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:286 #, c-format msgid "%s: job found while scanning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:314 #, c-format msgid "%s: job will wait for external process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: job assigned for slow polling" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, fuzzy, c-format msgid "%s: job being processed" msgstr "Die Job Löschen-Anfrage war erfolgreich" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:384 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:386 #, fuzzy, c-format msgid "%s: %i" msgstr "%s (%s)" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:398 #, fuzzy, c-format msgid "%s: Failed storing failure reason: %s" msgstr "Konnte job information nicht beziehen für job: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, fuzzy, c-format msgid "%s: Failed reading job description: %s" msgstr "Submit: Fehler bei Senden von Job Beschreibung" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:416 #, fuzzy, c-format msgid "%s: Failed parsing job request." 
msgstr "Submit: Fehler bei Senden von Job Beschreibung" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:466 #, fuzzy, c-format msgid "%s: Failed writing list of output files: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:492 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:506 #, fuzzy, c-format msgid "%s: Failed writing local information: %s" msgstr "Konnte job information nicht beziehen für job: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:538 #, c-format msgid "%s: Failed creating grami file" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:542 #, c-format msgid "%s: Failed setting executable permissions" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:557 #, c-format msgid "%s: Failed running submission process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:562 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:669 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:578 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:585 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:594 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:599 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:620 #, fuzzy, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "" "start_reading_ftp: Zeitüberschreitung bei Warten auf Zeitpunkt letzter " "Änderung" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:626 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:654 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:656 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:664 #, fuzzy, c-format msgid "%s: Failed running cancellation process" msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:683 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: Job cancellation takes too long. Failing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:699 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:705 #, c-format msgid "%s: Failed to cancel running job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:724 #, c-format msgid "%s: State: %s: data staging finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:759 #, c-format msgid "%s: State: %s: still in data staging" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:772 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:782 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:803 #, fuzzy, c-format msgid "%s: Reprocessing job description failed" msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:810 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:814 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:898 #, c-format msgid "%s: Reading status of new job failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:911 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:913 #, c-format msgid "%s: Processing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:952 #, fuzzy, c-format msgid "%s: new job is accepted" msgstr "Job migrierte mit Job ID: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:964 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:969 #, c-format msgid "%s: old job is accepted" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:980 #, c-format msgid "%s: State: ACCEPTED" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:986 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1009 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1015 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1031 #, c-format msgid "%s: State: PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1038 #, fuzzy, c-format msgid "%s: Failed obtaining local job information." msgstr "Konnte Job Status Information nicht beziehen." 
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1091 #, c-format msgid "%s: State: SUBMIT" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1111 #, c-format msgid "%s: State: CANCELING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1131 #, c-format msgid "%s: State: INLRMS" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: INLRMS - checking for pending(%u) and mark" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1138 #, c-format msgid "%s: State: INLRMS - checking for not pending" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1140 #, c-format msgid "%s: Job finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1144 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1157 #, c-format msgid "%s: State: INLRMS - no mark found" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1169 #, c-format msgid "%s: State: FINISHING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1190 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1207 #, c-format msgid "%s: restarted PREPARING job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1223 #, c-format msgid "%s: restarted INLRMS job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1232 #, c-format msgid "%s: restarted FINISHING job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1237 #, c-format msgid "%s: Can't rerun on request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1239 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1250 #, c-format msgid "%s: Job is too old - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1295 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1313 #, c-format msgid "%s: Canceling job because of user request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1327 #, fuzzy, c-format msgid "%s: Failed to turn job into failed during cancel processing." 
msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1359 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1367 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1373 #, c-format msgid "%s: Plugin execution failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1480 #, c-format msgid "%s: State: %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1529 #, fuzzy, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1556 #, c-format msgid "%s: Delete request due to internal problems" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1591 #, c-format msgid "%s: Job failure detected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1651 #, fuzzy, c-format msgid "Failed to move file %s to %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1659 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1769 #, c-format msgid "Failed reading control directory: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1729 #, fuzzy, c-format msgid "Failed reading control directory: %s: %s" msgstr "Fehler bei Lesen von Objekt %s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2043 #, fuzzy, c-format msgid "Helper process start failed: %s" msgstr "Anfrage fehlgeschlagen: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2050 #, c-format msgid "Stopping helper process %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:61 #, fuzzy, c-format msgid "Error with hearbeatfile: %s" msgstr "Fehler bei Formatieren von Lock-Datei %s: %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:73 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:139 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:136 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:107 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:186 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:178 msgid "" "gmetric_bin_path empty in arc.conf (should never happen the default value " "should be used)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:114 #, fuzzy msgid ": Accounting records reporter tool is not specified" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 #, fuzzy msgid ": Failure creating slot for accounting reporter child process" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 #, fuzzy msgid ": Failure starting accounting reporter child process" msgstr "Fehler bei Reservieren von Platz" #: src/services/a-rex/grid-manager/log/JobLog.cpp:176 msgid ": Failure creating accounting database connection" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:202 #, c-format msgid ": writing accounting record took %llu ms" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:74 #, c-format msgid "Session dir '%s' contains user specific substitutions - skipping it" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:86 #, c-format msgid "Sessiondir %s: Free space %f GB" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:94 #, fuzzy msgid "No session directories found in configuration." 
msgstr "Fehlendes oder leeres KeyPath Element" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:125 msgid "No cachedirs found/configured for calculation of free space." msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:34 #, c-format msgid "%s: Job's helper exited" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:56 #, c-format msgid "%s: Failure creating slot for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:120 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:73 #, c-format msgid "%s: Failure starting child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:62 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:78 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:47 #, fuzzy msgid "[job description input]" msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:48 msgid "" "Tool for writing the grami file representation of a job description file." msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:52 #, fuzzy msgid "Name of grami file" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:57 #, fuzzy msgid "Configuration file to load" msgstr "Vermuting - Datei nicht gefunden" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:58 msgid "arc.conf" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:62 #, fuzzy msgid "Session directory to use" msgstr "Lege Verzeichnis %s an" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:63 msgid "directory" msgstr "Verzeichnis" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:79 #, fuzzy msgid "No job description file name provided." msgstr "Keine Job Beschreibung als Eingabe benötigt" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:85 #, fuzzy, c-format msgid "Unable to parse job description input: %s" msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:91 #, fuzzy msgid "Unable to load ARC configuration file." 
msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:111 #, fuzzy, c-format msgid "Unable to write grami file: %s" msgstr "Fehler bei Ablage von FTP Datei" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:117 #, fuzzy, c-format msgid "Unable to write 'output' file: %s" msgstr "Fehler bei Ablage von FTP Datei" #: src/services/a-rex/information_collector.cpp:53 #, fuzzy, c-format msgid "Resource information provider: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/information_collector.cpp:56 #, fuzzy msgid "Resource information provider failed to start" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/information_collector.cpp:59 #, fuzzy msgid "Resource information provider failed to run" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:65 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:71 msgid "No new informational document assigned" msgstr "" #: src/services/a-rex/information_collector.cpp:73 #, c-format msgid "Obtained XML: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:87 msgid "Informational document is empty" msgstr "" #: src/services/a-rex/information_collector.cpp:212 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:215 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:221 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:230 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:242 #, fuzzy msgid "OptimizedInformationContainer failed to rename temporary file" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:38 msgid "Default INTERNAL client constructor" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:41 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:61 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:83 #, fuzzy msgid "Failed to load grid-manager configfile" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:46 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:66 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:88 #, fuzzy msgid "Failed to set INTERNAL endpoint" msgstr "Fehler beim Entfernen von LFC Verzeichnis: %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:131 #, fuzzy msgid "Failed to identify grid-manager config file" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:150 #, fuzzy, c-format msgid "Failed to run configuration parser at %s." msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:154 #, c-format msgid "Parser failed with error code %i." msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:160 #, c-format msgid "No pid file is found at '%s'. Probably A-REX is not running." 
msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:175 #, fuzzy, c-format msgid "Failed to load grid-manager config file from %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:266 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:372 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:405 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:451 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:505 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:557 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:575 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:625 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:655 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:673 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:691 #, fuzzy msgid "INTERNALClient is not initialized" msgstr "FATAL: SSL Locks nicht initialisiert" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:456 #, fuzzy msgid "Submitting job " msgstr "Aufräumen von Job: %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:517 #, fuzzy, c-format msgid "Failed to copy input file: %s to path: %s" msgstr "Fehler bei Öffnen von Datei %s zum Lesen: %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:523 #, fuzzy, c-format msgid "Failed to set permissions on: %s" msgstr "Konnte Zugriffsrechte von hard link nicht ändern zu 0644: %s" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:51 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:92 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:119 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:145 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:184 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:246 #, fuzzy msgid "Failed to load grid-manager config file" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:191 #, c-format msgid "Job %s does not report a resumable state" msgstr "Job %s berichtet nicht von einem resumable Zustand" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:196 #, fuzzy, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "Resuming Job: %s in Zustand: %s" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:205 msgid "Job resuming successful" msgstr "Job erfolgreich resumed." #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:251 #, fuzzy, c-format msgid "Failed retrieving information for job: %s" msgstr "Fehler bei Bezug von Information für Job: %s" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:324 #, fuzzy msgid "Retrieving job description of INTERNAL jobs is not supported" msgstr "Resume von CREAM jobs wird nicht unterstützt" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:67 #, c-format msgid "Listing localjobs succeeded, %d localjobs found" msgstr "" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:83 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface " "(%s)." 
msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:38 #, fuzzy msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "Konnte delegation credentials in Client Konfiguration nicht finden" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:45 #, fuzzy, c-format msgid "Failed to delegate credentials to server - %s" msgstr "Konnte delegation credentatials nicht zerstören für job: %s" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:84 #, fuzzy msgid "Failed preparing job description" msgstr "Submit: Fehler bei Senden von Job Beschreibung" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:127 #, fuzzy msgid "Failed submitting job description" msgstr "Fehler bei Schreiben zu Ziel" #: src/services/a-rex/job.cpp:78 #, fuzzy, c-format msgid "Using cached local account '%s'" msgstr "Nutze space token %s" #: src/services/a-rex/job.cpp:89 msgid "Will not map to 'root' account by default" msgstr "" #: src/services/a-rex/job.cpp:102 msgid "No local account name specified" msgstr "" #: src/services/a-rex/job.cpp:105 #, c-format msgid "Using local account '%s'" msgstr "" #: src/services/a-rex/job.cpp:109 msgid "TLS provides no identity, going for OTokens" msgstr "" #: src/services/a-rex/job.cpp:168 #, fuzzy msgid "Failed to acquire A-REX's configuration" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/a-rex/job.cpp:240 #, fuzzy, c-format msgid "Cannot handle local user %s" msgstr "" "Kann Owner von %s nicht ändernÐевозможно изменить владельца папки %1.\n" "Ошибка: %2" #: src/services/a-rex/job.cpp:288 #, c-format msgid "%s: Failed to parse user policy" msgstr "" #: src/services/a-rex/job.cpp:293 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "" #: src/services/a-rex/job.cpp:398 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "" #: src/services/a-rex/job.cpp:738 src/services/a-rex/job.cpp:756 #, fuzzy, c-format msgid "Credential expires at %s" msgstr "Delegation service: %s" #: src/services/a-rex/job.cpp:740 src/services/a-rex/job.cpp:758 #, fuzzy, c-format msgid "Credential handling exception: %s" msgstr "Delegation service: %s" #: src/services/a-rex/job.cpp:924 #, c-format msgid "Failed to run external plugin: %s" msgstr "" #: src/services/a-rex/job.cpp:928 #, c-format msgid "Plugin response: %s" msgstr "" #: src/services/a-rex/job.cpp:1138 #, fuzzy, c-format msgid "Failed to create job in %s" msgstr "Löschen fehlgeschlagen von job: %s" #: src/services/a-rex/job.cpp:1147 #, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "" #: src/services/a-rex/job.cpp:1397 msgid "No non-draining session dirs available" msgstr "" #: src/services/a-rex/put.cpp:150 #, c-format msgid "%s: put file %s: there is no payload" msgstr "" #: src/services/a-rex/put.cpp:156 #, c-format msgid "%s: put file %s: unrecognized payload" msgstr "" #: src/services/a-rex/put.cpp:172 src/services/a-rex/rest/rest.cpp:2050 #, fuzzy, c-format msgid "%s: put file %s: failed to create file: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/put.cpp:188 #, fuzzy, c-format msgid "%s: put file %s: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/put.cpp:210 #, fuzzy, c-format msgid "%s: delete file %s: failed to obtain file path: %s" msgstr "Fehler bei Lesen von Datei %s: %s" #: src/services/a-rex/put.cpp:221 #, fuzzy, c-format msgid "%s: delete file %s: failed to open file/dir: %s" msgstr "Fehler bei Lesen von 
Datei %s: %s" #: src/services/a-rex/rest/rest.cpp:749 #, c-format msgid "REST: process %s at %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:797 src/services/a-rex/rest/rest.cpp:813 #: src/services/a-rex/rest/rest.cpp:1094 src/services/a-rex/rest/rest.cpp:1185 #: src/services/a-rex/rest/rest.cpp:1549 src/services/a-rex/rest/rest.cpp:2161 #, fuzzy, c-format msgid "process: method %s is not supported for subpath %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/services/a-rex/rest/rest.cpp:819 #, fuzzy, c-format msgid "process: schema %s is not supported for subpath %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/services/a-rex/rest/rest.cpp:1182 src/services/a-rex/rest/rest.cpp:1546 #, fuzzy, c-format msgid "process: action %s is not supported for subpath %s" msgstr "Verarbeitungstyp nicht unterstützt: %s" #: src/services/a-rex/rest/rest.cpp:1558 src/services/a-rex/rest/rest.cpp:1627 #: src/services/a-rex/rest/rest.cpp:1987 src/services/a-rex/rest/rest.cpp:2150 #, c-format msgid "REST:GET job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1674 src/services/a-rex/rest/rest.cpp:1682 #, c-format msgid "REST:KILL job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1699 src/services/a-rex/rest/rest.cpp:1707 #, fuzzy, c-format msgid "REST:CLEAN job %s - %s" msgstr "" "CreateActivity: Antwort = \n" "%s" #: src/services/a-rex/rest/rest.cpp:1724 src/services/a-rex/rest/rest.cpp:1732 #: src/services/a-rex/rest/rest.cpp:1749 #, c-format msgid "REST:RESTART job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:2040 #, c-format msgid "REST:PUT job %s: file %s: there is no payload" msgstr "" #: src/services/a-rex/rest/rest.cpp:2063 #, fuzzy, c-format msgid "HTTP:PUT %s: put file %s: %s" msgstr "Fehler bei Lesen von Dateiliste" #: src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:20 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" "UpdateCredentials: Anfrage = \n" "%s" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" "UpdateCredentials: Antwort = \n" "%s" #: src/services/candypond/CandyPond.cpp:52 #, fuzzy msgid "No A-REX config file found in candypond configuration" msgstr "Pfad zu VOMS Server Konfigurationsdatei" #: src/services/candypond/CandyPond.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "" #: src/services/candypond/CandyPond.cpp:60 #, fuzzy, c-format msgid "Failed to process A-REX configuration in %s" msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #: src/services/candypond/CandyPond.cpp:65 #, fuzzy msgid "No caches defined in configuration" msgstr "Pfad zu VOMS
Server Konfigurationsdatei" #: src/services/candypond/CandyPond.cpp:140 #: src/services/candypond/CandyPond.cpp:347 #, fuzzy, c-format msgid "Can't handle URL %s" msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #: src/services/candypond/CandyPond.cpp:150 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/candypond/CandyPond.cpp:162 #, fuzzy, c-format msgid "Problem accessing cache file %s: %s" msgstr "Fehler bei Zugriff auf Cache-Datei %s: %s" #: src/services/candypond/CandyPond.cpp:210 #: src/services/candypond/CandyPond.cpp:474 #, fuzzy msgid "No job ID supplied" msgstr "Keine Job ID in Antwort" #: src/services/candypond/CandyPond.cpp:219 #, c-format msgid "Bad number in priority element: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:228 msgid "No username supplied" msgstr "" #: src/services/candypond/CandyPond.cpp:235 #, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "" #: src/services/candypond/CandyPond.cpp:249 #, fuzzy msgid "No session directory found" msgstr "Kein Cache-Verzeichnis angegeben" #: src/services/candypond/CandyPond.cpp:253 #, fuzzy, c-format msgid "Using session dir %s" msgstr "Fehler beim start von session: %s" #: src/services/candypond/CandyPond.cpp:257 #, fuzzy, c-format msgid "Failed to stat session dir %s" msgstr "Konnte Owner des session dir nicht ändern zu %i: %s" #: src/services/candypond/CandyPond.cpp:262 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/candypond/CandyPond.cpp:289 #, fuzzy, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "Fehler bei Lesen von Proxy-Datei: %s" #: src/services/candypond/CandyPond.cpp:307 #, fuzzy, c-format msgid "DN is %s" msgstr "Größe ist %s" #: src/services/candypond/CandyPond.cpp:385 #, c-format msgid "Permission checking passed for url %s" msgstr "" #: src/services/candypond/CandyPond.cpp:410 #: src/services/candypond/CandyPondGenerator.cpp:135 #, fuzzy, c-format msgid "Failed to move %s to %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/services/candypond/CandyPond.cpp:441 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/candypond/CandyPond.cpp:443 #, fuzzy, c-format msgid "Failed to start new DTR for %s" msgstr "Fehler bei Schreiben zu Datei %s: %s" #: src/services/candypond/CandyPond.cpp:487 #, fuzzy, c-format msgid "Job %s: all files downloaded successfully" msgstr "Datei %s erfolgreich entfernt" #: src/services/candypond/CandyPond.cpp:494 #, c-format msgid "Job %s: Some downloads failed" msgstr "" #: src/services/candypond/CandyPond.cpp:499 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/candypond/CandyPond.cpp:511 #, fuzzy msgid "CandyPond: Unauthorized" msgstr "echo: Unauthorisiert" #: src/services/candypond/CandyPond.cpp:520 msgid "No local user mapping found" msgstr "" #: src/services/candypond/CandyPond.cpp:527 #: src/services/data-staging/DataDeliveryService.cpp:649 #, fuzzy, c-format msgid "Identity is %s" msgstr "Identität: %s" #: src/services/candypond/CandyPond.cpp:585 #: src/services/data-staging/DataDeliveryService.cpp:721 msgid "Security Handlers processing failed" msgstr "" #: src/services/candypond/CandyPond.cpp:592 msgid "Only POST is supported in CandyPond" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:124 #, fuzzy, c-format msgid "Could not determine
session directory from filename %s" msgstr "Konnte Version des Servers nicht bestimmen" #: src/services/candypond/CandyPondGenerator.cpp:164 #, fuzzy, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "Quelle Ziel" #: src/services/candypond/CandyPondGenerator.cpp:206 #, c-format msgid "DTRs still running for job %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:215 #, c-format msgid "All DTRs finished for job %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:222 #, fuzzy, c-format msgid "Job %s not found" msgstr "Erhielt dbnotfound" #: src/services/data-staging/DataDeliveryService.cpp:66 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:70 #, c-format msgid "Archiving DTR %s, state %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:174 #, fuzzy msgid "No delegation token in request" msgstr "Erstellen und senden von Anfrage" #: src/services/data-staging/DataDeliveryService.cpp:184 msgid "Failed to accept delegation" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:214 #: src/services/data-staging/DataDeliveryService.cpp:221 #, fuzzy msgid "ErrorDescription" msgstr "Fehler bei Importieren" #: src/services/data-staging/DataDeliveryService.cpp:226 #, c-format msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:241 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:258 #, c-format msgid "Storing temp proxy at %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:266 #, fuzzy, c-format msgid "Failed to create temp proxy at %s: %s" msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:273 #, fuzzy, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "Konnte Owner des session dir nicht ändern zu %i: %s" #: src/services/data-staging/DataDeliveryService.cpp:302 #, fuzzy msgid "Invalid DTR" msgstr "Ungültige URL: %s" #: src/services/data-staging/DataDeliveryService.cpp:306 #, fuzzy, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "Fehler bei Entfernen von hard link %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:407 #, c-format msgid "No such DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:425 #, fuzzy, c-format msgid "DTR %s failed: %s" msgstr "DCAU fehlgeschlagen: %s" #: src/services/data-staging/DataDeliveryService.cpp:436 #, fuzzy, c-format msgid "DTR %s finished successfully" msgstr "Verbindung erfolgreich geschlossen" #: src/services/data-staging/DataDeliveryService.cpp:446 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:506 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:516 #, c-format msgid "DTR %s was already cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:525 #, fuzzy, c-format msgid "DTR %s could not be cancelled" msgstr "PDP: %s kann nicht geladen werden" #: src/services/data-staging/DataDeliveryService.cpp:569 #, fuzzy, c-format msgid "Failed to get load average: %s" msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #: src/services/data-staging/DataDeliveryService.cpp:593 msgid "Invalid
configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:597 #, fuzzy msgid "Invalid configuration - no transfer dirs specified" msgstr "Delegation Authorisierung fehlgeschlagen" #: src/services/data-staging/DataDeliveryService.cpp:608 #, fuzzy msgid "Failed to start archival thread" msgstr "Fehler bei Ablage von FTP Datei" #: src/services/data-staging/DataDeliveryService.cpp:633 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:642 #, fuzzy msgid "Unauthorized" msgstr "echo: Unauthorisiert" #: src/services/data-staging/DataDeliveryService.cpp:728 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:82 msgid "EchoService (python) 'Process' called" msgstr "" #: src/services/examples/echo_python/EchoService.py:86 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:87 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:88 #, python-format msgid "EchoService (python) got: %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:93 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:99 #: src/services/examples/echo_python/EchoService.py:171 #, python-format msgid "outpayload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:128 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:131 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:137 #: src/services/examples/echo_python/EchoService.py:155 #, python-format msgid "new_payload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:149 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:165 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:167 #, fuzzy msgid "Waiting ends." 
msgstr "Warte vor Antwort" #: src/services/wrappers/python/pythonwrapper.cpp:103 #, c-format msgid "Loading %u-th Python service" msgstr "Lade %u-th Python Service" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Initialized %u-th Python service" msgstr "Initialisierte %u-th Python servce" #: src/services/wrappers/python/pythonwrapper.cpp:142 msgid "Invalid class name" msgstr "Ungültiger Klassenname" #: src/services/wrappers/python/pythonwrapper.cpp:147 #, c-format msgid "class name: %s" msgstr "Klassenname: %s" #: src/services/wrappers/python/pythonwrapper.cpp:148 #, c-format msgid "module name: %s" msgstr "Modulname: %s" #: src/services/wrappers/python/pythonwrapper.cpp:205 #, fuzzy msgid "Cannot find ARC Config class" msgstr "Kann UserConfig Klasse nicht finden" #: src/services/wrappers/python/pythonwrapper.cpp:212 #, fuzzy msgid "Config class is not an object" msgstr "UserConfig Klasse ist kein Objekt" #: src/services/wrappers/python/pythonwrapper.cpp:220 msgid "Cannot get dictionary of module" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:229 msgid "Cannot find service class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:238 msgid "Cannot create config argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:245 #, fuzzy msgid "Cannot convert config to Python object" msgstr "Kann UserConfig nicht zu python Objekt konvertieren" #: src/services/wrappers/python/pythonwrapper.cpp:268 #, c-format msgid "%s is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:274 #, fuzzy msgid "Message class is not an object" msgstr "Klasse %s ist kein Objekt" #: src/services/wrappers/python/pythonwrapper.cpp:280 msgid "Python Wrapper constructor succeeded" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:295 #, c-format msgid "Python Wrapper destructor (%d)" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:328 msgid "Python interpreter locked" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:332 msgid "Python interpreter released" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:403 msgid "Python wrapper process called" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:412 msgid "Failed to create input SOAP container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:422 msgid "Cannot create inmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:436 #, fuzzy msgid "Cannot find ARC Message class" msgstr "Kann arc ExecutionTarget Klasse nicht finden" #: src/services/wrappers/python/pythonwrapper.cpp:442 #, fuzzy msgid "Cannot convert inmsg to Python object" msgstr "Kann doc nicht zu Python Objekt konvertieren" #: src/services/wrappers/python/pythonwrapper.cpp:451 msgid "Failed to create SOAP containers" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:457 msgid "Cannot create outmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:463 #, fuzzy msgid "Cannot convert outmsg to Python object" msgstr "Kann doc nicht zu Python Objekt konvertieren" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:12 msgid "Creating a soap client" msgstr "Lege SOAP Clietn an" #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: 
src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:61 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:22 msgid "Creating and sending request" msgstr "Erstellen und senden von Anfrage" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 #: src/tests/echo/test_clientinterface.py:30 msgid "SOAP invocation failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "Lege HTTP Client an" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 #, fuzzy msgid "HTTP with SAML2SSO invocation failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "Keine HTTP Antwort erhalten" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 #, fuzzy msgid "SOAP with SAML2SSO invocation failed" msgstr "SOAP Aufruf fehlgeschlagen" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:77 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:52 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:57 #: src/tests/delegation/test_delegation_client.cpp:89 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "Delegation ID: %s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:84 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "echo: Unauthorisiert" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "" #: src/tests/count/test_client.cpp:50 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "" #: src/tests/count/test_client.cpp:54 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "" #: 
src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "" #: src/tests/count/test_client.cpp:84 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "" #: src/tests/count/test_client.cpp:90 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "" #: src/tests/count/test_client.cpp:97 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "" #: src/tests/count/test_service.cpp:22 src/tests/echo/test.cpp:23 #: src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "" #: src/tests/count/test_service.cpp:25 src/tests/echo/test.cpp:26 #: src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "" #: src/tests/count/test_service.cpp:30 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "Service wartet auf Anfragen" #: src/tests/echo/test.cpp:32 #, fuzzy msgid "Creating client interface" msgstr "Erstelle Client Schnittstelle" #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" msgstr "" #, fuzzy #~ msgid "No jobs to resubmit with the specified status" #~ msgstr "Keine Job Beschreibung als Eingabe benötigt" #, fuzzy, c-format #~ msgid "Cannot write jobids to file (%s)" #~ msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s" #, fuzzy #~ msgid "Job resubmission summary:" #~ msgstr "Job Hochladen Zusammenfassung:" #, fuzzy, c-format #~ msgid "%d of %d jobs were resubmitted" #~ msgstr "%d von %d Jobs wurden hochgeladen" #, fuzzy, c-format #~ msgid "The following %d were not resubmitted" #~ msgstr "Die folgenden %d wurden nicht hochgeladen" #, fuzzy, c-format #~ msgid "Unable to load broker %s" #~ msgstr "Konnte Broker %s nicht laden" #, fuzzy #~ msgid "Test aborted because no resource returned any information" #~ msgstr "Job Migration abgebrochen, da kein Cluster Informationen lieferte" #, fuzzy #~ msgid "interfacename" #~ msgstr "Benutzungsschnittstellenfehler" #~ msgid "force migration, ignore kill failure" #~ msgstr "" #~ "erzwinge Migration, ignoriere ein Fehlschlagen des Abbruchs bereits " #~ "laufender Jobs" #, fuzzy #~ msgid "resubmit to the same resource" #~ msgstr "Erneut zu demselben Cluster submitten" #, fuzzy #~ msgid "do not resubmit to the same resource" #~ msgstr "Erneut zu demselben Cluster submitten" #, fuzzy #~ msgid "InterfaceName" #~ msgstr "Interaktiver Modus." #, fuzzy, c-format #~ msgid "OpenSSL error -- %s" #~ msgstr "OpenSSL Fehler -- %s" #, c-format #~ msgid "Library : %s" #~ msgstr "Bibliothek : %s" #, c-format #~ msgid "Function : %s" #~ msgstr "Funktion : %s" #, c-format #~ msgid "Reason : %s" #~ msgstr "Grund : %s" #~ msgid "User interface error" #~ msgstr "Benutzungsschnittstellenfehler" #~ msgid "Aborted!" #~ msgstr "Abbruch!" #, fuzzy #~ msgid "" #~ "Supported constraints are:\n" #~ " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " #~ "from now)\n" #~ " validityEnd=time\n" #~ " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod " #~ "and validityEnd\n" #~ " not specified, the default is 12 hours for local proxy, and 168 hours " #~ "for delegated\n" #~ " proxy on myproxy server)\n" #~ " vomsACvalidityPeriod=time (e.g.
43200 or 12h or 12H; if not specified, " #~ "the default\n" #~ " is the minimum value of 12 hours and validityPeriod)\n" #~ " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " #~ "server,\n" #~ " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " #~ "value of\n" #~ " 12 hours and validityPeriod (which is lifetime of the delegated proxy " #~ "on myproxy server))\n" #~ " proxyPolicy=policy content\n" #~ " proxyPolicyFile=policy file" #~ msgstr "" #~ "Поддерживаемые ограничения:\n" #~ " validityStart=время (например, 2008-05-29T10:20:30Z; если не указано, " #~ "то начинается немедленно)\n" #~ " validityEnd=время\n" #~ " validityPeriod=время (например, 43200, или 12h, или 12H; если не " #~ "указаны ни validityPeriod,\n" #~ " ни validityEnd, то срок действия по умолчанию составляет 12 часов)\n" #~ " vomsACvalidityPeriod=время (например, 43200, или 12h, или 12H; если не " #~ "указано, то используется\n" #~ " значение validityPeriod)\n" #~ " proxyPolicy=содержимое норматива\n" #~ " proxyPolicyFile=файл норматива" #, fuzzy #~ msgid "" #~ "print all information about this proxy. \n" #~ " In order to show the Identity (DN without CN as suffix for " #~ "proxy) \n" #~ " of the certificate, the 'trusted certdir' is needed." #~ msgstr "" #~ "вывести всю информацию о данной доверенности. \n" #~ " Для вывода персональной информации (DN без CN как суффикс " #~ "доверенности) \n" #~ " из сертификата, необходим 'trusted certdir'." #, fuzzy #~ msgid "username to MyProxy server" #~ msgstr "Nutzername bei myproxy Server" #, fuzzy, c-format #~ msgid "There are %d commands to the same VOMS server %s" #~ msgstr "Es gibt %d Kommandos an denselben VOMS Server %s\n" #, fuzzy, c-format #~ msgid "Message sent to VOMS server %s is: %s" #~ msgstr "Warnung: kann nicht verbinden zu RLS server %s: %s" #, fuzzy #~ msgid "No HTTP response from VOMS server" #~ msgstr "Frühe Antwort vom Server" #, fuzzy, c-format #~ msgid "Returned message from VOMS server: %s" #~ msgstr "Zurückerhaltene Nachricht von myproxy Server: %s" #, fuzzy #~ msgid "No stream response from VOMS server" #~ msgstr "Frühe Antwort vom Server" #, fuzzy, c-format #~ msgid "Returned message from VOMS server %s is: %s\n" #~ msgstr "Zurückerhaltene Nachricht von myproxy Server: %s" #, c-format #~ msgid "There are %d certificates in the returned msg" #~ msgstr "Es sind %d Zertifikate in der zurückgelieferten Nachricht." #, fuzzy, c-format #~ msgid "Unable to copy %s" #~ msgstr "Konnte Broker %s nicht laden" #, fuzzy, c-format #~ msgid "Unable to list content of %s" #~ msgstr "Fehler bei Lesen von Datei %s: %s" #, fuzzy, c-format #~ msgid "Unable to create directory %s" #~ msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #, fuzzy, c-format #~ msgid "Unable to remove file %s" #~ msgstr "Fehler bei Lesen von Datei %s: %s" #, fuzzy #~ msgid "service message" #~ msgstr "Service Nachricht" #~ msgid "path to config file" #~ msgstr "Pfad zu Konfigurationsdatei" #~ msgid "[-]name" #~ msgstr "[-]Name" #, fuzzy #~ msgid "Query is not a valid XML" #~ msgstr "" #~ "Указанный URL\n" #~ " %1 \n" #~ " содержит ошибки."
#, c-format #~ msgid "Connect: Failed to init handle: %s" #~ msgstr "Connect: Konnte init handle nicht initialisieren: %s" #, fuzzy, c-format #~ msgid "Failed to enable IPv6: %s" #~ msgstr "Fehler bei Lesen von Datei %s: %s" #, c-format #~ msgid "Connect: Failed to connect: %s" #~ msgstr "Connect: Verbindung zu %s schlug fehl" #, c-format #~ msgid "Connect: Connecting timed out after %d ms" #~ msgstr "Connect: Zeitüberschreitung der Verbindung nach %d ms" #, c-format #~ msgid "Connect: Failed to init auth info handle: %s" #~ msgstr "Connect: Konnte auth info handle nicht initialisieren: %s" #, c-format #~ msgid "Connect: Failed authentication: %s" #~ msgstr "Connect: Authentikation fehlgeschlagen: %s" #, c-format #~ msgid "Connect: Authentication timed out after %d ms" #~ msgstr "Connect: Zeitüberschreitung der Authentikation nach %d ms" #, fuzzy, c-format #~ msgid "SendCommand: Command: %s" #~ msgstr "SendCommand: Fehler: %s" #, c-format #~ msgid "SendCommand: Failed: %s" #~ msgstr "SendCommand: Fehler: %s" #, c-format #~ msgid "SendCommand: Timed out after %d ms" #~ msgstr "SendCommand: Zeitüberschreitung nach %d ms" #, fuzzy, c-format #~ msgid "SendCommand: Response: %s" #~ msgstr "SendCommand: Fehler: %s" #, fuzzy #~ msgid "SendData: Failed sending EPSV and PASV commands" #~ msgstr "SendData: Fehler bei Senden von PASV Kommando" #, fuzzy, c-format #~ msgid "SendData: Server PASV response parsing failed: %s" #~ msgstr "SendData: Server PASV Antwort konnte nicht geparst werden: %s" #, fuzzy, c-format #~ msgid "SendData: Server EPSV response parsing failed: %s" #~ msgstr "SendData: Server PASV Antwort konnte nicht geparst werden: %s" #, fuzzy, c-format #~ msgid "SendData: Server EPSV response port parsing failed: %s" #~ msgstr "SendData: Server PASV Antwort konnte nicht geparst werden: %s" #, fuzzy, c-format #~ msgid "SendData: Can't parse host and/or port in response to EPSV/PASV: %s" #~ msgstr "Kann host and port nicht aus Antwort zu PASV herauslesen" #, fuzzy, c-format #~ msgid "SendData: Data channel: %d.%d.%d.%d:%d" #~ msgstr "Datenkanal: %d.%d.%d.%d %d" #, fuzzy, c-format #~ msgid "SendData: Data channel: [%s]:%d" #~ msgstr "SendData: Fehler bei Datenverbindung zum Schreiben: %s" #, c-format #~ msgid "SendData: Local port failed: %s" #~ msgstr "SendData: Lokaler port schlug fehl: %s" #~ msgid "SendData: Failed sending DCAU command" #~ msgstr "SendData: Fehler bei Senden von DCAU Kommando" #~ msgid "SendData: Failed sending TYPE command" #~ msgstr "SendData: Fehler bei Senden von TYPE Kommando" #, c-format #~ msgid "SendData: Local type failed: %s" #~ msgstr "SendData: Lokaler type schlug fehl: %s" #, c-format #~ msgid "SendData: Failed sending STOR command: %s" #~ msgstr "SendData: Fehler bei Senden von STOR Kommando: %s" #, c-format #~ msgid "SendData: Data connect write failed: %s" #~ msgstr "SendData: Fehler bei Datenverbindung zum Schreiben: %s" #, c-format #~ msgid "SendData: Data connect write timed out after %d ms" #~ msgstr "" #~ "SendData: Zeitüberschreitung bei Datenverbindung zum Schreiben nach %d ms" #, c-format #~ msgid "SendData: Data write failed: %s" #~ msgstr "SendData: Schreiben von Daten schlug fehl: %s" #, c-format #~ msgid "SendData: Data write timed out after %d ms" #~ msgstr "SendData: Zeitüberschreitung beim Schreiben nach %d ms" #, fuzzy, c-format #~ msgid "Disconnect: Failed aborting - ignoring: %s" #~ msgstr "Disconnect: Fehler beim Schließen der Verbindung - ignoriert: %s" #, fuzzy, c-format #~ msgid "Disconnect: Data close timed out after %d ms" #~ 
msgstr "Disconnect: Zeitüberschreitung vom Schließen nach %d ms" #, fuzzy, c-format #~ msgid "Disconnect: Abort timed out after %d ms" #~ msgstr "Disconnect: Zeitüberschreitung vom Schließen nach %d ms" #, fuzzy, c-format #~ msgid "Disconnect: Failed quitting - ignoring: %s" #~ msgstr "Disconnect: Fehler beim Schließen der Verbindung - ignoriert: %s" #, c-format #~ msgid "Disconnect: Quitting timed out after %d ms" #~ msgstr "Disconnect: Zeitüberschreitung beim Verlassen nach %d ms" #, c-format #~ msgid "Disconnect: Failed closing - ignoring: %s" #~ msgstr "Disconnect: Fehler beim Schließen der Verbindung - ignoriert: %s" #, c-format #~ msgid "Disconnect: Closing timed out after %d ms" #~ msgstr "Disconnect: Zeitüberschreitung vom Schließen nach %d ms" #, fuzzy #~ msgid "Disconnect: waiting for globus handle to settle" #~ msgstr "Disconnect: Konnte handle nicht freigeben: %s" #, fuzzy #~ msgid "Disconnect: globus handle is stuck." #~ msgstr "Disconnect: Konnte handle nicht freigeben: %s" #, fuzzy, c-format #~ msgid "" #~ "Disconnect: Failed destroying handle: %s. Can't handle such situation." #~ msgstr "Disconnect: Konnte handle nicht freigeben: %s" #, fuzzy #~ msgid "Disconnect: handle destroyed." #~ msgstr "Disconnect: Konnte handle nicht freigeben: %s" #, fuzzy #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - SubmitterPlugin for ARC0 is disabled. Report to " #~ "developers." #~ msgstr "" #~ "Fehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im " #~ "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. " #~ "Bitte die Entwickler informieren." #, fuzzy #~ msgid "" #~ "Can't create information handle - is the ARC LDAP DMC plugin available?" #~ msgstr "" #~ "Kann information handle nicht anlegen - ist das ARC LDAP DMC plugin " #~ "verfügbar?" #, c-format #~ msgid "Cleaning job: %s" #~ msgstr "Aufräumen von Job: %s" #~ msgid "Failed to connect for job cleaning" #~ msgstr "Konnte nicht verbinden, um Job aufzuräumen" #~ msgid "Failed sending CWD command for job cleaning" #~ msgstr "Konnte CWD Kommando nicht senden um Job aufzuräumen" #~ msgid "Failed sending RMD command for job cleaning" #~ msgstr "Konnte RMD Kommando nicht senden um Job aufzuräumen" #~ msgid "Failed to disconnect after job cleaning" #~ msgstr "Konnte Verbindung nicht trennen nach Aufräumen von Job" #~ msgid "Job cleaning successful" #~ msgstr "Job erfolgreich aufgeräumt." 
#, fuzzy, c-format #~ msgid "Cancelling job: %s" #~ msgstr "Aufräumen von Job: %s" #, fuzzy #~ msgid "Failed to connect for job cancelling" #~ msgstr "Konnte nicht verbinden, um Job aufzuräumen" #~ msgid "Failed sending CWD command for job cancelling" #~ msgstr "Fehler beim Senden von CWD für den Abbruch eines Jobs" #~ msgid "Failed sending DELE command for job cancelling" #~ msgstr "Fehler beim Senden von DELE für den Abbruch eines Jobs" #~ msgid "Failed to disconnect after job cancelling" #~ msgstr "Fehler beim Trennen der Verbindung nach Abbruch von Job" #~ msgid "Job cancelling successful" #~ msgstr "Job erfolgreich abgebrochen" #, c-format #~ msgid "Renewing credentials for job: %s" #~ msgstr "Erneuern der credentials für Job %s" #~ msgid "Failed to connect for credential renewal" #~ msgstr "Fehler beim Verbindungen für Erneuerung von credentials" #~ msgid "Failed sending CWD command for credentials renewal" #~ msgstr "Fehler beim Senden von CWD Kommando für Erneuerung von credentials" #~ msgid "Failed to disconnect after credentials renewal" #~ msgstr "Fehler bein Trennen der Verbindung nach Erneuerung der credentials" #~ msgid "Renewal of credentials was successful" #~ msgstr "Erneuerung der Credentials war erfolgreich" #, fuzzy, c-format #~ msgid "Illegal jobID specified (%s)" #~ msgstr "Ungültige Job ID angegeben" #, c-format #~ msgid "Could not create temporary file: %s" #~ msgstr "Konnte temporäre Datei nicht anlegen: %s" #, fuzzy, c-format #~ msgid "Trying to retrieve job description of %s from computing resource" #~ msgstr "Versuche Job Beschreibung von %s von Cluster zu beziehen" #, fuzzy, c-format #~ msgid "invalid jobID: %s" #~ msgstr "ungültige Job ID: %s" #~ msgid "clientxrsl found" #~ msgstr "clientxrsl gefunden" #~ msgid "could not find end of clientxrsl" #~ msgstr "konnte Ende von clientxrsl nicht finden" #, c-format #~ msgid "Job description: %s" #~ msgstr "Job Beschreibung: %s" #~ msgid "clientxrsl not found" #~ msgstr "clientxrsl nicht gefunden" #, fuzzy, c-format #~ msgid "Invalid JobDescription: %s" #~ msgstr "Ungültige JobDescription:" #, fuzzy #~ msgid "Valid JobDescription found" #~ msgstr "Gültige JobDescription gefunden" #~ msgid "Submit: Failed to connect" #~ msgstr "Submit: Verbindungsfehler" #~ msgid "Submit: Failed sending CWD command" #~ msgstr "Submit: Konnte CWD Kommmando nicht senden" #~ msgid "Submit: Failed sending CWD new command" #~ msgstr "Submit: Konnte CWD new Kommmando nicht senden" #, fuzzy #~ msgid "Failed to prepare job description." #~ msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #~ msgid "Submit: Failed sending job description" #~ msgstr "Submit: Fehler bei Senden von Job Beschreibung" #~ msgid "Submit: Failed uploading local input files" #~ msgstr "Submit; Hochladen der lokalen Inputfiles schlug fehl" #, fuzzy #~ msgid "Failed to prepare job description to target resources." #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #~ msgid "Creating an A-REX client" #~ msgstr "Lege A-REX client an." #, fuzzy #~ msgid "Unable to create SOAP client used by AREXClient." #~ msgstr "Konnte SOAP client nicht für AREXClient anlegen." #, fuzzy #~ msgid "Failed locating credentials." #~ msgstr "Fehler bei der Initialisierung der delegation credentials" #, fuzzy #~ msgid "Failed initiate client connection." #~ msgstr "Fehler bei Schließen von Verbindung 1" #, fuzzy #~ msgid "Re-creating an A-REX client" #~ msgstr "Lege A-REX client an." #~ msgid "AREXClient was not created properly." 
#~ msgstr "AREXClient wurde nicht richtig angelegt." #, c-format #~ msgid "Processing a %s request" #~ msgstr "Verarbeite %s Anfrage" #, c-format #~ msgid "%s request failed" #~ msgstr "Anfrage %s schlug fehl" #, c-format #~ msgid "%s request to %s failed with response: %s" #~ msgstr "%s Anfrage an %s schlug fehl mit Antwort %s" #, c-format #~ msgid "XML response: %s" #~ msgstr "XML Antwort: %s" #, fuzzy, c-format #~ msgid "%s request to %s failed. No expected response." #~ msgstr "Anfrage %s an %s schlug fehl. Leere Anwort." #, c-format #~ msgid "Creating and sending submit request to %s" #~ msgstr "Erstelle und sende submit Anfrage an %s" #, c-format #~ msgid "Job description to be sent: %s" #~ msgstr "Zu sendende Job-Beschreibung : %s" #, fuzzy, c-format #~ msgid "Creating and sending job information query request to %s" #~ msgstr "Erstelle und sende job information query request an %s" #, fuzzy, c-format #~ msgid "Unable to retrieve status of job (%s)" #~ msgstr "Konnte status Beschreibung des jobs (%s) nicht erhalten: " #, fuzzy, c-format #~ msgid "Creating and sending service information query request to %s" #~ msgstr "Erstelle und sende service information query request an %s" #, fuzzy, c-format #~ msgid "Creating and sending ISIS information query request to %s" #~ msgstr "Erstelle und send ISIS information query request an %s" #, fuzzy, c-format #~ msgid "Service %s of type %s ignored" #~ msgstr "Service %s des Typ %s wurde ignoriert" #~ msgid "No execution services registered in the index service" #~ msgstr "Keine execution services in index service registriert" #, fuzzy, c-format #~ msgid "Creating and sending terminate request to %s" #~ msgstr "Erstelle und sende terminate request an %s" #, fuzzy, c-format #~ msgid "Creating and sending clean request to %s" #~ msgstr "Erstelle und sende clean request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job description retrieval request to %s" #~ msgstr "Erstelle und sende job description retrieval request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job migrate request to %s" #~ msgstr "Erstelle und sende job migrate request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job resume request to %s" #~ msgstr "Erstelle und sende job resume request an %s" #, fuzzy #~ msgid "Renewal of ARC1 jobs is not supported" #~ msgstr "Das Erneuern von ARC1 Jobs wird nicht unterstützt" #~ msgid "Failed retrieving job status information" #~ msgstr "Konnte Job Status Information nicht beziehen." #, fuzzy #~ msgid "Cleaning of BES jobs is not supported" #~ msgstr "Das Löschen von BES Jobs wird nicht unterstützt" #, fuzzy #~ msgid "Renewal of BES jobs is not supported" #~ msgstr "Das Erneuern von BES Jobs wird nicht unterstützt" #, fuzzy #~ msgid "Resuming BES jobs is not supported" #~ msgstr "Ein Resume von BES jobs wird nicht unterstützt" #, c-format #~ msgid "Failed retrieving job IDs: Unsupported url (%s) given" #~ msgstr "" #~ "Konnte job IDs nicht bestimmen: Nicht unterstützte URL erhalten (%s)" #~ msgid "Failed retrieving job IDs" #~ msgstr "Konnt job IDs nicht erhalten." #~ msgid "" #~ "Error encoutered during job ID retrieval. All job IDs might not have been " #~ "retrieved" #~ msgstr "" #~ "Fehler beim Bestimmen der job ID. Womöglich wurde keine job ID erhalten." 
#~ msgid "No job identifier returned by BES service" #~ msgstr "Kein Job identifier von BES service zurückerhalten" #, fuzzy #~ msgid "Failed adapting job description to target resources" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #~ msgid "The Service doesn't advertise its Quality Level." #~ msgstr "Der Service gibt seinen Quality Level nicht an." #, fuzzy, c-format #~ msgid "Generating A-REX target: %s" #~ msgstr "Generiere A-REX target: %s" #~ msgid "The Service doesn't advertise its Interface." #~ msgstr "Der Service gibt seine Interface nicht an." #~ msgid "The Service doesn't advertise its Serving State." #~ msgstr "Der Servcice gibt seinen Serving State nicht an." #, fuzzy, c-format #~ msgid "Failed to cancel job: %s" #~ msgstr "Fehler beim Authentifizieren: %s" #~ msgid "Creating a CREAM client" #~ msgstr "Anlegen eines CREAM client" #~ msgid "Unable to create SOAP client used by CREAMClient." #~ msgstr "Konnte SOAP client nicht anlegen für CREAMClient." #, fuzzy #~ msgid "CREAMClient not created properly" #~ msgstr "CREAMClient nicht richtig angelegt" #, fuzzy #~ msgid "Empty response" #~ msgstr "Leere Antwort" #, fuzzy, c-format #~ msgid "Request failed: %s" #~ msgstr "Anfrage fehlgeschlagen: %s" #~ msgid "Creating and sending a status request" #~ msgstr "Erstellen und senden einer Status-Anfrage" #, fuzzy #~ msgid "Unable to retrieve job status." #~ msgstr "Konnte Job Status-Informationen nicht erhalten." #~ msgid "Creating and sending request to terminate a job" #~ msgstr "Erstellen und senden von Anfrage, einen Job zu beenden" #~ msgid "Creating and sending request to clean a job" #~ msgstr "Erstlelen und senden einer Anfragen einen Job zu löschen" #, fuzzy #~ msgid "Creating and sending request to resume a job" #~ msgstr "Erstellen und senden von Anfrage, einen Job zu beenden" #, fuzzy #~ msgid "Creating and sending request to list jobs" #~ msgstr "Erstlelen und senden einer Anfragen einen Job zu löschen" #~ msgid "Creating and sending job register request" #~ msgstr "Erstellen und senden einer Anfragen, eien Job zu registrieren" #, fuzzy #~ msgid "No job ID in response" #~ msgstr "Keine Job ID in Antwort" #~ msgid "Creating and sending job start request" #~ msgstr "Erstellen und senden einer Anfrage, einen Job zu starten" #, fuzzy #~ msgid "Creating delegation" #~ msgstr "Erstelle Delegation" #, fuzzy, c-format #~ msgid "Delegatable credentials expired: %s" #~ msgstr "" #~ "Делегированные параметры доÑтупа:\n" #~ " %s" #, fuzzy #~ msgid "Failed signing certificate request" #~ msgstr "Fehler beim Signieren der Anfrage nach Austellen eines Zertifikats" #, fuzzy #~ msgid "Failed putting signed delegation certificate to service" #~ msgstr "" #~ "Der Transfer des signierten delegation certificate zu Service schlug fehl" #, fuzzy, c-format #~ msgid "Failed cleaning job: %s" #~ msgstr "Löschen fehlgeschlagen von job: %s" #, fuzzy, c-format #~ msgid "Failed canceling job: %s" #~ msgstr "Abbruch fehlgeschlagen von job: %s" #, fuzzy #~ msgid "Renewal of CREAM jobs is not supported" #~ msgstr "Erneuerung von CREAM jobs wird nicht unterstützt" #, fuzzy, c-format #~ msgid "Failed resuming job: %s" #~ msgstr "Löschen fehlgeschlagen von job: %s" #, fuzzy #~ msgid "Failed creating signed delegation certificate" #~ msgstr "Erstellen eines singed delegation certificate ist fehlgeschlagen" #, fuzzy, c-format #~ msgid "Unable to submit job. 
Job description is not valid in the %s format" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #, fuzzy #~ msgid "Failed registering job" #~ msgstr "Konnte job nicht registrieren" #~ msgid "Failed starting job" #~ msgstr "Konnte job nicht starten" #, fuzzy #~ msgid "Failed creating singed delegation certificate" #~ msgstr "Erstellen eines signed delegation certificate ist fehlgeschlagen" #, fuzzy #~ msgid "Creating an EMI ES client" #~ msgstr "Anlegen eines CREAM client" #, fuzzy #~ msgid "Unable to create SOAP client used by EMIESClient." #~ msgstr "Konnte SOAP client nicht anlegen für CREAMClient." #, fuzzy #~ msgid "Re-creating an EMI ES client" #~ msgstr "Anlegen eines CREAM client" #, fuzzy, c-format #~ msgid "%s request to %s failed. Unexpected response: %s." #~ msgstr "%s Anfrage an %s schlug fehl mit Antwort %s" #, fuzzy, c-format #~ msgid "Creating and sending job submit request to %s" #~ msgstr "Erstelle und sende submit Anfrage an %s" #, fuzzy, c-format #~ msgid "New limit for vector queries returned by EMI ES service: %d" #~ msgstr "Kein Job identifier von BES service zurückerhalten" #, fuzzy, c-format #~ msgid "Creating and sending service information request to %s" #~ msgstr "Erstelle und sende service information query request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job clean request to %s" #~ msgstr "Erstelle und sende clean request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job suspend request to %s" #~ msgstr "Erstelle und sende job resume request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job restart request to %s" #~ msgstr "Erstelle und sende job resume request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job notify request to %s" #~ msgstr "Erstelle und sende job migrate request an %s" #, fuzzy, c-format #~ msgid "Creating and sending notify request to %s" #~ msgstr "Erstelle und sende clean request an %s" #, fuzzy, c-format #~ msgid "Creating and sending job list request to %s" #~ msgstr "Erstelle und sende job migrate request an %s" #, fuzzy, c-format #~ msgid "Job %s failed to renew delegation %s - %s." #~ msgstr "Initiierung der Delegation fehlgeschlagen" #, fuzzy #~ msgid "Unable to submit job.
Job description is not valid XML" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #, fuzzy #~ msgid "No valid job identifier returned by EMI ES" #~ msgstr "A-REX lieferte keinen Job Identifikator zurück" #, fuzzy #~ msgid "Job failed on service side" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "Failed to obtain state of job" #~ msgstr "Konnte Listing nicht via FTP beziehen: %s" #, fuzzy #~ msgid "Failed to wait for job to allow stage in" #~ msgstr "Konnte nicht verbinden, um Job aufzuräumen" #, fuzzy #~ msgid "Failed to obtain valid stagein URL for input files" #~ msgstr "Konnte Listing nicht via FTP beziehen: %s" #, fuzzy, c-format #~ msgid "Failed uploading local input files to %s" #~ msgstr "Konnte lokale Inputdateien nicht hochladen" #, fuzzy, c-format #~ msgid "Failed to submit job description: EMIESFault(%s , %s)" #~ msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #, fuzzy, c-format #~ msgid "Failed to submit job description: UnexpectedError(%s)" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #, fuzzy #~ msgid "Failed to notify service" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "Failed preparing job description to target resources" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #, fuzzy, c-format #~ msgid "Failed to submit job description: %s" #~ msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #, fuzzy #~ msgid "Generating EMIES targets" #~ msgstr "Generiere A-REX target: %s" #, fuzzy, c-format #~ msgid "Generated EMIES target: %s" #~ msgstr "Generiere A-REX target: %s" #, fuzzy #~ msgid "Submission failed" #~ msgstr "Submission von Anfrage schlug fehl" #, fuzzy #~ msgid "Obtaining status failed" #~ msgstr "Die Job Terminierungs-Anfrage schlug fehl" #, fuzzy #~ msgid "Obtaining information failed" #~ msgstr "SOAP Aufruf fehlgeschlagen" #, fuzzy #~ msgid "Notify failed" #~ msgstr "Schreibfehler" #, fuzzy #~ msgid "Kill failed" #~ msgstr "%s fehlgeschlagen" #, fuzzy #~ msgid "List failed" #~ msgstr "%s fehlgeschlagen" #, fuzzy, c-format #~ msgid "Fetching resource description from %s" #~ msgstr "Setze userRequestDescription zu %s" #, fuzzy, c-format #~ msgid "Failed to obtain resource description: %s" #~ msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #, fuzzy #~ msgid "Resource description is empty" #~ msgstr "Anfrage ist leer" #, fuzzy #~ msgid "Resource description validation passed" #~ msgstr "Quelle Ziel" #, fuzzy #~ msgid "All queries failed" #~ msgstr "Anfrage %s schlug fehl" #, fuzzy, c-format #~ msgid "Unsupported command: %s" #~ msgstr "Nicht unterstützte URL für Quelle: %s" #~ msgid "" #~ "Can't create information handle - is the ARC ldap DMC plugin available?" #~ msgstr "" #~ "Kann information handle nicht anlegen - ist das ARC LDAP DMC plugin " #~ "verfügbar?"
#, fuzzy, c-format #~ msgid "Unknown entry in EGIIS (%s)" #~ msgstr "unbekannter return code %s" #, fuzzy, c-format #~ msgid "Found %u service endpoints from the index service at %s" #~ msgstr "Fand %u execution services des index service %s" #, fuzzy #~ msgid "Cleaning of UNICORE jobs is not supported" #~ msgstr "Das Löschen von BES Jobs wird nicht unterstützt" #, fuzzy #~ msgid "Canceling of UNICORE jobs is not supported" #~ msgstr "Erneuerung von UNICORE Jobs wird nicht unterstützt" #, fuzzy #~ msgid "Renewal of UNICORE jobs is not supported" #~ msgstr "Erneuerung von UNICORE Jobs wird nicht unterstützt" #, fuzzy #~ msgid "Resumation of UNICORE jobs is not supported" #~ msgstr "Resume von UNICORE jobs wird nicht unterstützt" #~ msgid "Creating a UNICORE client" #~ msgstr "Erstellen von UNICORE client" #~ msgid "Failed to find delegation credentials in client configuration" #~ msgstr "Konnte delegation credentials in Client Konfiguration nicht finden" #~ msgid "Failed to initiate delegation" #~ msgstr "Initiierung der Delegation fehlgeschlagen" #~ msgid "Submission request failed" #~ msgstr "Submission von Anfrage schlug fehl" #~ msgid "Submission request succeed" #~ msgstr "Submission von Anfrage ist erfolgt" #~ msgid "There was no response to a submission request" #~ msgstr "Keine Antwort zu submission request erhalten" #~ msgid "A response to a submission request was not a SOAP message" #~ msgstr "Eine Antwort zu submission request war keine SOAP message" #, fuzzy #~ msgid "There is no connection chain configured" #~ msgstr "Es ist keine connetion chain konfiguriert" #, fuzzy, c-format #~ msgid "Submission returned failure: %s" #~ msgstr "Submission ergab Fehler: %s" #, fuzzy, c-format #~ msgid "Submission failed, service returned: %s" #~ msgstr "Submission fehlgeschlagen, Service erwiderte: %s" #, fuzzy #~ msgid "Creating and sending a start job request" #~ msgstr "Erstelle und sende eine start job Anfrage" #, fuzzy #~ msgid "A start job request failed" #~ msgstr "Eine start job Anfrage schlug fehl" #, fuzzy #~ msgid "A start job request succeeded" #~ msgstr "Eine start job Anfrage war erfolgreich" #, fuzzy #~ msgid "There was no response to a start job request" #~ msgstr "Keine Antwort zu start job Anfrage erhalten" #, fuzzy #~ msgid "The response of a start job request was not a SOAP message" #~ msgstr "Die Antwort zu start job Anfrage war keine SOAP Nachricht" #~ msgid "A status request failed" #~ msgstr "Eine Anfrage nach dem Status schlug fehl" #~ msgid "A status request succeed" #~ msgstr "Die Anfrage nach dem Status war erfolgreich" #~ msgid "There was no response to a status request" #~ msgstr "Es gab keine Antwort zu einer Status Anfrage" #~ msgid "The response of a status request was not a SOAP message" #~ msgstr "Die Antwort auf eine Status Anfrage war keine SOAP Nachricht" #~ msgid "The job status could not be retrieved" #~ msgstr "Der Job Status konnte nicht ermittelt werden" #, fuzzy #~ msgid "Creating and sending an index service query" #~ msgstr "Erstellen und senden einer Index Service Anfrage" #~ msgid "Creating and sending a service status request" #~ msgstr "Erstlelen und senden einer Service Status Anfrage" #, fuzzy #~ msgid "A service status request failed" #~ msgstr "Eine Service Status Anfrage schlug fehl" #, fuzzy #~ msgid "A service status request succeeded" #~ msgstr "Eine Service Status Anfrage war erfolgreich" #~ msgid "There was no response to a service status request" #~ msgstr "Es gab keine Antwort zu einer Service Status Anfrage" #~ msgid "The 
response of a service status request was not a SOAP message" #~ msgstr "Die Antwort zu einer Service Status Anfrage war keine SOAP message" #~ msgid "The service status could not be retrieved" #~ msgstr "Der Service Status konnte nicht ermittelt werden" #, fuzzy #~ msgid "A job termination request failed" #~ msgstr "Die Job Terminierungs-Anfrage schlug fehl" #, fuzzy #~ msgid "A job termination request succeed" #~ msgstr "Eine Job Terminierungs-Anfrage war erfolgreich" #~ msgid "There was no response to a job termination request" #~ msgstr "Es gab keine Antwort zu einer Job Terminierungs-Anfrage" #~ msgid "The response of a job termination request was not a SOAP message" #~ msgstr "" #~ "Die Antwort zu einer Job Terminierungs-Anfrage war keine SOAP Nachricht" #, fuzzy #~ msgid "A job cleaning request failed" #~ msgstr "Die Job Terminierungs-Anfrage schlug fehl" #~ msgid "There was no response to a job cleaning request" #~ msgstr "Keine Antwort auf eine Job Löschen-Anfrage erhalten" #~ msgid "The response of a job cleaning request was not a SOAP message" #~ msgstr "Die Antwort auf eine Job Löschen-Anfrage war keine SOAP Nachricht" #, fuzzy, c-format #~ msgid "Cannot handle URL %s" #~ msgstr "Kann Owner von %s nicht ändern" #, fuzzy, c-format #~ msgid "Could not resolve original source of %s: out of time" #~ msgstr "Konnte temporäre Datei nicht anlegen: %s" #, fuzzy, c-format #~ msgid "Could not resolve original source of %s: %s" #~ msgstr "Konnte temporäre Datei nicht anlegen: %s" #, fuzzy, c-format #~ msgid "Failed to query ACIX: %s" #~ msgstr "Fehler bei Lesen von Proxy-Datei: %s" #, fuzzy, c-format #~ msgid "Failed to parse ACIX response: %s" #~ msgstr "Fehler bei Anlegen von GSI Context: %s" #, fuzzy, c-format #~ msgid "ACIX returned %s" #~ msgstr "XACML Anfrage: %s" #, fuzzy, c-format #~ msgid "No locations for %s" #~ msgstr "Keine locations gefunden für %s" #, fuzzy, c-format #~ msgid "%s: ACIX Location: %s" #~ msgstr "Zugriffslist location: %s" #, c-format #~ msgid "" #~ "checingBartenderURL: Response:\n" #~ "%s" #~ msgstr "" #~ "checkingBartenderURL: Response:\n" #~ "%s" #, c-format #~ msgid "" #~ "nd:\n" #~ "%s" #~ msgstr "" #~ "nd:\n" #~ "%s" #, fuzzy #~ msgid "Not a collection" #~ msgstr "Nicht verbunden" #, c-format #~ msgid "Recieved transfer URL: %s" #~ msgstr "Erhielt transfer URL: %s" #, fuzzy, c-format #~ msgid "Calculated checksum: %s" #~ msgstr "Errechnete checksum: %s" #, c-format #~ msgid "Deleted %s" #~ msgstr "Löschte %s" #, fuzzy #~ msgid "" #~ "Missing reference to factory and/or module. Currently safe unloading of " #~ "LDAP DMC is not supported. Report to developers." #~ msgstr "" #~ "Fehlende Referenz zu factory und/oder Module. Es ist unsicher, Globus im " #~ "nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. " #~ "Bitte die Entwickler informieren."
#, fuzzy, c-format #~ msgid "Challenge: %s" #~ msgstr "Herausforderung: %s" #, c-format #~ msgid "Default: %s" #~ msgstr "Voreinstellung: %s" #, c-format #~ msgid "LDAP connection already open to %s" #~ msgstr "LDAP Verbindung bereits offen zu %s" #, c-format #~ msgid "Could not open LDAP connection to %s" #~ msgstr "Konnte LDAP Verbindung nicht öffnen zu %s" #, c-format #~ msgid "Failed to create ldap bind thread (%s)" #~ msgstr "Fehler bei Anlegen von ldap bind thread (%s)" #, c-format #~ msgid "Ldap bind timeout (%s)" #~ msgstr "Ldap bind timeout (%s)" #, c-format #~ msgid "Failed to bind to ldap server (%s)" #~ msgstr "Fehler bei Verbinden zu ldap server (%s)" #, c-format #~ msgid "Could not set LDAP network timeout (%s)" #~ msgstr "Konnte LDAP netowrk Zeitüberschreitung nicht setzen (%s)" #, c-format #~ msgid "Could not set LDAP timelimit (%s)" #~ msgstr "Konnte LDAP Zeitlimit nicht setzen (%s)" #, c-format #~ msgid "Could not set LDAP protocol version (%s)" #~ msgstr "Konnte LDAP Protokoll Version nicht setzen (%s)" #, c-format #~ msgid "LDAPQuery: Querying %s" #~ msgstr "LDAPQuery: Frage an %s" #, c-format #~ msgid " base dn: %s" #~ msgstr " base dn: %s" #, c-format #~ msgid " filter: %s" #~ msgstr " Filter: %s" #, c-format #~ msgid "%s (%s)" #~ msgstr "%s (%s)" #, c-format #~ msgid "LDAPQuery: Getting results from %s" #~ msgstr "LDAPQuery: Erhalte Ergebnisse von %s" #, c-format #~ msgid "Error: no LDAP query started to %s" #~ msgstr "Fehler: keine LDAP Anfrage gestartet bei %s" #, c-format #~ msgid "LDAP query timed out: %s" #~ msgstr "Zeitüberschreibung bei LDAP Anfrage: %s" #, fuzzy #~ msgid "PDPD location is missing" #~ msgstr "Location fehlt" #, fuzzy, c-format #~ msgid "PDPD location: %s" #~ msgstr "Füge location hinzu: url: %s" #, fuzzy, c-format #~ msgid "Failed to contact PDP server: %s" #~ msgstr "Fehler beim Verbinden zu RLS server: %s" #, fuzzy, c-format #~ msgid "There was no SOAP response return from PDP server: %s" #~ msgstr "Keine SOAP response erhalten" #, fuzzy #~ msgid "Creating a client to Argus PDP service" #~ msgstr "Erstelle Client Schnitstelle" #, fuzzy, c-format #~ msgid "XACML authorisation request: %s" #~ msgstr "GACL Auth. Anfrage. 
%s" #, fuzzy, c-format #~ msgid "XACML authorisation response: %s" #~ msgstr "Es wurde keine authorization response erwidert" #, fuzzy #~ msgid "Not authorized" #~ msgstr "echo: Unauthorisiert" #, fuzzy #~ msgid "Doing CREAM request" #~ msgstr "Verarbeite %s Anfrage" #, fuzzy, c-format #~ msgid "Adding profile-id value: %s" #~ msgstr "Verbindung zu %s schlug fehl: %s" #, fuzzy, c-format #~ msgid "Adding subject-id value: %s" #~ msgstr "Füge Anfrage-Token %s hinzu" #, fuzzy, c-format #~ msgid "Adding cert chain value: %s" #~ msgstr "Anlegen von Socket schlug fehl: %s" #, fuzzy, c-format #~ msgid "Adding resource-id value: %s" #~ msgstr "Addressen-Auflösung schlug fehl: %s" #, fuzzy, c-format #~ msgid "Adding action-id value: %s" #~ msgstr "Füge location hinzu: %s - %s" #, fuzzy, c-format #~ msgid "CREAM request generation failed: %s" #~ msgstr "Anfrage fehlgeschlagen: %s" #, fuzzy #~ msgid "Doing EMI request" #~ msgstr "Verarbeite %s Anfrage" #, fuzzy, c-format #~ msgid "Adding resource-owner value: %s" #~ msgstr "Addressen-Auflösung schlug fehl: %s" #, fuzzy, c-format #~ msgid "EMI request generation failed: %s" #~ msgstr "Anlegen von Socket schlug fehl: %s" #, fuzzy #~ msgid "PEPD location is missing" #~ msgstr "Location fehlt" #, fuzzy, c-format #~ msgid "PEPD location: %s" #~ msgstr "Füge location hinzu: url: %s" #, fuzzy, c-format #~ msgid "Can not create XACML SubjectAttribute: %s\n" #~ msgstr "Kann Funktion %s nicht anlegen" #, fuzzy #~ msgid "Can not create XACML Resource \n" #~ msgstr "Kann Resource ID nicht erhalten" #, fuzzy, c-format #~ msgid "Can not create XACML ResourceAttribute: %s\n" #~ msgstr "Kann Funktion %s nicht anlegen" #, fuzzy #~ msgid "Can not create XACML Action\n" #~ msgstr "Kann Funktion %s nicht anlegen" #, fuzzy, c-format #~ msgid "Can not create XACML ActionAttribute: %s\n" #~ msgstr "Kann Funktion %s nicht anlegen" #, fuzzy #~ msgid "Can not create XACML request\n" #~ msgstr "Kann doc Argument nicht anlegen" #, fuzzy, c-format #~ msgid "Adding resoure-id value: %s" #~ msgstr "Addressen-Auflösung schlug fehl: %s" #, fuzzy #~ msgid "Unable to create temporary directory" #~ msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #, fuzzy, c-format #~ msgid "Unable to create data base environment (%s)" #~ msgstr "Kann run time environment nicht auswählen." #, fuzzy, c-format #~ msgid "Unable to create job database (%s)" #~ msgstr "Konnte Job Status-Informationen nicht erhalten." 
#, fuzzy, c-format #~ msgid "Unable to create DB for secondary endpoint keys (%s)" #~ msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #, fuzzy, c-format #~ msgid "Unable to create DB for secondary service info keys (%s)" #~ msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #, fuzzy, c-format #~ msgid "Error from BDB: %s: %s" #~ msgstr "Fehler bei Suche nach LFN anhand guid %s: %s" #, fuzzy, c-format #~ msgid "Error from BDB: %s" #~ msgstr "Error: %s" #, fuzzy, c-format #~ msgid "Job resubmission failed: Unable to load broker (%s)" #~ msgstr "Hochladen des Jobs schlug fehl, keine weiteren Ziele verfügbar" #, fuzzy #~ msgid "" #~ "Job resubmission aborted because no resource returned any information" #~ msgstr "" #~ "Hochladen des Jobs abgebrochen, da keine Cluster entsprechende " #~ "Informationen anboten" #, fuzzy #~ msgid "Job migration aborted, no resource returned any information" #~ msgstr "Job Migration abgebrochen, da kein Cluster Informationen lieferte" #, fuzzy, c-format #~ msgid "Job migration aborted, unable to load broker (%s)" #~ msgstr "Konnter Broker %s nicht laden" #, fuzzy, c-format #~ msgid "Job migration failed for job (%s), no applicable targets" #~ msgstr "Hochladen des Jobs schlug fehl, keine weiteren Ziele verfügbar" #, fuzzy, c-format #~ msgid "" #~ "Trying to migrate to %s: Migration to a %s interface is not supported." #~ msgstr "" #~ "Versuche zu %s zu migrieren: Migration zu einem BES cluster wird nicht " #~ "unterstützt" #, fuzzy #~ msgid "Failed to sign proxy" #~ msgstr "Fehler beim Senden von body" #, fuzzy #~ msgid "Failed to generate X509 request with NSS" #~ msgstr "Fehler bei Generieren von X509 Token für ausgehende SOAP" #, fuzzy #~ msgid "Failed to create X509 certificate with NSS" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "Failed to export X509 certificate from NSS DB" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "Failed to import X509 certificate into NSS DB" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "Failed to initialize the credential configuration" #~ msgstr "Konnte delegation credentials in Client Konfiguration nicht finden" #, fuzzy, c-format #~ msgid "Failed to authenticate to token %s." 
#~ msgstr "Fehler beim Authentifizieren: %s" #, fuzzy, c-format #~ msgid "Failed to delete private key that attaches to certificate: %s" #~ msgstr "Fehler bei Lesen von privater Schlüssel-Datei: %s" #, fuzzy, c-format #~ msgid "wrong SSL lock requested: %i of %i: %i - %s" #~ msgstr "FATAL: falsches SSL lock angefragt: %i von %i: %i - %s" #~ msgid "Number of OpenSSL locks changed - reinitializing" #~ msgstr "Anzahl von OpenSSL locks verändert - reinitialisierung" #, fuzzy, c-format #~ msgid "Failed to delete stale remote cache file %s: %s" #~ msgstr "Fehler bei Lesen von Datei %s: %s" #, fuzzy, c-format #~ msgid "Failed to release lock on remote cache file %s" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy, c-format #~ msgid "Replicating file %s to local cache file %s" #~ msgstr "Fehler bei Umbenennen von temporärer Datei %s zu Lock_datei %s: %s" #, fuzzy, c-format #~ msgid "Failed to delete bad copy of remote cache file %s at %s: %s" #~ msgstr "Fehler bei Anlegen von hard link von %s zu %s: %s" #, fuzzy, c-format #~ msgid "Using remote cache file %s for url %s" #~ msgstr "Fehler bei Entfernen von Cache-Datei %s: %s" #, fuzzy, c-format #~ msgid "Failed to remove registration from %s ISIS" #~ msgstr "Fehler beim Entfernen der location vom LFC: %s" #, fuzzy, c-format #~ msgid "Failed to remove registration from %s EMIRegistry" #~ msgstr "Fehler beim Entfernen der location vom LFC: %s" #, fuzzy, c-format #~ msgid "Failed to %s to EMIRegistry (%s) - %d" #~ msgstr "Fehler bei Verbinden zu server %s:%d" #, fuzzy, c-format #~ msgid "Failed processing user mapping command: unixmap %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy, c-format #~ msgid "Failed processing user mapping command: unixgroupmap %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy, c-format #~ msgid "Failed processing user mapping command: unixlistmap %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy, c-format #~ msgid "Skipping replica on local host %s" #~ msgstr "Doppelte replica location: %s" #, fuzzy, c-format #~ msgid "No locations left for %s" #~ msgstr "Keine locations gefunden für %s" #, fuzzy, c-format #~ msgid "Storing configuration in temporary file %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy #~ msgid "Failed to process service configuration" #~ msgstr "Pfad zu VOMS Server Konfigurationsdatei" #, c-format #~ msgid "" #~ "ChangeActivityStatus: request = \n" #~ "%s" #~ msgstr "" #~ "ChangeActivityStatus: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy #~ msgid "ChangeActivityStatus: old A-REX state does not match" #~ msgstr "" #~ "ChangeActivityStatus: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy #~ msgid "ChangeActivityStatus: Failed to update credentials" #~ msgstr "" #~ "ChangeActivityStatus: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy #~ msgid "ChangeActivityStatus: Failed to resume job" #~ msgstr "" #~ "ChangeActivityStatus: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "ChangeActivityStatus: response = \n" #~ "%s" #~ msgstr "" #~ "ChangeActivityStatus: ответ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "EMIES:PauseActivity: job %s - %s" #~ msgstr "" #~ "MigrateActivity: отзыв = \n" #~ "%s" #, fuzzy, c-format #~ msgid "job %s cancelled successfully" #~ msgstr "Job erfolgreich abgebrochen" #, fuzzy, c-format #~ msgid "EMIES:WipeActivity: job %s - %s" #~ msgstr "" #~ "MigrateActivity: отзыв = \n" #~ "%s" #, fuzzy, c-format #~ msgid "job %s (will be) cleaned successfully" #~ msgstr "Job erfolgreich aufgeräumt." 
#, fuzzy, c-format #~ msgid "job %s restarted successfully" #~ msgstr "Datei %s erfolgreich entfernt" #, c-format #~ msgid "" #~ "CreateActivity: request = \n" #~ "%s" #~ msgstr "" #~ "CreateActivity: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "CreateActivity: response = \n" #~ "%s" #~ msgstr "" #~ "CreateActivity: ответ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "" #~ "EMIES:CreateActivity: request = \n" #~ "%s" #~ msgstr "" #~ "CreateActivity: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "ES:CreateActivity: Failed to create new job: %s" #~ msgstr "" #~ "CreateActivity: ответ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "" #~ "EMIES:CreateActivity: response = \n" #~ "%s" #~ msgstr "" #~ "CreateActivity: ответ = \n" #~ "%s" #, c-format #~ msgid "" #~ "GetActivityDocuments: request = \n" #~ "%s" #~ msgstr "" #~ "GetActivityDocuments: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy #~ msgid "GetActivityDocuments: non-AREX job requested" #~ msgstr "" #~ "GetActivityDocuments: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "GetActivityDocuments: response = \n" #~ "%s" #~ msgstr "" #~ "GetActivityDocuments: ответ = \n" #~ "%s" #, c-format #~ msgid "" #~ "GetActivityStatuses: request = \n" #~ "%s" #~ msgstr "" #~ "GetActivityStatuses: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "GetActivityStatuses: unknown verbosity level requested: %s" #~ msgstr "" #~ "GetActivityStatuses: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "GetActivityStatuses: response = \n" #~ "%s" #~ msgstr "" #~ "GetActivityStatuses: ответ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "EMIES:GetActivityStatus: job %s - %s" #~ msgstr "" #~ "GetActivityStatuses: ответ = \n" #~ "%s" #, c-format #~ msgid "" #~ "GetFactoryAttributesDocument: request = \n" #~ "%s" #~ msgstr "" #~ "GetFactoryAttributesDocument: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "GetFactoryAttributesDocument: response = \n" #~ "%s" #~ msgstr "" #~ "GetFactoryAttributesDocument: ответ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "Can not find queue '%s' in the configuration file" #~ msgstr "Delegation Authorisierung fehlgeschlagen" #, fuzzy, c-format #~ msgid "Checking a match for '%s'" #~ msgstr "Suche nache Existenz von %s" #, fuzzy, c-format #~ msgid "Can't interpret configuration file %s as XML" #~ msgstr "Pfad zu VOMS Server Konfigurationsdatei" #, fuzzy #~ msgid "Type in LRMS is missing" #~ msgstr "Location fehlt" #, fuzzy #~ msgid "Command for authPlugin is missing" #~ msgstr "Location fehlt" #, fuzzy #~ msgid "The delegationDB element is incorrect value" #~ msgstr "Delegation ID: %s" #, fuzzy, c-format #~ msgid "Running command %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Failed to read job's ACL for job %s from %s" #~ msgstr "Fehler bei Lesen von Objekt %s: %s" #, fuzzy, c-format #~ msgid "Failed to load policy evaluator for policy of job %s" #~ msgstr "Fahler bei Herunterladen %s zu %s" #, fuzzy #~ msgid "Failed processing A-REX configuration" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy, c-format #~ msgid "Job submission user: %s (%i:%i)" #~ msgstr "Job Hochladen Zusammenfassung:" #, fuzzy #~ msgid "Job plugin was not initialised" #~ msgstr "Main python thread wurde nicht initialisiert" #, fuzzy #~ msgid "No delegated credentials were passed" #~ msgstr "" #~ "Делегированные параметры доÑтупа:\n" #~ " %s" #, fuzzy, c-format #~ msgid "Cancelling job %s" #~ msgstr "Aufräumen von Job: %s" #, fuzzy, c-format #~ msgid "Cleaning job %s" #~ msgstr "Aufräumen von Job: %s" #, fuzzy #~ msgid 
"Request to open file with storing in progress" #~ msgstr "%s Anfrage an %s schlug fehl mit Antwort %s" #, fuzzy, c-format #~ msgid "Retrieving file %s" #~ msgstr "Lese Archiv Datei %s" #, fuzzy, c-format #~ msgid "Storing file %s" #~ msgstr "Anlegen von Socket schlug fehl: %s" #, fuzzy, c-format #~ msgid "Unknown open mode %i" #~ msgstr "unbekannter return code %s" #, fuzzy #~ msgid "Failed writing local description" #~ msgstr "Fehler bei Schreiben zu Ziel" #, fuzzy #~ msgid "Failed writing ACL" #~ msgstr "Konnte job nicht starten" #, fuzzy #~ msgid "Failed to run external plugin" #~ msgstr "Initiierung der Delegation fehlgeschlagen" #, fuzzy, c-format #~ msgid "Failed to create session directory %s" #~ msgstr "Fehler bei Anlegen/Finden von Verzeichnis %s, (%d)" #, fuzzy #~ msgid "Failed writing status" #~ msgstr "Fehler beim Auflisten von Meta-Dateien" #, fuzzy, c-format #~ msgid "Failed to lock delegated credentials: %s" #~ msgstr "Fehler bei der Initialisierung der delegation credentials" #, fuzzy, c-format #~ msgid "Renewing proxy for job %s" #~ msgstr "Erneuern der credentials für Job %s" #, fuzzy #~ msgid "Failed to write 'local' information" #~ msgstr "Konnte Job Status Information nicht beziehen." #, fuzzy #~ msgid "Failed to renew proxy" #~ msgstr "Fehler beim Senden von body" #, fuzzy, c-format #~ msgid "Checking file %s" #~ msgstr "Suche nache Existenz von %s" #, fuzzy, c-format #~ msgid "Failed to read job's local description for job %s from %s" #~ msgstr "Fehler beim Bezug der Job Beschreibung von Job: %s" #, fuzzy #~ msgid "No non-draining control or session directories available" #~ msgstr "Konnte ownen des session dir nicht ändern zu %i: %s" #, fuzzy, c-format #~ msgid "Using control directory %s" #~ msgstr "Lege Verzeichnis %s an" #, fuzzy, c-format #~ msgid "Using session directory %s" #~ msgstr "Lege Verzeichnis %s an" #, fuzzy, c-format #~ msgid "" #~ "Registered static information: \n" #~ " doc: %s" #~ msgstr "Fehler bei Bezug von Information für Job: %s" #~ msgid "ServiceURL missing" #~ msgstr "ServiceURL fehlt" #, c-format #~ msgid "Protocol is %s, should be https" #~ msgstr "Protokol ist %s, sollte https sein" #, fuzzy, c-format #~ msgid "Aggregation record (%s) read from file successful." #~ msgstr "Verzeichnis %s erfolgreich entfernt" #, fuzzy, c-format #~ msgid "Aggregation record (%s) stored successful." #~ msgstr "Verzeichnis %s erfolgreich entfernt" #, fuzzy, c-format #~ msgid "year: %s" #~ msgstr "header: %s" #, fuzzy, c-format #~ msgid "moth: %s" #~ msgstr "Proxy Pfad: %s" #, fuzzy, c-format #~ msgid "queue: %s" #~ msgstr "Anfrage: %s" #, fuzzy, c-format #~ msgid "query: %s" #~ msgstr "Anfrage: %s" #, fuzzy, c-format #~ msgid "list size: %d" #~ msgstr "Zeige Antwort: %s" #, fuzzy, c-format #~ msgid "XML: %s" #~ msgstr "XML Antwort: %s" #, fuzzy, c-format #~ msgid "synch message: %s" #~ msgstr "Service Nachricht" #, fuzzy, c-format #~ msgid "VO filter for host: %s" #~ msgstr " Filter: %s" #, c-format #~ msgid "Read archive file %s" #~ msgstr "Lese Archiv Datei %s" #, c-format #~ msgid "Failed to write file %s: %s" #~ msgstr "Fehler bei Schreiben zu Datein %s: %s" #, fuzzy, c-format #~ msgid "Incoming time range: %s" #~ msgstr "Verbindung zu %s schlug fehl: %s" #~ msgid "Interactive mode." #~ msgstr "Interaktiver Modus." 
#, fuzzy, c-format #~ msgid "Could not open log directory \"%s\": %s" #~ msgstr "" #~ "Ðевозможно открыть каталог Ñо Ñправкой:\n" #~ "%s" #, fuzzy, c-format #~ msgid "Could not open output directory \"%s\": %s" #~ msgstr "" #~ "Ðевозможно открыть каталог Ñо Ñправкой:\n" #~ "%s" #, c-format #~ msgid "" #~ "MigrateActivity: request = \n" #~ "%s" #~ msgstr "" #~ "MigrateActivity: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "MigrateActivity: response = \n" #~ "%s" #~ msgstr "" #~ "MigrateActivity: отзыв = \n" #~ "%s" #, c-format #~ msgid "" #~ "TerminateActivities: request = \n" #~ "%s" #~ msgstr "" #~ "TerminateActivities: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, fuzzy #~ msgid "TerminateActivities: non-AREX job requested" #~ msgstr "" #~ "TerminateActivities: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #, c-format #~ msgid "" #~ "TerminateActivities: response = \n" #~ "%s" #~ msgstr "" #~ "TerminateActivities: ответ = \n" #~ "%s" #, fuzzy, c-format #~ msgid "Unknown authorization command %s" #~ msgstr "unbekannter return code %s" #, fuzzy, c-format #~ msgid "Plugin %s error: %u" #~ msgstr "Globus Fehler: %s" #, fuzzy, c-format #~ msgid "response: %s" #~ msgstr "Antwort: %s" #, fuzzy, c-format #~ msgid "Send response failed: %s" #~ msgstr "Anlegen von Socket schlug fehl: %s" #, fuzzy #~ msgid "Closed connection" #~ msgstr "Schließe Verbindung" #, fuzzy, c-format #~ msgid "Socket conversion failed: %s" #~ msgstr "Anlegen von Socket schlug fehl: %s" #, fuzzy, c-format #~ msgid "Failed to obtain own address: %s" #~ msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #, fuzzy, c-format #~ msgid "Accepted connection on [%s]:%u" #~ msgstr "Fehler bei Verbinden zu server %s:%d" #, fuzzy #~ msgid "Accept failed" #~ msgstr "PASV fehlgeschlagen" #, fuzzy, c-format #~ msgid "Accept failed: %s" #~ msgstr "Anfrage fehlgeschlagen: %s" #, fuzzy #~ msgid "Authenticate in commands failed" #~ msgstr "Authentifiziere: %s" #, fuzzy #~ msgid "Authentication failure" #~ msgstr "Authentifiziere: %s" #, fuzzy, c-format #~ msgid "Encrypted: %s" #~ msgstr "Verschlüsselter Name ID: %s" #, fuzzy #~ msgid "User has no proper configuration associated" #~ msgstr "Delegation Authorisierung fehlgeschlagen" #, fuzzy #~ msgid "Control connection (probably) closed" #~ msgstr "GET: Verbindung wird geschlossen" #, fuzzy #~ msgid "Command EPRT" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Failed to parse remote addres %s" #~ msgstr "Fehler bei Entfernen von hard link %s: %s" #, fuzzy, c-format #~ msgid "Command USER %s" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command CDUP" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command CWD %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command MKD %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command SIZE %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command SBUF: %i" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command MLST %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command DELE %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command RMD %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command TYPE %c" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command MODE %c" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command ABOR" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command REST %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command EPSV %s" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command SPAS" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command PASV" #~ msgstr "Kommando: 
%s" #, fuzzy #~ msgid "local_pasv failed" #~ msgstr "%s fehlgeschlagen" #, fuzzy #~ msgid "local_spas failed" #~ msgstr "%s fehlgeschlagen" #, fuzzy #~ msgid "Command PORT" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "active_data is disabled" #~ msgstr "SOAP Aufruf fehlgeschlagen" #, fuzzy #~ msgid "local_port failed" #~ msgstr "SendData: Lokaler port schlug fehl: %s" #, fuzzy, c-format #~ msgid "Command MLSD %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command NLST %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command LIST %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command ERET %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command RETR %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command STOR %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command ALLO %i" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command OPTS" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command NOOP" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Command QUIT" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "Failed to close, deleting client" #~ msgstr "Konnte delegation context nicht erhalten" #, fuzzy, c-format #~ msgid "Command DCAU: %i '%s'" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command PBZS: %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Setting pbsz to %lu" #~ msgstr "Setze Datei %s zu Größe %llu" #, fuzzy, c-format #~ msgid "Command PROT: %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Command MDTM %s" #~ msgstr "Kommando: %s" #, fuzzy, c-format #~ msgid "Raw command: %s" #~ msgstr "Kommando: %s" #, fuzzy #~ msgid "abort_callback: start" #~ msgstr "ftp_write_callback: Fehler" #, fuzzy #~ msgid "Closing channel (list)" #~ msgstr "Schließe Verbindung" #, fuzzy #~ msgid "Buffer registration failed" #~ msgstr "Delegation nicht erfolgreich: " #, fuzzy #~ msgid "data_retrieve_callback" #~ msgstr "ftp_check_callback" #, fuzzy #~ msgid "Closing channel (retrieve)" #~ msgstr "Schließe Verbindung" #, fuzzy #~ msgid "data_retrieve_callback: lost buffer" #~ msgstr "ftp_write_callback: Fehler" #, fuzzy #~ msgid "data_connect_store_callback" #~ msgstr "ftp_check_callback" #, fuzzy #~ msgid "Failed to register any buffer" #~ msgstr "Konnte job nicht registrieren" #, fuzzy #~ msgid "data_store_callback: lost buffer" #~ msgstr "ftp_read_callback: Fehler" #, fuzzy #~ msgid "Closing channel (store)" #~ msgstr "Schließe Verbindung" #, fuzzy #~ msgid "Can't parse access rights in configuration line" #~ msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #, fuzzy #~ msgid "Can't parse user:group in configuration line" #~ msgstr "Pfad zu VOMS Server Konfigurationsdatei" #, fuzzy #~ msgid "Can't recognize user in configuration line" #~ msgstr "Pfad zu VOMS Server Konfigurationsdatei" #, fuzzy #~ msgid "Can't parse or:and in configuration line" #~ msgstr "Konnte classname für Policy nicht von Konfiguration parsen" #, fuzzy #~ msgid "Can't parse configuration line" #~ msgstr "Delegation Authorisierung fehlgeschlagen" #, fuzzy, c-format #~ msgid "Bad directory name: %s" #~ msgstr "Verzeichnis: %s" #, fuzzy #~ msgid "Can't parse create arguments in configuration line" #~ msgstr "Konnte classname für Request nicht von Konfiguration parsen" #, fuzzy #~ msgid "Can't parse mkdir arguments in configuration line" #~ msgstr "Konnte classname für Request nicht von Konfiguration parsen" #, fuzzy, c-format #~ msgid "Bad subcommand in configuration line: %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy #~ 
msgid "Bad mount directory specified" #~ msgstr "Kein Cache-Verzeichnis angegeben" #, fuzzy, c-format #~ msgid "mkdir failed: %s" #~ msgstr "PASV fehlgeschlagen: %s" #, fuzzy, c-format #~ msgid "Warning: mount point %s creation failed." #~ msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #, fuzzy, c-format #~ msgid "plugin: open: %s" #~ msgstr "lfn: %s - pfn: %s" #, fuzzy #~ msgid "Not enough space to store file" #~ msgstr "Kein space token angegeben" #, fuzzy, c-format #~ msgid "open: changing owner for %s, %i, %i" #~ msgstr "" #~ "Kann Owner von %s nicht ändernÐевозможно изменить владельца папки %1.\n" #~ "Ошибка: %2" #, fuzzy #~ msgid "Error while reading file" #~ msgstr "Fehler beim Lesen des response header" #, fuzzy #~ msgid "configuration file not found" #~ msgstr "Vermuting - Datei nicht gefunden" #, fuzzy #~ msgid "Wrong maxconnections number in configuration" #~ msgstr "Es ist keine connetion chain konfiguriert" #, fuzzy, c-format #~ msgid "Can't resolve host %s" #~ msgstr "Kann stat-Informationen zu Datei nicht einholen: %s" #, fuzzy #~ msgid "Could not determine hostname from gethostname()" #~ msgstr "Kann hostname von uname nciht ermitteln" #, fuzzy, c-format #~ msgid "Already have directory: %s" #~ msgstr "Lege Verzeichnis %s an" #, fuzzy, c-format #~ msgid "Registering directory: %s with plugin: %s" #~ msgstr "Fehler bei Anlegen von Verzeichnis %s: %s" #, fuzzy, c-format #~ msgid "file node creation failed: %s" #~ msgstr "Anlegen von Socket schlug fehl: %s" #, fuzzy, c-format #~ msgid "failed while processing configuration command: %s %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy, c-format #~ msgid "Failed processing authorization group %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s" #, fuzzy #~ msgid "failed to process client identification" #~ msgstr "Pfad zu VOMS Server Konfigurationsdatei" #, fuzzy #~ msgid "failed to initialize environment variables" #~ msgstr "Fehler bei der Initialisierung der delegation credentials" #, fuzzy #~ msgid "failed to identify plugins path" #~ msgstr "Initiierung der Delegation fehlgeschlagen" #, fuzzy, c-format #~ msgid "Registering dummy directory: %s" #~ msgstr "Lege Verzeichnis %s an" #, fuzzy #~ msgid "Activation failed" #~ msgstr "SOAP Aufruf fehlgeschlagen" #, fuzzy #~ msgid "Globus connection error" #~ msgstr "Schließe Verbindung" #, fuzzy #~ msgid "New connection" #~ msgstr "Wiederholte Nutzung von Verbindung" #, fuzzy #~ msgid "fork failed" #~ msgstr "%s fehlgeschlagen" #, fuzzy #~ msgid "Waiting 1 minute" #~ msgstr "Warte vor Antwort" #, fuzzy #~ msgid "Wrong number of connections" #~ msgstr "Schließe Verbindung" #, fuzzy, c-format #~ msgid "Failed to obtain local address: %s" #~ msgstr "Warnung: Fehler bei Bezug von Attributen von %s: %s" #, fuzzy, c-format #~ msgid "Failed to create socket(%s): %s" #~ msgstr "Fehler bei Anlegen von soft link: %s" #, fuzzy, c-format #~ msgid "Failed to limit socket to IPv6: %s" #~ msgstr "Fehler bei Unlock von Datei %s: %s" #, fuzzy, c-format #~ msgid "Failed to bind socket(%s): %s" #~ msgstr "Fehler bei Unlock von Datei %s: %s" #, fuzzy, c-format #~ msgid "Failed to listen on socket(%s): %s" #~ msgstr "Fehler bei Schreiben zu Datein %s: %s" #, fuzzy #~ msgid "Listen started" #~ msgstr "Start start" #, fuzzy, c-format #~ msgid "Select failed: %s" #~ msgstr "Anfrage fehlgeschlagen: %s" #, fuzzy, c-format #~ msgid "Fork failed: %s" #~ msgstr "PASV fehlgeschlagen: %s" #, fuzzy #~ msgid "Init failed" #~ msgstr "Schreibfehler" #, 
fuzzy #~ msgid "Listen failed" #~ msgstr "Schreibfehler" #, fuzzy #~ msgid "Destroying handle" #~ msgstr "Fehler bei Initialisierung von handle" #, fuzzy #~ msgid "Exiting" #~ msgstr "Zeichenkette" #, fuzzy, c-format #~ msgid "%s: %s:%i" #~ msgstr "%s (%s)" #, fuzzy, c-format #~ msgid "%s %s" #~ msgstr "%s (%s)" #, fuzzy, c-format #~ msgid " %s: %s" #~ msgstr " %s" #, fuzzy, c-format #~ msgid " %s:" #~ msgstr " %s" #, fuzzy, c-format #~ msgid "Mapfile is missing at %s" #~ msgstr "Location fehlt" #, fuzzy #~ msgid "There is no local mapping for user" #~ msgstr "Es ist keine connetion chain konfiguriert" #, fuzzy, c-format #~ msgid "Initially mapped to local user: %s" #~ msgstr "Grid Identität wird zugewiesen zu lokaler Identität '%s'" #, fuzzy, c-format #~ msgid "Local user %s does not exist" #~ msgstr "Lock-Datei %s existiert nicht" #, fuzzy, c-format #~ msgid "Local group %s does not exist" #~ msgstr "Lock-Datei %s existiert nicht" #, fuzzy, c-format #~ msgid "Mapped to local id: %i" #~ msgstr "Fehler bei Unlock von Datei %s: %s" #, fuzzy, c-format #~ msgid "Proxy stored at %s" #~ msgstr "ProxyStore: %s" #, fuzzy #~ msgid "Local user does not exist" #~ msgstr "Lock-Datei %s existiert nicht" #, fuzzy, c-format #~ msgid "Remapped to local user: %s" #~ msgstr "Fehler bei Unlock von Datei %s: %s" #, fuzzy, c-format #~ msgid "Remapped to local id: %i" #~ msgstr "Grid Identität wird zugewiesen zu lokaler Identität '%s'" #, fuzzy #~ msgid "Cannot find ARC XMLNode class" #~ msgstr "Kann custom broker Klasse nicht finden" #~ msgid "Cannot create doc argument" #~ msgstr "Kann doc Argument nicht anlegen" #, fuzzy #~ msgid "Cannot convert doc to Python object" #~ msgstr "Kann doc nicht zu Python Objekt konvertieren" #, c-format #~ msgid "Can not get the delegation credential: %s from delegation service:%s" #~ msgstr "" #~ "Kann delegation credential nicht erhalten: %s von delegation service: %s" #, fuzzy #~ msgid "Head: can't process file %s" #~ msgstr "Lese Archiv Datei %s" #, fuzzy #~ msgid "wrong number in %s" #~ msgstr "Schließe Verbindung" #, fuzzy #~ msgid "CacheService: Unauthorized" #~ msgstr "echo: Unauthorisiert" #~ msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" #~ msgstr "libjvm.so nicht ladbar - überprüfe LD_LIBRARY_PATH" #~ msgid "JVM started" #~ msgstr "JVM gestartet" #~ msgid "Java object returned NULL status" #~ msgstr "Java Objekt gab NULL status an" #~ msgid "use GSI proxy (RFC 3820 compliant proxy is default)" #~ msgstr "" #~ "иÑпользовать доверенноÑть GSI (по умолчанию иÑпользуетÑÑ\n" #~ " RFC 3820-ÑовмеÑÑ‚Ð¸Ð¼Ð°Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñть)" #, fuzzy #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #, fuzzy #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "Konnte Job Beschreibung nicht an Resourcen des Ziels anpassen: %s" #~ msgid "Submit: Failed to disconnect after submission" #~ msgstr "Submit: Fehler bei Disconnect nach Submission" #~ msgid "year" #~ msgid_plural "years" #~ msgstr[0] "Jahre" #~ msgstr[1] "Jahr" #~ msgstr[2] "Jahre" #~ msgid "month" #~ msgid_plural "months" #~ msgstr[0] "Monate" #~ msgstr[1] "Monat" #~ msgstr[2] "Monate" #~ msgid "day" #~ msgid_plural "days" #~ msgstr[0] "Tage" #~ msgstr[1] "Tag" #~ msgstr[2] "Tage" #, fuzzy #~ msgid "arc_to_voms - %u attributes" #~ msgstr " Attribute" #, fuzzy #~ msgid "Plugin failed: %s" #~ msgstr "PASV fehlgeschlagen: %s" #, fuzzy #~ msgid "Failed to report renewed proxy to job" #~ msgstr "Fehler bei 
#, fuzzy #~ msgid "Connecting to %s:%i" #~ msgstr "Verbindung zu %s schlug fehl: %s"
#, fuzzy #~ msgid "Querying at %s" #~ msgstr "Anfrage: %s"
#, fuzzy #~ msgid "Failed to get results from LDAP server %s" #~ msgstr "Fehler beim Verbinden zu RLS server: %s"
#, fuzzy #~ msgid "couldn't open file %s" #~ msgstr "Konnte Datei mit Job Beschreibung nicht öffnen: %s"
#, fuzzy #~ msgid "couldn't process VO configuration" #~ msgstr "Pfad zu VOMS Server Konfigurationsdatei"
#, fuzzy #~ msgid "can't parse configuration line: %s %s %s %s" #~ msgstr "Fehler beim Laden der Service Konfigurationsdatei %s"
#, fuzzy #~ msgid "unsupported configuration command: %s" #~ msgstr "Nicht unterstützte URL für Ziel: %s"
#, fuzzy #~ msgid "Match group: %s" #~ msgstr "Fataler Fehler: %s"
#, fuzzy #~ msgid "Failed writing RSL" #~ msgstr "Konnte job nicht starten"
#, fuzzy #~ msgid "RSL could not be evaluated: %s" #~ msgstr "Datei konnte nicht zu Running Zustand bewegt werden: %s"
#, fuzzy #~ msgid "Can't evaluate RSL fragment: %s" #~ msgstr "Kann stat-Informationen zu Datei nicht einholen: %s"
#, fuzzy #~ msgid "Error reading user generated output file list in %s" #~ msgstr "" #~ "\n" #~ "%s: ошибка чтения входного файла '%s': %s\n"
#, fuzzy #~ msgid "Failed writing output status file" #~ msgstr "Fehler bei Lesen von Dateiliste"
#, fuzzy #~ msgid "Failed to upload (but may be retried) %s" #~ msgstr "Fehler bei Verbinden zu ldap server (%s)"
#, fuzzy #~ msgid "Some uploads failed, but (some) may be retried" #~ msgstr "Fehler bei Verbinden zu ldap server (%s)"
#~ msgid "" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid"
#~ msgid "explicity select or reject a specific cluster" #~ msgstr "explizit einen Cluster auswählen oder ablehnen"
#~ msgid "" #~ "Argument to -i has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Аргумент опции -i задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Аргумент опции -c задаётся по форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid"
#~ msgid "explicity select or reject an index server" #~ msgstr "Ausdrücklich einen Index Server bestimmen oder ablehnen"
#~ msgid "" #~ "The arcmigrate command is used for migrating queud jobs to another " #~ "cluster.\n" #~ "Note that migration is only supported between ARC1 clusters." #~ msgstr "" #~ "Das arcmigrate Kommando dient zur Verteilung von bereits hochgeladenen " #~ "Jobs zwischen Clustern. Diese Migration wird nur zwischen ARC1 Clustern " #~ "unterstützt."
#, fuzzy #~ msgid "select broker method (Random (default), FastestQueue, or custom)" #~ msgstr "" #~ "wählt Methode für eine Verteilung von Jobs zwischen Clustern (Random " #~ "(engl. für zufällig, die Voreinstellung), FastestQueue (die Queue mit den " #~ "schnellsten Rechnern), oder custom (für eigenes))"
#~ msgid "[job ...]\n" #~ msgstr "[Job ...]\n"
#, fuzzy #~ msgid "file where the jobs will be stored" #~ msgstr "Datei, in der Jobs abgelegt werden"
#~ msgid "explicity select or reject a specific cluster for the new job" #~ msgstr "positive oder negative Selektion eines Clusters für einen Job"
#~ msgid "No jobs to resubmit" #~ msgstr "Keine erneut hochzuladenden Jobs"
#~ msgid "Submission to %s failed, trying next target" #~ msgstr "Hochladen zu %s schlug fehl, versuche nächstes Ziel"
#~ msgid "Job resubmitted with new jobid: %s" #~ msgstr "Job erneut hochgeladen mit neuer Job ID: %s"
#~ msgid "Job could not be killed or cleaned" #~ msgstr "Job konnte nicht abgebrochen oder gelöscht werden"
#~ msgid "url of the policy decision service" #~ msgstr "URL des Policy Decision Service"
#~ msgid "use SAML 2.0 profile of XACML v2.0 to contact the service" #~ msgstr "nutze SAML 2.0 Profil von XACML v2.0 um Server zu kontaktieren"
#~ msgid "path to private key file" #~ msgstr "Pfad zu Datei mit privatem Schlüssel"
#~ msgid "URL of SLCS service" #~ msgstr "URL des SLCS Service"
#~ msgid "IdP name" #~ msgstr "IdP Name"
#~ msgid "Password for user account to IdP" #~ msgstr "Passwort des user accounts für IdP"
#~ msgid "Private key passphrase" #~ msgstr "Passphrase für privaten Schlüssel"
#~ msgid "passphrase" #~ msgstr "Passphrase"
#~ msgid "Source probably does not exist" #~ msgstr "Quelle existiert vermutlich nicht"
#~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Aktueller Transfer SCHLUG FEHL: %s - %s"
#~ msgid "Transfer FAILED: %s - %s" #~ msgstr "Transfer FEHLER: %s - %s"
#~ msgid "isis" #~ msgstr "isis"
#~ msgid "method" #~ msgstr "Methode"
#~ msgid " ISIS tester start!" #~ msgstr "ISIS tester Start"
#~ msgid "Disconnect: Failed quitting: %s" #~ msgstr "Disconnect: Verlassen der Verbindung fehlgeschlagen: %s"
#~ msgid "Submit: Failed to modify job description to be sent to target." #~ msgstr "" #~ "Submit: Konnte Job Beschreibung für die Sendung zum Ziel nicht " #~ "modifizieren."
#, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a ARC0 cluster is not supported." #~ msgstr "" #~ "Versuch zu migrieren zu %s: Migration zu ARC0 cluster wird nicht " #~ "unterstützt"
#~ msgid "TargetRetriverARC0 initialized with %s service url: %s" #~ msgstr "TargetRetrieverARC0 initialisiert mit %s service URL: %s"
#, fuzzy #~ msgid "Failed locating delegation credentials in chain configuration" #~ msgstr "" #~ "Fehler bei Lokalisation der delegation credentials in chain Konfiguration"
#, fuzzy #~ msgid "Found malformed job state string: %s" #~ msgstr "Fand ungültig formulierte Job Zustandsbeschreibung: %s"
#~ msgid "TargetRetriverARC1 initialized with %s service url: %s" #~ msgstr "TargetRetrieverARC1 initialisiert mit %s service url: %s"
#~ msgid "TargetRetriverBES initialized with %s service url: %s" #~ msgstr "TargetRetrieverBES initialisiert mit %s Service URL: %s"
#~ msgid "" #~ "Matching against job description,following targets possible for " #~ "BenchmarkBroker: %d" #~ msgstr "" #~ "Abgleich mit Job Beschreibung, die folgenden Ziele sind möglich für " #~ "BenchmarkBroker: %d"
#, fuzzy #~ msgid "%d. Cluster: %s; Queue: %s" #~ msgstr "%d. Cluster: %s; Queue: %s"
#~ msgid "Cluster will be ranked according to the %s benchmark scenario" #~ msgstr "Cluster wird bewertet anhand des %s benchmark"
#~ msgid "Best targets are: %d" #~ msgstr "Die besten Ziele sind: %d"
#~ msgid "" #~ "Matching against job description, following targets possible for " #~ "DataBroker: %d" #~ msgstr "" #~ "Abgleich mit Job Beschreibung, die folgenden Ziele sind möglich für " #~ "DataBroker: %d"
#~ msgid "FastestQueueBroker is filtering %d targets" #~ msgstr "FastestQueueBroker filtert %d Ziele"
#~ msgid "FastestQueueBroker will rank the following %d targets" #~ msgstr "FastestQueueBroker bewertet die folgenden %d Ziele"
#~ msgid "" #~ "Matching against job description, following targets possible for " #~ "RandomBroker: %d" #~ msgstr "" #~ "Abgleich gegen Job Beschreibung, folgende Ziele sind möglich für " #~ "RandomBroker: %d"
#, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a CREAM cluster is not supported." #~ msgstr "" #~ "Versuch zu migrieren zu %s: Migration zu einem CREAM cluster wird nicht " #~ "unterstützt"
#~ msgid "TargetRetriverCREAM initialized with %s service url: %s" #~ msgstr "TargetRetrieverCREAM initialisiert mit %s service URL: %s"
#~ msgid "Cannot convert arc module name to Python string" #~ msgstr "Kann arc Modulnamen nicht zu Python Zeichenkette konvertieren"
#~ msgid "Cannot import arc module" #~ msgstr "Kann arc Modul nicht importieren"
#, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a UNICORE cluster is not supported." #~ msgstr "" #~ "Versuch zu migrieren zu %s: Migration zu einem UNICORE cluster wird nicht " #~ "unterstützt"
#~ msgid "TargetRetriverUNICORE initialized with %s service url: %s" #~ msgstr "TargetRetrieverUNICORE initialisiert mit %s service URL: %s"
#~ msgid "File is not accessible: %s - %s" #~ msgstr "Datei ist nicht zugreifbar: %s - %s"
#~ msgid "delete_ftp: globus_ftp_client_delete timeout" #~ msgstr "delete_ftp: globus_ftp_client_delete Zeitüberschreitung"
#, fuzzy #~ msgid "Response(%i): %s" #~ msgstr "Antwort: %s"
#, fuzzy #~ msgid "Failed to close connection 3" #~ msgstr "Fehler bei Schließen von Verbindung 3"
#, fuzzy #~ msgid "Using supplied guid %s" #~ msgstr "Nutze angegebene guid %s"
#~ msgid "meta_get_data: size: %llu" #~ msgstr "meta_get_data: Größe: %llu"
#~ msgid "meta_get_data: created: %s" #~ msgstr "meta_get_data: angelegt: %s"
#~ msgid "LFN is missing in LFC (needed for replication)" #~ msgstr "LFN fehlt in LFC (benötigt für Replikation)"
#~ msgid "LFN already exists in LFC" #~ msgstr "LFN existiert bereits in LFC"
#~ msgid "Creating LFC directory %s" #~ msgstr "Anlegen von LFC Verzeichnis %s"
#~ msgid "Error creating required LFC dirs: %s" #~ msgstr "Fehler bei Anlegen benötigter LFC Verzeichnisse: %s"
#, fuzzy #~ msgid "Error finding info on LFC entry %s which should exist: %s" #~ msgstr "Fehler beim Anlegen von LFC Eintrag %s, guid %s: %s"
#~ msgid "Error creating LFC entry %s, guid %s: %s" #~ msgstr "Fehler beim Anlegen von LFC Eintrag %s, guid %s: %s"
#, fuzzy #~ msgid "Error entering metadata: %s" #~ msgstr "Fehler bei Eingabe von Metadaten: %s"
#~ msgid "No GUID defined for LFN - probably not preregistered" #~ msgstr "Keine GUID definiert für LFN - vielleicht nicht preregistriert"
#~ msgid "Error adding replica: %s" #~ msgstr "Fehler beim Hinzufügen von replica: %s"
#, fuzzy #~ msgid "Entering checksum type %s, value %s, file size %llu" #~ msgstr "Eingabe von checksum Typ %s, Wert %s, Dateigröße %llu"
#~ msgid "Failed to remove LFN in LFC - You may need to do it by hand"
it by hand" #~ msgstr "" #~ "Fehler beim Entfernen von LFN in LFC - Sie müssen dies wohl von Hand " #~ "erledigen" #, fuzzy #~ msgid "Error getting replicas: %s" #~ msgstr "Fehler bei Erhalt der replicas: %s" #~ msgid "Failed to remove location from LFC" #~ msgstr "Fehler beim Entfernen von location von LFC" #~ msgid "Failed to remove LFC directory: directory is not empty" #~ msgstr "" #~ "Fehler beim Entfernen von LFC Verzeichnis: Verzeichnis ist nicht leer" #~ msgid "Failed to remove LFN in LFC: %s" #~ msgstr "Fehler beim Entfernen von LFN in LFC: %s" #~ msgid "guid %s resolved to LFN %s" #~ msgstr "guid %s aufgelöst zu LFN %s" #~ msgid "Failed to find GUID for specified LFN in %s: %s" #~ msgstr "Konnte GUID für angegebenen LFN nicht finden in %s: %s" #~ msgid "There is no GUID for specified LFN in %s" #~ msgstr "Es gibt keine GUID für angegebenen LFN in %s" #~ msgid "Warning: can't get PFNs from server %s: %s" #~ msgstr "Warnung: Kann keine PFNs erhalten von server %s: %s" #, fuzzy #~ msgid "RLS URL must contain host" #~ msgstr "RLS URL muss Angabe von host enthalten" #, fuzzy #~ msgid "" #~ "Locations are missing in destination RLS url - will use those registered " #~ "with special name" #~ msgstr "" #~ "Lokalisation fehlen in Ziel RLS URL - werde die mit special name " #~ "registrierten nutzen" #~ msgid "LFN is missing in RLS (needed for replication)" #~ msgstr "LFN fehlt in RLS (benötigt für Replikation)" #~ msgid "LFN already exists in replica" #~ msgstr "LFN existiert bereits in replica" #~ msgid "Failed to check for existing LFN in %s: %s" #~ msgstr "Fehler bei Überprüfung für existierenden LFN in %s: %s" #, fuzzy #~ msgid "There is same LFN in %s" #~ msgstr "Es existiert dieselbe LFN in %s" #~ msgid "Failed to add LFN-GUID to RLS: %s" #~ msgstr "Fehler bei Hinzufüden von LFN-GUID zu RLS: %s" #~ msgid "Failed to create/add LFN-PFN mapping: %s" #~ msgstr "Fehler bei Anlegen/Hinzufügen von LFN-PFN Zuweisungen: %s" #~ msgid "Warning: Failed to retrieve LFN/PFNs from %s: %s" #~ msgstr "Warnung: Fehler bei Bezug von LFN/PFNs von %s: %s" #~ msgid "SE location will be unregistered automatically" #~ msgstr "SE location wird automatisch deregistriert" #~ msgid "Warning: Failed to delete LFN/PFN from %s: %s" #~ msgstr "Warnung: Fehler beim Löschen von LFN/PFN von %s: %s" #~ msgid "LFN must be already deleted, try LRC anyway" #~ msgstr "LFN muss bereits gelöscht sein, versuche dennoch LRC" #, fuzzy #~ msgid "Failed to retrieve LFN/LRC: %s" #~ msgstr "Fehler bei Bezug von LFN/LRC: %s" #~ msgid "Warning: Failed to connect to LRC at %s: %s" #~ msgstr "Warnung. Fehler bei Verbindung zu LRC bei %s: %s" #~ msgid "No LFNs found in %s" #~ msgstr "Keine LFNs gefunden in %s" #~ msgid "Failed to retrieve list of LFNs/PFNs from %s" #~ msgstr "Fehler bei Bezug von List von LFNs/PFNs von %s" #~ msgid "lfn: %s(%s) - %s" #~ msgstr "lfn: %s(%s) - %s" #~ msgid "Warning: can't get list of RLIs from server %s: %s" #~ msgstr "Warnung. Erhalte keine Liste von RLIs von server %s: %s" #, fuzzy #~ msgid "Warning: can't get list of senders from server %s: %s" #~ msgstr "Warnung: Erhalte keine Liste von senders von Server %s: %s" #, fuzzy #~ msgid "" #~ "Warning: No space tokens found matching description! Will copy without " #~ "using token" #~ msgstr "" #~ "Warnung: Keine space tokens gefunden, die der Beschreibung entsprechen! 
" #~ "Kopiere ohne Nutzung der Token" #, fuzzy #~ msgid "start_reading_srm: looking for metadata: %s" #~ msgstr "StartReading: suche nach Metadaten: %s" #~ msgid "globus_io_register_read failed: %s" #~ msgstr "globus_io_register_read ist fehlgeschlagen: %s" #~ msgid "globus_io_register_write failed: %s" #~ msgstr "globus_io_register_write ist fehlgeschlagen: %s" #, fuzzy #~ msgid "clear_input: %s" #~ msgstr "clear_input: %s" #~ msgid "Connection closed" #~ msgstr "Verbindung geschlossen" #, fuzzy #~ msgid "Globus error (read): %s" #~ msgstr "Globus Fehler (Lesen): %s" #, fuzzy #~ msgid "*** Server response: %s" #~ msgstr "*** Server Antwort: %s" #, fuzzy #~ msgid "Failed unwrapping GSI token: %s" #~ msgstr "Fehler bei unwrap des GSI token: %s" #, fuzzy #~ msgid "Unwrapped data does not fit into buffer" #~ msgstr "Unwrapped data passt nicht in Puffer" #, fuzzy #~ msgid "Urecognized SSL token received" #~ msgstr "Nicht erkannter SSL token erkannt" #, fuzzy #~ msgid "read_response_header: line: %s" #~ msgstr "read_response_header: Zeile: %s" #~ msgid "read_response_header: header finished" #~ msgstr "read_response_header: header beendet" #~ msgid "skip_response_entity" #~ msgstr "skip_response_entity" #~ msgid "skip_response_entity: size: %llu" #~ msgstr "skip_response_entity: Größe: %llu" #~ msgid "skip_response_entity: already have all" #~ msgstr "skip_response_entity: Sie haben bereits alle" #~ msgid "skip_response_entity: size left: %llu" #~ msgstr "skip_response_entity: Größe verbleibend: %llu" #~ msgid "skip_response_entity: to read: %llu" #~ msgstr "skip_response_entity: zu lesen: %llu" #~ msgid "skip_response_entity: timeout %llu" #~ msgstr "skip_response_entity: Zeitüberschreitung %llu" #~ msgid "skip_response_entity: read: %u (%llu)" #~ msgstr "skip_response_entity: gelesen: %u (%llu)" #~ msgid "skip_response_entity: read all" #~ msgstr "skip_response_entity: alles gelesen" #, fuzzy #~ msgid "skip_response_entity: no entity" #~ msgstr "skip_response_entity: no entity" #~ msgid "skip_response_entity: unknown size" #~ msgstr "skip_response_entity: unbekannte Größe" #, fuzzy #~ msgid "Timeout sending header" #~ msgstr "Zeitüberschreitung beim Senden des Header" #, fuzzy #~ msgid "Failure while receiving entity" #~ msgstr "Fehler beim Emfpangen von entity" #, fuzzy #~ msgid "Timeout while sending header" #~ msgstr "Zeitüberschreitung beim Senden von header" #~ msgid "GET: header is read - rest: %u" #~ msgstr "GET: header wird gelesen - verbleibend: %u" #~ msgid "GET: calling callback(rest): content: %s" #~ msgstr "GET: rufe callback(rest): Inhalt: %s" #~ msgid "GET: calling callback(rest): size: %u" #~ msgstr "GET: rufe callbeck(rest): Größe: %u" #~ msgid "GET: calling callback(rest): offset: %llu" #~ msgstr "GET: rufe callbeck(rest): offset: %llu" #, fuzzy #~ msgid "GET callback returned error" #~ msgstr "GET callback lieferte Fehlermeldung" #, fuzzy #~ msgid "Failed while reading response content" #~ msgstr "Fehler beim Lesen von Antwort Inhalt" #, fuzzy #~ msgid "Timeout while reading response content" #~ msgstr "Timeout beim Lesen von Antwort Inhalt" #, fuzzy #~ msgid "Error while reading response content" #~ msgstr "Fehler beim Lesen von Antwort Inhalt" #~ msgid "GET: calling callback: content: %s" #~ msgstr "GET: rufe callback: Inhalt: %s" #, fuzzy #~ msgid "GET: calling callback: size: %u" #~ msgstr "GET: rufe callback: Größe: %u" #~ msgid "GET: calling callback: offset: %llu" #~ msgstr "GET: rufe callback: offset: %llu" #, fuzzy #~ msgid "Timeout while sending SOAP request" #~ 
msgstr "Timeout beim Senden von SOAP request" #~ msgid "Error sending data to server" #~ msgstr "Fehler beim Senden von Daten zum Server" #~ msgid "SOAP request failed (get)" #~ msgstr "SOAP Anfrage fehlgeschlagen (get)" #~ msgid "SOAP request failed (getRequestStatus)" #~ msgstr "SOAP Anfrage fehlgeschlagen (getRequestStatus)" #~ msgid "SOAP request failed (put)" #~ msgstr "SOAP Anfrage fehlgeschlagen (put)" #~ msgid "SOAP request failed (copy)" #~ msgstr "SOAP Anfrage fehlgeschlagen (copy)" #~ msgid "SOAP request failed (setFileStatus)" #~ msgstr "SOAP Anfrage fehlgeschlagen (setFileStatus)" #~ msgid "SOAP request failed (SRMv1Meth__advisoryDelete)" #~ msgstr "SOAP Anfragen fehlgeschlagen (SRMv1Meth__advisoryDelete)" #~ msgid "SOAP request failed (getFileMetaData)" #~ msgstr "SOAP Anfragen fehlgeschlagen (getFileMetaData)" #~ msgid "SOAP request failed (%s)" #~ msgstr "SOAP Anfrage fehlgeschlagen (%s)" #~ msgid "Error: PrepareToGet request timed out after %i seconds" #~ msgstr "" #~ "Fehler: Zeitüberschreitung bei PrepareToGet Anfrage nach %i Sekunden" #~ msgid "Request is reported as ABORTED" #~ msgstr "Anfrage wurde berichtet als ABORTED (abgebrochen)" #~ msgid "Error: PrepareToPut request timed out after %i seconds" #~ msgstr "" #~ "Fehler: Zeitüberschreitung bei PrepareToPut Anfrage nach %i Sekunden" #~ msgid "Error: Ls request timed out after %i seconds" #~ msgstr "Fehler: Zeitüberschreitung bei Ls Anfrage nach %i Sekunden" #~ msgid "Error: copy request timed out after %i seconds" #~ msgstr "Fehler: Zeitüberschreitung bei Kopieranfrage nach %i Sekunden " #, fuzzy #~ msgid "SOAP request failed (srmMkdir)" #~ msgstr "SOAP Anfrage schlug fehl (srmMkdir)" #, fuzzy #~ msgid "Error opening srm info file %s:%s" #~ msgstr "Fehler bei Öffnen von Meta-Datei %s: %s" #, fuzzy #~ msgid "Trying to open confirm site %s" #~ msgstr "" #~ "Ошибка при попытке открыть файл:\n" #~ " %1" #~ msgid "Source is bad URL or can't be used due to some reason" #~ msgstr "" #~ "Quelle ist eine schlechte URL oder kann aus irgendeinem Grund nicht " #~ "genutzt werden." #~ msgid "Destination is bad URL or can't be used due to some reason" #~ msgstr "" #~ "Ziel ist eine schlechte URL oder kann aus irgendeinem Grund nicht genutzt " #~ "werden." 
#, fuzzy #~ msgid "Failed while transfering data (mostly timeout)" #~ msgstr "Fehler bei Datentransfer (überwiegend Zeitüberschreitung)" #, fuzzy #~ msgid "Error creating file %s with mkstemp(): %s" #~ msgstr "Fehler bei Anlegen von %s mit mkstemp(): %s" #~ msgid "Error opening lock file we just renamed successfully %s: %s" #~ msgstr "" #~ "Fehler bei Öfnnen von Lock-Datei die gerade erfolgreich umgenannt wurde " #~ "%s: %s" #~ msgid "" #~ "Lock that recently existed has been deleted by another process, calling " #~ "Start() again" #~ msgstr "" #~ "Lock das zuvor existierte wurde gelöscht von anderem Prozess, rufe " #~ "Start() nochmals" #~ msgid "Error opening valid and existing lock file %s: %s" #~ msgstr "Fehler bei Öffnen von gültiger und existierener Lock-Datei %s: %s" #~ msgid "Error reading valid and existing lock file %s: %s" #~ msgstr "Fehler bei Lesen von gültiger und existierender Lock-Datei %s: %s" #, fuzzy #~ msgid "Error creating tmp file %s for remote lock with mkstemp(): %s" #~ msgstr "Fehler bei Anlegen von %s mit mkstemp(): %s" #, fuzzy #~ msgid "Error writing to tmp lock file for remote lock %s: %s" #~ msgstr "Fehler beim Schreiben zu tmp lock Datei %s: %s" #, fuzzy #~ msgid "Warning: closing tmp lock file for remote lock %s failed" #~ msgstr "Warnung: Schließen von tmp Lock Datei %s fehlgeschlagen" #, fuzzy #~ msgid "Error renaming tmp file %s to lock file %s for remote lock: %s" #~ msgstr "Fehler bei Umbenennen von temporärer Datei %s zu Lock_datei %s: %s" #, fuzzy #~ msgid "" #~ "Error renaming lock file for remote lock, even though rename() did not " #~ "return an error: %s" #~ msgstr "" #~ "Fehler bei Umbenennen von Lock-Datei, obwohl rename() keinen Fehler " #~ "zurücklieferte" #, fuzzy #~ msgid "" #~ "Error opening lock file for remote lock we just renamed successfully %s: " #~ "%s" #~ msgstr "" #~ "Fehler bei Öfnnen von Lock-Datei die gerade erfolgreich umgenannt wurde " #~ "%s: %s" #, fuzzy #~ msgid "" #~ "The remote cache file is currently locked with a valid lock, will " #~ "download from source" #~ msgstr "Die Datei ist derzeit gelockt mit einem gültigen Lock" #, fuzzy #~ msgid "Failed to create file %s for writing: %s" #~ msgstr "Fehler bei Anlegen von Datei %s zum Schreiben: %s" #, fuzzy #~ msgid "Error: Cache file %s does not exist" #~ msgstr "Cache-Datei %s existiert nicht" #~ msgid "Failed to change permissions of session dir to 0700: %s" #~ msgstr "Konnte Zugriffsrechte auf session dir nicht ändern zu 0700: %s" #~ msgid "Error opening per-job dir %s: %s" #~ msgstr "Fehler bei Öffnen von per-job Verzeichnis %s. 
%s" #, fuzzy #~ msgid "Mismatching url in file %s: %s Expected %s" #~ msgstr "Nicht-Übereinstimmung von URL in Datei %s: %s erwartete %s" #, fuzzy #~ msgid "Bad separator in file %s: %s" #~ msgstr "Ungültges Trennzeichen in Datei %s: %s" #, fuzzy #~ msgid "Bad value of expiry time in %s: %s" #~ msgstr "Ungültiger Wert für expiry time in %s: %s" #, fuzzy #~ msgid "Error opening lock file %s: %s" #~ msgstr "Fehler bei Öffnen von Lock Datei %s: %s" #, fuzzy #~ msgid "Can't read user list in specified file %s" #~ msgstr "Fehler bei Lesen von Zertifikat-Datei: %s" #, fuzzy #~ msgid "%s: State: %s: failed to create temporary proxy for renew: %s" #~ msgstr "Fehler bei Erstellen von Info-Datei %s: %s" #, fuzzy #~ msgid "%s: adding to transfer share %s" #~ msgstr "Datentransfer abgebrochen: %s" #, fuzzy #~ msgid "NULL response" #~ msgstr "Keine SOAP Antwort" #, fuzzy #~ msgid "" #~ "Not authorized from Charon service; Some of the RequestItem does not " #~ "satisfy Policy" #~ msgstr "" #~ "UnAuthorisiert von arc.pdp; einige der ReqestItems genügen nicht der " #~ "Policy" #, fuzzy #~ msgid "Loading policy from %s" #~ msgstr "Lade python broker (%i)" #, fuzzy #~ msgid "Old policy times: %u/%u" #~ msgstr "Policy Zeile: %s" #, fuzzy #~ msgid "New policy times: %u/%u" #~ msgstr "Policy Zeile: %s" #, fuzzy #~ msgid "Misisng or empty CertificatePath element in the configuration!" #~ msgstr "Fehlendes oder leeres CertificatePath Element" #, fuzzy #~ msgid "Missing or empty CACertificatesDir element in the configuration!" #~ msgstr "Fehlendes oder leeres CertificatePath Element" #, fuzzy #~ msgid "Missing or empty CACertificatePath element in the configuration!" #~ msgstr "Fehlendes oder leeres CertificatePath Element" #~ msgid "" #~ "Thread %d: Task %d Result:\n" #~ "%s\n" #~ msgstr "" #~ "Поток %d: Задание %d Результат:\n" #~ "%s\n" #~ msgid "Is connected to database? %s" #~ msgstr "Ist verbunden mit Datenban? %s" #~ msgid "Can not decrypt the EncryptedID from saml assertion" #~ msgstr "Konnte die Encrypted ID von SAML Assertion nicht entschlüsseln" #~ msgid "Decrypted SAML NameID: %s" #~ msgstr "Entschlüsselter SAML NameID: %s" #~ msgid "Request succeeded!!!" #~ msgstr "Anfragen Erfolgreich!!!" 
#~ msgid "%d <> %d" #~ msgstr "%d <> %d" #~ msgid "Invalid status report" #~ msgstr "Ungültiger Status Report:" #~ msgid "%s reports job status of %s but it is running on %s" #~ msgstr "%s berichtet Job Status von %s aber läuft auf %s" #~ msgid "%s try to status change: %s->%s" #~ msgstr "%s versuch Status zu ändern: %s -> %s" #~ msgid "refresh: Cannot abort transaction: %s" #~ msgstr "refresh: Kann Transaktion nicht abbrechen: %s" #~ msgid "refresh: Error during transaction: %s" #~ msgstr "refresh: Fehler bei Transaktion: %s" #~ msgid "operator[]: Cannot abort transaction: %s" #~ msgstr "operator[]: Kann Transaktion nicht abbrechen: %s" #~ msgid "remove: Cannot abort transaction: %s" #~ msgstr "remove: Kann Transaktion nicht abbrechen: %s" #, fuzzy #~ msgid "There is no X509Request node in the request message" #~ msgstr "Es ist kein X509Request Knoten in der request Nachricht" #~ msgid "Composed DN: %s" #~ msgstr "Zusammengestellte DN: %s" #~ msgid "CentralAHash constructor called" #~ msgstr "CentralAHash Konstruktor aufgerufen" #~ msgid "Error importing class" #~ msgstr "Fehler bei Importieren von Klasse" #~ msgid "ReplicatedAHash constructor called" #~ msgstr "ReplicatedAHash aconstructor aufgrufen" #, fuzzy #~ msgid "sending message of length" #~ msgstr "sende Nachricht der Läng %d an %s" #~ msgid "sendt message, success=%s" #~ msgstr "Nachricht gesende, Erfolg=%s" #~ msgid "processing message..." #~ msgstr "verarbeite Nachricht" #~ msgid "processing message... Finished" #~ msgstr "Verarbeiten der Nachricht beendet" #~ msgid "Couldn't start replication manager." #~ msgstr "Konnte replication manager nicht starten" #~ msgid "Could not find checking period, using default 10s" #~ msgstr "Konnte checking period nicht finden, nutze Voreinstellung von 10s" #~ msgid "Bad cache size or no cache size configured, using 10MB" #~ msgstr "" #~ "Ungültige cache Größe oder keine cache Größe konfiguriert, nutze 10MB" #~ msgid "master locking" #~ msgstr "master setzt lock" #~ msgid "unlocking" #~ msgstr "entferne lock" #~ msgid "unlocked" #~ msgstr "lock entfernt" #~ msgid "couldn't unlock" #~ msgstr "konnte lock nicht entfernen" #~ msgid "checkingThread slept %d s" #~ msgstr "checkingThread schlief %d s" #, fuzzy #~ msgid "wrote ahash list %s" #~ msgstr "schrieb ahash Liste %s" #~ msgid "but dbenv wasn't ready." 
#~ msgstr "aber dbenv war nicht bereit" #~ msgid "Couldn't start replication framework" #~ msgstr "Konnte replication framework nicht starten" #, fuzzy #~ msgid "entered election thread" #~ msgstr "Starte Auswahl-Thread" #, fuzzy #~ msgid "%s: my role is" #~ msgstr "%s: meine Rolle ist %d" #, fuzzy #~ msgid "%s: my role is now" #~ msgstr "%s: meine Rolle ist nun %d" #, fuzzy #~ msgid "Couldn't run election" #~ msgstr "Konnte Auswahl nicht vornehmen" #, fuzzy #~ msgid "num_reps is %(nr)d, votes is %(v)d, hostMap is %(hm)s" #~ msgstr "num_reps ist %d, Stimmen sind %d, hostMap ist %s" #~ msgid "entering startElection" #~ msgstr "Start von startElection" #~ msgid "new role" #~ msgstr "neue Rolle" #~ msgid "Couldn't begin role" #~ msgstr "Konnte Rolle nicht beginnen" #~ msgid "entering send" #~ msgstr "Start von send" #, fuzzy #~ msgid "failed to send to" #~ msgstr "Fehler beim Senden von body" #~ msgid "Master is offline, starting re-election" #~ msgstr "Master ist offline, starte Neuwahl" #~ msgid "entering repSend" #~ msgstr "Starte repSend" #~ msgid "entering sendNewSiteMsg" #~ msgstr "Start von sendNewSiteMsg" #~ msgid "entering sendHeartbeatMsg" #~ msgstr "Start von sendHeartbeatMsg" #~ msgid "entering sendNewMasterMsg" #~ msgstr "Start von sendNewMasterMsg" #~ msgid "entering processMessage from " #~ msgstr "verarbeite processMessage von " #~ msgid "received message from myself!" #~ msgstr "erhielt Nachricht von mir selbst!" #~ msgid "received from new sender or sender back online" #~ msgstr "erhalten von neuem Sender der Sender ist wieder online" #~ msgid "received master id" #~ msgstr "erhielt master id" #~ msgid "received HEARTBEAT_MESSAGE" #~ msgstr "erhielt HEARTBEAT_MESSAGE" #~ msgid "received ELECTION_MESSAGE" #~ msgstr "erhielt ELECTION_MESSAGE" #~ msgid "received NEWSITE_MESSAGE" #~ msgstr "erhielt NEWSITE_MESSAGE" #~ msgid "processing message from %d" #~ msgstr "Verarbeite Nachricht von %d" #~ msgid "received DB_REP_NEWSITE from %s" #~ msgstr "erhielt DB_REP_NEWSITE von %s" #~ msgid "received DB_REP_HOLDELECTION" #~ msgstr "erhielt DB_REP_HODLELECTION" #~ msgid "REP_ISPERM returned for LSN %s" #~ msgstr "REP_ISPERM erhalten für LSN %s" #~ msgid "REP_NOTPERM returned for LSN %s" #~ msgstr "REP_NOTPERM erhalten für LSN %s" #~ msgid "REP_DUPMASTER received, starting new election" #~ msgstr "REP_DUPMASTER erhalten, stare neue Verbindung" #~ msgid "REP_IGNORE received" #~ msgstr "REP_IGNORE erhalten" #~ msgid "JOIN_FAILURE received" #~ msgstr "JOIN_FAILURE erhalten" #~ msgid "I am now a master" #~ msgstr "Ich bin nun ein master" #~ msgid "received DB_EVENT_REP_MASTER" #~ msgstr "erhielt DB_EVEN_REP_MASTER" #~ msgid "I am now a client" #~ msgstr "Ich bin nun ein Client" #~ msgid "Getting permission failed" #~ msgstr "Erlaubnis nicht erhalten" #~ msgid "New master elected" #~ msgstr "Neuer Master ausgewählt" #~ msgid "I won the election: I am the MASTER" #~ msgstr "Ich gewann die Auswahl: Ich bin der MASTER" #, fuzzy #~ msgid "Oops! Internal DB panic!" #~ msgstr "Ooops! Interne DB Panik!" #~ msgid "accessing gateway: %s" #~ msgstr "greife zu auf gateway: %s" #~ msgid "This bartender does not support gateway" #~ msgstr "Dieser Bartender benötigt keinen support gateway" #~ msgid "" #~ "cannot connect to gateway. Access of third party store required gateway." #~ msgstr "" #~ "kann nicht verbinden zu Gateway. Zugang zu store Dritter benötigt einen " #~ "Gatway." 
#~ msgid "Got Librarian URLs from the config:" #~ msgstr "Erhielt Librarian URLs von Konfiguration:" #, fuzzy #~ msgid "Librarian URL or ISIS URL not found in the configuration." #~ msgstr "Librarian URL oder ISIS URL nicht gefunden in der Konfiguration." #~ msgid "Got ISIS URL, starting initThread" #~ msgstr "Erhielt ISIS URL, startete initThread" #~ msgid "Trying to get Librarian from" #~ msgstr "Versuche Librarian zu erhalten von" #~ msgid "Got Librarian from ISIS:" #~ msgstr "Erhielt Librarian von ISIS:" #, fuzzy #~ msgid "Error connecting to ISIS %{iu}s, reason: %{r}s" #~ msgstr "Fehler beim Verbinden zu ISIS %s, Grund: %s" #~ msgid "Error in initThread: %s" #~ msgstr "Fehler in initThread: %s" #~ msgid "initThread finished, starting isisThread" #~ msgstr "initThread beended, starte isisThread" #, fuzzy #~ msgid "Error in isisThread: %s" #~ msgstr "Fehler in isisThread: %s" #~ msgid "//// _traverse request trailing slash removed:" #~ msgstr "//// bei _traverse Anfrage wurde terminaler Schrägstrich entfernt" #~ msgid "adding" #~ msgstr "beim Hinzufügen" #~ msgid "modifyMetadata response" #~ msgstr "modifyMetadata Antwort" #~ msgid "modifyMetadata failed, removing the new librarian entry" #~ msgstr "modifyMetadata failed, entferne den enuen librarian Eintrag" #~ msgid "Error creating new entry in Librarian: %s" #~ msgstr "Fehler beim Anlegen eines neuen Eintrags in Librarian: %s" #~ msgid "//// response from the external store:" #~ msgstr "//// Antwort von ausgewähltem store:" #~ msgid "location chosen:" #~ msgstr "ausgewählte Lokalisation:" #, fuzzy #~ msgid "ERROR from the chosen Shepherd" #~ msgstr "FEHLER bei ausgewähltem Shepherd" #~ msgid "addReplica" #~ msgstr "addReplica" #~ msgid "Registered Shepherds in Librarian" #~ msgstr "Registrierte Shepherds bei Librarian" #~ msgid "Alive Shepherds:" #~ msgstr "Aktive Shepherds:" #~ msgid "LN" #~ msgstr "LN" #~ msgid "metadata" #~ msgstr "Metadaten" #~ msgid "\\/\\/" #~ msgstr "\\/\\/" #~ msgid "removing" #~ msgstr "am Entfernen" #~ msgid "" #~ "The directory for storing proxies is not available. Proxy delegation " #~ "disabled." #~ msgstr "" #~ "Das Verzeichnis für die Ablage von Proxies ist nicht verfügbar. Proxy " #~ "delegation ausgesetzt." #~ msgid "Delegation status: " #~ msgstr "Delegation status: " #~ msgid "creating proxy file : " #~ msgstr "erstelle Proxy Datei : " #~ msgid "" #~ "cannot access proxy_store, Check the configuration file (service.xml)\n" #~ " Need to have a " #~ msgstr "" #~ "Kann auf Proxy Store nicht zugreifen. Überprüfe die Konfigurationsdatei " #~ "(service.xml)\n" #~ " Es wird ein benötigt" #~ msgid "removeCredentials: %s" #~ msgstr "removeCredentials: %s" #~ msgid "proxy store is not accessable." 
#~ msgstr "Proxy store nicht zugereifbar" #~ msgid "Error processing report message" #~ msgstr "Fehler bei Verarbeiten von report message" #~ msgid "Error traversing: %s" #~ msgstr "Fehler bei Traversieren: %s" #~ msgid "Error in traverseLN method: %s" #~ msgstr "Fehler in taverseLN Methode: %s" #~ msgid "Trying to get Bartender from" #~ msgstr "Zerstöre JVM" #~ msgid "Got Bartender from ISIS:" #~ msgstr "Erhielt Bartender von ISIS:" #~ msgid "" #~ "\n" #~ "CHECKSUM OK" #~ msgstr "" #~ "\n" #~ "CHECKSUM в порÑдке" #~ msgid "" #~ "\n" #~ "CHECKSUM MISMATCH" #~ msgstr "" #~ "\n" #~ "CHECKSUM не Ñовпадает" #~ msgid "\n" #~ msgstr "\n" #~ msgid "" #~ "\n" #~ "\n" #~ "File" #~ msgstr "" #~ "\n" #~ "\n" #~ "Файл" #~ msgid "" #~ "\n" #~ "\n" #~ "I have an invalid replica of file" #~ msgstr "" #~ "\n" #~ "\n" #~ "Обнаружена Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ ÐºÐ¾Ð¿Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°" #~ msgid "" #~ "\n" #~ "\n" #~ msgstr "" #~ "\n" #~ "\n" #~ msgid "url of myproxy server" #~ msgstr "URL von myproxy Server" #~ msgid "Myproxy server return failure msg" #~ msgstr "Myproxy Server lieferte eine Fehlermeldung zurück." #, fuzzy #~ msgid "Malformated response" #~ msgstr "Antwort hält sich nicht an Format" #~ msgid "srmPing gives v2.2, instantiating v2.2 client" #~ msgstr "srmPing gibt v2.2, instanziierend v2.2 client" #~ msgid "SOAP error with srmPing, instantiating v1 client" #~ msgstr "SOAP Fehler mit srmPing, instanziierende v1 client" #~ msgid "Service error, cannot instantiate SRM client" #~ msgstr "Service Fehler, kann nicht SRM client instanziieren" #~ msgid "start_reading_ftp: size: url: %s" #~ msgstr "start_reading_ftp: url: %s" #, fuzzy #~ msgid "start_reading_ftp: failure" #~ msgstr "start_reading_ftp: Fehler" #~ msgid "Timeout waiting for FTP file size - cancel transfer" #~ msgstr "" #~ "Zeitüberschreitung bei Warten auf FTP Datei Größe - breche Transfer ab" #~ msgid "start_reading_ftp: failed to get file's size" #~ msgstr "start_reading_ftp: Fehler bei Bezug von Dateigröße" #~ msgid "start_reading_ftp: globus_ftp_client_modification_time failed" #~ msgstr "" #~ "start_reading_ftp: globus_ftp_client_modification_time fehlgeschlagen" #~ msgid "start_reading_ftp: failed to get file's modification time" #~ msgstr "start_reading_ftp: Fehler bei Bezug von Zeitpunkt letzter Änderung" #, fuzzy #~ msgid "start_reading_ftp: range is out of size" #~ msgstr "start_reading_ftp: Größe von Wert verlässt erwarteten Bereich" #~ msgid "%s tried election with %d replicas" #~ msgstr "%s initiierte Auswahl mit %d Replicas" #, fuzzy #~ msgid "store job descriptions in local sandbox." #~ msgstr "Lege Job Beschreibung in lokaler Sandbox ab." 
nordugrid-arc-7.1.1/po/PaxHeaders/POTFILES.in0000644000000000000000000000013215067751430015541 xustar0030 mtime=1759499032.317473002 30 atime=1759499032.530476239 30 ctime=1759499034.626441169 nordugrid-arc-7.1.1/po/POTFILES.in0000644000175000002070000010546115067751430017452 0ustar00mockbuildmock00000000000000src/Test.cpp src/clients/compute/arccat.cpp src/clients/compute/arcclean.cpp src/clients/compute/arcget.cpp src/clients/compute/arcinfo.cpp src/clients/compute/arckill.cpp src/clients/compute/arcrenew.cpp src/clients/compute/arcresume.cpp src/clients/compute/arcstat.cpp src/clients/compute/arcsub.cpp src/clients/compute/arcsync.cpp src/clients/compute/arctest.cpp src/clients/compute/submit.cpp src/clients/compute/submit.h src/clients/compute/utils.cpp src/clients/compute/utils.h src/clients/credentials/arcproxy.cpp src/clients/credentials/arcproxy.h src/clients/credentials/arcproxy_myproxy.cpp src/clients/credentials/arcproxy_proxy.cpp src/clients/credentials/arcproxy_voms.cpp src/clients/data/arccp.cpp src/clients/data/arcls.cpp src/clients/data/arcmkdir.cpp src/clients/data/arcrename.cpp src/clients/data/arcrm.cpp src/clients/data/utils.cpp src/clients/data/utils.h src/clients/pyarcrest/setup.py src/clients/pyarcrest/src/pyarcrest/__init__.py src/clients/pyarcrest/src/pyarcrest/arc.py src/clients/pyarcrest/src/pyarcrest/cli/__init__.py src/clients/pyarcrest/src/pyarcrest/cli/arcrest.py src/clients/pyarcrest/src/pyarcrest/errors.py src/clients/pyarcrest/src/pyarcrest/http.py src/clients/pyarcrest/src/pyarcrest/x509.py src/doxygen/add-bindings-deviations-to-dox.py src/doxygen/create-mapping-documentation.py src/external/cJSON/cJSON.h src/hed/acc/ARCHERY/DescriptorsARCHERY.cpp src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.h src/hed/acc/ARCREST/DescriptorsARCREST.cpp src/hed/acc/ARCREST/JobControllerPluginREST.cpp src/hed/acc/ARCREST/JobControllerPluginREST.h src/hed/acc/ARCREST/JobListRetrieverPluginREST.cpp src/hed/acc/ARCREST/JobListRetrieverPluginREST.h src/hed/acc/ARCREST/SubmitterPluginREST.cpp src/hed/acc/ARCREST/SubmitterPluginREST.h src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.h src/hed/acc/Broker/BenchmarkBrokerPlugin.cpp src/hed/acc/Broker/BenchmarkBrokerPlugin.h src/hed/acc/Broker/DataBrokerPlugin.cpp src/hed/acc/Broker/DataBrokerPlugin.h src/hed/acc/Broker/DescriptorsBroker.cpp src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp src/hed/acc/Broker/FastestQueueBrokerPlugin.h src/hed/acc/Broker/NullBrokerPlugin.h src/hed/acc/Broker/RandomBrokerPlugin.h src/hed/acc/Broker/test/BenchmarkBrokerTest.cpp src/hed/acc/JobDescriptionParser/ADLParser.cpp src/hed/acc/JobDescriptionParser/ADLParser.h src/hed/acc/JobDescriptionParser/DescriptorsJobDescriptionParser.cpp src/hed/acc/JobDescriptionParser/RSLParser.cpp src/hed/acc/JobDescriptionParser/RSLParser.h src/hed/acc/JobDescriptionParser/XMLNodeRecover.cpp src/hed/acc/JobDescriptionParser/XMLNodeRecover.h src/hed/acc/JobDescriptionParser/XRSLParser.cpp src/hed/acc/JobDescriptionParser/XRSLParser.h src/hed/acc/JobDescriptionParser/test/ADLParserTest.cpp src/hed/acc/JobDescriptionParser/test/XRSLParserTest.cpp src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp src/hed/acc/PythonBroker/PythonBrokerPlugin.h src/hed/acc/PythonBroker/SampleBroker.py src/hed/acc/TEST/BrokerPluginTestACC.h src/hed/acc/TEST/JobControllerPluginTestACC.cpp src/hed/acc/TEST/JobControllerPluginTestACC.h 
src/hed/acc/TEST/JobDescriptionParserPluginTestACC.cpp src/hed/acc/TEST/JobDescriptionParserPluginTestACC.h src/hed/acc/TEST/JobListRetrieverPluginTEST.cpp src/hed/acc/TEST/JobListRetrieverPluginTEST.h src/hed/acc/TEST/ServiceEndpointRetrieverPluginTEST.cpp src/hed/acc/TEST/ServiceEndpointRetrieverPluginTEST.h src/hed/acc/TEST/SubmitterPluginTestACC.cpp src/hed/acc/TEST/SubmitterPluginTestACC.h src/hed/acc/TEST/TargetInformationRetrieverPluginTEST.cpp src/hed/acc/TEST/TargetInformationRetrieverPluginTEST.h src/hed/acc/TEST/TestACCPluginDescriptors.cpp src/hed/daemon/options.cpp src/hed/daemon/options.h src/hed/daemon/unix/daemon.cpp src/hed/daemon/unix/daemon.h src/hed/daemon/unix/main_unix.cpp src/hed/dmc/file/DataPointFile.cpp src/hed/dmc/file/DataPointFile.h src/hed/dmc/gfal/DataPointGFAL.cpp src/hed/dmc/gfal/DataPointGFAL.h src/hed/dmc/gfal/DataPointGFALDelegate.cpp src/hed/dmc/gfal/DataPointGFALDelegate.h src/hed/dmc/gfal/GFALTransfer3rdParty.cpp src/hed/dmc/gfal/GFALTransfer3rdParty.h src/hed/dmc/gfal/GFALUtils.cpp src/hed/dmc/gfal/GFALUtils.h src/hed/dmc/gridftp/DataPointGridFTP.cpp src/hed/dmc/gridftp/DataPointGridFTP.h src/hed/dmc/gridftp/DataPointGridFTPDelegate.cpp src/hed/dmc/gridftp/DataPointGridFTPDelegate.h src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp src/hed/dmc/gridftp/DataPointGridFTPHelper.h src/hed/dmc/gridftp/Lister.cpp src/hed/dmc/gridftp/Lister.h src/hed/dmc/http/DataPointHTTP.cpp src/hed/dmc/http/DataPointHTTP.h src/hed/dmc/http/StreamBuffer.cpp src/hed/dmc/http/StreamBuffer.h src/hed/dmc/mock/DataPointMock.cpp src/hed/dmc/mock/DataPointMock.h src/hed/dmc/rucio/DataPointRucio.cpp src/hed/dmc/rucio/DataPointRucio.h src/hed/dmc/s3/DataPointS3.cpp src/hed/dmc/s3/DataPointS3.h src/hed/dmc/srm/DataPointSRM.cpp src/hed/dmc/srm/DataPointSRM.h src/hed/dmc/srm/srmclient/SRM1Client.cpp src/hed/dmc/srm/srmclient/SRM1Client.h src/hed/dmc/srm/srmclient/SRM22Client.cpp src/hed/dmc/srm/srmclient/SRM22Client.h src/hed/dmc/srm/srmclient/SRMClient.cpp src/hed/dmc/srm/srmclient/SRMClient.h src/hed/dmc/srm/srmclient/SRMClientRequest.h src/hed/dmc/srm/srmclient/SRMInfo.cpp src/hed/dmc/srm/srmclient/SRMInfo.h src/hed/dmc/srm/srmclient/SRMURL.cpp src/hed/dmc/srm/srmclient/SRMURL.h src/hed/dmc/xrootd/DataPointXrootd.cpp src/hed/dmc/xrootd/DataPointXrootd.h src/hed/dmc/xrootd/DataPointXrootdDelegate.cpp src/hed/dmc/xrootd/DataPointXrootdDelegate.h src/hed/identitymap/IdentityMap.cpp src/hed/identitymap/IdentityMap.h src/hed/identitymap/SimpleMap.cpp src/hed/identitymap/SimpleMap.h src/hed/libs/common/ArcConfig.cpp src/hed/libs/common/ArcConfig.h src/hed/libs/common/ArcConfigFile.cpp src/hed/libs/common/ArcConfigFile.h src/hed/libs/common/ArcConfigIni.cpp src/hed/libs/common/ArcConfigIni.h src/hed/libs/common/ArcLocation.cpp src/hed/libs/common/ArcLocation.h src/hed/libs/common/ArcRegex.cpp src/hed/libs/common/ArcRegex.h src/hed/libs/common/ArcVersion.cpp src/hed/libs/common/ArcVersion.h src/hed/libs/common/Base64.cpp src/hed/libs/common/Base64.h src/hed/libs/common/CheckSum.cpp src/hed/libs/common/CheckSum.h src/hed/libs/common/Counter.cpp src/hed/libs/common/Counter.h src/hed/libs/common/DBInterface.h src/hed/libs/common/DateTime.cpp src/hed/libs/common/DateTime.h src/hed/libs/common/FileAccess.cpp src/hed/libs/common/FileAccess.h src/hed/libs/common/FileLock.cpp src/hed/libs/common/FileLock.h src/hed/libs/common/FileUtils.cpp src/hed/libs/common/FileUtils.h src/hed/libs/common/GUID.cpp src/hed/libs/common/GUID.h src/hed/libs/common/HostnameResolver.cpp 
src/hed/libs/common/HostnameResolver.h src/hed/libs/common/IString.cpp src/hed/libs/common/IString.h src/hed/libs/common/IniConfig.cpp src/hed/libs/common/IniConfig.h src/hed/libs/common/IntraProcessCounter.cpp src/hed/libs/common/IntraProcessCounter.h src/hed/libs/common/JSON.cpp src/hed/libs/common/JSON.h src/hed/libs/common/JobPerfLog.cpp src/hed/libs/common/JobPerfLog.h src/hed/libs/common/Logger.cpp src/hed/libs/common/Logger.h src/hed/libs/common/OptionParser.cpp src/hed/libs/common/OptionParser.h src/hed/libs/common/Profile.cpp src/hed/libs/common/Profile.h src/hed/libs/common/Run.h src/hed/libs/common/Run_unix.cpp src/hed/libs/common/StringConv.cpp src/hed/libs/common/StringConv.h src/hed/libs/common/Thread.cpp src/hed/libs/common/Thread.h src/hed/libs/common/URL.cpp src/hed/libs/common/URL.h src/hed/libs/common/User.cpp src/hed/libs/common/User.h src/hed/libs/common/UserConfig.cpp src/hed/libs/common/UserConfig.h src/hed/libs/common/Utils.cpp src/hed/libs/common/Utils.h src/hed/libs/common/Watchdog.cpp src/hed/libs/common/Watchdog.h src/hed/libs/common/XMLNode.cpp src/hed/libs/common/XMLNode.h src/hed/libs/common/file_access.cpp src/hed/libs/common/file_access.h src/hed/libs/common/hostname_resolver.cpp src/hed/libs/common/hostname_resolver.h src/hed/libs/common/test/ArcRegexTest.cpp src/hed/libs/common/test/Base64Test.cpp src/hed/libs/common/test/CheckSumTest.cpp src/hed/libs/common/test/DateTimeTest.cpp src/hed/libs/common/test/EnvTest.cpp src/hed/libs/common/test/FileAccessTest.cpp src/hed/libs/common/test/FileLockTest.cpp src/hed/libs/common/test/FileUtilsTest.cpp src/hed/libs/common/test/LoggerTest.cpp src/hed/libs/common/test/ProfileTest.cpp src/hed/libs/common/test/RunTest.cpp src/hed/libs/common/test/StringConvTest.cpp src/hed/libs/common/test/ThreadTest.cpp src/hed/libs/common/test/URLTest.cpp src/hed/libs/common/test/UserConfigTest.cpp src/hed/libs/common/test/UserTest.cpp src/hed/libs/common/test/WatchdogTest.cpp src/hed/libs/common/test/XMLNodeTest.cpp src/hed/libs/communication/ClientInterface.cpp src/hed/libs/communication/ClientInterface.h src/hed/libs/communication/ClientSAML2SSO.cpp src/hed/libs/communication/ClientSAML2SSO.h src/hed/libs/communication/ClientX509Delegation.cpp src/hed/libs/communication/ClientX509Delegation.h src/hed/libs/communication/test/SimulatorClasses.cpp src/hed/libs/communication/test/SimulatorClasses.h src/hed/libs/compute/Broker.cpp src/hed/libs/compute/Broker.h src/hed/libs/compute/BrokerPlugin.cpp src/hed/libs/compute/BrokerPlugin.h src/hed/libs/compute/ComputingServiceRetriever.cpp src/hed/libs/compute/ComputingServiceRetriever.h src/hed/libs/compute/Endpoint.cpp src/hed/libs/compute/Endpoint.h src/hed/libs/compute/EndpointQueryingStatus.cpp src/hed/libs/compute/EndpointQueryingStatus.h src/hed/libs/compute/EntityRetriever.cpp src/hed/libs/compute/EntityRetriever.h src/hed/libs/compute/EntityRetrieverPlugin.cpp src/hed/libs/compute/EntityRetrieverPlugin.h src/hed/libs/compute/ExecutionTarget.cpp src/hed/libs/compute/ExecutionTarget.h src/hed/libs/compute/GLUE2.cpp src/hed/libs/compute/GLUE2.h src/hed/libs/compute/GLUE2Entity.h src/hed/libs/compute/Job.cpp src/hed/libs/compute/Job.h src/hed/libs/compute/JobControllerPlugin.cpp src/hed/libs/compute/JobControllerPlugin.h src/hed/libs/compute/JobDescription.cpp src/hed/libs/compute/JobDescription.h src/hed/libs/compute/JobDescriptionParserPlugin.cpp src/hed/libs/compute/JobDescriptionParserPlugin.h src/hed/libs/compute/JobInformationStorage.h 
src/hed/libs/compute/JobInformationStorageDescriptor.cpp src/hed/libs/compute/JobInformationStorageSQLite.cpp src/hed/libs/compute/JobInformationStorageSQLite.h src/hed/libs/compute/JobInformationStorageXML.cpp src/hed/libs/compute/JobInformationStorageXML.h src/hed/libs/compute/JobState.cpp src/hed/libs/compute/JobState.h src/hed/libs/compute/JobSupervisor.cpp src/hed/libs/compute/JobSupervisor.h src/hed/libs/compute/Software.cpp src/hed/libs/compute/Software.h src/hed/libs/compute/SubmissionStatus.h src/hed/libs/compute/Submitter.cpp src/hed/libs/compute/Submitter.h src/hed/libs/compute/SubmitterPlugin.cpp src/hed/libs/compute/SubmitterPlugin.h src/hed/libs/compute/TestACCControl.cpp src/hed/libs/compute/TestACCControl.h src/hed/libs/compute/WSCommonPlugin.h src/hed/libs/compute/examples/basic_job_submission.cpp src/hed/libs/compute/examples/job_selector.cpp src/hed/libs/compute/test/BrokerTest.cpp src/hed/libs/compute/test/ComputingServiceUniqTest.cpp src/hed/libs/compute/test/ExecutionTargetTest.cpp src/hed/libs/compute/test/JobControllerPluginTest.cpp src/hed/libs/compute/test/JobDescriptionParserPluginTest.cpp src/hed/libs/compute/test/JobDescriptionTest.cpp src/hed/libs/compute/test/JobInformationStorageTest.cpp src/hed/libs/compute/test/JobListRetrieverTest.cpp src/hed/libs/compute/test/JobStateTest.cpp src/hed/libs/compute/test/JobSupervisorTest.cpp src/hed/libs/compute/test/JobTest.cpp src/hed/libs/compute/test/ServiceEndpointRetrieverTest.cpp src/hed/libs/compute/test/SoftwareTest.cpp src/hed/libs/compute/test/SubmissionStatusTest.cpp src/hed/libs/compute/test/SubmitterPluginTest.cpp src/hed/libs/compute/test/SubmitterTest.cpp src/hed/libs/compute/test/TargetInformationRetrieverTest.cpp src/hed/libs/compute/test_JobInformationStorage.cpp src/hed/libs/compute/test_jobdescription.cpp src/hed/libs/credential/CertUtil.cpp src/hed/libs/credential/CertUtil.h src/hed/libs/credential/Credential.cpp src/hed/libs/credential/Credential.h src/hed/libs/credential/NSSUtil.cpp src/hed/libs/credential/NSSUtil.h src/hed/libs/credential/PasswordSource.cpp src/hed/libs/credential/PasswordSource.h src/hed/libs/credential/Proxycertinfo.cpp src/hed/libs/credential/Proxycertinfo.h src/hed/libs/credential/VOMSAttribute.cpp src/hed/libs/credential/VOMSAttribute.h src/hed/libs/credential/VOMSConfig.cpp src/hed/libs/credential/VOMSConfig.h src/hed/libs/credential/VOMSUtil.cpp src/hed/libs/credential/VOMSUtil.h src/hed/libs/credential/listfunc.cpp src/hed/libs/credential/listfunc.h src/hed/libs/credential/nssprivkeyinfocodec.cpp src/hed/libs/credential/nssprivkeyinfocodec.h src/hed/libs/credential/test/CredentialTest.cpp src/hed/libs/credential/test/VOMSUtilTest.cpp src/hed/libs/credential/test/listfuncTest.cpp src/hed/libs/credential/testcertinfo.cpp src/hed/libs/credential/testeec.cpp src/hed/libs/credential/testproxy.cpp src/hed/libs/credential/testproxy2proxy.cpp src/hed/libs/credential/testvoms.cpp src/hed/libs/credentialmod/cred.cpp src/hed/libs/credentialstore/ClientVOMS.cpp src/hed/libs/credentialstore/ClientVOMS.h src/hed/libs/credentialstore/ClientVOMSRESTful.cpp src/hed/libs/credentialstore/ClientVOMSRESTful.h src/hed/libs/credentialstore/CredentialStore.cpp src/hed/libs/credentialstore/CredentialStore.h src/hed/libs/crypto/OpenSSL.cpp src/hed/libs/crypto/OpenSSL.h src/hed/libs/cryptomod/crypto.cpp src/hed/libs/data/DataBuffer.cpp src/hed/libs/data/DataBuffer.h src/hed/libs/data/DataCallback.h src/hed/libs/data/DataExternalComm.cpp src/hed/libs/data/DataExternalComm.h 
src/hed/libs/data/DataExternalHelper.cpp src/hed/libs/data/DataExternalHelper.h src/hed/libs/data/DataHandle.h src/hed/libs/data/DataMover.cpp src/hed/libs/data/DataMover.h src/hed/libs/data/DataPoint.cpp src/hed/libs/data/DataPoint.h src/hed/libs/data/DataPointDelegate.cpp src/hed/libs/data/DataPointDelegate.h src/hed/libs/data/DataPointDirect.cpp src/hed/libs/data/DataPointDirect.h src/hed/libs/data/DataPointIndex.cpp src/hed/libs/data/DataPointIndex.h src/hed/libs/data/DataSpeed.cpp src/hed/libs/data/DataSpeed.h src/hed/libs/data/DataStatus.cpp src/hed/libs/data/DataStatus.h src/hed/libs/data/FileCache.cpp src/hed/libs/data/FileCache.h src/hed/libs/data/FileCacheHash.cpp src/hed/libs/data/FileCacheHash.h src/hed/libs/data/FileInfo.h src/hed/libs/data/URLMap.cpp src/hed/libs/data/URLMap.h src/hed/libs/data/examples/DataPointMyProtocol.cpp src/hed/libs/data/examples/partial_copy.cpp src/hed/libs/data/examples/simple_copy.cpp src/hed/libs/data/test/FileCacheTest.cpp src/hed/libs/delegation/DelegationInterface.cpp src/hed/libs/delegation/DelegationInterface.h src/hed/libs/delegation/test/DelegationInterfaceTest.cpp src/hed/libs/deprecated.h src/hed/libs/globusutils/GSSCredential.cpp src/hed/libs/globusutils/GSSCredential.h src/hed/libs/globusutils/GlobusErrorUtils.cpp src/hed/libs/globusutils/GlobusErrorUtils.h src/hed/libs/globusutils/GlobusWorkarounds.cpp src/hed/libs/globusutils/GlobusWorkarounds.h src/hed/libs/infosys/InformationInterface.cpp src/hed/libs/infosys/InformationInterface.h src/hed/libs/infosys/test/InformationInterfaceTest.cpp src/hed/libs/loader/FinderLoader.cpp src/hed/libs/loader/FinderLoader.h src/hed/libs/loader/Loader.cpp src/hed/libs/loader/Loader.h src/hed/libs/loader/ModuleManager.cpp src/hed/libs/loader/ModuleManager.h src/hed/libs/loader/Plugin.cpp src/hed/libs/loader/Plugin.h src/hed/libs/loader/test/PluginTest.cpp src/hed/libs/loader/test/TestPlugin.cpp src/hed/libs/message/MCC.cpp src/hed/libs/message/MCC.h src/hed/libs/message/MCCLoader.cpp src/hed/libs/message/MCCLoader.h src/hed/libs/message/MCC_Status.cpp src/hed/libs/message/MCC_Status.h src/hed/libs/message/Message.cpp src/hed/libs/message/Message.h src/hed/libs/message/MessageAttributes.cpp src/hed/libs/message/MessageAttributes.h src/hed/libs/message/MessageAuth.cpp src/hed/libs/message/MessageAuth.h src/hed/libs/message/PayloadRaw.cpp src/hed/libs/message/PayloadRaw.h src/hed/libs/message/PayloadSOAP.cpp src/hed/libs/message/PayloadSOAP.h src/hed/libs/message/PayloadStream.cpp src/hed/libs/message/PayloadStream.h src/hed/libs/message/Plexer.cpp src/hed/libs/message/Plexer.h src/hed/libs/message/SOAPEnvelope.cpp src/hed/libs/message/SOAPEnvelope.h src/hed/libs/message/SOAPMessage.cpp src/hed/libs/message/SOAPMessage.h src/hed/libs/message/SecAttr.cpp src/hed/libs/message/SecAttr.h src/hed/libs/message/SecHandler.cpp src/hed/libs/message/SecHandler.h src/hed/libs/message/Service.cpp src/hed/libs/message/Service.h src/hed/libs/message/secattr/CIStringValue.cpp src/hed/libs/message/secattr/CIStringValue.h src/hed/libs/message/secattr/SecAttrValue.cpp src/hed/libs/message/secattr/SecAttrValue.h src/hed/libs/message/test/ChainTest.cpp src/hed/libs/message/test/TestMCC.cpp src/hed/libs/message/test/TestService.cpp src/hed/libs/otokens/jwse.cpp src/hed/libs/otokens/jwse_ecdsa.cpp src/hed/libs/otokens/jwse_hmac.cpp src/hed/libs/otokens/jwse_keys.cpp src/hed/libs/otokens/jwse_private.h src/hed/libs/otokens/jwse_rsassapkcs1.cpp src/hed/libs/otokens/jwse_rsassapss.cpp src/hed/libs/otokens/openid_metadata.cpp 
src/hed/libs/otokens/openid_metadata.h src/hed/libs/otokens/otokens.h src/hed/libs/security/ArcPDP/EvaluationCtx.cpp src/hed/libs/security/ArcPDP/EvaluationCtx.h src/hed/libs/security/ArcPDP/Evaluator.cpp src/hed/libs/security/ArcPDP/Evaluator.h src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp src/hed/libs/security/ArcPDP/EvaluatorLoader.h src/hed/libs/security/ArcPDP/PolicyParser.cpp src/hed/libs/security/ArcPDP/PolicyParser.h src/hed/libs/security/ArcPDP/PolicyStore.cpp src/hed/libs/security/ArcPDP/PolicyStore.h src/hed/libs/security/ArcPDP/Request.h src/hed/libs/security/ArcPDP/RequestItem.h src/hed/libs/security/ArcPDP/Response.h src/hed/libs/security/ArcPDP/Result.h src/hed/libs/security/ArcPDP/Source.cpp src/hed/libs/security/ArcPDP/Source.h src/hed/libs/security/ArcPDP/alg/AlgFactory.h src/hed/libs/security/ArcPDP/alg/CombiningAlg.h src/hed/libs/security/ArcPDP/alg/DenyOverridesAlg.cpp src/hed/libs/security/ArcPDP/alg/DenyOverridesAlg.h src/hed/libs/security/ArcPDP/alg/OrderedAlg.cpp src/hed/libs/security/ArcPDP/alg/OrderedAlg.h src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.cpp src/hed/libs/security/ArcPDP/alg/PermitOverridesAlg.h src/hed/libs/security/ArcPDP/attr/AnyURIAttribute.cpp src/hed/libs/security/ArcPDP/attr/AnyURIAttribute.h src/hed/libs/security/ArcPDP/attr/AttributeFactory.h src/hed/libs/security/ArcPDP/attr/AttributeProxy.h src/hed/libs/security/ArcPDP/attr/AttributeValue.h src/hed/libs/security/ArcPDP/attr/BooleanAttribute.cpp src/hed/libs/security/ArcPDP/attr/BooleanAttribute.h src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.cpp src/hed/libs/security/ArcPDP/attr/DateTimeAttribute.h src/hed/libs/security/ArcPDP/attr/GenericAttribute.cpp src/hed/libs/security/ArcPDP/attr/GenericAttribute.h src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp src/hed/libs/security/ArcPDP/attr/RequestAttribute.h src/hed/libs/security/ArcPDP/attr/StringAttribute.cpp src/hed/libs/security/ArcPDP/attr/StringAttribute.h src/hed/libs/security/ArcPDP/attr/X500NameAttribute.cpp src/hed/libs/security/ArcPDP/attr/X500NameAttribute.h src/hed/libs/security/ArcPDP/fn/EqualFunction.cpp src/hed/libs/security/ArcPDP/fn/EqualFunction.h src/hed/libs/security/ArcPDP/fn/FnFactory.h src/hed/libs/security/ArcPDP/fn/Function.h src/hed/libs/security/ArcPDP/fn/InRangeFunction.cpp src/hed/libs/security/ArcPDP/fn/InRangeFunction.h src/hed/libs/security/ArcPDP/fn/MatchFunction.cpp src/hed/libs/security/ArcPDP/fn/MatchFunction.h src/hed/libs/security/ArcPDP/policy/Policy.cpp src/hed/libs/security/ArcPDP/policy/Policy.h src/hed/libs/security/ClassLoader.cpp src/hed/libs/security/ClassLoader.h src/hed/libs/security/PDP.cpp src/hed/libs/security/PDP.h src/hed/libs/security/Security.cpp src/hed/libs/security/Security.h src/hed/libs/ws-addressing/WSA.cpp src/hed/libs/ws-addressing/WSA.h src/hed/libs/ws-addressing/test.cpp src/hed/libs/ws-security/SAMLToken.cpp src/hed/libs/ws-security/SAMLToken.h src/hed/libs/ws-security/UsernameToken.cpp src/hed/libs/ws-security/UsernameToken.h src/hed/libs/ws-security/X509Token.cpp src/hed/libs/ws-security/X509Token.h src/hed/libs/ws-security/test/SAMLTokenTest.cpp src/hed/libs/ws-security/test/UsernameTokenTest.cpp src/hed/libs/ws-security/test/X509TokenTest.cpp src/hed/libs/ws-security/test_samltoken.cpp src/hed/libs/ws-security/test_usernametoken.cpp src/hed/libs/ws-security/test_x509token.cpp src/hed/libs/xmlsec/XMLSecNode.cpp src/hed/libs/xmlsec/XMLSecNode.h src/hed/libs/xmlsec/XmlSecUtils.cpp src/hed/libs/xmlsec/XmlSecUtils.h src/hed/libs/xmlsec/saml_util.cpp 
src/hed/libs/xmlsec/saml_util.h src/hed/libs/xmlsec/test_xmlsecnode.cpp src/hed/mcc/http/MCCHTTP.cpp src/hed/mcc/http/MCCHTTP.h src/hed/mcc/http/PayloadHTTP.cpp src/hed/mcc/http/PayloadHTTP.h src/hed/mcc/http/http_test.cpp src/hed/mcc/http/http_test_withtls.cpp src/hed/mcc/msgvalidator/MCCMsgValidator.cpp src/hed/mcc/msgvalidator/MCCMsgValidator.h src/hed/mcc/soap/MCCSOAP.cpp src/hed/mcc/soap/MCCSOAP.h src/hed/mcc/tcp/MCCTCP.cpp src/hed/mcc/tcp/MCCTCP.h src/hed/mcc/tcp/PayloadTCPSocket.cpp src/hed/mcc/tcp/PayloadTCPSocket.h src/hed/mcc/tls/BIOGSIMCC.cpp src/hed/mcc/tls/BIOGSIMCC.h src/hed/mcc/tls/BIOMCC.cpp src/hed/mcc/tls/BIOMCC.h src/hed/mcc/tls/ConfigTLSMCC.cpp src/hed/mcc/tls/ConfigTLSMCC.h src/hed/mcc/tls/DelegationCollector.cpp src/hed/mcc/tls/DelegationCollector.h src/hed/mcc/tls/DelegationSecAttr.cpp src/hed/mcc/tls/DelegationSecAttr.h src/hed/mcc/tls/GlobusSigningPolicy.cpp src/hed/mcc/tls/GlobusSigningPolicy.h src/hed/mcc/tls/MCCTLS.cpp src/hed/mcc/tls/MCCTLS.h src/hed/mcc/tls/PayloadTLSMCC.cpp src/hed/mcc/tls/PayloadTLSMCC.h src/hed/mcc/tls/PayloadTLSStream.cpp src/hed/mcc/tls/PayloadTLSStream.h src/hed/mcc/tls/test/GlobusSigningPolicyTest.cpp src/hed/shc/SecHandlerPlugin.cpp src/hed/shc/allowpdp/AllowPDP.cpp src/hed/shc/allowpdp/AllowPDP.h src/hed/shc/arcauthzsh/ArcAuthZ.cpp src/hed/shc/arcauthzsh/ArcAuthZ.h src/hed/shc/arcpdp/ArcAlgFactory.cpp src/hed/shc/arcpdp/ArcAlgFactory.h src/hed/shc/arcpdp/ArcAttributeFactory.cpp src/hed/shc/arcpdp/ArcAttributeFactory.h src/hed/shc/arcpdp/ArcAttributeProxy.h src/hed/shc/arcpdp/ArcEvaluationCtx.cpp src/hed/shc/arcpdp/ArcEvaluationCtx.h src/hed/shc/arcpdp/ArcEvaluator.cpp src/hed/shc/arcpdp/ArcEvaluator.h src/hed/shc/arcpdp/ArcFnFactory.cpp src/hed/shc/arcpdp/ArcFnFactory.h src/hed/shc/arcpdp/ArcPDP.cpp src/hed/shc/arcpdp/ArcPDP.h src/hed/shc/arcpdp/ArcPolicy.cpp src/hed/shc/arcpdp/ArcPolicy.h src/hed/shc/arcpdp/ArcRequest.cpp src/hed/shc/arcpdp/ArcRequest.h src/hed/shc/arcpdp/ArcRequestItem.cpp src/hed/shc/arcpdp/ArcRequestItem.h src/hed/shc/arcpdp/ArcRule.cpp src/hed/shc/arcpdp/ArcRule.h src/hed/shc/classload_test.cpp src/hed/shc/delegationpdp/DelegationPDP.cpp src/hed/shc/delegationpdp/DelegationPDP.h src/hed/shc/delegationsh/DelegationSH.cpp src/hed/shc/delegationsh/DelegationSH.h src/hed/shc/denypdp/DenyPDP.cpp src/hed/shc/denypdp/DenyPDP.h src/hed/shc/gaclpdp/GACLEvaluator.cpp src/hed/shc/gaclpdp/GACLEvaluator.h src/hed/shc/gaclpdp/GACLPDP.cpp src/hed/shc/gaclpdp/GACLPDP.h src/hed/shc/gaclpdp/GACLPolicy.cpp src/hed/shc/gaclpdp/GACLPolicy.h src/hed/shc/gaclpdp/GACLRequest.cpp src/hed/shc/gaclpdp/GACLRequest.h src/hed/shc/legacy/ConfigParser.cpp src/hed/shc/legacy/ConfigParser.h src/hed/shc/legacy/LegacyMap.cpp src/hed/shc/legacy/LegacyMap.h src/hed/shc/legacy/LegacyPDP.cpp src/hed/shc/legacy/LegacyPDP.h src/hed/shc/legacy/LegacySecAttr.cpp src/hed/shc/legacy/LegacySecAttr.h src/hed/shc/legacy/LegacySecHandler.cpp src/hed/shc/legacy/LegacySecHandler.h src/hed/shc/legacy/arc_lcas.cpp src/hed/shc/legacy/arc_lcmaps.cpp src/hed/shc/legacy/auth.cpp src/hed/shc/legacy/auth.h src/hed/shc/legacy/auth_file.cpp src/hed/shc/legacy/auth_otokens.cpp src/hed/shc/legacy/auth_plugin.cpp src/hed/shc/legacy/auth_subject.cpp src/hed/shc/legacy/auth_voms.cpp src/hed/shc/legacy/cert_util.cpp src/hed/shc/legacy/cert_util.h src/hed/shc/legacy/plugin.cpp src/hed/shc/legacy/simplemap.cpp src/hed/shc/legacy/simplemap.h src/hed/shc/legacy/test/AuthOtokensTest.cpp src/hed/shc/legacy/unixmap.cpp src/hed/shc/legacy/unixmap.h src/hed/shc/legacy/unixmap_lcmaps.cpp 
src/hed/shc/otokens/OTokensSH.cpp src/hed/shc/otokens/OTokensSH.h src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.h src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.h src/hed/shc/samltokensh/SAMLTokenSH.cpp src/hed/shc/samltokensh/SAMLTokenSH.h src/hed/shc/simplelistpdp/SimpleListPDP.cpp src/hed/shc/simplelistpdp/SimpleListPDP.h src/hed/shc/test.cpp src/hed/shc/testinterface_arc.cpp src/hed/shc/testinterface_xacml.cpp src/hed/shc/usernametokensh/UsernameTokenSH.cpp src/hed/shc/usernametokensh/UsernameTokenSH.h src/hed/shc/x509tokensh/X509TokenSH.cpp src/hed/shc/x509tokensh/X509TokenSH.h src/hed/shc/xacmlpdp/AttributeDesignator.cpp src/hed/shc/xacmlpdp/AttributeDesignator.h src/hed/shc/xacmlpdp/AttributeSelector.cpp src/hed/shc/xacmlpdp/AttributeSelector.h src/hed/shc/xacmlpdp/XACMLAlgFactory.cpp src/hed/shc/xacmlpdp/XACMLAlgFactory.h src/hed/shc/xacmlpdp/XACMLApply.cpp src/hed/shc/xacmlpdp/XACMLApply.h src/hed/shc/xacmlpdp/XACMLAttributeFactory.cpp src/hed/shc/xacmlpdp/XACMLAttributeFactory.h src/hed/shc/xacmlpdp/XACMLAttributeProxy.h src/hed/shc/xacmlpdp/XACMLCondition.cpp src/hed/shc/xacmlpdp/XACMLCondition.h src/hed/shc/xacmlpdp/XACMLEvaluationCtx.cpp src/hed/shc/xacmlpdp/XACMLEvaluationCtx.h src/hed/shc/xacmlpdp/XACMLEvaluator.cpp src/hed/shc/xacmlpdp/XACMLEvaluator.h src/hed/shc/xacmlpdp/XACMLFnFactory.cpp src/hed/shc/xacmlpdp/XACMLFnFactory.h src/hed/shc/xacmlpdp/XACMLPDP.cpp src/hed/shc/xacmlpdp/XACMLPDP.h src/hed/shc/xacmlpdp/XACMLPolicy.cpp src/hed/shc/xacmlpdp/XACMLPolicy.h src/hed/shc/xacmlpdp/XACMLRequest.cpp src/hed/shc/xacmlpdp/XACMLRequest.h src/hed/shc/xacmlpdp/XACMLRule.cpp src/hed/shc/xacmlpdp/XACMLRule.h src/hed/shc/xacmlpdp/XACMLTarget.cpp src/hed/shc/xacmlpdp/XACMLTarget.h src/libs/data-staging/DTR.cpp src/libs/data-staging/DTR.h src/libs/data-staging/DTRList.cpp src/libs/data-staging/DTRList.h src/libs/data-staging/DTRStatus.cpp src/libs/data-staging/DTRStatus.h src/libs/data-staging/DataDelivery.cpp src/libs/data-staging/DataDelivery.h src/libs/data-staging/DataDeliveryComm.cpp src/libs/data-staging/DataDeliveryComm.h src/libs/data-staging/DataDeliveryLocalComm.cpp src/libs/data-staging/DataDeliveryLocalComm.h src/libs/data-staging/DataDeliveryRemoteComm.cpp src/libs/data-staging/DataDeliveryRemoteComm.h src/libs/data-staging/DataStagingDelivery.cpp src/libs/data-staging/Processor.cpp src/libs/data-staging/Processor.h src/libs/data-staging/Scheduler.cpp src/libs/data-staging/Scheduler.h src/libs/data-staging/TransferShares.cpp src/libs/data-staging/TransferShares.h src/libs/data-staging/examples/Generator.cpp src/libs/data-staging/examples/Generator.h src/libs/data-staging/examples/generator-main.cpp src/libs/data-staging/test/DTRTest.cpp src/libs/data-staging/test/DeliveryTest.cpp src/libs/data-staging/test/ProcessorTest.cpp src/services/a-rex/FileChunks.cpp src/services/a-rex/FileChunks.h src/services/a-rex/PayloadFile.cpp src/services/a-rex/PayloadFile.h src/services/a-rex/SQLhelpers.h src/services/a-rex/arex.cpp src/services/a-rex/arex.h src/services/a-rex/authop.cpp src/services/a-rex/cachecheck.cpp src/services/a-rex/change_activity_status.cpp src/services/a-rex/create_activity.cpp src/services/a-rex/delegation/DelegationStore.cpp src/services/a-rex/delegation/DelegationStore.h src/services/a-rex/delegation/DelegationStores.cpp src/services/a-rex/delegation/DelegationStores.h 
src/services/a-rex/delegation/FileRecord.cpp src/services/a-rex/delegation/FileRecord.h src/services/a-rex/delegation/FileRecordSQLite.cpp src/services/a-rex/delegation/FileRecordSQLite.h src/services/a-rex/delegation/uid.cpp src/services/a-rex/delegation/uid.h src/services/a-rex/faults.cpp src/services/a-rex/get.cpp src/services/a-rex/grid-manager/GridManager.cpp src/services/a-rex/grid-manager/GridManager.h src/services/a-rex/grid-manager/accounting/AAR.cpp src/services/a-rex/grid-manager/accounting/AAR.h src/services/a-rex/grid-manager/accounting/AccountingDB.h src/services/a-rex/grid-manager/accounting/AccountingDBAsync.cpp src/services/a-rex/grid-manager/accounting/AccountingDBAsync.h src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.h src/services/a-rex/grid-manager/accounting/test_adb.cpp src/services/a-rex/grid-manager/arc_blahp_logger.cpp src/services/a-rex/grid-manager/conf/CacheConfig.cpp src/services/a-rex/grid-manager/conf/CacheConfig.h src/services/a-rex/grid-manager/conf/CoreConfig.cpp src/services/a-rex/grid-manager/conf/CoreConfig.h src/services/a-rex/grid-manager/conf/GMConfig.cpp src/services/a-rex/grid-manager/conf/GMConfig.h src/services/a-rex/grid-manager/conf/StagingConfig.cpp src/services/a-rex/grid-manager/conf/StagingConfig.h src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp src/services/a-rex/grid-manager/conf/UrlMapConfig.h src/services/a-rex/grid-manager/files/ControlFileContent.cpp src/services/a-rex/grid-manager/files/ControlFileContent.h src/services/a-rex/grid-manager/files/ControlFileHandling.cpp src/services/a-rex/grid-manager/files/ControlFileHandling.h src/services/a-rex/grid-manager/gm_jobs.cpp src/services/a-rex/grid-manager/gm_kick.cpp src/services/a-rex/grid-manager/inputcheck.cpp src/services/a-rex/grid-manager/jobs/CommFIFO.cpp src/services/a-rex/grid-manager/jobs/CommFIFO.h src/services/a-rex/grid-manager/jobs/ContinuationPlugins.cpp src/services/a-rex/grid-manager/jobs/ContinuationPlugins.h src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp src/services/a-rex/grid-manager/jobs/DTRGenerator.h src/services/a-rex/grid-manager/jobs/GMJob.cpp src/services/a-rex/grid-manager/jobs/GMJob.h src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.h src/services/a-rex/grid-manager/jobs/JobsList.cpp src/services/a-rex/grid-manager/jobs/JobsList.h src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp src/services/a-rex/grid-manager/log/HeartBeatMetrics.h src/services/a-rex/grid-manager/log/JobLog.cpp src/services/a-rex/grid-manager/log/JobLog.h src/services/a-rex/grid-manager/log/JobsMetrics.cpp src/services/a-rex/grid-manager/log/JobsMetrics.h src/services/a-rex/grid-manager/log/SpaceMetrics.cpp src/services/a-rex/grid-manager/log/SpaceMetrics.h src/services/a-rex/grid-manager/mail/send_mail.cpp src/services/a-rex/grid-manager/mail/send_mail.h src/services/a-rex/grid-manager/misc/proxy.cpp src/services/a-rex/grid-manager/misc/proxy.h src/services/a-rex/grid-manager/run/RunParallel.cpp src/services/a-rex/grid-manager/run/RunParallel.h src/services/a-rex/grid-manager/run/RunRedirected.cpp src/services/a-rex/grid-manager/run/RunRedirected.h src/services/a-rex/grid-manager/test_write_grami_file.cpp src/services/a-rex/information_collector.cpp src/services/a-rex/internaljobplugin/DescriptorsINTERNAL.cpp src/services/a-rex/internaljobplugin/INTERNALClient.cpp src/services/a-rex/internaljobplugin/INTERNALClient.h 
src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.h src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.h src/services/a-rex/internaljobplugin/JobStateINTERNAL.cpp src/services/a-rex/internaljobplugin/JobStateINTERNAL.h src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.h src/services/a-rex/internaljobplugin/TargetInformationRetrieverPluginINTERNAL.cpp src/services/a-rex/internaljobplugin/TargetInformationRetrieverPluginINTERNAL.h src/services/a-rex/job.cpp src/services/a-rex/job.h src/services/a-rex/put.cpp src/services/a-rex/rest/rest.cpp src/services/a-rex/rest/rest.h src/services/a-rex/rest/test/RESTTest.cpp src/services/a-rex/test_cache_check.cpp src/services/a-rex/tools.cpp src/services/a-rex/tools.h src/services/a-rex/update_credentials.cpp src/services/candypond/CandyPond.cpp src/services/candypond/CandyPond.h src/services/candypond/CandyPondGenerator.cpp src/services/candypond/CandyPondGenerator.h src/services/data-staging/DataDeliveryService.cpp src/services/data-staging/DataDeliveryService.h src/services/examples/echo_python/EchoService.py src/services/examples/echo_python/__init__.py src/services/examples/echo_python/test.cpp src/services/wrappers/python/pythonwrapper.cpp src/services/wrappers/python/pythonwrapper.h src/tests/arcpolicy/arcpolicy.cpp src/tests/client/test_ClientInterface.cpp src/tests/client/test_ClientSAML2SSO.cpp src/tests/client/test_ClientX509Delegation_ARC.cpp src/tests/client/test_ClientX509Delegation_GridSite.cpp src/tests/count/count.cpp src/tests/count/count.h src/tests/count/test_client.cpp src/tests/count/test_service.cpp src/tests/delegation/test_client_with_delegation_sechandler.cpp src/tests/delegation/test_delegation_client.cpp src/tests/echo/echo.cpp src/tests/echo/echo.h src/tests/echo/echo_client.py src/tests/echo/echo_test4axis2c/test_client.cpp src/tests/echo/perfengine.py src/tests/echo/perftest.cpp src/tests/echo/test.cpp src/tests/echo/test_client.cpp src/tests/echo/test_clientinterface.cpp src/tests/echo/test_clientinterface.py src/tests/echo/test_service.cpp src/tests/perf/perftest.cpp src/tests/perf/perftest_cmd_duration.cpp src/tests/perf/perftest_cmd_times.cpp src/tests/perf/perftest_deleg_bydelegclient.cpp src/tests/perf/perftest_deleg_bysechandler.cpp src/tests/perf/perftest_msgsize.cpp src/tests/perf/perftest_saml2sso.cpp src/tests/perf/perftest_samlaa.cpp src/tests/perf/perftest_slcs.cpp src/tests/policy-delegation/test.cpp src/tests/translator/translator.cpp src/tests/unit/ClientsTest.cpp src/tests/unit/ClientsTest.h src/tests/unit/Test.cpp src/tests/xpath/prepare.py src/tests/xpath/query.cpp src/utils/hed/arcplugin.cpp src/utils/hed/common.cpp src/utils/hed/complextype.cpp src/utils/hed/schemaconv.cpp src/utils/hed/schemaconv.h src/utils/hed/simpletype.cpp src/utils/hed/wsdl2hed.cpp src/utils/python/arc/__init__.py src/utils/python/arc/control/Accounting.py src/utils/python/arc/control/AccountingDB.py src/utils/python/arc/control/AccountingPublishing.py src/utils/python/arc/control/Cache.py src/utils/python/arc/control/CertificateGenerator.py src/utils/python/arc/control/Cleanup.py src/utils/python/arc/control/CommunityRTE.py src/utils/python/arc/control/Config.py src/utils/python/arc/control/ControlCommon.py src/utils/python/arc/control/DataStaging.py 
src/utils/python/arc/control/Jobs.py src/utils/python/arc/control/OSPackage.py src/utils/python/arc/control/OSService.py src/utils/python/arc/control/RunTimeEnvironment.py src/utils/python/arc/control/ServiceCommon.py src/utils/python/arc/control/Services.py src/utils/python/arc/control/TestCA.py src/utils/python/arc/control/TestJWT.py src/utils/python/arc/control/ThirdPartyDeployment.py src/utils/python/arc/control/Validator.py src/utils/python/arc/control/__init__.py src/utils/python/arc/paths.py src/utils/python/arc/utils/__init__.py src/utils/python/arc/utils/config.py src/utils/python/arc/utils/reference.py nordugrid-arc-7.1.1/po/PaxHeaders/Rules-quot0000644000000000000000000000013215067751332015770 xustar0030 mtime=1759498970.281429951 30 atime=1759499024.587355543 30 ctime=1759499034.623713411 nordugrid-arc-7.1.1/po/Rules-quot0000644000175000002070000000337615067751332017703 0ustar00mockbuildmock00000000000000# Special Makefile rules for English message catalogs with quotation marks. DISTFILES.common.extra1 = quot.sed boldquot.sed en@quot.header en@boldquot.header insert-header.sin Rules-quot .SUFFIXES: .insert-header .po-update-en en@quot.po-create: $(MAKE) en@quot.po-update en@boldquot.po-create: $(MAKE) en@boldquot.po-update en@quot.po-update: en@quot.po-update-en en@boldquot.po-update: en@boldquot.po-update-en .insert-header.po-update-en: @lang=`echo $@ | sed -e 's/\.po-update-en$$//'`; \ if test "$(PACKAGE)" = "gettext"; then PATH=`pwd`/../src:$$PATH; GETTEXTLIBDIR=`cd $(top_srcdir)/src && pwd`; export GETTEXTLIBDIR; fi; \ tmpdir=`pwd`; \ echo "$$lang:"; \ ll=`echo $$lang | sed -e 's/@.*//'`; \ LC_ALL=C; export LC_ALL; \ cd $(srcdir); \ if $(MSGINIT) -i $(DOMAIN).pot --no-translator -l $$ll -o - 2>/dev/null | sed -f $$tmpdir/$$lang.insert-header | $(MSGCONV) -t UTF-8 | $(MSGFILTER) sed -f `echo $$lang | sed -e 's/.*@//'`.sed 2>/dev/null > $$tmpdir/$$lang.new.po; then \ if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ rm -f $$tmpdir/$$lang.new.po; \ else \ if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ :; \ else \ echo "creation of $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ exit 1; \ fi; \ fi; \ else \ echo "creation of $$lang.po failed!" 1>&2; \ rm -f $$tmpdir/$$lang.new.po; \ fi en@quot.insert-header: insert-header.sin sed -e '/^#/d' -e 's/HEADER/en@quot.header/g' $(srcdir)/insert-header.sin > en@quot.insert-header en@boldquot.insert-header: insert-header.sin sed -e '/^#/d' -e 's/HEADER/en@boldquot.header/g' $(srcdir)/insert-header.sin > en@boldquot.insert-header mostlyclean: mostlyclean-quot mostlyclean-quot: rm -f *.insert-header nordugrid-arc-7.1.1/po/PaxHeaders/Makefile.in.in0000644000000000000000000000013215067751332016437 xustar0030 mtime=1759498970.256049548 30 atime=1759499020.559294336 30 ctime=1759499034.614598737 nordugrid-arc-7.1.1/po/Makefile.in.in0000644000175000002070000003552415067751332020352 0ustar00mockbuildmock00000000000000# Makefile for PO directory in any package using GNU gettext. # Copyright (C) 1995-1997, 2000-2007 by Ulrich Drepper # # This file can be copied and used freely without restrictions. It can # be used in projects which are not available under the GNU General Public # License but which still want to provide support for the GNU gettext # functionality. # Please note that the actual code of GNU gettext is covered by the GNU # General Public License and is *not* in the public domain. 
# # Origin: gettext-0.17 GETTEXT_MACRO_VERSION = 0.17 PACKAGE = @PACKAGE@ VERSION = @VERSION@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ SHELL = /bin/sh @SET_MAKE@ srcdir = @srcdir@ top_srcdir = @top_srcdir@ VPATH = @srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ datarootdir = @datarootdir@ datadir = @datadir@ localedir = @localedir@ gettextsrcdir = $(datadir)/gettext/po INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ # We use $(mkdir_p). # In automake <= 1.9.x, $(mkdir_p) is defined either as "mkdir -p --" or as # "$(mkinstalldirs)" or as "$(install_sh) -d". For these automake versions, # @install_sh@ does not start with $(SHELL), so we add it. # In automake >= 1.10, @mkdir_p@ is derived from ${MKDIR_P}, which is defined # either as "/path/to/mkdir -p" or ".../install-sh -c -d". For these automake # versions, $(mkinstalldirs) and $(install_sh) are unused. mkinstalldirs = $(SHELL) @install_sh@ -d install_sh = $(SHELL) @install_sh@ MKDIR_P = @MKDIR_P@ mkdir_p = @mkdir_p@ GMSGFMT_ = @GMSGFMT@ GMSGFMT_no = @GMSGFMT@ GMSGFMT_yes = @GMSGFMT_015@ GMSGFMT = $(GMSGFMT_$(USE_MSGCTXT)) MSGFMT_ = @MSGFMT@ MSGFMT_no = @MSGFMT@ MSGFMT_yes = @MSGFMT_015@ MSGFMT = $(MSGFMT_$(USE_MSGCTXT)) XGETTEXT_ = @XGETTEXT@ XGETTEXT_no = @XGETTEXT@ XGETTEXT_yes = @XGETTEXT_015@ XGETTEXT = $(XGETTEXT_$(USE_MSGCTXT)) MSGMERGE = msgmerge MSGMERGE_UPDATE = @MSGMERGE@ --update MSGINIT = msginit MSGCONV = msgconv MSGFILTER = msgfilter POFILES = @POFILES@ GMOFILES = @GMOFILES@ UPDATEPOFILES = @UPDATEPOFILES@ DUMMYPOFILES = @DUMMYPOFILES@ DISTFILES.common = Makefile.in.in remove-potcdate.sin \ $(DISTFILES.common.extra1) $(DISTFILES.common.extra2) $(DISTFILES.common.extra3) DISTFILES = $(DISTFILES.common) Makevars POTFILES.in \ $(POFILES) $(GMOFILES) \ $(DISTFILES.extra1) $(DISTFILES.extra2) $(DISTFILES.extra3) POTFILES = \ CATALOGS = @CATALOGS@ # Makevars gets inserted here. (Don't remove this line!) .SUFFIXES: .SUFFIXES: .po .gmo .mo .sed .sin .nop .po-create .po-update .po.mo: @echo "$(MSGFMT) -c -o $@ $<"; \ $(MSGFMT) -c -o t-$@ $< && mv t-$@ $@ .po.gmo: @lang=`echo $* | sed -e 's,.*/,,'`; \ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ echo "$${cdcmd}rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o $${lang}.gmo $${lang}.po"; \ cd $(srcdir) && rm -f $${lang}.gmo && $(GMSGFMT) -c --statistics -o t-$${lang}.gmo $${lang}.po && mv t-$${lang}.gmo $${lang}.gmo .sin.sed: sed -e '/^#/d' $< > t-$@ mv t-$@ $@ all: check-macro-version all-@USE_NLS@ all-yes: stamp-po all-no: # Ensure that the gettext macros and this Makefile.in.in are in sync. check-macro-version: @test "$(GETTEXT_MACRO_VERSION)" = "@GETTEXT_MACRO_VERSION@" \ || { echo "*** error: gettext infrastructure mismatch: using a Makefile.in.in from gettext version $(GETTEXT_MACRO_VERSION) but the autoconf macros are from gettext version @GETTEXT_MACRO_VERSION@" 1>&2; \ exit 1; \ } # $(srcdir)/$(DOMAIN).pot is only created when needed. When xgettext finds no # internationalized messages, no $(srcdir)/$(DOMAIN).pot is created (because # we don't want to bother translators with empty POT files). We assume that # LINGUAS is empty in this case, i.e. $(POFILES) and $(GMOFILES) are empty. # In this case, stamp-po is a nop (i.e. a phony target). # stamp-po is a timestamp denoting the last time at which the CATALOGS have # been loosely updated. 
Its purpose is that when a developer or translator # checks out the package via CVS, and the $(DOMAIN).pot file is not in CVS, # "make" will update the $(DOMAIN).pot and the $(CATALOGS), but subsequent # invocations of "make" will do nothing. This timestamp would not be necessary # if updating the $(CATALOGS) would always touch them; however, the rule for # $(POFILES) has been designed to not touch files that don't need to be # changed. stamp-po: $(srcdir)/$(DOMAIN).pot test ! -f $(srcdir)/$(DOMAIN).pot || \ test -z "$(GMOFILES)" || $(MAKE) $(GMOFILES) @test ! -f $(srcdir)/$(DOMAIN).pot || { \ echo "touch stamp-po" && \ echo timestamp > stamp-poT && \ mv stamp-poT stamp-po; \ } # Note: Target 'all' must not depend on target '$(DOMAIN).pot-update', # otherwise packages like GCC can not be built if only parts of the source # have been downloaded. # This target rebuilds $(DOMAIN).pot; it is an expensive operation. # Note that $(DOMAIN).pot is not touched if it doesn't need to be changed. $(DOMAIN).pot-update: $(POTFILES) $(srcdir)/POTFILES.in remove-potcdate.sed if LC_ALL=C grep 'GNU @PACKAGE@' $(top_srcdir)/* 2>/dev/null | grep -v 'libtool:' >/dev/null; then \ package_gnu='GNU '; \ else \ package_gnu=''; \ fi; \ if test -n '$(MSGID_BUGS_ADDRESS)' || test '$(PACKAGE_BUGREPORT)' = '@'PACKAGE_BUGREPORT'@'; then \ msgid_bugs_address='$(MSGID_BUGS_ADDRESS)'; \ else \ msgid_bugs_address='$(PACKAGE_BUGREPORT)'; \ fi; \ case `$(XGETTEXT) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].* | 0.16 | 0.16.[0-1]*) \ $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ --add-comments=TRANSLATORS: $(XGETTEXT_OPTIONS) @XGETTEXT_EXTRA_OPTIONS@ \ --files-from=$(srcdir)/POTFILES.in \ --copyright-holder='$(COPYRIGHT_HOLDER)' \ --msgid-bugs-address="$$msgid_bugs_address" \ ;; \ *) \ $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ --add-comments=TRANSLATORS: $(XGETTEXT_OPTIONS) @XGETTEXT_EXTRA_OPTIONS@ \ --files-from=$(srcdir)/POTFILES.in \ --copyright-holder='$(COPYRIGHT_HOLDER)' \ --package-name="$${package_gnu}@PACKAGE@" \ --package-version='@VERSION@' \ --msgid-bugs-address="$$msgid_bugs_address" \ ;; \ esac test ! -f $(DOMAIN).po || { \ if test -f $(srcdir)/$(DOMAIN).pot; then \ sed -f remove-potcdate.sed < $(srcdir)/$(DOMAIN).pot > $(DOMAIN).1po && \ sed -f remove-potcdate.sed < $(DOMAIN).po > $(DOMAIN).2po && \ if cmp $(DOMAIN).1po $(DOMAIN).2po >/dev/null 2>&1; then \ rm -f $(DOMAIN).1po $(DOMAIN).2po $(DOMAIN).po; \ else \ rm -f $(DOMAIN).1po $(DOMAIN).2po $(srcdir)/$(DOMAIN).pot && \ mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ fi; \ else \ mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ fi; \ } # This rule has no dependencies: we don't need to update $(DOMAIN).pot at # every "make" invocation, only create it when it is missing. # Only "make $(DOMAIN).pot-update" or "make dist" will force an update. $(srcdir)/$(DOMAIN).pot: $(MAKE) $(DOMAIN).pot-update # This target rebuilds a PO file if $(DOMAIN).pot has changed. # Note that a PO file is not touched if it doesn't need to be changed. $(POFILES): $(srcdir)/$(DOMAIN).pot @lang=`echo $@ | sed -e 's,.*/,,' -e 's/\.po$$//'`; \ if test -f "$(srcdir)/$${lang}.po"; then \ test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ echo "$${cdcmd}$(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot"; \ cd $(srcdir) && $(MSGMERGE_UPDATE) $${lang}.po $(DOMAIN).pot; \ else \ $(MAKE) $${lang}.po-create; \ fi install: install-exec install-data install-exec: install-data: install-data-@USE_NLS@ if test "$(PACKAGE)" = "gettext-tools"; then \ $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \ for file in $(DISTFILES.common) Makevars.template; do \ $(INSTALL_DATA) $(srcdir)/$$file \ $(DESTDIR)$(gettextsrcdir)/$$file; \ done; \ for file in Makevars; do \ rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ done; \ else \ : ; \ fi install-data-no: all install-data-yes: all $(mkdir_p) $(DESTDIR)$(datadir) @catalogs='$(CATALOGS)'; \ for cat in $$catalogs; do \ cat=`basename $$cat`; \ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ dir=$(localedir)/$$lang/LC_MESSAGES; \ $(mkdir_p) $(DESTDIR)$$dir; \ if test -r $$cat; then realcat=$$cat; else realcat=$(srcdir)/$$cat; fi; \ $(INSTALL_DATA) $$realcat $(DESTDIR)$$dir/$(DOMAIN).mo; \ echo "installing $$realcat as $(DESTDIR)$$dir/$(DOMAIN).mo"; \ for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ if test -n "$$lc"; then \ if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ for file in *; do \ if test -f $$file; then \ ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ fi; \ done); \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ else \ if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ :; \ else \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ fi; \ fi; \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ ln -s ../LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ ln $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ cp -p $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ echo "installing $$realcat link as $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo"; \ fi; \ done; \ done install-strip: install installdirs: installdirs-exec installdirs-data installdirs-exec: installdirs-data: installdirs-data-@USE_NLS@ if test "$(PACKAGE)" = "gettext-tools"; then \ $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \ else \ : ; \ fi installdirs-data-no: installdirs-data-yes: $(mkdir_p) $(DESTDIR)$(datadir) @catalogs='$(CATALOGS)'; \ for cat in $$catalogs; do \ cat=`basename $$cat`; \ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ dir=$(localedir)/$$lang/LC_MESSAGES; \ $(mkdir_p) $(DESTDIR)$$dir; \ for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ if test -n "$$lc"; then \ if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ for file in *; do \ if test -f $$file; then \ ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ fi; \ done); \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ else \ if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then 
\ :; \ else \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ fi; \ fi; \ fi; \ done; \ done # Define this as empty until I found a useful application. installcheck: uninstall: uninstall-exec uninstall-data uninstall-exec: uninstall-data: uninstall-data-@USE_NLS@ if test "$(PACKAGE)" = "gettext-tools"; then \ for file in $(DISTFILES.common) Makevars.template; do \ rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ done; \ else \ : ; \ fi uninstall-data-no: uninstall-data-yes: catalogs='$(CATALOGS)'; \ for cat in $$catalogs; do \ cat=`basename $$cat`; \ lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ for lc in LC_MESSAGES $(EXTRA_LOCALE_CATEGORIES); do \ rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ done; \ done check: all info dvi ps pdf html tags TAGS ctags CTAGS ID: mostlyclean: rm -f remove-potcdate.sed rm -f stamp-poT rm -f core core.* $(DOMAIN).po $(DOMAIN).1po $(DOMAIN).2po *.new.po rm -fr *.o clean: mostlyclean distclean: clean rm -f Makefile Makefile.in POTFILES *.mo maintainer-clean: distclean @echo "This command is intended for maintainers to use;" @echo "it deletes files that may require special tools to rebuild." rm -f stamp-po $(GMOFILES) distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir) dist distdir: $(MAKE) update-po @$(MAKE) dist2 # This is a separate target because 'update-po' must be executed before. dist2: stamp-po $(DISTFILES) dists="$(DISTFILES)"; \ if test "$(PACKAGE)" = "gettext-tools"; then \ dists="$$dists Makevars.template"; \ fi; \ if test -f $(srcdir)/$(DOMAIN).pot; then \ dists="$$dists $(DOMAIN).pot stamp-po"; \ fi; \ if test -f $(srcdir)/ChangeLog; then \ dists="$$dists ChangeLog"; \ fi; \ for i in 0 1 2 3 4 5 6 7 8 9; do \ if test -f $(srcdir)/ChangeLog.$$i; then \ dists="$$dists ChangeLog.$$i"; \ fi; \ done; \ if test -f $(srcdir)/LINGUAS; then dists="$$dists LINGUAS"; fi; \ for file in $$dists; do \ if test -f $$file; then \ cp -p $$file $(distdir) || exit 1; \ else \ cp -p $(srcdir)/$$file $(distdir) || exit 1; \ fi; \ done update-po: Makefile $(MAKE) $(DOMAIN).pot-update test -z "$(UPDATEPOFILES)" || $(MAKE) $(UPDATEPOFILES) $(MAKE) update-gmo # General rule for creating PO files. .nop.po-create: @lang=`echo $@ | sed -e 's/\.po-create$$//'`; \ echo "File $$lang.po does not exist. If you are a translator, you can create it through 'msginit'." 1>&2; \ exit 1 # General rule for updating PO files. .nop.po-update: @lang=`echo $@ | sed -e 's/\.po-update$$//'`; \ if test "$(PACKAGE)" = "gettext-tools"; then PATH=`pwd`/../src:$$PATH; fi; \ tmpdir=`pwd`; \ echo "$$lang:"; \ test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ echo "$${cdcmd}$(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$lang.new.po"; \ cd $(srcdir); \ if $(MSGMERGE) $$lang.po $(DOMAIN).pot -o $$tmpdir/$$lang.new.po; then \ if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ rm -f $$tmpdir/$$lang.new.po; \ else \ if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ :; \ else \ echo "msgmerge for $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ exit 1; \ fi; \ fi; \ else \ echo "msgmerge for $$lang.po failed!" 1>&2; \ rm -f $$tmpdir/$$lang.new.po; \ fi $(DUMMYPOFILES): update-gmo: Makefile $(GMOFILES) @: Makefile: Makefile.in.in Makevars $(top_builddir)/config.status @POMAKEFILEDEPS@ cd $(top_builddir) \ && $(SHELL) ./config.status $(subdir)/$@.in po-directories force: # Tell versions [3.59,3.63) of GNU make not to export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:

nordugrid-arc-7.1.1/po/sv.po

# Translation file for the Advanced Resource Connector (Arc)
msgid ""
msgstr ""
"Project-Id-Version: Arc\n"
"Report-Msgid-Bugs-To: support@nordugrid.org\n"
"POT-Creation-Date: 2025-10-03 15:43+0200\n"
"PO-Revision-Date: 2025-06-26 09:41+0200\n"
"Last-Translator: Mattias Ellert \n"
"Language-Team: Swedish\n"
"Language: sv\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=n != 1;\n"

#: src/clients/compute/arccat.cpp:38 src/clients/compute/arcclean.cpp:34
#: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33
#: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresume.cpp:32
#: src/clients/compute/arcstat.cpp:34
msgid "[job ...]"
msgstr "[jobb ...]"

#: src/clients/compute/arccat.cpp:39
msgid ""
"The arccat command performs the cat command on the stdout, stderr or grid\n"
"manager's error log of the job."
msgstr ""
"arccat-kommandot utför cat-kommandot på jobbets stdout, stderr eller\n"
"gridmanager-fellogg."

#: src/clients/compute/arccat.cpp:46 src/clients/compute/arcclean.cpp:41
#: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45
#: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37
#: src/clients/compute/arcresume.cpp:37 src/clients/compute/arcstat.cpp:42
#: src/clients/compute/arcsub.cpp:53 src/clients/compute/arcsync.cpp:147
#: src/clients/compute/arctest.cpp:67 src/clients/credentials/arcproxy.cpp:484
#: src/clients/data/arccp.cpp:652 src/clients/data/arcls.cpp:371
#: src/clients/data/arcmkdir.cpp:149 src/clients/data/arcrename.cpp:160
#: src/clients/data/arcrm.cpp:174 src/hed/daemon/unix/main_unix.cpp:345
#: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1265
#: src/hed/libs/data/DataExternalHelper.cpp:358
#, c-format
msgid "%s version %s"
msgstr "%s version %s"

#: src/clients/compute/arccat.cpp:55 src/clients/compute/arcclean.cpp:50
#: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53
#: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46
#: src/clients/compute/arcresume.cpp:46 src/clients/compute/arcstat.cpp:51
#: src/clients/compute/arcsub.cpp:62 src/clients/compute/arcsync.cpp:156
#: src/clients/compute/arctest.cpp:89 src/clients/credentials/arcproxy.cpp:492
#: src/clients/data/arccp.cpp:659 src/clients/data/arcls.cpp:379
#: src/clients/data/arcmkdir.cpp:157 src/clients/data/arcrename.cpp:168
#: src/clients/data/arcrm.cpp:183
#: src/libs/data-staging/DataDeliveryLocalComm.cpp:192
#: src/services/a-rex/grid-manager/GridManager.cpp:110
#: src/services/a-rex/grid-manager/log/JobLog.cpp:139
#, c-format
msgid "Running command: %s"
msgstr "Kör kommando: %s"

#: src/clients/compute/arccat.cpp:66 src/clients/compute/arcclean.cpp:61
#: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65
#: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57
#: src/clients/compute/arcresume.cpp:50 src/clients/compute/arcstat.cpp:62
#: src/clients/compute/arcsub.cpp:66 src/clients/compute/arcsync.cpp:167
#: src/clients/compute/arctest.cpp:93 src/clients/data/arccp.cpp:682
#: src/clients/data/arcls.cpp:401 src/clients/data/arcmkdir.cpp:179
#: src/clients/data/arcrename.cpp:190 src/clients/data/arcrm.cpp:205
msgid "Failed configuration initialization"
msgstr "Misslyckades med att initiera inställningar"

#: src/clients/compute/arccat.cpp:78 src/clients/compute/arcclean.cpp:73
#: src/clients/compute/arcget.cpp:87 src/clients/compute/arckill.cpp:72
#: src/clients/compute/arcrenew.cpp:69 src/clients/compute/arcresume.cpp:69
#: src/clients/compute/arcstat.cpp:74
#, c-format
msgid "Cannot read specified jobid file: %s"
msgstr "Kan inte läsa angiven jobb-id-fil: %s"

#: src/clients/compute/arccat.cpp:108 src/clients/compute/arcclean.cpp:103
#: src/clients/compute/arcget.cpp:117 src/clients/compute/arckill.cpp:102
#: src/clients/compute/arcrenew.cpp:99 src/clients/compute/arcresume.cpp:99
#: src/clients/compute/arcstat.cpp:127
msgid "No jobs given"
msgstr "Inga jobb angivna"

#: src/clients/compute/arccat.cpp:121 src/clients/compute/arcclean.cpp:116
#: src/clients/compute/arcget.cpp:130 src/clients/compute/arckill.cpp:115
#: src/clients/compute/arcrenew.cpp:112 src/clients/compute/arcresume.cpp:112
#: src/clients/compute/arcstat.cpp:139
#, c-format
msgid "Job list file (%s) doesn't exist"
msgstr "Jobblistfil (%s) existerar inte"

#: src/clients/compute/arccat.cpp:128 src/clients/compute/arcclean.cpp:123
#: src/clients/compute/arcget.cpp:137 src/clients/compute/arckill.cpp:122
#: src/clients/compute/arcrenew.cpp:119 src/clients/compute/arcresume.cpp:119
#: src/clients/compute/arcstat.cpp:146 src/clients/compute/arctest.cpp:296
#, c-format
msgid "Unable to read job information from file (%s)"
msgstr "Misslyckades med att läsa jobbinformation från fil (%s)"

#: src/clients/compute/arccat.cpp:137 src/clients/compute/arcclean.cpp:131
#: src/clients/compute/arcget.cpp:145 src/clients/compute/arckill.cpp:130
#: src/clients/compute/arcrenew.cpp:128 src/clients/compute/arcresume.cpp:128
#: src/clients/compute/arcstat.cpp:155
#, c-format
msgid "Warning: Job not found in job list: %s"
msgstr "Varning: Jobb finns inte i jobblista: %s"

#: src/clients/compute/arccat.cpp:150 src/clients/compute/arcclean.cpp:186
#: src/clients/compute/arcget.cpp:158 src/clients/compute/arckill.cpp:142
#: src/clients/compute/arcrenew.cpp:140 src/clients/compute/arcresume.cpp:140
msgid "No jobs"
msgstr "Inga jobb"

#: src/clients/compute/arccat.cpp:165
#, c-format
msgid "Could not create temporary file \"%s\""
msgstr "Kunde inte skapa temporär fil \"%s\""

#: src/clients/compute/arccat.cpp:166 src/clients/compute/arccat.cpp:172
#, c-format
msgid "Cannot create output of %s for any jobs"
msgstr "Kan inte skapa %s-utdata för något jobb"

#: src/clients/compute/arccat.cpp:173
#, c-format
msgid "Invalid destination URL %s"
msgstr "Ogiltig destinations-URL %s"

#: src/clients/compute/arccat.cpp:191
#, c-format
msgid "Job deleted: %s"
msgstr "Jobb borttaget: %s"

#: src/clients/compute/arccat.cpp:201
#, c-format
msgid "Job has not started yet: %s"
msgstr "Jobb har inte startat än: %s"

#: src/clients/compute/arccat.cpp:242
#, c-format
msgid "Cannot determine the %s location: %s"
msgstr "Kan inte bestämma plats för %s: %s"

#: src/clients/compute/arccat.cpp:247
#, c-format
msgid "Cannot create output of %s for job (%s): Invalid source %s"
msgstr "Kan inte skapa %s-utdata för jobb (%s): Ogiltig källa %s"

#: src/clients/compute/arccat.cpp:260
#, c-format
msgid "Catting %s for job %s"
msgstr "Visar %s för jobb %s"

#: src/clients/compute/arcclean.cpp:35
msgid "The arcclean command removes a job from the computing resource."
msgstr "arcclean-kommandot tar bort ett jobb från en beräkningsresurs."
#: src/clients/compute/arcclean.cpp:155
msgid ""
"You are about to remove jobs from the job list for which no information "
"could be\n"
"found. NOTE: Recently submitted jobs might not have appeared in the "
"information\n"
"system, and this action will also remove such jobs."
msgstr ""
"Du är på väg att ta bort jobb från jobblistan för vilka ingen information\n"
"kunde hittas. Notera att nyligen insända jobb kan saknas i "
"informationssystemet\n"
"och att denna handling kommer att ta bort också sådana jobb."

#: src/clients/compute/arcclean.cpp:158
msgid "Are you sure you want to clean jobs missing information?"
msgstr "Är du säker på att du vill ta bort jobb för vilka information saknas?"

#: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237
msgid "y"
msgstr "j"

#: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237
msgid "n"
msgstr "n"

#: src/clients/compute/arcclean.cpp:164
msgid "Jobs missing information will not be cleaned!"
msgstr "Jobb som saknar information kommer inte att tas bort!"

#: src/clients/compute/arcclean.cpp:180 src/clients/compute/arctest.cpp:300
#, c-format
msgid "Warning: Failed to write job information to file (%s)"
msgstr "Varning: Misslyckades med att skriva jobbinformation till fil (%s)"

#: src/clients/compute/arcclean.cpp:181
msgid ""
" Run 'arcclean -s Undefined' to remove cleaned jobs from job list"
msgstr ""
" Kör 'arcclean -s Undefined' för att ta bort borttagna jobb från "
"jobblistan"

#: src/clients/compute/arcclean.cpp:190
#, c-format
msgid "Jobs processed: %d, deleted: %d"
msgstr "Jobb behandlade: %d, borttagna: %d"

#: src/clients/compute/arcget.cpp:36
msgid "The arcget command is used for retrieving the results from a job."
msgstr "arcget-kommandot används för att hämta resultatet av ett jobb."

#: src/clients/compute/arcget.cpp:75
#, c-format
msgid "Job download directory from user configuration file: %s"
msgstr "Jobbnedladdningskatalog från användarinställningsfil: %s"

#: src/clients/compute/arcget.cpp:78
msgid "Job download directory will be created in present working directory."
msgstr "Jobbnedladdningskatalog kommer att skapas i nuvarande arbetskatalog."
#: src/clients/compute/arcget.cpp:82
#, c-format
msgid "Job download directory: %s"
msgstr "Jobbnedladdningskatalog: %s"

#: src/clients/compute/arcget.cpp:168
#, c-format
msgid "Unable to create directory for storing results (%s) - %s"
msgstr "Misslyckades med att skapa katalog för att lagra resultat (%s) - %s"

#: src/clients/compute/arcget.cpp:178
#, c-format
msgid "Results stored at: %s"
msgstr "Resultat lagrade i: %s"

#: src/clients/compute/arcget.cpp:190 src/clients/compute/arckill.cpp:158
msgid "Warning: Some jobs were not removed from server"
msgstr "Varning: Några jobb togs inte bort från servern"

#: src/clients/compute/arcget.cpp:191 src/clients/compute/arcget.cpp:198
#: src/clients/compute/arckill.cpp:159
msgid " Use arcclean to remove retrieved jobs from job list"
msgstr " Använd arcclean för att ta bort hämtade jobb från jobblistan"

#: src/clients/compute/arcget.cpp:197 src/clients/compute/arckill.cpp:165
#, c-format
msgid "Warning: Failed removing jobs from file (%s)"
msgstr "Varning: Misslyckades med att ta bort jobb från fil (%s)"

#: src/clients/compute/arcget.cpp:202
#, c-format
msgid ""
"Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d"
msgstr ""
"Jobb behandlade: %d, framgångsrikt hämtade: %d, framgångsrikt borttagna: %d"

#: src/clients/compute/arcget.cpp:206
#, c-format
msgid "Jobs processed: %d, successfully retrieved: %d"
msgstr "Jobb behandlade: %d, framgångsrikt hämtade: %d"

#: src/clients/compute/arcinfo.cpp:34
msgid "[resource ...]"
msgstr "[resurs ...]"

#: src/clients/compute/arcinfo.cpp:35
msgid ""
"The arcinfo command is used for obtaining the status of computing resources "
"on the Grid."
msgstr ""
"arcinfo-kommandot används för att erhålla statusen på beräkningsresurser på "
"griden."

#: src/clients/compute/arcinfo.cpp:141
msgid "Information endpoint"
msgstr "Informationsslutpunkt"

#: src/clients/compute/arcinfo.cpp:152
msgid "Submission endpoint"
msgstr "Insändningsslutpunkt"

#: src/clients/compute/arcinfo.cpp:154
msgid "status"
msgstr "status"

#: src/clients/compute/arcinfo.cpp:156
msgid "interface"
msgstr "gränssnitt"

#: src/clients/compute/arcinfo.cpp:175
msgid "ERROR: Failed to retrieve information from the following endpoints:"
msgstr "Fel: Misslyckades med att hämta information från följande slutpunkter:"

#: src/clients/compute/arcinfo.cpp:188
msgid "ERROR: Failed to retrieve information"
msgstr "Fel: Misslyckades med att hämta information"

#: src/clients/compute/arcinfo.cpp:190
msgid "from the following endpoints:"
msgstr "från följande slutpunkter:"

#: src/clients/compute/arckill.cpp:34
msgid "The arckill command is used to kill running jobs."
msgstr "arckill-kommandot används för att avbryta körande jobb."
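# The arcget/arcclean messages above describe the retrieval cycle: results
# are downloaded into a per-job directory and the job is then dropped from
# the local job list. A minimal sketch of that cycle, assuming a job ID
# returned by an earlier submission (<jobid> is a placeholder):
#
#   arcget <jobid>           # fetch the results into the download directory
#   arcclean -s Undefined    # prune jobs for which no remote info remains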
#: src/clients/compute/arckill.cpp:166
msgid ""
" Run 'arcclean -s Undefined' to remove killed jobs from job list"
msgstr ""
" Kör 'arcclean -s Undefined' för att ta bort avbrutna jobb från "
"jobblistan"

#: src/clients/compute/arckill.cpp:169
#, c-format
msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d"
msgstr ""
"Jobb behandlade: %d, framgångsrikt avbrutna: %d, framgångsrikt borttagna: %d"

#: src/clients/compute/arckill.cpp:171
#, c-format
msgid "Jobs processed: %d, successfully killed: %d"
msgstr "Jobb behandlade: %d, framgångsrikt avbrutna: %d"

#: src/clients/compute/arcrenew.cpp:146
#, c-format
msgid "Jobs processed: %d, renewed: %d"
msgstr "Jobb behandlade: %d, förnyade: %d"

#: src/clients/compute/arcresume.cpp:146
#, c-format
msgid "Jobs processed: %d, resumed: %d"
msgstr "Jobb behandlade: %d, återupptagna: %d"

#: src/clients/compute/arcstat.cpp:35
msgid ""
"The arcstat command is used for obtaining the status of jobs that have\n"
"been submitted to Grid enabled resources."
msgstr ""
"arcstat-kommandot används för att erhålla statusen på jobb som sänts in\n"
"till gridresurser."

#: src/clients/compute/arcstat.cpp:101
msgid "The 'sort' and 'rsort' flags cannot be specified at the same time."
msgstr "Flaggorna 'sort' och 'rsort' kan inte anges samtidigt."

#: src/clients/compute/arcstat.cpp:171
msgid "No jobs found, try later"
msgstr "Inga jobb hittades, försök senare"

#: src/clients/compute/arcstat.cpp:215
#, c-format
msgid "Status of %d jobs was queried, %d jobs returned information"
msgstr "Frågade om status för %d jobb, %d jobb returnerade information"

#: src/clients/compute/arcsub.cpp:45
msgid "[filename ...]"
msgstr "[filnamn ...]"

#: src/clients/compute/arcsub.cpp:46
msgid ""
"The arcsub command is used for submitting jobs to Grid enabled computing\n"
"resources."
msgstr ""
"arcsub-kommandot används för att sända in jobb till beräkningsresurser på\n"
"griden."
#: src/clients/compute/arcsub.cpp:97
msgid "No job description input specified"
msgstr "Ingen jobbeskrivning angiven"

#: src/clients/compute/arcsub.cpp:110
#, c-format
msgid "Can not open job description file: %s"
msgstr "Kan inte öppna jobbeskrivningsfil: %s"

#: src/clients/compute/arcsub.cpp:138 src/clients/compute/arcsub.cpp:166
msgid "Invalid JobDescription:"
msgstr "Ogiltig jobbeskrivning:"

#: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:250
msgid ""
"Cannot adapt job description to the submission target when information "
"discovery is turned off"
msgstr ""
"Kan inte anpassa jobbeskrivning till insändnings-target när "
"informationssökning är avslagen"

#: src/clients/compute/arcsync.cpp:66 src/clients/compute/arcsync.cpp:177
#, c-format
msgid "Warning: Unable to open job list file (%s), unknown format"
msgstr "Varning: Kan inte öppna jobblistfil (%s), okänt format"

#: src/clients/compute/arcsync.cpp:76
msgid "Found the following jobs:"
msgstr "Hittade följande jobb:"

#: src/clients/compute/arcsync.cpp:86
msgid "Total number of jobs found: "
msgstr "Totalt antal hittade jobb: "

#: src/clients/compute/arcsync.cpp:98
msgid "Found the following new jobs:"
msgstr "Hittade följande nya jobb:"

#: src/clients/compute/arcsync.cpp:108
msgid "Total number of new jobs found: "
msgstr "Totalt antal hittade nya jobb: "

#: src/clients/compute/arcsync.cpp:113
#, c-format
msgid "ERROR: Failed to write job information to file (%s)"
msgstr "Fel: Misslyckades med att skriva jobbinformation till fil (%s)"

#: src/clients/compute/arcsync.cpp:140
msgid ""
"The arcsync command synchronizes your local job list with the information "
"at\n"
"the given CEs or registry servers."
msgstr ""
"arcsync-kommandot synkroniserar din lokala jobblista med information från\n"
"de angivna beräkningsresurserna eller registerservrarna."

#: src/clients/compute/arcsync.cpp:183
#, c-format
msgid "Warning: Unable to read local list of jobs from file (%s)"
msgstr "Varning: Kunde inte läsa lokal jobblista från fil (%s)"

#: src/clients/compute/arcsync.cpp:188
#, c-format
msgid "Warning: Unable to truncate local list of jobs in file (%s)"
msgstr "Varning: Kunde inte trunkera lokal jobblista i fil (%s)"

#: src/clients/compute/arcsync.cpp:194
#, c-format
msgid "Warning: Unable to create job list file (%s), jobs list is destroyed"
msgstr "Varning: Kunde inte skapa jobblistfil (%s), jobblista har raderats"

#: src/clients/compute/arcsync.cpp:198
#, c-format
msgid ""
"Warning: Failed to write local list of jobs into file (%s), jobs list is "
"destroyed"
msgstr ""
"Varning: Misslyckades med att skriva lokal jobblista till fil (%s), "
"jobblista har raderats"

#: src/clients/compute/arcsync.cpp:231
msgid ""
"Synchronizing the local list of active jobs with the information in the\n"
"information system can result in some inconsistencies. Very recently "
"submitted\n"
"jobs might not yet be present, whereas jobs very recently scheduled for\n"
"deletion can still be present."
msgstr ""
"Att synkronisera den lokala listan med aktiva jobb med informationen i\n"
"informationssystemet kan resultera i bristande överensstämmelse.\n"
"Nyligen insända jobb kan ännu saknas i informationssystemet, medan jobb\n"
"som nyligen schemalagts för borttagning fortfarande kan finnas kvar."

#: src/clients/compute/arcsync.cpp:236
msgid "Are you sure you want to synchronize your local job list?"
msgstr "Är du säker på att du vill synkronisera din lokala jobblista?"
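# The arcsub messages above revolve around a job description file. A minimal
# sketch of one in the xRSL language accepted by ARC (the file name
# hello.xrsl and its contents are illustrative, not taken from this catalog):
#
#   &(executable="/bin/echo")
#    (arguments="hello")
#    (stdout="out.txt")
#    (jobname="hello-test")
#
# Submitting it then reduces to:  arcsub hello.xrsl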
#: src/clients/compute/arcsync.cpp:241
msgid "Cancelling synchronization request"
msgstr "Avbryter synkroniseringsbegäran"

#: src/clients/compute/arcsync.cpp:251
msgid ""
"No services specified. Please configure default services in the client "
"configuration, or specify a cluster or registry (-C or -Y options, see "
"arcsync -h)."
msgstr ""
"Inga tjänster angivna. Konfigurera förvalda tjänster i "
"klientinställningarna, eller ange ett kluster eller register (alternativ -C "
"eller -Y, se arcsync -h)."

#: src/clients/compute/arctest.cpp:60
msgid " "
msgstr " "

#: src/clients/compute/arctest.cpp:61
msgid "The arctest command is used for testing clusters as resources."
msgstr "arctest-kommandot används för att testa kluster som resurser."

#: src/clients/compute/arctest.cpp:73
msgid ""
"Nothing to do:\n"
"you have to either specify a test job id with -J (--job)\n"
"or query information about the certificates with -E (--certificate)\n"
msgstr ""
"Inget att göra:\n"
"du måste antingen ange ett test-jobb-id med -J (--job)\n"
"eller fråga om information om certifikaten med -E (--certificate)\n"

#: src/clients/compute/arctest.cpp:80
msgid ""
"For the 1st test job you also have to specify a runtime value with -r (--"
"runtime) option."
msgstr ""
"För det första test-jobbet måste du också ange en körtid med alternativet -r "
"(--runtime)."

#: src/clients/compute/arctest.cpp:118
msgid "Certificate information:"
msgstr "Certifikatinformation:"

#: src/clients/compute/arctest.cpp:122
msgid "No user-certificate found"
msgstr "Hittade inget användarcertifikat"

#: src/clients/compute/arctest.cpp:125
#, c-format
msgid "Certificate: %s"
msgstr "Certifikat: %s"

#: src/clients/compute/arctest.cpp:127
#, c-format
msgid "Subject name: %s"
msgstr "Subjekt-namn: %s"

#: src/clients/compute/arctest.cpp:128
#, c-format
msgid "Valid until: %s"
msgstr "Giltigt till: %s"

#: src/clients/compute/arctest.cpp:132
msgid "Unable to determine certificate information"
msgstr "Kunde inte bestämma certifikatinformation"

#: src/clients/compute/arctest.cpp:136
msgid "Proxy certificate information:"
msgstr "Proxycertifikatinformation:"

#: src/clients/compute/arctest.cpp:138
msgid "No proxy found"
msgstr "Hittade ingen proxy"

#: src/clients/compute/arctest.cpp:141
#, c-format
msgid "Proxy: %s"
msgstr "Proxy: %s"

#: src/clients/compute/arctest.cpp:142
#, c-format
msgid "Proxy-subject: %s"
msgstr "Proxy-subjekt: %s"

#: src/clients/compute/arctest.cpp:144
msgid "Valid for: Proxy expired"
msgstr "Giltig i: Proxyns giltighetstid har gått ut"

#: src/clients/compute/arctest.cpp:146
msgid "Valid for: Proxy not valid"
msgstr "Giltig i: Proxyn är ej giltig"

#: src/clients/compute/arctest.cpp:148
#, c-format
msgid "Valid for: %s"
msgstr "Giltig i: %s"

#: src/clients/compute/arctest.cpp:153
#, c-format
msgid "Certificate issuer: %s"
msgstr "Certifikatutfärdare: %s"

#: src/clients/compute/arctest.cpp:157
msgid "CA-certificates installed:"
msgstr "Installerade CA-certifikat:"

#: src/clients/compute/arctest.cpp:179
msgid "Unable to detect if issuer certificate is installed."
msgstr "Kunde inte detektera om utfärdarcertifikat är installerat."
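# The arctest messages above name their options explicitly: -E reports on the
# installed credentials, -J selects one of the predefined test jobs, and the
# first test job additionally needs a runtime value via -r. A minimal sketch
# of both modes (the runtime value 60 is an arbitrary example):
#
#   arctest -E           # check user certificate, proxy and CA certificates
#   arctest -J 1 -r 60   # submit predefined test job 1 with a runtime value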
#: src/clients/compute/arctest.cpp:182
msgid "Your issuer's certificate is not installed"
msgstr "Din utfärdares certifikat är inte installerat"

#: src/clients/compute/arctest.cpp:196
#, c-format
msgid "No test-job, with ID \"%d\""
msgstr "Inget test-jobb, med ID \"%d\""

#: src/clients/compute/arctest.cpp:267
#, c-format
msgid "Cannot write jobid (%s) to file (%s)"
msgstr "Kan inte skriva jobb-id (%s) till fil (%s)"

#: src/clients/compute/arctest.cpp:268
#, c-format
msgid "Test submitted with jobid: %s"
msgstr "Test insänt med jobb-id: %s"

#: src/clients/compute/arctest.cpp:283
#, c-format
msgid "Computing service: %s"
msgstr "Beräkningstjänst: %s"

#: src/clients/compute/arctest.cpp:289
msgid "Test failed, no more possible targets"
msgstr "Test misslyckades, inga fler möjliga target"

#: src/clients/compute/arctest.cpp:302 src/clients/compute/submit.cpp:49
msgid "To recover missing jobs, run arcsync"
msgstr "För att återställa saknade jobb, kör arcsync"

#: src/clients/compute/arctest.cpp:315 src/clients/compute/submit.cpp:159
#, c-format
msgid ""
"Unable to prepare job description according to needs of the target resource "
"(%s)."
msgstr ""
"Kunde inte förbereda jobbeskrivningen enligt target-resursens behov (%s)."

#: src/clients/compute/arctest.cpp:325 src/clients/compute/submit.cpp:175
#, c-format
msgid ""
"An error occurred during the generation of job description to be sent to %s"
msgstr ""
"Ett fel inträffade under skapandet av jobbeskrivningen som ska sändas till %s"

#: src/clients/compute/arctest.cpp:329 src/clients/compute/submit.cpp:179
#, c-format
msgid "Job description to be sent to %s:"
msgstr "Jobbeskrivning som skall sändas till %s:"

#: src/clients/compute/submit.cpp:34
#, c-format
msgid "Job submitted with jobid: %s"
msgstr "Jobb insänt med jobb-id: %s"

#: src/clients/compute/submit.cpp:40
#, c-format
msgid "Cannot write job IDs to file (%s)"
msgstr "Kan inte skriva jobb-id till fil (%s)"

#: src/clients/compute/submit.cpp:45
#, c-format
msgid "Unable to open job list file (%s), unknown format"
msgstr "Kan inte öppna jobblistfil (%s), okänt format"

#: src/clients/compute/submit.cpp:47
#, c-format
msgid "Failed to write job information to database (%s)"
msgstr "Misslyckades med att skriva jobbinformation till databas (%s)"

#: src/clients/compute/submit.cpp:51
#, c-format
msgid "Record about new job successfully added to the database (%s)"
msgstr "Post om nytt jobb framgångsrikt tillagd till databasen (%s)"

#: src/clients/compute/submit.cpp:57
msgid "Job submission summary:"
msgstr "Jobbinsändningssammanfattning:"

#: src/clients/compute/submit.cpp:59
#, c-format
msgid "%d of %d jobs were submitted"
msgstr "%d av %d jobb sändes in"

#: src/clients/compute/submit.cpp:61
msgid "The following jobs were not submitted:"
msgstr "Följande jobb sändes inte in:"

#: src/clients/compute/submit.cpp:65
msgid "Job nr."
msgstr "Jobb nr."

#: src/clients/compute/submit.cpp:75
#, c-format
msgid "ERROR: Unable to load broker %s"
msgstr "Fel: Kunde inte ladda in mäklare %s"

#: src/clients/compute/submit.cpp:79
msgid ""
"ERROR: Job submission aborted because no resource returned any information"
msgstr ""
"Fel: Jobbinsändning avbröts eftersom inga resurser returnerade någon "
"information"

#: src/clients/compute/submit.cpp:83
msgid "ERROR: One or multiple job descriptions was not submitted."
msgstr "Fel: En eller flera jobbeskrivningar sändes inte in."
#: src/clients/compute/submit.cpp:100
#, c-format
msgid ""
"A computing resource using the GridFTP interface was requested, but\n"
"%sthe corresponding plugin could not be loaded. Is the plugin installed?\n"
"%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n"
"%sDepending on your type of installation the package name might differ."
msgstr ""
"En beräkningsresurs som använder GridFTP-gränssnittet begärdes, men\n"
"%smotsvarande plugin kunde inte laddas in. Är pluginen installerad?\n"
"%sOm inte, installera paketet 'nordugrid-arc-plugins-globus'.\n"
"%sBeroende på din installationstyp kan paketnamnet variera."

#: src/clients/compute/submit.cpp:129
msgid ""
"Unable to adapt job description to any resource, no resource information "
"could be obtained."
msgstr ""
"Kunde inte anpassa jobbeskrivningen till någon resurs, ingen "
"resursinformation kunde erhållas."

#: src/clients/compute/submit.cpp:130
msgid "Original job description is listed below:"
msgstr "Ursprunglig jobbeskrivning visas nedan:"

#: src/clients/compute/submit.cpp:142
#, c-format
msgid "Dumping job description aborted: Unable to load broker %s"
msgstr "Visning av jobbeskrivning avbruten: kan inte ladda in mäklare %s"

#: src/clients/compute/submit.cpp:197
msgid ""
"Unable to prepare job description according to needs of the target resource."
msgstr "Kan inte förbereda jobbeskrivning enligt target-resursens behov."

#: src/clients/compute/submit.cpp:281 src/clients/compute/submit.cpp:311
#, c-format
msgid "Service endpoint %s (type %s) added to the list for resource discovery"
msgstr "Tjänsteslutpunkt %s (typ %s) lagd till i listan för resurssökning"

#: src/clients/compute/submit.cpp:291
msgid ""
"There are no endpoints in registry that match requested info endpoint type"
msgstr ""
"Det finns inga slutpunkter i registret som matchar den begärda "
"informationsslutpunktstypen"
#: src/clients/compute/submit.cpp:332
#, c-format
msgid "Service endpoint %s (type %s) added to the list for direct submission"
msgstr "Tjänsteslutpunkt %s (typ %s) lagd till i listan för direktinsändning"

#: src/clients/compute/submit.cpp:340
msgid ""
"There are no endpoints in registry that match requested submission endpoint "
"type"
msgstr ""
"Det finns inga slutpunkter i registret som matchar den begärda "
"insändningsslutpunktstypen"

#: src/clients/compute/utils.cpp:111
#, c-format
msgid "Types of execution services that %s is able to submit jobs to:"
msgstr "Typer av beräkningstjänster som %s kan sända in jobb till:"

#: src/clients/compute/utils.cpp:114
#, c-format
msgid "Types of registry services that %s is able to collect information from:"
msgstr "Typer av registertjänster som %s kan samla in information från:"

#: src/clients/compute/utils.cpp:117
#, c-format
msgid ""
"Types of local information services that %s is able to collect information "
"from:"
msgstr ""
"Typer av lokala informationstjänster som %s kan samla in information från:"

#: src/clients/compute/utils.cpp:120
#, c-format
msgid ""
"Types of local information services that %s is able to collect job "
"information from:"
msgstr ""
"Typer av lokala informationstjänster som %s kan samla in jobbinformation "
"från:"

#: src/clients/compute/utils.cpp:123
#, c-format
msgid "Types of services that %s is able to manage jobs at:"
msgstr "Typer av tjänster som %s kan hantera jobb på:"

#: src/clients/compute/utils.cpp:126
#, c-format
msgid "Job description languages supported by %s:"
msgstr "Jobbeskrivningsspråk som stöds av %s:"

#: src/clients/compute/utils.cpp:129
#, c-format
msgid "Brokers available to %s:"
msgstr "Mäklare tillgängliga för %s:"

#: src/clients/compute/utils.cpp:152
#, c-format
msgid ""
"Default broker (%s) is not available. When using %s a broker should be "
"specified explicitly (-b option)."
msgstr ""
"Förvald mäklare (%s) är inte tillgänglig. När %s används måste en mäklare "
"anges explicit (alternativ -b)."

#: src/clients/compute/utils.cpp:162
msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!"
msgstr "Proxyns livstid har gått ut. Jobbinsändning avbruten. Kör 'arcproxy'!"

#: src/clients/compute/utils.cpp:167
msgid ""
"Cannot find any proxy. This application currently cannot run without a "
"proxy.\n"
" If you have the proxy file in a non-default location,\n"
" please make sure the path is specified in the client configuration file.\n"
" If you don't have a proxy yet, please run 'arcproxy'!"
msgstr ""
"Kan inte hitta någon proxy. Detta program kan för närvarande inte köras utan "
"en proxy.\n"
" Om du har proxyfilen på en icke-förvald plats,\n"
" se till att sökvägen är angiven i klientinställningsfilen.\n"
" Om du inte har en proxy än, kör 'arcproxy'!"

#: src/clients/compute/utils.cpp:179 src/clients/data/utils.cpp:28
msgid ""
"Cannot find any token. Please run 'oidc-token' or use similar\n"
" utility to obtain authentication token!"
msgstr ""
"Kan inte hitta något token. Kör 'oidc-token' eller använd ett liknande\n"
" verktyg för att erhålla ett autentiserings-token!"

#: src/clients/compute/utils.cpp:308
#, c-format
msgid "Unsupported submission endpoint type: %s"
msgstr "Insändningsslutpunktstyp stöds inte: %s"

#: src/clients/compute/utils.cpp:327
msgid ""
"Requested to skip resource discovery. Will try direct submission to arcrest "
"endpoint type."
msgstr ""
"Begärt att hoppa över resurssökning. Kommer att försöka med direkt "
"insändning till arcrest-slutpunktstyp."
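# The submit messages above distinguish two ways of finding a target:
# resource discovery through a registry, and direct submission to a named
# computing element. A hedged sketch using the -C/-Y options cited in the
# arcsync help earlier in this catalog (hostnames are placeholders; that
# arcsub accepts the same pair of options is an assumption based on the
# shared option help reproduced here):
#
#   arcsub -C ce.example.org hello.xrsl        # talk to one CE directly
#   arcsub -Y registry.example.org hello.xrsl  # discover CEs via a registry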
#: src/clients/compute/utils.cpp:332
#, c-format
msgid "Unsupported information endpoint type: %s"
msgstr "Informationsslutpunktstyp stöds inte: %s"

#: src/clients/compute/utils.cpp:385
msgid "Other actions"
msgstr "Övriga flaggor"

#: src/clients/compute/utils.cpp:386
msgid "Brokering and filtering"
msgstr "Resursmatchning och filtrering"

#: src/clients/compute/utils.cpp:387
msgid "Output format modifiers"
msgstr "Utdataformateringsmodifierare"

#: src/clients/compute/utils.cpp:388
msgid "Behaviour tuning"
msgstr "Beteendeinställning"

#: src/clients/compute/utils.cpp:389
msgid "Target endpoint selection"
msgstr "Val av target-slutpunkt"

#: src/clients/compute/utils.cpp:393
msgid "computing element hostname or a complete endpoint URL"
msgstr "beräkningsresurs-värdnamn eller en fullständig slutpunkts-URL"

#: src/clients/compute/utils.cpp:394 src/clients/compute/utils.cpp:404
msgid "ce"
msgstr "beräkningsresurs"

#: src/clients/compute/utils.cpp:398
msgid "registry service URL with optional specification of protocol"
msgstr "registertjänst-URL med frivilligt angivande av protokoll"

#: src/clients/compute/utils.cpp:399
msgid "registry"
msgstr "register"

#: src/clients/compute/utils.cpp:403
msgid "only select jobs that were submitted to this computing element"
msgstr "välj endast jobb som sändes in till denna beräkningsresurs"

#: src/clients/compute/utils.cpp:410
msgid ""
"require the specified endpoint type for job submission.\n"
"\tAllowed values are: arcrest and internal."
msgstr ""
"kräv den angivna slutpunktstypen för jobbinsändning.\n"
"\tTillåtna värden är: arcrest och internal."

#: src/clients/compute/utils.cpp:412 src/clients/compute/utils.cpp:426
#: src/clients/compute/utils.cpp:434
msgid "type"
msgstr "typ"

#: src/clients/compute/utils.cpp:418
msgid "skip the service with the given URL during service discovery"
msgstr "hoppa över tjänst med den angivna URLen under tjänstesökning"

#: src/clients/compute/utils.cpp:419 src/clients/compute/utils.cpp:603
#: src/clients/data/arccp.cpp:583
msgid "URL"
msgstr "URL"

#: src/clients/compute/utils.cpp:423
msgid ""
"require information query using the specified information endpoint type.\n"
"\tSpecial value 'NONE' will disable all resource information queries and the "
"following brokering.\n"
"\tAllowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal."
msgstr ""
"kräv informationsförfrågan med den angivna informationsslutpunktstypen.\n"
"\tSärskilda värdet 'NONE' stänger av alla resursinformationsförfrågningar "
"och den efterföljande resursmatchningen.\n"
"\tTillåtna värden är: ldap.nordugrid, ldap.glue2, arcrest och internal."

#: src/clients/compute/utils.cpp:432
msgid ""
"only get information about executon targets that support this job submission "
"endpoint type.\n"
"\tAllowed values are: arcrest and internal."
msgstr "" "hämta endast information om exekverings-target som stöder denna " "jobbinsändnings-slutpunktstyp.\n" "\tTillÃ¥tna värden är: arcrest och internal" #: src/clients/compute/utils.cpp:440 msgid "keep the files on the server (do not clean)" msgstr "behÃ¥ll filerna pÃ¥ servern (ta inte bort)" #: src/clients/compute/utils.cpp:446 msgid "do not ask for verification" msgstr "frÃ¥ga inte efter bekräftelse" #: src/clients/compute/utils.cpp:450 msgid "truncate the joblist before synchronizing" msgstr "trunkera jobblistan för synkronisering" #: src/clients/compute/utils.cpp:454 msgid "do not collect information, only convert jobs storage format" msgstr "samla inte in information, konvertera endast lagringsformat" #: src/clients/compute/utils.cpp:460 src/clients/data/arcls.cpp:277 msgid "long format (more information)" msgstr "lÃ¥ngt format (mer information)" #: src/clients/compute/utils.cpp:466 msgid "show the stdout of the job (default)" msgstr "visa jobbets stdout (förval)" #: src/clients/compute/utils.cpp:470 msgid "show the stderr of the job" msgstr "visa jobbets stderr" #: src/clients/compute/utils.cpp:474 msgid "show the CE's error log of the job" msgstr "visa jobbets beräkningsresurs-fellogg" #: src/clients/compute/utils.cpp:478 msgid "show the specified file from job's session directory" msgstr "visa den angivna filen frÃ¥n jobbets sessionskatalog" #: src/clients/compute/utils.cpp:479 msgid "filepath" msgstr "sökväg" #: src/clients/compute/utils.cpp:485 msgid "" "download directory (the job directory will be created in this directory)" msgstr "nedladdningskatalog (jobbkatalogen kommer att skapas i denna katalog)" #: src/clients/compute/utils.cpp:487 msgid "dirname" msgstr "katalognamn" #: src/clients/compute/utils.cpp:491 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" "använd jobbets namn i stället för dess korta ID för jobbkatalogens namn" #: src/clients/compute/utils.cpp:496 msgid "force download (overwrite existing job directory)" msgstr "tvÃ¥ngsnedladdning (skriv över existerande jobbkatalog)" #: src/clients/compute/utils.cpp:502 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "i stället för status skriv endast de utvalda jobbens ID" #: src/clients/compute/utils.cpp:506 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "sortera jobb efter jobb-id, insändningstid eller jobbnamn" #: src/clients/compute/utils.cpp:507 src/clients/compute/utils.cpp:510 msgid "order" msgstr "ordning" #: src/clients/compute/utils.cpp:509 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "omvänd sortering av jobb efter jobb-id, insändningstid eller jobbnamn" #: src/clients/compute/utils.cpp:513 msgid "show jobs where status information is unavailable" msgstr "visa jobb för vilka statusinformation inte är tillgänglig" #: src/clients/compute/utils.cpp:517 msgid "show status information in JSON format" msgstr "visa statusinformation i JSON-format" #: src/clients/compute/utils.cpp:523 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "ta bort jobbet frÃ¥n den lokala jobblistan även om jobbet inte hittas i " "informationssystemet" #: src/clients/compute/utils.cpp:530 msgid "submit test job given by the number" msgstr "sänd in test-jobb givet av numret" #: src/clients/compute/utils.cpp:531 src/clients/compute/utils.cpp:535 msgid "int" msgstr "heltal" #: src/clients/compute/utils.cpp:534 msgid "test job runtime specified 
by the number" msgstr "test-jobbets körtid anges av numret" #: src/clients/compute/utils.cpp:541 msgid "only select jobs whose status is statusstr" msgstr "välj endast jobb vars status är statusstr" #: src/clients/compute/utils.cpp:542 msgid "statusstr" msgstr "statusstr" #: src/clients/compute/utils.cpp:546 msgid "all jobs" msgstr "alla jobb" #: src/clients/compute/utils.cpp:552 msgid "jobdescription string describing the job to be submitted" msgstr "jobbeskrivningssträng som beskriver jobbet som ska sändas in" #: src/clients/compute/utils.cpp:554 src/clients/compute/utils.cpp:560 #: src/clients/credentials/arcproxy.cpp:353 #: src/clients/credentials/arcproxy.cpp:360 #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxy.cpp:386 #: src/clients/credentials/arcproxy.cpp:404 #: src/clients/credentials/arcproxy.cpp:408 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:432 #: src/clients/credentials/arcproxy.cpp:436 msgid "string" msgstr "sträng" #: src/clients/compute/utils.cpp:558 msgid "jobdescription file describing the job to be submitted" msgstr "jobbeskrivningsfil som beskriver jobbet som ska sändas in" #: src/clients/compute/utils.cpp:566 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" "välj resursmatchningsmetod (lista tillgängliga mäklare med --listplugins)" #: src/clients/compute/utils.cpp:567 msgid "broker" msgstr "mäklare" #: src/clients/compute/utils.cpp:570 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "de insända jobbens ID kommer att läggas till i denna fil" #: src/clients/compute/utils.cpp:571 src/clients/compute/utils.cpp:598 #: src/clients/compute/utils.cpp:625 src/clients/compute/utils.cpp:633 #: src/clients/credentials/arcproxy.cpp:445 src/clients/data/arccp.cpp:603 #: src/clients/data/arcls.cpp:322 src/clients/data/arcmkdir.cpp:100 #: src/clients/data/arcrename.cpp:111 src/clients/data/arcrm.cpp:125 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:53 msgid "filename" msgstr "filnamn" #: src/clients/compute/utils.cpp:575 msgid "do not perform any delegation for submitted jobs" msgstr "utför ingen delegering för insända jobb" #: src/clients/compute/utils.cpp:579 msgid "perform X.509 delegation for submitted jobs" msgstr "utför X.509-delegering för insända jobb" #: src/clients/compute/utils.cpp:583 msgid "perform token delegation for submitted jobs" msgstr "utför token-delegering för insända jobb" #: src/clients/compute/utils.cpp:587 msgid "" "request at most this number of job instances submitted in single submit " "request" msgstr "" "begär högst detta antal jobb-instanser inskickade i en enskild " "insändningsbegäran" #: src/clients/compute/utils.cpp:591 msgid "" "request at least this number of job instances submitted in single submit " "request" msgstr "" "begär minst detta antal jobb-instanser inskickade i en enskild " "insändningsbegäran" #: src/clients/compute/utils.cpp:597 msgid "a file containing a list of jobIDs" msgstr "en fil som innehÃ¥ller en lista med jobb-id" #: src/clients/compute/utils.cpp:602 msgid "skip jobs that are on a computing element with a given URL" msgstr "hoppa över jobb som är pÃ¥ en beräkningsresurs med en given URL" #: src/clients/compute/utils.cpp:608 msgid "submit jobs as dry run (no submission to batch system)" msgstr "sänd in jobb som dryrun (ingen insändning till batchsystemet)" #: src/clients/compute/utils.cpp:612 msgid "" "do not submit - dump job description in the language accepted by the 
target" msgstr "" "sänd inte in - skriv ut jobbeskrivning i ett sprÃ¥k som accepteras av target" #: src/clients/compute/utils.cpp:618 msgid "prints info about installed user- and CA-certificates" msgstr "skriver ut information om installerade användar- och CA-certifikat" #: src/clients/compute/utils.cpp:619 src/clients/credentials/arcproxy.cpp:469 #: src/clients/data/arccp.cpp:637 src/clients/data/arcls.cpp:356 #: src/clients/data/arcmkdir.cpp:134 src/clients/data/arcrename.cpp:145 #: src/clients/data/arcrm.cpp:159 msgid "allow TLS connection which failed verification" msgstr "tillÃ¥t TLS-förbindelse som ej kunde verifieras" #: src/clients/compute/utils.cpp:624 #, c-format msgid "the file storing information about active jobs (default %s)" msgstr "filen som lagrar information om aktiva jobb (förval %s)" #: src/clients/compute/utils.cpp:632 src/clients/credentials/arcproxy.cpp:444 #: src/clients/data/arccp.cpp:602 src/clients/data/arcls.cpp:321 #: src/clients/data/arcmkdir.cpp:99 src/clients/data/arcrename.cpp:110 #: src/clients/data/arcrm.cpp:124 msgid "configuration file (default ~/.arc/client.conf)" msgstr "inställningsfil (förval ~/.arc/client.conf)" #: src/clients/compute/utils.cpp:635 src/clients/credentials/arcproxy.cpp:439 #: src/clients/data/arccp.cpp:597 src/clients/data/arcls.cpp:316 #: src/clients/data/arcmkdir.cpp:94 src/clients/data/arcrename.cpp:105 #: src/clients/data/arcrm.cpp:119 msgid "timeout in seconds (default 20)" msgstr "timeout i sekunder (förval 20)" #: src/clients/compute/utils.cpp:636 src/clients/credentials/arcproxy.cpp:440 #: src/clients/data/arccp.cpp:598 src/clients/data/arcls.cpp:317 #: src/clients/data/arcmkdir.cpp:95 src/clients/data/arcrename.cpp:106 #: src/clients/data/arcrm.cpp:120 msgid "seconds" msgstr "sekunder" #: src/clients/compute/utils.cpp:639 msgid "list the available plugins" msgstr "lista de tillgängliga pluginerna" #: src/clients/compute/utils.cpp:643 src/clients/credentials/arcproxy.cpp:449 #: src/clients/data/arccp.cpp:642 src/clients/data/arcls.cpp:361 #: src/clients/data/arcmkdir.cpp:139 src/clients/data/arcrename.cpp:150 #: src/clients/data/arcrm.cpp:164 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:190 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:67 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE eller DEBUG" #: src/clients/compute/utils.cpp:644 src/clients/credentials/arcproxy.cpp:450 #: src/clients/data/arccp.cpp:643 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:140 src/clients/data/arcrename.cpp:151 #: src/clients/data/arcrm.cpp:165 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:191 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:68 msgid "debuglevel" msgstr "debugnivÃ¥" #: src/clients/compute/utils.cpp:646 src/clients/credentials/arcproxy.cpp:473 #: src/clients/data/arccp.cpp:646 src/clients/data/arcls.cpp:365 #: src/clients/data/arcmkdir.cpp:143 src/clients/data/arcrename.cpp:154 #: src/clients/data/arcrm.cpp:168 msgid "print version information" msgstr "skriv ut versionsinformation" #: src/clients/compute/utils.cpp:652 src/clients/data/arccp.cpp:607 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:104 #: src/clients/data/arcrename.cpp:115 src/clients/data/arcrm.cpp:129 msgid "do not perform any authentication for 
opened connections" msgstr "utför ingen autentisering för öppnade förbindelser" #: src/clients/compute/utils.cpp:656 src/clients/data/arccp.cpp:612 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:109 #: src/clients/data/arcrename.cpp:120 src/clients/data/arcrm.cpp:134 msgid "perform X.509 authentication for opened connections" msgstr "utför X.509-autentisering för öppnade förbindelser" #: src/clients/compute/utils.cpp:660 src/clients/data/arccp.cpp:617 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:114 #: src/clients/data/arcrename.cpp:125 src/clients/data/arcrm.cpp:139 msgid "perform token authentication for opened connections" msgstr "utför token-autentisering för öppnade förbindelse" #: src/clients/compute/utils.cpp:664 src/clients/credentials/arcproxy.cpp:454 #: src/clients/data/arccp.cpp:622 src/clients/data/arcls.cpp:341 #: src/clients/data/arcmkdir.cpp:119 src/clients/data/arcrename.cpp:130 #: src/clients/data/arcrm.cpp:144 msgid "force using CA certificates configuration provided by OpenSSL" msgstr "" "framtvinga användandet av CA-certifikat-inställningar tillhandahÃ¥llna av " "OpenSSL" #: src/clients/compute/utils.cpp:668 src/clients/credentials/arcproxy.cpp:459 #: src/clients/data/arccp.cpp:627 src/clients/data/arcls.cpp:346 #: src/clients/data/arcmkdir.cpp:124 src/clients/data/arcrename.cpp:135 #: src/clients/data/arcrm.cpp:149 msgid "" "force using CA certificates configuration for Grid services (typically IGTF)" msgstr "" "framtvinga användandet av CA-certifikat-inställningar för grid-tjänster " "(vanligtvis IGTF)" #: src/clients/compute/utils.cpp:672 src/clients/credentials/arcproxy.cpp:464 msgid "" "force using CA certificates configuration for Grid services (typically IGTF) " "and one provided by OpenSSL" msgstr "" "framtvinga användandet av CA-certifikat-inställningar för grid-tjänster " "(vanligtvis IGTF) och en tillhandahÃ¥llen av OpenSSL" #: src/clients/compute/utils.cpp:681 src/clients/compute/utils.cpp:688 #: src/clients/compute/utils.cpp:695 msgid "Conflicting delegation types specified." msgstr "Motstridiga delegeringstyper angivna." #: src/clients/compute/utils.cpp:727 src/clients/compute/utils.cpp:734 #: src/clients/compute/utils.cpp:741 src/clients/data/utils.cpp:41 #: src/clients/data/utils.cpp:48 src/clients/data/utils.cpp:55 msgid "Conflicting authentication types specified." msgstr "Motstridiga autentiseringstyper angivna." #: src/clients/credentials/arcproxy.cpp:151 #, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "Det finns %d användarcertifikat i NSS-databasen" #: src/clients/credentials/arcproxy.cpp:167 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "Nummer %d är med smeknamn: %s%s" #: src/clients/credentials/arcproxy.cpp:176 #, c-format msgid " expiration time: %s " msgstr " giltig till: %s " #: src/clients/credentials/arcproxy.cpp:180 #, c-format msgid " certificate dn: %s" msgstr " certifikat-dn: %s" #: src/clients/credentials/arcproxy.cpp:181 #, c-format msgid " issuer dn: %s" msgstr " utfärdar-dn: %s" #: src/clients/credentials/arcproxy.cpp:182 #, c-format msgid " serial number: %d" msgstr " serie-nummer: %d" #: src/clients/credentials/arcproxy.cpp:186 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "Välj det som du vill använda (1-%d): " #: src/clients/credentials/arcproxy.cpp:251 msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." 
msgstr "" "arcproxy-kommandot skapar en proxy frÃ¥n ett nyckel/certifikat-par som sedan\n" "kan användas för att komma Ã¥t gridresurser." #: src/clients/credentials/arcproxy.cpp:253 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start\n" " from now)\n" "\n" " validityEnd=time\n" "\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and\n" " validityEnd not specified, the default is 12 hours for local proxy, and\n" " 168 hours for delegated proxy on myproxy server)\n" "\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, " "the\n" " default is the minimum value of 12 hours and validityPeriod)\n" "\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value\n" " of 12 hours and validityPeriod (which is lifetime of the delegated proxy " "on\n" " myproxy server))\n" "\n" " proxyPolicy=policy content\n" "\n" " proxyPolicyFile=policy file\n" "\n" " keybits=number - length of the key to generate. Default is 2048 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" "\n" " signingAlgorithm=name - signing algorithm to use for signing public key " "of\n" " proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256,\n" " sha384, sha512 and inherit (use algorithm of signing certificate). " "Default\n" " is inherit. With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" "\n" " identity - identity subject name of proxy certificate.\n" "\n" " issuer - issuer subject name of proxy certificate.\n" "\n" " ca - subject name of CA which issued initial certificate.\n" "\n" " path - file system path to file containing proxy.\n" "\n" " type - type of proxy certificate.\n" "\n" " validityStart - timestamp when proxy validity starts.\n" "\n" " validityEnd - timestamp when proxy validity ends.\n" "\n" " validityPeriod - duration of proxy validity in seconds.\n" "\n" " validityLeft - duration of proxy validity left in seconds.\n" "\n" " vomsVO - VO name represented by VOMS attribute\n" "\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" "\n" " vomsIssuer - subject of service which issued VOMS certificate\n" "\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" "\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" "\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" "\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" "\n" " proxyPolicy\n" "\n" " keybits - size of proxy certificate key in bits.\n" "\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" "\n" " myproxy - for accessing credentials at MyProxy service\n" "\n" " myproxynew - for creating credentials at MyProxy service\n" "\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" "\n" " int - interactively request password from console\n" "\n" " stdin - read password from standard input delimited by newline\n" "\n" " file:filename - read password from file 
named filename\n" "\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported." msgstr "" "Dessa begränsningar stöds:\n" " validityStart=tid (t.ex. 2008-05-29T10:20:30Z; om ej angivet, börjar\n" " giltighetstiden nu)\n" "\n" " validityEnd=tid\n" "\n" " validityPeriod=tid (t.ex. 43200 eller 12h eller 12H; om varken " "validityPeriod\n" " eller validityEnd är angivet, är förval 12 timmar för lokal proxy och\n" " 168 timmar för delegerad proxy pÃ¥ myproxy-server)\n" "\n" " vomsACvalidityPeriod=tid (t.ex. 43200 eller 12h eller 12H; om ej angivet, " "är\n" " förval minimum av 12 timmar och validityPeriod)\n" "\n" " myproxyvalidityPeriod=tid (livstid för proxyer som delegeras av\n" " myproxy-server, t.ex. 43200 eller 12h eller 12H; om ej angivet, är\n" " förval minimum av 12 timmar och validityPeriod (vilket är livstiden för\n" " den delegerade proxyn pÃ¥ myproxy-servern))\n" "\n" " proxyPolicy=policy-text\n" "\n" " proxyPolicyFile=policy-fil\n" "\n" " keybits=nummer - längd för den genererade nyckeln. Förval är 2048 bitar.\n" " Särskilt värde 'inherit' betyder att det signerande certifikatets\n" " nyckellängd används.\n" "\n" " signingAlgorithm=namn - signeringsalgoritm att använda för att signera\n" " proxyns publika nyckel. Möjliga värden är sha1, sha2 (alias för sha256),\n" " sha224, sha256, sha384, sha512 och inherit (använd det signerande\n" " certifikatets algoritm). Förval är inherit.\n" " PÃ¥ gamla system, är endast sha1 möjligt.\n" "\n" "Informationspunker som stöds är:\n" " subject - proxycertifikatets subjektnamn.\n" "\n" " identity - proxycertifikatets identitets-subjektnamn.\n" "\n" " issuer - proxycertifikatets utfärdar-subjektnamn.\n" "\n" " ca - subjektnamn för CA som utfärdade det ursprungliga certifikatet.\n" "\n" " path - filsystem-sökväg till fil som innehÃ¥ller proxyn.\n" "\n" " type - typ av proxycertifikat.\n" "\n" " validityStart - klockslag dÃ¥ proxyns giltighetstid börjar.\n" "\n" " validityEnd - klockslag dÃ¥ proxyns giltighetstid slutar.\n" "\n" " validityPeriod - längd pÃ¥ proxyns giltighetstid i sekunder.\n" "\n" " validityLeft - kvarvarande längd pÃ¥ proxyns giltighetstid i sekunder.\n" "\n" " vomsVO - VO-namn representerat av VOMS-attribut\n" "\n" " vomsSubject - subjekt för certifikat för vilket VOMS-attribut utfärdats\n" "\n" " vomsIssuer - subjekt för tjänst som utfärdat VOMS-certifikat\n" "\n" " vomsACvalidityStart - klockslag dÃ¥ VOMS-attributets giltighetstid börjar.\n" "\n" " vomsACvalidityEnd - klockslag dÃ¥ VOMS-attributets giltighetstid slutar.\n" "\n" " vomsACvalidityPeriod - längd pÃ¥ VOMS-attributets giltighetstid i " "sekunder.\n" "\n" " vomsACvalidityLeft - kvarvarande längd pÃ¥ VOMS-attributets giltighetstid\n" " i sekunder.\n" "\n" " proxyPolicy\n" "\n" " keybits - proxycertifikatets nyckellängd i bitar.\n" "\n" " signingAlgorithm - algoritm som användes för att signera " "proxycertifikatet.\n" "\n" "Informationspunkterna skrivs i begärd ordning separerade av nyrad.\n" "Om en punkt har mer än ett värde skrivs dessa pÃ¥ samma rad separerade av |.\n" "\n" "Lösenordsdestinationer som stöds är:\n" " key - för att läsa privat nyckel\n" "\n" " myproxy - för att komma Ã¥t referens pÃ¥ myproxy-tjänst\n" "\n" " myproxynew - för att skapa referens pÃ¥ myproxy-tjänst\n" "\n" " all - för alla användningsomrÃ¥den.\n" "\n" "Lösenordskällor som stöds är:\n" " sträng mellan citattecken (\"lösenord\") - explicit angivet lösenord\n" "\n" " int - interaktiv begäran av lösenord frÃ¥n konsol\n" "\n" " stdin - läs 
lösenord frÃ¥n standard input avgränsat av nyrad\n" "\n" " file:filnamn - läs lösenord frÃ¥n fil med namn filnamn\n" "\n" " stream:# - läs lösenord frÃ¥n input stream nummer #.\n" " För närvarande stöds endast 0 (standard input)." #: src/clients/credentials/arcproxy.cpp:315 msgid "path to the proxy file" msgstr "sökväg till proxyfilen" #: src/clients/credentials/arcproxy.cpp:316 #: src/clients/credentials/arcproxy.cpp:320 #: src/clients/credentials/arcproxy.cpp:324 #: src/clients/credentials/arcproxy.cpp:328 #: src/clients/credentials/arcproxy.cpp:332 #: src/clients/credentials/arcproxy.cpp:336 src/clients/data/arccp.cpp:560 msgid "path" msgstr "sökväg" #: src/clients/credentials/arcproxy.cpp:319 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formatted" msgstr "" "sökväg till certifikatfilen, kan vara i endera PEM-, DER- eller PKCS12-format" #: src/clients/credentials/arcproxy.cpp:323 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" "sökväg till privata-nyckel-filen, om certifikatet är i PKCS12-format behöver " "inte den privata nyckeln anges" #: src/clients/credentials/arcproxy.cpp:327 msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "" "sökväg till katalogen med betrodda certifikat, behövs endast för VOMS-klient-" "funktionalitet" #: src/clients/credentials/arcproxy.cpp:331 msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "" "sökväg till huvudkatalogen för VOMS *.lsc-filer, behövs endast för VOMS-" "klient-funktionalitet" #: src/clients/credentials/arcproxy.cpp:335 msgid "path to the VOMS server configuration file" msgstr "sökväg till VOMS-server-inställningsfilen" #: src/clients/credentials/arcproxy.cpp:339 msgid "" "voms<:command>. Specify VOMS server\n" " More than one VOMS server can be specified like this:\n" " --voms VOa:command1 --voms VOb:command2.\n" " :command is optional, and is used to ask for specific attributes (e.g: " "roles)\n" " command options are:\n" "\n" " all --- put all of this DN's attributes into AC;\n" "\n" " list --- list all of the DN's attribute, will not create AC " "extension;\n" "\n" " /Role=yourRole --- specify the role, if this DN\n" " has such a role, the role will be put into AC;\n" "\n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN\n" " has such a role, the role will be put into AC.\n" "\n" " If this option is not specified values from configuration " "files are used.\n" " To avoid anything to be used specify -S with empty value.\n" msgstr "" "voms<:kommando>. Ange VOMS-server\n" " Mer än en VOMS-server kan anges pÃ¥ detta sätt:\n" " --voms VOa:kommando1 --voms VOb:kommando2).\n" " :kommando är valfritt, och används för att begära specifika attribut (t." "ex. 
roller)\n" " kommandoalternativ är:\n" "\n" " all --- lägg till detta DNs alla attribut i AC;\n" "\n" " list --- lista detta DNs alla attribut, skapar inte AC-" "tillägg;\n" "\n" " /Role=dinRoll --- ange roll, om detta DN har en sÃ¥dan roll,\n" " kommer rollen att läggas till i AC;\n" "\n" " /vonamn/gruppnamn/Role=dinRoll --- ange VO, grupp och roll; " "om\n" " detta DN har en sÃ¥dan roll, kommer rollen att läggas till i " "AC.\n" "\n" " Om detta alternativ inte anges kommer värden frÃ¥n\n" " inställningsfilerna att användas.\n" " För att undvika att dessa används ange -S med tomt värde.\n" #: src/clients/credentials/arcproxy.cpp:356 msgid "" "group<:role>. Specify ordering of attributes\n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester\n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester\n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" "grupp<:roll>. Ange attributens ordning\n" " Exempel: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester\n" " eller: --order /knowarc.eu/coredev:Developer --order /knowarc." "eu/testers:Tester\n" " Notera att det saknar mening att ange ordningen om du har tvÃ¥ eller fler " "olika VOMS-servrar angivna" #: src/clients/credentials/arcproxy.cpp:363 msgid "use GSI communication protocol for contacting VOMS services" msgstr "använd GSI-kommunikationsprotokollet för att kontakta VOMS-tjänster." #: src/clients/credentials/arcproxy.cpp:366 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access\n" " Note for RESTful access, 'list' command and multiple VOMS " "servers are not supported\n" msgstr "" "använd HTTP-kommunikationsprotokollet för att kontakta VOMS-tjänster som " "erbjuder RESTful Ã¥tkomst\n" " Notera att för RESTful Ã¥tkomst stöds inte 'list'-kommandot " "och mer än en VOMS-server\n" #: src/clients/credentials/arcproxy.cpp:370 msgid "" "use old communication protocol for contacting VOMS services instead of " "RESTful access\n" msgstr "" "använd det gamla kommunikationsprotokollet för att kontakta VOMS-tjänster " "istället för RESTful Ã¥tkomst\n" #: src/clients/credentials/arcproxy.cpp:373 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "detta alternativ gör ingenting (gamla GSI-proxyer stöds inte längre)" #: src/clients/credentials/arcproxy.cpp:376 msgid "print all information about this proxy." msgstr "skriv ut all information om denna proxy." #: src/clients/credentials/arcproxy.cpp:379 msgid "print selected information about this proxy." msgstr "skriv ut utvald information om denna proxy." #: src/clients/credentials/arcproxy.cpp:382 msgid "remove proxy" msgstr "ta bort proxy" #: src/clients/credentials/arcproxy.cpp:385 msgid "" "username to MyProxy server (if missing subject of user certificate is used)" msgstr "" "användarnamn till myproxy-server (om detta saknas används subjektet frÃ¥n " "användarcertifikatet)" #: src/clients/credentials/arcproxy.cpp:390 msgid "" "don't prompt for a credential passphrase, when retrieving a credential from " "a MyProxy server.\n" " The precondition of this choice is that the credential was PUT onto\n" " the MyProxy server without a passphrase by using the\n" " -R (--retrievable_by_cert) option.\n" " This option is specific to the GET command when contacting a Myproxy\n" " server." 
msgstr "" "frÃ¥ga inte efter ett referens-lösenord när en referens hämtas frÃ¥n en " "myproxy-server.\n" " En förutsättning för detta val är att referensen har satts pÃ¥\n" " myproxy-servern utan lösenord genom att använda alternativet\n" " -R (--retrievable_by_cert).\n" " Detta alternativ är specifikt för GET-kommandot när en myproxy-server\n" " kontaktas." #: src/clients/credentials/arcproxy.cpp:401 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific to the PUT command when contacting a Myproxy\n" " server." msgstr "" "tillÃ¥t angiven entitet att hämta referens utan lösenord.\n" " Detta alternativ är specifikt för PUT-kommandot när en myproxy-server\n" " kontaktas." #: src/clients/credentials/arcproxy.cpp:407 msgid "hostname[:port] of MyProxy server" msgstr "värdnamn[:port] för myproxy-server" #: src/clients/credentials/arcproxy.cpp:412 msgid "" "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put a delegated credentials to the MyProxy server;\n" "\n" " GET -- get a delegated credentials from the MyProxy server;\n" "\n" " INFO -- get and present information about credentials stored " "at the MyProxy server;\n" "\n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server;\n" "\n" " DESTROY -- wipe off credentials stored at the MyProxy server;\n" "\n" " Local credentials (certificate and key) are not necessary " "except in case of PUT.\n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " is required to be included in the proxy.\n" msgstr "" "kommando till myproxy-server. Kommandot kan vara PUT, GET, INFO, NEWPASS " "eller DESTROY.\n" " PUT -- lägg upp en delegerad referens pÃ¥ myproxy-servern;\n" "\n" " GET -- hämta en delegerad referens frÃ¥n myproxy-servern;\n" "\n" " INFO -- hämta och presentera information om referenser lagrade " "pÃ¥ myproxy-servern;\n" "\n" " NEWPASS -- ändra lösenord som skyddar referenser lagrade pÃ¥ " "myproxy-servern;\n" "\n" " DESTROY -- ta bort referenser lagrade pÃ¥ myproxy-servern;\n" "\n" " Lokala referenser (certifikat och nyckel) är inte nödvändiga " "utom vid PUT.\n" " Myproxy-funktionalitet kan användas tillsammans med VOMS-" "funktionalitet.\n" " --voms och --vomses kan användas med GET-kommandot om VOMS-" "attribut\n" " mÃ¥ste inkluderas i proxyn.\n" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "use NSS credential database in default Mozilla profiles, including Firefox, " "Seamonkey and Thunderbird." msgstr "" "använd NSS-referens-databas i förvalda Mozilla-profiler, inklusive Firefox, " "Seamonkey och Thunderbird." #: src/clients/credentials/arcproxy.cpp:431 msgid "proxy constraints" msgstr "proxybegränsningar" #: src/clients/credentials/arcproxy.cpp:435 msgid "password destination=password source" msgstr "lösenordsdestination=lösenordskälla" #: src/clients/credentials/arcproxy.cpp:479 msgid "" "RESTful and old VOMS communication protocols can't be requested " "simultaneously." msgstr "" "RESTful och gammalt VOMS-kommunikationsprotokoll kan inte begäras samtidigt." #: src/clients/credentials/arcproxy.cpp:509 #: src/clients/credentials/arcproxy.cpp:1220 msgid "Failed configuration initialization." msgstr "Misslyckades med att initiera inställningar." #: src/clients/credentials/arcproxy.cpp:544 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." 
msgstr "" "Misslyckades med att hitta certifikat och/eller privat nyckel eller filer " "har olämpliga Ã¥tkomsträttigheter eller ägare." #: src/clients/credentials/arcproxy.cpp:545 #: src/clients/credentials/arcproxy.cpp:557 msgid "You may try to increase verbosity to get more information." msgstr "Du kan försöka att öka debugnivÃ¥n för att fÃ¥ mer information." #: src/clients/credentials/arcproxy.cpp:553 msgid "Failed to find CA certificates" msgstr "Misslyckades med att hitta CA-certifikat" #: src/clients/credentials/arcproxy.cpp:554 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" "Kan inte hitta sökväg till CA-certifikat-katalogen, sätt miljövariabeln " "X509_CERT_DIR, eller cacertificatesdirectory i en inställningsfil." #: src/clients/credentials/arcproxy.cpp:558 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" "CA-certifikat-katalogen behövs för att kontakta VOMS- och myproxy-servrar." #: src/clients/credentials/arcproxy.cpp:570 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" "$X509_VOMS_FILE och $X509_VOMSES är inte tilldelade;\n" "Användaren har inte angivit sökvägen till vomses-informationen;\n" "Det finns inte heller sökväg till vomses i användarens inställningsfil;\n" "Kan inte hitta vomses pÃ¥ förvalda sökvägar: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, och de motsvarande underkatalogerna" #: src/clients/credentials/arcproxy.cpp:615 msgid "Wrong number of arguments!" msgstr "Fel antal argument!" #: src/clients/credentials/arcproxy.cpp:623 #: src/clients/credentials/arcproxy.cpp:647 #: src/clients/credentials/arcproxy.cpp:780 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" "Kan inte hitta sökväg till proxyfilen, sätt miljövariabeln X509_USER_PROXY, " "eller proxypath i en inställningsfil" #: src/clients/credentials/arcproxy.cpp:630 #, c-format msgid "Cannot remove proxy file at %s" msgstr "Kan inte ta bort proxyfil pÃ¥ %s" #: src/clients/credentials/arcproxy.cpp:632 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "Kan inte ta bort proxyfil pÃ¥ %s, eftersom den inte existerar" #: src/clients/credentials/arcproxy.cpp:641 msgid "Bearer token is available. It is preferred for job submission." msgstr "Bärar-token är tillgängligt. Det föredras för jobbinsändning." #: src/clients/credentials/arcproxy.cpp:653 #: src/clients/credentials/arcproxy.cpp:786 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" "Kan inte hitta fil pÃ¥ %s för att hämta proxyn. Se till att denna fil " "existerar." #: src/clients/credentials/arcproxy.cpp:659 #: src/clients/credentials/arcproxy.cpp:792 #, c-format msgid "Cannot process proxy file at %s." msgstr "Kan inte behandla proxyfil pÃ¥ %s." 
#: src/clients/credentials/arcproxy.cpp:662 #, c-format msgid "Subject: %s" msgstr "Subjekt: %s" #: src/clients/credentials/arcproxy.cpp:663 #, c-format msgid "Issuer: %s" msgstr "Utfärdare: %s" #: src/clients/credentials/arcproxy.cpp:664 #, c-format msgid "Identity: %s" msgstr "Identitet: %s" #: src/clients/credentials/arcproxy.cpp:666 msgid "Time left for proxy: Proxy expired" msgstr "Kvarvarande tid för proxy: Proxyns giltighetstid har gÃ¥tt ut" #: src/clients/credentials/arcproxy.cpp:668 msgid "Time left for proxy: Proxy not valid yet" msgstr "Kvarvarande tid för proxy: Proxyn är inte giltig än" #: src/clients/credentials/arcproxy.cpp:670 #, c-format msgid "Time left for proxy: %s" msgstr "Kvarvarande tid för proxy: %s" #: src/clients/credentials/arcproxy.cpp:671 #, c-format msgid "Proxy path: %s" msgstr "Proxysökväg: %s" #: src/clients/credentials/arcproxy.cpp:672 #, c-format msgid "Proxy type: %s" msgstr "Proxytyp: %s" #: src/clients/credentials/arcproxy.cpp:673 #, c-format msgid "Proxy key length: %i" msgstr "Proxyns nyckellängd: %i" #: src/clients/credentials/arcproxy.cpp:674 #, c-format msgid "Proxy signature: %s" msgstr "Proxysignatur: %s" #: src/clients/credentials/arcproxy.cpp:683 msgid "AC extension information for VO " msgstr "AC-tilläggsinformation för VO " #: src/clients/credentials/arcproxy.cpp:686 msgid "Error detected while parsing this AC" msgstr "Fel upptäckt när denna AC tolkades" #: src/clients/credentials/arcproxy.cpp:699 msgid "AC is invalid: " msgstr "AC är ogiltig: " #: src/clients/credentials/arcproxy.cpp:729 #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:222 #, c-format msgid "Malformed VOMS AC attribute %s" msgstr "Felaktigt VOMS-AC-attribut %s" #: src/clients/credentials/arcproxy.cpp:760 msgid "Time left for AC: AC is not valid yet" msgstr "Kvarvarande tid för AC: AC är inte giltig än" #: src/clients/credentials/arcproxy.cpp:762 msgid "Time left for AC: AC has expired" msgstr "Kvarvarande tid för AC: ACs giltighetstid har gÃ¥tt ut" #: src/clients/credentials/arcproxy.cpp:764 #, c-format msgid "Time left for AC: %s" msgstr "Kvarvarande tid för AC: %s" #: src/clients/credentials/arcproxy.cpp:871 #, c-format msgid "Information item '%s' is not known" msgstr "Informationspunkt '%s' är okänd" #: src/clients/credentials/arcproxy.cpp:883 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" "Kan inte hitta sökväg till användarcertifikatet, sätt upp miljövariabeln " "X509_USER_CERT, eller certificatepath i en inställningsfil" #: src/clients/credentials/arcproxy.cpp:887 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" "Kan inte hitta sökväg till privata nyckeln, sätt miljövariabeln " "X509_USER_KEY, eller keypath i en inställningsfil" #: src/clients/credentials/arcproxy.cpp:911 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" "Kan inte tolka uttrycket för lösenordskälla %s det mÃ¥ste vara i formatet " "typ=källa" #: src/clients/credentials/arcproxy.cpp:928 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" "Kan inte tolka lösenordstyp %s. Nuvarande giltiga värden är 'key', " "'myproxy', 'myproxynew' och 'all'." 
#: src/clients/credentials/arcproxy.cpp:943 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int, stdin, stream, file." msgstr "" "Kan inte tolka lösenordskälla %s det mÃ¥ste vara i formatet källtyp eller " "källtyp:data. Giltiga källtyper är int, stdin, stream, file." #: src/clients/credentials/arcproxy.cpp:957 msgid "Only standard input is currently supported for password source." msgstr "Endast standard input är för närvarande giltig för lösenordskälla." #: src/clients/credentials/arcproxy.cpp:962 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int, stdin, " "stream, file." msgstr "" "Kan inte tolka lösenordskälltyp %s. Giltiga källtyper är int, stdin, stream, " "file." #: src/clients/credentials/arcproxy.cpp:1001 msgid "The start, end and period can't be set simultaneously" msgstr "Början, slut och längd kan inte användas samtidigt" #: src/clients/credentials/arcproxy.cpp:1007 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "Starttiden som du angivit: %s kan inte tolkas." #: src/clients/credentials/arcproxy.cpp:1014 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "Längden som du angivit: %s kan inte tolkas." #: src/clients/credentials/arcproxy.cpp:1021 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "Sluttiden som du angivit: %s kan inte tolkas." #: src/clients/credentials/arcproxy.cpp:1030 #, c-format msgid "The end time that you set: %s is before start time: %s." msgstr "Sluttiden du angivit: %s är före starttiden: %s." #: src/clients/credentials/arcproxy.cpp:1041 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "Varning: Starttiden du angivit: %s är före nuvarande tidpunkt: %s" #: src/clients/credentials/arcproxy.cpp:1044 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "Varning: Sluttiden du angivit: %s är före nuvarande tidpunkt: %s" #: src/clients/credentials/arcproxy.cpp:1054 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "VOMS-AC-perioden du angivit: %s kan inte tolkas." #: src/clients/credentials/arcproxy.cpp:1072 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "Myproxy-perioden du angivit: %s kan inte tolkas." #: src/clients/credentials/arcproxy.cpp:1087 #, c-format msgid "The keybits constraint is wrong: %s." msgstr "Begränsningen av antalet bitar i nyckeln är felaktig: %s." 
#: src/clients/credentials/arcproxy.cpp:1101 msgid "The NSS database can not be detected in the Firefox profile" msgstr "NSS-databasen kan inte upptäckas i Firefox-profilen" #: src/clients/credentials/arcproxy.cpp:1110 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" "Det finns %d NSS-baskataloger där certifikat, nycklar och moduldatabaser " "finns" #: src/clients/credentials/arcproxy.cpp:1112 #, c-format msgid "Number %d is: %s" msgstr "Nummer %d är: %s" #: src/clients/credentials/arcproxy.cpp:1114 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "Välj den NSS-databas du vill använda (1-%d): " #: src/clients/credentials/arcproxy.cpp:1130 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "NSS-databas som kommer att användas: %s\n" #: src/clients/credentials/arcproxy.cpp:1201 #, c-format msgid "Certificate to use is: %s" msgstr "Certifikat som kommer att användas är: %s" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxy.cpp:1366 msgid "Proxy generation succeeded" msgstr "Proxygenerering lyckades" #: src/clients/credentials/arcproxy.cpp:1253 #: src/clients/credentials/arcproxy.cpp:1367 #, c-format msgid "Your proxy is valid until: %s" msgstr "Din proxy är giltig till: %s" #: src/clients/credentials/arcproxy.cpp:1272 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" "De gamla GSI-proxyerna stöds inte längre. Använd inte alternativ -O/--old." #: src/clients/credentials/arcproxy.cpp:1291 src/hed/mcc/tls/MCCTLS.cpp:182 #: src/hed/mcc/tls/MCCTLS.cpp:215 src/hed/mcc/tls/MCCTLS.cpp:241 msgid "VOMS attribute parsing failed" msgstr "Tolkning av VOMS-attribut misslyckades" #: src/clients/credentials/arcproxy.cpp:1293 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "Myproxy-server returnerade inte proxy med VOMS AC inkluderat" #: src/clients/credentials/arcproxy.cpp:1314 msgid "Proxy generation failed: No valid certificate found." msgstr "Proxygenerering misslyckades: Hittade inget giltigt certifikat." #: src/clients/credentials/arcproxy.cpp:1319 msgid "Proxy generation failed: No valid private key found." msgstr "Proxygenerering misslyckades: Hittade ingen giltig privat nyckel." #: src/clients/credentials/arcproxy.cpp:1323 #, c-format msgid "Your identity: %s" msgstr "Din identitet: %s" #: src/clients/credentials/arcproxy.cpp:1325 msgid "Proxy generation failed: Certificate has expired." msgstr "Proxygenerering misslyckades: Certifikatets giltighetstid har gÃ¥tt ut." #: src/clients/credentials/arcproxy.cpp:1329 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "Proxygenerering misslyckades: Certifikatet är inte giltigt än." #: src/clients/credentials/arcproxy.cpp:1340 msgid "Proxy generation failed: Failed to create temporary file." msgstr "Proxygenerering misslyckades: Misslyckades med att skapa temporär fil." #: src/clients/credentials/arcproxy.cpp:1348 msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "" "Proxygenerering misslyckades: Misslyckades med att hämta VOMS-information." 
#: src/clients/credentials/arcproxy_myproxy.cpp:100 msgid "Succeeded to get info from MyProxy server" msgstr "Lyckades med att hämta information frÃ¥n myproxy-server" #: src/clients/credentials/arcproxy_myproxy.cpp:144 msgid "Succeeded to change password on MyProxy server" msgstr "Lyckades med att ändra lösenord pÃ¥ myproxy-server" #: src/clients/credentials/arcproxy_myproxy.cpp:185 msgid "Succeeded to destroy credential on MyProxy server" msgstr "Lyckades med att ta bort referens pÃ¥ myproxy-server" #: src/clients/credentials/arcproxy_myproxy.cpp:241 #, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "Lyckades med att hämta en proxy i %s frÃ¥n myproxy-server %s" #: src/clients/credentials/arcproxy_myproxy.cpp:294 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "Lyckades med att lägga upp en proxy pÃ¥ myproxy-server" #: src/clients/credentials/arcproxy_proxy.cpp:93 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" "Misslyckades med att lägga till VOMS-AC-tillägg. Din proxy kan vara " "ofullständig." #: src/clients/credentials/arcproxy_voms.cpp:63 msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "" "Misslyckades med att behandla VOMS-inställningar eller hittade inga lämpliga " "inställningsrader." #: src/clients/credentials/arcproxy_voms.cpp:75 #, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "Misslyckades med att tolka begärd VOMS-livstid: %s" #: src/clients/credentials/arcproxy_voms.cpp:93 #, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "Kan inte hämta VOMS-serveradressinformation frÃ¥n vomsesrad: \"%s\"" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "Kontaktar VOMS-server (med namn %s): %s pÃ¥ port: %s" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "Misslyckades med att tolka begärt VOMS-serverportnummer: %s" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "Listfunktionalitet stöds inte av RESTful-VOMS-gränssnittet" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." msgstr "" "VOMS-servern med informationen:\n" "\t%s\n" "kan inte nÃ¥s, säkerställ att den är tillgänglig." 
#: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" "Insamlade felet är:\n" "\t%s" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, c-format msgid "No valid response from VOMS server: %s" msgstr "Inget giltigt svar frÃ¥n VOMS-server: %s" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "Listfunktionalitet stöds inte av legacy-VOMS-gränssnittet" #: src/clients/credentials/arcproxy_voms.cpp:167 #, c-format msgid "Failed to parse VOMS command: %s" msgstr "Misslyckades med att tolka VOMS-kommando: %s" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but none of " "them can be reached, or can return a valid message." msgstr "" "Det finns %d servrar med samma namn: %s i din vomses-fil, men ingen av dem " "kan nÃ¥s eller returnera ett giltigt meddelande." #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:315 #, c-format msgid "Current transfer FAILED: %s" msgstr "Nuvarande överföring MISSLYCKADES: %s" #: src/clients/data/arccp.cpp:81 src/clients/data/arccp.cpp:119 #: src/clients/data/arccp.cpp:317 src/clients/data/arcls.cpp:214 #: src/clients/data/arcmkdir.cpp:62 src/clients/data/arcrename.cpp:78 #: src/clients/data/arcrm.cpp:83 msgid "This seems like a temporary error, please try again later" msgstr "Detta verkar vara ett tillfälligt fel, försök igen senare" #: src/clients/data/arccp.cpp:96 src/clients/data/arccp.cpp:100 #: src/clients/data/arccp.cpp:133 src/clients/data/arccp.cpp:137 #: src/clients/data/arccp.cpp:343 src/clients/data/arccp.cpp:348 #: src/clients/data/arcls.cpp:125 src/clients/data/arcmkdir.cpp:30 #: src/clients/data/arcrename.cpp:31 src/clients/data/arcrename.cpp:35 #: src/clients/data/arcrm.cpp:38 #, c-format msgid "Invalid URL: %s" msgstr "Ogiltig URL: %s" #: src/clients/data/arccp.cpp:112 msgid "Third party transfer is not supported for these endpoints" msgstr "Tredjepartsöverföring stöds inte för dessa slutpunkter" #: src/clients/data/arccp.cpp:114 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" "Protokoll stöds inte - kontrollera att relevanta gfal2-pluginer\n" " har installerats (gfal2-plugin-* paket)" #: src/clients/data/arccp.cpp:117 #, c-format msgid "Transfer FAILED: %s" msgstr "Överföring MISSLYCKADES: %s" #: src/clients/data/arccp.cpp:145 src/clients/data/arccp.cpp:171 #: src/clients/data/arccp.cpp:359 src/clients/data/arccp.cpp:387 #, c-format msgid "Can't read list of sources from file %s" msgstr "Kan inte läsa lista med källor frÃ¥n filen %s" #: src/clients/data/arccp.cpp:150 src/clients/data/arccp.cpp:186 #: src/clients/data/arccp.cpp:364 src/clients/data/arccp.cpp:403 #, c-format msgid "Can't read list of destinations from file %s" msgstr "Kan inte läsa lista med destinationer frÃ¥n filen %s" #: src/clients/data/arccp.cpp:155 src/clients/data/arccp.cpp:370 msgid "Numbers of sources and destinations do not match" msgstr "Antalet källor och destinationer stämmer inte överens" #: src/clients/data/arccp.cpp:200 msgid "Fileset registration is not supported yet" msgstr "Filuppsättningsregistrering stöds inte ännu" #: src/clients/data/arccp.cpp:206 
src/clients/data/arccp.cpp:279 #: src/clients/data/arccp.cpp:441 #, c-format msgid "Unsupported source url: %s" msgstr "Käll-URL stöds inte: %s" #: src/clients/data/arccp.cpp:210 src/clients/data/arccp.cpp:283 #, c-format msgid "Unsupported destination url: %s" msgstr "Destinations-URL stöds inte: %s" #: src/clients/data/arccp.cpp:217 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" "För registrering mÃ¥ste källan vara en vanlig URL och destinationen en " "indexeringstjänst" #: src/clients/data/arccp.cpp:227 #, c-format msgid "Could not obtain information about source: %s" msgstr "Kunde inte erhÃ¥lla information om källa: %s" #: src/clients/data/arccp.cpp:234 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" "Källans metadata stämmer inte överens med existerande destination. Använd " "alternativet --force för att överstyra detta." #: src/clients/data/arccp.cpp:246 msgid "Failed to accept new file/destination" msgstr "Misslyckades med att acceptera ny fil/destination" #: src/clients/data/arccp.cpp:252 src/clients/data/arccp.cpp:258 #, c-format msgid "Failed to register new file/destination: %s" msgstr "Misslyckades med att registrera ny fil/destination: %s" #: src/clients/data/arccp.cpp:421 msgid "Fileset copy to single object is not supported yet" msgstr "Kopiering av filuppsättning till ett enstaka objekt stöds ej ännu" #: src/clients/data/arccp.cpp:431 msgid "Can't extract object's name from source url" msgstr "Kan ej extrahera objektets namn frÃ¥n käll-URL" #: src/clients/data/arccp.cpp:450 #, c-format msgid "%s. Cannot copy fileset" msgstr "%s. Kan inte kopiera filuppsättning" #: src/clients/data/arccp.cpp:460 src/hed/libs/compute/ExecutionTarget.cpp:256 #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Name: %s" msgstr "Namn: %s" #: src/clients/data/arccp.cpp:463 #, c-format msgid "Source: %s" msgstr "Källa: %s" #: src/clients/data/arccp.cpp:464 #, c-format msgid "Destination: %s" msgstr "Destination: %s" #: src/clients/data/arccp.cpp:470 msgid "Current transfer complete" msgstr "Nuvarande överföring slutförd" #: src/clients/data/arccp.cpp:473 msgid "Some transfers failed" msgstr "NÃ¥gra överföringar misslyckades" #: src/clients/data/arccp.cpp:483 #, c-format msgid "Directory: %s" msgstr "Katalog: %s" #: src/clients/data/arccp.cpp:503 msgid "Transfer complete" msgstr "Överföring slutförd" #: src/clients/data/arccp.cpp:522 msgid "source destination" msgstr "källa destination" #: src/clients/data/arccp.cpp:523 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" "arccp-kommandot kopierar filer till, frÃ¥n och mellan gridlagringsresurser." #: src/clients/data/arccp.cpp:528 msgid "" "use passive transfer (off by default if secure is on, on by default if " "secure is not requested)" msgstr "" "använd passiv överföring (förvalt av om säker överföring begärts, förvalt pÃ¥ " "om säker överföring inte begärts" #: src/clients/data/arccp.cpp:534 msgid "do not try to force passive transfer" msgstr "försök inte tvinga fram passiv överföring" #: src/clients/data/arccp.cpp:539 msgid "force overwrite of existing destination" msgstr "framtvinga överskrivning av existerande destination" #: src/clients/data/arccp.cpp:543 msgid "show progress indicator" msgstr "visa fortskridandeindikator" #: src/clients/data/arccp.cpp:548 msgid "" "do not transfer, but register source into destination. 
destination must be a " "meta-url." msgstr "" "gör inte överföringen, men registrera källan i destinationen. destinationen " "mÃ¥ste vara en meta-url" #: src/clients/data/arccp.cpp:554 msgid "use secure transfer (insecure by default)" msgstr "använd säker överföring (osäker som förval)" #: src/clients/data/arccp.cpp:559 msgid "path to local cache (use to put file into cache)" msgstr "sökväg till lokalt cache (använd för att lägga in fil i cache)" #: src/clients/data/arccp.cpp:564 src/clients/data/arcls.cpp:290 msgid "operate recursively" msgstr "arbeta rekursivt" #: src/clients/data/arccp.cpp:569 src/clients/data/arcls.cpp:295 msgid "operate recursively up to specified level" msgstr "arbeta rekursivt upp till den angivna nivÃ¥n" #: src/clients/data/arccp.cpp:570 src/clients/data/arcls.cpp:296 msgid "level" msgstr "nivÃ¥" #: src/clients/data/arccp.cpp:574 msgid "number of retries before failing file transfer" msgstr "antal försök innan överföring misslyckas" #: src/clients/data/arccp.cpp:575 msgid "number" msgstr "nummer" #: src/clients/data/arccp.cpp:579 msgid "" "physical location to write to when destination is an indexing service. Must " "be specified for indexing services which do not automatically generate " "physical locations. Can be specified multiple times - locations will be " "tried in order until one succeeds." msgstr "" "fysisk plats att skriva till när destinationen är en indexeringstjänst. " "MÃ¥ste anges för indexeringstjänster som inte genererar fysiska platser " "automatiskt. Kan anges flera gÃ¥nger - platser kommer att provas i angiven " "ordning tills en lyckas." #: src/clients/data/arccp.cpp:587 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" "utför tredjepartsöverföring, där destinationen läser frÃ¥n källan (endast " "tillgänglig med GFAL-plugin)" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:312 #: src/clients/data/arcmkdir.cpp:90 src/clients/data/arcrename.cpp:101 #: src/clients/data/arcrm.cpp:115 msgid "list the available plugins (protocols supported)" msgstr "lista tillgängliga pluginer (protokoll som stöds)" #: src/clients/data/arccp.cpp:632 src/clients/data/arcls.cpp:351 #: src/clients/data/arcmkdir.cpp:129 src/clients/data/arcrename.cpp:140 #: src/clients/data/arcrm.cpp:154 msgid "" "force using both CA certificates configuration for Grid services (typically " "IGTF) and those provided by OpenSSL" msgstr "" "framtvinga användandet av bÃ¥de CA-certifikat-inställningar för grid-tjänster " "(vanligtvis IGTF) och de som tillhandahÃ¥lls av OpenSSL" #: src/clients/data/arccp.cpp:667 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:176 #: src/clients/data/arcrm.cpp:191 msgid "Protocol plugins available:" msgstr "Tillgängliga protokollpluginer:" #: src/clients/data/arccp.cpp:715 src/clients/data/arcls.cpp:435 #: src/clients/data/arcmkdir.cpp:212 src/clients/data/arcrename.cpp:222 #: src/clients/data/arcrm.cpp:239 msgid "Wrong number of parameters specified" msgstr "Fel antal parametrar angivna" #: src/clients/data/arccp.cpp:720 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "Alternativen 'p' och 'n' kan inte användas samtidigt" #: src/clients/data/arcls.cpp:131 src/clients/data/arcmkdir.cpp:36 #: src/clients/data/arcrm.cpp:45 #, c-format msgid "Can't read list of locations from file %s" msgstr "Kan inte läsa platslista frÃ¥n fil %s" #: src/clients/data/arcls.cpp:146 src/clients/data/arcmkdir.cpp:51 #: 
src/clients/data/arcrename.cpp:63 msgid "Unsupported URL given" msgstr "Angiven URL stöds inte" #: src/clients/data/arcls.cpp:217 msgid "Warning: Failed listing files but some information is obtained" msgstr "" "Varning: Misslyckades med att lista filer men viss information har erhÃ¥llits" #: src/clients/data/arcls.cpp:271 src/clients/data/arcmkdir.cpp:79 msgid "url" msgstr "url" #: src/clients/data/arcls.cpp:272 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." msgstr "" "arcls-kommandot används för att lista filer pÃ¥ gridlagringsresurser och i\n" "filindexkataloger." #: src/clients/data/arcls.cpp:281 msgid "show URLs of file locations" msgstr "visa URLer till filens registrerade kopior" #: src/clients/data/arcls.cpp:285 msgid "display all available metadata" msgstr "visa all tillgänglig metadata" #: src/clients/data/arcls.cpp:299 msgid "" "show only description of requested object, do not list content of directories" msgstr "" "visa endast beskrivning av begärt objekt, lista inte innehÃ¥ll i kataloger" #: src/clients/data/arcls.cpp:303 msgid "treat requested object as directory and always try to list content" msgstr "behandla begärt objekt som en katalog och försök alltid lista innehÃ¥ll" #: src/clients/data/arcls.cpp:307 msgid "check readability of object, does not show any information about object" msgstr "kontrollera objektets läsbarhet, visar ingen information om objektet" #: src/clients/data/arcls.cpp:440 msgid "Incompatible options --nolist and --forcelist requested" msgstr "Inkompatibla alternativ --nolist och --forcelist har begärts" #: src/clients/data/arcls.cpp:445 msgid "Requesting recursion and --nolist has no sense" msgstr "Att begära rekursion och --nolist saknar mening" #: src/clients/data/arcmkdir.cpp:80 msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" "arcmkdir-kommandot skapar kataloger pÃ¥ gridlagringsresurser och kataloger." #: src/clients/data/arcmkdir.cpp:85 msgid "make parent directories as needed" msgstr "skapa föräldrakataloger efter behov" #: src/clients/data/arcrename.cpp:43 msgid "Both URLs must have the same protocol, host and port" msgstr "BÃ¥da URLerna mÃ¥ste ha samma protokoll, värd och port" #: src/clients/data/arcrename.cpp:53 msgid "Cannot rename to or from root directory" msgstr "Kan inte byta namn till eller frÃ¥n rotkatalogen" #: src/clients/data/arcrename.cpp:57 msgid "Cannot rename to the same URL" msgstr "Kan inte byta namn till samma URL" #: src/clients/data/arcrename.cpp:95 msgid "old_url new_url" msgstr "gammal_url ny_url" #: src/clients/data/arcrename.cpp:96 msgid "The arcrename command renames files on grid storage elements." msgstr "arcrename-kommandot byter namn pÃ¥ filer pÃ¥ gridlagringsresurser." #: src/clients/data/arcrm.cpp:60 #, c-format msgid "Unsupported URL given: %s" msgstr "Angiven URL stöds inte: %s" #: src/clients/data/arcrm.cpp:103 msgid "url [url ...]" msgstr "url [url ...]" #: src/clients/data/arcrm.cpp:104 msgid "The arcrm command deletes files on grid storage elements." msgstr "arcrm-kommandot tar bort filer pÃ¥ gridlagringsresurser." #: src/clients/data/arcrm.cpp:109 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" "ta bort logiska filnamnsregistreringen även om inte alla fysiska kopior " "tagits bort" #: src/clients/data/utils.cpp:18 msgid "Proxy expired. Please run 'arcproxy'!" msgstr "Proxyns livstid har gÃ¥tt ut. Kör 'arcproxy'!" 
#: src/clients/data/utils.cpp:81 src/clients/data/utils.cpp:90 #, c-format msgid "Unable to handle %s" msgstr "Kan inte hantera %s" #: src/clients/data/utils.cpp:82 src/clients/data/utils.cpp:91 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "Ogiltiga referenser, kontrollera proxy och/eller CA-certifikat" #: src/clients/data/utils.cpp:88 msgid "Proxy expired" msgstr "Proxyns livstid har gÃ¥tt ut" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "Kan inte initiera ARCHERY-domännamn för förfrÃ¥gan" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:60 msgid "Cannot create resolver from /etc/resolv.conf" msgstr "Kan inte skapa resolver frÃ¥n /etc/resolv.conf" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "Kan inte frÃ¥ga om tjänsteslutpunkt-TXT-poster frÃ¥n DNS" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." msgstr "Kan inte tolka tjänsteslutpunkt-TXT-poster." #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:124 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "Hittade fel tjänste-post-fält \"%s\" i \"%s\"" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:129 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "Hittade felaktig ARCHERY-post (slutpunkts-URL är inte definierad): %s" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:134 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "Hittade felaktig ARCHERY-post (slutpunktstyp är inte definierad): %s" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:138 #, c-format msgid "Found service endpoint %s (type %s)" msgstr "Hittade tjänsteslutpunkt %s (typ %s)" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:157 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" "Status för tjänsteslutpunkt \"%s\" är satt till inaktiv i ARCHERY. Hoppar " "över." #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:229 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:149 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "Jobb %s har ingen associerad delegering. Kan inte förnya sÃ¥dana jobb." #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:241 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:161 #, c-format msgid "Job %s failed to renew delegation %s." msgstr "Jobb %s misslyckades med att förnya delegering %s." 
#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:314 #, c-format msgid "Failed to process jobs - error response: %s" msgstr "Misslyckades med att behandla jobb - felsvar: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:316 #, c-format msgid "Failed to process jobs - wrong response: %u" msgstr "Misslyckades med att behandla jobb - felaktigt svar: %u" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:318 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:327 #, c-format msgid "Content: %s" msgstr "InnehÃ¥ll: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:321 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:333 #, c-format msgid "Failed to process job: %s" msgstr "Misslyckades med att behandla jobb: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:331 msgid "Failed to process jobs - failed to parse response" msgstr "Misslyckades med att behandla jobb - misslyckades med att tolka svar" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:344 #, c-format msgid "No response returned: %s" msgstr "Inget svar returnerades: %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:368 #, c-format msgid "Failed to process job: %s - %s %s" msgstr "Misslyckades med att behandla jobb: %s - %s %s" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:455 #, c-format msgid "Failed retrieving job description for job: %s" msgstr "Misslyckades med att hämta jobbeskrivning för jobb: %s" #: src/hed/acc/ARCREST/JobListRetrieverPluginREST.cpp:29 msgid "Collecting Job (A-REX REST jobs) information." msgstr "Samlar in jobbinformation (A-REX REST-jobb)." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:49 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:80 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:115 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:149 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:189 msgid "Failed to communicate to delegation endpoint." msgstr "Misslyckades med att kommunicera med delegeringstjänst." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:54 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:85 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:120 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:154 #, c-format msgid "Unexpected response code from delegation endpoint - %u" msgstr "Oväntad svarskod frÃ¥n delegeringstjänst - %u" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:56 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:87 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:122 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:156 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:399 #: src/hed/dmc/gridftp/Lister.cpp:223 src/hed/dmc/gridftp/Lister.cpp:243 #: src/hed/dmc/gridftp/Lister.cpp:468 src/hed/dmc/gridftp/Lister.cpp:475 #: src/hed/dmc/gridftp/Lister.cpp:497 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:164 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:197 #, c-format msgid "Response: %s" msgstr "Svar: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:64 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:136 #, c-format msgid "Unexpected delegation location from delegation endpoint - %s." msgstr "Oväntad delegeringsplats frÃ¥n delegeringstjänst - %s." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:92 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:127 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:161 msgid "Missing response from delegation endpoint." msgstr "Saknat svar frÃ¥n delegeringstjänst." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:193 #, c-format msgid "Unexpected response code from delegation endpoint: %u, %s." 
msgstr "Oväntad svarskod frÃ¥n delegeringstjänst: %u, %s." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:235 #, c-format msgid "Failed to submit all jobs: %s %s" msgstr "Misslyckades med att sända in alla jobb: %s %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:249 msgid "Failed uploading local input files" msgstr "Misslyckades med att ladda upp lokala indatafiler" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:304 msgid "Failed to prepare job description" msgstr "Misslyckades med att förbereda jobbeskrivning" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:313 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "Kunde inte sända in jobb. Jobbeskrivning inte giltig i %s-formatet: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:320 msgid "" "Can't submit multiple instances for multiple job descriptions. Not " "implemented yet." msgstr "" "Kan inte sända in mer än en instans för mer än en jobbeskrivning. Ännu ej " "implementerat." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:331 msgid "Unable to submit jobs. Failed to delegate X.509 credentials." msgstr "" "Kunde inte sända in jobb. Misslyckades med att delegera X.509-referenser." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:338 msgid "Unable to submit jobs. Failed to delegate token." msgstr "Kunde inte sända in jobb. Misslyckades med att delegera token." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:348 msgid "Unable to submit job. Failed to assign delegation to job description." msgstr "" "Kunde inte sända in jobb. Misslyckades med att tilldela delegering till " "jobbeskrivning." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:388 msgid "Failed to submit all jobs." msgstr "Misslyckades med att sända in alla jobb." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:398 #, c-format msgid "Failed to submit all jobs: %u %s" msgstr "Misslyckades med att sända in alla jobb: %u %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:411 #, c-format msgid "Failed to submit all jobs: %s" msgstr "Misslyckades med att sända in alla jobb: %s" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:27 msgid "Querying WSRF GLUE2 computing REST endpoint." msgstr "FrÃ¥gar WSRF-GLUE2-beräknings-REST-slutpunkt." 
#: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:60 #, c-format msgid "CONTENT %u: %s" msgstr "INNEHÃ…LL %u: %s" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:64 msgid "Response is not XML" msgstr "Svaret är inte XML" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:69 #, c-format msgid "Parsed domains: %u" msgstr "Tolkade domäner: %u" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "Sorterar efter lediga slottar i kö" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "Slumpvis sortering" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "Sorterar efter angivet benchmark (förval \"specint2000\")" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "Sorterar efter indatas tillgänglighet pÃ¥ target" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "Utför varken sortering eller matchning" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" "Target %s borttaget av FastestQueueBroker, rapporterar inte antal väntande " "jobb" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" "Target %s borttaget av FastestQueueBroker, rapporterar inte totalt antal jobb" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" "Target %s borttaget av FastestQueueBroker, rapporterar inte antal lediga " "slottar" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:74 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "[ADLParser] EMI-ES-tillstÃ¥nd stöds inte %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:94 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "[ADLParser] Internt tillstÃ¥nd %s stöds inte." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:104 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "[ADLParser] Utelämnande av %s-element stöds inte än." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:113 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "[ADLParser] %s-element mÃ¥ste vara boolesk." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:125 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" "[ADLParser] Kod i FailIfExitCodeNotEqualTo i %s är inte ett giltigt nummer." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:363 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "[ADLParser] Rot-element är inte ActivityDescription " #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:410 msgid "[ADLParser] priority is too large - using max value 100" msgstr "[ADLParser] prioritet är för stor - använder maxvärdet 100" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:453 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "[ADLParser] URL %s för RemoteLogging stöds inte." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:472 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "[ADLParser] Felaktig tid %s i ExpirationTime." 
#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 msgid "[ADLParser] AccessControl isn't valid XML." msgstr "[ADLParser] AccessControl är inte giltig XML." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:513 msgid "[ADLParser] CredentialService must contain valid URL." msgstr "[ADLParser] CredentialService mÃ¥ste innehÃ¥lle en giltig URL." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:542 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:545 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "[ADLParser] Endast email-protokoll för avisering stöds än." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:603 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "[ADLParser] Saknat eller felaktigt värde i ProcessesPerSlot." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:608 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "[ADLParser] Felaktigt eller saknat värde i ThreadsPerProcess." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:614 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" "[ADLParser] Saknat Name-element eller -värde i ParallelEnvironment/Option-" "element." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:631 msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "[ADLParser] NetworkInfo stöds inte än." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:645 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "[ADLParser] NodeAccess-värde %s stöds inte än." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:653 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "[ADLParser] Saknat eller felaktigt värde i NumberOfSlots." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:660 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" "[ADLParser] NumberOfSlots-elementet ska anges när värdet pÃ¥ useNumberOfSlots-" "attributet i SlotsPerHost-element är \"true\"." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:667 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "[ADLParser] Saknat eller felaktigt värde i SlotsPerHost." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:696 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "[ADLParser] Saknat eller felaktigt värde i IndividualPhysicalMemory." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "[ADLParser] Saknat eller felaktigt värde i IndividualVirtualMemory." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:716 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "[ADLParser] Saknat eller felaktigt värde i DiskSpaceRequirement." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:730 msgid "[ADLParser] Benchmark is not supported yet." msgstr "[ADLParser] Benchmark stöds inte än." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:738 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "[ADLParser] Saknat eller felaktigt värde i IndividualCPUTime." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "[ADLParser] Saknat eller felaktigt värde i TotalCPUTime." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:755 msgid "[ADLParser] Missing or wrong value in WallTime." 
msgstr "[ADLParser] Saknat eller felaktigt värde i WallTime." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:775 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "[ADLParser] Saknat eller tomt Name i InputFile." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:786 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "[ADLParser] Felaktig URI angiven i Source - %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:808 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "[ADLParser] Saknat eller tomt Name i OutputFile." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:814 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "[ADLParser] Felaktig URI angiven i target - %s." #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:827 #, c-format msgid "Location URI for file %s is invalid" msgstr "Location URI för fil %s är ogiltig" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:852 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." msgstr "[ADLParser] CreationFlag-värde %s stöds inte." #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "Vänster operand för RSL-konkatenering utvärderas inte till en sträng" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "Höger operand för RSL-konkatenering utvärderas inte till en sträng" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "Flerjobbsoperator endast tillÃ¥ten pÃ¥ toppnivÃ¥n" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "RSL-substitution är inte en sekvens" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "RSL-substitutions-sekvens har inte längden 2" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "RSL-substitutionsvariabelnamn utvärderas inte till en sträng" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "RSL-substitutionsvariabelvärde utvärderas inte till en sträng" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 msgid "End of comment not found" msgstr "Hittade inte slutet pÃ¥ kommentar" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "Skräp i slutet pÃ¥ RSL" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "Hittade inte slutet pÃ¥ sträng i enkla citattecken" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "Hittade inte slutet pÃ¥ sträng i dubbla citattecken" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "Hittade inte slutet pÃ¥ sträng i användardefinierade citattecken (%s)" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "')' förväntades" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "'(' förväntades" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid 
"Variable name expected" msgstr "Variabelnamn förväntades" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "Variabelnamn (%s) innehÃ¥ller ogiltigt tecken (%s)" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 msgid "Broken string" msgstr "Trasig sträng" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "Ingen vänsteroperand för konkateneringsoperator" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "Ingen högeroperand för konkateneringsoperator" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "Attributnamn förväntades" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "Attributnamn (%s) innehÃ¥ller ogiltigt tecken (%s)" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 msgid "Relation operator expected" msgstr "Relationsoperator förväntades" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." msgstr "Fel vid tolkning av det internt tilldelade executables-attributet." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" "Filen '%s' i 'executables'-attributet finns inte i 'inputfiles'-attributet" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "Värdet pÃ¥ ftpthreads-attributet mÃ¥ste vara ett nummer frÃ¥n 1 till 10" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must be specified when 'join' attribute is specified" msgstr "'stdout'-attributet mÃ¥ste anges när 'join'-attributet anges" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" "Attributet 'join' kan inte anges när bÃ¥de 'stdout'- och 'stderr'-attributen " "anges" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "Attributen 'gridtime' och 'cputime' kan inte anges samtidigt" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "Attributen 'gridtime' och 'walltime' kan inte anges samtidigt" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" "När 'countpernode'-attributet anges, mÃ¥ste 'count'-attributet ocksÃ¥ anges" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "Värdet pÃ¥ 'countpernode'-attributet mÃ¥ste vara ett heltal" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 msgid "No RSL content in job description found" msgstr "Hittade inget RSL-innehÃ¥ll i jobbeskrivning" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:295 msgid "'action' attribute not allowed in user-side job description" msgstr "'action'-attribut inte tillÃ¥tet i jobbeskrivning pÃ¥ användarsidan" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 #, c-format msgid "String successfully parsed as %s." 
msgstr "Sträng framgÃ¥ngsrikt tolkad som %s." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:313 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:331 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:349 #, c-format msgid "Attribute '%s' multiply defined" msgstr "Attribut '%s' definierat mer än en gÃ¥ng" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:317 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "Värdet pÃ¥ attributet '%s' förväntas vara ett enstaka värde" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:322 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "Värdet pÃ¥ attributet '%s' förväntas vara en sträng" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:338 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "Värdet pÃ¥ attributet '%s' är inte en sträng" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:356 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "Värdet pÃ¥ attributet '%s' är inte en sekvens" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:360 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" "Värdet pÃ¥ attributet '%s' har fel sekvenslängd: förväntad %d, hittad %d" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:492 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1375 msgid "Unexpected RSL type" msgstr "Oväntad RSL-typ" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:557 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "Minst tvÃ¥ värden behövs för 'inputfiles'-attributet" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:562 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "Första värdet i 'inputfiles'-attributet (filnamn) kan inte vara tomt" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:587 #, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "Ogiltig URL '%s' för indatafil '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:596 #, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "Ogiltig URL-alternativ-syntax i alternativ '%s' för indatafil '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "Ogiltig URL: '%s' i indatafil '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:677 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "Minst tvÃ¥ värden behövs för 'outputfiles'-attributet" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:682 msgid "First value of 'outputfiles' attribute (filename) cannot be empty" msgstr "Första värdet i 'inputfiles'-attributet (filnamn) kan inte vara tomt" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 #, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "Ogiltig URL '%s' för utdatafil '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:704 #, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "Ogiltig URL-alternativ-syntax i alternativ '%s' för utdatafil '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:714 #, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "Ogiltig URL: '%s' i utdatafil '%s'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:746 #, c-format msgid "" "Invalid comparison operator '%s' used at 'delegationid' attribute, only \"=" "\" is allowed." 
msgstr "" "Ogiltig jämförelseoperator '%s' använd i 'delegationid'-attributet, endast " "\"=\" är tillÃ¥ten." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:764 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" "Ogiltig jämförelseoperator '%s' använd i 'queue'-attributet i 'GRIDMANAGER'-" "dialekt, endast \"=\" är tillÃ¥tet" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:770 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" "Ogiltig jämförelseoperator '%s' använd i 'queue'-attributet, endast \"!=\" " "eller \"=\" är tillÃ¥tna." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:927 #, c-format msgid "Value of attribute '%s' expected not to be empty" msgstr "Värdet av attributet '%s' förväntades inte vara tomt" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1036 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "Värdet pÃ¥ XRSL-attributet acl är inte giltig XML." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1050 msgid "The cluster XRSL attribute is currently unsupported." msgstr "XRSL-attributet cluster stöds för närvarande inte." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1066 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" "Syntaxfel i 'notify'-attributvärde ('%s'), det mÃ¥ste innehÃ¥lla en e-" "postadress" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1074 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" "Syntaxfel i 'notify'-attributvärde ('%s'), det fÃ¥r endast innehÃ¥lla e-" "postadresser efter tillstÃ¥ndsflagg(a/or)" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1077 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" "Syntaxfel i 'notify'-attributvärde ('%s'), det innehÃ¥ller okända " "tillstÃ¥ndsflaggor" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1125 msgid "priority is too large - using max value 100" msgstr "prioritet är för stor - använder maxvärde 100" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1158 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "Ogiltigt nodeaccess-värde: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1201 msgid "Value of 'count' attribute must be an integer" msgstr "Värdet pÃ¥ 'count'-attributet mÃ¥ste vara ett heltal" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1231 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" "Värdet pÃ¥ 'exclusiveexecution'-attributet mÃ¥ste vara endera 'yes' eller 'no'" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1277 #, c-format msgid "Invalid action value %s" msgstr "Ogiltigt action-värde %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1367 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "Det angivna Globus-attributet (%s) stöds inte. %s ignoreras." #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1371 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "Okänt XRSL-attribut: %s - ignorerar det." 
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1385 #, c-format msgid "Wrong language requested: %s" msgstr "Felaktigt språk begärt: %s" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1722 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." msgstr "" "Kan inte skapa XRSL-representation: Resources.SlotRequirement.NumberOfSlots-" "attributet måste anges när Resources.SlotRequirement.SlotsPerHost-attributet " "anges." #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:92 msgid "Failed to initialize main Python thread" msgstr "Misslyckades med att initiera Pythons huvudtråd" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:97 msgid "Main Python thread was not initialized" msgstr "Pythons huvudtråd initierades inte" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, c-format msgid "Loading Python broker (%i)" msgstr "Laddar in Python-mäklare (%i)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:134 msgid "Main Python thread is not initialized" msgstr "Pythons huvudtråd är inte initierad" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "Python-mäklare init" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" "Ogiltigt klassnamn. broker-argumentet för Python-mäklaren ska vara\n" " Filnamn.Klass.args (args är valfritt), till exempel SampleBroker."
"MyBroker" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, c-format msgid "Class name: %s" msgstr "Klassnamn: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, c-format msgid "Module name: %s" msgstr "Modulnamn: %s" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:178 msgid "Cannot convert ARC module name to Python string" msgstr "Kan inte konvertera ARC-modulnamn till pythonsträng" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:186 msgid "Cannot import ARC module" msgstr "Kan inte importera ARC-modulen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:196 #: src/services/wrappers/python/pythonwrapper.cpp:429 msgid "Cannot get dictionary of ARC module" msgstr "Kan inte hämta ordlista för ARC-modulen" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 msgid "Cannot find ARC UserConfig class" msgstr "Kan inte hitta ARCs UserConfig-klass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 msgid "UserConfig class is not an object" msgstr "UserConfig-klass är inte ett objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 msgid "Cannot find ARC JobDescription class" msgstr "Kan inte hitta ARCs JobDescription-klass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 msgid "JobDescription class is not an object" msgstr "JobDescription-klass är inte ett objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 msgid "Cannot find ARC ExecutionTarget class" msgstr "Kan inte hitta ARCs ExecutionTarget-klass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 msgid "ExecutionTarget class is not an object" msgstr "ExecutionTarget-klass är inte ett objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:157 msgid "Cannot convert module name to Python string" msgstr "Kan inte konvertera modulnamn till pythonsträng" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:164 msgid "Cannot import module" msgstr "Kan inte importera modul" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 msgid "Cannot get dictionary of custom broker module" msgstr "Kan inte hämta ordlista för mäklarmodul" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 msgid "Cannot find custom broker class" msgstr "Kan inte hitta mäklarklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, c-format msgid "%s class is not an object" msgstr "%s-klass är inte ett objekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 msgid "Cannot create UserConfig argument" msgstr "Kan inte skapa UserConfig-argument" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 msgid "Cannot convert UserConfig to Python object" msgstr "Kan inte konvertera UserConfig till pythonobjekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:253 msgid "Cannot create argument of the constructor" msgstr "Kan inte skapa argument till konstruktorn" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:261 msgid "Cannot create instance of Python class" msgstr "Kan inte skapa instans av pythonklass" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, c-format msgid "Python broker constructor called (%d)" msgstr "Python-mäklarens konstruktor anropad (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, 
c-format msgid "Python broker destructor called (%d)" msgstr "Python-mäklarens destruktor anropad (%d)" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 msgid "Cannot create ExecutionTarget argument" msgstr "Kan inte skapa ExecutionTarget-argument" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "Kan inte konvertera ExecutionTarget (%s) till pythonobjekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 msgid "Cannot create JobDescription argument" msgstr "Kan inte skapa JobDescription-argument" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 msgid "Cannot convert JobDescription to python object" msgstr "Kan inte konvertera JobDescription till pythonobjekt" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "Sortera med användarskapad python-mäklare" #: src/hed/daemon/unix/daemon.cpp:84 #, c-format msgid "Daemonization fork failed: %s" msgstr "Demonisering av fork misslyckades: %s" #: src/hed/daemon/unix/daemon.cpp:95 msgid "Watchdog (re)starting application" msgstr "Vakthund (Ã¥ter)startar programmet" #: src/hed/daemon/unix/daemon.cpp:100 #, c-format msgid "Watchdog fork failed: %s" msgstr "Vakthunds-fork misslyckades: %s" #: src/hed/daemon/unix/daemon.cpp:110 msgid "Watchdog starting monitoring" msgstr "Vakthund startar monitorering" #: src/hed/daemon/unix/daemon.cpp:136 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "Vakthund upptäckte att programmet avslutades pÃ¥ grund av signal %u" #: src/hed/daemon/unix/daemon.cpp:138 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "Vakthund upptäckte att programmet avslutades med kod %u" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application exit" msgstr "Vakthund upptäckte att programmet avslutades" #: src/hed/daemon/unix/daemon.cpp:149 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" "Vakthund avslutades eftersom programmet avbröts avsiktligt eller avslutade " "sig självt" #: src/hed/daemon/unix/daemon.cpp:156 msgid "Watchdog detected application timeout or error - killing process" msgstr "Vakthund upptäckte program-timeout eller -fel - avbryter process" #: src/hed/daemon/unix/daemon.cpp:167 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" "Vakthund misslyckades med att vänta pÃ¥ programmets avslutande - sänder KILL" #: src/hed/daemon/unix/daemon.cpp:179 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" "Vakthund misslyckades med att avbryta programmet - ger upp och avslutar" #: src/hed/daemon/unix/daemon.cpp:200 msgid "Shutdown daemon" msgstr "Stänger av demon" #: src/hed/daemon/unix/main_unix.cpp:47 msgid "shutdown" msgstr "avstängning" #: src/hed/daemon/unix/main_unix.cpp:50 msgid "exit" msgstr "avslut" #: src/hed/daemon/unix/main_unix.cpp:88 msgid "No server config part of config file" msgstr "Ingen serverinställningsdel i inställningsfilen" #: src/hed/daemon/unix/main_unix.cpp:163 #, c-format msgid "Unknown log level %s" msgstr "Okänd logg-nivÃ¥ %s" #: src/hed/daemon/unix/main_unix.cpp:173 #, c-format msgid "Failed to open log file: %s" msgstr "Misslyckades med att öppna 
loggfil: %s" #: src/hed/daemon/unix/main_unix.cpp:205 msgid "Start foreground" msgstr "Startar i förgrunden" #: src/hed/daemon/unix/main_unix.cpp:254 #, c-format msgid "XML config file %s does not exist" msgstr "XML-inställningsfil %s existerar inte" #: src/hed/daemon/unix/main_unix.cpp:258 src/hed/daemon/unix/main_unix.cpp:273 #, c-format msgid "Failed to load service configuration from file %s" msgstr "Misslyckades med att ladda in tjänsteinställningar frÃ¥n fil %s" #: src/hed/daemon/unix/main_unix.cpp:264 #, c-format msgid "INI config file %s does not exist" msgstr "INI-inställningsfil %s existerar inte" #: src/hed/daemon/unix/main_unix.cpp:269 src/hed/daemon/unix/main_unix.cpp:291 msgid "Error evaluating profile" msgstr "Fel vid utvärdering av profil" #: src/hed/daemon/unix/main_unix.cpp:285 msgid "Error loading generated configuration" msgstr "Fel vid inladdning av genererade inställningar" #: src/hed/daemon/unix/main_unix.cpp:296 msgid "Failed to load service configuration from any default config file" msgstr "" "Misslyckades med att ladda in tjänsteinställningar frÃ¥n nÃ¥gon förvald " "inställningsfil" #: src/hed/daemon/unix/main_unix.cpp:357 msgid "Schema validation error" msgstr "Schemavalideringsfel" #: src/hed/daemon/unix/main_unix.cpp:372 msgid "Configuration root element is not " msgstr "Inställningarnas rotelement är inte " #: src/hed/daemon/unix/main_unix.cpp:388 #, c-format msgid "Cannot switch to group (%s)" msgstr "Kan inte byta till grupp (%s)" #: src/hed/daemon/unix/main_unix.cpp:398 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "Kan inte byta till primär grupp för användare (%s)" #: src/hed/daemon/unix/main_unix.cpp:403 #, c-format msgid "Cannot switch to user (%s)" msgstr "Kan inte byta till användare (%s)" #: src/hed/daemon/unix/main_unix.cpp:421 msgid "Failed to load service side MCCs" msgstr "Misslyckades med att ladda in tjänstesidans MCCer" #: src/hed/daemon/unix/main_unix.cpp:423 src/tests/count/test_service.cpp:29 #: src/tests/echo/test.cpp:30 src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "Tjänstesidans MCCer har laddats in" #: src/hed/daemon/unix/main_unix.cpp:430 msgid "Unexpected arguments supplied" msgstr "Oväntat argument tillhandahÃ¥llet" #: src/hed/dmc/file/DataPointFile.cpp:87 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "Okänd kanal %s för stdio-protokoll" #: src/hed/dmc/file/DataPointFile.cpp:94 #, c-format msgid "Failed to open stdio channel %s" msgstr "Misslyckades med att öppna stdio-kanal %s" #: src/hed/dmc/file/DataPointFile.cpp:95 #, c-format msgid "Failed to open stdio channel %d" msgstr "Misslyckades med att öppna stdio-kanal %d" #: src/hed/dmc/file/DataPointFile.cpp:335 #, c-format msgid "fsync of file %s failed: %s" msgstr "fsync för fil %s misslyckades: %s" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:348 #, c-format msgid "closing file %s failed: %s" msgstr "stängning av fil %s misslyckades: %s" #: src/hed/dmc/file/DataPointFile.cpp:367 #, c-format msgid "File is not accessible: %s" msgstr "Filen kan inte kommas Ã¥t: %s" #: src/hed/dmc/file/DataPointFile.cpp:373 #: src/hed/dmc/file/DataPointFile.cpp:458 #, c-format msgid "Can't stat file: %s: %s" msgstr "Kan inte göra stat pÃ¥ filen: %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:419 #: src/hed/dmc/file/DataPointFile.cpp:425 #, c-format msgid "Can't stat stdio channel %s" msgstr "Kan inte göra stat pÃ¥ stdio-kanal %s" #: src/hed/dmc/file/DataPointFile.cpp:473 #, c-format msgid 
"%s is not a directory" msgstr "%s är inte en katalog" #: src/hed/dmc/file/DataPointFile.cpp:488 #, c-format msgid "Failed to read object %s: %s" msgstr "Misslyckades med att läsa objekt: %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:501 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:534 #, c-format msgid "File is not accessible %s: %s" msgstr "Filen kan inte kommas Ã¥t %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:507 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:540 #, c-format msgid "Can't delete directory %s: %s" msgstr "Kan inte ta bort katalog: %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:514 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:547 #, c-format msgid "Can't delete file %s: %s" msgstr "Kan inte ta bort fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:524 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:315 #: src/hed/dmc/http/DataPointHTTP.cpp:1658 #: src/hed/dmc/http/DataPointHTTP.cpp:1676 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1466 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:562 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:582 #, c-format msgid "Creating directory %s" msgstr "Skapar katalog %s" #: src/hed/dmc/file/DataPointFile.cpp:532 src/hed/dmc/srm/DataPointSRM.cpp:168 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:596 #, c-format msgid "Renaming %s to %s" msgstr "Byter namn pÃ¥ %s till %s" #: src/hed/dmc/file/DataPointFile.cpp:534 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:605 #, c-format msgid "Can't rename file %s: %s" msgstr "Kan inte byta namn pÃ¥ fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:562 #, c-format msgid "Failed to open %s for reading: %s" msgstr "Misslyckades med att öppna %s för läsning: %s" #: src/hed/dmc/file/DataPointFile.cpp:577 #: src/hed/dmc/file/DataPointFile.cpp:712 #, c-format msgid "Failed to switch user id to %d/%d" msgstr "Misslyckades med att byta användar-id till %d/%d" #: src/hed/dmc/file/DataPointFile.cpp:583 #, c-format msgid "Failed to create/open file %s: %s" msgstr "Misslyckades med att skapa/öppna fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:599 msgid "Failed to create thread" msgstr "Misslyckades med att skapa trÃ¥d" #: src/hed/dmc/file/DataPointFile.cpp:679 #, c-format msgid "Invalid url: %s" msgstr "Ogiltig URL: %s" #: src/hed/dmc/file/DataPointFile.cpp:688 src/hed/libs/data/FileCache.cpp:480 #, c-format msgid "Failed to create directory %s: %s" msgstr "Misslyckades med att skapa katalog %s: %s\"" #: src/hed/dmc/file/DataPointFile.cpp:701 #: src/hed/dmc/file/DataPointFile.cpp:720 #, c-format msgid "Failed to create file %s: %s" msgstr "Misslyckades med att skapa fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:732 #, c-format msgid "setting file %s to size %llu" msgstr "Sätter fil %s till storlek %llu" #: src/hed/dmc/file/DataPointFile.cpp:755 #, c-format msgid "Failed to preallocate space for %s" msgstr "Misslyckades med förallokera utrymme for %s" #: src/hed/dmc/file/DataPointFile.cpp:794 src/hed/libs/data/FileCache.cpp:854 #, c-format msgid "Failed to clean up file %s: %s" msgstr "Misslyckades med att ta bort fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:808 #, c-format msgid "Error during file validation. Can't stat file %s: %s" msgstr "Fel under filvalidering. Kan inte göra stat pÃ¥ fil %s: %s" #: src/hed/dmc/file/DataPointFile.cpp:812 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" "Fel under filvalidering. 
Lokal filstorlek %llu stämmer inte överens med " "källans filstorlek %llu för fil %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, c-format msgid "Using proxy %s" msgstr "Använder proxy %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, c-format msgid "Using key %s" msgstr "Använder nyckel %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, c-format msgid "Using cert %s" msgstr "Använder certifikat %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "Platser saknas i destinations-LFC-URL" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "Duplicerad replika hittad i LFC: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "Lägger till plats: %s - %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "Lägg till plats: url: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "Lägg till plats: metadata: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, c-format msgid "gfal_open failed: %s" msgstr "gfal_open misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, c-format msgid "gfal_close failed: %s" msgstr "gfal_close misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, c-format msgid "gfal_read failed: %s" msgstr "gfal_read misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 msgid "StopReading starts waiting for transfer_condition." msgstr "StopReading börjar vänta på transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 msgid "StopReading finished waiting for transfer_condition." msgstr "StopReading slutar vänta på transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:68 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:73 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:44 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:49 #, c-format msgid "No locations defined for %s" msgstr "Inga platser definierade för %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, c-format msgid "Failed to set LFC replicas: %s" msgstr "Misslyckades med att sätta LFC-replikor: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "gfal_mkdir misslyckades (%s), försöker skriva ändå" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" "DataPointGFAL::write_file fick position %d och offset %d, måste göra seek" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "gfal_write misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:405 msgid "StopWriting starts waiting for transfer_condition." msgstr "StopWriting börjar vänta på transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:407 msgid "StopWriting finished waiting for transfer_condition." msgstr "StopWriting slutar vänta på transfer_condition."
#: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "gfal_stat misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" "gfal_listxattr misslyckades, ingen replika-information kan erhållas: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, c-format msgid "gfal_opendir failed: %s" msgstr "gfal_opendir misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "List kommer att göra stat på URL %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, c-format msgid "gfal_closedir failed: %s" msgstr "gfal_closedir misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "gfal_rmdir misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "gfal_unlink misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "gfal_mkdir misslyckades: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "gfal_rename misslyckades: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "Misslyckades med att erhålla antal överförda byte: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "Misslyckades med att initiera GFAL2-parameter-handtag: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "Misslyckades med att initiera ny GFAL2-kontext: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "Misslyckades med att sätta GFAL2-monitor-callback: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "Misslyckades med att sätta skriv-över-alternativ i GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" "Misslyckades med att sätta GFAL2-överförings-timeout, använder förval: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "Överföring misslyckades" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 msgid "Transfer succeeded" msgstr "Överföring lyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:56 msgid "ftp_complete_callback: success" msgstr "ftp_complete_callback: OK" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:62 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "ftp_complete_callback: fel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:78 msgid "ftp_check_callback" msgstr "ftp_check_callback" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #:
src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:80 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:108 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:285 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:321 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:731 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:764 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:801 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:932 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:996 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1006 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1014 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1022 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1030 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1036 #, c-format msgid "Globus error: %s" msgstr "Globusfel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:91 msgid "Excessive data received while checking file access" msgstr "För mycket data mottaget när filåtkomst kontrollerades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:107 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "Registrering av Globus-FTP-buffer misslyckades - avbryter kontroll" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "check_ftp: globus_ftp_client_size misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "check_ftp: timeout vid väntan på storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "check_ftp: misslyckades med att erhålla filens storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained size: %lli" msgstr "check_ftp: erhållen storlek: %lli" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "check_ftp: globus_ftp_client_modification_time misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "check_ftp: timeout vid väntan på ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "check_ftp: misslyckades med att erhålla filens ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "check_ftp: erhållen ändringstid: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:147 msgid "check_ftp: globus_ftp_client_get failed" msgstr "check_ftp: globus_ftp_client_get misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:154 msgid "check_ftp: globus_ftp_client_register_read" msgstr "check_ftp: globus_ftp_client_register_read" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:166 msgid "check_ftp: timeout waiting for partial get" msgstr "check_ftp: timeout vid väntan på partiell get" #:
src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:193 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "" "Borttagning av fil misslyckades, försöker med borttagande av katalog för %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:204 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "delete_ftp: globus_ftp_client_delete misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:210 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:232 msgid "delete_ftp: timeout waiting for delete" msgstr "delete_ftp: timeout vid väntan på borttagande" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:226 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "delete_ftp: globus_ftp_client_rmdir misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:280 #, c-format msgid "mkdir_ftp: making %s" msgstr "mkdir_ftp: skapar %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:289 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "mkdir_ftp: timeout vid väntan på mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:325 msgid "Timeout waiting for mkdir" msgstr "Timeout vid väntan på mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:348 msgid "start_reading_ftp" msgstr "start_reading_ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:352 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "start_reading_ftp: globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:366 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "start_reading_ftp: globus_ftp_client_get misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "start_reading_ftp: globus_thread_create misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "stop_reading_ftp: avbryter förbindelse" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "Misslyckades med att avbryta överföring av ftp-fil: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." msgstr "Antar att överföring redan är avbruten eller misslyckad."
#: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "stop_reading_ftp: väntar på att överföring ska avslutas" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "stop_reading_ftp: avslutar: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "ftp_read_thread: erhåll och registrera buffrar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: for_read misslyckades - avbryter: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "ftp_read_thread: data-callback misslyckades - avbryter: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:382 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "ftp_read_thread: Globusfel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "ftp_read_thread: för många registreringsfel - avbryter: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" "ftp_read_thread: misslyckades med att registrera globusbuffer - kommer att " "prova senare: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:398 msgid "ftp_read_thread: waiting for eof" msgstr "ftp_read_thread: väntar på filslut" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:402 msgid "ftp_read_thread: waiting for buffers released" msgstr "ftp_read_thread: väntar på att buffrar ska frigöras" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:410 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:664 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "ftp_read_thread: misslyckades med att frigöra buffrar - läcker" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:417 msgid "ftp_read_thread: exiting" msgstr "ftp_read_thread: avslutar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:438 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "ftp_read_callback: misslyckande: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "ftp_read_callback: OK" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "Misslyckades med att hämta ftp-fil" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:519 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:708 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:129 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:169 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1214 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1248 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1430 #: src/hed/libs/common/Thread.cpp:240 src/hed/libs/common/Thread.cpp:243 #: src/hed/libs/credential/Credential.cpp:1076 #: src/hed/libs/data/DataPointDelegate.cpp:628 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:66 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:82 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:98 #:
src/hed/mcc/tcp/PayloadTCPSocket.cpp:117 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:127 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:135 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:144 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:69 src/hed/shc/arcpdp/ArcPDP.cpp:234 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:305 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:258 #: src/libs/data-staging/Scheduler.cpp:117 #: src/services/a-rex/delegation/DelegationStore.cpp:36 #: src/services/a-rex/delegation/DelegationStore.cpp:41 #: src/services/a-rex/delegation/DelegationStore.cpp:46 #: src/services/a-rex/delegation/DelegationStore.cpp:75 #: src/services/a-rex/delegation/DelegationStore.cpp:81 #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:233 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:408 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:395 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:435 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:487 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:602 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:636 #, c-format msgid "%s" msgstr "%s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:545 msgid "start_writing_ftp: mkdir" msgstr "start_writing_ftp: mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:547 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "start_writing_ftp: mkdir misslyckades - försöker fortfarande skriva" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:549 msgid "start_writing_ftp: put" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:563 msgid "start_writing_ftp: put failed" msgstr "start_writing_ftp: put misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "start_writing_ftp: globus_thread_create misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 #: src/hed/libs/data/DataPointDelegate.cpp:307 msgid "StopWriting: aborting connection" msgstr "StopWriting: avbryter förbindelse" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #: src/hed/dmc/http/DataPointHTTP.cpp:982 #: src/hed/libs/data/DataPointDelegate.cpp:321 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "StopWriting: Beräknad checksumma %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #: src/hed/dmc/http/DataPointHTTP.cpp:986 #: src/hed/libs/data/DataPointDelegate.cpp:325 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "StopWriting: letar efter checksumma för %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:800 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "list_files_ftp: globus_ftp_client_cksm misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:804 msgid "list_files_ftp: timeout waiting for cksum" msgstr "list_files_ftp: timeout vid väntan på cksum" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "list_files_ftp: information om checksumma inte möjlig" #:
src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:817 #, c-format msgid "list_files_ftp: checksum %s" msgstr "list_files_ftp: checksumma %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 #: src/hed/dmc/http/DataPointHTTP.cpp:995 #: src/hed/libs/data/DataPointDelegate.cpp:332 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" "Typ av checksumma som returnerades av servern skiljer sig från den begärda " "typen, kan inte jämföra" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #: src/hed/dmc/http/DataPointHTTP.cpp:997 #: src/hed/libs/data/DataPointDelegate.cpp:334 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" "Beräknad checksumma %s stämmer överens med checksumma rapporterad av servern" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #: src/hed/dmc/http/DataPointHTTP.cpp:999 #: src/hed/libs/data/DataPointDelegate.cpp:337 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" "Beräknad checksumma %s stämmer inte överens med checksumma rapporterad av " "servern %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "ftp_write_thread: erhåll och registrera buffrar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "ftp_write_thread: for_write misslyckades - avbryter" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "ftp_write_thread: data-callback misslyckades - avbryter" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "ftp_write_thread: väntar på filslut" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:662 msgid "ftp_write_thread: waiting for buffers released" msgstr "ftp_write_thread: väntar på att buffrar ska frigöras" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "ftp_write_thread: misslyckades med att frigöra buffrar - läcker" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:669 msgid "ftp_write_thread: exiting" msgstr "ftp_write_thread: avslutar" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:688 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "ftp_write_callback: misslyckande: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:690 #, c-format msgid "ftp_write_callback: success %s" msgstr "ftp_write_callback: OK %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:706 msgid "Failed to store ftp file" msgstr "Misslyckades med att spara ftp-fil" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:711 msgid "ftp_put_complete_callback: success" msgstr "ftp_put_complete_callback: OK" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:725 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "list_files_ftp: söker efter storlek på %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:729 msgid "list_files_ftp: globus_ftp_client_size failed"
msgstr "list_files_ftp: globus_ftp_client_size misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:735 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:736 msgid "list_files_ftp: timeout waiting for size" msgstr "list_files_ftp: timeout vid väntan pÃ¥ storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:742 msgid "list_files_ftp: failed to get file's size" msgstr "list_files_ftp: misslyckades med att erhÃ¥lla filens storlek" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:755 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "list_files_ftp: söker efter ändringstid för %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:761 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "list_files_ftp: globus_ftp_client_modification_time misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:768 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "list_files_ftp: timeout vid väntan pÃ¥ ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:776 msgid "list_files_ftp: failed to get file's modification time" msgstr "list_files_ftp: misslyckades med att erhÃ¥lla filens ändringstid" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:790 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "list_files_ftp: söker efter checksumma pÃ¥ %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:830 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "Misslyckades med att erhÃ¥lla stat frÃ¥n FTP: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:835 msgid "No results returned from stat" msgstr "Inga resultat returnerade frÃ¥n stat" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:841 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "Fel antal objekt (%i) för stat frÃ¥n ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:854 #, c-format msgid "Unexpected path %s returned from server" msgstr "Oväntad sökväg %s returnerad frÃ¥n server" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:887 #, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "Misslyckades med att erhÃ¥lla listning frÃ¥n FTP: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:930 msgid "Rename: globus_ftp_client_move failed" msgstr "Rename: globus_ftp_client_move misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:936 msgid "Rename: timeout waiting for operation to complete" msgstr "Rename: timeout vid väntan pÃ¥ att operationen ska slutföras" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:995 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "init_handle: globus_ftp_client_handleattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 #: 
src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1004 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "init_handle: globus_ftp_client_handleattr_set_gridftp2 misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1013 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "init_handle: globus_ftp_client_handle_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1020 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "init_handle: globus_ftp_client_operationattr_init misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1028 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "" "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1034 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "" "init_handle: globus_ftp_client_operationattr_set_delayed_pasv misslyckades" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1086 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1115 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "globus_ftp_client_operationattr_set_authorization: fel: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1114 msgid "Failed to set credentials for GridFTP transfer" msgstr "Misslyckades med att sätta referenser för GridFTP-överföring" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1120 msgid "Using secure data transfer" msgstr "Använder säker dataöverföring" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1125 msgid "Using insecure data transfer" msgstr "Använder osäker dataöverföring" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1152 msgid "~DataPoint: destroy ftp_handle" msgstr "~DataPoint: förstör ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1155 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "~DataPoint: förstörande av ftp_handle misslyckades - försöker igen" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1173 msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "~DataPoint: misslyckades med att förstöra ftp_handle - läcker" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" "Saknar referens till fabrik och/eller modul. Det är osäkert att använda " "Globus i icke-persistent läge - (Grid)FTP-koden är deaktiverad. Rapportera " "till utvecklare."
#: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:390 msgid "ftp_read_thread: failed to register buffers" msgstr "ftp_read_thread: misslyckades med att registrera buffrar" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:405 msgid "ftp_read_thread: failed to release buffers" msgstr "ftp_read_thread: misslyckades med att frigöra buffrar" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:443 #, c-format msgid "ftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%u" msgstr "" "ftp_read_callback: lyckades - offset=%u, längd=%u, eof=%u, tillåt oof=%u" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:457 #, c-format msgid "ftp_read_callback: delayed data chunk: %llu %llu" msgstr "ftp_read_callback: fördröjd data-chunk: %llu %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:464 #, c-format msgid "ftp_read_callback: unexpected data out of order: %llu != %llu" msgstr "ftp_read_callback: oväntad data i oordning: %llu != %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:471 msgid "ftp_read_callback: too many unexpected out of order chunks" msgstr "ftp_read_callback: för många oväntade chunks i oordning" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:492 #, c-format msgid "ftp_read_callback: Globus error: %s" msgstr "ftp_read_callback: Globusfel: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:517 msgid "ftp_get_complete_callback: Failed to get ftp file" msgstr "ftp_get_complete_callback: Misslyckades med att hämta ftp-fil" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:522 msgid "ftp_get_complete_callback: success" msgstr "ftp_get_complete_callback: lyckades" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:577 msgid "start_writing_ftp: waiting for data tag" msgstr "start_writing_ftp: väntar på data-tagg" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:580 msgid "start_writing_ftp: failed to read data tag" msgstr "start_writing_ftp: misslyckades med att läsa data-tagg" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:585 msgid "start_writing_ftp: waiting for data chunk" msgstr "start_writing_ftp: väntar på data-chunk" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:587 msgid "start_writing_ftp: failed to read data chunk" msgstr "start_writing_ftp: misslyckades med att läsa data-chunk" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:598 #, c-format msgid "ftp_write_thread: data out of order in stream mode: %llu != %llu" msgstr "ftp_write_thread: data i oordning i strömningsläge: %llu != %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:605 msgid "ftp_write_thread: too many out of order chunks in stream mode" msgstr "ftp_write_thread: för många chunks i oordning i strömningsläge" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:610 #, c-format msgid "start_writing_ftp: data chunk: %llu %llu" msgstr "start_writing_ftp: data-chunk: %llu %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:616 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:642 #, c-format msgid "ftp_write_thread: Globus error: %s" msgstr "ftp_write_thread: Globusfel: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:635 #, c-format msgid "start_writing_ftp: delayed data chunk: %llu %llu" msgstr "start_writing_ftp: fördröjd data-chunk: %llu %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:654 msgid "start_writing_ftp: waiting for some buffers sent" msgstr "start_writing_ftp: väntar på några buffrar som skickats" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:660 msgid "ftp_write_thread: waiting for transfer complete" msgstr "ftp_write_thread: väntar på slutförd 
överföring" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:811 msgid "list_files_ftp: no checksum information supported" msgstr "list_files_ftp: information om checksumma stöds inte" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:813 msgid "list_files_ftp: no checksum information returned" msgstr "list_files_ftp: information om checksumma returnerades inte" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:908 msgid "Too many failures to obtain checksum - giving up" msgstr "För mÃ¥nga fel för att erhÃ¥lla checksumma - ger up" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1268 msgid "Expecting Command and URL provided" msgstr "Saknar kommando och URL" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1275 #: src/hed/libs/data/DataExternalHelper.cpp:376 msgid "Expecting Command among arguments" msgstr "Saknar kommando bland argumenten" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1279 #: src/hed/libs/data/DataExternalHelper.cpp:380 msgid "Expecting URL among arguments" msgstr "Saknar URL bland argumenten" #: src/hed/dmc/gridftp/Lister.cpp:221 src/hed/dmc/gridftp/Lister.cpp:289 #: src/hed/dmc/gridftp/Lister.cpp:384 src/hed/dmc/gridftp/Lister.cpp:767 #: src/hed/dmc/gridftp/Lister.cpp:812 #, c-format msgid "Failure: %s" msgstr "Misslyckande: %s" #: src/hed/dmc/gridftp/Lister.cpp:288 msgid "Error getting list of files (in list)" msgstr "Fel vid erhÃ¥llande av fillista (i list)" #: src/hed/dmc/gridftp/Lister.cpp:290 msgid "Assuming - file not found" msgstr "Antar - hittade inte filen" #: src/hed/dmc/gridftp/Lister.cpp:307 #, c-format msgid "list record: %s" msgstr "listpost: %s" #: src/hed/dmc/gridftp/Lister.cpp:362 msgid "Failed reading list of files" msgstr "Misslyckades med att läsa fillista" #: src/hed/dmc/gridftp/Lister.cpp:398 msgid "Failed reading data" msgstr "Misslyckades med att läsa data" #: src/hed/dmc/gridftp/Lister.cpp:426 #, c-format msgid "Command: %s" msgstr "Kommando: %s" #: src/hed/dmc/gridftp/Lister.cpp:430 src/hed/dmc/gridftp/Lister.cpp:471 #: src/hed/mcc/http/PayloadHTTP.cpp:1010 msgid "Memory allocation error" msgstr "Minnesallokeringsfel" #: src/hed/dmc/gridftp/Lister.cpp:438 #, c-format msgid "%s failed" msgstr "%s misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:442 msgid "Command is being sent" msgstr "Kommande sänds" #: src/hed/dmc/gridftp/Lister.cpp:447 msgid "Waiting for response" msgstr "Väntar pÃ¥ svar" #: src/hed/dmc/gridftp/Lister.cpp:452 msgid "Callback got failure" msgstr "Callback erhöll misslyckande" #: src/hed/dmc/gridftp/Lister.cpp:538 msgid "Failed in globus_cond_init" msgstr "Misslyckades i globus_cond_init" #: src/hed/dmc/gridftp/Lister.cpp:542 msgid "Failed in globus_mutex_init" msgstr "Misslyckades i globus_mutex_init" #: src/hed/dmc/gridftp/Lister.cpp:549 msgid "Failed allocating memory for handle" msgstr "Misslyckades med att allokera minne för handtag" #: src/hed/dmc/gridftp/Lister.cpp:554 msgid "Failed in globus_ftp_control_handle_init" msgstr "Misslyckades i globus_ftp_control_handle_init" #: src/hed/dmc/gridftp/Lister.cpp:562 msgid "Failed to enable IPv6" msgstr "Misslyckades med att aktivera IPv6" #: src/hed/dmc/gridftp/Lister.cpp:573 msgid "Closing connection" msgstr "Stänger förbindelse" #: src/hed/dmc/gridftp/Lister.cpp:580 src/hed/dmc/gridftp/Lister.cpp:595 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "Timeout vid väntan pÃ¥ Globus callback - läcker förbindelse" #: src/hed/dmc/gridftp/Lister.cpp:605 msgid "Closed successfully" msgstr "Stängdes framgÃ¥ngsrikt" #: src/hed/dmc/gridftp/Lister.cpp:607 
msgid "Closing may have failed" msgstr "Stängning kan ha misslyckats" #: src/hed/dmc/gridftp/Lister.cpp:634 msgid "Waiting for globus handle to settle" msgstr "Väntar pÃ¥ att globus-handtag ska lugna ned sig" #: src/hed/dmc/gridftp/Lister.cpp:639 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "Handtag är i felaktigt tillstÃ¥nd %u/%u" #: src/hed/dmc/gridftp/Lister.cpp:645 msgid "Globus handle is stuck" msgstr "Globus-handtag har fastnat" #: src/hed/dmc/gridftp/Lister.cpp:661 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" "Misslyckades med att förstöra handtag: %s. Kan inte hantera en sÃ¥dan " "situation." #: src/hed/dmc/gridftp/Lister.cpp:684 #, c-format msgid "EPSV failed: %s" msgstr "EPSV misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:688 msgid "EPSV failed" msgstr "EPSV misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:695 #, c-format msgid "PASV failed: %s" msgstr "PASV misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:699 msgid "PASV failed" msgstr "PASV misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:765 msgid "Failed to apply local address to data connection" msgstr "Misslyckades med att tillämpa lokal adress pÃ¥ dataförbindelse" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "Kan inte tolka värd och/eller port i EPSV/PASV-svar" #: src/hed/dmc/gridftp/Lister.cpp:788 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "Datakanal: %d.%d.%d.%d:%d" #: src/hed/dmc/gridftp/Lister.cpp:806 #, c-format msgid "Data channel: [%s]:%d" msgstr "Datakanal: [%s]:%d" #: src/hed/dmc/gridftp/Lister.cpp:810 msgid "Obtained host and address are not acceptable" msgstr "ErhÃ¥llen värd och adress kan inte accepteras" #: src/hed/dmc/gridftp/Lister.cpp:820 msgid "Failed to open data channel" msgstr "Misslyckades med att öppna datakanal" #: src/hed/dmc/gridftp/Lister.cpp:838 #, c-format msgid "Unsupported protocol in url %s" msgstr "Protokoll i url stöds inte %s" #: src/hed/dmc/gridftp/Lister.cpp:850 msgid "Reusing connection" msgstr "Ã…teranvänder förbindelse" #: src/hed/dmc/gridftp/Lister.cpp:874 #, c-format msgid "Failed connecting to server %s:%d" msgstr "Misslyckades med att ansluta till %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:880 #, c-format msgid "Failed to connect to server %s:%d" msgstr "Misslyckades med att ansluta till %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:896 msgid "Missing authentication information" msgstr "Saknad autentiseringsinformation" #: src/hed/dmc/gridftp/Lister.cpp:905 src/hed/dmc/gridftp/Lister.cpp:919 #, c-format msgid "Bad authentication information: %s" msgstr "Felaktig autentiseringsinformation: %s" #: src/hed/dmc/gridftp/Lister.cpp:928 src/hed/dmc/gridftp/Lister.cpp:943 #, c-format msgid "Failed authenticating: %s" msgstr "Misslyckades med autentisering: %s" #: src/hed/dmc/gridftp/Lister.cpp:935 msgid "Failed authenticating" msgstr "Misslyckades med autentisering" #: src/hed/dmc/gridftp/Lister.cpp:970 src/hed/dmc/gridftp/Lister.cpp:1126 #, c-format msgid "DCAU failed: %s" msgstr "DCAU misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:974 src/hed/dmc/gridftp/Lister.cpp:1131 msgid "DCAU failed" msgstr "DCAU misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:994 msgid "MLST is not supported - trying LIST" msgstr "MLST stöds inte - försöker med LIST" #: src/hed/dmc/gridftp/Lister.cpp:1010 #, c-format msgid "Immediate completion expected: %s" msgstr "Omedelbart slutförande förväntas: %s" #: src/hed/dmc/gridftp/Lister.cpp:1014 msgid "Immediate completion 
expected" msgstr "Omedelbart slutförande förväntas" #: src/hed/dmc/gridftp/Lister.cpp:1027 #, c-format msgid "Missing information in reply: %s" msgstr "Saknad information i svar: %s" #: src/hed/dmc/gridftp/Lister.cpp:1061 #, c-format msgid "Missing final reply: %s" msgstr "Saknat sista svar: %s" #: src/hed/dmc/gridftp/Lister.cpp:1085 #, c-format msgid "Unexpected immediate completion: %s" msgstr "Oväntat omedelbart slutförande: %s" #: src/hed/dmc/gridftp/Lister.cpp:1097 #, c-format msgid "LIST/MLST failed: %s" msgstr "LIST/MLST misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:1102 msgid "LIST/MLST failed" msgstr "LIST/MLST misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:1152 msgid "MLSD is not supported - trying NLST" msgstr "MSLD stöds inte - försöker med NLST" #: src/hed/dmc/gridftp/Lister.cpp:1166 #, c-format msgid "Immediate completion: %s" msgstr "Omedelbart färdigställande: %s" #: src/hed/dmc/gridftp/Lister.cpp:1174 #, c-format msgid "NLST/MLSD failed: %s" msgstr "NLST/MLSD misslyckades: %s" #: src/hed/dmc/gridftp/Lister.cpp:1180 msgid "NLST/MLSD failed" msgstr "NLST/MLSD misslyckades" #: src/hed/dmc/gridftp/Lister.cpp:1201 #, c-format msgid "Data transfer aborted: %s" msgstr "Dataöverföring avbruten: %s" #: src/hed/dmc/gridftp/Lister.cpp:1206 msgid "Data transfer aborted" msgstr "Dataöverföring avbruten" #: src/hed/dmc/gridftp/Lister.cpp:1218 msgid "Failed to transfer data" msgstr "Misslyckades med att överföra data" #: src/hed/dmc/http/DataPointHTTP.cpp:409 #: src/hed/dmc/http/DataPointHTTP.cpp:597 #: src/hed/dmc/http/DataPointHTTP.cpp:691 #: src/hed/dmc/http/DataPointHTTP.cpp:1137 #: src/hed/dmc/http/DataPointHTTP.cpp:1282 #: src/hed/dmc/http/DataPointHTTP.cpp:1431 #, c-format msgid "Redirecting to %s" msgstr "Omdirigerar till %s" #: src/hed/dmc/http/DataPointHTTP.cpp:461 #, c-format msgid "PROPFIND response: %s" msgstr "PROPFIND-svar: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:515 #, c-format msgid "Using checksum %s" msgstr "Använder checksumma %s" #: src/hed/dmc/http/DataPointHTTP.cpp:523 #, c-format msgid "No matching checksum type, using first in list %s" msgstr "Ingen matchande typ av checksumma, använder första i listan %s" #: src/hed/dmc/http/DataPointHTTP.cpp:616 #: src/hed/dmc/http/DataPointHTTP.cpp:710 msgid "No information returned by PROPFIND" msgstr "Ingen information returnerad av PROPFIND" #: src/hed/dmc/http/DataPointHTTP.cpp:767 #, c-format msgid "Stat: obtained size %llu" msgstr "Stat: erhÃ¥llen storlek %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:771 #, c-format msgid "Stat: obtained modification time %s" msgstr "Stat: erhÃ¥llen ändringstid %s" #: src/hed/dmc/http/DataPointHTTP.cpp:775 #, c-format msgid "Stat: obtained checksum %s" msgstr "Stat: erhÃ¥llen checksumma %s" #: src/hed/dmc/http/DataPointHTTP.cpp:991 #, c-format msgid "Could not find checksum: %s" msgstr "Kunde inte hitta checksumma: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:993 #, c-format msgid "Checksum of %s is not available" msgstr "Checksumma för %s är inte tillgänglig" #: src/hed/dmc/http/DataPointHTTP.cpp:1037 #, c-format msgid "Check: obtained size %llu" msgstr "Check: erhÃ¥llen storlek %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:1039 #, c-format msgid "Check: obtained modification time %s" msgstr "Check: erhÃ¥llen ändringstid %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1154 #: src/hed/dmc/http/DataPointHTTP.cpp:1302 #, c-format msgid "HTTP failure %u - %s" msgstr "HTTP-fel %u - %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1459 #, c-format msgid "Failed to create %s, trying to create parent 
directories" msgstr "Misslyckades med att skapa %s, försöker skapa föräldrakataloger" #: src/hed/dmc/http/DataPointHTTP.cpp:1648 #, c-format msgid "Error creating directory: %s" msgstr "Fel vid skapande av katalog: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:27 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "Byter ut existerande token för %s i Rucios token-cache" #: src/hed/dmc/rucio/DataPointRucio.cpp:40 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" "Hittade existerande token för %s i Rucios token-cache vars giltighetstid gÃ¥r " "ut %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:43 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "Rucios token för %s har gÃ¥tt ut eller är pÃ¥ väg att gÃ¥ ut" #: src/hed/dmc/rucio/DataPointRucio.cpp:105 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "Extraherade smeknamn %s frÃ¥n referenser att använda som Rucio-konto" #: src/hed/dmc/rucio/DataPointRucio.cpp:108 msgid "Failed to extract VOMS nickname from proxy" msgstr "Misslyckades med att extrahera VOMS-smeknamn frÃ¥n proxy" #: src/hed/dmc/rucio/DataPointRucio.cpp:110 #, c-format msgid "Using Rucio account %s" msgstr "Använder Rucio-konto %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:123 #, c-format msgid "Strange path in Rucio URL: %s" msgstr "Underlig sökväg i Rucio-URL: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:133 src/hed/libs/common/FileLock.cpp:42 msgid "Cannot determine hostname from gethostname()" msgstr "Kan inte bestämma värdnamn frÃ¥n gethostname()" #: src/hed/dmc/rucio/DataPointRucio.cpp:171 #, c-format msgid "Bad path for %s: Format should be /replicas//" msgstr "Felaktig sökväg för %s: Format ska vara /replicas//" #: src/hed/dmc/rucio/DataPointRucio.cpp:190 #, c-format msgid "Failed to query parent DIDs: %s" msgstr "Misslyckades med att frÃ¥ga föräldra-DIDer: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:195 #, c-format msgid "Failed to parse Rucio info: %s" msgstr "Misslyckades med att tolka Rucio-information: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:242 #: src/hed/dmc/rucio/DataPointRucio.cpp:522 #, c-format msgid "No locations found for %s" msgstr "Inga platser hittade för %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:333 #, c-format msgid "Acquired auth token for %s: %s" msgstr "Erhöll autentiserings-token för %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:383 #, c-format msgid "Rucio returned %s" msgstr "Rucio returnerade: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:452 #: src/hed/dmc/rucio/DataPointRucio.cpp:543 #, c-format msgid "Failed to parse Rucio response: %s" msgstr "Misslyckades med att tolka Rucio-svar: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:457 #: src/hed/dmc/rucio/DataPointRucio.cpp:548 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "Filnamn returnerades inte i Rucio-svar: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:462 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "Oväntat namn returnerat i Rucio-svar: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:467 #, c-format msgid "No pfns returned in Rucio response: %s" msgstr "Inga fysiska filnamn (PFN) returnerade i Rucio-svar: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:477 #, c-format msgid "Cannot determine replica type for %s" msgstr "Kan inte bestämma replika-typ för %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:479 #, c-format msgid "%s: replica type %s" msgstr "%s: replika-typ %s" #: 
src/hed/dmc/rucio/DataPointRucio.cpp:482 #, c-format msgid "Skipping %s replica %s" msgstr "Hoppar över %s replika %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:498 #, c-format msgid "Error extracting RSE for %s" msgstr "Fel vid extrahering av RSE för %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:508 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "Ingen information om filstorlek returnerad i Rucio-svar för %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:511 #, c-format msgid "%s: size %llu" msgstr "%s: storlek %llu" #: src/hed/dmc/rucio/DataPointRucio.cpp:515 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "Ingen information om checksumma returnerad i Rucio-svar för %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:518 #, c-format msgid "%s: checksum %s" msgstr "%s: checksumma %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:553 #, c-format msgid "Parent dataset: %s" msgstr "Föräldra-dataset: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:575 #, c-format msgid "Could not find matching RSE to %s" msgstr "Kunde inte hitta matchande RSE till %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:617 #, c-format msgid "Sending Rucio trace: %s" msgstr "Sänder Rucio-spår: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:620 #, c-format msgid "Failed to send traces to Rucio: %s" msgstr "Misslyckades med att sända spår till Rucio: %s" #: src/hed/dmc/s3/DataPointS3.cpp:269 #, c-format msgid "Initializing S3 connection to %s" msgstr "Initierar S3-förbindelse till %s" #: src/hed/dmc/s3/DataPointS3.cpp:274 #, c-format msgid "Failed to initialize S3 to %s: %s" msgstr "Misslyckades med att initiera S3 till %s: %s" #: src/hed/dmc/s3/DataPointS3.cpp:470 src/hed/dmc/s3/DataPointS3.cpp:592 #, c-format msgid "Failed to read object %s: %s; %s" msgstr "Misslyckades med att läsa objekt %s: %s; %s" #: src/hed/dmc/s3/DataPointS3.cpp:669 #, c-format msgid "Failed to write object %s: %s; %s" msgstr "Misslyckades med att skriva objekt %s: %s; %s" #: src/hed/dmc/srm/DataPointSRM.cpp:56 #, c-format msgid "TURL %s cannot be handled" msgstr "TURL %s kan inte hanteras" #: src/hed/dmc/srm/DataPointSRM.cpp:83 #, c-format msgid "Check: looking for metadata: %s" msgstr "Check: letar efter metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:94 #, c-format msgid "Check: obtained size: %lli" msgstr "Check: erhållen storlek: %lli" #: src/hed/dmc/srm/DataPointSRM.cpp:100 #, c-format msgid "Check: obtained checksum: %s" msgstr "Check: erhållen checksumma: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:104 #, c-format msgid "Check: obtained modification date: %s" msgstr "Check: erhållen ändringstid: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:108 msgid "Check: obtained access latency: low (ONLINE)" msgstr "Check: erhållen åtkomst-latency: låg (ONLINE)" #: src/hed/dmc/srm/DataPointSRM.cpp:112 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "Check: erhållen åtkomst-latency: hög (NEARLINE)" #: src/hed/dmc/srm/DataPointSRM.cpp:131 #, c-format msgid "Remove: deleting: %s" msgstr "Remove: tar bort: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:149 #, c-format msgid "Creating directory: %s" msgstr "Skapar katalog: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:197 src/hed/dmc/srm/DataPointSRM.cpp:246 msgid "Calling PrepareReading when request was already prepared!" msgstr "Anropar PrepareReading när begäran redan förberetts!" 
#: src/hed/dmc/srm/DataPointSRM.cpp:217 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "Filen %s är NEARLINE, kommer att göra begäran att bringa online" #: src/hed/dmc/srm/DataPointSRM.cpp:226 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "Begäran att bringa online %s är fortfarande i kö, ska vänta" #: src/hed/dmc/srm/DataPointSRM.cpp:231 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" "Begäran att bringa online %s avslutades framgångsrikt, filen är nu ONLINE" #: src/hed/dmc/srm/DataPointSRM.cpp:237 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Dålig logik för %s - bringOnline returnerade OK men SRM-begäran har inte " "avslutats framgångsrikt eller pågår" #: src/hed/dmc/srm/DataPointSRM.cpp:265 src/hed/dmc/srm/DataPointSRM.cpp:408 msgid "None of the requested transfer protocols are supported" msgstr "Inget av de begärda överföringsprotokollen stöds" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "Begäran att hämta %s är fortfarande i kö, ska vänta %i sekunder" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:465 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "Kontrollerar URL returnerad av SRM: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "SRM returnerade inga användbara överförings-URLer: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Dålig logik för %s - getTURLs returnerade OK men SRM-begäran har inte " "avslutats framgångsrikt eller pågår" #: src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "StartReading" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "StartReading: Fil förbereddes inte på rätt sätt" #: src/hed/dmc/srm/DataPointSRM.cpp:328 src/hed/dmc/srm/DataPointSRM.cpp:507 #, c-format msgid "Redirecting to new URL: %s" msgstr "Omdirigerar till ny URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:389 msgid "Calling PrepareWriting when request was already prepared!" 
msgstr "Anropar PrepareWriting när begäran redan förberetts" #: src/hed/dmc/srm/DataPointSRM.cpp:418 msgid "No space token specified" msgstr "Inget spacetoken angivet" #: src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "Varning: Använder SRM-protokoll v1 som inte stöder spacetoken" #: src/hed/dmc/srm/DataPointSRM.cpp:427 #, c-format msgid "Using space token description %s" msgstr "Använder spacetokenbeskrivning: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:433 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "Fel vid uppslagning av spacetoken som matchar beskrivning %s" #: src/hed/dmc/srm/DataPointSRM.cpp:437 #, c-format msgid "No space tokens found matching description %s" msgstr "Hittade inget spacetoken som matchar beskrivning %s" #: src/hed/dmc/srm/DataPointSRM.cpp:442 #, c-format msgid "Using space token %s" msgstr "Använder spacetoken: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "Begäran att spara %s är fortfarande i kö, ska vänta %i sekunder" #: src/hed/dmc/srm/DataPointSRM.cpp:487 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" "DÃ¥lig logik för %s - putTURLs returnerade OK men SRM-begäran har inte " "avslutats framgÃ¥ngsrikt eller pÃ¥gÃ¥r" #: src/hed/dmc/srm/DataPointSRM.cpp:495 msgid "StartWriting" msgstr "StartWriting" #: src/hed/dmc/srm/DataPointSRM.cpp:497 msgid "StartWriting: File was not prepared properly" msgstr "StartWriting: Fil förbereddes inte pÃ¥ rätt sätt" #: src/hed/dmc/srm/DataPointSRM.cpp:556 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "FinishWriting: letar efter metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:571 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "FinishWriting: erhÃ¥llen checksumma: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:574 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" "Beräknad/tillhandahÃ¥llen överföringschecksumma %s stämmer överens med " "checksumma rapporterad av SRM-destinationen %s" #: src/hed/dmc/srm/DataPointSRM.cpp:577 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" "Beräknad/tillhandahÃ¥llen överföringschecksumma %s stämmer inte överens med " "checksumma rapporterad av SRM-destinationen (%s)" #: src/hed/dmc/srm/DataPointSRM.cpp:580 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" "Typ av checksumma frÃ¥n SRM (%s) och beräknad/tillhandahÃ¥llen checksumma (%s) " "är olika, kan inte jämföra" #: src/hed/dmc/srm/DataPointSRM.cpp:581 src/hed/dmc/srm/DataPointSRM.cpp:582 msgid "No checksum information from server" msgstr "Ingen information om checksumma frÃ¥n server" #: src/hed/dmc/srm/DataPointSRM.cpp:583 src/hed/dmc/srm/DataPointSRM.cpp:584 msgid "No checksum verification possible" msgstr "Ingen verifiering av checksumma möjlig" #: src/hed/dmc/srm/DataPointSRM.cpp:590 msgid "Failed to release completed request" msgstr "Misslyckades med att frigöra slutförd begäran" #: src/hed/dmc/srm/DataPointSRM.cpp:633 src/hed/dmc/srm/DataPointSRM.cpp:700 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "ListFiles: letar efter metadata: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:818 #, c-format msgid 
"plugin for transport protocol %s is not installed" msgstr "plugin för överföringsprotokoll %s är inte installerad" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:51 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:90 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:142 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:181 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:221 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:259 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:303 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:365 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:438 msgid "SRM did not return any information" msgstr "SRM returnerade inte nÃ¥gon information" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:316 #, c-format msgid "File could not be moved to Running state: %s" msgstr "Fil kunde inte flyttas till tillstÃ¥nd Running: %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:372 msgid "SRM did not return any useful information" msgstr "SRM returnerade inte nÃ¥gon användbar information" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:450 msgid "File could not be moved to Done state" msgstr "Fil kunde inte flyttas till tillstÃ¥nd Done" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:88 msgid "Could not determine version of server" msgstr "Kunde inte bestämma servers version" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:94 #, c-format msgid "Server SRM version: %s" msgstr "Server-SRM-version: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:99 #, c-format msgid "Server implementation: %s" msgstr "Server-implementering: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:136 #, c-format msgid "Adding space token %s" msgstr "Lägger till spacetoken: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:163 msgid "No request tokens found" msgstr "Hittade inga begäran-token" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:176 #, c-format msgid "Adding request token %s" msgstr "Lägger till begäran-token %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:237 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:642 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:828 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1385 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Fil-begäran %s i SRM-kö. Väntar i %i sekunder" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:275 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:327 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:698 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:764 #, c-format msgid "File is ready! TURL is %s" msgstr "Fil är klar! TURL är %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:359 #, c-format msgid "Setting userRequestDescription to %s" msgstr "Sätter userRequestDescription till %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:414 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Begäran om att bringa online %s i SRM-kö. Väntar i %i sekunder" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:457 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1160 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1194 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1228 msgid "No request token specified!" msgstr "Inget begäran-token angivet!" 
#: src/hed/dmc/srm/srmclient/SRM22Client.cpp:524 msgid "Request is reported as ABORTED, but all files are done" msgstr "Begäran rapporteras som avbruten, men alla filer är färdiga" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:530 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "Begäran rapporteras som avbruten, eftersom den avbröts" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:536 #, c-format msgid "Request is reported as ABORTED. Reason: %s" msgstr "Begäran rapporteras som avbruten. Orsak: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:673 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:745 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "Sökväg %s är ogiltig, skapar nödvändiga kataloger" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:678 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:750 #, c-format msgid "Error creating required directories for %s" msgstr "Fel vid skapande av nödvändiga kataloger för %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:851 msgid "Too many files in one request - please try again with fewer files" msgstr "För många filer i en begäran - försök igen med färre filer" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:899 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" "Katalogstorleken är för stor för att lista i ett anrop, kommer att behöva " "anropa flera gånger" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:936 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" "Misslyckades med att tolka svar från server - en del information kan vara " "felaktig" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:942 #: src/hed/shc/legacy/auth_otokens.cpp:437 #, c-format msgid "%s: %s" msgstr "%s: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:975 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" "Katalogstorleken är större än %i filer, kommer att behöva anropa flera gånger" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1185 #, c-format msgid "Files associated with request token %s released successfully" msgstr "Filer associerade med begäran-token %s frigjordes framgångsrikt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1219 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "Filer associerade med begäran-token %s sparades framgångsrikt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1254 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "Filer associerade med begäran-token %s avbröts framgångsrikt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1271 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" "Misslyckades med att hitta metadatainformation för %s för att bestämma " "borttagande av fil eller katalog" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1277 msgid "Type is file, calling srmRm" msgstr "Typ är fil, anropar srmRm" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is dir, calling srmRmDir" msgstr "Typ är katalog, anropar srmRmDir" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "File type is not available, attempting file delete" msgstr "Filtyp är inte tillgänglig, försöker med borttagande av fil" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1288 msgid "File delete failed, attempting directory delete" msgstr "Borttagande av fil misslyckades, försöker med borttagande av katalog" #: 
src/hed/dmc/srm/srmclient/SRM22Client.cpp:1313 #, c-format msgid "File %s removed successfully" msgstr "Fil %s borttagen framgångsrikt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1340 #, c-format msgid "Directory %s removed successfully" msgstr "Katalog %s borttagen framgångsrikt" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1455 #, c-format msgid "Checking for existence of %s" msgstr "Kontrollerar om %s finns" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1458 #, c-format msgid "File already exists: %s" msgstr "Filen finns redan: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1495 #, c-format msgid "Error creating directory %s: %s" msgstr "Fel vid skapande av katalog %s: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "Försöker att kontakta %s på port %i" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "Sparar port %i för %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "Ingen port lyckades för %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "URL %s överensstämmer inte med sparad SRM-info, provar ny info" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "Byter ut gammal SRM-info mot ny för URL %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, c-format msgid "SOAP request: %s" msgstr "SOAP-begäran: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "SOAP-fel: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "Återansluter" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "SRM-klientstatus: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "Inget SOAP-svar" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #, c-format msgid "SOAP response: %s" msgstr "SOAP-svar: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:75 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:161 #, c-format msgid "Failed to acquire lock on file %s" msgstr "Misslyckades med att erhålla lås för fil %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:80 #, c-format msgid "Error reading info from file %s:%s" msgstr "Fel vid läsning av information från fil %s:%s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:94 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:186 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "Felaktigt eller gammalt format upptäckt i fil %s, på rad %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:99 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "Kan inte konvertera sträng %s till heltal på rad %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:202 #, c-format msgid "Error writing srm info file %s" msgstr "Fel vid skrivning av SRM-infofil %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:81 msgid "" "Missing reference to factory and/or module. It is unsafe to use Xrootd in " "non-persistent mode - Xrootd code is disabled. Report to developers." msgstr "" "Saknar referens till fabrik och/eller modul. Det är osäkert att använda " "Xrootd i icke-persistent mode - Xrootd-koden är deaktiverad. Rapportera till " "utvecklare." 
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:120 #, c-format msgid "Could not handle checksum %s: skip checksum check" msgstr "Kunde inte hantera checksumma %s: hoppar över kontroll av checksumma" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:126 #, c-format msgid "Failed to create xrootd copy job: %s" msgstr "Misslyckades med att skapa xrootd-kopieringsjobb: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:143 #, c-format msgid "Failed to copy %s: %s" msgstr "Misslyckades med att kopiera %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:194 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "Läser %u byte från byte %llu" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:196 #, c-format msgid "Read %i bytes" msgstr "Läste %i byte" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:227 #, c-format msgid "Could not open file %s for reading: %s" msgstr "Kunde inte öppna fil %s för läsning: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:242 #, c-format msgid "Unable to find file size of %s" msgstr "Kunde inte hitta filstorlek för %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:306 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" "DataPointXrootd::write_file fick position %d och offset %d, måste göra seek" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:329 #, c-format msgid "xrootd write failed: %s" msgstr "xrootd skrivning misslyckades: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:338 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "xrootd close failed: %s" msgstr "xrootd stängning misslyckades: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:361 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "Misslyckades med att öppna %s, försöker skapa föräldrakataloger" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:374 #, c-format msgid "xrootd open failed: %s" msgstr "xrootd öppning misslyckades: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:388 #, c-format msgid "close failed: %s" msgstr "stängning misslyckades: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:430 #, c-format msgid "Read access not allowed for %s: %s" msgstr "Åtkomst för läsning inte tillåten för %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:449 #, c-format msgid "Could not stat file %s: %s" msgstr "Kunde inte göra stat på filen %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:454 msgid "Not getting checksum of zip constituent" msgstr "Hämtar inte checksumma för zip-komponent" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:458 #, c-format msgid "Could not get checksum of %s: %s" msgstr "Kunde inte hämta checksumma för %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:462 #, c-format msgid "Checksum %s" msgstr "Checksumma: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:500 #, c-format msgid "Failed to open directory %s: %s" msgstr "Misslyckades med att öppna katalog %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:518 #, c-format msgid "Error while reading dir %s: %s" msgstr "Fel vid läsande av katalog %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:568 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:586 #, c-format msgid "Error creating required dirs: %s" msgstr "Fel vid skapande av nödvändiga kataloger: %s" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "PDP: %s kan inte laddas in" #: src/hed/identitymap/IdentityMap.cpp:219 src/hed/shc/legacy/LegacyMap.cpp:221 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "Grididentitet mappas till lokal identitet '%s'" #: src/hed/libs/common/ArcLocation.cpp:129 
#, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" "Kan inte bestämma installationsplats. Använder %s. Ange ARC_LOCATION om " "detta inte är korrekt." #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "Tom sträng" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "Kan inte tolka datum: %s" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "Kan inte tolka tid: %s" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "Kan inte tolka tidszon: %s" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "Ogiltigt tidsformat: %s" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "Kan inte tolka mÃ¥nad: %s" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "Ogiltigt ISO-tidsperiodsformat: %s" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "Ogiltig periodsträng: %s" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "timme" msgstr[1] "timmar" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "minut" msgstr[1] "minuter" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "sekund" msgstr[1] "sekunder" #: src/hed/libs/common/FileLock.cpp:92 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "EACCES-fel vid öppnande av lÃ¥sfil %s: %s" #: src/hed/libs/common/FileLock.cpp:96 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "Fel vid öppnande av lÃ¥sfil %s i initial check: %s" #: src/hed/libs/common/FileLock.cpp:103 #, c-format msgid "Error creating temporary file %s: %s" msgstr "Fel vid skapandet av temporär fil %s: %s" #: src/hed/libs/common/FileLock.cpp:113 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "Kunde inte skapa länk till lÃ¥sfil %s eftersom den redan existerar" #: src/hed/libs/common/FileLock.cpp:124 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "Kunde inte skapa lÃ¥sfil %s eftersom den redan existerar" #: src/hed/libs/common/FileLock.cpp:128 #, c-format msgid "Error creating lock file %s: %s" msgstr "Fel vid skapandet av lÃ¥sfil %s: %s" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error writing to lock file %s: %s" msgstr "Fel vid skrivning till lÃ¥sfil %s: %s" #: src/hed/libs/common/FileLock.cpp:141 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "Fel vid länkning av temporär fil %s till lÃ¥sfil %s: %s" #: src/hed/libs/common/FileLock.cpp:150 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "Fel i lÃ¥sfil %s, trots att länkning inte returnerade ett fel" #: src/hed/libs/common/FileLock.cpp:158 #, c-format msgid "%li seconds since lock file %s was created" msgstr "%li sekunder sedan lÃ¥sfilen %s skapades" #: src/hed/libs/common/FileLock.cpp:161 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "Timeout har passerat, kommer 
att ta bort låsfil %s" #: src/hed/libs/common/FileLock.cpp:165 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "Misslyckades med att ta bort gammal låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:178 #, c-format msgid "This process already owns the lock on %s" msgstr "Denna process äger redan låset på %s" #: src/hed/libs/common/FileLock.cpp:182 #, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "Processen som äger låset på %s kör inte längre, kommer att ta bort lås" #: src/hed/libs/common/FileLock.cpp:184 #, c-format msgid "Failed to remove file %s: %s" msgstr "Misslyckades med att ta bort fil %s: %s" #: src/hed/libs/common/FileLock.cpp:193 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "Filen %s är för tillfället låst med ett giltigt lås" #: src/hed/libs/common/FileLock.cpp:210 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "Misslyckades med att låsa upp fil med lås %s: %s" #: src/hed/libs/common/FileLock.cpp:222 #, c-format msgid "Lock file %s doesn't exist" msgstr "Låsfil %s existerar inte" #: src/hed/libs/common/FileLock.cpp:224 #, c-format msgid "Error listing lock file %s: %s" msgstr "Fel vid listning av låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:230 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "Hittade oväntad tom låsfil %s. Måste gå tillbaka till acquire()" #: src/hed/libs/common/FileLock.cpp:236 #, c-format msgid "Error reading lock file %s: %s" msgstr "Fel vid läsning av låsfil %s: %s" #: src/hed/libs/common/FileLock.cpp:240 #, c-format msgid "Error with formatting in lock file %s" msgstr "Formateringsfel i låsfil %s" #: src/hed/libs/common/FileLock.cpp:250 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "Lås %s ägs av en annan värd (%s)" #: src/hed/libs/common/FileLock.cpp:259 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "Felaktigt formaterat pid %s i låsfil %s" #: src/hed/libs/common/FileLock.cpp:262 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "En annan process (%s) äger låset på fil %s" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "(tom)" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "(null)" #: src/hed/libs/common/Logger.cpp:58 #, c-format msgid "Invalid log level. Using default %s." msgstr "Ogiltig logg-nivå. Använder förval %s." #: src/hed/libs/common/Logger.cpp:123 #, c-format msgid "Invalid old log level. Using default %s." msgstr "Ogiltig gammal logg-nivå. Använder förval %s." #: src/hed/libs/common/OptionParser.cpp:106 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "Kan inte tolka heltalsvärdet '%s' för -%c" #: src/hed/libs/common/OptionParser.cpp:309 #: src/hed/libs/common/OptionParser.cpp:442 #, c-format msgid "Options Group %s:" msgstr "Alternativgrupp %s:" #: src/hed/libs/common/OptionParser.cpp:311 #: src/hed/libs/common/OptionParser.cpp:445 #, c-format msgid "%s:" msgstr "%s:" #: src/hed/libs/common/OptionParser.cpp:313 #, c-format msgid "Show %s help options" msgstr "Visa %s hjälpalternativ" #: src/hed/libs/common/OptionParser.cpp:348 msgid "Use -? to get usage description" msgstr "Använd -? 
för att få användningsbeskrivning" #: src/hed/libs/common/OptionParser.cpp:425 msgid "Usage:" msgstr "Användning:" #: src/hed/libs/common/OptionParser.cpp:428 msgid "OPTION..." msgstr "ALTERNATIV..." #: src/hed/libs/common/OptionParser.cpp:434 msgid "Help Options:" msgstr "Hjälpalternativ:" #: src/hed/libs/common/OptionParser.cpp:435 msgid "Show help options" msgstr "Visa hjälpalternativ" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" "Elementet \"%s\" i profilen ignoreras: värdet på \"inisections\"-attributet " "kan inte vara en tom sträng." #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" "Elementet \"%s\" i profilen ignoreras: värdet på \"initag\"-attributet kan " "inte vara en tom sträng." #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" "Elementet \"%s\" i profilen ignoreras: värdet på \"initype\"-attributet kan " "inte vara en tom sträng." #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" "Elementet \"%s\" i profilen ignoreras: värdet på \"inidefaultvalue\"-" "attributet kan inte anges när \"inisections\"- och \"initag\"-attributen " "inte angivits." #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." msgstr "" "I inställningsprofilen har 'initype'-attributet på \"%s\"-elementet ett " "ogiltigt värde \"%s\"." 
#: src/hed/libs/common/Run_unix.cpp:225 msgid "Child monitoring signal detected" msgstr "Barnprocessmonitorering signal upptäckt" #: src/hed/libs/common/Run_unix.cpp:230 #, c-format msgid "Child monitoring error: %i" msgstr "Barnprocessmonitorering fel: %i" #: src/hed/libs/common/Run_unix.cpp:243 msgid "Child monitoring kick detected" msgstr "Barnprocessmonitorering kick upptäckt" #: src/hed/libs/common/Run_unix.cpp:246 msgid "Child monitoring internal communication error" msgstr "Barnprocessmonitorering internt kommunikationsfel" #: src/hed/libs/common/Run_unix.cpp:258 msgid "Child monitoring stdout is closed" msgstr "Barnprocessmonitorering stdout är stängd" #: src/hed/libs/common/Run_unix.cpp:268 msgid "Child monitoring stderr is closed" msgstr "Barnprocessmonitorering stderr är stängd" #: src/hed/libs/common/Run_unix.cpp:278 msgid "Child monitoring stdin is closed" msgstr "Barnprocessmonitorering stdin är stängd" #: src/hed/libs/common/Run_unix.cpp:296 #, c-format msgid "Child monitoring child %d exited" msgstr "Barnprocessmonitorering barnprocess %d avslutades" #: src/hed/libs/common/Run_unix.cpp:300 #, c-format msgid "Child monitoring lost child %d (%d)" msgstr "Barnprocessmonitorering borttappad barnprocess %d (%d)" #: src/hed/libs/common/Run_unix.cpp:321 #, c-format msgid "Child monitoring drops abandoned child %d (%d)" msgstr "Barnprocessmonitorering släpper övergiven barnprocess %d (%d)" #: src/hed/libs/common/Run_unix.cpp:484 msgid "Child was already started" msgstr "Barnprocess har redan startats" #: src/hed/libs/common/Run_unix.cpp:488 msgid "No arguments are assigned for external process" msgstr "Inga argument har tilldelats extern process" #: src/hed/libs/common/Run_unix.cpp:621 #, c-format msgid "Excepton while trying to start external process: %s" msgstr "Undantag under försök att starta extern process: %s" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "Konvertering misslyckades: %s" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "Hela strängen användes inte: %s" #: src/hed/libs/common/Thread.cpp:256 msgid "Maximum number of threads running - putting new request into queue" msgstr "Maximalt antal trådar kör - ställer ny begäran i kö" #: src/hed/libs/common/Thread.cpp:304 #, c-format msgid "Thread exited with Glib error: %s" msgstr "Tråd avslutades med Glib-fel: %s" #: src/hed/libs/common/Thread.cpp:306 #, c-format msgid "Thread exited with generic exception: %s" msgstr "Tråd avslutades med generellt undantag: %s" #: src/hed/libs/common/URL.cpp:137 #, c-format msgid "URL is not valid: %s" msgstr "URL är inte giltig: %s" #: src/hed/libs/common/URL.cpp:188 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "Ogiltig URL - sökväg måste vara absolut: %s" #: src/hed/libs/common/URL.cpp:193 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "Ogiltig URL - inget värdnamn angivet: %s" #: src/hed/libs/common/URL.cpp:282 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "Ogiltig URL - sökväg måste vara absolut eller tom: %s" #: src/hed/libs/common/URL.cpp:298 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "Ogiltig URL - hittade ingen avslutande ] för IPv6-adress: %s" #: src/hed/libs/common/URL.cpp:306 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "Ogiltig URL - avslutande ] för IPv6-adress följs av ogiltigt token: %s" #: src/hed/libs/common/URL.cpp:322 
#, c-format msgid "Invalid port number in %s" msgstr "Ogiltigt portnummer i %s" #: src/hed/libs/common/URL.cpp:455 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "Okänt LDAP-scope %s - använder base" #: src/hed/libs/common/URL.cpp:618 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "Försöker tilldela relativ sökväg till URL - gör den absolut" #: src/hed/libs/common/URL.cpp:717 #, c-format msgid "URL option %s does not have format name=value" msgstr "URL-alternativ %s har inte formatet namn=värde" #: src/hed/libs/common/URL.cpp:1186 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "urllistan %s innehÃ¥ller ogiltig URL: %s" #: src/hed/libs/common/URL.cpp:1191 #, c-format msgid "URL protocol is not urllist: %s" msgstr "URL-protokollet är inte urllist: %s" #: src/hed/libs/common/UserConfig.cpp:38 src/hed/libs/common/UserConfig.cpp:831 #: src/hed/libs/common/UserConfig.cpp:840 #: src/hed/libs/common/UserConfig.cpp:846 #: src/hed/libs/common/UserConfig.cpp:872 #: src/hed/libs/common/UserConfig.cpp:884 #: src/hed/libs/common/UserConfig.cpp:896 #: src/hed/libs/common/UserConfig.cpp:916 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "Mer än ett %s-attribut i inställningsfil (%s)" #: src/hed/libs/common/UserConfig.cpp:139 #, c-format msgid "Wrong ownership of certificate file: %s" msgstr "Fel ägare för certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:141 #, c-format msgid "Wrong permissions of certificate file: %s" msgstr "Fel Ã¥tkomsträttigheter för certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:143 #, c-format msgid "Can not access certificate file: %s" msgstr "Kan inte komma Ã¥t certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:150 #, c-format msgid "Wrong ownership of key file: %s" msgstr "Fel ägare för nyckelfil: %s" #: src/hed/libs/common/UserConfig.cpp:152 #, c-format msgid "Wrong permissions of key file: %s" msgstr "Fel Ã¥tkomsträttigheter för nyckelfil: %s" #: src/hed/libs/common/UserConfig.cpp:154 #, c-format msgid "Can not access key file: %s" msgstr "Kan inte komma Ã¥t nyckelfil: %s" #: src/hed/libs/common/UserConfig.cpp:161 #, c-format msgid "Wrong ownership of proxy file: %s" msgstr "Fel ägare för proxyfil: %s" #: src/hed/libs/common/UserConfig.cpp:163 #, c-format msgid "Wrong permissions of proxy file: %s" msgstr "Fel Ã¥tkomsträttigheter för proxyfil: %s" #: src/hed/libs/common/UserConfig.cpp:165 #, c-format msgid "Can not access proxy file: %s" msgstr "Kan inte komma Ã¥t proxyfil: %s" #: src/hed/libs/common/UserConfig.cpp:176 msgid "computing" msgstr "beräkning" #: src/hed/libs/common/UserConfig.cpp:178 msgid "index" msgstr "indexering" #: src/hed/libs/common/UserConfig.cpp:277 #: src/hed/libs/common/UserConfig.cpp:281 #: src/hed/libs/common/UserConfig.cpp:328 #: src/hed/libs/common/UserConfig.cpp:332 #, c-format msgid "System configuration file (%s) contains errors." msgstr "Systeminställningsfil (%s) innehÃ¥ller fel." #: src/hed/libs/common/UserConfig.cpp:285 #: src/hed/libs/common/UserConfig.cpp:336 #, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "Systeminställningsfil (%s eller %s) existerar inte." #: src/hed/libs/common/UserConfig.cpp:287 #: src/hed/libs/common/UserConfig.cpp:338 #, c-format msgid "System configuration file (%s) does not exist." msgstr "Systeminställningsfil (%s) existerar inte." 
#: src/hed/libs/common/UserConfig.cpp:293 #: src/hed/libs/common/UserConfig.cpp:305 #: src/hed/libs/common/UserConfig.cpp:344 #: src/hed/libs/common/UserConfig.cpp:356 #, c-format msgid "User configuration file (%s) contains errors." msgstr "Användarinställningsfil (%s) innehåller fel." #: src/hed/libs/common/UserConfig.cpp:298 #: src/hed/libs/common/UserConfig.cpp:349 msgid "No configuration file could be loaded." msgstr "Ingen inställningsfil kunde laddas in." #: src/hed/libs/common/UserConfig.cpp:301 #: src/hed/libs/common/UserConfig.cpp:352 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" "Användarinställningsfil (%s) existerar inte eller kunde inte laddas in." #: src/hed/libs/common/UserConfig.cpp:438 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" "Kunde inte tolka den angivna debugnivån (%s) till en av de tillåtna nivåerna" #: src/hed/libs/common/UserConfig.cpp:450 #, c-format msgid "" "Unsupported job list type '%s', using 'SQLITE'. Supported types are: SQLITE, " "XML." msgstr "" "Jobblisttyp '%s' stöds inte, använder 'SQLITE'. Typer som stöds är: SQLITE, " "XML." #: src/hed/libs/common/UserConfig.cpp:511 msgid "Loading OToken failed - ignoring its presence" msgstr "Inladdning av OToken misslyckades - ignorerar dess närvaro" #: src/hed/libs/common/UserConfig.cpp:652 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" "Hittade inte certifikat och nyckel ('%s' och '%s') i någon av sökvägarna: %s" #: src/hed/libs/common/UserConfig.cpp:654 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or " "'%s' attributes in the client configuration file (e.g. '%s')" msgstr "" "Om proxy eller certifikat/nyckel existerar, kan du ange deras platser " "manuellt via miljövariablerna '%s'/'%s' eller '%s', eller attributen " "'%s'/'%s' eller '%s' i klientinställningsfilen (t.ex. '%s')" #: src/hed/libs/common/UserConfig.cpp:672 #: src/hed/libs/common/UserConfig.cpp:682 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." msgstr "" "Kan inte komma åt CA-certifikatkatalog: %s. 
Certifikaten kommer inte att " "verifieras." #: src/hed/libs/common/UserConfig.cpp:708 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" "Kan inte hitta CA-certifikatkatalogen på förvalda platser:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "Certifikaten kommer inte att verifieras.\n" "Om CA-certifikatkatalogen existerar, ange dess plats manuellt via\n" "miljövariabeln X509_CERT_DIR, eller attributet cacertificatesdirectory i " "client.conf\n" #: src/hed/libs/common/UserConfig.cpp:730 #, c-format msgid "Using proxy file: %s" msgstr "Använder proxyfil: %s" #: src/hed/libs/common/UserConfig.cpp:733 #, c-format msgid "Using certificate file: %s" msgstr "Använder certifikatfil: %s" #: src/hed/libs/common/UserConfig.cpp:734 #, c-format msgid "Using key file: %s" msgstr "Använder nyckelfil: %s" #: src/hed/libs/common/UserConfig.cpp:738 #, c-format msgid "Using CA certificate directory: %s" msgstr "Använder CA-certifikatkatalog: %s" #: src/hed/libs/common/UserConfig.cpp:742 msgid "Using OToken" msgstr "Använder OToken" #: src/hed/libs/common/UserConfig.cpp:755 #: src/hed/libs/common/UserConfig.cpp:761 #, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "Kan inte komma åt VOMSES-fil/katalog: %s." #: src/hed/libs/common/UserConfig.cpp:767 #, c-format msgid "Can not access VOMS file/directory: %s." msgstr "Kan inte komma åt VOMS-fil/katalog: %s." #: src/hed/libs/common/UserConfig.cpp:781 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" "Kan inte hitta voms-tjänst-inställningsfil (vomses) på förvalda platser: ~/." 
"arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-" "security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" #: src/hed/libs/common/UserConfig.cpp:794 #, c-format msgid "Loading configuration (%s)" msgstr "Laddar in inställningar (%s)" #: src/hed/libs/common/UserConfig.cpp:828 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" "Värdet pÃ¥ timeout-attributet i inställningsfilen (%s) tolkades endast delvis" #: src/hed/libs/common/UserConfig.cpp:853 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" "brokerarguments-attributet kan endast användas i kombination med brokername-" "attributet" #: src/hed/libs/common/UserConfig.cpp:869 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" "Värdet pÃ¥ keysize-attributet i inställningsfilen (%s) tolkades endast delvis" #: src/hed/libs/common/UserConfig.cpp:891 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" "Kunde inte konvertera slcs-attributvärdet (%s) till en URL-instans i " "inställningsfilen (%s)" #: src/hed/libs/common/UserConfig.cpp:937 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "Angiven överlagringsfil (%s) existerar inte." #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" "Okänt attribut %s i common-sektionen i inställningsfilen (%s), ignorerar det" #: src/hed/libs/common/UserConfig.cpp:982 #, c-format msgid "Unknown section %s, ignoring it" msgstr "Okänd sektion %s, ignorerar den" #: src/hed/libs/common/UserConfig.cpp:986 #, c-format msgid "Configuration (%s) loaded" msgstr "Inställningar (%s) har laddats in" #: src/hed/libs/common/UserConfig.cpp:989 #, c-format msgid "Could not load configuration (%s)" msgstr "Kunde inte ladda in inställningar (%s)" #: src/hed/libs/common/UserConfig.cpp:1086 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "Användarinställningar sparade till fil (%s)" #: src/hed/libs/common/UserConfig.cpp:1099 #, c-format msgid "Unable to create %s directory." msgstr "Kunde inte skapa %s katalog." #: src/hed/libs/common/UserConfig.cpp:1108 #, c-format msgid "Configuration example file created (%s)" msgstr "Exempel-inställningsfil skapades (%s)" #: src/hed/libs/common/UserConfig.cpp:1110 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" "Kunde inte kopiera exempelinställningar frÃ¥n existerande inställningar (%s)" #: src/hed/libs/common/UserConfig.cpp:1115 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "Kan inte kopiera exempelinställningar (%s), det är inte en vanlig fil" #: src/hed/libs/common/UserConfig.cpp:1120 #, c-format msgid "Example configuration (%s) not created." msgstr "Exempelinställningar (%s) skapades inte." #: src/hed/libs/common/UserConfig.cpp:1125 #, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "Den förvalda inställningsfilen (%s) är inte en vanlig fil." 
#: src/hed/libs/common/UserConfig.cpp:1143 #, c-format msgid "%s directory created" msgstr "%s-katalog skapad" #: src/hed/libs/common/UserConfig.cpp:1145 #: src/hed/libs/common/UserConfig.cpp:1172 src/hed/libs/data/DataMover.cpp:703 #, c-format msgid "Failed to create directory %s" msgstr "Misslyckades med att skapa katalog %s" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "Detta VERBOSE-meddelande borde inte ses" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "Detta INFO-meddelande borde ses" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "Detta VERBOSE-meddelande borde nu ses" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "Detta INFO-meddelande borde också ses" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "Detta meddelande går till ursprungliga destinationen" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "Detta meddelande går till per-tråd-destinationen" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "Begäran misslyckades: Inget svar från SPService" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "Begäran misslyckades: svar från SPService är inte som förväntat" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "Autentiseringsbegäran-URL: %s" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "Begäran misslyckades: Inget svar från IdP" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "Begäran misslyckades: Inget svar från IdP när omdirigering görs" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" "Begäran misslyckades: svar från IdP är inte som förväntat när omdirigering " "görs" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "Begäran misslyckades: Inget svar från IdP när autentisering görs" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" "Begäran misslyckades: svar från IdP är inte som förväntat när autentisering " "görs" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:312 msgid "Succeeded to verify the signature under " msgstr "Lyckades verifiera signaturen under " #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "Failed to verify the signature under " msgstr "Misslyckades med att verifiera signaturen under " #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" "Begäran misslyckades: Inget svar från SP-tjänsten när SAML-assertion sänds " "till SP" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML 
assertion to SP" msgstr "" "Begäran misslyckades: svar frÃ¥n SP-tjänsten är inte som förväntat när SAML-" "assertion sänds till SP" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "IdP returnerar ett felmeddelande: %s" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "SAML2SSO-process misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:56 msgid "Creating delegation credential to ARC delegation service" msgstr "Skapar delegeringsreferens till ARC delegeringstjänst" #: src/hed/libs/communication/ClientX509Delegation.cpp:66 #: src/hed/libs/communication/ClientX509Delegation.cpp:269 msgid "DelegateCredentialsInit failed" msgstr "DelegateCredentialsInit misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:70 #: src/hed/libs/communication/ClientX509Delegation.cpp:124 #: src/hed/libs/communication/ClientX509Delegation.cpp:159 #: src/hed/libs/communication/ClientX509Delegation.cpp:214 #: src/hed/libs/communication/ClientX509Delegation.cpp:273 msgid "There is no SOAP response" msgstr "Det finns inget SOAP-svar" #: src/hed/libs/communication/ClientX509Delegation.cpp:75 msgid "There is no X509 request in the response" msgstr "Det finns ingen X509-begäran i svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:80 msgid "There is no Format request in the response" msgstr "Det finns ingen Format-begäran i svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:88 msgid "There is no Id or X509 request value in the response" msgstr "Det finns inget Id- eller X509-begäran-värde i svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:101 #: src/hed/libs/communication/ClientX509Delegation.cpp:189 msgid "DelegateProxy failed" msgstr "DelegateProxy misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:120 msgid "UpdateCredentials failed" msgstr "UpdateCredentials misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:128 msgid "There is no UpdateCredentialsResponse in response" msgstr "Det finns inget UpdateCredentialsResponse in svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:136 #: src/hed/libs/communication/ClientX509Delegation.cpp:164 #: src/hed/libs/communication/ClientX509Delegation.cpp:219 #: src/hed/libs/communication/ClientX509Delegation.cpp:304 msgid "There is no SOAP connection chain configured" msgstr "Det finns ingen SOAP-förbindelse-kedja i inställningarna" #: src/hed/libs/communication/ClientX509Delegation.cpp:142 msgid "Creating delegation to CREAM delegation service" msgstr "Skapar delegering till CREAM delegeringstjänst" #: src/hed/libs/communication/ClientX509Delegation.cpp:155 msgid "Delegation getProxyReq request failed" msgstr "Delegering-getProxyReq-begäran misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:175 msgid "Creating delegation to CREAM delegation service failed" msgstr "Att skapa delegering till CREAM delegeringstjänst misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:210 msgid "Delegation putProxy request failed" msgstr "Delegering-putProxy-begäran misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:224 msgid "Creating delegation to CREAM delegation failed" msgstr "Att skapa delegering till CREAM delegeringstjänst misslyckades" #: src/hed/libs/communication/ClientX509Delegation.cpp:239 msgid "Getting delegation credential from ARC delegation service" msgstr 
"Hämtar delegeringsreferens frÃ¥n ARC delegeringstjänst" #: src/hed/libs/communication/ClientX509Delegation.cpp:278 msgid "There is no Delegated X509 token in the response" msgstr "Det finns inget delegerat X509-token i svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:283 msgid "There is no Format delegated token in the response" msgstr "Det finns inget delegerat Format-token i svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:291 msgid "There is no Id or X509 token value in the response" msgstr "Det finns inget Id- eller X509-token-värde i svaret" #: src/hed/libs/communication/ClientX509Delegation.cpp:300 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" "Hämta delegerad referens frÃ¥n delegeringstjänst:\n" " %s" #: src/hed/libs/compute/Broker.cpp:54 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "Utför matchmaking mot target (%s)." #: src/hed/libs/compute/Broker.cpp:64 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "Matchmaking, ExecutionTarget: %s matchar jobbeskrivning" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." msgstr "" "CA-utfärdaren (%s) för referenserna (%s) är inte betrodd av target (%s)." #: src/hed/libs/compute/Broker.cpp:153 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "ComputingShareName för ExecutionTarget (%s) är inte definierat" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "ComputingShare (%s) explicit avvisad" #: src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "" "Matchmaking, ComputingShareName of ExecutionTarget (%s) is not defined, but " "requested queue is (%s)" msgstr "" "Matchmaking, ComputingShareName för ExecutionTarget (%s) är inte definierat, " "men begärd kö är det (%s)" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "" "Matchmaking, ComputingShare (%s) does not match requested queue (%s): " "skipping" msgstr "" "Matchmaking, ComputingShare (%s) matchar inte begärd kö (%s): hoppar över" #: src/hed/libs/compute/Broker.cpp:184 #, c-format msgid "Matchmaking, ComputingShare (%s) matches requested queue (%s)" msgstr "Matchmaking, ComputingShare (%s) matchar begärd kö (%s)" #: src/hed/libs/compute/Broker.cpp:192 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" "ProcessingStartTime (%s) angiven i jobbeskrivning ligger inom targets " "driftstoppsperiod [ %s - %s ]." #: src/hed/libs/compute/Broker.cpp:197 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "Targets (%s) driftstopp har inte publicerats. BehÃ¥ller target." #: src/hed/libs/compute/Broker.cpp:203 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK or WARNING (%s)" msgstr "HealthState för ExecutionTarget (%s) är inte OK eller VARNING (%s)" #: src/hed/libs/compute/Broker.cpp:208 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "Matchmaking, ExecutionTarget: %s, HealthState är inte definierat" #: src/hed/libs/compute/Broker.cpp:215 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: " "%s" msgstr "" "Matchmaking, Beräkningsslutpunktsvillkor inte uppfyllt. 
ExecutionTarget: %s" #: src/hed/libs/compute/Broker.cpp:220 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" "Matchmaking, ExecutionTarget: %s, ImplementationName är inte definierat" #: src/hed/libs/compute/Broker.cpp:246 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "Matchmaking, %s (%d) är %s än %s (%d) publicerat av ExecutionTarget." #: src/hed/libs/compute/Broker.cpp:275 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" "Matchmaking, %s skalad %s (%d) är %s än %s (%d) publicerat av " "ExecutionTarget." #: src/hed/libs/compute/Broker.cpp:287 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." msgstr "Matchmaking, Benchmark %s är inte publicerat av ExecutionTarget." #: src/hed/libs/compute/Broker.cpp:302 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" "Matchmaking, MaxTotalCPUTime-problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" #: src/hed/libs/compute/Broker.cpp:309 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" "Matchmaking, MaxCPUTime-problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" #: src/hed/libs/compute/Broker.cpp:314 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" "Matchmaking, ExecutionTarget: %s, varken MaxTotalCPUTime eller MaxCPUTime " "är definierad, antar ingen CPU-tidsgräns" #: src/hed/libs/compute/Broker.cpp:320 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" "Matchmaking, MinCPUTime-problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" #: src/hed/libs/compute/Broker.cpp:325 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" "Matchmaking, ExecutionTarget: %s, MinCPUTime inte definierat, antar ingen " "CPU-tidsgräns" #: src/hed/libs/compute/Broker.cpp:333 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" "Matchmaking, MainMemorySize-problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" #: src/hed/libs/compute/Broker.cpp:339 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" "Matchmaking, MaxMainMemory-problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" #: src/hed/libs/compute/Broker.cpp:344 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory och MainMemorySize är inte " "definierade" #: src/hed/libs/compute/Broker.cpp:352 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" "Matchmaking, MaxVirtualMemory-problem, ExecutionTarget: %d " "(MaxVirtualMemory), 
JobDescription: %d (IndividualVirtualMemory)" #: src/hed/libs/compute/Broker.cpp:357 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory är inte definierat" #: src/hed/libs/compute/Broker.cpp:365 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" "Matchmaking, Platform-problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" #: src/hed/libs/compute/Broker.cpp:370 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "Matchmaking, ExecutionTarget: %s, Platform är inte definierad" #: src/hed/libs/compute/Broker.cpp:378 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" "Matchmaking, ExecutionTarget: %s, OperatingSystem-villkor är inte uppfyllt" #: src/hed/libs/compute/Broker.cpp:383 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "Matchmaking, ExecutionTarget: %s, OperatingSystem är inte definierat" #: src/hed/libs/compute/Broker.cpp:391 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment-villkor är inte " "uppfyllt" #: src/hed/libs/compute/Broker.cpp:396 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments är inte definierade" #: src/hed/libs/compute/Broker.cpp:405 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." msgstr "" "Matchmaking, NetworkInfo-begäran inte uppfylld, ExecutionTarget stöder inte " "%s, angiven i jobbeskrivning." 
#: src/hed/libs/compute/Broker.cpp:409 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "Matchmaking, ExecutionTarget: %s, NetworkInfo är inte definierad" #: src/hed/libs/compute/Broker.cpp:417 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" "Matchmaking, MaxDiskSpace-problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" #: src/hed/libs/compute/Broker.cpp:424 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" "Matchmaking, WorkingAreaFree-problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" #: src/hed/libs/compute/Broker.cpp:430 src/hed/libs/compute/Broker.cpp:451 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace och WorkingAreaFree är inte " "definierade" #: src/hed/libs/compute/Broker.cpp:438 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" "Matchmaking, MaxDiskSpace-problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" #: src/hed/libs/compute/Broker.cpp:445 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" "Matchmaking, WorkingAreaFree-problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" #: src/hed/libs/compute/Broker.cpp:459 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" "Matchmaking, CacheTotal-problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" #: src/hed/libs/compute/Broker.cpp:464 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "Matchmaking, ExecutionTarget: %s, CacheTotal är inte definierat" #: src/hed/libs/compute/Broker.cpp:472 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" "Matchmaking, TotalSlots-problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" #: src/hed/libs/compute/Broker.cpp:478 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" "Matchmaking, MaxSlotsPerJob-problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" #: src/hed/libs/compute/Broker.cpp:484 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" "Matchmaking, ExecutionTarget: %s, TotalSlots och MaxSlotsPerJob är inte " "definierade" #: src/hed/libs/compute/Broker.cpp:492 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" "Matchmaking, WorkingAreaLifeTime-problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" #: src/hed/libs/compute/Broker.cpp:497 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" "Matchmaking, ExecutionTarget: %s, 
WorkingAreaLifeTime är inte definierad" #: src/hed/libs/compute/Broker.cpp:505 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" "Matchmaking, ConnectivityIn-problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" #: src/hed/libs/compute/Broker.cpp:512 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" "Matchmaking, ConnectivityOut-problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" #: src/hed/libs/compute/Broker.cpp:535 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" "Kan inte sortera tillagda jobb. BrokerPlugin-pluginen har inte laddats in." #: src/hed/libs/compute/Broker.cpp:552 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" "Kan inte matcha target, markerar det som inte matchande. Mäklare inte giltig." #: src/hed/libs/compute/Broker.cpp:588 msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "Kan inte sortera ExecutionTarget-objekt - Ogiltigt Broker-objekt." #: src/hed/libs/compute/Broker.cpp:612 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" "Kan inte registrera jobbinsändning. Kan inte få JobDescription-objekt från " "mäklare, mäklaren är ogiltig." #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." msgstr "Hittade inte mäklar-plugin \"%s\"." #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "Kunde inte ladda in BrokerPlugin (%s)" #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "Mäklare %s har laddats in" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:27 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "Uniq byter ut tjänst som kommer från %s mot tjänst som kommer från %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:31 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "Uniq ignorerar tjänst som kommer från %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:38 #, c-format msgid "Uniq is adding service coming from %s" msgstr "Uniq lägger till tjänst som kommer från %s" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:61 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "Lägger till slutpunkt (%s) till TargetInformationRetriever" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:64 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "Lägger till slutpunkt (%s) till ServiceEndpointRetriever" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:67 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" "Lägger till slutpunkt (%s) till både ServiceEndpointRetriever och " "TargetInformationRetriever" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "Pluginen %s stöder inte något gränssnitt, hoppar över den." #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." 
msgstr "" "Första gränssnittet som stöds an pluginen %s är en tom sträng, hoppar över " "pluginen." #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "Gränssnitt pÃ¥ slutpunkt (%s) %s." #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "Ignorerar slutpunkt (%s), den är redan registrerad i insamlare." #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "Tjänsteloop: Slutpunkt %s" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr " Denna slutpunkt (%s) är STARTED eller SUCCESSFUL" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" "Suspenderar frÃ¥gandet av slutpunkt (%s) eftersom tjänsten pÃ¥ slutpunkten " "redan hÃ¥ller pÃ¥ att frÃ¥gas eller har frÃ¥gats." #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr " Status för slutpunkt (%s) är %s" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "Sätter status (STARTED) för slutpunkt: %s" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "Startar trÃ¥d för att frÃ¥ga slutpunkten pÃ¥ %s" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, c-format msgid "Failed to start querying the endpoint on %s" msgstr "Misslyckades med att börja frÃ¥ga slutpunkten pÃ¥ %s" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "Hittade ett register, kommer att frÃ¥ga det rekursivt: %s" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "Sätter status (%s) för slutpunkt: %s" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." msgstr "Letar efter suspenderade slutpunkter som ska startas." #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "Hittade STARTED eller SUCCESSFUL slutpunkt (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "Hittade suspenderad slutpunkt (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "Försöker starta suspenderad slutpunkt (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" "Börjar frÃ¥ga suspenderad slutpunkt (%s) - ingen annan slutpunkt för denna " "tjänst hÃ¥ller pÃ¥ att frÃ¥gas eller har blivit frÃ¥gad framgÃ¥ngsrikt." 
#: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "Anropar plugin %s för att fråga slutpunkt på %s" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" "Gränssnittet för denna slutpunkt (%s) är ej angivet, kommer att prova alla " "möjliga pluginer" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "Problem med att ladda in plugin %s, hoppar över den." #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "Slutpunkten (%s) stöds inte av denna plugin (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "Ny slutpunkt skapas (%s) från den med ej angivet gränssnitt (%s)" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "Startar under-tråd för att fråga slutpunkten på %s" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" "Misslyckades med att börja fråga slutpunkten på %s (kunde inte skapa under-" "tråd)" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "Hittade %s %s (den hade redan laddats in)" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:98 #: src/hed/libs/compute/JobControllerPlugin.cpp:107 #: src/hed/libs/compute/SubmitterPlugin.cpp:167 #: src/hed/libs/compute/SubmitterPlugin.cpp:177 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" "Kunde inte hitta \"%s\"-pluginen. Referera till installationsinstruktionerna " "och kontrollera om paketet som tillhandahåller stöd för \"%s\"-pluginen är " "installerat" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, c-format msgid "%s plugin \"%s\" not found." msgstr "Hittade inte %s-plugin \"%s\"." #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, c-format msgid "%s %s could not be created." msgstr "%s %s kunde inte skapas." #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, c-format msgid "Loaded %s %s" msgstr "Laddade in %s %s" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." msgstr "" "Hoppar över beräkningsslutpunkt '%s', eftersom den har '%s'-gränssnitt i " "stället för det begärda '%s'." 
#: src/hed/libs/compute/ExecutionTarget.cpp:132 #, c-format msgid "" "Computing endpoint %s (type %s) added to the list for submission brokering" msgstr "" "Beräkningsslutpunkt %s (typ %s) lagd till i listan för insändningsmäkling" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, c-format msgid "Address: %s" msgstr "Adress: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, c-format msgid "Place: %s" msgstr "Ort: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, c-format msgid "Country: %s" msgstr "Land: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Postal code: %s" msgstr "Postnummer: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:243 #, c-format msgid "Latitude: %f" msgstr "Latitud: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:244 #, c-format msgid "Longitude: %f" msgstr "Longitud: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:250 #, c-format msgid "Owner: %s" msgstr "Ägare: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:257 #, c-format msgid "ID: %s" msgstr "ID: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:258 #, c-format msgid "Type: %s" msgstr "Typ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:263 #, c-format msgid "URL: %s" msgstr "URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, c-format msgid "Interface: %s" msgstr "Gränssnitt: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:266 msgid "Interface versions:" msgstr "Gränssnittsversioner:" #: src/hed/libs/compute/ExecutionTarget.cpp:271 msgid "Interface extensions:" msgstr "Gränssnittstillägg:" #: src/hed/libs/compute/ExecutionTarget.cpp:276 msgid "Capabilities:" msgstr "Förmågor:" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, c-format msgid "Technology: %s" msgstr "Teknologi: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:282 msgid "Supported Profiles:" msgstr "Profiler som stöds:" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, c-format msgid "Implementor: %s" msgstr "Implementerare: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, c-format msgid "Implementation name: %s" msgstr "Implementeringsnamn: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, c-format msgid "Quality level: %s" msgstr "Kvalitetsnivå: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Health state: %s" msgstr "Hälsotillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, c-format msgid "Health state info: %s" msgstr "Hälsotillståndsinfo: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:291 #, c-format msgid "Serving state: %s" msgstr "Betjäningstillstånd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:292 #, c-format msgid "Issuer CA: %s" msgstr "Utfärdar-CA: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:294 msgid "Trusted CAs:" msgstr "Betrodda CA:" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Downtime starts: %s" msgstr "Driftstopp börjar: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:299 #, c-format msgid "Downtime ends: %s" msgstr "Driftstopp slutar: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, c-format msgid "Staging: %s" msgstr "Laddar ned/upp: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:302 msgid "Job descriptions:" msgstr "Jobbeskrivningar:" #: src/hed/libs/compute/ExecutionTarget.cpp:314 #, c-format msgid "Scheme: %s" msgstr "Schema: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:317 #, c-format msgid "Rule: %s" msgstr "Regel: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Mapping queue: %s" msgstr "Mappar till kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, 
c-format msgid "Max wall-time: %s" msgstr "Största klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Max total wall-time: %s" msgstr "Största totala klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Min wall-time: %s" msgstr "Minsta klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Default wall-time: %s" msgstr "Förvald klocktid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Max CPU time: %s" msgstr "Största CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Min CPU time: %s" msgstr "Minsta CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Default CPU time: %s" msgstr "Förvald CPU-tid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max total jobs: %i" msgstr "Största totalt antal jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max running jobs: %i" msgstr "Största antal körande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max waiting jobs: %i" msgstr "Största antal väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "Största antal pre-LRMS-väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max user running jobs: %i" msgstr "Största antal körande jobb för användaren: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max slots per job: %i" msgstr "Största antal slottar per jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Max stage in streams: %i" msgstr "Största antal stage-in-strömmar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max stage out streams: %i" msgstr "Största antal stage-out-strömmar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Scheduling policy: %s" msgstr "Schemaläggningsspolicy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max memory: %i" msgstr "Största minne: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Max virtual memory: %i" msgstr "Största virtuella minne: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:348 #, c-format msgid "Max disk space: %i" msgstr "Största diskutrymme: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:349 #, c-format msgid "Default Storage Service: %s" msgstr "Förvald lagringstjänst: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:350 msgid "Supports preemption" msgstr "Stöder preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:351 msgid "Doesn't support preemption" msgstr "Stöder ej preemption" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Total jobs: %i" msgstr "Totalt antal jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Running jobs: %i" msgstr "Antal körande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local running jobs: %i" msgstr "Antal lokala körande jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Waiting jobs: %i" msgstr "Antal väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local waiting jobs: %i" msgstr "Antal lokala väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Suspended jobs: %i" msgstr "Antal suspenderade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Local suspended jobs: %i" msgstr "Antal lokala 
suspenderade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Staging jobs: %i" msgstr "Antal jobb som laddar ned/upp: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "Antal pre-LRMS-väntade jobb: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Estimated average waiting time: %s" msgstr "Förväntad medelväntetid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:362 #, c-format msgid "Estimated worst waiting time: %s" msgstr "Förväntad värsta väntetid: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:363 #, c-format msgid "Free slots: %i" msgstr "Lediga slottar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:365 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "Lediga slottar grupperade enligt tidsgräns (gräns: lediga slottar):" #: src/hed/libs/compute/ExecutionTarget.cpp:368 #, c-format msgid " %s: %i" msgstr " %s: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:369 #, c-format msgid " unspecified: %i" msgstr " ospecificerad: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Used slots: %i" msgstr "Använda slottar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:373 #, c-format msgid "Requested slots: %i" msgstr "Begärda slottar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:374 #, c-format msgid "Reservation policy: %s" msgstr "Reserveringspolicy: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:381 #, c-format msgid "Resource manager: %s" msgstr "Resurshanterare: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid " (%s)" msgstr " (%s)" #: src/hed/libs/compute/ExecutionTarget.cpp:387 #, c-format msgid "Total physical CPUs: %i" msgstr "Totalt antal fysiska CPUer: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:388 #, c-format msgid "Total logical CPUs: %i" msgstr "Totalt antal logiska CPUer: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:389 #, c-format msgid "Total slots: %i" msgstr "Totalt antal slottar: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Supports advance reservations" msgstr "Stöder framtida reservering" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Doesn't support advance reservations" msgstr "Stöder ej framtida reservering" #: src/hed/libs/compute/ExecutionTarget.cpp:392 msgid "Supports bulk submission" msgstr "Stöder massinsändning" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Doesn't support bulk Submission" msgstr "Stöder ej massinsändning" #: src/hed/libs/compute/ExecutionTarget.cpp:394 msgid "Homogeneous resource" msgstr "Homogen resurs" #: src/hed/libs/compute/ExecutionTarget.cpp:395 msgid "Non-homogeneous resource" msgstr "Icke-homogen resurs" #: src/hed/libs/compute/ExecutionTarget.cpp:397 msgid "Network information:" msgstr "Nätverksinformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:402 msgid "Working area is shared among jobs" msgstr "Arbetsutrymme delas mellan jobb" #: src/hed/libs/compute/ExecutionTarget.cpp:403 msgid "Working area is not shared among jobs" msgstr "Arbetsutrymme delas inte mellan jobb" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Working area total size: %i GB" msgstr "Arbetsutrymme total storlek: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:405 #, c-format msgid "Working area free size: %i GB" msgstr "Arbetsutrymme fri storlek: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:406 #, c-format msgid "Working area life time: %s" msgstr "Arbetsutrymme livstid: %s" #: 
src/hed/libs/compute/ExecutionTarget.cpp:407 #, c-format msgid "Cache area total size: %i GB" msgstr "Cacheutrymme total storlek: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:408 #, c-format msgid "Cache area free size: %i GB" msgstr "Cacheutrymme fri storlek: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:414 #, c-format msgid "Platform: %s" msgstr "Plattform: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment supports inbound connections" msgstr "Exekveringsmiljö stöder inkommande förbindelser" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment does not support inbound connections" msgstr "Exekveringsmiljö stöder inte inkommande förbindelser" #: src/hed/libs/compute/ExecutionTarget.cpp:417 msgid "Execution environment supports outbound connections" msgstr "Exekveringsmiljö stöder utgående förbindelser" #: src/hed/libs/compute/ExecutionTarget.cpp:418 msgid "Execution environment does not support outbound connections" msgstr "Exekveringsmiljö stöder inte utgående förbindelser" #: src/hed/libs/compute/ExecutionTarget.cpp:419 msgid "Execution environment is a virtual machine" msgstr "Exekveringsmiljö är en virtuell maskin" #: src/hed/libs/compute/ExecutionTarget.cpp:420 msgid "Execution environment is a physical machine" msgstr "Exekveringsmiljö är en fysisk maskin" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "CPU vendor: %s" msgstr "CPU-tillverkare: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "CPU model: %s" msgstr "CPU-modell: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, c-format msgid "CPU version: %s" msgstr "CPU-version: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "CPU clock speed: %i" msgstr "CPU-klockhastighet: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:425 #, c-format msgid "Main memory size: %i" msgstr "Huvudminnesstorlek: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:426 #, c-format msgid "OS family: %s" msgstr "OS-familj: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:427 #, c-format msgid "OS name: %s" msgstr "OS-namn: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:428 #, c-format msgid "OS version: %s" msgstr "OS-version: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:435 msgid "Computing service:" msgstr "Beräkningstjänst:" #: src/hed/libs/compute/ExecutionTarget.cpp:459 #, c-format msgid "%d Endpoints" msgstr "%d Slutpunkter" #: src/hed/libs/compute/ExecutionTarget.cpp:464 msgid "Endpoint Information:" msgstr "Slutpunktsinformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:476 #, c-format msgid "%d Batch Systems" msgstr "%d batchsystem" #: src/hed/libs/compute/ExecutionTarget.cpp:481 msgid "Batch System Information:" msgstr "Batchsysteminformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:487 msgid "Installed application environments:" msgstr "Installerade programmiljöer:" #: src/hed/libs/compute/ExecutionTarget.cpp:500 #, c-format msgid "%d Shares" msgstr "%d andelar" #: src/hed/libs/compute/ExecutionTarget.cpp:505 msgid "Share Information:" msgstr "Andelsinformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, c-format msgid "%d mapping policies" msgstr "%d mappningspolicyer" #: src/hed/libs/compute/ExecutionTarget.cpp:515 msgid "Mapping policy:" msgstr "Mappningspolicy:" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "Exekveringstarget på beräkningstjänst: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #, c-format msgid " 
Computing endpoint URL: %s" msgstr " Beräkningsslutpunkt-URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:535 #, c-format msgid " Computing endpoint interface name: %s" msgstr " Beräkningsslutpunktsgränssnittsnamn: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:537 #: src/hed/libs/compute/Job.cpp:579 #, c-format msgid " Queue: %s" msgstr " Kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:540 #, c-format msgid " Mapping queue: %s" msgstr " Mappar till kö: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:543 #, c-format msgid " Health state: %s" msgstr " HälsotillstÃ¥nd: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:548 msgid "Service information:" msgstr "Tjänsteinformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:553 msgid " Installed application environments:" msgstr " Installerade programmiljöer:" #: src/hed/libs/compute/ExecutionTarget.cpp:560 msgid "Batch system information:" msgstr "Batchsysteminformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:563 msgid "Queue information:" msgstr "Köinformation:" #: src/hed/libs/compute/ExecutionTarget.cpp:570 msgid " Benchmark information:" msgstr " Benchmarkinformation:" #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "Tjänsten tillkännager inte sin typ." #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "Beräkningstjänsten tillkännager inte sin kvalitetsnivÃ¥." #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "Beräkningsslutpunkten har ingen URL." #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "Tjänsten tillkännaget inte sitt hälsotillstÃ¥nd." #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "Beräkningsslutpunkten tillkännager inte sin kvalitetsnivÃ¥." #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "Beräkningstjänsten tillkännager inte ditt gränssnitt." #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "Beräkningsslutpunkten tillkännager inte sitt servicetillstÃ¥nd." #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" "\"FreeSlotsWithDuration\"-attributet publicerat av \"%s\" är felformatterat. " "Ignorerar det." #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "Felaktigt format för \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" #: src/hed/libs/compute/GLUE2.cpp:420 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" "Kunde inte tolka benchmark-XML:\n" "%s" #: src/hed/libs/compute/Job.cpp:328 msgid "Unable to detect format of job record." msgstr "Kunde inte detektera format för jobbpost." 
#: src/hed/libs/compute/Job.cpp:549 #, c-format msgid "Job: %s" msgstr "Jobb: %s" #: src/hed/libs/compute/Job.cpp:551 #, c-format msgid " Name: %s" msgstr " Namn: %s" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " State: %s" msgstr " Tillstånd: %s" #: src/hed/libs/compute/Job.cpp:555 #, c-format msgid " Specific state: %s" msgstr " Specifikt tillstånd: %s" #: src/hed/libs/compute/Job.cpp:559 src/hed/libs/compute/Job.cpp:583 #, c-format msgid " Waiting Position: %d" msgstr " Position i kö: %d" #: src/hed/libs/compute/Job.cpp:563 #, c-format msgid " Exit Code: %d" msgstr " Avslutningskod: %d" #: src/hed/libs/compute/Job.cpp:567 #, c-format msgid " Job Error: %s" msgstr " Jobbfel: %s" #: src/hed/libs/compute/Job.cpp:572 #, c-format msgid " Owner: %s" msgstr " Ägare: %s" #: src/hed/libs/compute/Job.cpp:576 #, c-format msgid " Other Messages: %s" msgstr " Övriga meddelanden: %s" #: src/hed/libs/compute/Job.cpp:581 #, c-format msgid " Requested Slots: %d" msgstr " Begärda slottar: %d" #: src/hed/libs/compute/Job.cpp:586 #, c-format msgid " Stdin: %s" msgstr " Stdin: %s" #: src/hed/libs/compute/Job.cpp:588 #, c-format msgid " Stdout: %s" msgstr " Stdout: %s" #: src/hed/libs/compute/Job.cpp:590 #, c-format msgid " Stderr: %s" msgstr " Stderr: %s" #: src/hed/libs/compute/Job.cpp:592 #, c-format msgid " Computing Service Log Directory: %s" msgstr " Beräkningstjänstens loggkatalog: %s" #: src/hed/libs/compute/Job.cpp:595 #, c-format msgid " Submitted: %s" msgstr " Insänt: %s" #: src/hed/libs/compute/Job.cpp:598 #, c-format msgid " End Time: %s" msgstr " Avslutningstid: %s" #: src/hed/libs/compute/Job.cpp:601 #, c-format msgid " Submitted from: %s" msgstr " Insänt från: %s" #: src/hed/libs/compute/Job.cpp:604 #, c-format msgid " Submitting client: %s" msgstr " Insänt med klient: %s" #: src/hed/libs/compute/Job.cpp:607 #, c-format msgid " Requested CPU Time: %s" msgstr " Begärd CPU-tid: %s" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s (%s per slot)" msgstr " Använd CPU-tid: %s (%s per slot)" #: src/hed/libs/compute/Job.cpp:616 #, c-format msgid " Used CPU Time: %s" msgstr " Använd CPU-tid: %s" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Used Wall Time: %s (%s per slot)" msgstr " Använd klocktid: %s (%s per slot)" #: src/hed/libs/compute/Job.cpp:626 #, c-format msgid " Used Wall Time: %s" msgstr " Använd klocktid: %s" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Used Memory: %d" msgstr " Använt minne: %d" #: src/hed/libs/compute/Job.cpp:635 #, c-format msgid " Results were deleted: %s" msgstr " Resultaten har raderats: %s" #: src/hed/libs/compute/Job.cpp:636 #, c-format msgid " Results must be retrieved before: %s" msgstr " Resultaten måste hämtas innan: %s" #: src/hed/libs/compute/Job.cpp:640 #, c-format msgid " Proxy valid until: %s" msgstr " Proxy giltig till: %s" #: src/hed/libs/compute/Job.cpp:644 #, c-format msgid " Entry valid from: %s" msgstr " Post giltig från: %s" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Entry valid for: %s" msgstr " Post giltig i: %s" #: src/hed/libs/compute/Job.cpp:651 msgid " Old job IDs:" msgstr " Gamla jobb-id:" #: src/hed/libs/compute/Job.cpp:659 #, c-format msgid " ID on service: %s" msgstr " ID på tjänst: %s" #: src/hed/libs/compute/Job.cpp:660 #, c-format msgid " Service information URL: %s (%s)" msgstr " Tjänsteinformation-URL: %s (%s)" #: src/hed/libs/compute/Job.cpp:661 #, c-format msgid " Job status URL: %s (%s)" msgstr " Jobbstatus-URL: %s (%s)" #: src/hed/libs/compute/Job.cpp:662 #, 
c-format msgid " Job management URL: %s (%s)" msgstr " Jobbhanterings-URL: %s (%s)" #: src/hed/libs/compute/Job.cpp:663 #, c-format msgid " Stagein directory URL: %s" msgstr " Stage-in-katalog-URL: %s" #: src/hed/libs/compute/Job.cpp:664 #, c-format msgid " Stageout directory URL: %s" msgstr " Stage-out-katalog-URL: %s" #: src/hed/libs/compute/Job.cpp:665 #, c-format msgid " Session directory URL: %s" msgstr " Sessionskatalog-URL: %s" #: src/hed/libs/compute/Job.cpp:667 msgid " Delegation IDs:" msgstr " Delegerings-ID:" #: src/hed/libs/compute/Job.cpp:849 #, c-format msgid "Unable to handle job (%s), no interface specified." msgstr "Kunde inte hantera jobb (%s), inget gränssnitt angivet." #: src/hed/libs/compute/Job.cpp:854 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" "Kunde inte hantera jobb (%s), ingen plugin associerad med det angivna " "gränssnittet (%s)" #: src/hed/libs/compute/Job.cpp:876 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "Ogiltig nedladdningsdestinationssökväg angiven (%s)" #: src/hed/libs/compute/Job.cpp:881 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" "Kunde inte ladda ner jobb (%s), ingen JobControllerPlugin-plugin har satts " "att hantera jobbet." #: src/hed/libs/compute/Job.cpp:885 #, c-format msgid "Downloading job: %s" msgstr "Laddar ner jobb: %s" #: src/hed/libs/compute/Job.cpp:891 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" "Kan inte hämta jobbfiler för jobb (%s) - kan inte bestämma URL till stage-" "out-katalog" #: src/hed/libs/compute/Job.cpp:897 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of log " "directory" msgstr "" "Kan inte hämta jobbfiler för jobb (%s) - kan inte bestämma URL till logg-" "katalog" #: src/hed/libs/compute/Job.cpp:903 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "Ogiltig stage-out-sökväg angiven (%s)" #: src/hed/libs/compute/Job.cpp:911 #, c-format msgid "%s directory exist! Skipping job." msgstr "%s-katalog existerar! Hoppar över jobb." #: src/hed/libs/compute/Job.cpp:923 #, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "Kunde inte hämta lista med jobbfiler att ladda ned för jobb %s" #: src/hed/libs/compute/Job.cpp:944 #, c-format msgid "Unable to retrieve list of log files to download for job %s" msgstr "Kunde inte hämta lista med loggfiler att ladda ned för jobb %s" #: src/hed/libs/compute/Job.cpp:963 #, c-format msgid "No files to retrieve for job %s" msgstr "Inga filer att hämta för jobb %s" #: src/hed/libs/compute/Job.cpp:969 #, c-format msgid "Failed to create directory %s! Skipping job." msgstr "Misslyckades med att skapa katalog %s! Hoppar över jobb." 
#: src/hed/libs/compute/Job.cpp:986 #, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "" "Misslyckades med att ladda ned %s till %s, destinationen existerar redan" #: src/hed/libs/compute/Job.cpp:992 #, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "" "Misslyckades med att ladda ned %s till %s, kunde inte ta bort existerande " "destination" #: src/hed/libs/compute/Job.cpp:999 #, c-format msgid "Failed downloading %s to %s" msgstr "Misslyckades med att ladda ned %s till %s" #: src/hed/libs/compute/Job.cpp:1012 #, c-format msgid "Unable to initialize handler for %s" msgstr "Misslyckades med att initiera hanterare för %s" #: src/hed/libs/compute/Job.cpp:1017 #, c-format msgid "Unable to list files at %s" msgstr "Kunde inte lista filer på %s" #: src/hed/libs/compute/Job.cpp:1060 msgid "Now copying (from -> to)" msgstr "Kopierar nu (från -> till)" #: src/hed/libs/compute/Job.cpp:1061 #, c-format msgid " %s -> %s" msgstr " %s -> %s" #: src/hed/libs/compute/Job.cpp:1076 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "Kunde inte initiera förbindelse till källa: %s" #: src/hed/libs/compute/Job.cpp:1087 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "Kunde inte initiera förbindelse till destination: %s" #: src/hed/libs/compute/Job.cpp:1109 #, c-format msgid "File download failed: %s" msgstr "Filnedladdning misslyckades: %s" #: src/hed/libs/compute/Job.cpp:1148 src/hed/libs/compute/Job.cpp:1177 #: src/hed/libs/compute/Job.cpp:1209 src/hed/libs/compute/Job.cpp:1242 #, c-format msgid "Waiting for lock on file %s" msgstr "Väntar på lås på fil %s" #: src/hed/libs/compute/JobControllerPlugin.cpp:99 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." msgstr "Hittade inte JobControllerPlugin-plugin \"%s\"."
#: src/hed/libs/compute/JobControllerPlugin.cpp:108 #, c-format msgid "JobControllerPlugin %s could not be created" msgstr "JobControllerPlugin %s kunde inte skapas" #: src/hed/libs/compute/JobControllerPlugin.cpp:113 #, c-format msgid "Loaded JobControllerPlugin %s" msgstr "Laddade in JobControllerPlugin %s" #: src/hed/libs/compute/JobDescription.cpp:26 #, c-format msgid ": %d" msgstr ": %d" #: src/hed/libs/compute/JobDescription.cpp:28 #, c-format msgid ": %s" msgstr ": %s" #: src/hed/libs/compute/JobDescription.cpp:144 msgid " --- DRY RUN --- " msgstr " --- TORRKÖRNING --- " #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Annotation: %s" msgstr " Annotering: %s" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Old activity ID: %s" msgstr " Gammalt aktivitets-ID: %s" #: src/hed/libs/compute/JobDescription.cpp:166 #, c-format msgid " Argument: %s" msgstr " Argument: %s" #: src/hed/libs/compute/JobDescription.cpp:177 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr " RemoteLogging (valfritt): %s (%s)" #: src/hed/libs/compute/JobDescription.cpp:180 #, c-format msgid " RemoteLogging: %s (%s)" msgstr " RemoteLogging: %s (%s)" #: src/hed/libs/compute/JobDescription.cpp:188 #, c-format msgid " Environment.name: %s" msgstr " Environment.name: %s" #: src/hed/libs/compute/JobDescription.cpp:189 #, c-format msgid " Environment: %s" msgstr " Environment: %s" #: src/hed/libs/compute/JobDescription.cpp:202 #, c-format msgid " PreExecutable.Argument: %s" msgstr " PreExecutable.Argument: %s" #: src/hed/libs/compute/JobDescription.cpp:205 #: src/hed/libs/compute/JobDescription.cpp:223 #, c-format msgid " Exit code for successful execution: %d" msgstr " Avslutningskod för framgångsrik exekvering: %d" #: src/hed/libs/compute/JobDescription.cpp:208 #: src/hed/libs/compute/JobDescription.cpp:226 msgid " No exit code for successful execution specified." msgstr " Ingen avslutningskod för framgångsrik exekvering angiven."
#: src/hed/libs/compute/JobDescription.cpp:220 #, c-format msgid " PostExecutable.Argument: %s" msgstr " PostExecutable.Argument: %s" #: src/hed/libs/compute/JobDescription.cpp:236 #, c-format msgid " Access control: %s" msgstr " Åtkomstkontroll: %s" #: src/hed/libs/compute/JobDescription.cpp:240 #, c-format msgid " Processing start time: %s" msgstr " Processeringsstarttid: %s" #: src/hed/libs/compute/JobDescription.cpp:243 msgid " Notify:" msgstr " Avisera:" #: src/hed/libs/compute/JobDescription.cpp:257 #, c-format msgid " Credential service: %s" msgstr " Referenstjänst: %s" #: src/hed/libs/compute/JobDescription.cpp:267 msgid " Operating system requirements:" msgstr " Operativsystem-villkor:" #: src/hed/libs/compute/JobDescription.cpp:285 msgid " Computing endpoint requirements:" msgstr " Beräkningsslutpunkt-villkor:" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound" msgstr " Nod-åtkomst: inkommande" #: src/hed/libs/compute/JobDescription.cpp:301 msgid " Node access: outbound" msgstr " Nod-åtkomst: utgående" #: src/hed/libs/compute/JobDescription.cpp:304 msgid " Node access: inbound and outbound" msgstr " Nod-åtkomst: inkommande och utgående" #: src/hed/libs/compute/JobDescription.cpp:314 msgid " Job requires exclusive execution" msgstr " Jobb kräver exklusiv exekvering" #: src/hed/libs/compute/JobDescription.cpp:317 msgid " Job does not require exclusive execution" msgstr " Jobb kräver inte exklusiv exekvering" #: src/hed/libs/compute/JobDescription.cpp:322 msgid " Run time environment requirements:" msgstr " Runtime-miljö-villkor:" #: src/hed/libs/compute/JobDescription.cpp:334 msgid " Inputfile element:" msgstr " Indatafil-element:" #: src/hed/libs/compute/JobDescription.cpp:335 #: src/hed/libs/compute/JobDescription.cpp:357 #, c-format msgid " Name: %s" msgstr " Namn: %s" #: src/hed/libs/compute/JobDescription.cpp:337 msgid " Is executable: true" msgstr " Är exekverbar: sant" #: src/hed/libs/compute/JobDescription.cpp:341 #, c-format msgid " Sources: %s" msgstr " Sources: %s" #: src/hed/libs/compute/JobDescription.cpp:343 #, c-format msgid " Sources.DelegationID: %s" msgstr " Sources.DelegationID: %s" #: src/hed/libs/compute/JobDescription.cpp:347 #, c-format msgid " Sources.Options: %s = %s" msgstr " Sources.Options: %s = %s" #: src/hed/libs/compute/JobDescription.cpp:356 msgid " Outputfile element:" msgstr " Utdatafil-element:" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets: %s" msgstr " Targets: %s" #: src/hed/libs/compute/JobDescription.cpp:362 #, c-format msgid " Targets.DelegationID: %s" msgstr " Targets.DelegationID: %s" #: src/hed/libs/compute/JobDescription.cpp:366 #, c-format msgid " Targets.Options: %s = %s" msgstr " Targets.Options: %s = %s" #: src/hed/libs/compute/JobDescription.cpp:373 #, c-format msgid " DelegationID element: %s" msgstr " Delegerings-ID-element: %s" #: src/hed/libs/compute/JobDescription.cpp:380 #, c-format msgid " Other attributes: [%s], %s" msgstr " Övriga attribut: [%s], %s" #: src/hed/libs/compute/JobDescription.cpp:446 msgid "Empty job description source string" msgstr "Tom jobbeskrivnings-källsträng" #: src/hed/libs/compute/JobDescription.cpp:479 msgid "No job description parsers available" msgstr "Inga jobbeskrivningstolkar tillgängliga" #: src/hed/libs/compute/JobDescription.cpp:481 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" "Inga jobbeskrivningstolkar lämpliga att hantera '%s'-språket är tillgängliga" #:
src/hed/libs/compute/JobDescription.cpp:489 #, c-format msgid "%s parsing error" msgstr "%s-tolkningsfel" #: src/hed/libs/compute/JobDescription.cpp:505 msgid "No job description parser was able to interpret job description" msgstr "Ingen jobbeskrivningstolk kunde tolka jobbeskrivning" #: src/hed/libs/compute/JobDescription.cpp:515 msgid "" "Job description language is not specified, unable to output description." msgstr "Jobbeskrivningsspråk är inte angivet, kan inte skriva ut beskrivning." #: src/hed/libs/compute/JobDescription.cpp:527 #, c-format msgid "Generating %s job description output" msgstr "Genererar %s-jobbeskrivning" #: src/hed/libs/compute/JobDescription.cpp:543 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "Språk (%s) känns inte igen av någon jobbeskrivningstolk." #: src/hed/libs/compute/JobDescription.cpp:556 #, c-format msgid "Two input files have identical name '%s'." msgstr "Två indatafiler har identiska namn '%s'." #: src/hed/libs/compute/JobDescription.cpp:575 #: src/hed/libs/compute/JobDescription.cpp:588 #, c-format msgid "Cannot stat local input file '%s'" msgstr "Kan inte göra stat på lokal indatafil '%s'" #: src/hed/libs/compute/JobDescription.cpp:608 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "Kan inte hitta lokal indatafil '%s' (%s)" #: src/hed/libs/compute/JobDescription.cpp:650 msgid "Unable to select runtime environment" msgstr "Kan inte välja runtime-miljö" #: src/hed/libs/compute/JobDescription.cpp:657 msgid "Unable to select middleware" msgstr "Kan inte välja middleware" #: src/hed/libs/compute/JobDescription.cpp:664 msgid "Unable to select operating system." msgstr "Kan inte välja operativsystem." #: src/hed/libs/compute/JobDescription.cpp:683 #, c-format msgid "No test-job with ID %d found." msgstr "Hittade inget test-jobb med ID %d." #: src/hed/libs/compute/JobDescription.cpp:695 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "Test definierades med ID %d, men något fel uppstod när det tolkades." #: src/hed/libs/compute/JobDescription.cpp:699 #, c-format msgid "No jobdescription resulted at %d test" msgstr "Ingen jobbeskrivning resulterade vid %d test" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." msgstr "Hittade inte JobDescriptionParserPlugin-plugin \"%s\"."
#: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "JobDescriptionParserPlugin %s kunde inte skapas" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "Laddade in JobDescriptionParserPlugin %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:125 #, c-format msgid "Unable to create data base (%s)" msgstr "Kunde inte skapa databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:133 #, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "Kunde inte skapa jobs-tabell i databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:142 #, c-format msgid "Unable to create jobs_new table in data base (%s)" msgstr "Kunde inte skapa jobs_new-tabell i databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #, c-format msgid "Unable to transfer from jobs to jobs_new in data base (%s)" msgstr "Kunde inte överföra från jobs till jobs_new i databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:154 #, c-format msgid "Unable to drop jobs in data base (%s)" msgstr "Kunde inte ta bort jobs-tabell i databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:160 #, c-format msgid "Unable to rename jobs table in data base (%s)" msgstr "Kunde inte byta namn på jobs-tabell i databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:170 #, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "Kunde inte skapa index för jobs-tabell i databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:178 #, c-format msgid "Failed checking database (%s)" msgstr "Misslyckades med att kontrollera databas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:180 #, c-format msgid "Job database connection established successfully (%s)" msgstr "Jobbdatabasförbindelse etablerad framgångsrikt (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:202 #, c-format msgid "Error from SQLite: %s: %s" msgstr "Fel från SQLite: %s: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:205 #, c-format msgid "Error from SQLite: %s" msgstr "Fel från SQLite: %s" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:226 #: src/hed/libs/compute/JobInformationStorageXML.cpp:36 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "Jobblistfil kan inte skapas: Föräldrakatalogen (%s) existerar inte."
#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:230 #: src/hed/libs/compute/JobInformationStorageXML.cpp:40 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "Jobblistfil kan inte skapas: %s är inte en katalog" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:237 #: src/hed/libs/compute/JobInformationStorageXML.cpp:47 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "Jobblistfil (%s) är inte en vanlig fil" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:367 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:374 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:381 #, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "Kunde inte skriva poster till jobbdatabas (%s): Id \"%s\"" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:627 #: src/hed/libs/compute/JobInformationStorageXML.cpp:146 #, c-format msgid "Unable to truncate job database (%s)" msgstr "Kunde inte trunkera jobbdatabas (%s)" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:660 #, c-format msgid "Unable to determine error (%d)" msgstr "Kunde inte bestämma fel (%d)" #: src/hed/libs/compute/JobInformationStorageXML.cpp:60 #: src/hed/libs/compute/JobInformationStorageXML.cpp:232 #: src/hed/libs/compute/JobInformationStorageXML.cpp:273 #, c-format msgid "Waiting for lock on job list file %s" msgstr "Väntar på lås på jobblistfil %s" #: src/hed/libs/compute/JobInformationStorageXML.cpp:171 #, c-format msgid "Will remove %s on service %s." msgstr "Kommer att ta bort %s på tjänsten %s." #: src/hed/libs/compute/JobSupervisor.cpp:40 msgid "Ignoring job, the job ID is empty" msgstr "Ignorerar jobb, jobb-id är tomt" #: src/hed/libs/compute/JobSupervisor.cpp:45 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "Ignorerar jobb (%s), hanteringsgränssnittsnamnet är okänt" #: src/hed/libs/compute/JobSupervisor.cpp:50 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "Ignorerar jobb (%s), jobbhanterings-URL är okänd" #: src/hed/libs/compute/JobSupervisor.cpp:55 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "Ignorerar jobb (%s), statusgränssnittsnamnet är okänt" #: src/hed/libs/compute/JobSupervisor.cpp:60 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "Ignorerar jobb (%s), jobbstatus-URL är okänd" #: src/hed/libs/compute/JobSupervisor.cpp:69 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "Ignorerar jobb (%s), kunde inte ladda in JobControllerPlugin för %s" #: src/hed/libs/compute/JobSupervisor.cpp:76 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" "Ignorerar jobb (%s), redan försökt och kunde inte ladda in " "JobControllerPlugin" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "%s > %s => falskt" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "%s > %s => sant" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "%s > %s => falskt: %s innehåller icke-nummer i versionsdelen."
#: src/hed/libs/compute/Software.cpp:199 src/hed/libs/compute/Software.cpp:210 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "Villkor \"%s %s\" INTE uppfyllt." #: src/hed/libs/compute/Software.cpp:205 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "Villkor \"%s %s\" uppfyllt." #: src/hed/libs/compute/Software.cpp:214 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "Villkor \"%s %s\" uppfyllt av \"%s\"." #: src/hed/libs/compute/Software.cpp:219 msgid "All software requirements satisfied." msgstr "Alla mjukvarukrav uppfyllda." #: src/hed/libs/compute/Submitter.cpp:83 #, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "Försöker sända in direkt till slutpunkt (%s)" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "Gränssnitt (%s) angivet, sänder endast in till detta gränssnitt" #: src/hed/libs/compute/Submitter.cpp:106 msgid "Trying all available interfaces" msgstr "Provar alla tillgängliga gränssnitt" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" "Försöker sända in till slutpunkt (%s) med gränssnitt (%s) med plugin (%s)." #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" "Kunde inte ladda in plugin (%s) för gränssnitt (%s) vid försök att sända in " "jobbeskrivning." #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "Inga fler gränssnitt att prova för slutpunkt %s." #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "Target %s matchar inte begärt/begärda gränssnitt." #: src/hed/libs/compute/SubmitterPlugin.cpp:63 msgid "No stagein URL is provided" msgstr "Ingen stage-in-URL tillhandahållen" #: src/hed/libs/compute/SubmitterPlugin.cpp:72 #, c-format msgid "Failed reading file %s" msgstr "Misslyckades med att läsa fil %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:86 #, c-format msgid "Failed uploading file %s to %s: %s" msgstr "Misslyckades med att ladda upp fil %s till %s: %s" #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." msgstr "Hittade inte SubmitterPlugin-plugin \"%s\"." #: src/hed/libs/compute/SubmitterPlugin.cpp:178 #, c-format msgid "SubmitterPlugin %s could not be created" msgstr "SubmitterPlugin %s kunde inte skapas" #: src/hed/libs/compute/SubmitterPlugin.cpp:183 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "Laddade in SubmitterPlugin %s" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 msgid "Invalid job description" msgstr "Ogiltig jobbeskrivning" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 msgid "Failed to submit job" msgstr "Misslyckades med att sända in jobb" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, c-format msgid "Failed to write to local job list %s" msgstr "Misslyckades med att skriva till lokal jobblista %s" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "[jobbeskrivning ...]" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" "Detta lilla verktyg kan användas för att testa JobDescription-klassens " "konverteringsmöjligheter."
#: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in ADL or XRSL format." msgstr "" "Jobbeskrivningen kan också vara en fil eller en sträng i ADL- eller XRSL-" "format." #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "define the requested format (nordugrid:xrsl, emies:adl)" msgstr "definiera det begärda formatet (nordugrid:xrsl, emies:adl)" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "format" #: src/hed/libs/compute/test_jobdescription.cpp:33 msgid "show the original job description" msgstr "visa den ursprungliga jobbeskrivningen" #: src/hed/libs/compute/test_jobdescription.cpp:43 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:72 msgid "Use --help option for detailed usage information" msgstr "Använd alternativet --help för detaljerad användningsinformation" #: src/hed/libs/compute/test_jobdescription.cpp:50 msgid " [ JobDescription tester ] " msgstr " [ JobDescription testare ] " #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr " [ Tolkar den ursprungliga texten ] " #: src/hed/libs/compute/test_jobdescription.cpp:80 msgid "Unable to parse." msgstr "Kan inte tolka." #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ emies:adl ] " msgstr " [ emies:adl ] " #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ nordugrid:xrsl ] " msgstr " [ nordugrid:xrsl ] " #: src/hed/libs/credential/CertUtil.cpp:127 #, c-format msgid "Error number in store context: %i" msgstr "Felnummer i lager-kontext: %i" #: src/hed/libs/credential/CertUtil.cpp:128 msgid "Self-signed certificate" msgstr "Självsignerat certifikat" #: src/hed/libs/credential/CertUtil.cpp:131 #, c-format msgid "The certificate with subject %s is not valid" msgstr "Certifikatet med subjekt %s är inte giltigt" #: src/hed/libs/credential/CertUtil.cpp:134 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" "Kan inte hitta utfärdarcertifikat för certifikatet med subjekt %s och hash: " "%lu" #: src/hed/libs/credential/CertUtil.cpp:137 #, c-format msgid "Certificate with subject %s has expired" msgstr "Giltighetstiden för certifikat med subjekt %s har gått ut" #: src/hed/libs/credential/CertUtil.cpp:140 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" "Icke betrott självsignerat certifikat i kedja med subjekt %s och hash: %lu" #: src/hed/libs/credential/CertUtil.cpp:142 #, c-format msgid "Certificate verification error: %s" msgstr "Certifikatverifieringsfel: %s" #: src/hed/libs/credential/CertUtil.cpp:154 msgid "Can not get the certificate type" msgstr "Kan inte erhålla certifikattyp" #: src/hed/libs/credential/CertUtil.cpp:194 msgid "Couldn't verify availability of CRL" msgstr "Kunde inte verifiera tillgänglighet för CRL" #: src/hed/libs/credential/CertUtil.cpp:207 msgid "In the available CRL the lastUpdate field is not valid" msgstr "I den tillgängliga CRLen är lastUpdate-fältet inte giltigt" #: src/hed/libs/credential/CertUtil.cpp:214 msgid "The available CRL is not yet valid" msgstr "Den tillgängliga CRLen är inte giltig än" #: src/hed/libs/credential/CertUtil.cpp:223 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "I den tillgängliga CRLen är nextUpdate-fältet inte giltigt" #: src/hed/libs/credential/CertUtil.cpp:229 msgid "The available CRL has expired" msgstr "Giltighetstiden för den tillgängliga CRLen har gått ut"
#: src/hed/libs/credential/CertUtil.cpp:252 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "Certifikat med serienummer %s och subjekt \"%s\" är revokerat" #: src/hed/libs/credential/CertUtil.cpp:270 msgid "" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" "Katalog med betrodda CA har inte angivits eller kan inte hittas; Använder " "nuvarande katalog som CA-katalog" #: src/hed/libs/credential/CertUtil.cpp:279 msgid "Can't allocate memory for CA policy path" msgstr "Kan inte allokera minne för CA-policy-sökväg" #: src/hed/libs/credential/CertUtil.cpp:325 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "Certifikat har okänt tillägg med numeriskt ID %u och SN %s" #: src/hed/libs/credential/CertUtil.cpp:339 #: src/hed/libs/credential/Credential.cpp:1727 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" "Kan inte konvertera DER-kodat PROXY_CERT_INFO_EXTENSION-tillägg till internt " "format" #: src/hed/libs/credential/CertUtil.cpp:385 msgid "Trying to check X509 cert with check_cert_type" msgstr "Försöker kontrollera X509-certifikat med check_cert_type" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal format" msgstr "" "Kan inte konvertera DER-kodat PROXYCERTINFO-tillägg till internt format" #: src/hed/libs/credential/CertUtil.cpp:428 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "Kan inte erhålla policy från PROXYCERTINFO-tillägg" #: src/hed/libs/credential/CertUtil.cpp:432 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "Kan inte erhålla policy-språk från PROXYCERTINFO-tillägg" #: src/hed/libs/credential/CertUtil.cpp:464 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "Subjekt matchar inte utfärdarnamn + proxy-CN-post" #: src/hed/libs/credential/Credential.cpp:48 #, c-format msgid "OpenSSL error string: %s" msgstr "OpenSSL-felsträng: %s" #: src/hed/libs/credential/Credential.cpp:169 msgid "Can't get the first byte of input to determine its format" msgstr "Kan inte erhålla indatas första byte för att bestämma dess format" #: src/hed/libs/credential/Credential.cpp:183 msgid "Can't reset the input" msgstr "Kan inte återställa indata" #: src/hed/libs/credential/Credential.cpp:208 #: src/hed/libs/credential/Credential.cpp:244 msgid "Can't get the first byte of input BIO to get its format" msgstr "Kan inte erhålla indata-BIOs första byte för att bestämma dess format" #: src/hed/libs/credential/Credential.cpp:220 msgid "Can not read certificate/key string" msgstr "Kan inte läsa certifikat/nyckel-sträng" #: src/hed/libs/credential/Credential.cpp:433 #, c-format msgid "Can not find certificate file: %s" msgstr "Kan inte hitta certifikatfil: %s" #: src/hed/libs/credential/Credential.cpp:438 #, c-format msgid "Can not read certificate file: %s" msgstr "Kan inte läsa certifikatfil: %s" #: src/hed/libs/credential/Credential.cpp:476 msgid "Can not read certificate string" msgstr "Kan inte läsa certifikatsträng" #: src/hed/libs/credential/Credential.cpp:496 msgid "Certificate format is PEM" msgstr "Certifikatformat är PEM" #: src/hed/libs/credential/Credential.cpp:523 msgid "Certificate format is DER" msgstr "Certifikatformat är DER" #: src/hed/libs/credential/Credential.cpp:552 msgid "Certificate format is PKCS" msgstr "Certifikatformat är PKCS" #:
src/hed/libs/credential/Credential.cpp:578 msgid "Certificate format is unknown" msgstr "Certifikatformat är okänt" #: src/hed/libs/credential/Credential.cpp:586 #, c-format msgid "Can not find key file: %s" msgstr "Kan inte hitta nyckelfil: %s" #: src/hed/libs/credential/Credential.cpp:591 #, c-format msgid "Can not open key file %s" msgstr "Kan inte öppna nyckelfil %s" #: src/hed/libs/credential/Credential.cpp:610 msgid "Can not read key string" msgstr "Kan inte läsa nyckelsträng" #: src/hed/libs/credential/Credential.cpp:673 #: src/hed/libs/credential/VOMSUtil.cpp:210 msgid "Failed to lock arccredential library in memory" msgstr "Misslyckades med att låsa arccredential-biblioteket i minnet" #: src/hed/libs/credential/Credential.cpp:685 msgid "Certificate verification succeeded" msgstr "Certifikatverifiering lyckades" #: src/hed/libs/credential/Credential.cpp:689 msgid "Certificate verification failed" msgstr "Certifikatverifiering misslyckades" #: src/hed/libs/credential/Credential.cpp:702 #: src/hed/libs/credential/Credential.cpp:722 #: src/hed/libs/credential/Credential.cpp:742 #: src/hed/libs/credential/Credential.cpp:1024 #: src/hed/libs/credential/Credential.cpp:2398 #: src/hed/libs/credential/Credential.cpp:2428 msgid "Failed to initialize extensions member for Credential" msgstr "Misslyckades med att initiera tilläggsmedlem för referens" #: src/hed/libs/credential/Credential.cpp:787 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "Det begärda proxypolicyspråket stöds inte - %s" #: src/hed/libs/credential/Credential.cpp:799 #, c-format msgid "Unsupported proxy version is requested - %s" msgstr "Den begärda proxyversionen stöds inte - %s" #: src/hed/libs/credential/Credential.cpp:810 msgid "If you specify a policy you also need to specify a policy language" msgstr "Om du anger en policy måste du också ange ett policyspråk" #: src/hed/libs/credential/Credential.cpp:857 #, c-format msgid "Error: can't open policy file: %s" msgstr "Fel: kan inte öppna policyfil: %s" #: src/hed/libs/credential/Credential.cpp:870 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "Fel: policy-plats: %s är inte en vanlig fil" #: src/hed/libs/credential/Credential.cpp:929 #: src/hed/libs/credential/Credential.cpp:962 #: src/hed/libs/credential/Credential.cpp:1029 msgid "Certificate/Proxy path is empty" msgstr "Certifikat/proxy-sökväg är tom" #: src/hed/libs/credential/Credential.cpp:1087 #: src/hed/libs/credential/Credential.cpp:2937 msgid "Failed to duplicate extension" msgstr "Misslyckades med att duplicera tillägg" #: src/hed/libs/credential/Credential.cpp:1091 msgid "Failed to add extension into credential extensions" msgstr "Misslyckades med att lägga till tillägg till referenstillägg" #: src/hed/libs/credential/Credential.cpp:1104 msgid "Certificate information collection failed" msgstr "Certifikatinformationsinsamling misslyckades" #: src/hed/libs/credential/Credential.cpp:1143 #: src/hed/libs/credential/Credential.cpp:1148 msgid "Can not convert string into ASN1_OBJECT" msgstr "Kan inte konvertera sträng till ASN1_OBJECT" #: src/hed/libs/credential/Credential.cpp:1155 msgid "Can not create ASN1_OCTET_STRING" msgstr "Kan inte skapa ASN1_OCTET_STRING" #: src/hed/libs/credential/Credential.cpp:1164 msgid "Can not allocate memory for extension for proxy certificate" msgstr "Kan inte allokera minne för tillägg för proxycertifikat" #: src/hed/libs/credential/Credential.cpp:1174 msgid "Can not create extension for proxy certificate" msgstr
"Kan inte skapa tillägg för proxycertifikat" #: src/hed/libs/credential/Credential.cpp:1210 #: src/hed/libs/credential/Credential.cpp:1378 msgid "BN_set_word failed" msgstr "BN_set_word misslyckades" #: src/hed/libs/credential/Credential.cpp:1219 #: src/hed/libs/credential/Credential.cpp:1387 msgid "RSA_generate_key_ex failed" msgstr "RSA_generate_key_ex misslyckades" #: src/hed/libs/credential/Credential.cpp:1228 #: src/hed/libs/credential/Credential.cpp:1395 msgid "BN_new || RSA_new failed" msgstr "BN_new || RSA_new misslyckades" #: src/hed/libs/credential/Credential.cpp:1239 msgid "Created RSA key, proceeding with request" msgstr "Skapade RSA-nyckel, fortsätter med begäran" #: src/hed/libs/credential/Credential.cpp:1244 msgid "pkey and rsa_key exist!" msgstr "pkey och rsa_key existerar!" #: src/hed/libs/credential/Credential.cpp:1247 msgid "Generate new X509 request!" msgstr "Generera ny X509-begäran" #: src/hed/libs/credential/Credential.cpp:1252 msgid "Setting subject name!" msgstr "Sätter subjekt-namn" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1474 msgid "PEM_write_bio_X509_REQ failed" msgstr "PEM_write_bio_X509_REQ misslyckades" #: src/hed/libs/credential/Credential.cpp:1290 #: src/hed/libs/credential/Credential.cpp:1331 #: src/hed/libs/credential/Credential.cpp:1506 #: src/hed/libs/credential/Credential.cpp:1526 msgid "Can not create BIO for request" msgstr "Kan inte skapa BIO för begäran" #: src/hed/libs/credential/Credential.cpp:1308 msgid "Failed to write request into string" msgstr "Misslyckades med att skriva begäran till sträng" #: src/hed/libs/credential/Credential.cpp:1335 #: src/hed/libs/credential/Credential.cpp:1340 #: src/hed/libs/credential/Credential.cpp:1530 msgid "Can not set writable file for request BIO" msgstr "Kan inte ange skrivbar fil för begärans BIO" #: src/hed/libs/credential/Credential.cpp:1346 #: src/hed/libs/credential/Credential.cpp:1535 msgid "Wrote request into a file" msgstr "Skrev begäran till en fil" #: src/hed/libs/credential/Credential.cpp:1348 #: src/hed/libs/credential/Credential.cpp:1538 msgid "Failed to write request into a file" msgstr "Misslyckades med att skriva begäran till en fil" #: src/hed/libs/credential/Credential.cpp:1368 msgid "The credential's private key has already been initialized" msgstr "Referensens privata nyckel har redan initierats" #: src/hed/libs/credential/Credential.cpp:1416 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" "Kan inte duplicera subjektnamnet för den självsignerande " "proxycertifikatbegäran" #: src/hed/libs/credential/Credential.cpp:1426 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "Kan inte skapa en ny X509_NAME_ENTRY för proxycertifikatbegäran" #: src/hed/libs/credential/Credential.cpp:1444 #: src/hed/libs/credential/Credential.cpp:1451 #: src/hed/libs/credential/Credential.cpp:2029 #: src/hed/libs/credential/Credential.cpp:2037 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" "Kan inte konvertera PROXY_CERT_INFO_EXTENSION-struct frÃ¥n internt till DER-" "kodat format" #: src/hed/libs/credential/Credential.cpp:1481 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "Kan inte konvertera X509-begäran frÃ¥n internt till DER-kodat format" #: src/hed/libs/credential/Credential.cpp:1491 msgid "Can not generate X509 request" msgstr "Kan inte generera X509-begäran" #: 
src/hed/libs/credential/Credential.cpp:1493 msgid "Can not set private key" msgstr "Kan inte ange privat nyckel" #: src/hed/libs/credential/Credential.cpp:1591 msgid "Failed to get private key" msgstr "Misslyckades med att erhålla privat nyckel" #: src/hed/libs/credential/Credential.cpp:1610 msgid "Failed to get public key from RSA object" msgstr "Misslyckades med att erhålla publik nyckel från RSA-objekt" #: src/hed/libs/credential/Credential.cpp:1618 msgid "Failed to get public key from X509 object" msgstr "Misslyckades med att erhålla publik nyckel från X509-objekt" #: src/hed/libs/credential/Credential.cpp:1625 msgid "Failed to get public key" msgstr "Misslyckades med att erhålla publik nyckel" #: src/hed/libs/credential/Credential.cpp:1663 #, c-format msgid "Certiticate chain number %d" msgstr "Certifikatkedja nummer %d" #: src/hed/libs/credential/Credential.cpp:1691 msgid "NULL BIO passed to InquireRequest" msgstr "NULL BIO skickad till InquireRequest" #: src/hed/libs/credential/Credential.cpp:1694 msgid "PEM_read_bio_X509_REQ failed" msgstr "PEM_read_bio_X509_REQ misslyckades" #: src/hed/libs/credential/Credential.cpp:1698 msgid "d2i_X509_REQ_bio failed" msgstr "d2i_X509_REQ_bio misslyckades" #: src/hed/libs/credential/Credential.cpp:1720 msgid "Missing data in DER encoded PROXY_CERT_INFO_EXTENSION extension" msgstr "Saknar data i DER-kodat PROXY_CERT_INFO_EXTENSION-tillägg" #: src/hed/libs/credential/Credential.cpp:1732 msgid "Can not create PROXY_CERT_INFO_EXTENSION extension" msgstr "Kan inte skapa PROXY_CERT_INFO_EXTENSION-tillägg" #: src/hed/libs/credential/Credential.cpp:1742 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "Kan inte erhålla policy från PROXY_CERT_INFO_EXTENSION-tillägg" #: src/hed/libs/credential/Credential.cpp:1746 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "Kan inte erhålla policy-språk från PROXY_CERT_INFO_EXTENSION-tillägg" #: src/hed/libs/credential/Credential.cpp:1762 #, c-format msgid "Cert Type: %d" msgstr "Certifikattyp: %d" #: src/hed/libs/credential/Credential.cpp:1775 #: src/hed/libs/credential/Credential.cpp:1794 msgid "Can not create BIO for parsing request" msgstr "Kan inte skapa BIO för att tolka begäran" #: src/hed/libs/credential/Credential.cpp:1780 msgid "Read request from a string" msgstr "Läste begäran från en sträng" #: src/hed/libs/credential/Credential.cpp:1783 msgid "Failed to read request from a string" msgstr "Misslyckades med att läsa begäran från en sträng" #: src/hed/libs/credential/Credential.cpp:1798 msgid "Can not set readable file for request BIO" msgstr "Kan inte ange läsbar fil för begärans BIO" #: src/hed/libs/credential/Credential.cpp:1803 msgid "Read request from a file" msgstr "Läste begäran från en fil" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Failed to read request from a file" msgstr "Misslyckades med att läsa begäran från en fil" #: src/hed/libs/credential/Credential.cpp:1846 msgid "Can not convert private key to DER format" msgstr "Kan inte konvertera privat nyckel till DER-format" #: src/hed/libs/credential/Credential.cpp:2010 msgid "Credential is not initialized" msgstr "Referens har inte initierats" #: src/hed/libs/credential/Credential.cpp:2016 msgid "Failed to duplicate X509 structure" msgstr "Misslyckades med att duplicera X509-struktur" #: src/hed/libs/credential/Credential.cpp:2021 msgid "Failed to initialize X509 structure" msgstr "Misslyckades med att initiera X509-struktur" #:
src/hed/libs/credential/Credential.cpp:2044 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "Kan inte skapa tillägg för PROXY_CERT_INFO" #: src/hed/libs/credential/Credential.cpp:2048 #: src/hed/libs/credential/Credential.cpp:2096 msgid "Can not add X509 extension to proxy cert" msgstr "Kan inte lägga till X509-tillägg till proxycertifikat" #: src/hed/libs/credential/Credential.cpp:2064 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "Kan inte konvertera keyUsage-struct från DER-kodat format" #: src/hed/libs/credential/Credential.cpp:2076 #: src/hed/libs/credential/Credential.cpp:2085 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "Kan inte konvertera keyUsage-struct från internt till DER-format" #: src/hed/libs/credential/Credential.cpp:2092 msgid "Can not create extension for keyUsage" msgstr "Kan inte skapa tillägg för keyUsage" #: src/hed/libs/credential/Credential.cpp:2105 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "Kan inte erhålla utökat KeyUsage-tillägg från utfärdarcertifikatet" #: src/hed/libs/credential/Credential.cpp:2110 msgid "Can not copy extended KeyUsage extension" msgstr "Kan inte kopiera det utökade KeyUsage-tillägget" #: src/hed/libs/credential/Credential.cpp:2115 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" "Kan inte lägga till X509-utökat KeyUsage-tillägg till det nya " "proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2125 msgid "Can not compute digest of public key" msgstr "Kan inte beräkna digest för publik nyckel" #: src/hed/libs/credential/Credential.cpp:2136 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "Kan inte kopiera subjektnamnet från utfärdaren för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2142 msgid "Can not create name entry CN for proxy certificate" msgstr "Kan inte skapa namnpost CN för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2147 msgid "Can not set CN in proxy certificate" msgstr "Kan inte ange CN i proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2155 msgid "Can not set issuer's subject for proxy certificate" msgstr "Kan inte ange utfärdarens subjekt för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2160 msgid "Can not set version number for proxy certificate" msgstr "Kan inte ange versionsnummer för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2168 msgid "Can not set serial number for proxy certificate" msgstr "Kan inte ange serienummer för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2174 msgid "Can not duplicate serial number for proxy certificate" msgstr "Kan inte duplicera serienummer för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2180 msgid "Can not set the lifetime for proxy certificate" msgstr "Kan inte ange livstid för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2184 msgid "Can not set pubkey for proxy certificate" msgstr "Kan inte ange publik nyckel för proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2200 #: src/hed/libs/credential/Credential.cpp:2827 msgid "The credential to be signed is NULL" msgstr "Referensen som skall signeras är NULL" #: src/hed/libs/credential/Credential.cpp:2204 #: src/hed/libs/credential/Credential.cpp:2831 msgid "The credential to be signed contains no request" msgstr "Referensen som skall signeras innehåller ingen begäran" #: src/hed/libs/credential/Credential.cpp:2208 #:
src/hed/libs/credential/Credential.cpp:2835 msgid "The BIO for output is NULL" msgstr "Utdata-BIO är NULL" #: src/hed/libs/credential/Credential.cpp:2222 #: src/hed/libs/credential/Credential.cpp:2842 msgid "Error when extracting public key from request" msgstr "Fel när publik nyckel extraheras från begäran" #: src/hed/libs/credential/Credential.cpp:2227 #: src/hed/libs/credential/Credential.cpp:2846 msgid "Failed to verify the request" msgstr "Misslyckades med att verifiera begäran" #: src/hed/libs/credential/Credential.cpp:2231 msgid "Failed to add issuer's extension into proxy" msgstr "Misslyckades med att lägga till utfärdarens tillägg till proxyn" #: src/hed/libs/credential/Credential.cpp:2255 msgid "Failed to find extension" msgstr "Misslyckades med att hitta tillägg" #: src/hed/libs/credential/Credential.cpp:2267 msgid "Can not get the issuer's private key" msgstr "Kan inte erhålla utfärdarens privata nyckel" #: src/hed/libs/credential/Credential.cpp:2274 #: src/hed/libs/credential/Credential.cpp:2878 msgid "There is no digest in issuer's private key object" msgstr "Det finns inget digest i utfärdarens privata-nyckel-objekt" #: src/hed/libs/credential/Credential.cpp:2279 #: src/hed/libs/credential/Credential.cpp:2882 #, c-format msgid "%s is an unsupported digest type" msgstr "Digesttypen %s stöds inte" #: src/hed/libs/credential/Credential.cpp:2290 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" "Signeringsalgoritmen %s är ej tillåten, den skall vara SHA1 eller SHA2 för " "att signera certifikatbegäranden" #: src/hed/libs/credential/Credential.cpp:2296 msgid "Failed to sign the proxy certificate" msgstr "Misslyckades med att signera proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2298 msgid "Succeeded to sign the proxy certificate" msgstr "Lyckades signera proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2303 msgid "Failed to verify the signed certificate" msgstr "Misslyckades med att verifiera det signerade certifikatet" #: src/hed/libs/credential/Credential.cpp:2305 msgid "Succeeded to verify the signed certificate" msgstr "Lyckades verifiera det signerade certifikatet" #: src/hed/libs/credential/Credential.cpp:2310 #: src/hed/libs/credential/Credential.cpp:2319 msgid "Output the proxy certificate" msgstr "Skriv ut proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2313 msgid "Can not convert signed proxy cert into PEM format" msgstr "Kan inte konvertera det signerade proxycertifikatet till PEM-format" #: src/hed/libs/credential/Credential.cpp:2322 msgid "Can not convert signed proxy cert into DER format" msgstr "Kan inte konvertera det signerade proxycertifikatet till DER-format" #: src/hed/libs/credential/Credential.cpp:2338 #: src/hed/libs/credential/Credential.cpp:2361 msgid "Can not create BIO for signed proxy certificate" msgstr "Kan inte skapa BIO för det signerade proxycertifikatet" #: src/hed/libs/credential/Credential.cpp:2365 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "Kan inte ange skrivbar fil för det signerade proxycertifikatets BIO" #: src/hed/libs/credential/Credential.cpp:2370 msgid "Wrote signed proxy certificate into a file" msgstr "Skrev det signerade proxycertifikatet till en fil" #: src/hed/libs/credential/Credential.cpp:2373 msgid "Failed to write signed proxy certificate into a file" msgstr "" "Misslyckades med att skriva det signerade proxycertifikatet till en fil" #:
src/hed/libs/credential/Credential.cpp:2408 #: src/hed/libs/credential/Credential.cpp:2447 #, c-format msgid "ERROR: %s" msgstr "Fel: %s" #: src/hed/libs/credential/Credential.cpp:2455 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "SSL-fel: %s, bibliotek: %s, funktion: %s, anledning: %s" #: src/hed/libs/credential/Credential.cpp:2500 #, c-format msgid "unable to load number from: %s" msgstr "kunde inte ladda in nummer från: %s" #: src/hed/libs/credential/Credential.cpp:2505 msgid "error converting number from bin to BIGNUM" msgstr "fel vid konvertering av nummer från bin till BIGNUM" #: src/hed/libs/credential/Credential.cpp:2532 msgid "file name too long" msgstr "filnamn för långt" #: src/hed/libs/credential/Credential.cpp:2555 msgid "error converting serial to ASN.1 format" msgstr "fel vid konvertering av serienummer till ASN.1-format" #: src/hed/libs/credential/Credential.cpp:2588 #, c-format msgid "load serial from %s failure" msgstr "hämta serienummer från %s misslyckades" #: src/hed/libs/credential/Credential.cpp:2593 msgid "add_word failure" msgstr "add_word misslyckades" #: src/hed/libs/credential/Credential.cpp:2598 #, c-format msgid "save serial to %s failure" msgstr "spara serienummer till %s misslyckades" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Error initialising X509 store" msgstr "Fel vid initiering av X509-lager" #: src/hed/libs/credential/Credential.cpp:2625 msgid "Out of memory when generate random serial" msgstr "Minnet tog slut när slump-serienummer genererades" #: src/hed/libs/credential/Credential.cpp:2637 msgid "CA certificate and CA private key do not match" msgstr "CA-certifikat och CA-privat-nyckel matchar inte" #: src/hed/libs/credential/Credential.cpp:2661 #, c-format msgid "Failed to load extension section: %s" msgstr "Misslyckades med att hämta tilläggssektion: %s" #: src/hed/libs/credential/Credential.cpp:2698 msgid "malloc error" msgstr "Minnesallokeringsfel" #: src/hed/libs/credential/Credential.cpp:2702 msgid "Subject does not start with '/'" msgstr "Subjekt börjar inte med '/'" #: src/hed/libs/credential/Credential.cpp:2718 #: src/hed/libs/credential/Credential.cpp:2739 msgid "escape character at end of string" msgstr "escape-tecken vid strängens slut" #: src/hed/libs/credential/Credential.cpp:2730 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" "strängens slut påträffades medan typ för subjekt-namn-element #%d " "processerades" #: src/hed/libs/credential/Credential.cpp:2767 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "Subjektattribut %s har ingen känd NID, hoppar över" #: src/hed/libs/credential/Credential.cpp:2771 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "Inget värde tillhandahållet för subjektattribut %s, hoppar över" #: src/hed/libs/credential/Credential.cpp:2812 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" "Misslyckades med att ange publik nyckel för X509-objekt genom att använda " "publik nyckel från X509_REQ" #: src/hed/libs/credential/Credential.cpp:2822 msgid "The private key for signing is not initialized" msgstr "Privata nyckeln för signering har inte initierats" #: src/hed/libs/credential/Credential.cpp:2901 #, c-format msgid "Error when loading the extension config file: %s" msgstr "Fel vid inladdning av tilläggsinställningsfilen: %s" #: src/hed/libs/credential/Credential.cpp:2905 #, c-format msgid "Error when loading the
extension config file: %s on line: %d" msgstr "Fel vid inladdning av tilläggsinställningsfilen: %s på rad: %d" #: src/hed/libs/credential/Credential.cpp:2953 msgid "Can not sign a EEC" msgstr "Kan inte signera ett EEC" #: src/hed/libs/credential/Credential.cpp:2957 msgid "Output EEC certificate" msgstr "Skriv ut EEC-certifikatet" #: src/hed/libs/credential/Credential.cpp:2960 msgid "Can not convert signed EEC cert into DER format" msgstr "Kan inte konvertera det signerade EEC-certifikatet till DER-format" #: src/hed/libs/credential/Credential.cpp:2974 #: src/hed/libs/credential/Credential.cpp:2993 msgid "Can not create BIO for signed EEC certificate" msgstr "Kan inte skapa BIO för det signerade EEC-certifikatet" #: src/hed/libs/credential/Credential.cpp:2997 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "Kan inte ange skrivbar fil för det signerade EEC-certifikatets BIO" #: src/hed/libs/credential/Credential.cpp:3002 msgid "Wrote signed EEC certificate into a file" msgstr "Skrev det signerade EEC-certifikatet till en fil" #: src/hed/libs/credential/Credential.cpp:3005 msgid "Failed to write signed EEC certificate into a file" msgstr "Misslyckades med att skriva det signerade EEC-certifikatet till en fil" #: src/hed/libs/credential/NSSUtil.cpp:143 msgid "Error writing raw certificate" msgstr "Fel vid skrivning av raw certifikat" #: src/hed/libs/credential/NSSUtil.cpp:222 msgid "Failed to add RFC proxy OID" msgstr "Misslyckades med att lägga till RFC-proxy-OID" #: src/hed/libs/credential/NSSUtil.cpp:225 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "Lyckades lägga till RFC-proxy-OID, tagg %d returnerades" #: src/hed/libs/credential/NSSUtil.cpp:231 msgid "Failed to add anyLanguage OID" msgstr "Misslyckades med att lägga till anyLanguage-OID" #: src/hed/libs/credential/NSSUtil.cpp:234 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "Lyckades lägga till anyLanguage-OID, tagg %d returnerades" #: src/hed/libs/credential/NSSUtil.cpp:240 msgid "Failed to add inheritAll OID" msgstr "Misslyckades med att lägga till inheritAll-OID" #: src/hed/libs/credential/NSSUtil.cpp:243 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "Lyckades lägga till inheritAll-OID, tagg %d returnerades" #: src/hed/libs/credential/NSSUtil.cpp:249 msgid "Failed to add Independent OID" msgstr "Misslyckades med att lägga till Independent-OID" #: src/hed/libs/credential/NSSUtil.cpp:252 #, c-format msgid "Succeeded to add Independent OID, tag %d is returned" msgstr "Lyckades lägga till Independent-OID, tagg %d returnerades" #: src/hed/libs/credential/NSSUtil.cpp:258 msgid "Failed to add VOMS AC sequence OID" msgstr "Misslyckades med att lägga till VOMS-AC-sekvens-OID" #: src/hed/libs/credential/NSSUtil.cpp:261 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "Lyckades lägga till VOMS-AC-sekvens-OID, tagg %d returnerades" #: src/hed/libs/credential/NSSUtil.cpp:290 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "NSS-initiering misslyckades på certifikatdatabas: %s" #: src/hed/libs/credential/NSSUtil.cpp:301 msgid "Succeeded to initialize NSS" msgstr "Lyckades med att initiera NSS" #: src/hed/libs/credential/NSSUtil.cpp:323 #, c-format msgid "Failed to read attribute %x from private key." msgstr "Misslyckades med att läsa attribut %x från privat nyckel."
#: src/hed/libs/credential/NSSUtil.cpp:375 msgid "Succeeded to get credential" msgstr "Lyckades erhålla referens" #: src/hed/libs/credential/NSSUtil.cpp:376 msgid "Failed to get credential" msgstr "Misslyckades med att erhålla referens" #: src/hed/libs/credential/NSSUtil.cpp:438 msgid "p12 file is empty" msgstr "p12-fil är tom" #: src/hed/libs/credential/NSSUtil.cpp:448 msgid "Unable to write to p12 file" msgstr "Kunde inte skriva till p12-fil" #: src/hed/libs/credential/NSSUtil.cpp:464 msgid "Failed to open p12 file" msgstr "Misslyckades med att öppna p12-fil" #: src/hed/libs/credential/NSSUtil.cpp:492 msgid "Failed to allocate p12 context" msgstr "Misslyckades med att allokera p12-kontext" #: src/hed/libs/credential/NSSUtil.cpp:1200 msgid "Failed to find issuer certificate for proxy certificate" msgstr "Misslyckades med att hitta utfärdarcertifikat för proxycertifikat" #: src/hed/libs/credential/NSSUtil.cpp:1351 #, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "Misslyckades med att autentisera till PKCS11 slot %s" #: src/hed/libs/credential/NSSUtil.cpp:1357 #, c-format msgid "Failed to find certificates by nickname: %s" msgstr "Misslyckades med att hitta certifikat med smeknamn: %s" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "No user certificate by nickname %s found" msgstr "Hittade inget certifikat med smeknamn %s" #: src/hed/libs/credential/NSSUtil.cpp:1375 #: src/hed/libs/credential/NSSUtil.cpp:1411 msgid "Certificate does not have a slot" msgstr "Certifikat har ingen slot" #: src/hed/libs/credential/NSSUtil.cpp:1381 msgid "Failed to create export context" msgstr "Misslyckades med att skapa export-kontext" #: src/hed/libs/credential/NSSUtil.cpp:1396 msgid "PKCS12 output password not provided" msgstr "PKCS12 output-lösenord inte tillhandahållet" #: src/hed/libs/credential/NSSUtil.cpp:1403 msgid "PKCS12 add password integrity failed" msgstr "PKCS12 lägg till lösenordsintegritet misslyckades" #: src/hed/libs/credential/NSSUtil.cpp:1424 msgid "Failed to create key or certificate safe" msgstr "Misslyckades med att skapa nyckel- eller certifikat-safe" #: src/hed/libs/credential/NSSUtil.cpp:1440 msgid "Failed to add certificate and key" msgstr "Misslyckades med att lägga till certifikat och nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1449 #, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "Misslyckades med att initiera PKCS12-fil: %s" #: src/hed/libs/credential/NSSUtil.cpp:1454 msgid "Failed to encode PKCS12" msgstr "Misslyckades med att koda PKCS12" #: src/hed/libs/credential/NSSUtil.cpp:1457 msgid "Succeeded to export PKCS12" msgstr "Lyckades exportera PKCS12" #: src/hed/libs/credential/NSSUtil.cpp:1485 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" "Det finns inget certifikat med namn %s, certifikatet kan tas bort när CSR " "genereras" #: src/hed/libs/credential/NSSUtil.cpp:1491 msgid "Failed to delete certificate" msgstr "Misslyckades med att ta bort certifikat" #: src/hed/libs/credential/NSSUtil.cpp:1505 msgid "The name of the private key to delete is empty" msgstr "Namnet på den privata nyckeln som ska tas bort är tomt" #: src/hed/libs/credential/NSSUtil.cpp:1510 #: src/hed/libs/credential/NSSUtil.cpp:2939 #: src/hed/libs/credential/NSSUtil.cpp:2956 #, c-format msgid "Failed to authenticate to token %s" msgstr "Misslyckades med att autentisera till token %s" #: src/hed/libs/credential/NSSUtil.cpp:1517 #, c-format msgid "No private key with nickname %s
exist in NSS database" msgstr "Ingen privat nyckel med smeknamn %s existerar i NSS-databasen" #: src/hed/libs/credential/NSSUtil.cpp:1550 msgid "Failed to delete private key and certificate" msgstr "Misslyckades med att ta bort privat nyckel och certifikat" #: src/hed/libs/credential/NSSUtil.cpp:1560 msgid "Failed to delete private key" msgstr "Misslyckades med att ta bort privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, c-format msgid "Can not find key with name: %s" msgstr "Kan inte hitta nyckel med namn: %s" #: src/hed/libs/credential/NSSUtil.cpp:1599 msgid "Can not read PEM private key: probably bad password" msgstr "Kan inte läsa PEM privat nyckel: troligen fel lösenord" #: src/hed/libs/credential/NSSUtil.cpp:1601 msgid "Can not read PEM private key: failed to decrypt" msgstr "Kan inte läsa PEM privat nyckel: misslyckades med att dekryptera" #: src/hed/libs/credential/NSSUtil.cpp:1603 #: src/hed/libs/credential/NSSUtil.cpp:1605 msgid "Can not read PEM private key: failed to obtain password" msgstr "Kan inte läsa PEM privat nyckel: misslyckades med att erhålla lösenord" #: src/hed/libs/credential/NSSUtil.cpp:1606 msgid "Can not read PEM private key" msgstr "Kan inte läsa PEM privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1613 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "Misslyckades med att konvertera EVP_PKEY till PKCS8" #: src/hed/libs/credential/NSSUtil.cpp:1650 msgid "Failed to load private key" msgstr "Misslyckades med att ladda in privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Succeeded to load PrivateKeyInfo" msgstr "Lyckades ladda in PrivateKeyInfo" #: src/hed/libs/credential/NSSUtil.cpp:1654 msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "Misslyckades med att konvertera PrivateKeyInfo till EVP_PKEY" #: src/hed/libs/credential/NSSUtil.cpp:1655 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "Lyckades konvertera PrivateKeyInfo till EVP_PKEY" #: src/hed/libs/credential/NSSUtil.cpp:1692 msgid "Failed to import private key" msgstr "Misslyckades med att importera privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1695 msgid "Succeeded to import private key" msgstr "Lyckades importera privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1708 #: src/hed/libs/credential/NSSUtil.cpp:1750 #: src/hed/libs/credential/NSSUtil.cpp:2889 msgid "Failed to authenticate to key database" msgstr "Misslyckades med att autentisera till nyckeldatabas" #: src/hed/libs/credential/NSSUtil.cpp:1717 msgid "Succeeded to generate public/private key pair" msgstr "Lyckades generera publik/privat nyckelpar" #: src/hed/libs/credential/NSSUtil.cpp:1719 msgid "Failed to generate public/private key pair" msgstr "Misslyckades med att generera publik/privat nyckelpar" #: src/hed/libs/credential/NSSUtil.cpp:1724 msgid "Failed to export private key" msgstr "Misslyckades med att exportera privat nyckel" #: src/hed/libs/credential/NSSUtil.cpp:1791 msgid "Failed to create subject name" msgstr "Misslyckades med att skapa subjektnamn" #: src/hed/libs/credential/NSSUtil.cpp:1807 msgid "Failed to create certificate request" msgstr "Misslyckades med att skapa certifikatbegäran" #: src/hed/libs/credential/NSSUtil.cpp:1820 msgid "Failed to call PORT_NewArena" msgstr "Misslyckades med att anropa PORT_NewArena" #: src/hed/libs/credential/NSSUtil.cpp:1828 msgid "Failed to encode the certificate request with DER format" msgstr "Misslyckades med att koda certifikatbegäran med DER-format" #: src/hed/libs/credential/NSSUtil.cpp:1835 msgid "Unknown key or hash
type" msgstr "Okänd nyckel- eller hashtyp" #: src/hed/libs/credential/NSSUtil.cpp:1841 msgid "Failed to sign the certificate request" msgstr "Misslyckades med att signera certifikatbegäran" #: src/hed/libs/credential/NSSUtil.cpp:1857 msgid "Failed to output the certificate request as ASCII format" msgstr "Misslyckades med att skriva ut certifikatbegäran i ASCII-format" #: src/hed/libs/credential/NSSUtil.cpp:1866 msgid "Failed to output the certificate request as DER format" msgstr "Misslyckades med att skriva ut certifikatbegäran i DER-format" #: src/hed/libs/credential/NSSUtil.cpp:1875 #, c-format msgid "Succeeded to output the certificate request into %s" msgstr "Lyckades skriva ut certifikatbegäran till %s" #: src/hed/libs/credential/NSSUtil.cpp:1914 #: src/hed/libs/credential/NSSUtil.cpp:1951 msgid "Failed to read data from input file" msgstr "Misslyckades med att läsa data frÃ¥n indatabuffer" #: src/hed/libs/credential/NSSUtil.cpp:1930 msgid "Input is without trailer\n" msgstr "Indata är utan trailer\n" #: src/hed/libs/credential/NSSUtil.cpp:1941 msgid "Failed to convert ASCII to DER" msgstr "Misslyckades med att konvertera ASCII till DER" #: src/hed/libs/credential/NSSUtil.cpp:1992 msgid "Certificate request is invalid" msgstr "Certifikatbegäran är ogiltig" #: src/hed/libs/credential/NSSUtil.cpp:2212 #, c-format msgid "The policy language: %s is not supported" msgstr "Policy-sprÃ¥ket: %s stöds inte" #: src/hed/libs/credential/NSSUtil.cpp:2220 #: src/hed/libs/credential/NSSUtil.cpp:2245 #: src/hed/libs/credential/NSSUtil.cpp:2268 #: src/hed/libs/credential/NSSUtil.cpp:2290 msgid "Failed to new arena" msgstr "Misslyckades med ny arena" #: src/hed/libs/credential/NSSUtil.cpp:2229 #: src/hed/libs/credential/NSSUtil.cpp:2254 msgid "Failed to create path length" msgstr "Misslyckades med att skapa certifikatkedjelängd" #: src/hed/libs/credential/NSSUtil.cpp:2232 #: src/hed/libs/credential/NSSUtil.cpp:2257 #: src/hed/libs/credential/NSSUtil.cpp:2277 #: src/hed/libs/credential/NSSUtil.cpp:2299 msgid "Failed to create policy language" msgstr "Misslyckades med att skapa policy-sprÃ¥k" #: src/hed/libs/credential/NSSUtil.cpp:2700 #, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "Misslyckades med att tolka certifikatbegäran frÃ¥n CSR-fil %s" #: src/hed/libs/credential/NSSUtil.cpp:2707 #, c-format msgid "Can not find certificate with name %s" msgstr "Kan inte hitta certifikat med namn %s" #: src/hed/libs/credential/NSSUtil.cpp:2739 msgid "Can not allocate memory" msgstr "Kan inte allokera minne" #: src/hed/libs/credential/NSSUtil.cpp:2747 #, c-format msgid "Proxy subject: %s" msgstr "Proxysubjekt: %s" #: src/hed/libs/credential/NSSUtil.cpp:2764 msgid "Failed to start certificate extension" msgstr "Misslyckades med att pÃ¥börja certifikattillägg" #: src/hed/libs/credential/NSSUtil.cpp:2769 msgid "Failed to add key usage extension" msgstr "Misslyckades med att lägga till nyckelanvändningstillägg" #: src/hed/libs/credential/NSSUtil.cpp:2774 msgid "Failed to add proxy certificate information extension" msgstr "Misslyckades med att lägga till proxycertifikatsinformationstillägg" #: src/hed/libs/credential/NSSUtil.cpp:2778 msgid "Failed to add voms AC extension" msgstr "Misslyckades med att lägga till VOMS-AC-tillägg" #: src/hed/libs/credential/NSSUtil.cpp:2798 msgid "Failed to retrieve private key for issuer" msgstr "Misslyckades med att hämta privat nyckel för utfärdare" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Unknown key or hash type of issuer" msgstr "Okänd 
nyckel- eller hashtyp för utfärdare" #: src/hed/libs/credential/NSSUtil.cpp:2811 msgid "Failed to set signature algorithm ID" msgstr "Misslyckades med att sätta signeringsalgoritm" #: src/hed/libs/credential/NSSUtil.cpp:2823 msgid "Failed to encode certificate" msgstr "Misslyckades med att koda certifikat" #: src/hed/libs/credential/NSSUtil.cpp:2829 msgid "Failed to allocate item for certificate data" msgstr "Misslyckades med att allokera minne för certifikatdata" #: src/hed/libs/credential/NSSUtil.cpp:2835 msgid "Failed to sign encoded certificate data" msgstr "Misslyckades med att signera kodad certifikatdata" #: src/hed/libs/credential/NSSUtil.cpp:2844 #, c-format msgid "Failed to open file %s" msgstr "Misslyckades med att öppna fil %s" #: src/hed/libs/credential/NSSUtil.cpp:2855 #, c-format msgid "Succeeded to output certificate to %s" msgstr "Lyckades skriva ut certifikat till %s" #: src/hed/libs/credential/NSSUtil.cpp:2896 #, c-format msgid "Failed to open input certificate file %s" msgstr "Misslyckades med att öppna indata-certifikatfil %s" #: src/hed/libs/credential/NSSUtil.cpp:2913 msgid "Failed to read input certificate file" msgstr "Misslyckades med att läsa indata-certifikatfil" #: src/hed/libs/credential/NSSUtil.cpp:2918 msgid "Failed to get certificate from certificate file" msgstr "Misslyckades med att erhÃ¥lla certifikat frÃ¥n certifikatfil" #: src/hed/libs/credential/NSSUtil.cpp:2925 msgid "Failed to allocate certificate trust" msgstr "Misslyckades med att allokera certifikat-tillit" #: src/hed/libs/credential/NSSUtil.cpp:2930 msgid "Failed to decode trust string" msgstr "Misslyckades med att avkoda tillitssträng" #: src/hed/libs/credential/NSSUtil.cpp:2944 #: src/hed/libs/credential/NSSUtil.cpp:2961 msgid "Failed to add certificate to token or database" msgstr "Misslyckades med att lägga till certifikat till token eller databas" #: src/hed/libs/credential/NSSUtil.cpp:2947 #: src/hed/libs/credential/NSSUtil.cpp:2950 msgid "Succeeded to import certificate" msgstr "Lyckades importera certifikat" #: src/hed/libs/credential/NSSUtil.cpp:2964 #: src/hed/libs/credential/NSSUtil.cpp:2967 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "Lyckades ändra tillit till: %s" #: src/hed/libs/credential/NSSUtil.cpp:2994 #, c-format msgid "Failed to import private key from file: %s" msgstr "Misslyckades med att importera privat nyckel frÃ¥n fil: %s" #: src/hed/libs/credential/NSSUtil.cpp:2996 #, c-format msgid "Failed to import certificate from file: %s" msgstr "Misslyckades med att importera certifikat frÃ¥n fil: %s" #: src/hed/libs/credential/VOMSConfig.cpp:147 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" "Fel: VOMS-inställningsrad innehÃ¥ller för mÃ¥nga token. Förväntade 5 eller 6. " "Raden är: %s" #: src/hed/libs/credential/VOMSConfig.cpp:163 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" "Fel: filträd är för djupt vid skanning av VOMS-inställningar. Max tillÃ¥ten " "nestning är %i." #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" "Fel: misslyckades med att läsa fil %s vid skanning av VOMS-inställningar." #: src/hed/libs/credential/VOMSConfig.cpp:186 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." 
msgstr "" "Fel: VOMS-inställningsfil %s innehÃ¥ller för mÃ¥nga rader. Max antal som stöds " "är %i." #: src/hed/libs/credential/VOMSConfig.cpp:193 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." msgstr "" "Fel: VOMS-inställningsfil %s innehÃ¥ller för lÃ¥ng(a) rad(er). Max längd som " "stöds är %i tecken." #: src/hed/libs/credential/VOMSUtil.cpp:137 #, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "Misslyckades med att skapa OpenSSL-objekt %s %s - %u %s" #: src/hed/libs/credential/VOMSUtil.cpp:144 #, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "Misslyckades med att erhÃ¥lla OpenSSL-identifierare för %s" #: src/hed/libs/credential/VOMSUtil.cpp:302 #: src/hed/libs/credential/VOMSUtil.cpp:571 #, c-format msgid "VOMS: create FQAN: %s" msgstr "VOMS: skapa FQAN: %s" #: src/hed/libs/credential/VOMSUtil.cpp:340 #: src/hed/libs/credential/VOMSUtil.cpp:619 #, c-format msgid "VOMS: create attribute: %s" msgstr "VOMS: skapa attribut: %s" #: src/hed/libs/credential/VOMSUtil.cpp:917 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "VOMS: Kan inte allokera minne för att tolka AC" #: src/hed/libs/credential/VOMSUtil.cpp:925 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "VOMS: Kan inte allokera minne för att att lagra ordningen för AC" #: src/hed/libs/credential/VOMSUtil.cpp:951 msgid "VOMS: Can not parse AC" msgstr "VOMS: Kan inte tolka AC" #: src/hed/libs/credential/VOMSUtil.cpp:981 msgid "" "VOMS: CA directory or CA file must be provided or default setting enabled" msgstr "" "VOMS: CA-katalog eller CA-fil mÃ¥ste tillhandahÃ¥llas eller förvalt alternativ " "aktiverat" #: src/hed/libs/credential/VOMSUtil.cpp:1052 msgid "VOMS: failed to verify AC signature" msgstr "VOMS: misslyckades med att verifiera AC-signatur" #: src/hed/libs/credential/VOMSUtil.cpp:1108 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "VOMS: tillitskedja att kontrollera: %s " #: src/hed/libs/credential/VOMSUtil.cpp:1116 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "VOMS: DN i certifikat: %s matchar inte det i betrodda DN-listan: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1122 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" "VOMS: utfärdaridentiteten i certifikat: %s matchar inte den i betrodda DN-" "listan: %s" #: src/hed/libs/credential/VOMSUtil.cpp:1157 #, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "VOMS: lsc-filen %s existerar inte" #: src/hed/libs/credential/VOMSUtil.cpp:1163 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "VOMS: lsc-filen %s kan inte öppnas" #: src/hed/libs/credential/VOMSUtil.cpp:1215 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." msgstr "" "VOMS: det finns inga villkor pÃ¥ betrodda voms-DN, certifikatstacken i AC " "kommer inte att kontrolleras." 
#: src/hed/libs/credential/VOMSUtil.cpp:1248
msgid "VOMS: unable to match certificate chain against VOMS trusted DNs"
msgstr "VOMS: kunde inte matcha certifikatkedja mot VOMS betrodda DN"

#: src/hed/libs/credential/VOMSUtil.cpp:1268
msgid "VOMS: AC signature verification failed"
msgstr "VOMS: AC-signaturverifiering misslyckades"

#: src/hed/libs/credential/VOMSUtil.cpp:1277
msgid "VOMS: unable to verify certificate chain"
msgstr "VOMS: kunde inte verifiera certifikatkedja"

#: src/hed/libs/credential/VOMSUtil.cpp:1283
#, c-format
msgid "VOMS: cannot validate AC issuer for VO %s"
msgstr "VOMS: kan inte validera AC-utfärdare för VO %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1306
#, c-format
msgid "VOMS: directory for trusted service certificates: %s"
msgstr "VOMS: katalog för betrodda tjänstecertifikat: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1332
#, c-format
msgid "VOMS: Cannot find certificate of AC issuer for VO %s"
msgstr "VOMS: Kan inte hitta AC-utfärdarens certifikat för VO %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1358
#: src/hed/libs/credential/VOMSUtil.cpp:1427
msgid "VOMS: Can not find AC_ATTR with IETFATTR type"
msgstr "VOMS: Kan inte hitta AC_ATTR med IETFATTR-typ"

#: src/hed/libs/credential/VOMSUtil.cpp:1365
#: src/hed/libs/credential/VOMSUtil.cpp:1434
msgid "VOMS: case of multiple IETFATTR attributes not supported"
msgstr "VOMS: mer än ett IETFATTR-attribut stöds inte"

#: src/hed/libs/credential/VOMSUtil.cpp:1375
#: src/hed/libs/credential/VOMSUtil.cpp:1450
msgid "VOMS: case of multiple policyAuthority not supported"
msgstr "VOMS: mer än en policyAuthority stöds inte"

#: src/hed/libs/credential/VOMSUtil.cpp:1391
#: src/hed/libs/credential/VOMSUtil.cpp:1467
msgid "VOMS: the format of policyAuthority is unsupported - expecting URI"
msgstr "VOMS: formatet för policyAuthority stöds inte - förväntar URI"

#: src/hed/libs/credential/VOMSUtil.cpp:1400
#: src/hed/libs/credential/VOMSUtil.cpp:1478
msgid ""
"VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING"
msgstr "VOMS: formatet för IETFATTRVAL stöds inte - förväntar OCTET STRING"

#: src/hed/libs/credential/VOMSUtil.cpp:1443
msgid "VOMS: failed to access IETFATTR attribute"
msgstr "VOMS: misslyckades med att komma åt IETFATTR-attribut"

#: src/hed/libs/credential/VOMSUtil.cpp:1538
msgid "VOMS: the grantor attribute is empty"
msgstr "VOMS: grantor-attributet är tomt"

#: src/hed/libs/credential/VOMSUtil.cpp:1556
msgid "VOMS: the attribute name is empty"
msgstr "VOMS: attributnamnet är tomt"

#: src/hed/libs/credential/VOMSUtil.cpp:1562
#, c-format
msgid "VOMS: the attribute value for %s is empty"
msgstr "VOMS: attributvärdet för %s är tomt"

#: src/hed/libs/credential/VOMSUtil.cpp:1567
msgid "VOMS: the attribute qualifier is empty"
msgstr "VOMS: attributkvalifieraren är tom"

#: src/hed/libs/credential/VOMSUtil.cpp:1602
#: src/hed/libs/credential/VOMSUtil.cpp:1721
msgid ""
"VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions "
"must be present"
msgstr ""
"VOMS: både certifikattilläggen idcenoRevAvail och authorityKeyIdentifier "
"måste finnas"

#: src/hed/libs/credential/VOMSUtil.cpp:1636
#: src/hed/libs/credential/VOMSUtil.cpp:1757
#, c-format
msgid "VOMS: FQDN of this host %s does not match any target in AC"
msgstr "VOMS: FQDN för denna värd %s matchar inte något target i AC"

#: src/hed/libs/credential/VOMSUtil.cpp:1641
#: src/hed/libs/credential/VOMSUtil.cpp:1762
msgid "VOMS: the only supported critical extension of the AC is idceTargets"
msgstr "VOMS: det enda kritiska tillägget till AC som stöds är idceTargets"

#: src/hed/libs/credential/VOMSUtil.cpp:1656
#: src/hed/libs/credential/VOMSUtil.cpp:1777
msgid "VOMS: failed to parse attributes from AC"
msgstr "VOMS: misslyckades med att tolka attribut från AC"

#: src/hed/libs/credential/VOMSUtil.cpp:1700
#: src/hed/libs/credential/VOMSUtil.cpp:1829
msgid "VOMS: authorityKey is wrong"
msgstr "VOMS: authorityKey är felaktig"

#: src/hed/libs/credential/VOMSUtil.cpp:1861
#: src/hed/libs/credential/VOMSUtil.cpp:2029
#: src/hed/libs/credential/VOMSUtil.cpp:2037
msgid "VOMS: missing AC parts"
msgstr "VOMS: saknade AC-delar"

#: src/hed/libs/credential/VOMSUtil.cpp:1878
#: src/hed/libs/credential/VOMSUtil.cpp:2054
msgid "VOMS: unsupported time format in AC - expecting GENERALIZED TIME"
msgstr "VOMS: tidsformat i AC som inte stöds - förväntar GENERALIZED TIME"

#: src/hed/libs/credential/VOMSUtil.cpp:1884
#: src/hed/libs/credential/VOMSUtil.cpp:2060
msgid "VOMS: AC is not yet valid"
msgstr "VOMS: AC är inte giltig än"

#: src/hed/libs/credential/VOMSUtil.cpp:1891
#: src/hed/libs/credential/VOMSUtil.cpp:2067
msgid "VOMS: AC has expired"
msgstr "VOMS: giltighetstiden för AC har gått ut"

#: src/hed/libs/credential/VOMSUtil.cpp:1906
#: src/hed/libs/credential/VOMSUtil.cpp:2080
msgid "VOMS: AC is not complete - missing Serial or Issuer information"
msgstr "VOMS: AC är inte komplett - saknar Serial- eller Issuer-information"

#: src/hed/libs/credential/VOMSUtil.cpp:1911
#: src/hed/libs/credential/VOMSUtil.cpp:2085
#, c-format
msgid "VOMS: the holder serial number is: %lx"
msgstr "VOMS: innehavarens serienummer är: %lx"

#: src/hed/libs/credential/VOMSUtil.cpp:1912
#: src/hed/libs/credential/VOMSUtil.cpp:2086
#, c-format
msgid "VOMS: the serial number in AC is: %lx"
msgstr "VOMS: serienumret i AC är: %lx"

#: src/hed/libs/credential/VOMSUtil.cpp:1915
#: src/hed/libs/credential/VOMSUtil.cpp:2089
#, c-format
msgid ""
"VOMS: the holder serial number %lx is not the same as the serial number in "
"AC %lx, the holder certificate that is used to create a voms proxy could be "
"a proxy certificate with a different serial number as the original EEC cert"
msgstr ""
"VOMS: innehavarens serienummer %lx är inte detsamma som serienumret i AC "
"%lx, innehavarens certifikat som används för att skapa en vomsproxy kan vara "
"ett proxycertifikat med ett annat serienummer än det ursprungliga EEC-"
"certifikatet"

#: src/hed/libs/credential/VOMSUtil.cpp:1924
#: src/hed/libs/credential/VOMSUtil.cpp:2098
msgid "VOMS: the holder information in AC is wrong"
msgstr "VOMS: innehavarinformationen i AC är felaktig"

#: src/hed/libs/credential/VOMSUtil.cpp:1946
#: src/hed/libs/credential/VOMSUtil.cpp:2120
#, c-format
msgid "VOMS: DN of holder in AC: %s"
msgstr "VOMS: innehavarens DN i AC: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1947
#: src/hed/libs/credential/VOMSUtil.cpp:2121
#, c-format
msgid "VOMS: DN of holder: %s"
msgstr "VOMS: innehavarens DN: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1948
#: src/hed/libs/credential/VOMSUtil.cpp:2122
#, c-format
msgid "VOMS: DN of issuer: %s"
msgstr "VOMS: utfärdarens DN: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1955
#: src/hed/libs/credential/VOMSUtil.cpp:2129
msgid ""
"VOMS: the holder name in AC is not related to the distinguished name in "
"holder certificate"
msgstr ""
"VOMS: innehavarens namn i AC är inte relaterat till DN i innehavarens "
"certifikat"

#: src/hed/libs/credential/VOMSUtil.cpp:1967
#: src/hed/libs/credential/VOMSUtil.cpp:1974
#: src/hed/libs/credential/VOMSUtil.cpp:2141
#: src/hed/libs/credential/VOMSUtil.cpp:2148
msgid "VOMS: the holder issuerUID is not the same as that in AC"
msgstr "VOMS: innehavarens utfärdar-UID är inte detsamma som det i AC"

#: src/hed/libs/credential/VOMSUtil.cpp:1987
#: src/hed/libs/credential/VOMSUtil.cpp:2160
msgid "VOMS: the holder issuer name is not the same as that in AC"
msgstr "VOMS: innehavarens utfärdarnamn är inte detsamma som det i AC"

#: src/hed/libs/credential/VOMSUtil.cpp:1997
#: src/hed/libs/credential/VOMSUtil.cpp:2169
msgid "VOMS: the issuer information in AC is wrong"
msgstr "VOMS: utfärdarinformationen i AC är felaktig"

#: src/hed/libs/credential/VOMSUtil.cpp:2005
#: src/hed/libs/credential/VOMSUtil.cpp:2177
#, c-format
msgid "VOMS: the issuer name %s is not the same as that in AC - %s"
msgstr "VOMS: utfärdarnamnet %s är inte detsamma som det i AC - %s"

#: src/hed/libs/credential/VOMSUtil.cpp:2013
#: src/hed/libs/credential/VOMSUtil.cpp:2185
msgid ""
"VOMS: the serial number of AC INFO is too long - expecting no more than 20 "
"octets"
msgstr ""
"VOMS: serienumret i AC INFO är för långt - förväntar inte mer än 20 oktetter"

#: src/hed/libs/credential/VOMSUtil.cpp:2221
#: src/hed/libs/credential/VOMSUtil.cpp:2233
#: src/hed/libs/credential/VOMSUtil.cpp:2247
#: src/hed/libs/credential/VOMSUtil.cpp:2259
#: src/hed/libs/credential/VOMSUtil.cpp:2282
msgid "VOMS: unable to extract VO name from AC"
msgstr "VOMS: kunde inte extrahera VO-namn från AC"

#: src/hed/libs/credential/VOMSUtil.cpp:2273
#, c-format
msgid "VOMS: unable to determine hostname of AC from VO name: %s"
msgstr "VOMS: kunde inte bestämma värdnamn i AC från VO-namn: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:2292
msgid "VOMS: can not verify the signature of the AC"
msgstr "VOMS: kan inte verifiera AC:s signatur"

#: src/hed/libs/credential/VOMSUtil.cpp:2298
msgid "VOMS: problems while parsing information in AC"
msgstr "VOMS: problem vid tolkning av information i AC"

#: src/hed/libs/credential/test/VOMSUtilTest.cpp:126
#, c-format
msgid "Line %d.%d of the attributes returned: %s"
msgstr "Rad %d.%d i attributen returnerade: %s"

#: src/hed/libs/credentialstore/ClientVOMS.cpp:149
msgid "voms"
msgstr "voms"

#: src/hed/libs/credentialstore/CredentialStore.cpp:194
#: src/hed/libs/credentialstore/CredentialStore.cpp:245
#: src/hed/libs/credentialstore/CredentialStore.cpp:273
#: src/hed/libs/credentialstore/CredentialStore.cpp:336
#: src/hed/libs/credentialstore/CredentialStore.cpp:376
#: src/hed/libs/credentialstore/CredentialStore.cpp:406
#, c-format
msgid "MyProxy failure: %s"
msgstr "MyProxy-fel: %s"

#: src/hed/libs/crypto/OpenSSL.cpp:64
#, c-format
msgid "SSL error: %d - %s:%s:%s"
msgstr "SSL-fel: %d - %s:%s:%s"

#: src/hed/libs/crypto/OpenSSL.cpp:78
msgid "Failed to lock arccrypto library in memory"
msgstr "Misslyckades med att låsa arccrypto-biblioteket i minnet"

#: src/hed/libs/crypto/OpenSSL.cpp:81
msgid "Failed to initialize OpenSSL library"
msgstr "Misslyckades med att initiera OpenSSL-biblioteket"

#: src/hed/libs/data/DataExternalHelper.cpp:157
msgid "failed to read data tag"
msgstr "misslyckades med att läsa data-tagg"

#: src/hed/libs/data/DataExternalHelper.cpp:161
msgid "waiting for data chunk"
msgstr "väntar på data-chunk"

#: src/hed/libs/data/DataExternalHelper.cpp:163
msgid "failed to read data chunk"
msgstr "misslyckades med att läsa data-chunk"

#: src/hed/libs/data/DataExternalHelper.cpp:171
#, c-format
msgid "data chunk: %llu %llu"
msgstr "data-chunk: %llu %llu"

#: src/hed/libs/data/DataExternalHelper.cpp:242
#, c-format
msgid "DataMove::Transfer: using supplied checksum %s"
msgstr "DataMove::Transfer: använder tillhandahållen checksumma %s"

#: src/hed/libs/data/DataExternalHelper.cpp:361
msgid "Expecting Module, Command and URL provided"
msgstr "Förväntade att modul, kommando och URL tillhandahållits"

#: src/hed/libs/data/DataExternalHelper.cpp:368
msgid "Expecting Command module path among arguments"
msgstr "Förväntade kommandomodulens sökväg bland argumenten"

#: src/hed/libs/data/DataExternalHelper.cpp:372
msgid "Expecting Command module name among arguments"
msgstr "Förväntade kommandomodulens namn bland argumenten"

#: src/hed/libs/data/DataMover.cpp:126
msgid "No locations found - probably no more physical instances"
msgstr "Hittade inga platser - troligen inga fler fysiska instanser"

#: src/hed/libs/data/DataMover.cpp:132 src/hed/libs/data/FileCache.cpp:550
#: src/libs/data-staging/Processor.cpp:394
#: src/libs/data-staging/Processor.cpp:408
#, c-format
msgid "Removing %s"
msgstr "Tar bort %s"

#: src/hed/libs/data/DataMover.cpp:145
msgid "This instance was already deleted"
msgstr "Denna instans har redan tagits bort"

#: src/hed/libs/data/DataMover.cpp:151
msgid "Failed to delete physical file"
msgstr "Misslyckades med att ta bort fysisk fil"

#: src/hed/libs/data/DataMover.cpp:162
#, c-format
msgid "Removing metadata in %s"
msgstr "Tar bort metadata i %s"

#: src/hed/libs/data/DataMover.cpp:166
msgid "Failed to delete meta-information"
msgstr "Misslyckades med att ta bort metainformation"

#: src/hed/libs/data/DataMover.cpp:180
msgid "Failed to remove all physical instances"
msgstr "Misslyckades med att ta bort alla fysiska instanser"

#: src/hed/libs/data/DataMover.cpp:184
#, c-format
msgid "Removing logical file from metadata %s"
msgstr "Tar bort logisk fil från metadata %s"

#: src/hed/libs/data/DataMover.cpp:187
msgid "Failed to delete logical file"
msgstr "Misslyckades med att ta bort logisk fil"

#: src/hed/libs/data/DataMover.cpp:194
msgid "Failed to remove instance"
msgstr "Misslyckades med att ta bort instans"

#: src/hed/libs/data/DataMover.cpp:243
msgid "DataMover::Transfer : starting new thread"
msgstr "DataMover::Transfer : startar ny tråd"

#: src/hed/libs/data/DataMover.cpp:271
#, c-format
msgid "Transfer from %s to %s"
msgstr "Överföring från %s till %s"

#: src/hed/libs/data/DataMover.cpp:273
msgid "Not valid source"
msgstr "Ogiltig källa"

#: src/hed/libs/data/DataMover.cpp:278
msgid "Not valid destination"
msgstr "Ogiltig destination"

#: src/hed/libs/data/DataMover.cpp:300 src/services/candypond/CandyPond.cpp:304
#, c-format
msgid "Couldn't handle certificate: %s"
msgstr "Kunde inte hantera certifikat: %s"

#: src/hed/libs/data/DataMover.cpp:309 src/hed/libs/data/DataMover.cpp:614
#: src/libs/data-staging/Processor.cpp:123
#, c-format
msgid "File %s is cached (%s) - checking permissions"
msgstr "Fil %s är cachad (%s) - kontrollerar åtkomsträttigheter"

#: src/hed/libs/data/DataMover.cpp:313 src/hed/libs/data/DataMover.cpp:633
#: src/hed/libs/data/DataMover.cpp:691 src/libs/data-staging/Processor.cpp:142
msgid "Permission checking passed"
msgstr "Åtkomsträttighetskontroll godkänd"

#: src/hed/libs/data/DataMover.cpp:314 src/hed/libs/data/DataMover.cpp:652
#: src/hed/libs/data/DataMover.cpp:1180
msgid "Linking/copying cached file"
msgstr "Länkar/kopierar cachad fil"

#: src/hed/libs/data/DataMover.cpp:338
#, c-format
msgid "No locations for source found: %s"
msgstr "Hittade inga platser för källa: %s"

#: src/hed/libs/data/DataMover.cpp:342
#, c-format
msgid "Failed to resolve source: %s"
msgstr "Misslyckades med att slå upp källa: %s"

#: src/hed/libs/data/DataMover.cpp:356 src/hed/libs/data/DataMover.cpp:431
#, c-format
msgid "No locations for destination found: %s"
msgstr "Hittade inga platser för destination: %s"

#: src/hed/libs/data/DataMover.cpp:361 src/hed/libs/data/DataMover.cpp:435
#, c-format
msgid "Failed to resolve destination: %s"
msgstr "Misslyckades med att slå upp destination: %s"

#: src/hed/libs/data/DataMover.cpp:378
#, c-format
msgid "No locations for destination different from source found: %s"
msgstr "Hittade inga platser för destinationen som skiljer sig från källan: %s"

#: src/hed/libs/data/DataMover.cpp:400
#, c-format
msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s"
msgstr "DataMover::Transfer: försöker förstöra/skriva över destination: %s"

#: src/hed/libs/data/DataMover.cpp:412
#, c-format
msgid "Failed to delete %s but will still try to copy"
msgstr ""
"Misslyckades med att ta bort %s, men kommer fortfarande att försöka kopiera"

#: src/hed/libs/data/DataMover.cpp:416
#, c-format
msgid "Failed to delete %s"
msgstr "Misslyckades med att ta bort %s"

#: src/hed/libs/data/DataMover.cpp:447
#, c-format
msgid "Deleted but still have locations at %s"
msgstr "Borttagen men har fortfarande platser på %s"

#: src/hed/libs/data/DataMover.cpp:459
msgid "DataMover: cycle"
msgstr "DataMover: nästa cykel"

#: src/hed/libs/data/DataMover.cpp:461
msgid "DataMover: no retries requested - exit"
msgstr "DataMover: begärt att inte försöka igen - avsluta"

#: src/hed/libs/data/DataMover.cpp:466
msgid "DataMover: source out of tries - exit"
msgstr "DataMover: källan har slut på försök - avsluta"

#: src/hed/libs/data/DataMover.cpp:468
msgid "DataMover: destination out of tries - exit"
msgstr "DataMover: destinationen har slut på försök - avsluta"

#: src/hed/libs/data/DataMover.cpp:476
#, c-format
msgid "Real transfer from %s to %s"
msgstr "Reell överföring från %s till %s"

#: src/hed/libs/data/DataMover.cpp:502
#, c-format
msgid "Creating buffer: %lli x %i"
msgstr "Skapar buffer: %lli x %i"

#: src/hed/libs/data/DataMover.cpp:518
#, c-format
msgid "DataMove::Transfer: no checksum calculation for %s"
msgstr "DataMove::Transfer: ingen checksumma beräknad för %s"

#: src/hed/libs/data/DataMover.cpp:523
#, c-format
msgid "DataMove::Transfer: using supplied checksum %s:%s"
msgstr "DataMove::Transfer: använder tillhandahållen checksumma %s:%s"

#: src/hed/libs/data/DataMover.cpp:547
#, c-format
msgid "DataMove::Transfer: will calculate %s checksum"
msgstr "DataMove::Transfer: kommer att beräkna %s-checksumma"

#: src/hed/libs/data/DataMover.cpp:552
msgid "Buffer creation failed !"
msgstr "Skapande av buffer misslyckades" #: src/hed/libs/data/DataMover.cpp:575 #, c-format msgid "URL is mapped to: %s" msgstr "URL mappas till: %s" #: src/hed/libs/data/DataMover.cpp:603 src/hed/libs/data/DataMover.cpp:661 #: src/libs/data-staging/Processor.cpp:78 msgid "Cached file is locked - should retry" msgstr "Cachad fil är lÃ¥st - bör försöka igen" #: src/hed/libs/data/DataMover.cpp:608 src/libs/data-staging/Processor.cpp:96 msgid "Failed to initiate cache" msgstr "Misslyckades med att initiera cache" #: src/hed/libs/data/DataMover.cpp:625 src/services/candypond/CandyPond.cpp:379 #, c-format msgid "Permission checking failed: %s" msgstr "Ã…tkomsträttighetskontroll inte godkänd: %s" #: src/hed/libs/data/DataMover.cpp:627 src/hed/libs/data/DataMover.cpp:685 #: src/hed/libs/data/DataMover.cpp:705 src/hed/libs/data/DataMover.cpp:716 msgid "source.next_location" msgstr "source.next_location" #: src/hed/libs/data/DataMover.cpp:641 src/libs/data-staging/Processor.cpp:147 #, c-format msgid "Source modification date: %s" msgstr "Källans ändringstid: %s" #: src/hed/libs/data/DataMover.cpp:642 src/libs/data-staging/Processor.cpp:148 #, c-format msgid "Cache creation date: %s" msgstr "Cache skapades: %s" #: src/hed/libs/data/DataMover.cpp:648 src/libs/data-staging/Processor.cpp:153 msgid "Cached file is outdated, will re-download" msgstr "Cachad fil är gammal, kommer att ladda ner igen" #: src/hed/libs/data/DataMover.cpp:651 src/libs/data-staging/Processor.cpp:158 msgid "Cached copy is still valid" msgstr "Cachad kopia är fortfarande giltig" #: src/hed/libs/data/DataMover.cpp:678 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" "URL är mappad till lokal Ã¥tkomst - kontrollerar Ã¥tkomsträttigheter pÃ¥ " "ursprunglig URL" #: src/hed/libs/data/DataMover.cpp:682 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "Ã…tkomsträttighetskontroll pÃ¥ ursprunglig URL inte godkänd: %s" #: src/hed/libs/data/DataMover.cpp:693 msgid "Linking local file" msgstr "Länkar lokal fil" #: src/hed/libs/data/DataMover.cpp:713 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "Misslyckades med att skapa symbolisk länk %s till %s : %s" #: src/hed/libs/data/DataMover.cpp:722 #, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "Misslyckades med att ändra ägare av symbolisk länk %s till %i" #: src/hed/libs/data/DataMover.cpp:733 #, c-format msgid "cache file: %s" msgstr "cachefil: %s" #: src/hed/libs/data/DataMover.cpp:759 #, c-format msgid "Failed to stat source %s" msgstr "Misslyckades med att göra stat pÃ¥ källa: %s" #: src/hed/libs/data/DataMover.cpp:761 src/hed/libs/data/DataMover.cpp:776 #: src/hed/libs/data/DataMover.cpp:808 src/hed/libs/data/DataMover.cpp:828 #: src/hed/libs/data/DataMover.cpp:851 src/hed/libs/data/DataMover.cpp:869 #: src/hed/libs/data/DataMover.cpp:1028 src/hed/libs/data/DataMover.cpp:1061 #: src/hed/libs/data/DataMover.cpp:1072 src/hed/libs/data/DataMover.cpp:1146 msgid "(Re)Trying next source" msgstr "Försöker med nästa källa (igen)" #: src/hed/libs/data/DataMover.cpp:772 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "Metainformation för källa och plats stämmer inte överens för %s" #: src/hed/libs/data/DataMover.cpp:786 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" "Replika %s har hög latency, men inga fler källor existerar sÃ¥ kommer att " "använda denna" #: src/hed/libs/data/DataMover.cpp:790 
#, c-format
msgid "Replica %s has high latency, trying next source"
msgstr "Replika %s har hög latens, prövar nästa källa"

#: src/hed/libs/data/DataMover.cpp:802 src/hed/libs/data/DataMover.cpp:823
#: src/libs/data-staging/DataStagingDelivery.cpp:376
#: src/libs/data-staging/DataStagingDelivery.cpp:399
#, c-format
msgid "Using internal transfer method of %s"
msgstr "Använder intern överföringsmetod för %s"

#: src/hed/libs/data/DataMover.cpp:815 src/hed/libs/data/DataMover.cpp:833
#: src/libs/data-staging/DataStagingDelivery.cpp:392
#: src/libs/data-staging/DataStagingDelivery.cpp:413
#, c-format
msgid "Internal transfer method is not supported for %s"
msgstr "Intern överföringsmetod stöds inte för %s"

#: src/hed/libs/data/DataMover.cpp:840
msgid "Using buffered transfer method"
msgstr "Använder buffrad överföringsmetod"

#: src/hed/libs/data/DataMover.cpp:844
#, c-format
msgid "Failed to prepare source: %s"
msgstr "Misslyckades med att förbereda källa: %s"

#: src/hed/libs/data/DataMover.cpp:859
#, c-format
msgid "Failed to start reading from source: %s"
msgstr "Misslyckades med att påbörja läsning från källa: %s"

#: src/hed/libs/data/DataMover.cpp:879
msgid "Metadata of source and destination are different"
msgstr "Källans och destinationens metadata är olika"

#: src/hed/libs/data/DataMover.cpp:899
#, c-format
msgid "Failed to preregister destination: %s"
msgstr "Misslyckades med att förregistrera destination: %s"

#: src/hed/libs/data/DataMover.cpp:904 src/hed/libs/data/DataMover.cpp:1170
msgid "destination.next_location"
msgstr "destination.next_location"

#: src/hed/libs/data/DataMover.cpp:915
#, c-format
msgid "Failed to prepare destination: %s"
msgstr "Misslyckades med att förbereda destination: %s"

#: src/hed/libs/data/DataMover.cpp:922 src/hed/libs/data/DataMover.cpp:945
#: src/hed/libs/data/DataMover.cpp:1167
#, c-format
msgid ""
"Failed to unregister preregistered lfn. You may need to unregister it "
"manually: %s"
msgstr ""
"Misslyckades med att avregistrera förregistrerad lfn. Du kan behöva "
"avregistrera den manuellt: %s"

#: src/hed/libs/data/DataMover.cpp:926 src/hed/libs/data/DataMover.cpp:948
#: src/hed/libs/data/DataMover.cpp:1037 src/hed/libs/data/DataMover.cpp:1053
#: src/hed/libs/data/DataMover.cpp:1078 src/hed/libs/data/DataMover.cpp:1123
msgid "(Re)Trying next destination"
msgstr "Försöker med nästa destination (igen)"

#: src/hed/libs/data/DataMover.cpp:937
#, c-format
msgid "Failed to start writing to destination: %s"
msgstr "Misslyckades med att påbörja skrivning till destination: %s"

#: src/hed/libs/data/DataMover.cpp:960
msgid "Failed to start writing to cache"
msgstr "Misslyckades med att påbörja skrivning till cache"

#: src/hed/libs/data/DataMover.cpp:968 src/hed/libs/data/DataMover.cpp:1014
#: src/hed/libs/data/DataMover.cpp:1192
msgid ""
"Failed to unregister preregistered lfn. You may need to unregister it "
"manually"
msgstr ""
"Misslyckades med att avregistrera förregistrerad lfn. Du kan behöva "
"avregistrera den manuellt"

#: src/hed/libs/data/DataMover.cpp:975
msgid "Waiting for buffer"
msgstr "Väntar på buffer"

#: src/hed/libs/data/DataMover.cpp:982
#, c-format
msgid "Failed updating timestamp on cache lock file %s for file %s: %s"
msgstr ""
"Misslyckades med att uppdatera tidsstämpel på cachelåsfil %s för fil %s: %s"

#: src/hed/libs/data/DataMover.cpp:987
#, c-format
msgid "buffer: read EOF : %s"
msgstr "buffer: läs EOF : %s"

#: src/hed/libs/data/DataMover.cpp:988
#, c-format
msgid "buffer: write EOF: %s"
msgstr "buffer: skriv EOF: %s"

#: src/hed/libs/data/DataMover.cpp:989
#, c-format
msgid "buffer: error : %s, read: %s, write: %s"
msgstr "buffer: fel : %s, läs: %s, skriv: %s"

#: src/hed/libs/data/DataMover.cpp:990
msgid "Closing read channel"
msgstr "Stänger läskanal"

#: src/hed/libs/data/DataMover.cpp:997
msgid "Closing write channel"
msgstr "Stänger skrivkanal"

#: src/hed/libs/data/DataMover.cpp:1005
msgid "Failed to complete writing to destination"
msgstr "Misslyckades med att slutföra skrivning till destination"

#: src/hed/libs/data/DataMover.cpp:1019
msgid "Transfer cancelled successfully"
msgstr "Överföring avbröts framgångsrikt"

#: src/hed/libs/data/DataMover.cpp:1066
msgid "Cause of failure unclear - choosing randomly"
msgstr "Anledning till misslyckande oklar - väljer slumpvis"

#: src/hed/libs/data/DataMover.cpp:1110
#, c-format
msgid ""
"Checksum mismatch between checksum given as meta option (%s:%s) and "
"calculated checksum (%s)"
msgstr ""
"Checksumma stämmer inte överens mellan checksumma given som metaalternativ "
"(%s:%s) och beräknad checksumma (%s)"

#: src/hed/libs/data/DataMover.cpp:1116
msgid ""
"Failed to unregister preregistered lfn, You may need to unregister it "
"manually"
msgstr ""
"Misslyckades med att avregistrera förregistrerad lfn, du kan behöva "
"avregistrera den manuellt"

#: src/hed/libs/data/DataMover.cpp:1120
msgid "Failed to delete destination, retry may fail"
msgstr "Misslyckades med att ta bort destination, nytt försök kan misslyckas"

#: src/hed/libs/data/DataMover.cpp:1130
msgid "Cannot compare empty checksum"
msgstr "Kan inte jämföra tom checksumma"

#: src/hed/libs/data/DataMover.cpp:1137
#: src/libs/data-staging/DataStagingDelivery.cpp:570
msgid "Checksum type of source and calculated checksum differ, cannot compare"
msgstr ""
"Typ av checksumma för källa och beräknad checksumma är olika, kan inte "
"jämföra"

#: src/hed/libs/data/DataMover.cpp:1139
#, c-format
msgid "Checksum mismatch between calcuated checksum %s and source checksum %s"
msgstr ""
"Checksumma stämmer inte överens mellan beräknad checksumma %s och källans "
"checksumma %s"

#: src/hed/libs/data/DataMover.cpp:1151
#: src/libs/data-staging/DataStagingDelivery.cpp:586
#, c-format
msgid "Calculated transfer checksum %s matches source checksum"
msgstr ""
"Beräknad överförings-checksumma %s stämmer överens med källans checksumma"

#: src/hed/libs/data/DataMover.cpp:1157
#: src/libs/data-staging/DataStagingDelivery.cpp:589
msgid "Checksum not computed"
msgstr "Checksumma ej beräknad"

#: src/hed/libs/data/DataMover.cpp:1163
#, c-format
msgid "Failed to postregister destination %s"
msgstr "Misslyckades med att efterregistrera destination %s"

#: src/hed/libs/data/DataPoint.cpp:90
#, c-format
msgid "Invalid URL option: %s"
msgstr "Ogiltigt URL-alternativ: %s"

#: src/hed/libs/data/DataPoint.cpp:251
msgid "Checksum types of index and replica are different, skipping comparison"
msgstr ""
"Typ av checksumma för index och replika är olika, hoppar över jämförelse"
#: src/hed/libs/data/DataPoint.cpp:278
#, c-format
msgid "Skipping invalid URL option %s"
msgstr "Hoppar över ogiltigt URL-alternativ %s"

#: src/hed/libs/data/DataPoint.cpp:293
msgid ""
"Third party transfer was requested but the corresponding plugin could\n"
" not be loaded. Is the GFAL plugin installed? If not, please install "
"the\n"
" packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n"
" your type of installation the package names might differ."
msgstr ""
"Tredjepartsöverföring begärdes men motsvarande plugin kunde inte laddas in.\n"
" Är GFAL-plugin installerad? Om inte, installera paketen\n"
" 'nordugrid-arc-plugins-gfal' och 'gfal2-all'. Beroende på din typ av\n"
" installation kan paketnamnen variera."

#: src/hed/libs/data/DataPoint.cpp:311
#, c-format
msgid "Failed to load plugin for URL %s"
msgstr "Misslyckades med att ladda in plugin för URL %s"

#: src/hed/libs/data/DataPointDelegate.cpp:75
#: src/hed/libs/data/DataPointDelegate.cpp:76
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2032
#, c-format
msgid "Starting helper process: %s"
msgstr "Startar hjälpprocess: %s"

#: src/hed/libs/data/DataPointDelegate.cpp:180
msgid "start_reading"
msgstr "start_reading"

#: src/hed/libs/data/DataPointDelegate.cpp:189
msgid "start_reading: helper start failed"
msgstr "start_reading: start av hjälpprocess misslyckades"

#: src/hed/libs/data/DataPointDelegate.cpp:197
msgid "start_reading: thread create failed"
msgstr "start_reading: skapande av tråd misslyckades"

#: src/hed/libs/data/DataPointDelegate.cpp:213
msgid "StopReading: aborting connection"
msgstr "StopReading: avbryter förbindelse"

#: src/hed/libs/data/DataPointDelegate.cpp:218
msgid "stop_reading: waiting for transfer to finish"
msgstr "stop_reading: väntar på att överföring ska avslutas"

#: src/hed/libs/data/DataPointDelegate.cpp:221
#, c-format
msgid "stop_reading: exiting: %s"
msgstr "stop_reading: avslutar: %s"

#: src/hed/libs/data/DataPointDelegate.cpp:231
msgid "read_thread: get and register buffers"
msgstr "read_thread: erhåll och registrera buffrar"

#: src/hed/libs/data/DataPointDelegate.cpp:239
#, c-format
msgid "read_thread: for_read failed - aborting: %s"
msgstr "read_thread: for_read misslyckades - avbryter: %s"

#: src/hed/libs/data/DataPointDelegate.cpp:247
#, c-format
msgid "read_thread: non-data tag '%c' from external process - leaving: %s"
msgstr "read_thread: icke-data-tagg '%c' från extern process - lämnar: %s"

#: src/hed/libs/data/DataPointDelegate.cpp:256
#, c-format
msgid "read_thread: data read error from external process - aborting: %s"
msgstr "read_thread: dataläsningsfel från extern process - avbryter: %s"

#: src/hed/libs/data/DataPointDelegate.cpp:264
msgid "read_thread: exiting"
msgstr "read_thread: avslutar"

#: src/hed/libs/data/DataPointDelegate.cpp:285
msgid "start_writing_ftp: helper start failed"
msgstr "start_writing_ftp: start av hjälpprocess misslyckades"

#: src/hed/libs/data/DataPointDelegate.cpp:293
msgid "start_writing_ftp: thread create failed"
msgstr "start_writing_ftp: skapande av tråd misslyckades"

#: src/hed/libs/data/DataPointDelegate.cpp:343
msgid "No checksum information possible"
msgstr "Ingen information om checksumma möjlig"

#: src/hed/libs/data/DataPointDelegate.cpp:359
msgid "write_thread: get and pass buffers"
msgstr "write_thread: erhåll och skicka vidare buffrar"

#: src/hed/libs/data/DataPointDelegate.cpp:366
msgid "write_thread: for_write failed - aborting"
msgstr "write_thread: for_write misslyckades - avbryter"
#: src/hed/libs/data/DataPointDelegate.cpp:370
msgid "write_thread: for_write eof"
msgstr "write_thread: for_write eof"

#: src/hed/libs/data/DataPointDelegate.cpp:384
msgid "write_thread: out failed - aborting"
msgstr "write_thread: out misslyckades - avbryter"

#: src/hed/libs/data/DataPointDelegate.cpp:392
msgid "write_thread: exiting"
msgstr "write_thread: avslutar"

#: src/hed/libs/data/DataPointIndex.cpp:91
#, c-format
msgid "Can't handle location %s"
msgstr "Kan inte hantera plats %s"

#: src/hed/libs/data/DataPointIndex.cpp:183
msgid "Sorting replicas according to URL map"
msgstr "Sorterar replikor enligt URL-mapp"

#: src/hed/libs/data/DataPointIndex.cpp:187
#, c-format
msgid "Replica %s is mapped"
msgstr "Replika %s har mappats"

#: src/hed/libs/data/DataPointIndex.cpp:195
#, c-format
msgid "Sorting replicas according to preferred pattern %s"
msgstr "Sorterar replikor enligt föredraget mönster %s"

#: src/hed/libs/data/DataPointIndex.cpp:218
#: src/hed/libs/data/DataPointIndex.cpp:236
#, c-format
msgid "Excluding replica %s matching pattern !%s"
msgstr "Exkluderar replika %s som matchar mönster !%s"

#: src/hed/libs/data/DataPointIndex.cpp:229
#, c-format
msgid "Replica %s matches host pattern %s"
msgstr "Replika %s matchar värd-mönster %s"

#: src/hed/libs/data/DataPointIndex.cpp:247
#, c-format
msgid "Replica %s matches pattern %s"
msgstr "Replika %s matchar mönster %s"

#: src/hed/libs/data/DataPointIndex.cpp:263
#, c-format
msgid "Replica %s doesn't match preferred pattern or URL map"
msgstr "Replika %s matchar inte föredraget mönster eller URL-mapp"

#: src/hed/libs/data/DataStatus.cpp:12
msgid "Operation completed successfully"
msgstr "Operation avslutades framgångsrikt"

#: src/hed/libs/data/DataStatus.cpp:13
msgid "Source is invalid URL"
msgstr "Källa är inte en giltig URL"

#: src/hed/libs/data/DataStatus.cpp:14
msgid "Destination is invalid URL"
msgstr "Destination är inte en giltig URL"

#: src/hed/libs/data/DataStatus.cpp:15
msgid "Resolving of index service for source failed"
msgstr "Uppslagning av indextjänst för källa misslyckades"

#: src/hed/libs/data/DataStatus.cpp:16
msgid "Resolving of index service for destination failed"
msgstr "Uppslagning av indextjänst för destination misslyckades"

#: src/hed/libs/data/DataStatus.cpp:17
msgid "Can't read from source"
msgstr "Kan ej läsa från källa"

#: src/hed/libs/data/DataStatus.cpp:18
msgid "Can't write to destination"
msgstr "Kan ej skriva till destination"

#: src/hed/libs/data/DataStatus.cpp:19
msgid "Failed while reading from source"
msgstr "Misslyckades under läsning från källa"

#: src/hed/libs/data/DataStatus.cpp:20
msgid "Failed while writing to destination"
msgstr "Misslyckades under skrivning till destination"

#: src/hed/libs/data/DataStatus.cpp:21
msgid "Failed while transferring data"
msgstr "Misslyckades under överföring av data"

#: src/hed/libs/data/DataStatus.cpp:22
msgid "Failed while finishing reading from source"
msgstr "Misslyckades med att avsluta läsning från källa"

#: src/hed/libs/data/DataStatus.cpp:23
msgid "Failed while finishing writing to destination"
msgstr "Misslyckades med att avsluta skrivning till destination"

#: src/hed/libs/data/DataStatus.cpp:24
msgid "First stage of registration to index service failed"
msgstr "Första steget av registrering till indextjänst misslyckades"

#: src/hed/libs/data/DataStatus.cpp:25
msgid "Last stage of registration to index service failed"
msgstr "Sista steget av registrering till indextjänst misslyckades"

#: src/hed/libs/data/DataStatus.cpp:26
msgid "Unregistering from index service failed"
msgstr "Avregistrering från indextjänst misslyckades"

#: src/hed/libs/data/DataStatus.cpp:27
msgid "Error in caching procedure"
msgstr "Fel i cachningsprocedur"

#: src/hed/libs/data/DataStatus.cpp:28
msgid "Error due to expiration of provided credentials"
msgstr "Fel eftersom den tillhandahållna referensens livstid har gått ut"

#: src/hed/libs/data/DataStatus.cpp:29
msgid "Delete error"
msgstr "Borttagningsfel"

#: src/hed/libs/data/DataStatus.cpp:30
msgid "No valid location available"
msgstr "Ingen giltig plats tillgänglig"

#: src/hed/libs/data/DataStatus.cpp:31
msgid "Location already exists"
msgstr "Plats existerar redan"

#: src/hed/libs/data/DataStatus.cpp:32
msgid "Operation not supported for this kind of URL"
msgstr "Operationen stöds inte för denna typ av URL"

#: src/hed/libs/data/DataStatus.cpp:33
msgid "Feature is not implemented"
msgstr "Funktion ej implementerad"

#: src/hed/libs/data/DataStatus.cpp:34
msgid "Already reading from source"
msgstr "Läser redan från källa"

#: src/hed/libs/data/DataStatus.cpp:35
msgid "Already writing to destination"
msgstr "Skriver redan till destination"

#: src/hed/libs/data/DataStatus.cpp:36
msgid "Read access check failed"
msgstr "Åtkomstkontroll för läsning inte godkänd"

#: src/hed/libs/data/DataStatus.cpp:37
msgid "Directory listing failed"
msgstr "Kataloglistning misslyckades"

#: src/hed/libs/data/DataStatus.cpp:38
msgid "Object is not suitable for listing"
msgstr "Objekt är inte lämpligt för listning"

#: src/hed/libs/data/DataStatus.cpp:39
msgid "Failed to obtain information about file"
msgstr "Misslyckades med att erhålla information om fil"

#: src/hed/libs/data/DataStatus.cpp:40
msgid "No such file or directory"
msgstr "Ingen sådan fil eller katalog"

#: src/hed/libs/data/DataStatus.cpp:41
msgid "Object not initialized (internal error)"
msgstr "Objekt ej initierat (internt fel)"

#: src/hed/libs/data/DataStatus.cpp:42
msgid "Operating System error"
msgstr "Operativsystemfel"

#: src/hed/libs/data/DataStatus.cpp:43
msgid "Failed to stage file(s)"
msgstr "Misslyckades med att staga fil(er)"

#: src/hed/libs/data/DataStatus.cpp:44
msgid "Inconsistent metadata"
msgstr "Inkonsistent metadata"

#: src/hed/libs/data/DataStatus.cpp:45
msgid "Failed to prepare source"
msgstr "Misslyckades med att förbereda källa"

#: src/hed/libs/data/DataStatus.cpp:46
msgid "Should wait for source to be prepared"
msgstr "Ska vänta på att källa förbereds"

#: src/hed/libs/data/DataStatus.cpp:47
msgid "Failed to prepare destination"
msgstr "Misslyckades med att förbereda destination"

#: src/hed/libs/data/DataStatus.cpp:48
msgid "Should wait for destination to be prepared"
msgstr "Ska vänta på att destination förbereds"

#: src/hed/libs/data/DataStatus.cpp:49
msgid "Failed to finalize reading from source"
msgstr "Misslyckades med att slutföra läsning från källa"

#: src/hed/libs/data/DataStatus.cpp:50
msgid "Failed to finalize writing to destination"
msgstr "Misslyckades med att slutföra skrivning till destination"

#: src/hed/libs/data/DataStatus.cpp:51
msgid "Failed to create directory"
msgstr "Misslyckades med att skapa katalog"

#: src/hed/libs/data/DataStatus.cpp:52
msgid "Failed to rename URL"
msgstr "Misslyckades med att byta namn på URL"

#: src/hed/libs/data/DataStatus.cpp:53
msgid "Data was already cached"
msgstr "Data var redan cachat"

#: src/hed/libs/data/DataStatus.cpp:54
msgid "Operation cancelled successfully"
msgstr "Operation avbröts framgångsrikt"

#: src/hed/libs/data/DataStatus.cpp:55
msgid "Generic error"
msgstr "Generiskt fel"
src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "Okänt fel" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "Inget fel" #: src/hed/libs/data/DataStatus.cpp:61 msgid "Transfer timed out" msgstr "Överföring avbröts pÃ¥ grund av timeout" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "Checksumma stämmer inte överens" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "DÃ¥lig logik" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "Alla erhÃ¥llna resultat är ogiltiga" #: src/hed/libs/data/DataStatus.cpp:65 msgid "Temporary service error" msgstr "Temporärt tjänstefel" #: src/hed/libs/data/DataStatus.cpp:66 msgid "Permanent service error" msgstr "Permanent tjänstefel" #: src/hed/libs/data/DataStatus.cpp:67 msgid "Error switching uid" msgstr "Fel vid byte av UID" #: src/hed/libs/data/DataStatus.cpp:68 msgid "Request timed out" msgstr "Begäran avbröts pÃ¥ grund av timeout" #: src/hed/libs/data/FileCache.cpp:109 msgid "No cache directory specified" msgstr "Ingen cachekatalog angiven" #: src/hed/libs/data/FileCache.cpp:126 msgid "No usable caches" msgstr "Inga användbara cacher" #: src/hed/libs/data/FileCache.cpp:135 msgid "No draining cache directory specified" msgstr "Ingen draining-cachekatalog angiven" #: src/hed/libs/data/FileCache.cpp:153 msgid "No read-only cache directory specified" msgstr "Ingen readonly-cachekatalog angiven" #: src/hed/libs/data/FileCache.cpp:182 #, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "Misslyckades med att skapa cachekatalog för fil %s: %s" #: src/hed/libs/data/FileCache.cpp:192 #, c-format msgid "Failed to create any cache directories for %s" msgstr "Misslyckades med att skapa cachekataloger för %s" #: src/hed/libs/data/FileCache.cpp:199 #, c-format msgid "Failed to change permissions on %s: %s" msgstr "Misslyckades med att ändra Ã¥tkomsträttigheter pÃ¥ %s: %s" #: src/hed/libs/data/FileCache.cpp:211 #, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "Misslyckades med att ta bort gammal cachefil %s: %s" #: src/hed/libs/data/FileCache.cpp:214 #, c-format msgid "Failed to release lock on file %s" msgstr "Misslyckades med att frigöra lÃ¥s pÃ¥ fil %s" #: src/hed/libs/data/FileCache.cpp:232 #, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "Misslyckades med att slÃ¥ upp attribut för cachad fil: %s" #: src/hed/libs/data/FileCache.cpp:238 #, c-format msgid "Failed to obtain lock on cache file %s" msgstr "Misslyckades med att erhÃ¥lla lÃ¥s pÃ¥ cachefil %s" #: src/hed/libs/data/FileCache.cpp:247 src/hed/libs/data/FileCache.cpp:307 #, c-format msgid "Error removing cache file %s: %s" msgstr "Fel vid borttagande av cachefil %s: %s" #: src/hed/libs/data/FileCache.cpp:249 src/hed/libs/data/FileCache.cpp:260 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" "Misslyckades med att ta bort lÃ¥s pÃ¥ %s. Manuell intervention kan behövas" #: src/hed/libs/data/FileCache.cpp:279 src/hed/libs/data/FileCache.cpp:313 #, c-format msgid "Failed to unlock file %s: %s. Manual intervention may be required" msgstr "" "Misslyckades med att lÃ¥sa upp fil %s: %s. 
Manuell intervention kan behövas" #: src/hed/libs/data/FileCache.cpp:296 #, c-format msgid "Invalid lock on file %s" msgstr "Ogiltigt lÃ¥s pÃ¥ fil %s" #: src/hed/libs/data/FileCache.cpp:302 #, c-format msgid "Failed to remove .meta file %s: %s" msgstr "Misslyckades med att ta bort .meta-fil %s: %s" #: src/hed/libs/data/FileCache.cpp:367 #, c-format msgid "Cache not found for file %s" msgstr "Hittade inte cache för fil %s" #: src/hed/libs/data/FileCache.cpp:377 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" "Cachefil %s ändrades under den senaste sekunden, väntar 1 sekund för att " "undvika race condition" #: src/hed/libs/data/FileCache.cpp:382 src/hed/libs/data/FileCache.cpp:687 #, c-format msgid "Cache file %s does not exist" msgstr "Cachefil %s existerar inte" #: src/hed/libs/data/FileCache.cpp:387 src/hed/libs/data/FileCache.cpp:689 #, c-format msgid "Error accessing cache file %s: %s" msgstr "Ã…tkomstfel för cachefil %s: %s" #: src/hed/libs/data/FileCache.cpp:393 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "Kan inte skapa katalog %s för per-jobb hÃ¥rda länkar" #: src/hed/libs/data/FileCache.cpp:398 #, c-format msgid "Cannot change permission of %s: %s " msgstr "Kan inte ändra Ã¥tkomsträttigheter för %s: %s" #: src/hed/libs/data/FileCache.cpp:402 #, c-format msgid "Cannot change owner of %s: %s " msgstr "Kan inte ändra ägare för %s: %s" #: src/hed/libs/data/FileCache.cpp:416 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "Misslyckades med att ta bort existerande hÃ¥rd länk pÃ¥ %s: %s" #: src/hed/libs/data/FileCache.cpp:420 src/hed/libs/data/FileCache.cpp:431 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "Misslyckades med att skapa hÃ¥rd länk frÃ¥n %s till %s: %s" #: src/hed/libs/data/FileCache.cpp:426 #, c-format msgid "Cache file %s not found" msgstr "Hittade inte cachefil %s" #: src/hed/libs/data/FileCache.cpp:441 #, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "" "Misslyckades med att ändra Ã¥tkomsträttigheter eller ägare för hÃ¥rd länk %s: " "%s" #: src/hed/libs/data/FileCache.cpp:449 #, c-format msgid "Failed to release lock on cache file %s" msgstr "Misslyckades med att frigöra lÃ¥s pÃ¥ cachefil %s" #: src/hed/libs/data/FileCache.cpp:460 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "Cachefil %s lÃ¥stes under länkning/kopiering, mÃ¥ste börja om" #: src/hed/libs/data/FileCache.cpp:465 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "Cachefil %s togs bort under länkning/kopiering, mÃ¥ste börja om" #: src/hed/libs/data/FileCache.cpp:470 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "Cachefil %s ändrades under länkning, mÃ¥ste börja om<" #: src/hed/libs/data/FileCache.cpp:488 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "Misslyckades med att kopiera fil %s till %s: %s" #: src/hed/libs/data/FileCache.cpp:494 #, c-format msgid "Failed to set executable bit on file %s" msgstr "Misslyckades med att sätta exekverbar bit pÃ¥ fil %s" #: src/hed/libs/data/FileCache.cpp:499 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "Misslyckades med att sätta exekverbar bit pÃ¥ fil %s: %s" #: src/hed/libs/data/FileCache.cpp:513 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "Misslyckades med att ta bort existerande 
symbolisk länk pÃ¥ %s: %s" #: src/hed/libs/data/FileCache.cpp:517 src/hed/libs/data/FileCache.cpp:522 #, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "Misslyckades med att skapa symbolisk länk frÃ¥n %s till %s: %s" #: src/hed/libs/data/FileCache.cpp:552 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "Misslyckades med att ta bort cache per-jobb-katalog %s: %s" #: src/hed/libs/data/FileCache.cpp:571 src/hed/libs/data/FileCache.cpp:639 #, c-format msgid "Error reading meta file %s: %s" msgstr "Fel vid läsning av metafil %s: %s" #: src/hed/libs/data/FileCache.cpp:576 src/hed/libs/data/FileCache.cpp:644 #, c-format msgid "Error opening meta file %s" msgstr "Fel vid öppnande av metafil %s" #: src/hed/libs/data/FileCache.cpp:581 src/hed/libs/data/FileCache.cpp:648 #, c-format msgid "meta file %s is empty" msgstr "metafil %s är tom" #: src/hed/libs/data/FileCache.cpp:591 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" "Fil %s är redan cachad pÃ¥ %s under en annan URL: %s - kommer ej att lägga " "till DN till cachad lista" #: src/hed/libs/data/FileCache.cpp:602 #, c-format msgid "Bad format detected in file %s, in line %s" msgstr "Felaktigt format upptäckt i fil %s, pÃ¥ rad %s" #: src/hed/libs/data/FileCache.cpp:618 #, c-format msgid "Could not acquire lock on meta file %s" msgstr "Kunde inte fÃ¥ lÃ¥s pÃ¥ metafil %s" #: src/hed/libs/data/FileCache.cpp:622 #, c-format msgid "Error opening meta file for writing %s" msgstr "Fel vid öppnande av metafil för skrivning: %s" #: src/hed/libs/data/FileCache.cpp:658 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "DN %s är cachat och är giltigt till %s för URL %s" #: src/hed/libs/data/FileCache.cpp:662 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "DN %s är cachat men dess giltighetstid har gÃ¥tt ut för URL %s" #: src/hed/libs/data/FileCache.cpp:713 #, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "Misslyckades med att fÃ¥ lÃ¥s pÃ¥ cachemetafil %s" #: src/hed/libs/data/FileCache.cpp:718 #, c-format msgid "Failed to create cache meta file %s" msgstr "Misslyckades med att skapa cachemetafil %s" #: src/hed/libs/data/FileCache.cpp:733 #, c-format msgid "Failed to read cache meta file %s" msgstr "Misslyckades med att läsa cachemetafil %s" #: src/hed/libs/data/FileCache.cpp:738 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "Cachemetafil %s är tom, kommer att Ã¥terskapa" #: src/hed/libs/data/FileCache.cpp:743 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "Cachemetafil %s är möjligen korrupt, kommer att Ã¥terskapa" #: src/hed/libs/data/FileCache.cpp:747 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" "Fil %s är redan cachad pÃ¥ %s under en annan URL: %s - denna fil kommer ej " "att cachas" #: src/hed/libs/data/FileCache.cpp:757 #, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "Fel vid uppslagning av attribut för cachemetafil %s: %s" #: src/hed/libs/data/FileCache.cpp:828 #, c-format msgid "Using cache %s" msgstr "Använder cache %s" #: src/hed/libs/data/FileCache.cpp:842 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:79 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:112 #, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "Fel vid hämtning av information frÃ¥n 
statvfs för sökväg %s: %s" #: src/hed/libs/data/FileCache.cpp:848 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:118 #, c-format msgid "Cache %s: Free space %f GB" msgstr "Cache %s: Fritt utrymme %f GB" #: src/hed/libs/data/URLMap.cpp:33 #, c-format msgid "Can't use URL %s" msgstr "Kan inte använda URL %s" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "fil %s kan inte kommas åt" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "Mappar %s till %s" #: src/hed/libs/data/examples/simple_copy.cpp:17 msgid "Usage: copy source destination" msgstr "Användning: kopiera källa destination" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, c-format msgid "Copy failed: %s" msgstr "Kopiering misslyckades: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "Misslyckades med att läsa proxyfil: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "Misslyckades med att läsa certifikatfil: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, c-format msgid "Failed to read private key file: %s" msgstr "Misslyckades med att läsa privat nyckelfil: %s" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:" "%s" msgstr "" "Misslyckades med att konvertera GSI-referens till GSS-referens (major: %d, " "minor: %d):%s:%s" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "Misslyckades med att frigöra GSS-referens (major: %d, minor: %d):%s:%s" #: src/hed/libs/loader/ModuleManager.cpp:30 msgid "Module Manager Init" msgstr "Modulhanterare init" #: src/hed/libs/loader/ModuleManager.cpp:73 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." msgstr "" "Upptagna pluginer hittades när modulhanteraren laddades ur. Väntar på att de " "ska frigöras."
#: src/hed/libs/loader/ModuleManager.cpp:207 #, c-format msgid "Found %s in cache" msgstr "Hittade %s i cache" #: src/hed/libs/loader/ModuleManager.cpp:214 #, c-format msgid "Could not locate module %s in following paths:" msgstr "Kunde inte lokalisera modulen %s på följande sökvägar:" #: src/hed/libs/loader/ModuleManager.cpp:218 #, c-format msgid "\t%s" msgstr "\t%s" #: src/hed/libs/loader/ModuleManager.cpp:232 #, c-format msgid "Loaded %s" msgstr "Laddade in %s" #: src/hed/libs/loader/ModuleManager.cpp:276 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "Modulhanterare init av ModuleManager::setCfg" #: src/hed/libs/loader/ModuleManager.cpp:312 #: src/hed/libs/loader/ModuleManager.cpp:325 #, c-format msgid "%s made persistent" msgstr "%s gjord persistent" #: src/hed/libs/loader/ModuleManager.cpp:316 #, c-format msgid "Not found %s in cache" msgstr "Hittade inte %s i cache" #: src/hed/libs/loader/ModuleManager.cpp:330 msgid "Specified module not found in cache" msgstr "Angiven modul hittades inte i cache" #: src/hed/libs/loader/Plugin.cpp:364 src/hed/libs/loader/Plugin.cpp:557 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "Kunde inte hitta inladdningsbar modulbeskrivning med namn %s" #: src/hed/libs/loader/Plugin.cpp:372 src/hed/libs/loader/Plugin.cpp:567 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "Kunde inte hitta inladdningsbar modul med namn %s (%s)" #: src/hed/libs/loader/Plugin.cpp:378 src/hed/libs/loader/Plugin.cpp:480 #: src/hed/libs/loader/Plugin.cpp:572 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "Modulen %s är inte en ARC-plugin (%s)" #: src/hed/libs/loader/Plugin.cpp:395 src/hed/libs/loader/Plugin.cpp:490 #: src/hed/libs/loader/Plugin.cpp:598 #, c-format msgid "Module %s failed to reload (%s)" msgstr "Modul %s kunde inte laddas in igen (%s)" #: src/hed/libs/loader/Plugin.cpp:417 #, c-format msgid "Module %s contains no plugin %s" msgstr "Modul %s innehåller ingen plugin %s" #: src/hed/libs/loader/Plugin.cpp:462 #, c-format msgid "Could not find loadable module descriptor by name %s or kind %s" msgstr "" "Kunde inte hitta inladdningsbar modulbeskrivning med namn %s eller typ %s" #: src/hed/libs/loader/Plugin.cpp:467 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "Inladdningsbar modul %s innehåller inte begärd plugin %s av typen %s" #: src/hed/libs/loader/Plugin.cpp:474 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "Kunde inte hitta inladdningsbar modul med namn %s och %s (%s)" #: src/hed/libs/loader/Plugin.cpp:503 #, c-format msgid "Module %s contains no requested plugin %s of kind %s" msgstr "Modul %s innehåller inte begärd plugin %s av typen %s" #: src/hed/libs/loader/Plugin.cpp:588 #, c-format msgid "Module %s does not contain plugin(s) of specified kind(s)" msgstr "Modul %s innehåller inga plugin(er) av angiven typ" #: src/hed/libs/message/MCC.cpp:76 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "Ingen säkerhetsprocessering/kontroll begärd för '%s'" #: src/hed/libs/message/MCC.cpp:85 #, c-format msgid "Security processing/check failed: %s" msgstr "Säkerhetsprocessering/kontroll misslyckades: %s" #: src/hed/libs/message/MCC.cpp:90 msgid "Security processing/check passed" msgstr "Säkerhetsprocessering/kontroll OK" #: src/hed/libs/message/MCCLoader.cpp:17 msgid "Chain(s) configuration failed" msgstr "Inställning av kedja misslyckades"
#: src/hed/libs/message/MCCLoader.cpp:134 msgid "SecHandler configuration is not defined" msgstr "SecHandler-inställningar är inte definierade" #: src/hed/libs/message/MCCLoader.cpp:157 msgid "SecHandler has no configuration" msgstr "Säkerhetshanterare saknar inställningar" #: src/hed/libs/message/MCCLoader.cpp:163 msgid "SecHandler has no name attribute defined" msgstr "Säkerhetshanterare har inget namnattribut definierat" #: src/hed/libs/message/MCCLoader.cpp:173 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "Säkerhetshanterare %s(%s) kunde inte skapas" #: src/hed/libs/message/MCCLoader.cpp:177 #, c-format msgid "SecHandler: %s(%s)" msgstr "SecHandler: %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:189 msgid "Component has no name attribute defined" msgstr "Komponent har inget namnattribut definierat" #: src/hed/libs/message/MCCLoader.cpp:194 msgid "Component has no ID attribute defined" msgstr "Komponent har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:203 #, c-format msgid "Component %s(%s) could not be created" msgstr "Komponent %s(%s) kunde inte skapas" #: src/hed/libs/message/MCCLoader.cpp:229 #, c-format msgid "Component's %s(%s) next has no ID attribute defined" msgstr "Komponentens %s(%s) nästa har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:290 #, c-format msgid "Loaded MCC %s(%s)" msgstr "Laddade in MCC %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:308 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "Plexerns (%s) nästa har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:318 #, c-format msgid "Loaded Plexer %s" msgstr "Laddade in Plexer %s" #: src/hed/libs/message/MCCLoader.cpp:326 msgid "Service has no Name attribute defined" msgstr "Tjänsten har inget namnattribut definierat" #: src/hed/libs/message/MCCLoader.cpp:332 msgid "Service has no ID attribute defined" msgstr "Tjänsten har inget id-attribut definierat" #: src/hed/libs/message/MCCLoader.cpp:341 #, c-format msgid "Service %s(%s) could not be created" msgstr "Tjänsten %s(%s) kunde inte skapas" #: src/hed/libs/message/MCCLoader.cpp:348 #, c-format msgid "Loaded Service %s(%s)" msgstr "Laddade in tjänst %s(%s)" #: src/hed/libs/message/MCCLoader.cpp:390 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "Länkar MCC %s(%s) till MCC (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:401 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "Länkar MCC %s(%s) till tjänst (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:410 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "Länkar MCC %s(%s) till Plexer (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:415 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "MCC %s(%s) - nästa %s(%s) saknar target" #: src/hed/libs/message/MCCLoader.cpp:434 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "Länkar Plexer %s till MCC (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:445 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "Länkar Plexer %s till tjänst (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:454 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "Länkar Plexer %s till Plexer (%s) under %s" #: src/hed/libs/message/MCCLoader.cpp:460 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "Plexer (%s) - nästa %s(%s) saknar target" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr 
"DÃ¥lig etikett: \"%s\"" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "Operation pÃ¥ sökväg \"%s\"" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "Ingen nästa MCC eller tjänst pÃ¥ sökväg \"%s\"" #: src/hed/libs/message/Service.cpp:35 #, c-format msgid "Security processing/check for '%s' failed: %s" msgstr "Säkerhetsprocessering/kontroll för '%s' misslyckades: %s" #: src/hed/libs/message/Service.cpp:41 #, c-format msgid "Security processing/check for '%s' passed" msgstr "Säkerhetsprocessering/kontroll för '%s' OK" #: src/hed/libs/otokens/jwse.cpp:55 #, c-format msgid "JWSE::Input: token: %s" msgstr "JWSE::Input: token: %s" #: src/hed/libs/otokens/jwse.cpp:75 #, c-format msgid "JWSE::Input: header: %s" msgstr "JWSE::Input: header: %s" #: src/hed/libs/otokens/jwse.cpp:101 #, c-format msgid "JWSE::Input: JWS content: %s" msgstr "JWSE::Input: JWS innehÃ¥ll: %s" #: src/hed/libs/otokens/jwse.cpp:111 msgid "JWSE::Input: JWS: token too young" msgstr "JWSE::Input: JWS: token för ungt" #: src/hed/libs/otokens/jwse.cpp:120 msgid "JWSE::Input: JWS: token too old" msgstr "JWSE::Input: JWS: token för gammalt" #: src/hed/libs/otokens/jwse.cpp:131 #, c-format msgid "JWSE::Input: JWS: signature algorithm: %s" msgstr "JWSE::Input: JWS: signeringsalgoritm: %s" #: src/hed/libs/otokens/jwse.cpp:174 #, c-format msgid "JWSE::Input: JWS: signature algorithn not supported: %s" msgstr "JWSE::Input: JWS: signeringsalgoritm stöds inte: %s" #: src/hed/libs/otokens/jwse.cpp:192 msgid "JWSE::Input: JWS: signature verification failed" msgstr "JWSE::Input: JWS: signaturverifiering misslyckades" #: src/hed/libs/otokens/jwse.cpp:198 msgid "JWSE::Input: JWE: not supported yet" msgstr "JWSE::Input: JWE: stöds inte än" #: src/hed/libs/otokens/jwse_ecdsa.cpp:21 msgid "JWSE::VerifyECDSA: missing key" msgstr "JWSE::VerifyECDSA: saknad nyckel" #: src/hed/libs/otokens/jwse_ecdsa.cpp:25 msgid "JWSE::VerifyECDSA: wrong signature size" msgstr "JWSE::VerifyECDSA: fel signaturstorlek" #: src/hed/libs/otokens/jwse_ecdsa.cpp:34 msgid "JWSE::VerifyECDSA: failed to create ECDSA signature" msgstr "JWSE::VerifyECDSA: misslyckades med att skapa ECDSA-signatur" #: src/hed/libs/otokens/jwse_ecdsa.cpp:41 msgid "JWSE::VerifyECDSA: failed to parse signature" msgstr "JWSE::VerifyECDSA: misslyckades med att tolka signatur" #: src/hed/libs/otokens/jwse_ecdsa.cpp:47 #, c-format msgid "JWSE::VerifyECDSA: failed to assign ECDSA signature: %i" msgstr "JWSE::VerifyECDSA: misslyckades med att tilldela ECDSA-signatur: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:56 msgid "JWSE::VerifyECDSA: failed to create EVP context" msgstr "JWSE::VerifyECDSA: misslyckades med att skapa EVP-kontext" #: src/hed/libs/otokens/jwse_ecdsa.cpp:61 #, c-format msgid "JWSE::VerifyECDSA: failed to recognize digest: %s" msgstr "JWSE::VerifyECDSA: misslyckades med att känna igen digest: %s" #: src/hed/libs/otokens/jwse_ecdsa.cpp:66 #, c-format msgid "JWSE::VerifyECDSA: failed to initialize hash: %i" msgstr "JWSE::VerifyECDSA: misslyckades med att initialisera hash: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:72 #, c-format msgid "JWSE::VerifyECDSA: failed to add message to hash: %i" msgstr "" "JWSE::VerifyECDSA: misslyckades med att lägga till meddelande till hash: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:80 #, c-format msgid "JWSE::VerifyECDSA: failed to finalize hash: %i" msgstr "JWSE::VerifyECDSA: misslyckades med att färdigställa hash: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:87 
#, c-format msgid "JWSE::VerifyECDSA: failed to verify: %i" msgstr "JWSE::VerifyECDSA: misslyckades med att verifiera: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:96 msgid "JWSE::SignECDSA: missing key" msgstr "JWSE::SignECDSA: saknad nyckel" #: src/hed/libs/otokens/jwse_ecdsa.cpp:104 msgid "JWSE::SignECDSA: failed to create EVP context" msgstr "JWSE::SignECDSA: misslyckades med att skapa EVP-kontext" #: src/hed/libs/otokens/jwse_ecdsa.cpp:109 #, c-format msgid "JWSE::SignECDSA: failed to recognize digest: %s" msgstr "JWSE::SignECDSA: misslyckades med att känna igen digest: %s" #: src/hed/libs/otokens/jwse_ecdsa.cpp:114 #, c-format msgid "JWSE::SignECDSA: failed to initialize hash: %i" msgstr "JWSE::SignECDSA: misslyckades med att initialisera hash: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:120 #, c-format msgid "JWSE::SignECDSA: failed to add message to hash: %i" msgstr "" "JWSE::SignECDSA: misslyckades med att lägga till meddelande till hash: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:128 #, c-format msgid "JWSE::SignECDSA: failed to finalize hash: %i" msgstr "JWSE::SignECDSA: misslyckades med att färdigställa hash: %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:135 msgid "JWSE::SignECDSA: failed to create ECDSA signature" msgstr "JWSE::SignECDSA: misslyckades med att skapa ECDSA-signatur" #: src/hed/libs/otokens/jwse_ecdsa.cpp:143 msgid "JWSE::SignECDSA: failed to parse signature" msgstr "JWSE::SignECDSA: misslyckades med att tolka signatur" #: src/hed/libs/otokens/jwse_ecdsa.cpp:150 #, c-format msgid "JWSE::SignECDSA: wrong signature size: %i + %i" msgstr "JWSE::SignECDSA: fel signaturstorlek: %i + %i" #: src/hed/libs/otokens/jwse_ecdsa.cpp:156 msgid "JWSE::SignECDSA: wrong signature size written" msgstr "JWSE::SignECDSA: fel signaturstorlek skriven" #: src/hed/libs/otokens/jwse_keys.cpp:273 msgid "JWSE::ExtractPublicKey: x5c key" msgstr "JWSE::ExtractPublicKey: x5c-nyckel" #: src/hed/libs/otokens/jwse_keys.cpp:281 msgid "JWSE::ExtractPublicKey: jwk key" msgstr "JWSE::ExtractPublicKey: jwk-nyckel" #: src/hed/libs/otokens/jwse_keys.cpp:288 msgid "JWSE::ExtractPublicKey: external jwk key" msgstr "JWSE::ExtractPublicKey: extern jwk-nyckel" #: src/hed/libs/otokens/jwse_keys.cpp:315 #, c-format msgid "JWSE::ExtractPublicKey: deleting outdated info: %s" msgstr "JWSE::ExtractPublicKey: raderar inaktuell info: %s" #: src/hed/libs/otokens/jwse_keys.cpp:344 #, c-format msgid "JWSE::ExtractPublicKey: fetching jws key from %s" msgstr "JWSE::ExtractPublicKey: hämtar jws-nyckel frÃ¥n %s" #: src/hed/libs/otokens/jwse_keys.cpp:372 msgid "JWSE::ExtractPublicKey: no supported key" msgstr "JWSE::ExtractPublicKey: inte en nyckel som stöds" #: src/hed/libs/otokens/jwse_keys.cpp:375 msgid "JWSE::ExtractPublicKey: key parsing error" msgstr "JWSE::ExtractPublicKey: nyckeltolkningsfel" #: src/hed/libs/otokens/openid_metadata.cpp:40 #: src/hed/libs/otokens/openid_metadata.cpp:45 #, c-format msgid "Input: metadata: %s" msgstr "Indata: metadata: %s" #: src/hed/libs/otokens/openid_metadata.cpp:438 #, c-format msgid "Fetch: response code: %u %s" msgstr "Fetch: svarskod: %u %s" #: src/hed/libs/otokens/openid_metadata.cpp:440 #, c-format msgid "Fetch: response body: %s" msgstr "Fetch: svars-body: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:141 #, c-format msgid "Can not load ARC evaluator object: %s" msgstr "Kan inte ladda in ARC-utvärderingsobjekt: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:192 #, c-format msgid "Can not load ARC request object: %s" msgstr "Kan inte ladda in ARC-begäranobjekt: 
%s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:233 #, c-format msgid "Can not load policy object: %s" msgstr "Kan inte ladda in policyobjekt: %s" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:281 msgid "Can not load policy object" msgstr "Kan inte ladda in policyobjekt" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:329 msgid "Can not load request object" msgstr "Kan inte ladda in begäranobjekt" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "Kan inte generera policyobjekt" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "Id= %s,Typ= %s,Utfärdare= %s,Värde= %s" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "Inget attribut existerar som kan hantera typen: %s" #: src/hed/mcc/http/MCCHTTP.cpp:189 #, c-format msgid "HTTP Error: %d %s" msgstr "HTTP-fel: %d %s" #: src/hed/mcc/http/MCCHTTP.cpp:270 msgid "Cannot create http payload" msgstr "Kan inte skapa http-nyttolast" #: src/hed/mcc/http/MCCHTTP.cpp:353 msgid "No next element in the chain" msgstr "Inget nästa element i kedjan" #: src/hed/mcc/http/MCCHTTP.cpp:362 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "nästa element i kedjan returnerade felstatus" #: src/hed/mcc/http/MCCHTTP.cpp:371 msgid "next element of the chain returned no payload" msgstr "nästa element i kedjan returnerade ingen nyttolast" #: src/hed/mcc/http/MCCHTTP.cpp:383 msgid "next element of the chain returned invalid/unsupported payload" msgstr "nästa element i kedjan returnerade ogiltig/ej stödd nyttolast" #: src/hed/mcc/http/MCCHTTP.cpp:465 msgid "Error to flush output payload" msgstr "Fel vid utmatning av utdatanyttolast" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "<< %s" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "< %s" #: src/hed/mcc/http/PayloadHTTP.cpp:576 msgid "Failed to parse HTTP header" msgstr "Misslyckades med att tolka HTTP-huvud" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "Ogiltigt HTTP-objekt kan inte producera resultat" #: src/hed/mcc/http/PayloadHTTP.cpp:969 #, c-format msgid "> %s" msgstr "> %s" #: src/hed/mcc/http/PayloadHTTP.cpp:994 msgid "Failed to write header to output stream" msgstr "Misslyckades med att skriva header till utdataström" #: src/hed/mcc/http/PayloadHTTP.cpp:1019 src/hed/mcc/http/PayloadHTTP.cpp:1025 #: src/hed/mcc/http/PayloadHTTP.cpp:1031 src/hed/mcc/http/PayloadHTTP.cpp:1041 #: src/hed/mcc/http/PayloadHTTP.cpp:1053 src/hed/mcc/http/PayloadHTTP.cpp:1058 #: src/hed/mcc/http/PayloadHTTP.cpp:1063 src/hed/mcc/http/PayloadHTTP.cpp:1071 #: src/hed/mcc/http/PayloadHTTP.cpp:1078 msgid "Failed to write body to output stream" msgstr "Misslyckades med att skriva body till utdataström" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "Hoppar över tjänst: hittade ingen ServicePath!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "Hoppar över tjänst: hittade ingen SchemaPath!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "Skapande av tolkningskontext misslyckades!" 
#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "Kan inte tolka schema!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "Tom nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "Kunde inte konvertera nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "Kunde inte skapa SOAP-nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "Tom indatanyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "Kunde inte konvertera inkommande nyttolast!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "Schema saknas! Hoppar över validering..." #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!" msgstr "Kunde inte validera meddelande!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:238 src/hed/mcc/soap/MCCSOAP.cpp:252 #: src/hed/mcc/soap/MCCSOAP.cpp:282 msgid "empty next chain element" msgstr "tomt nästa kedjeelement" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:298 msgid "next element of the chain returned empty payload" msgstr "nästa element i kedjan returnerade tom nyttolast" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "nästa element i kedjan returnerade ogiltig nyttolast" #: src/hed/mcc/soap/MCCSOAP.cpp:223 msgid "empty input payload" msgstr "tom indatanyttolast" #: src/hed/mcc/soap/MCCSOAP.cpp:233 #, c-format msgid "MIME is not suitable for SOAP: %s" msgstr "MIME är inte lämplig för SOAP: %s" #: src/hed/mcc/soap/MCCSOAP.cpp:247 msgid "incoming message is not SOAP" msgstr "inkommande meddelande är inte SOAP" #: src/hed/mcc/soap/MCCSOAP.cpp:274 #, c-format msgid "Security check failed in SOAP MCC for incoming message: %s" msgstr "" "Säkerhetskontroll misslyckades i SOAP-MCC för inkommande meddelande: %s" #: src/hed/mcc/soap/MCCSOAP.cpp:290 #, c-format msgid "next element of the chain returned error status: %s" msgstr "nästa element i kedjan returnerade felstatus: %s" #: src/hed/mcc/soap/MCCSOAP.cpp:309 msgid "next element of the chain returned unknown payload - passing through" msgstr "nästa element i kedjan returnerade okänd nyttolast - passerar igenom" #: src/hed/mcc/soap/MCCSOAP.cpp:314 src/hed/mcc/soap/MCCSOAP.cpp:330 #, c-format msgid "Security check failed in SOAP MCC for outgoing message: %s" msgstr "Säkerhetskontroll misslyckades i SOAP-MCC för utgående meddelande: %s" #: src/hed/mcc/soap/MCCSOAP.cpp:384 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "Säkerhetskontroll misslyckades i SOAP-MCC för utgående meddelande" #: src/hed/mcc/soap/MCCSOAP.cpp:437 msgid "Security check failed in SOAP MCC for incoming message" msgstr "Säkerhetskontroll misslyckades i SOAP-MCC för inkommande meddelande" #: src/hed/mcc/tcp/MCCTCP.cpp:82 msgid "Missing Port in Listen element" msgstr "Port saknas i Listen-element" #: src/hed/mcc/tcp/MCCTCP.cpp:91 msgid "Version in Listen element can't be recognized" msgstr "Version i Listen-element kan ej kännas igen" #: src/hed/mcc/tcp/MCCTCP.cpp:100 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "Misslyckades med att erhålla lokal adress för port %s
- %s" #: src/hed/mcc/tcp/MCCTCP.cpp:102 #, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "Misslyckades med att erhÃ¥lla lokal adress för %s:%s - %s" #: src/hed/mcc/tcp/MCCTCP.cpp:109 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "Försöker lyssna pÃ¥ TCP-port %s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:111 #, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "Försöker lyssna pÃ¥ %s:%s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:117 #, c-format msgid "Failed to create socket for listening at TCP port %s(%s): %s" msgstr "" "Misslyckades med att skapa socket för att lyssna pÃ¥ TCP-port %s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:119 #, c-format msgid "Failed to create socket for listening at %s:%s(%s): %s" msgstr "Misslyckades med att skapa socket för att lyssna pÃ¥ %s:%s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:134 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" "Misslyckades med att begränsa socket till IPv6 pÃ¥ TCP-port %s - kan orsaka " "fel för IPv4 pÃ¥ samma port" #: src/hed/mcc/tcp/MCCTCP.cpp:136 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" "Misslyckades med att begränsa socket till IPv6 pÃ¥ %s:%s - kan orsaka fel för " "IPv4 pÃ¥ samma port" #: src/hed/mcc/tcp/MCCTCP.cpp:144 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "Misslyckades med att binda socket för TCP-port %s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "Misslyckades med att binda socket för %s:%s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:161 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "Misslyckades med att lyssna pÃ¥ TCP-port %s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:163 #, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "Misslyckades med att lyssna pÃ¥ %s:%s(%s): %s" #: src/hed/mcc/tcp/MCCTCP.cpp:180 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "Lyssnar pÃ¥ TCP-port %s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:182 #, c-format msgid "Listening on %s:%s(%s)" msgstr "Lyssnar pÃ¥ %s:%s(%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:189 #, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "Misslyckades med att börja lyssna pÃ¥ nÃ¥gon adress för %s:%s" #: src/hed/mcc/tcp/MCCTCP.cpp:191 #, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "Misslyckades med att börja lyssna pÃ¥ nÃ¥gon adress för %s:%s(IPv%s)" #: src/hed/mcc/tcp/MCCTCP.cpp:197 msgid "No listening ports initiated" msgstr "Inga lyssnande portar initierade" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "dropped" msgstr "tappas" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "put on hold" msgstr "ställas i kö" #: src/hed/mcc/tcp/MCCTCP.cpp:208 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" "Sätter förbindelsegräns till %i, förbindelse över gränsen kommer att %s" #: src/hed/mcc/tcp/MCCTCP.cpp:212 msgid "Failed to start thread for listening" msgstr "Misslyckades med att starta trÃ¥d för att lyssna" #: src/hed/mcc/tcp/MCCTCP.cpp:245 msgid "Failed to start thread for communication" msgstr "Misslyckades med att starta trÃ¥d för kommunikation" #: src/hed/mcc/tcp/MCCTCP.cpp:271 msgid "Failed while waiting for connection request" msgstr "Misslyckades under väntan pÃ¥ förbindelsebegäran" #: src/hed/mcc/tcp/MCCTCP.cpp:293 msgid "Failed to accept connection request" msgstr "Misslyckades med att acceptera 
förbindelsebegäran" #: src/hed/mcc/tcp/MCCTCP.cpp:302 msgid "Too many connections - dropping new one" msgstr "För mÃ¥nga förbindelse - tappar en ny" #: src/hed/mcc/tcp/MCCTCP.cpp:309 msgid "Too many connections - waiting for old to close" msgstr "För mÃ¥nga förbindelse - ställer ny i kö" #: src/hed/mcc/tcp/MCCTCP.cpp:548 msgid "next chain element called" msgstr "nästa kedjeelement anropat" #: src/hed/mcc/tcp/MCCTCP.cpp:563 msgid "Only Raw Buffer payload is supported for output" msgstr "Endast raw-buffer-nyttolast stöds för utmatning" #: src/hed/mcc/tcp/MCCTCP.cpp:571 src/hed/mcc/tcp/MCCTCP.cpp:670 #: src/hed/mcc/tls/MCCTLS.cpp:561 msgid "Failed to send content of buffer" msgstr "Misslyckades med att skicka innehÃ¥ll till buffer" #: src/hed/mcc/tcp/MCCTCP.cpp:583 msgid "TCP executor is removed" msgstr "TCP-exekverare tas bort" #: src/hed/mcc/tcp/MCCTCP.cpp:585 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "Socketar passar inte ihop vid avslut %i != %i" #: src/hed/mcc/tcp/MCCTCP.cpp:606 msgid "No Connect element specified" msgstr "Inget Connect-element angivet" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "Missing Port in Connect element" msgstr "Port saknas i Connect-element" #: src/hed/mcc/tcp/MCCTCP.cpp:618 msgid "Missing Host in Connect element" msgstr "Värd saknas i Connect-element" #: src/hed/mcc/tcp/MCCTCP.cpp:646 msgid "TCP client process called" msgstr "TCP-klientprocess anropad" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:65 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:81 #, c-format msgid "Failed to resolve %s (%s)" msgstr "Misslyckades med att slÃ¥ upp %s (%s)" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:91 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "Försöker koppla upp %s(%s):%d" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:95 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "Misslyckades med att skapa socket för förbindelse till %s(%s):%d - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:107 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" "Misslyckades med att erhÃ¥lla TCP-socket-alternativ för förbindelse till " "%s(%s):%d - timeout kommer inte att fungera - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "Misslyckades med att koppla upp mot %s(%s):%i - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "Timeout vid uppkoppling till %s(%s):%i - %i s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:132 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "Misslyckades under väntande pÃ¥ uppkoppling till %s(%s):%i - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "Misslyckades med att koppla upp mot %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:198 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" "Mottog meddelande out-of-band (inte kritiskt, ERROR-nivÃ¥ är bara för " "debuggningsändamÃ¥l)" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:201 msgid "Using CA default location" msgstr "Använder förvald sökväg för CA" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:210 #, c-format msgid "Using CA file: %s" msgstr "Använder CA-fil: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:212 #, c-format msgid "Using CA dir: %s" msgstr "Använder CA-katalog: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:289 #, c-format msgid "Using DH parameters from 
file: %s" msgstr "Använder DH-parametrar frÃ¥n fil: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:292 msgid "Failed to open file with DH parameters for reading" msgstr "Misslyckades med att öppna fil med DH-parametrar för läsning" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:297 msgid "Failed to read file with DH parameters" msgstr "Misslyckades med att läsa fil med DH-parametrar" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:300 msgid "Failed to apply DH parameters" msgstr "Misslyckades med att tillämpa DH-parametrar" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:302 msgid "DH parameters applied" msgstr "DH-parametrar tillämpade" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:316 #, c-format msgid "Using curve with NID: %u" msgstr "Använder kurva med NID: %u" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:319 msgid "Failed to generate EC key" msgstr "Misslyckades med att generera EC-nyckel" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:322 msgid "Failed to apply ECDH parameters" msgstr "Misslyckades med att tillämpa ECDH-parametrar" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:324 msgid "ECDH parameters applied" msgstr "ECDH-parametrar tillämpade" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:330 #, c-format msgid "Using cipher list: %s" msgstr "Använder chifferlista: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:354 #, c-format msgid "Using protocol options: 0x%x" msgstr "Använder protokollalternativ: 0x%x" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "Oberoende proxy - inga rättigheter beviljade" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "Proxy med alla rättigheter ärvda" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "Proxy med tom policy - misslyckades pÃ¥ grund av okänd policy" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "Proxy med specifik policy: %s" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "Proxy med ARC-policy" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "Proxy med okänd policy - misslyckades pÃ¥ grund av okänd policy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:167 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "Förväntade %s i början av \"%s\"" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:121 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "Vi stöder endast CA i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:126 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "Vi stöder endast X509-CA i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:131 msgid "Missing CA subject in Globus signing policy" msgstr "Saknat CA-subjekt i Globus signeringspolicy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:141 msgid "Negative rights are not supported in Globus signing policy" msgstr "Negativa rättigheter stöds inte i Globus signeringspolicy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:145 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "Okända rättigheter i Globus signeringspolicy - %s" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:150 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" "Endast globusrättigheter stöds i Globus 
signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:155 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" "Endast signeringsrättigheter stöds i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:172 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" "Vi stöder endast subjektvillkor i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:177 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" "Vi stöder endast globusvillkor i Globus signeringspolicy - %s stöds inte" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:183 msgid "Missing condition subjects in Globus signing policy" msgstr "Saknade villkorssubjekt i Globus signeringspolicy" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:265 msgid "Unknown element in Globus signing policy" msgstr "Okänt element i Globus signeringspolicy" #: src/hed/mcc/tls/MCCTLS.cpp:237 msgid "Critical VOMS attribute processing failed" msgstr "Behandling av kritiskt VOMS-attribut misslyckades" #: src/hed/mcc/tls/MCCTLS.cpp:245 msgid "VOMS attribute validation failed" msgstr "VOMS-attributvalidering misslyckades" #: src/hed/mcc/tls/MCCTLS.cpp:247 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "VOMS-attribut ignoreras pÃ¥ grund av processerings-/valideringsfel" #: src/hed/mcc/tls/MCCTLS.cpp:439 src/hed/mcc/tls/MCCTLS.cpp:578 #: src/hed/mcc/tls/MCCTLS.cpp:597 #, c-format msgid "Failed to establish connection: %s" msgstr "Misslyckades med att etablera förbindelse: %s" #: src/hed/mcc/tls/MCCTLS.cpp:458 src/hed/mcc/tls/MCCTLS.cpp:540 #, c-format msgid "Peer name: %s" msgstr "Peer-namn: %s" #: src/hed/mcc/tls/MCCTLS.cpp:460 src/hed/mcc/tls/MCCTLS.cpp:542 #, c-format msgid "Identity name: %s" msgstr "Identitetsnamn: %s" #: src/hed/mcc/tls/MCCTLS.cpp:462 src/hed/mcc/tls/MCCTLS.cpp:544 #, c-format msgid "CA name: %s" msgstr "CA-namn: %s" #: src/hed/mcc/tls/MCCTLS.cpp:469 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" "Misslyckades med att processera säkerhetsattribut i TLS-MCC för inkommande " "meddelande" #: src/hed/mcc/tls/MCCTLS.cpp:477 msgid "Security check failed in TLS MCC for incoming message" msgstr "Säkerhetskontroll misslyckades i TLS-MCC för inkommande meddelande" #: src/hed/mcc/tls/MCCTLS.cpp:550 msgid "Security check failed for outgoing TLS message" msgstr "Säkerhetskontroll misslyckades för utgÃ¥ende TLS-meddelande" #: src/hed/mcc/tls/MCCTLS.cpp:582 msgid "Security check failed for incoming TLS message" msgstr "Säkerhetskontroll misslyckades för inkommande TLS-meddelande" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:64 #, c-format msgid "Ignoring verification error due to insecure connection allowed: %s" msgstr "" "Ignorerar verifieringsfel p.g.a. att osäkra anslutningar är tillÃ¥tna: %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:79 msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "" "Misslyckades med att allokera minne för certifikatsubjekt vid " "policymatchning." #: src/hed/mcc/tls/PayloadTLSMCC.cpp:83 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." msgstr "" "Misslyckades med att hämta länk till TLS-ström. Ytterligare policymatchning " "hoppas över." 
#: src/hed/mcc/tls/PayloadTLSMCC.cpp:85 msgid "" "Skipping additional policy matching due to insecure connections allowed." msgstr "" "Hoppar över ytterligare policy-matchning eftersom osäkra anslutningar är " "tillåtna." #: src/hed/mcc/tls/PayloadTLSMCC.cpp:109 #, c-format msgid "Certificate %s already expired" msgstr "Giltighetstiden för certifikat %s har redan gått ut" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:117 #, c-format msgid "Certificate %s will expire in %s" msgstr "Giltighetstiden för certifikat %s kommer att gå ut om %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:138 msgid "Failed to store application data" msgstr "Misslyckades med att lagra tillämpningsdata" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:166 msgid "Failed to retrieve application data from OpenSSL" msgstr "Misslyckades med att hämta tillämpningsdata från OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:238 src/hed/mcc/tls/PayloadTLSMCC.cpp:338 msgid "Can not create the SSL Context object" msgstr "Kan inte skapa SSL-kontextobjekt" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:251 src/hed/mcc/tls/PayloadTLSMCC.cpp:358 msgid "Can't set OpenSSL verify flags" msgstr "Kan inte ange OpenSSL verifikationsflaggor" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:372 msgid "Can not create the SSL object" msgstr "Kan inte skapa SSL-objektet" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:280 msgid "Faile to assign hostname extension" msgstr "Misslyckades med att tilldela värdnamnstillägg" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:294 msgid "Failed to establish SSL connection" msgstr "Misslyckades med att etablera SSL-förbindelse" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:298 src/hed/mcc/tls/PayloadTLSMCC.cpp:388 #, c-format msgid "Using cipher: %s" msgstr "Använder chiffer: %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:384 msgid "Failed to accept SSL connection" msgstr "Misslyckades med att acceptera SSL-förbindelse" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:446 #, c-format msgid "Failed to shut down SSL: %s" msgstr "Misslyckades med att stänga av SSL: %s" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" "ArcAuthZ: misslyckades med att initiera alla PDPer - denna instans kommer " "inte att fungera" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "PDP: namnattribut saknas" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "PDP: %s (%s)" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "PDP: %s (%s) kan inte laddas in" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, c-format msgid "There are %d RequestItems" msgstr "Det finns %d RequestItem" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "Kan inte tolka klassnamn för FunctionFactory från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "Kan inte tolka klassnamn för AttributeFactory från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" "Kan inte tolka klassnamn för CombiningAlgorithmFactory från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #:
src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "Kan inte tolka klassnamn för Request från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "Kan inte tolka klassnamn för Policy från konfigurationen" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "Kan inte skapa AttributeFactory dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "Kan inte skapa FnFactory dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "Kan inte skapa AlgFactory dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "Kan inte skapa PolicyStore-objekt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "Kan inte skapa Request dynamiskt" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "Resultatvärde (0=Tillåt, 1=Vägra, 2=Obestämd, 3=Ej applicerbar): %d" #: src/hed/shc/arcpdp/ArcPDP.cpp:109 msgid "Can not find ArcPDPContext" msgstr "Kan inte hitta ArcPDPContext" #: src/hed/shc/arcpdp/ArcPDP.cpp:138 src/hed/shc/xacmlpdp/XACMLPDP.cpp:116 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "Utvärderare stöder inte laddningsbara kombinerande algoritmer" #: src/hed/shc/arcpdp/ArcPDP.cpp:142 src/hed/shc/xacmlpdp/XACMLPDP.cpp:120 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "Utvärderare stöder inte den angivna kombinerande algoritmen - %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:154 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:83 #: src/hed/shc/gaclpdp/GACLPDP.cpp:117 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:132 msgid "Can not dynamically produce Evaluator" msgstr "Kan inte skapa utvärderare dynamiskt" #: src/hed/shc/arcpdp/ArcPDP.cpp:157 msgid "Evaluator for ArcPDP was not loaded" msgstr "Utvärderare för Arc-PDP laddades inte" #: src/hed/shc/arcpdp/ArcPDP.cpp:164 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:56 #: src/hed/shc/gaclpdp/GACLPDP.cpp:127 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:88 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:142 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "Säkerhetsobjekt saknas i meddelande" #: src/hed/shc/arcpdp/ArcPDP.cpp:172 src/hed/shc/arcpdp/ArcPDP.cpp:180 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:136 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:142 #: src/hed/shc/gaclpdp/GACLPDP.cpp:135 src/hed/shc/gaclpdp/GACLPDP.cpp:143 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:96 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:104 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security
information to ARC request" msgstr "Misslyckades med att konvertera säkerhetsinformation till ARC-begäran" #: src/hed/shc/arcpdp/ArcPDP.cpp:188 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:149 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:112 #, c-format msgid "ARC Auth. request: %s" msgstr "ARC-auktoriseringsbegäran: %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:191 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:152 #: src/hed/shc/gaclpdp/GACLPDP.cpp:154 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:115 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:169 msgid "No requested security information was collected" msgstr "Ingen begärd säkerhetsinformation samlades in" #: src/hed/shc/arcpdp/ArcPDP.cpp:198 msgid "Not authorized by arc.pdp - failed to get response from Evaluator" msgstr "Inte auktoriserad av arc.pdp - kunde inte få svar från utvärderare" #: src/hed/shc/arcpdp/ArcPDP.cpp:244 msgid "Authorized by arc.pdp" msgstr "Auktoriserad av arc.pdp" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" "Ej auktoriserad av arc.pdp - några av RequestItem-elementen uppfyller inte " "policy" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "Policy är tom" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "PolicyId: %s Alg inuti denna policy är:-- %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:74 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:128 msgid "No delegation policies in this context and message - passing through" msgstr "" "Inga delegeringspolicyer i denna kontext och meddelande - passerar igenom" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:94 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:108 msgid "Failed to convert security information to ARC policy" msgstr "Misslyckades med att konvertera säkerhetsinformation till ARC-policy" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:115 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:122 #, c-format msgid "ARC delegation policy: %s" msgstr "ARC delegeringspolicy: %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:160 msgid "No authorization response was returned" msgstr "Inget auktoriseringssvar returnerades" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:163 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "Det finns %d begäranden som uppfyller åtminstone en policy" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:182 msgid "Delegation authorization passed" msgstr "Delegeringsauktorisering lyckades" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:184 msgid "Delegation authorization failed" msgstr "Delegeringsauktorisering misslyckades" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" "Saknat CertificatePath-element eller ProxyPath-element, eller " " saknas" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" "Saknat eller tomt KeyPath-element, eller saknas" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "Saknat eller tomt CertificatePath- eller
CACertificatesDir-element" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "Delegeringsroll stöds inte: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "Delegeringstyp stöds inte: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "Misslyckades med att erhÃ¥lla delegeringskontext" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "Kan inte skapa delegeringskontext" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "Delegeringshanterare med delegeringsmottagarroll börjar behandla" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:478 src/services/candypond/CandyPond.cpp:526 #: src/services/data-staging/DataDeliveryService.cpp:648 msgid "process: POST" msgstr "process: POST" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:485 src/services/candypond/CandyPond.cpp:535 #: src/services/data-staging/DataDeliveryService.cpp:657 #: src/services/wrappers/python/pythonwrapper.cpp:416 msgid "input is not SOAP" msgstr "indata är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "Delegeringstjänst: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "Kan inte erhÃ¥lla delegeringsreferens: %s frÃ¥n delegeringstjänst: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "Delegerad referens-identitet: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" "Delegerade referensen erhÃ¥llen frÃ¥n delegeringstjänsten lagras till sökväg: " "%s" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delegation service should be configured" msgstr "Delegeringstjänstens slutpunkt ska ställas in" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "Delegeringshanteraren med delegeringsmottagarroll slutar" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "Delegeringshanteraren med delegeringssändarroll börjar behandla" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential got from path: %s" msgstr "Delegerade referensen erhÃ¥llen frÃ¥n sökväg: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "Kan inte skapa delegeringsreferens för delegeringstjänsten: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 msgid "output is not SOAP" msgstr "utdata är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" "Lyckades sända DelegationService: %s och DelegationID: %s info till peer-" "tjänst" #: 
src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:230 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "Inkommande meddelande är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:353 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "Utgående meddelande är inte SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "Delegeringshanteraren har ej ställts in" #: src/hed/shc/gaclpdp/GACLPDP.cpp:120 msgid "Evaluator for GACLPDP was not loaded" msgstr "Utvärderare för GACL-PDP laddades inte" #: src/hed/shc/gaclpdp/GACLPDP.cpp:151 #, c-format msgid "GACL Auth. request: %s" msgstr "GACL-auktoriseringsbegäran: %s" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "Policy är inte gacl" #: src/hed/shc/legacy/ConfigParser.cpp:13 msgid "Configuration file not specified" msgstr "Inställningsfil inte angiven" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:28 #: src/hed/shc/legacy/ConfigParser.cpp:33 msgid "Configuration file can not be read" msgstr "Inställningsfil kan inte läsas" #: src/hed/shc/legacy/ConfigParser.cpp:43 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "Inställningsfil är trasig - blocknamn är för kort: %s" #: src/hed/shc/legacy/ConfigParser.cpp:47 #, c-format msgid "Configuration file is broken - block name does not end with ]: %s" msgstr "Inställningsfil är trasig - blocknamn slutar inte med ]: %s" #: src/hed/shc/legacy/LegacyMap.cpp:39 src/hed/shc/legacy/LegacyPDP.cpp:119 msgid "Configuration file not specified in ConfigBlock" msgstr "Inställningsfil inte angiven i ConfigBlock" #: src/hed/shc/legacy/LegacyMap.cpp:48 src/hed/shc/legacy/LegacyPDP.cpp:128 msgid "BlockName is empty" msgstr "Blocknamn är tomt" #: src/hed/shc/legacy/LegacyMap.cpp:108 #, c-format msgid "Failed processing user mapping command: %s %s" msgstr "Misslyckades med att behandla användarmappningskommando: %s %s" #: src/hed/shc/legacy/LegacyMap.cpp:114 #, c-format msgid "Failed to change mapping stack processing policy in: %s = %s" msgstr "Misslyckades med att ändra mappningsstack-behandlingspolicy i: %s = %s" #: src/hed/shc/legacy/LegacyMap.cpp:179 msgid "LegacyMap: no configurations blocks defined" msgstr "LegacyMap: inga inställningsblock definierade" #: src/hed/shc/legacy/LegacyMap.cpp:201 src/hed/shc/legacy/LegacyPDP.cpp:255 #, c-format msgid "" "LegacyPDP: there is no %s Sec Attribute defined. Probably ARC Legacy Sec " "Handler is not configured or failed." msgstr "" "LegacyPDP: Det finns inga %s-säkerhetsattribut definierade. ARC Legacy Sec " "Handler är troligen inte inställd eller har misslyckats." #: src/hed/shc/legacy/LegacyMap.cpp:206 src/hed/shc/legacy/LegacyPDP.cpp:260 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." msgstr "LegacyPDP: ARC Legacy Sec Attribute känns inte igen."
#: src/hed/shc/legacy/LegacyPDP.cpp:138 #, c-format msgid "Failed to parse configuration file %s" msgstr "Misslyckades med att tolka inställningsfil %s" #: src/hed/shc/legacy/LegacyPDP.cpp:144 #, c-format msgid "Block %s not found in configuration file %s" msgstr "Hittade inte block %s i inställningsfil %s" #: src/hed/shc/legacy/LegacySecHandler.cpp:40 #: src/hed/shc/legacy/LegacySecHandler.cpp:118 msgid "LegacySecHandler: configuration file not specified" msgstr "LegacySecHandler: inställningsfil inte angiven" #: src/hed/shc/legacy/arc_lcas.cpp:149 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" "Misslyckades med att konvertera GSI-referens till GSS-referens (major: %d, " "minor: %d)" #: src/hed/shc/legacy/arc_lcas.cpp:174 src/hed/shc/legacy/arc_lcmaps.cpp:188 msgid "Missing subject name" msgstr "Subjektnamn saknas" #: src/hed/shc/legacy/arc_lcas.cpp:179 src/hed/shc/legacy/arc_lcmaps.cpp:193 msgid "Missing path of credentials file" msgstr "Sökväg till referensfil saknas" #: src/hed/shc/legacy/arc_lcas.cpp:185 msgid "Missing name of LCAS library" msgstr "Namn på LCAS-bibliotek saknas" #: src/hed/shc/legacy/arc_lcas.cpp:202 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "Kan inte ladda in LCAS-bibliotek %s: %s" #: src/hed/shc/legacy/arc_lcas.cpp:212 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "Kan inte hitta LCAS-funktioner i bibliotek %s" #: src/hed/shc/legacy/arc_lcas.cpp:222 msgid "Failed to initialize LCAS" msgstr "Misslyckades med att initiera LCAS" #: src/hed/shc/legacy/arc_lcas.cpp:237 msgid "Failed to terminate LCAS" msgstr "Misslyckades med att avsluta LCAS" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "Namn på LCMAPS-bibliotek saknas" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 msgid "Can't read policy names" msgstr "Kan inte läsa policynamn" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "Kan inte ladda in LCMAPS-bibliotek %s: %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "Kan inte hitta LCMAPS-funktioner i bibliotek %s" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "LCMAPS har lcmaps_run" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "LCMAPS har getCredentialData" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 msgid "Failed to initialize LCMAPS" msgstr "Misslyckades med att initiera LCMAPS" #: src/hed/shc/legacy/arc_lcmaps.cpp:293 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "LCMAPS returnerade ogiltig GID: %u" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 msgid "LCMAPS did not return any GID" msgstr "LCMAPS returnerade ingen GID" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "LCMAPS returnerade UID som saknar användarnamn: %u" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "LCMAPS returnerade ogiltig UID: %u" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 msgid "LCMAPS did not return any UID" msgstr "LCMAPS returnerade ingen UID" #: src/hed/shc/legacy/arc_lcmaps.cpp:314 msgid "Failed to terminate LCMAPS" msgstr "Misslyckades med att avsluta LCMAPS" #: src/hed/shc/legacy/auth.cpp:35 #, c-format msgid "Unexpected argument for 'all' rule - %s" msgstr "Oväntat argument för 'all'-regel - %s" #:
src/hed/shc/legacy/auth.cpp:340 #, c-format msgid "Credentials stored in temporary file %s" msgstr "Referenser lagrade i temporär fil %s" #: src/hed/shc/legacy/auth.cpp:349 #, c-format msgid "Assigned to authorization group %s" msgstr "Tilldelad till auktoriseringsgrupp %s" #: src/hed/shc/legacy/auth.cpp:354 #, c-format msgid "Assigned to userlist %s" msgstr "Tilldelad till användarlista %s" #: src/hed/shc/legacy/auth_file.cpp:22 #, c-format msgid "Failed to read file %s" msgstr "Misslyckades med att läsa fil %s" #: src/hed/shc/legacy/auth_otokens.cpp:33 msgid "Missing subject in configuration" msgstr "Subjekt saknas i inställningar" #: src/hed/shc/legacy/auth_otokens.cpp:38 msgid "Missing issuer in configuration" msgstr "Utfärdare saknas i inställningar" #: src/hed/shc/legacy/auth_otokens.cpp:43 msgid "Missing audience in configuration" msgstr "Publik saknas i inställningar" #: src/hed/shc/legacy/auth_otokens.cpp:48 msgid "Missing scope in configuration" msgstr "Scope saknas i inställningar" #: src/hed/shc/legacy/auth_otokens.cpp:53 src/hed/shc/legacy/auth_voms.cpp:47 msgid "Missing group in configuration" msgstr "Grupp saknas i inställningar" #: src/hed/shc/legacy/auth_otokens.cpp:56 #, c-format msgid "Rule: subject: %s" msgstr "Regel: subjekt: %s" #: src/hed/shc/legacy/auth_otokens.cpp:57 #, c-format msgid "Rule: issuer: %s" msgstr "Regel: utfärdare: %s" #: src/hed/shc/legacy/auth_otokens.cpp:58 #, c-format msgid "Rule: audience: %s" msgstr "Regel: publik: %s" #: src/hed/shc/legacy/auth_otokens.cpp:59 #, c-format msgid "Rule: scope: %s" msgstr "Regel: scope: %s" #: src/hed/shc/legacy/auth_otokens.cpp:60 src/hed/shc/legacy/auth_voms.cpp:66 #, c-format msgid "Rule: group: %s" msgstr "Regel: grupp: %s" #: src/hed/shc/legacy/auth_otokens.cpp:63 #, c-format msgid "Match issuer: %s" msgstr "Matcha utfärdare: %s" #: src/hed/shc/legacy/auth_otokens.cpp:69 #, c-format msgid "Matched: %s %s %s" msgstr "Matchad: %s %s %s" #: src/hed/shc/legacy/auth_otokens.cpp:83 src/hed/shc/legacy/auth_voms.cpp:93 msgid "Matched nothing" msgstr "Matchade ingenting" #: src/hed/shc/legacy/auth_otokens.cpp:176 #, c-format msgid "Evaluate operator =: left: %s" msgstr "Utvärdera operator =: vänster: %s" #: src/hed/shc/legacy/auth_otokens.cpp:177 #, c-format msgid "Evaluate operator =: right: %s" msgstr "Utvärdera operator =: höger: %s" #: src/hed/shc/legacy/auth_otokens.cpp:182 #, c-format msgid "Evaluate operator =: left from context: %s" msgstr "Utvärdera operator =: vänster från kontext: %s" #: src/hed/shc/legacy/auth_otokens.cpp:239 #, c-format msgid "Operator token: %c" msgstr "Operator-token: %c" #: src/hed/shc/legacy/auth_otokens.cpp:268 #, c-format msgid "String token: %s" msgstr "Sträng-token: %s" #: src/hed/shc/legacy/auth_otokens.cpp:296 #, fuzzy, c-format msgid "Quoted string token: %s" msgstr "Sträng-token: %s" #: src/hed/shc/legacy/auth_otokens.cpp:304 #, c-format msgid "Sequence token parsing: %s" msgstr "Sekvens-token-tolkning: %s" #: src/hed/shc/legacy/auth_otokens.cpp:420 #, c-format msgid "Matching tokens expression: %s" msgstr "Matchar token-uttryck: %s" #: src/hed/shc/legacy/auth_otokens.cpp:424 msgid "Failed to parse expression" msgstr "Misslyckades med att tolka uttryck" #: src/hed/shc/legacy/auth_otokens.cpp:435 #, c-format msgid "%s: " msgstr "%s: " #: src/hed/shc/legacy/auth_otokens.cpp:441 #, c-format msgid " %s" msgstr " %s" #: src/hed/shc/legacy/auth_otokens.cpp:446 msgid "Expression matched" msgstr "Uttryck matchade" #: src/hed/shc/legacy/auth_otokens.cpp:451 #, c-format msgid "Failed to
evaluate expression: %s" msgstr "Misslyckades med att utvärdera uttryck: %s" #: src/hed/shc/legacy/auth_otokens.cpp:454 msgid "Expression failed to matched" msgstr "Uttryck misslyckades med att matcha" #: src/hed/shc/legacy/auth_plugin.cpp:79 src/hed/shc/legacy/unixmap.cpp:216 #, c-format msgid "Plugin %s returned: %u" msgstr "Plugin %s returnerade: %u" #: src/hed/shc/legacy/auth_plugin.cpp:83 src/hed/shc/legacy/unixmap.cpp:220 #, c-format msgid "Plugin %s timeout after %u seconds" msgstr "Plugin %s timeout efter %u sekunder" #: src/hed/shc/legacy/auth_plugin.cpp:86 src/hed/shc/legacy/unixmap.cpp:223 #, c-format msgid "Plugin %s failed to start" msgstr "Plugin %s misslyckades med att starta" #: src/hed/shc/legacy/auth_plugin.cpp:88 src/hed/shc/legacy/unixmap.cpp:225 #, c-format msgid "Plugin %s printed: %s" msgstr "Plugin %s skrev ut: %s" #: src/hed/shc/legacy/auth_plugin.cpp:89 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:226 #, c-format msgid "Plugin %s error: %s" msgstr "Plugin %s fel: %s" #: src/hed/shc/legacy/auth_voms.cpp:42 msgid "Missing VO in configuration" msgstr "VO saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:52 msgid "Missing role in configuration" msgstr "Roll saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:57 msgid "Missing capabilities in configuration" msgstr "Förmåga saknas i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:62 msgid "Too many arguments in configuration" msgstr "För många argument i inställningar" #: src/hed/shc/legacy/auth_voms.cpp:65 #, c-format msgid "Rule: vo: %s" msgstr "Regel: vo: %s" #: src/hed/shc/legacy/auth_voms.cpp:67 #, c-format msgid "Rule: role: %s" msgstr "Regel: roll: %s" #: src/hed/shc/legacy/auth_voms.cpp:68 #, c-format msgid "Rule: capabilities: %s" msgstr "Regel: förmåga: %s" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Match vo: %s" msgstr "Matcha vo: %s" #: src/hed/shc/legacy/auth_voms.cpp:78 #, c-format msgid "Matched: %s %s %s %s" msgstr "Matchad: %s %s %s %s" #: src/hed/shc/legacy/simplemap.cpp:70 #, c-format msgid "SimpleMap: acquired new unmap time of %u seconds" msgstr "SimpleMap: fick ny unmap-tid på %u sekunder" #: src/hed/shc/legacy/simplemap.cpp:72 msgid "SimpleMap: wrong number in unmaptime command" msgstr "SimpleMap: felaktigt nummer i unmaptime-kommando" #: src/hed/shc/legacy/simplemap.cpp:85 src/hed/shc/legacy/simplemap.cpp:90 #, c-format msgid "SimpleMap: %s" msgstr "SimpleMap: %s" #: src/hed/shc/legacy/unixmap.cpp:65 src/hed/shc/legacy/unixmap.cpp:70 msgid "Mapping policy option has empty value" msgstr "Mappningspolicyalternativ har tomt värde" #: src/hed/shc/legacy/unixmap.cpp:80 #, c-format msgid "Unsupported mapping policy action: %s" msgstr "Mappningspolicyhandling stöds inte: %s" #: src/hed/shc/legacy/unixmap.cpp:91 #, c-format msgid "Unsupported mapping policy option: %s" msgstr "Mappningspolicyalternativ stöds inte: %s" #: src/hed/shc/legacy/unixmap.cpp:103 src/hed/shc/legacy/unixmap.cpp:108 msgid "User name mapping command is empty" msgstr "Användarnamnsmappningskommando är tomt" #: src/hed/shc/legacy/unixmap.cpp:116 #, c-format msgid "User name mapping has empty authgroup: %s" msgstr "Användarnamnsmappning har tom auktoriseringsgrupp:
%s" #: src/hed/shc/legacy/unixmap.cpp:147 #, c-format msgid "Unknown user name mapping rule %s" msgstr "Okänd användarnamnsmappningsregel %s" #: src/hed/shc/legacy/unixmap.cpp:156 src/hed/shc/legacy/unixmap.cpp:161 #: src/hed/shc/legacy/unixmap.cpp:177 src/hed/shc/legacy/unixmap.cpp:183 msgid "Plugin (user mapping) command is empty" msgstr "Plugin (användarmappning) kommando är tomt" #: src/hed/shc/legacy/unixmap.cpp:167 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "Plugin (användarmappning) timeout är inte ett nummer: %s" #: src/hed/shc/legacy/unixmap.cpp:171 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "Plugin (användarmappning) timeout är felaktigt nummer: %s" #: src/hed/shc/legacy/unixmap.cpp:204 #, c-format msgid "Plugin %s returned no username" msgstr "Plugin %s returnerade inget användarnamn" #: src/hed/shc/legacy/unixmap.cpp:209 #, c-format msgid "Plugin %s returned too much: %s" msgstr "Plugin %s returnerade för mycket: %s" #: src/hed/shc/legacy/unixmap.cpp:212 #, c-format msgid "Plugin %s returned no mapping" msgstr "Plugin %s returnerade ingen mappning" #: src/hed/shc/legacy/unixmap.cpp:235 msgid "User subject match is missing user subject." msgstr "Användarsubjektmatchning saknar användarsubjekt." #: src/hed/shc/legacy/unixmap.cpp:239 #, c-format msgid "Mapfile at %s can't be opened." msgstr "Mappningsfil på %s kan inte öppnas." #: src/hed/shc/legacy/unixmap.cpp:263 msgid "User pool mapping is missing user subject." msgstr "Användarpoolmappning saknar användarsubjekt." #: src/hed/shc/legacy/unixmap.cpp:268 #, c-format msgid "User pool at %s can't be opened." msgstr "Användarpool på %s kan inte öppnas." #: src/hed/shc/legacy/unixmap.cpp:273 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "Användarpool på %s misslyckades med att utföra användarmappning." #: src/hed/shc/legacy/unixmap.cpp:291 #, c-format msgid "User name direct mapping is missing user name: %s." msgstr "Direkt användarnamnsmappning saknar användarnamn: %s."
#: src/hed/shc/otokens/OTokensSH.cpp:65 msgid "OTokens: Attr: message" msgstr "OTokens: Attr: meddelande" #: src/hed/shc/otokens/OTokensSH.cpp:70 #, c-format msgid "OTokens: Attr: %s = %s" msgstr "OTokens: Attr: %s = %s" #: src/hed/shc/otokens/OTokensSH.cpp:75 #, c-format msgid "OTokens: Attr: token: %s" msgstr "OTokens: Attr: token: %s" #: src/hed/shc/otokens/OTokensSH.cpp:78 #, c-format msgid "OTokens: Attr: token: bearer: %s" msgstr "OTokens: Attr: token: bärare: %s" #: src/hed/shc/otokens/OTokensSH.cpp:193 msgid "OTokens: Handle" msgstr "OTokens: Handtag" #: src/hed/shc/otokens/OTokensSH.cpp:195 msgid "OTokens: Handle: message" msgstr "OTokens: Handtag: meddelande" #: src/hed/shc/otokens/OTokensSH.cpp:198 msgid "Failed to create OTokens security attributes" msgstr "Misslyckades med att skapa OTokens säkerhetsattribut" #: src/hed/shc/otokens/OTokensSH.cpp:202 msgid "OTokens: Handle: token was not present" msgstr "OTokens: Handtag: token var inte närvarande" #: src/hed/shc/otokens/OTokensSH.cpp:206 #, c-format msgid "OTokens: Handle: attributes created: subject = %s" msgstr "OTokens: Handtag: attribut skapade: subjekt = %s" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:47 msgid "Creating a pdpservice client" msgstr "Skapar en pdp-tjänste-klient" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:81 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "Arc-policy kan inte överföras av XACMLs SAML2.0-profil" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:153 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:186 msgid "Policy Decision Service invocation failed" msgstr "Policy-besluts-tjänst-anrop misslyckades" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:156 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:189 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:32 msgid "There was no SOAP response" msgstr "Det fanns inget SOAP-svar" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Authorized from remote pdp service" msgstr "Auktoriserad av fjärr-pdp-tjänst" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:172 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:206 msgid "Unauthorized from remote pdp service" msgstr "Oauktoriserad av fjärr-pdp-tjänst" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "Kan inte erhålla SAMLAssertion SecAttr från meddelandekontext" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:158 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "CertificatePath-element saknas eller är tomt" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "KeyPath-element saknas eller är tomt" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:171 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" "Både CACertificatePath- och CACertificatesDir-elementen saknas eller är tomma" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:185 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or
CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" "CertificatePath- eller CACertificatesDir-element saknas eller är tomt; " "kommer endast att kontrollera signatur, kommer ej att göra " "meddelandeautentisering" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:189 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "Processeringstyp stöds inte: %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "Misslyckades med att tolka SAML-token från inkommande SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:219 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "Misslyckades med att autentisera SAML-token inuti inkommande SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:222 msgid "Succeeded to authenticate SAMLToken" msgstr "Lyckades med att autentisera SAML-token" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 #, c-format msgid "No response from AA service %s" msgstr "Inget svar från AA-tjänst %s" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 #, c-format msgid "SOAP Request to AA service %s failed" msgstr "SOAP-begäran till AA-tjänst %s misslyckades" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:299 msgid "Cannot find content under response soap message" msgstr "Kan inte hitta innehåll under svar-soap-meddelande" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Cannot find under response soap message:" msgstr "Kan inte hitta under svar-soap-meddelande:" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:320 msgid "The Response is not going to this end" msgstr "Svaret kommer inte till denna ände" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:327 msgid "The StatusCode is Success" msgstr "Statuskoden är Success" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:333 msgid "Succeeded to verify the signature under " msgstr "Lyckades verifiera signaturen under " #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:336 msgid "Failed to verify the signature under " msgstr "Misslyckades med att verifiera signaturen under " #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:347 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "Misslyckades med att generera SAML-token för utgående SOAP" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:357 msgid "SAML Token handler is not configured" msgstr "SAML-tokenhanteraren har ej ställts in" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:28 #, c-format msgid "Access list location: %s" msgstr "Åtkomstlista: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:38 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." msgstr "" "Ingen policyfil eller DN angiven för simplelist.pdp, ange ett location-" "attribut eller åtminstone ett DN-element i simplelist-PDP-noden i " "inställningarna."
#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:41 #, c-format msgid "Subject to match: %s" msgstr "Subjekt att matcha: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:44 #, c-format msgid "Policy subject: %s" msgstr "Policy-subjekt: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:46 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:72 #, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "Auktoriserad av simplelist.pdp: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:53 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" "Policyfilen angiven för simplelist.pdp existerar inte, kontrollera location-" "attributet i simplelist-PDP-noden i tjänsteinställningarna" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:60 #, c-format msgid "Policy line: %s" msgstr "Policyrad: %s" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:78 #, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "Ej auktoriserad av simplelist.pdp: %s" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "Påbörja test" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "Mata in begäran från en fil: Request.xml" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "Det finns %d subjekt som uppfyller åtminstone en policy" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "Attributvärde (1): %s" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "Mata in begäran från kod" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "Attributvärde (2): %s" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 msgid "Can not dynamically produce Policy" msgstr "Kan inte skapa policy dynamiskt" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "Attributvärde inuti Subject: %s" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "Begäran har passerat policyutvärderingen" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "PasswordSource-element saknas eller är tomt" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "Lösenordskodningstyp stöds inte: %s" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "Username-element saknas eller är tomt" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "Nyttolasten i inkommande meddelande är tom" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "" "Misslyckades med att konvertera till PayloadSOAP från inkommande nyttolast" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "Misslyckades med att tolka användarnamnstoken från inkommande SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" "Misslyckades med att autentisera användarnamnstoken inuti inkommande SOAP" #:
src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "Lyckades med att autentisera användarnamnstoken" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "Nyttolasten i utgående meddelande är tom" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" "Misslyckades med att konvertera till PayloadSOAP från utgående nyttolast" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "Misslyckades med att skapa användarnamnstoken för utgående SOAP" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "Användarnamnstokenhanteraren har ej ställts in" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "Misslyckades med att tolka X509-token från inkommande SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "Misslyckades med att verifiera X509-token inuti inkommande SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "Misslyckades med att autentisera X509-token inuti inkommande SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "Lyckades med att autentisera X509-token" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "Misslyckades med att skapa X509-token för utgående SOAP" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "X509-tokenhanteraren har ej ställts in" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "Kan inte skapa funktion: Funktions-id existerar inte" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, c-format msgid "Can not create function %s" msgstr "Kan inte skapa funktion %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:87 msgid "Can not find XACMLPDPContext" msgstr "Kan inte hitta XACMLPDPContext" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:135 msgid "Evaluator for XACMLPDP was not loaded" msgstr "Utvärderare för XACML-PDP laddades inte" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:150 src/hed/shc/xacmlpdp/XACMLPDP.cpp:158 msgid "Failed to convert security information to XACML request" msgstr "" "Misslyckades med att konvertera säkerhetsinformation till XACML-begäran" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:166 #, c-format msgid "XACML request: %s" msgstr "XACML-begäran: %s" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:178 msgid "Authorized from xacml.pdp" msgstr "Auktoriserad av xacml.pdp" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "UnAuthorized from xacml.pdp" msgstr "Oauktoriserad av xacml.pdp" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "Kan inte hitta -element med rätt namnrymd" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "Inget target tillgängligt inuti policyn" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "Begäran är tom" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "Kan inte hitta -element med rätt namnrymd" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid
"Invalid Effect" msgstr "Ogiltig effekt" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "Inget target tillgängligt inuti regeln" #: src/libs/data-staging/DTR.cpp:81 src/libs/data-staging/DTR.cpp:85 #, c-format msgid "Could not handle endpoint %s" msgstr "Kan inte hantera slutpunkt %s" #: src/libs/data-staging/DTR.cpp:95 msgid "Source is the same as destination" msgstr "källan är densamma som destinationen" #: src/libs/data-staging/DTR.cpp:175 #, c-format msgid "Invalid ID: %s" msgstr "Ogiltig ID: %s" #: src/libs/data-staging/DTR.cpp:212 #, c-format msgid "%s->%s" msgstr "%s->%s" #: src/libs/data-staging/DTR.cpp:320 #, c-format msgid "No callback for %s defined" msgstr "Ingen callback för %s definierad" #: src/libs/data-staging/DTR.cpp:335 #, c-format msgid "NULL callback for %s" msgstr "NULL-callback för %s" #: src/libs/data-staging/DTR.cpp:338 #, c-format msgid "Request to push to unknown owner - %u" msgstr "Begäran att pusha till okänd ägare - %u" #: src/libs/data-staging/DTRList.cpp:216 #, c-format msgid "Boosting priority from %i to %i due to incoming higher priority DTR" msgstr "" "Boostar prioritet frÃ¥n %i till %i pÃ¥ grund av inkommande DTR med högre " "prioritet" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "Mottog ogiltig DTR" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "Leverans mottog ny DTR %s med källa: %s, destination: %s" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "Mottog ingen DTR" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "Avbryter DTR %s med källa: %s, destination: %s" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "DTR %s begärd att avbrytas men ingen aktiv överföring" #: src/libs/data-staging/DataDelivery.cpp:147 #, c-format msgid "Cleaning up after failure: deleting %s" msgstr "Rensar upp efter misslyckande: tar bort %s" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" "Misslyckades med att ta bort leverans-objekt eller borttagning avbröts pÃ¥ " "grund av timeout" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid "Transfer finished: %llu bytes transferred %s" msgstr "Överföring avslutad: %llu byteer överförda %s" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "Dataleveransloop avslutades" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:45 msgid "No source defined" msgstr "Ingen källa definierad" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:49 msgid "No destination defined" msgstr "Ingen destination definierad" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:157 #, c-format msgid "Bad checksum format %s" msgstr "Felaktigt format för checksumma %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:196 #, c-format msgid "Failed to run command: %s" msgstr "Misslyckades med att köra kommando: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:235 #, c-format msgid "DataDelivery: %s" msgstr "Dataleverans: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:247 #, c-format msgid "DataStagingDelivery 
exited with code %i" msgstr "DataStagingDelivery avslutades med kod %i" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:266 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "Överföring avbröts efter %i sekunder utan kommunikation" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:72 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "Connecting to Delivery service at %s" msgstr "Kopplar upp mot leveranstjänst pÃ¥ %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:101 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "Misslyckades med att sätta upp referensdelegering med %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:107 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:185 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:251 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:337 #, c-format msgid "" "Request:\n" "%s" msgstr "" "Begäran:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:113 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:343 #, c-format msgid "Could not connect to service %s: %s" msgstr "Misslyckades med att koppla upp mot tjänst %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:121 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:351 #, c-format msgid "No SOAP response from Delivery service %s" msgstr "Inget SOAP-svar frÃ¥n leveranstjänst %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:126 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:204 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:278 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:357 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Svar:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, c-format msgid "Failed to start transfer request: %s" msgstr "Misslyckades med att börja överföringsbegäran: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:142 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "Felaktigt format i XML-svar frÃ¥n tjänst pÃ¥: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:150 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "Kan inte göra ny överföringsbegäran: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:155 #, c-format msgid "Started remote Delivery at %s" msgstr "Startade fjärrleverans pÃ¥ %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:192 #, c-format msgid "Failed to send cancel request: %s" msgstr "Misslyckades med att sända begäran att avbryta: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:199 msgid "Failed to cancel: No SOAP response" msgstr "Misslyckades med att avbryta: inget SOAP-svar" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:213 #, c-format msgid "Failed to cancel transfer request: %s" msgstr "Misslyckades med att avbryta överföringsbegäran: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:220 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:301 #, c-format msgid "Bad format in XML response: %s" msgstr "Felaktigt format i XML-svar: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:227 #, c-format msgid "Failed to cancel: %s" msgstr "Misslyckades med att avbryta: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:271 msgid "No SOAP response from delivery service" msgstr "Inget SOAP-svar frÃ¥n leveranstjänst" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:292 #, c-format msgid "Failed to query state: %s" msgstr "Misslyckades med att frÃ¥ga om tillstÃ¥nd: %s" #: 
src/libs/data-staging/DataDeliveryRemoteComm.cpp:366 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "SOAP-fel från leveranstjänst på %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:374 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "Felaktigt format i XML-svar från leveranstjänst på %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:382 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "Fel vid pingning av leveranstjänst på %s: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:390 #, c-format msgid "Dir %s allowed at service %s" msgstr "Katalog %s tillåten på tjänst %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:484 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" "Dataleverans-logg-svans:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:498 msgid "Failed locating credentials" msgstr "Misslyckades med att lokalisera referenser" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:503 msgid "Failed to initiate client connection" msgstr "Misslyckades med att initiera klientförbindelse" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:509 msgid "Client connection has no entry point" msgstr "Klientförbindelsen har ingen ingångspunkt" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:518 msgid "Initiating delegation procedure" msgstr "Initierar delegeringsprocess" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:520 msgid "Failed to initiate delegation credentials" msgstr "Misslyckades med att initiera delegeringsreferenser" #: src/libs/data-staging/DataStagingDelivery.cpp:97 #, c-format msgid "%5u s: %10.1f kB %8.1f kB/s" msgstr "%5u s: %10.1f kB %8.1f kB/s" #: src/libs/data-staging/DataStagingDelivery.cpp:156 msgid "Unexpected arguments" msgstr "Oväntade argument" #: src/libs/data-staging/DataStagingDelivery.cpp:159 msgid "Source URL missing" msgstr "Käll-URL saknas" #: src/libs/data-staging/DataStagingDelivery.cpp:162 msgid "Destination URL missing" msgstr "Destinations-URL saknas" #: src/libs/data-staging/DataStagingDelivery.cpp:166 #, c-format msgid "Source URL not valid: %s" msgstr "Käll-URL är inte giltig: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:170 #, c-format msgid "Destination URL not valid: %s" msgstr "Destinations-URL är inte giltig: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #, c-format msgid "Unknown transfer option: %s" msgstr "Okänt överföringsalternativ: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:272 #, c-format msgid "Source URL not supported: %s" msgstr "Käll-URL stöds inte: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:277 #: src/libs/data-staging/DataStagingDelivery.cpp:299 msgid "No credentials supplied" msgstr "Inga referenser tillhandahållna" #: src/libs/data-staging/DataStagingDelivery.cpp:294 #, c-format msgid "Destination URL not supported: %s" msgstr "Destinations-URL stöds inte: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:348 #, c-format msgid "Will calculate %s checksum" msgstr "Kommer att beräkna %s-checksumma" #: src/libs/data-staging/DataStagingDelivery.cpp:359 msgid "Cannot use supplied --size option" msgstr "Kan inte använda tillhandahållet --size-alternativ" #: src/libs/data-staging/DataStagingDelivery.cpp:572 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" "Checksumma stämmer inte överens mellan beräknad checksumma %s och källans " "checksumma %s" #:
src/libs/data-staging/DataStagingDelivery.cpp:582 #, c-format msgid "Failed cleaning up destination %s" msgstr "Misslyckades med att rensa upp destination %s" #: src/libs/data-staging/Processor.cpp:49 #: src/services/candypond/CandyPond.cpp:117 msgid "Error creating cache" msgstr "Fel vid skapande av cache" #: src/libs/data-staging/Processor.cpp:73 #, c-format msgid "Forcing re-download of file %s" msgstr "Framtvingar åternedladdning av fil %s" #: src/libs/data-staging/Processor.cpp:90 #, c-format msgid "Will wait around %is" msgstr "Kommer att vänta omkring %i s" #: src/libs/data-staging/Processor.cpp:109 #, c-format msgid "Force-checking source of cache file %s" msgstr "Tvångskontrollerar källa för cachad fil %s" #: src/libs/data-staging/Processor.cpp:112 #, c-format msgid "Source check requested but failed: %s" msgstr "Kontroll av källa begärd men misslyckades: %s" #: src/libs/data-staging/Processor.cpp:132 msgid "Permission checking failed, will try downloading without using cache" msgstr "" "Åtkomsträttighetskontroll inte godkänd, kommer att försöka ladda ned utan " "att använda cache" #: src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Will download to cache file %s" msgstr "Kommer att ladda ned till cachefil %s" #: src/libs/data-staging/Processor.cpp:183 msgid "Looking up source replicas" msgstr "Slår upp källreplikor" #: src/libs/data-staging/Processor.cpp:205 #: src/libs/data-staging/Processor.cpp:432 msgid "Resolving destination replicas" msgstr "Slår upp destinationsreplikor" #: src/libs/data-staging/Processor.cpp:222 msgid "No locations for destination different from source found" msgstr "Hittade inga platser för destinationen som skiljer sig från källan" #: src/libs/data-staging/Processor.cpp:233 msgid "Pre-registering destination in index service" msgstr "Förregistrerar destination i indextjänst" #: src/libs/data-staging/Processor.cpp:259 msgid "Resolving source replicas in bulk" msgstr "Massuppslagning av källreplikor" #: src/libs/data-staging/Processor.cpp:273 #, c-format msgid "No replicas found for %s" msgstr "Hittade inga replikor för %s" #: src/libs/data-staging/Processor.cpp:293 #, c-format msgid "Checking %s" msgstr "Kontrollerar %s" #: src/libs/data-staging/Processor.cpp:302 #: src/libs/data-staging/Processor.cpp:360 msgid "Metadata of replica and index service differ" msgstr "Metadata för replika och indextjänst skiljer sig åt" #: src/libs/data-staging/Processor.cpp:310 #, c-format msgid "Failed checking source replica %s: %s" msgstr "Misslyckades med att kontrollera källreplika %s: %s" #: src/libs/data-staging/Processor.cpp:336 msgid "Querying source replicas in bulk" msgstr "Massfrågar källreplikor" #: src/libs/data-staging/Processor.cpp:348 #, c-format msgid "Failed checking source replica: %s" msgstr "Misslyckades med att kontrollera källreplika: %s" #: src/libs/data-staging/Processor.cpp:354 msgid "Failed checking source replica" msgstr "Misslyckades med att kontrollera källreplika" #: src/libs/data-staging/Processor.cpp:391 msgid "Overwrite requested - will pre-clean destination" msgstr "Överskrivning begärd - kommer att förstäda destination" #: src/libs/data-staging/Processor.cpp:400 msgid "Finding existing destination replicas" msgstr "Hittar existerande destinationsreplikor" #: src/libs/data-staging/Processor.cpp:412 #, c-format msgid "Failed to delete replica %s: %s" msgstr "Misslyckades med att ta bort replika %s: %s" #: src/libs/data-staging/Processor.cpp:426 #, c-format msgid "Unregistering %s" msgstr "Avregistrerar %s" #:
src/libs/data-staging/Processor.cpp:437 msgid "Pre-registering destination" msgstr "Förregistrerar destination" #: src/libs/data-staging/Processor.cpp:443 #, c-format msgid "Failed to pre-clean destination: %s" msgstr "Misslyckades med att förstäda destination: %s" #: src/libs/data-staging/Processor.cpp:452 msgid "Destination already exists" msgstr "Destinationen existerar redan" #: src/libs/data-staging/Processor.cpp:476 msgid "Preparing to stage source" msgstr "Förbereder att staga källa" #: src/libs/data-staging/Processor.cpp:489 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "Källa är inte redo, kommer att vänta %u sekunder" #: src/libs/data-staging/Processor.cpp:495 msgid "No physical files found for source" msgstr "Hittade inga fysiska filer för källa" #: src/libs/data-staging/Processor.cpp:513 msgid "Preparing to stage destination" msgstr "Förbereder att staga destination" #: src/libs/data-staging/Processor.cpp:526 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "Destination är inte redo, kommer att vänta %u sekunder" #: src/libs/data-staging/Processor.cpp:532 msgid "No physical files found for destination" msgstr "Hittade inga fysiska filer för destination" #: src/libs/data-staging/Processor.cpp:558 msgid "Releasing source" msgstr "Frigör källa" #: src/libs/data-staging/Processor.cpp:562 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "Det uppstod ett problem under källans efter-överförings-hantering: %s" #: src/libs/data-staging/Processor.cpp:567 msgid "Releasing destination" msgstr "Frigör destination" #: src/libs/data-staging/Processor.cpp:571 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" "Det uppstod ett problem under destinationens efter-överförings-hantering " "efter felet: %s" #: src/libs/data-staging/Processor.cpp:575 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "Fel under destinationens efter-överförings-hantering: %s" #: src/libs/data-staging/Processor.cpp:597 #, c-format msgid "Finalising current replica %s" msgstr "Slutför nuvarande replika %s" #: src/libs/data-staging/Processor.cpp:617 msgid "Removing pre-registered destination in index service" msgstr "Tar bort förregistrerad destination i indextjänst" #: src/libs/data-staging/Processor.cpp:620 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" "Misslyckades med att avregistrera förregistrerad destination %s: %s. Du kan " "behöva avregistrera den manuellt" #: src/libs/data-staging/Processor.cpp:626 msgid "Registering destination replica" msgstr "Registrerar destinationsreplika" #: src/libs/data-staging/Processor.cpp:629 #, c-format msgid "Failed to register destination replica: %s" msgstr "Misslyckades med att registrera destinationsreplika: %s" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" "Misslyckades med att avregistrera förregistrerad destination %s. Du kan " "behöva avregistrera den manuellt" #: src/libs/data-staging/Processor.cpp:662 msgid "Error creating cache. Stale locks may remain." msgstr "Fel vid skapande av cache. Gamla lås kan finnas kvar."
#: src/libs/data-staging/Processor.cpp:695 #, c-format msgid "Linking/copying cached file to %s" msgstr "Länkar/kopierar cachad fil till %s" #: src/libs/data-staging/Processor.cpp:716 #, c-format msgid "Failed linking cache file to %s" msgstr "Misslyckades med att länka cachefil till %s" #: src/libs/data-staging/Processor.cpp:720 #, c-format msgid "Error linking cache file to %s." msgstr "Fel vid länkning av cachefil till %s." #: src/libs/data-staging/Processor.cpp:741 #: src/libs/data-staging/Processor.cpp:748 msgid "Adding to bulk request" msgstr "Lägger till i massbegäran" #: src/libs/data-staging/Scheduler.cpp:174 #: src/libs/data-staging/Scheduler.cpp:181 msgid "source" msgstr "käll" #: src/libs/data-staging/Scheduler.cpp:174 #: src/libs/data-staging/Scheduler.cpp:181 msgid "destination" msgstr "destinations" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "Använder nästa %s-replika" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "Inga fler %s-replikor" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "Kommer att rensa upp förregistrerad destination" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "Kommer att frigöra cachelås" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "Hoppar till slutet av datastaging" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "Källa mappas till %s" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" "Kan inte länka till källa som kan modifieras, kommer att kopiera istället" #: src/libs/data-staging/Scheduler.cpp:212 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" "Kan inte länka till fjärrdestination. Kommer inte att använda mappad URL" #: src/libs/data-staging/Scheduler.cpp:215 msgid "Linking mapped file" msgstr "Länkar mappad fil" #: src/libs/data-staging/Scheduler.cpp:222 #, c-format msgid "Failed to create link: %s. Will not use mapped URL" msgstr "" "Misslyckades med att skapa länk: %s.
Kommer inte att använda mappad URL" #: src/libs/data-staging/Scheduler.cpp:247 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" "Schemaläggaren mottog en ny DTR %s med källa: %s och destination: %s, " "tilldelad överföringsandel %s med prioritet %d" #: src/libs/data-staging/Scheduler.cpp:255 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" "Fil kan inte cachas, begärdes att inte cachas eller ingen cache " "tillgänglig, hoppar över cachekontroll" #: src/libs/data-staging/Scheduler.cpp:261 msgid "File is cacheable, will check cache" msgstr "Fil kan cachas, kommer att kontrollera cache" #: src/libs/data-staging/Scheduler.cpp:264 #: src/libs/data-staging/Scheduler.cpp:289 #, c-format msgid "File is currently being cached, will wait %is" msgstr "Fil håller på att cachas, kommer att vänta %i s" #: src/libs/data-staging/Scheduler.cpp:283 msgid "Timed out while waiting for cache lock" msgstr "Avbröts på grund av timeout under väntan på cachelås" #: src/libs/data-staging/Scheduler.cpp:293 msgid "Checking cache again" msgstr "Kontrollerar cache igen" #: src/libs/data-staging/Scheduler.cpp:313 msgid "Destination file is in cache" msgstr "Destinationsfil finns i cache" #: src/libs/data-staging/Scheduler.cpp:317 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" "Källa och/eller destination är indextjänst, kommer att slå upp replikor" #: src/libs/data-staging/Scheduler.cpp:320 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" "Varken källa eller destination är indextjänster, kommer att hoppa över " "uppslagning av replikor" #: src/libs/data-staging/Scheduler.cpp:331 msgid "Problem with index service, will release cache lock" msgstr "Problem med indextjänst, kommer att frigöra cachelås" #: src/libs/data-staging/Scheduler.cpp:335 msgid "Problem with index service, will proceed to end of data staging" msgstr "Problem med indextjänst, kommer att hoppa till slutet av datastaging" #: src/libs/data-staging/Scheduler.cpp:345 msgid "Checking source file is present" msgstr "Kontrollerar att källfil är närvarande" #: src/libs/data-staging/Scheduler.cpp:353 msgid "Error with source file, moving to next replica" msgstr "Fel med källfil, hoppar till nästa replika" #: src/libs/data-staging/Scheduler.cpp:375 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "Replika %s har lång latens, provar nästa replika" #: src/libs/data-staging/Scheduler.cpp:377 #, c-format msgid "No more replicas, will use %s" msgstr "Inga fler replikor, kommer att använda %s" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "Checking replica %s" msgstr "Kontrollerar replika %s" #: src/libs/data-staging/Scheduler.cpp:392 msgid "Pre-clean failed" msgstr "Förstädning misslyckades" #: src/libs/data-staging/Scheduler.cpp:397 msgid "Pre-clean failed, will still try to copy" msgstr "Förstädning misslyckades, kommer fortfarande att försöka kopiera" #: src/libs/data-staging/Scheduler.cpp:405 msgid "Source or destination requires staging" msgstr "Källa eller destination kräver staging" #: src/libs/data-staging/Scheduler.cpp:409 msgid "No need to stage source or destination, skipping staging" msgstr "Behöver inte staga källa eller destination, hoppar över staging" #: src/libs/data-staging/Scheduler.cpp:439 msgid "Staging request timed out, will release
request" msgstr "Stagingbegäran avbröts pÃ¥ grund av timeout, kommer att frigöra begäran" #: src/libs/data-staging/Scheduler.cpp:443 msgid "Querying status of staging request" msgstr "FrÃ¥gar efter status för stagingbegäran" #: src/libs/data-staging/Scheduler.cpp:452 msgid "Releasing requests" msgstr "Frigör begärningar" #: src/libs/data-staging/Scheduler.cpp:477 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "DTR är redo att överföra, flyttar till leveranskö" #: src/libs/data-staging/Scheduler.cpp:492 #, c-format msgid "Transfer failed: %s" msgstr "Överföring misslyckades: %s" #: src/libs/data-staging/Scheduler.cpp:502 msgid "Releasing request(s) made during staging" msgstr "Frigör begärningar som gjordes under staging" #: src/libs/data-staging/Scheduler.cpp:505 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" "Varken källa eller destination stagades, hoppar över frigörande av " "begärningar" #: src/libs/data-staging/Scheduler.cpp:526 msgid "Trying next replica" msgstr "Försöker med nästa replika" #: src/libs/data-staging/Scheduler.cpp:531 msgid "unregister" msgstr "avregistrera" #: src/libs/data-staging/Scheduler.cpp:531 msgid "register" msgstr "registrera" #: src/libs/data-staging/Scheduler.cpp:530 #, c-format msgid "Will %s in destination index service" msgstr "Kommer att %s i destinationsindextjänsten" #: src/libs/data-staging/Scheduler.cpp:534 msgid "Destination is not index service, skipping replica registration" msgstr "Destination är inte indextjänst, hoppar över replikaregistrering" #: src/libs/data-staging/Scheduler.cpp:547 msgid "Error registering replica, moving to end of data staging" msgstr "Fel vid registrering av replika, hoppar till slutet av datastaging" #: src/libs/data-staging/Scheduler.cpp:556 msgid "Will process cache" msgstr "Kommer att processera cache" #: src/libs/data-staging/Scheduler.cpp:560 msgid "File is not cacheable, skipping cache processing" msgstr "Filen kan inte cachas, hoppar över cacheprocessering" #: src/libs/data-staging/Scheduler.cpp:574 msgid "Cancellation complete" msgstr "Avbrytande slutfört" #: src/libs/data-staging/Scheduler.cpp:588 msgid "Will wait 10s" msgstr "Kommer att vänta 10 s" #: src/libs/data-staging/Scheduler.cpp:594 msgid "Error in cache processing, will retry without caching" msgstr "Fel vid cacheprocessering, kommer att försöka igen utan cachning" #: src/libs/data-staging/Scheduler.cpp:603 msgid "Will retry without caching" msgstr "Kommer att försöka igen utan cachning<" #: src/libs/data-staging/Scheduler.cpp:621 msgid "Proxy has expired" msgstr "Proxyns livstid har gÃ¥tt ut" #: src/libs/data-staging/Scheduler.cpp:632 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "%i försök kvar, kommer att vänta %s innan nästa försök" #: src/libs/data-staging/Scheduler.cpp:648 msgid "Out of retries" msgstr "Slut pÃ¥ försök" #: src/libs/data-staging/Scheduler.cpp:650 msgid "Permanent failure" msgstr "Permanent fel" #: src/libs/data-staging/Scheduler.cpp:656 msgid "Finished successfully" msgstr "Avslutades framgÃ¥ngsrikt" #: src/libs/data-staging/Scheduler.cpp:666 msgid "Returning to generator" msgstr "Ã…tervänder till generator" #: src/libs/data-staging/Scheduler.cpp:840 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "Fil är mindre än %llu bytes, kommer att använda lokal leverans" #: src/libs/data-staging/Scheduler.cpp:894 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "Leveranstjänst pÃ¥ %s kan 
kopiera till %s" #: src/libs/data-staging/Scheduler.cpp:902 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "Leveranstjänst pÃ¥ %s kan kopiera frÃ¥n %s" #: src/libs/data-staging/Scheduler.cpp:915 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "Kunde inte hitta en lämplig leveranstjänst, tvingar lokal överföring" #: src/libs/data-staging/Scheduler.cpp:931 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "Använder inte leveranstjänst pÃ¥ %s eftersom den är full" #: src/libs/data-staging/Scheduler.cpp:958 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "Använder inte leveranstjänst %s pÃ¥ grund av tidigare fel" #: src/libs/data-staging/Scheduler.cpp:968 msgid "No remote delivery services are useable, forcing local delivery" msgstr "Inga fjärrleveranstjänster kan användas, tvingar lokal leverans" #: src/libs/data-staging/Scheduler.cpp:1172 msgid "Cancelling active transfer" msgstr "Avbryter aktiv överföring" #: src/libs/data-staging/Scheduler.cpp:1182 msgid "Processing thread timed out. Restarting DTR" msgstr "ProcesseringstrÃ¥d avbröts pÃ¥ grund av timeout. Startar om DTR" #: src/libs/data-staging/Scheduler.cpp:1250 msgid "Will use bulk request" msgstr "Kommer att använda massbegäran" #: src/libs/data-staging/Scheduler.cpp:1272 msgid "No delivery endpoints available, will try later" msgstr "Ingen leveransslutpunkt tillgänglig, kommer försöka senare" #: src/libs/data-staging/Scheduler.cpp:1291 msgid "Scheduler received NULL DTR" msgstr "Schemalägger mottog NULL-DTR" #: src/libs/data-staging/Scheduler.cpp:1301 msgid "Scheduler received invalid DTR" msgstr "Schemaläggare mottog ogiltig DTR" #: src/libs/data-staging/Scheduler.cpp:1390 msgid "Scheduler starting up" msgstr "Schemaläggare startar" #: src/libs/data-staging/Scheduler.cpp:1391 msgid "Scheduler configuration:" msgstr "Schemaläggarinställningar:" #: src/libs/data-staging/Scheduler.cpp:1392 #, c-format msgid " Pre-processor slots: %u" msgstr " Förprocesserings-slottar: %u" #: src/libs/data-staging/Scheduler.cpp:1393 #, c-format msgid " Delivery slots: %u" msgstr " Leverans-slottar: %u" #: src/libs/data-staging/Scheduler.cpp:1394 #, c-format msgid " Post-processor slots: %u" msgstr " Efterprocesserings-slottar: %u" #: src/libs/data-staging/Scheduler.cpp:1395 #, c-format msgid " Emergency slots: %u" msgstr " Akutslottar: %u" #: src/libs/data-staging/Scheduler.cpp:1396 #, c-format msgid " Prepared slots: %u" msgstr " Förberedda slottar: %u" #: src/libs/data-staging/Scheduler.cpp:1397 #, c-format msgid "" " Shares configuration:\n" "%s" msgstr "" " Andelsinställningar:\n" "%s" #: src/libs/data-staging/Scheduler.cpp:1400 msgid " Delivery service: LOCAL" msgstr " Leveranstjänst: LOKAL" #: src/libs/data-staging/Scheduler.cpp:1401 #, c-format msgid " Delivery service: %s" msgstr " Leveranstjänst: %s" #: src/libs/data-staging/Scheduler.cpp:1406 msgid "Failed to create DTR dump thread" msgstr "Misslyckades med att skapa DTR-dumpningstrÃ¥d" #: src/libs/data-staging/Scheduler.cpp:1423 #: src/services/data-staging/DataDeliveryService.cpp:531 #, c-format msgid "DTR %s cancelled" msgstr "DTR %s avbröts" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "Stänger ner schemaläggare" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "Schemaläggare stoppar, avstutar" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from 
scheduler in state %s" msgstr "Fick tillbaka DTR %s frÃ¥n schemaläggare i tillstÃ¥nd %s" #: src/libs/data-staging/examples/Generator.cpp:30 msgid "Generator started" msgstr "Generator startad" #: src/libs/data-staging/examples/Generator.cpp:31 msgid "Starting DTR threads" msgstr "Startar DTR-trÃ¥dar" #: src/libs/data-staging/examples/Generator.cpp:44 msgid "No valid credentials found, exiting" msgstr "Hittade inga giltiga referenser, avslutar" #: src/libs/data-staging/examples/Generator.cpp:55 #, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Problem med att skapa katalog (källa %s, destination %s)" #: src/services/a-rex/arex.cpp:340 src/services/candypond/CandyPond.cpp:569 #: src/services/data-staging/DataDeliveryService.cpp:705 #, c-format msgid "SOAP operation is not supported: %s" msgstr "SOAP-process stöds inte: %s" #: src/services/a-rex/arex.cpp:358 src/services/a-rex/arex.cpp:403 #, c-format msgid "Security Handlers processing failed: %s" msgstr "Säkerhetshanterarprocessering misslyckades: %s" #: src/services/a-rex/arex.cpp:381 msgid "" "Can't obtain configuration. Public information is disallowed for this user." msgstr "" "Kan inte erhÃ¥lla inställningar. Publik information är inte tillÃ¥ten för " "denna användare." #: src/services/a-rex/arex.cpp:388 msgid "Can't obtain configuration. Only public information is provided." msgstr "" "Kan inte erhÃ¥lla inställningar. Endast publik information tillhandahÃ¥lls." #: src/services/a-rex/arex.cpp:416 src/services/a-rex/rest/rest.cpp:740 #, c-format msgid "Connection from %s: %s" msgstr "Förbindelse frÃ¥n %s: %s" #: src/services/a-rex/arex.cpp:419 src/services/a-rex/rest/rest.cpp:744 #, c-format msgid "process: method: %s" msgstr "process: metod: %s" #: src/services/a-rex/arex.cpp:420 src/services/a-rex/rest/rest.cpp:745 #, c-format msgid "process: endpoint: %s" msgstr "process: slutpunkt: %s" #: src/services/a-rex/arex.cpp:445 #, c-format msgid "process: id: %s" msgstr "process: id: %s" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "process: subop: %s" msgstr "process: subop: %s" #: src/services/a-rex/arex.cpp:453 #, c-format msgid "process: subpath: %s" msgstr "process: subsökväg: %s" #: src/services/a-rex/arex.cpp:491 src/services/candypond/CandyPond.cpp:543 #: src/services/data-staging/DataDeliveryService.cpp:665 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "process: begäran=%s" #: src/services/a-rex/arex.cpp:496 src/services/candypond/CandyPond.cpp:548 #: src/services/data-staging/DataDeliveryService.cpp:670 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "indata definierar ej operation" #: src/services/a-rex/arex.cpp:499 src/services/candypond/CandyPond.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:673 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "process: operation: %s" #: src/services/a-rex/arex.cpp:526 msgid "POST request on special path is not supported" msgstr "POST-begäran pÃ¥ specialsökväg stöds inte" #: src/services/a-rex/arex.cpp:531 msgid "process: factory endpoint" msgstr "process: factoryslutpunkt" #: src/services/a-rex/arex.cpp:575 src/services/candypond/CandyPond.cpp:580 #: src/services/data-staging/DataDeliveryService.cpp:716 #: src/tests/echo/echo.cpp:158 #, c-format msgid "process: response=%s" msgstr "process: svar=%s" #: src/services/a-rex/arex.cpp:580 msgid "Per-job POST/SOAP requests are not supported" msgstr "Per-jobb POST/SOAP-begäran stöds inte" #: 
src/services/a-rex/arex.cpp:589 msgid "process: GET" msgstr "process: GET" #: src/services/a-rex/arex.cpp:590 #, c-format msgid "GET: id %s path %s" msgstr "GET: id %s sökväg %s" #: src/services/a-rex/arex.cpp:623 msgid "process: HEAD" msgstr "process: HEAD" #: src/services/a-rex/arex.cpp:624 #, c-format msgid "HEAD: id %s path %s" msgstr "HEAD: id %s sökväg %s" #: src/services/a-rex/arex.cpp:657 msgid "process: PUT" msgstr "process: PUT" #: src/services/a-rex/arex.cpp:690 msgid "process: DELETE" msgstr "process: DELETE" #: src/services/a-rex/arex.cpp:723 #, c-format msgid "process: method %s is not supported" msgstr "process: metod %s stöds inte" #: src/services/a-rex/arex.cpp:726 msgid "process: method is not defined" msgstr "process: metod är inte definierad" #: src/services/a-rex/arex.cpp:836 msgid "Failed to run Grid Manager thread" msgstr "Misslyckades med att köra Grid-Manager-tråd" #: src/services/a-rex/arex.cpp:889 #, c-format msgid "Failed to process configuration in %s" msgstr "Misslyckades med att processera inställningar i %s" #: src/services/a-rex/arex.cpp:894 msgid "No control directory set in configuration" msgstr "Ingen kontrollkatalog satt i inställningarna" #: src/services/a-rex/arex.cpp:898 msgid "No session directory set in configuration" msgstr "Ingen sessionskatalog satt i inställningarna" #: src/services/a-rex/arex.cpp:902 msgid "No LRMS set in configuration" msgstr "Inget LRMS satt i inställningarna" #: src/services/a-rex/arex.cpp:961 #, c-format msgid "Failed to create control directory %s" msgstr "Misslyckades med att skapa kontrollkatalog %s" #: src/services/a-rex/arex.cpp:965 #, c-format msgid "Failed to update control directory %s" msgstr "Misslyckades med att uppdatera kontrollkatalog %s" #: src/services/a-rex/arex.cpp:972 msgid "Failed to start GM threads" msgstr "Misslyckades med att starta GM-trådar" #: src/services/a-rex/arex.cpp:1008 #, c-format msgid "Created entry for JWT issuer %s" msgstr "Skapade post för JWT-utfärdare %s" #: src/services/a-rex/arex.cpp:1010 #, c-format msgid "Failed to create entry for JWT issuer %s" msgstr "Misslyckades med att skapa post för JWT-utfärdare %s" #: src/services/a-rex/arex.cpp:1013 #, c-format msgid "Empty data for JWT issuer %s" msgstr "Ingen data för JWT-utfärdare %s" #: src/services/a-rex/arex.cpp:1016 #, c-format msgid "Failed to read data for JWT issuer %s" msgstr "Misslyckades med att läsa data för JWT-utfärdare %s" #: src/services/a-rex/authop.cpp:26 msgid "CheckOperationAllowed: missing configuration" msgstr "CheckOperationAllowed: inställningar saknas" #: src/services/a-rex/authop.cpp:80 msgid "CheckOperationAllowed: allowed due to missing configuration scopes" msgstr "CheckOperationAllowed: tillåten p.g.a. saknade inställnings-scopes" #: src/services/a-rex/authop.cpp:83 #, c-format msgid "CheckOperationAllowed: token scopes: %s" msgstr "CheckOperationAllowed: token-scopes: %s" #: src/services/a-rex/authop.cpp:84 #, c-format msgid "CheckOperationAllowed: configuration scopes: %s" msgstr "CheckOperationAllowed: inställnings-scopes: %s" #: src/services/a-rex/authop.cpp:87 msgid "CheckOperationAllowed: allowed due to matching scopes" msgstr "CheckOperationAllowed: tillåten p.g.a.
matchande scopes" #: src/services/a-rex/authop.cpp:91 msgid "CheckOperationAllowed: token scopes do not match required scopes" msgstr "CheckOperationAllowed: token-scopes matchar inte begärda scopes" #: src/services/a-rex/authop.cpp:97 msgid "CheckOperationAllowed: allowed for TLS connection" msgstr "CheckOperationAllowed: tillÃ¥ten för TLS-förbindelse" #: src/services/a-rex/authop.cpp:101 msgid "CheckOperationAllowed: no supported identity found" msgstr "CheckOperationAllowed: ingen stödd identitet funnen" #: src/services/a-rex/cachecheck.cpp:37 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:710 #, c-format msgid "Error with cache configuration: %s" msgstr "Fel med cacheinställningar: %s" #: src/services/a-rex/cachecheck.cpp:53 #: src/services/candypond/CandyPond.cpp:318 msgid "Error with cache configuration" msgstr "Fel med cacheinställningar" #: src/services/a-rex/cachecheck.cpp:78 #: src/services/candypond/CandyPond.cpp:146 #: src/services/candypond/CandyPond.cpp:343 #, c-format msgid "Looking up URL %s" msgstr "SlÃ¥r upp URL: %s" #: src/services/a-rex/cachecheck.cpp:80 #: src/services/candypond/CandyPond.cpp:155 #, c-format msgid "Cache file is %s" msgstr "Cachefil är %s" #: src/services/a-rex/change_activity_status.cpp:22 #: src/services/a-rex/put.cpp:163 src/services/a-rex/put.cpp:204 #, c-format msgid "%s: there is no such job: %s" msgstr "%s: det finns inget jobb: %s" #: src/services/a-rex/change_activity_status.cpp:30 #, c-format msgid "%s: put log %s: there is no payload" msgstr "%s: put logg %s: det finns ingen nyttolast" #: src/services/a-rex/change_activity_status.cpp:36 #, c-format msgid "%s: put log %s: unrecognized payload" msgstr "%s: put logg %s: okänd nyttolast" #: src/services/a-rex/change_activity_status.cpp:75 msgid "A-REX REST: Failed to resume job" msgstr "A-REX REST: Misslyckades med att Ã¥teruppta jobb" #: src/services/a-rex/change_activity_status.cpp:79 #, c-format msgid "A-REX REST: State change not allowed: from %s to %s" msgstr "A-REX REST: TillstÃ¥ndsändring inte tillÃ¥ten: frÃ¥n %s till %s" #: src/services/a-rex/create_activity.cpp:24 msgid "NEW: put new job: there is no payload" msgstr "NYTT: put nytt jobb: det finns inge nyttolast" #: src/services/a-rex/create_activity.cpp:28 msgid "NEW: put new job: max jobs total limit reached" msgstr "NYTT: put nytt jobb: gränsen för max totalt antal jobb nÃ¥dd" #: src/services/a-rex/delegation/DelegationStore.cpp:47 msgid "Wiping and re-creating whole storage" msgstr "Raderar och Ã¥terskapar hela lagret" #: src/services/a-rex/delegation/DelegationStore.cpp:207 #: src/services/a-rex/delegation/DelegationStore.cpp:309 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "DelegationStore: TouchConsumer misslyckades med att skapa fil %s" #: src/services/a-rex/delegation/DelegationStore.cpp:269 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" "DelegationStore: PeriodicCheckConsumers misslyckades med att Ã¥teruppta " "iterator" #: src/services/a-rex/delegation/DelegationStore.cpp:289 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" "DelegationStore: PeriodicCheckConsumers misslyckades med att ta bort gammal " "delegering %s - %s" #: src/services/a-rex/get.cpp:172 src/services/a-rex/get.cpp:227 #: src/services/a-rex/get.cpp:313 #, c-format msgid "Get: there is no job %s - %s" msgstr "Get: det finns inget jobb %s - %s" #: src/services/a-rex/get.cpp:380 #, c-format msgid "Head: there is no 
job %s - %s" msgstr "Head: det finns inget jobb %s - %s" #: src/services/a-rex/get.cpp:436 msgid "Failed to extract credential information" msgstr "Misslyckades med att extrahera referensinformation" #: src/services/a-rex/get.cpp:439 #, c-format msgid "Checking cache permissions: DN: %s" msgstr "Kontrollerar cacheÃ¥tkomsträttigheter: DN: %s" #: src/services/a-rex/get.cpp:440 #, c-format msgid "Checking cache permissions: VO: %s" msgstr "Kontrollerar cacheÃ¥tkomsträttigheter: VO: %s" #: src/services/a-rex/get.cpp:442 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "Kontrollerar cacheÃ¥tkomsträttigheter: VOMS attr: %s" #: src/services/a-rex/get.cpp:452 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "Ã…tkomst till cache tillÃ¥tet för %s av DN %s" #: src/services/a-rex/get.cpp:455 #, c-format msgid "DN %s doesn't match %s" msgstr "DN %s matchar inte %s" #: src/services/a-rex/get.cpp:458 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "Ã…tkomst till cache tillÃ¥tet för %s av VO %s" #: src/services/a-rex/get.cpp:461 #, c-format msgid "VO %s doesn't match %s" msgstr "VO %s matchar inte %s" #: src/services/a-rex/get.cpp:467 src/services/a-rex/get.cpp:486 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "Felaktigt referensvärde %s i cacheÃ¥tkomstregler" #: src/services/a-rex/get.cpp:475 src/services/a-rex/get.cpp:494 #, c-format msgid "VOMS attr %s matches %s" msgstr "VOMS attr %s matchar %s" #: src/services/a-rex/get.cpp:476 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "Ã…tkomst till cache tillÃ¥tet för %s av VO %s och roll %s" #: src/services/a-rex/get.cpp:479 src/services/a-rex/get.cpp:498 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "VOMS attr %s matchar inte %s" #: src/services/a-rex/get.cpp:495 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "Ã…tkomst till cache tillÃ¥tet för %s av VO %s och grupp %s" #: src/services/a-rex/get.cpp:501 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "Okänd referenstyp %s för URL-mönster %s" #: src/services/a-rex/get.cpp:507 #, c-format msgid "No match found in cache access rules for %s" msgstr "Hittade ingen match i cacheÃ¥tkomstregler för %s" #: src/services/a-rex/get.cpp:517 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "Hämta frÃ¥n cache: Söker i cache efter %s" #: src/services/a-rex/get.cpp:520 #, c-format msgid "Get from cache: Invalid URL %s" msgstr "Hämta frÃ¥n cache: Ogiltig URL: %s" #: src/services/a-rex/get.cpp:537 msgid "Get from cache: Error in cache configuration" msgstr "Hämta frÃ¥n cache: Fel med cacheinställningar" #: src/services/a-rex/get.cpp:546 msgid "Get from cache: File not in cache" msgstr "Hämta frÃ¥n cache: Fil inte i cache" #: src/services/a-rex/get.cpp:549 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "Hämta frÃ¥n cache: kunde inte komma Ã¥t cachad fil: %s" #: src/services/a-rex/get.cpp:559 msgid "Get from cache: Cached file is locked" msgstr "Hämta frÃ¥n cache: Cachad fil är lÃ¥st" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" "Kan inte skapa kataloger för loggfil %s. Meddelanden kommer att loggas i " "denna logg" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. 
Cache cleaning messages will be logged to " "this log" msgstr "" "Kan inte öppna cacheloggfil %s: %s. Cacherensningsmeddelanden kommer att " "loggas till denna logg" #: src/services/a-rex/grid-manager/GridManager.cpp:114 msgid "Failed to start cache clean script" msgstr "Misslyckades med att starta cacherensningsskript" #: src/services/a-rex/grid-manager/GridManager.cpp:115 msgid "Cache cleaning script failed" msgstr "Cacherensningsskript misslyckades" #: src/services/a-rex/grid-manager/GridManager.cpp:183 #, c-format msgid "External request for attention %s" msgstr "Extern begäran om uppmärksamhet %s" #: src/services/a-rex/grid-manager/GridManager.cpp:201 #, c-format msgid "Failed to open heartbeat file %s" msgstr "Misslyckades med att öppna hjärtslagsfil %s" #: src/services/a-rex/grid-manager/GridManager.cpp:223 msgid "Starting jobs processing thread" msgstr "Startar jobbprocesserings-tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:224 #, c-format msgid "Used configuration file %s" msgstr "Använd inställningsfil %s" #: src/services/a-rex/grid-manager/GridManager.cpp:232 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" "Fel vid initiering av delegeringsdatabas i %s. Kanske är åtkomsträttigheter " "inte lämpliga. Returnerat fel är: %s." #: src/services/a-rex/grid-manager/GridManager.cpp:244 msgid "Failed to start new thread: cache won't be cleaned" msgstr "Misslyckades med att starta ny tråd: cache kommer ej att rensas" #: src/services/a-rex/grid-manager/GridManager.cpp:251 msgid "Failed to activate Jobs Processing object, exiting Grid Manager thread" msgstr "" "Misslyckades med att aktivera jobbprocesseringsobjekt, avslutar grid-manager-" "tråd" #: src/services/a-rex/grid-manager/GridManager.cpp:260 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." msgstr "" "Fel när kommunikationsgränssnitt lades till i %s. Kanske kör redan en annan " "instans av A-REX." #: src/services/a-rex/grid-manager/GridManager.cpp:263 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" "Fel när kommunikationsgränssnitt lades till i %s. Kanske är " "åtkomsträttigheter inte lämpliga." #: src/services/a-rex/grid-manager/GridManager.cpp:270 msgid "Failed to start new thread for monitoring job requests" msgstr "Misslyckades med att starta ny tråd för monitorering av jobbegäranden" #: src/services/a-rex/grid-manager/GridManager.cpp:276 msgid "Picking up left jobs" msgstr "Plockar upp lämnade jobb" #: src/services/a-rex/grid-manager/GridManager.cpp:279 msgid "Starting data staging threads" msgstr "Startar datastaging-trådar" #: src/services/a-rex/grid-manager/GridManager.cpp:283 msgid "Starting jobs' monitoring" msgstr "Startar jobbmonitorering" #: src/services/a-rex/grid-manager/GridManager.cpp:291 #, c-format msgid "" "SSHFS mount point of session directory (%s) is broken - waiting for " "reconnect ..." msgstr "" "SSHFS-monteringspunkt för sessionskatalogen (%s) är trasig - väntar på " "återuppkoppling ..." #: src/services/a-rex/grid-manager/GridManager.cpp:295 #, c-format msgid "" "SSHFS mount point of runtime directory (%s) is broken - waiting for " "reconnect ..." msgstr "" "SSHFS-monteringspunkt för runtimekatalogen (%s) är trasig - väntar på " "återuppkoppling ..."
#: src/services/a-rex/grid-manager/GridManager.cpp:300 #, c-format msgid "" "SSHFS mount point of cache directory (%s) is broken - waiting for " "reconnect ..." msgstr "" "SSHFS-monteringspunkt för cachekatalogen (%s) är trasig - väntar på " "återuppkoppling ..." #: src/services/a-rex/grid-manager/GridManager.cpp:349 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "Föräldralöst delegeringslås detekterat (%s) - städar" #: src/services/a-rex/grid-manager/GridManager.cpp:354 msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "" "Misslyckades med att erhålla delegeringslås för att ta bort föräldralösa lås" #: src/services/a-rex/grid-manager/GridManager.cpp:368 msgid "Waking up" msgstr "Vaknar upp" #: src/services/a-rex/grid-manager/GridManager.cpp:371 msgid "Stopping jobs processing thread" msgstr "Stoppar jobbprocesseringstråd" #: src/services/a-rex/grid-manager/GridManager.cpp:373 msgid "Exiting jobs processing thread" msgstr "Avslutar jobbprocesseringstråd" #: src/services/a-rex/grid-manager/GridManager.cpp:391 msgid "Requesting to stop job processing" msgstr "Begär att avsluta jobbprocessering" #: src/services/a-rex/grid-manager/GridManager.cpp:399 msgid "Waiting for main job processing thread to exit" msgstr "Väntar på att huvud-jobbprocesseringstråden avslutas" #: src/services/a-rex/grid-manager/GridManager.cpp:401 msgid "Stopped job processing" msgstr "Avslutade jobbprocessering" #: src/services/a-rex/grid-manager/accounting/AAR.cpp:73 msgid "Cannot find information abouto job submission endpoint" msgstr "Kan inte hitta information om jobbinsändningsslutpunkt" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:58 #, c-format msgid "Failed to read database schema file at %s" msgstr "Misslyckades med att läsa databasschemafil på %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:68 msgid "Accounting database initialized successfully" msgstr "Bokföringsdatabas initierad" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:70 msgid "Accounting database connection has been established" msgstr "Bokföringsdatabasförbindelse har etablerats" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:80 #, c-format msgid "%s. SQLite database error: %s" msgstr "%s. SQLite-databasfel: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:82 #, c-format msgid "SQLite database error: %s" msgstr "SQLite-databasfel: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:110 #, c-format msgid "Directory %s to store accounting database has been created." msgstr "Katalog %s som ska lagra bokföringsdatabasen har skapats." #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:112 #, c-format msgid "" "Accounting database cannot be created. Faile to create parent directory %s." msgstr "" "Bokföringsdatabasen kan inte skapas. Misslyckades med att skapa " "föräldrakatalog %s." #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:116 #, c-format msgid "Accounting database cannot be created: %s is not a directory" msgstr "Bokföringsdatabasen kan inte skapas:
%s är inte en katalog" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:123 msgid "Failed to initialize accounting database" msgstr "Misslyckades med att initiera bokföringsdatabas" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:130 #, c-format msgid "Accounting database file (%s) is not a regular file" msgstr "Bokföringsdatabasfil (%s) är inte en vanlig fil" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:136 msgid "Error opening accounting database" msgstr "Fel vid öppnande av bokföringsdatabas" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:154 msgid "Closing connection to SQLite accounting database" msgstr "Stänger förbindelse till SQLite-bokföringsdatabas" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:243 #, c-format msgid "Failed to fetch data from %s accounting database table" msgstr "Misslyckades med att hämta data från %s bokföringsdatabastabell" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:260 #, c-format msgid "Failed to add '%s' into the accounting database %s table" msgstr "" "Misslyckades med att lägga till '%s' till bokföringsdatabasens %s-tabell" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:327 msgid "Failed to fetch data from accounting database Endpoints table" msgstr "" "Misslyckades med att hämta data från bokföringsdatabasens Endpoints-tabell" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:344 #, c-format msgid "" "Failed to add '%s' URL (interface type %s) into the accounting database " "Endpoints table" msgstr "" "Misslyckades med att lägga till '%s' URL (gränssnittstyp %s) till " "bokföringsdatabasens Endpoints-tabell" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:370 #, c-format msgid "Failed to query AAR database ID for job %s" msgstr "Misslyckades med att fråga efter AAR-databas-ID för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:431 #, c-format msgid "Failed to insert AAR into the database for job %s" msgstr "Misslyckades med att sätta in AAR i databasen för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:432 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:481 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:512 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:528 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:544 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:565 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:581 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:596 #, c-format msgid "SQL statement used: %s" msgstr "Använd SQL-sats: %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:437 #, c-format msgid "Failed to write authtoken attributes for job %s" msgstr "Misslyckades med att skriva auktoriserings-token-attribut för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:441 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:498 #, c-format msgid "Failed to write event records for job %s" msgstr "Misslyckades med att skriva händelseposter för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:452 #, c-format msgid "" "Cannot to update AAR. Cannot find registered AAR for job %s in accounting " "database." msgstr "" "Kan inte uppdatera AAR.
Kan inte hitta registrerad AAR för jobb %s i " "bokföringsdatabasen." #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:480 #, c-format msgid "Failed to update AAR in the database for job %s" msgstr "Misslyckades med att uppdatera AAR i databasen för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:486 #, c-format msgid "Failed to write RTEs information for the job %s" msgstr "Misslyckades med att skriva RTE-information för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:490 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:494 #, c-format msgid "Failed to write data transfers information for the job %s" msgstr "Misslyckades med att skriva dataöverföringsinformation för jobb %s" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:590 #, c-format msgid "Unable to add event: cannot find AAR for job %s in accounting database." msgstr "" "Kan inte lägga till händelse: kan inte hitta AAR för jobb %s i " "bokföringsdatabasen." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:73 #, c-format msgid "Unknown option %s" msgstr "Okänt alternativ %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Job ID argument is required." msgstr "Jobb-id-argument är obligatoriskt." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "Path to user's proxy file should be specified." msgstr "Sökväg till användarens proxyfil ska anges." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "User name should be specified." msgstr "Användarnamn ska anges." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:98 msgid "Path to .local job status file is required." msgstr "Sökväg till .local jobbstatusfil är obligatorisk." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:106 msgid "Generating ceID prefix from hostname automatically" msgstr "Genererar ceID-prefix från värdnamn automatiskt" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:109 msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "" "Kan inte bestämma värdnamn från gethostname() för att generera ceID " "automatiskt." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:118 #, c-format msgid "ceID prefix is set to %s" msgstr "ceID-prefix är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:126 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "Hämtar nuvarande klockslag för BLAH-tolk-logg: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:135 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" "Tolkar .local-fil för att erhålla jobb-specifika identifierare och info" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "globalid is set to %s" msgstr "globalid är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:148 #, c-format msgid "headnode is set to %s" msgstr "headnode är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:151 #, c-format msgid "interface is set to %s" msgstr "gränssnitt är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" "Det finns inget lokalt LRMS-ID. Meddelande kommer inte att skrivas till BLAH-" "logg."
#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "localid is set to %s" msgstr "localid är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:161 #, c-format msgid "queue name is set to %s" msgstr "könamn är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:164 #, c-format msgid "owner subject is set to %s" msgstr "ägarsubjekt är satt till %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:166 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" "Jobb avslutades inte framgångsrikt. Meddelande kommer inte att skrivas till " "BLAH-logg." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:174 #, c-format msgid "Job timestamp successfully parsed as %s" msgstr "Jobbets klockslag tolkades framgångsrikt som %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:178 msgid "Can not read information from the local job status file" msgstr "Kan inte läsa information från den lokala jobbstatusfilen" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:194 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly. Please submit the bug to bugzilla." msgstr "" "Insändningsgränssnittet %s stöds inte. Det ser ut som om arc-blahp-logger " "måste uppdateras. Sänd in buggen till bugzilla." #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:204 msgid "Parsing VOMS AC to get FQANs information" msgstr "Tolkar VOMS-AC för att få FQAN-information" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:217 #, c-format msgid "Found VOMS AC attribute: %s" msgstr "Hittade VOMS-AC-attribut: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:230 msgid "VOMS AC attribute is a tag" msgstr "VOMS-AC-attribut är en tagg" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 msgid "Skipping policyAuthority VOMS AC attribute" msgstr "Hoppar över policyAuthority VOMS-AC-attribut" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:241 msgid "VOMS AC attribute is the FQAN" msgstr "VOMS-AC-attribut är FQAN" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:249 msgid "No FQAN found. Using None as userFQAN value" msgstr "Hittade inget FQAN.
Använder None som användar-FQAN-värde" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:263 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "Sätter samman BLAH-parser-logg-post: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:268 #, c-format msgid "Writing the info to the BLAH parser log: %s" msgstr "Skriver informationen till BLAH-tolk-loggen: %s" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:276 #, c-format msgid "Cannot open BLAH log file '%s'" msgstr "Kan inte öppna BLAH-loggfil '%s'" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:36 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "Saknar cancel-%s-job - avbrytande av jobb kanske inte fungerar" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:40 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" "Saknar submit-%s-job - insändning av jobb till LRMS kanske inte fungerar" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:44 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "Saknar scan-%s-job - kan missa när jobb har slutat exekvera" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:58 #, c-format msgid "Wrong option in %s" msgstr "Felaktigt alternativ i %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:69 #, c-format msgid "Can't read configuration file at %s" msgstr "Kan inte läsa inställningsfil på %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:79 #, c-format msgid "Can't recognize type of configuration file at %s" msgstr "Känner inte igen typ av inställningsfil på %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:82 msgid "Could not determine configuration type or configuration is empty" msgstr "Kunde inte bestämma typ av inställningar eller inställningar är tomma" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:163 msgid "lrms is empty" msgstr "lrms är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:196 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:205 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:214 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:223 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:232 msgid "Missing number in maxjobs" msgstr "Saknar nummer i maxjobs" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:199 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:208 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:217 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:226 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:235 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "Felaktigt nummer i maxjobs: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:245 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "Felaktigt nummer i wakeupperiod: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:251 msgid "mail parameter is empty" msgstr "mail-parametern är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:257 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:261 msgid "Wrong number in defaultttl command" msgstr "Felaktigt nummer i defaultttl-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:267 msgid "Wrong number in maxrerun command" msgstr "Felaktigt nummer i maxrerun-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:274 msgid "State name for plugin is missing" msgstr "Tillståndsnamn för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:278 msgid "Options
for plugin are missing" msgstr "Alternativ för plugin saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:281 #, c-format msgid "Failed to register plugin for state %s" msgstr "Misslyckades med att registrera plugin för tillstånd %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:287 msgid "Session root directory is missing" msgstr "Sessions-rotkatalog saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:290 msgid "Junk in sessiondir command" msgstr "Skräp i sessiondir-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:302 msgid "Missing directory in controldir command" msgstr "Saknar katalog i controldir-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:307 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" "'control'-inställningsalternativet stöds inte längre, använd 'controldir' " "istället" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:312 msgid "User for helper program is missing" msgstr "Användare för hjälpprogram saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:315 msgid "Only user '.' for helper program is supported" msgstr "Endast användare '.' för hjälpprogram stöds" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:318 msgid "Helper program is missing" msgstr "Hjälpprogram saknas" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:339 msgid "Wrong option in fixdirectories" msgstr "Felaktigt alternativ i fixdirectories" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 msgid "Wrong option in delegationdb" msgstr "Felaktigt alternativ i delegationdb" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:375 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 msgid "forcedefaultvoms parameter is empty" msgstr "forcedefaultvoms-parametern är tom" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:486 msgid "Wrong number in maxjobdesc command" msgstr "Felaktigt nummer i maxjobdesc-kommando" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:535 msgid "Missing file name in [arex/jura] logfile" msgstr "Saknat filnamn i [arex/jura] loggfil" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:546 #, c-format msgid "Wrong number in urdelivery_frequency: %s" msgstr "Felaktigt nummer i urdelivery_frequency: %s" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:601 msgid "No queue name given in queue block name" msgstr "Inget könamn givet i queue-blocknamn" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:617 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "advertisedvo parameter is empty" msgstr "advertisedvo-parametern är tom" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:117 #, c-format msgid "\tSession root dir : %s" msgstr "\tSessionsrotkat : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:118 #, c-format msgid "\tControl dir : %s" msgstr "\tKontrollkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:119 #, c-format msgid "\tdefault LRMS : %s" msgstr "\tförvalt LRMS : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tdefault queue : %s" msgstr "\tförvald kö : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:121 #, c-format msgid "\tdefault ttl : %u" msgstr "\tförvald ttl : %u" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 msgid "No valid caches found in configuration, caching is disabled" msgstr "Hittade inga giltiga cachar i inställningar, cachning är avstängd" #:
src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 #, c-format msgid "\tCache : %s" msgstr "\tCache : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:133 #, c-format msgid "\tCache link dir : %s" msgstr "\tCachelänkkatalog : %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:136 #, c-format msgid "\tCache (read-only): %s" msgstr "\tCache (read-only): %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:138 msgid "\tCache cleaning enabled" msgstr "\tCacherensning påslagen" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:139 msgid "\tCache cleaning disabled" msgstr "\tCacherensning avstängd" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:256 msgid "Starting controldir update tool." msgstr "Startar verktyget för att uppdatera kontrollkatalogen." #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:258 msgid "Failed to start controldir update tool." msgstr "" "Misslyckades med att starta verktyget för att uppdatera kontrollkatalogen." #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:261 #, c-format msgid "Failed to run controldir update tool. Exit code: %i" msgstr "" "Misslyckades med att köra verktyget för att uppdatera kontrollkatalogen. " "Avslutningskod: %i" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:381 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." msgstr "" "Globus platsvariabelsubstitution stöds inte längre. Ange sökväg direkt." #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:35 msgid "Can't read configuration file" msgstr "Kan inte läsa inställningsfil" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:41 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:29 msgid "Can't recognize type of configuration file" msgstr "Känner inte igen typ av inställningsfil" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:47 msgid "Configuration error" msgstr "Inställningsfel" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:77 msgid "Bad number in maxdelivery" msgstr "Felaktigt nummer i maxdelivery" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:83 msgid "Bad number in maxemergency" msgstr "Felaktigt nummer i maxemergency" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:89 msgid "Bad number in maxprocessor" msgstr "Felaktigt nummer i maxprocessor" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxprepared" msgstr "Felaktigt nummer i maxprepared" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxtransfertries" msgstr "Felaktigt nummer i maxtransfertries" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:112 msgid "Bad number in speedcontrol" msgstr "Felaktigt nummer i speedcontrol" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:123 #, c-format msgid "Bad number in definedshare %s" msgstr "Felaktigt nummer i definedshare %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:132 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "Felaktig URL i leveranstjänsten: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:143 msgid "Bad number in remotesizelimit" msgstr "Felaktigt nummer i remotesizelimit" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:168 msgid "Bad value for loglevel" msgstr "Felaktigt värde för loglevel" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:24 msgid "Can't open configuration file" msgstr "Kan inte öppna inställningsfil" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:45
msgid "Not enough parameters in copyurl" msgstr "Ej tillräckligt antal parametrar i copyurl" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:54 msgid "Not enough parameters in linkurl" msgstr "Ej tillräckligt antal parametrar i linkurl" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:185 #, c-format msgid "Wrong directory in %s" msgstr "Fel katalog i %s" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:104 #, c-format msgid "Failed setting file owner: %s" msgstr "Misslyckades med ange filägare: %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:36 #, c-format msgid "Could not read data staging configuration from %s" msgstr "Kunde inte läsa datastaginginställningar frÃ¥n %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:44 #, c-format msgid "Can't read transfer states from %s. Perhaps A-REX is not running?" msgstr "Kan inte läsa överföringstillstÃ¥nd frÃ¥n %s. Kanske kör inte A-REX?" #: src/services/a-rex/grid-manager/gm_jobs.cpp:100 msgid "gm-jobs displays information on current jobs in the system." msgstr "gm-jobs visar information om nuvarande jobb i systemet." #: src/services/a-rex/grid-manager/gm_jobs.cpp:105 msgid "display more information on each job" msgstr "visa mer information om varje jobb" #: src/services/a-rex/grid-manager/gm_jobs.cpp:110 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "använd särskild inställningsfil" #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "fil" #: src/services/a-rex/grid-manager/gm_jobs.cpp:115 msgid "read information from specified control directory" msgstr "läs information frÃ¥n angiven kontrollkatalog" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "dir" msgstr "katalog" #: src/services/a-rex/grid-manager/gm_jobs.cpp:120 msgid "print summary of jobs in each transfer share" msgstr "skriv ut sammanfattning av jobb i varje överföringsandel" #: src/services/a-rex/grid-manager/gm_jobs.cpp:125 msgid "do not print list of jobs" msgstr "skriv inte ut jobblista" #: src/services/a-rex/grid-manager/gm_jobs.cpp:130 msgid "do not print number of jobs in each state" msgstr "skriv inte ut antal jobb i varje tillstÃ¥nd" #: src/services/a-rex/grid-manager/gm_jobs.cpp:135 msgid "print state of the service" msgstr "skriv ut tjänstens tillstÃ¥nd" #: src/services/a-rex/grid-manager/gm_jobs.cpp:140 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "visa endast jobb som ägs av användare med angiv(et/na) subjektnamn" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "dn" msgstr "dn" #: src/services/a-rex/grid-manager/gm_jobs.cpp:145 msgid "request to cancel job(s) with specified ID(s)" msgstr "begär att avbryta jobb med angiv(et/na) ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 #: src/services/a-rex/grid-manager/gm_kick.cpp:30 msgid "id" msgstr "id" #: src/services/a-rex/grid-manager/gm_jobs.cpp:150 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" "begär att avbryta jobb som ägs av användare med angiv(et/na) subjektnamn" #: src/services/a-rex/grid-manager/gm_jobs.cpp:155 msgid "request to clean job(s) with specified ID(s)" msgstr "begär att ta bort jobb med 
angiv(et/na) ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:160 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" "begär att ta bort jobb som ägs av användare med angiv(et/na) subjektnamn" #: src/services/a-rex/grid-manager/gm_jobs.cpp:165 msgid "show only jobs with specified ID(s)" msgstr "visa endast jobb med angiv(et/na) ID<" #: src/services/a-rex/grid-manager/gm_jobs.cpp:170 msgid "print list of available delegation IDs" msgstr "skriv ut lista med tillgängliga delegerings-ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:175 msgid "print delegation token of specified ID(s)" msgstr "skriv ut delegeringstoken med angiv(et/na) ID" #: src/services/a-rex/grid-manager/gm_jobs.cpp:180 msgid "print main delegation token of specified Job ID(s)" msgstr "skriv ut huvuddelegeringstoken för angiv(et/na) jobb-id" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "job id" msgstr "jobb-id" #: src/services/a-rex/grid-manager/gm_jobs.cpp:185 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" "skriv ut begärda element (jobblista, delegerings-id och token) till fil" #: src/services/a-rex/grid-manager/gm_jobs.cpp:186 msgid "file name" msgstr "filnamn" #: src/services/a-rex/grid-manager/gm_jobs.cpp:209 #, c-format msgid "Using configuration at %s" msgstr "Använder inställningar pÃ¥ %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:232 #, c-format msgid "Failed to open output file '%s'" msgstr "Misslyckades med att öppna utdatafil '%s'" #: src/services/a-rex/grid-manager/gm_jobs.cpp:241 msgid "Looking for current jobs" msgstr "Letar efter nuvarande jobb" #: src/services/a-rex/grid-manager/gm_jobs.cpp:278 #, c-format msgid "Job: %s : ERROR : Unrecognizable state" msgstr "Jobb: %s : Fel : Okänt tillstÃ¥nd" #: src/services/a-rex/grid-manager/gm_jobs.cpp:287 #, c-format msgid "Job: %s : ERROR : No local information." msgstr "Jobb: %s : Fel : Ingen lokal information." #: src/services/a-rex/grid-manager/gm_jobs.cpp:461 #, c-format msgid "Job: %s : ERROR : Failed to put cancel mark" msgstr "Jobb: %s : Fel : Misslyckades med att sätta avbrytsmarkering" #: src/services/a-rex/grid-manager/gm_jobs.cpp:465 #, c-format msgid "Job: %s : Cancel request put but failed to communicate to service" msgstr "" "Jobb: %s : Begäran att avbryta satt men misslyckades att meddela tjänsten" #: src/services/a-rex/grid-manager/gm_jobs.cpp:467 #, c-format msgid "Job: %s : Cancel request put and communicated to service" msgstr "Jobb: %s : Begäran att avbryta satt och meddelad till tjänsten" #: src/services/a-rex/grid-manager/gm_jobs.cpp:478 #, c-format msgid "Job: %s : ERROR : Failed to put clean mark" msgstr "Jobb: %s : Fel : Misslyckades med att sätta borttagningsmarkering" #: src/services/a-rex/grid-manager/gm_jobs.cpp:482 #, c-format msgid "Job: %s : Clean request put but failed to communicate to service" msgstr "" "Jobb: %s : Begäran om borttagning satt men misslyckades att meddela tjänsten" #: src/services/a-rex/grid-manager/gm_jobs.cpp:484 #, c-format msgid "Job: %s : Clean request put and communicated to service" msgstr "Jobb: %s : Begäran om borttagning satt och meddelad till tjänsten" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control directory. If " "no directory is given it uses the control directory found in the " "configuration file." msgstr "" "gm-kick väcker den A-REX som motsvarar den angivna kontrollkatalogen. 
Om " "ingen katalog anges används kontrollkatalogen som hittas i inställningsfilen." #: src/services/a-rex/grid-manager/gm_kick.cpp:29 msgid "inform about changes in particular job (can be used multiple times)" msgstr "informera om ändringar i enstaka jobb (kan användas mer än en gÃ¥ng)" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, c-format msgid "Failed to acquire source: %s" msgstr "Misslyckades med att erhÃ¥lla källa: %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, c-format msgid "Failed to resolve %s" msgstr "Misslyckades med att slÃ¥ upp %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, c-format msgid "Failed to check %s" msgstr "Misslyckades med att kontrollera %s" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "jobbeskrivningsfil [proxyfil]" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." msgstr "" "inputcheck kontrollerar att indatafiler som angivits i jobbeskrivningen är " "tillgängliga och Ã¥tkomliga när referenserna i den givna proxyfilen används." #: src/services/a-rex/grid-manager/inputcheck.cpp:88 msgid "Wrong number of arguments given" msgstr "Fel antal argument angivna" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:75 #, c-format msgid "" "DTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobs" msgstr "" "DTR-generator väntat pÃ¥ att processera: %d jobb att avbryta, %d DTRer, %d " "nya jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:89 #, c-format msgid "%s: Job cancel request from DTR generator to scheduler" msgstr "%s: Begäran att avbryta jobb frÃ¥n DTR-generator till schemaläggare" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:94 #, c-format msgid "%s: Returning canceled job from DTR generator" msgstr "%s: Returnerar avbrutet jobb frÃ¥n DTR-generator" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:135 #, c-format msgid "%s: Re-requesting attention from DTR generator" msgstr "%s: Begär uppmärksamhet frÃ¥n DTR-generator igen" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:145 #, c-format msgid "DTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobs" msgstr "DTR-generator processerade: %d jobb att avbryta, %d DTRer, %d nya jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:164 msgid "Exiting Generator thread" msgstr "Avslutar generator-trÃ¥d" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:236 msgid "Shutting down data staging threads" msgstr "Stänger ner datastaging-trÃ¥dar" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:246 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:259 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:287 msgid "DTRGenerator is not running!" 
msgstr "DTR-generator kör inte" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:249 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "Mottog DTR %s under generatoravstängning - kan inte processeras" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:263 msgid "DTRGenerator was sent null job" msgstr "DTR-generator blev tillsänd null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:272 #, c-format msgid "%s: Received job in DTR generator" msgstr "%s: Mottog jobb i DTR-generator" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:275 #, c-format msgid "%s: Failed to receive job in DTR generator" msgstr "%s: Misslyckades med att motta jobb i DTR-generator" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:282 msgid "DTRGenerator got request to cancel null job" msgstr "DTR-generator fick begäran att avsluta null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:297 msgid "DTRGenerator is queried about null job" msgstr "DTR-generator fick frÃ¥ga om null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:327 msgid "DTRGenerator is asked about null job" msgstr "DTR-generator fick frÃ¥ga om null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:355 msgid "DTRGenerator is requested to remove null job" msgstr "DTR-generator fick begäran att ta bort null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:362 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:370 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" "%s: Försöker att ta bort jobb frÃ¥n datastaging som fortfarande är aktivt" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:378 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "%s: Försöker att ta bort jobb frÃ¥n datastaging som inte existerar" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:389 #, c-format msgid "%s: Invalid DTR" msgstr "%s: Ogiltig DTR" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:406 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "%s: Mottog DTR %s att kopiera fil %s i tillstÃ¥nd %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:410 #, c-format msgid "%s: Received DTR belongs to inactive job" msgstr "%s: Mottagen DTR tillhör inaktivt jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:427 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1065 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:474 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:532 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:646 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:856 #, c-format msgid "%s: Failed reading local information" msgstr "%s: Misslyckades med att läsa lokal information" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:436 #, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "%s: DTR %s att kopiera fil %s misslyckades" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:442 #, c-format msgid "%s: Cancelling other DTRs" msgstr "%s: Avbryter övriga DTRer" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:452 #, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "%s: DTR %s att kopiera till %s misslyckades men är inte obligatorisk" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:462 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:727 #, c-format msgid "%s: Failed to read list of output files" 
msgstr "%s: Misslyckades med att läsa lista med utdatafiler" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:476 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:617 #, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "%s: Misslyckades med att läsa dynamiska utdatafiler i %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:478 #, c-format msgid "%s: Going through files in list %s" msgstr "%s: GÃ¥r igenom filer i lista %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:482 #, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "%s: Tar bort %s frÃ¥n dynamisk utdatafil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:486 #, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "%s: Misslyckades med att skriva tillbaka dynamiska utdatafiler i %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #, c-format msgid "%s: Failed to write list of output files" msgstr "%s: Misslyckades med att skriva lista med utdatafiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:506 #, c-format msgid "%s: Failed to write list of output status files" msgstr "%s: Misslyckades med att skriva lista med utdatastatusfiler." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:518 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:739 #, c-format msgid "%s: Failed to read list of input files" msgstr "%s: Misslyckades med att läsa lista med indatafiler" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:537 #, c-format msgid "%s: Failed to write list of input files" msgstr "%s: Misslyckades med att skriva lista med indatafiler" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:549 #, c-format msgid "%s: Received DTR with two remote endpoints!" msgstr "%s: Mottog DTR med tvÃ¥ fjärrslutpunkter!" 
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:561 #: src/services/candypond/CandyPondGenerator.cpp:105 #, c-format msgid "No active job id %s" msgstr "Inget aktivt jobb-id: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:605 #, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "" "%s: Misslyckades med att läsa lista med utdatafiler, kan inte rensa upp " "sessionskatalog" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:631 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:650 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:777 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:909 #, c-format msgid "%s: Failed to clean up session dir" msgstr "%s: Misslyckades med att rensa upp sessionskatalog" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:641 #, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "" "%s: Misslyckades med att läsa lista med indatafiler, kan inte rensa upp " "sessionskatalog" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "uploads" msgstr "uppladdningar" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "downloads" msgstr "nedladdningar" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "cancelled" msgstr "avbröts" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "finished" msgstr "avslutade" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:662 #, c-format msgid "%s: All %s %s successfully" msgstr "%s: Alla %s %s framgångsrikt" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:666 #, c-format msgid "%s: Some %s failed" msgstr "%s: Några %s misslyckades" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:670 #, c-format msgid "%s: Requesting attention from DTR generator" msgstr "%s: Begär uppmärksamhet från DTR-generator" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:681 msgid "DTRGenerator is requested to process null job" msgstr "DTR-generator fick begäran att processera null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "download" msgstr "ladda ner" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "upload" msgstr "ladda upp" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:686 #, c-format msgid "%s: Received data staging request to %s files" msgstr "%s: Mottog datastagingbegäran att %s filer" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:748 #, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "%s: Duplikatfil i lista med indatafiler: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:801 #, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "%s: Läser utdatafiler från användargenererad lista i %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:803 #, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "%s: Fel vid läsning av användargenererad lista med utdatafiler i %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:834 #, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "%s: Misslyckades med att lista utdatakatalog %s: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:852 #, c-format msgid "%s: Adding new output file %s: %s" msgstr "%s: Lägger till ny utdatafil %s: %s" #:
src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:875 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "%s: Två identiska utdatadestinationer: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:888 #, c-format msgid "%s: Cannot upload two different files %s and %s to same LFN: %s" msgstr "%s: Kan inte ladda upp två olika filer %s och %s till samma LFN: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:920 #, c-format msgid "%s: Received job in a bad state: %s" msgstr "%s: Mottog jobb i ett dåligt tillstånd: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:928 #, c-format msgid "%s: Session directory processing takes too long - %u.%06u seconds" msgstr "%s: Sessionskatalogsprocessering tar för lång tid - %u.%06u sekunder" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:976 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" "%s: Destinationsfil %s lämnades möjligen oavslutad från tidigare A-REX-" "körning, kommer att skriva över" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1071 #, c-format msgid "%s: Failed writing local information" msgstr "%s: Misslyckades med att skriva lokal information" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1089 #, c-format msgid "%s: Cancelling active DTRs" msgstr "%s: Avbryter aktiva DTRer" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1096 msgid "DTRGenerator is asked to check files for null job" msgstr "DTR-generator fick förfrågan att kontrollera filer för null-jobb" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1116 #, c-format msgid "%s: Can't read list of input files" msgstr "%s: Kan inte läsa lista med indatafiler" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1131 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "%s: Kontrollerar användaruppladdningsbar fil: %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1136 #, c-format msgid "%s: User has uploaded file %s" msgstr "%s: Användare har laddat upp fil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1143 #, c-format msgid "%s: Failed writing changed input file." msgstr "%s: Misslyckades med att skriva ändrad indatafil."
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1147 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "%s: Kritiskt fel för uppladdningsbar fil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, c-format msgid "%s: User has NOT uploaded file %s" msgstr "%s: Användare har INTE laddat upp fil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1165 #, c-format msgid "%s: Uploadable files timed out" msgstr "%s: Uppladdningsbara filer avbröts på grund av timeout" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1221 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1247 #, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "%s: Kan inte konvertera checksumma %s till heltal för %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1228 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1242 #, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "%s: Kan inte konvertera filstorlek %s till heltal för %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1237 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "%s: Ogiltig information om storlek/checksumma (%s) för %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1259 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "%s: Ogiltig fil: %s är för stor." #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1275 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" "%s: Misslyckades med att byta användar-id till %d/%d för att läsa fil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1281 #, c-format msgid "%s: Failed to open file %s for reading" msgstr "%s: Misslyckades med att öppna fil %s för läsning" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1289 #, c-format msgid "%s: Error accessing file %s" msgstr "%s: Fel vid åtkomst för fil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1301 #, c-format msgid "%s: Error reading file %s" msgstr "%s: Fel vid läsning av fil %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1316 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "%s: Fil %s har felaktig checksumma: %llu. Förväntade %lli" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1322 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "%s: Checksumma %llu verifierad för %s" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1334 msgid "" "Found unfinished DTR transfers. It is possible the previous A-REX process " "did not shut down normally" msgstr "" "Hittade oavslutade DTR-överföringar.
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1341
#, c-format
msgid "Found DTR %s for file %s left in transferring state from previous run"
msgstr "Hittade DTR %s för fil %s kvarlämnad i överförande tillstånd från tidigare körning"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1350
msgid "DTRGenerator is requested to clean links for null job"
msgstr "DTR-generator fick begäran att ta bort länkar för null-jobb"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1366
#, c-format
msgid "%s: Cache cleaning takes too long - %u.%06u seconds"
msgstr "%s: Cacherensning tar för lång tid - %u.%06u sekunder"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:108
#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:190
#, c-format
msgid "%s: Job monitoring counter is broken"
msgstr "%s: Jobbmonitoreringsräknare är trasig"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:115
#, c-format
msgid "%s: Job monitoring is unintentionally lost"
msgstr "%s: Jobbmonitorering har oavsiktligt förlorats"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:124
#, c-format
msgid "%s: Job monitoring stop success"
msgstr "%s: Jobbmonitorering avslutades"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:129
#, c-format
msgid "%s: Job monitoring stop requested with %u active references and %s queue associated"
msgstr "%s: Avslutande av jobbmonitorering begärt med %u aktiva referenser och kön %s associerad"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:131
#, c-format
msgid "%s: Job monitoring stop requested with %u active references"
msgstr "%s: Avslutande av jobbmonitorering begärt med %u aktiva referenser"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:195
#, c-format
msgid "%s: Job monitoring is lost due to removal from queue"
msgstr "%s: Jobbmonitorering förlorad på grund av borttagande från kö"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:278
#, c-format
msgid "%s: PushSorted failed to find job where expected"
msgstr "%s: PushSorted misslyckades med att hitta jobb där det förväntades"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:161
#, c-format
msgid "Replacing queue '%s' with '%s'"
msgstr "Byter ut kö '%s' mot '%s'"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:255
#, c-format
msgid "Bad name for stdout: %s"
msgstr "Felaktigt namn för stdout: %s"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:263
#, c-format
msgid "Bad name for stderr: %s"
msgstr "Felaktigt namn för stderr: %s"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:326
#, c-format
msgid "Bad name for runtime environment: %s"
msgstr "Felaktigt namn för runtime-miljö: %s"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:371
msgid "Job description file could not be read."
msgstr "Jobbeskrivningsfil kunde inte läsas."
#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:422
#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:436
#, c-format
msgid "Bad name for executable: %s"
msgstr "Felaktigt namn för executable: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:89
msgid "Failed to start data staging threads"
msgstr "Misslyckades med att starta datastaging-trådar"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:190
#, c-format
msgid "%s: Failed reading .local and changing state, job and A-REX may be left in an inconsistent state"
msgstr "%s: Misslyckades med att läsa .local och att ändra tillstånd, jobb och A-REX kan lämnas i ett inkonsekvent tillstånd"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:195
#, c-format
msgid "%s: unexpected failed job add request: %s"
msgstr "%s: oväntad begäran att lägga till misslyckat jobb: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:206
#, c-format
msgid "%s: unexpected job add request: %s"
msgstr "%s: oväntad begäran att lägga till jobb: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:259
#, c-format
msgid "%s: job for attention"
msgstr "%s: jobb för uppmärksamhet"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:269
msgid "all for attention"
msgstr "alla för uppmärksamhet"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:286
#, c-format
msgid "%s: job found while scanning"
msgstr "%s: jobb hittat vid skanning"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:314
#, c-format
msgid "%s: job will wait for external process"
msgstr "%s: jobb kommer att vänta på extern process"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331
#, c-format
msgid "%s: job assigned for slow polling"
msgstr "%s: jobb tilldelat för långsam utfrågning"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349
#, c-format
msgid "%s: job being processed"
msgstr "%s: jobb processeras"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:384
#, c-format
msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)"
msgstr "Nuvarande jobb i systemet (PREPARING till FINISHING) per-DN (%i poster)"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:386
#, c-format
msgid "%s: %i"
msgstr "%s: %i"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:398
#, c-format
msgid "%s: Failed storing failure reason: %s"
msgstr "%s: Misslyckades med att lagra felorsak: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404
#, c-format
msgid "%s: Failed reading job description: %s"
msgstr "%s: Misslyckades med att läsa jobbeskrivning: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:416
#, c-format
msgid "%s: Failed parsing job request."
msgstr "%s: Misslyckades med att tolka jobbegäran."
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:466
#, c-format
msgid "%s: Failed writing list of output files: %s"
msgstr "%s: Misslyckades med att skriva lista med utdatafiler: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:492
#, c-format
msgid "%s: Failed obtaining lrms id"
msgstr "%s: Misslyckades med att erhålla LRMS-id"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:506
#, c-format
msgid "%s: Failed writing local information: %s"
msgstr "%s: Misslyckades med att skriva lokal information: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:538
#, c-format
msgid "%s: Failed creating grami file"
msgstr "%s: Misslyckades med att skapa grami-fil"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:542
#, c-format
msgid "%s: Failed setting executable permissions"
msgstr "%s: Misslyckades med att sätta körbar åtkomsträttighet"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550
#, c-format
msgid "%s: state SUBMIT: starting child: %s"
msgstr "%s: tillstånd SUBMIT: startar barnprocess: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:557
#, c-format
msgid "%s: Failed running submission process"
msgstr "%s: Misslyckades med att köra insändningsprocess"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:562
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:669
#, c-format
msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel"
msgstr "%s: LRMS-skriptets gräns på %u är nådd - suspenderar insändning/avbrytande"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:578
#, c-format
msgid "%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done."
msgstr "%s: Jobbinsändning till LRMS tar för lång tid, men ID har redan erhållits. Låtsas att insändning gjorts."

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:585
#, c-format
msgid "%s: Job submission to LRMS takes too long. Failing."
msgstr "%s: Jobbinsändning till LRMS tar för lång tid. Misslyckas."

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:594
#, c-format
msgid "%s: state SUBMIT: child exited with code %i"
msgstr "%s: tillstånd SUBMIT: barnprocess avslutades med kod %i"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:599
#, c-format
msgid "%s: Job submission to LRMS failed"
msgstr "%s: Jobbinsändning till LRMS misslyckades"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:620
#, c-format
msgid "%s: state CANCELING: timeout waiting for cancellation"
msgstr "%s: tillstånd CANCELING: timeout vid väntan på avbrytande"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:626
#, c-format
msgid "%s: state CANCELING: job diagnostics collected"
msgstr "%s: tillstånd CANCELING: jobbdiagnostik insamlad"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:654
#, c-format
msgid "%s: state CANCELING: starting child: %s"
msgstr "%s: tillstånd CANCELING: startar barnprocess: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:656
#, c-format
msgid "%s: Job has completed already. No action taken to cancel"
msgstr "%s: Jobb har redan slutförts. Ingen åtgärd vidtagen för att avbryta"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:664
#, c-format
msgid "%s: Failed running cancellation process"
msgstr "%s: Misslyckades med att köra avbrytningsprocess"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:683
#, c-format
msgid "%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded."
msgstr "%s: Avbrytande av jobb tar för lång tid, men diagnostikinsamling verkar ha gjorts. Låtsas att avbrytande lyckades."
msgstr "" "%s: Avbrytande av jobb tar för lÃ¥ng tid, men diagnostikinsamling verkar ha " "gjorts. LÃ¥tsas att avbrytande lyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: Job cancellation takes too long. Failing." msgstr "%s: Avbrytande av jobb tar för lÃ¥ng tid. Misslyckas." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:699 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "%s: tillstÃ¥nd CANCELING: barnprocess avslutades med kod %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:705 #, c-format msgid "%s: Failed to cancel running job" msgstr "%s: Misslyckades med att avbryta körande jobb" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:724 #, c-format msgid "%s: State: %s: data staging finished" msgstr "%s: tillstÃ¥nd: %s: datastaging avslutad" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:759 #, c-format msgid "%s: State: %s: still in data staging" msgstr "%s: tillstÃ¥nd: %s: fortfarande i datastaging" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:772 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "%s: Jobb har inte rätt att startas om längre" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:782 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "%s: Jobbet misslyckades i okänt tillstÃ¥nd. Kommer ej att starta om." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:803 #, c-format msgid "%s: Reprocessing job description failed" msgstr "%s: Omprocessering av jobbeskrivning misslyckades." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:810 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "%s: Misslyckades med att läsa omprocesserad lista med utdatafiler" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:814 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "%s: Misslyckades med att läsa omprocesserad lista med indatafiler" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:898 #, c-format msgid "%s: Reading status of new job failed" msgstr "%s: Läsandet av det nya jobbets status misslyckades" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:911 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "%s: TillstÃ¥nd: ACCEPTED: tolkar jobbeskrivning" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:913 #, c-format msgid "%s: Processing job description failed" msgstr "%s: Processering av jobbeskrivning misslyckades" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:952 #, c-format msgid "%s: new job is accepted" msgstr "%s: nytt jobb har accepterats" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:964 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "%s: %s: Nytt jobb tillhör %i/%i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:969 #, c-format msgid "%s: old job is accepted" msgstr "%s: gammalt jobb har accepterats<" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:980 #, c-format msgid "%s: State: ACCEPTED" msgstr "%s: TillstÃ¥nd: ACCEPTED" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:986 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "%s: TillstÃ¥nd: ACCEPTED: dryrun" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1009 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "%s: TillstÃ¥nd: ACCEPTED: har process-tid %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1015 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "%s: TillstÃ¥nd: ACCEPTED: flyttar till 
PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1031 #, c-format msgid "%s: State: PREPARING" msgstr "%s: TillstÃ¥nd: PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1038 #, c-format msgid "%s: Failed obtaining local job information." msgstr "%s: Misslyckades med att erhÃ¥lla lokal information." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1091 #, c-format msgid "%s: State: SUBMIT" msgstr "%s: TillstÃ¥nd: SUBMIT" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1111 #, c-format msgid "%s: State: CANCELING" msgstr "%s: TillstÃ¥nd: CANCELING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1131 #, c-format msgid "%s: State: INLRMS" msgstr "%s: TillstÃ¥nd: INLRMS" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: INLRMS - checking for pending(%u) and mark" msgstr "%s: TillstÃ¥nd: INLRMS - letar efter pending(%u) och markerade" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1138 #, c-format msgid "%s: State: INLRMS - checking for not pending" msgstr "%s: TillstÃ¥nd: INLRMS - letar efter inte pending" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1140 #, c-format msgid "%s: Job finished" msgstr "%s: Jobbet avslutat" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1144 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "%s: TillstÃ¥nd: INLRMS: avslutningsmeddelande är %i %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1157 #, c-format msgid "%s: State: INLRMS - no mark found" msgstr "%s: TillstÃ¥nd: INLRMS - hittade ingen markering" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1169 #, c-format msgid "%s: State: FINISHING" msgstr "%s: TillstÃ¥nd: FINISHING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1190 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "%s: Jobbet har fÃ¥tt begäran om att tas bort - tar bort" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1207 #, c-format msgid "%s: restarted PREPARING job" msgstr "%s: startade om PREPARING jobb" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1223 #, c-format msgid "%s: restarted INLRMS job" msgstr "%s: startade om INLRMS jobb" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1232 #, c-format msgid "%s: restarted FINISHING job" msgstr "%s: startade om FINISHING jobb" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1237 #, c-format msgid "%s: Can't rerun on request" msgstr "%s: Kan inte starta om pÃ¥ begäran" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1239 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "%s: Kan inte starta om pÃ¥ begäran - inte ett lämpligt tillstÃ¥nd" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1250 #, c-format msgid "%s: Job is too old - deleting" msgstr "%s: Jobbet är för gammalt - raderar" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1295 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "%s: Jobbet är antikt - raderar resterande information" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1313 #, c-format msgid "%s: Canceling job because of user request" msgstr "%s: Avbryter jobb pÃ¥ grund av användarbegäran" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1327 #, c-format msgid "%s: Failed to turn job into failed during cancel processing." 
msgstr "" "%s: Misslyckades med sätta jobbet som misslyckat under " "avbrytningsprocessering" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1359 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1367 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "%s: Plugin vid tillstÃ¥nd %s : %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1373 #, c-format msgid "%s: Plugin execution failed" msgstr "%s: Pluginexekvering misslyckades" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1480 #, c-format msgid "%s: State: %s from %s" msgstr "%s: TillstÃ¥nd: %s frÃ¥n %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1529 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "Misslyckades med att fÃ¥ DN-information frÃ¥n .local-fil för jobb %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1556 #, c-format msgid "%s: Delete request due to internal problems" msgstr "%s: Radera begäran pÃ¥ grund av interna problem" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1591 #, c-format msgid "%s: Job failure detected" msgstr "%s: Jobbfel detekterat" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1651 #, c-format msgid "Failed to move file %s to %s" msgstr "Misslyckades med att flytta fil %s till %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1659 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1769 #, c-format msgid "Failed reading control directory: %s" msgstr "Misslyckades med att läsa kontrollkatalog: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1729 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "Misslyckades med att läsa kontrollkatalog: %s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2043 #, c-format msgid "Helper process start failed: %s" msgstr "Hjälpprocesstart misslyckades: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2050 #, c-format msgid "Stopping helper process %s" msgstr "Stoppar hjälpprocess: %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:61 #, c-format msgid "Error with hearbeatfile: %s" msgstr "Fel med hjärtslagsfil: %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:73 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:139 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:136 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr ": Metrikverktyg returnerade felkod %i: %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:107 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:186 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:178 msgid "" "gmetric_bin_path empty in arc.conf (should never happen the default value " "should be used)" msgstr "" "gmetric_bin_path tom i arc.conf (ska aldrig hända, det förvalda värdet ska " "användas)" #: src/services/a-rex/grid-manager/log/JobLog.cpp:114 msgid ": Accounting records reporter tool is not specified" msgstr ": Bokföringspostrapporteringsverktyg är inte angivet" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for accounting reporter child process" msgstr ": Misslyckades med skapa slot för bokföringsrapporterings-barnprocess" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting accounting reporter child process" msgstr ": Misslyckande med att starta bokföringsrapporterings-barnprocess" #: src/services/a-rex/grid-manager/log/JobLog.cpp:176 msgid ": Failure creating accounting database connection" msgstr ": Misslyckades med att skapa bokföringsdatabasförbindelse" #: 
#: src/services/a-rex/grid-manager/log/JobLog.cpp:202
#, c-format
msgid ": writing accounting record took %llu ms"
msgstr ": skrivning av bokföringspost tog %llu ms"

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:74
#, c-format
msgid "Session dir '%s' contains user specific substitutions - skipping it"
msgstr "Sessionskatalog '%s' innehåller användarspecifika substitutioner - hoppar över"

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:86
#, c-format
msgid "Sessiondir %s: Free space %f GB"
msgstr "Sessionskatalog %s: Fritt utrymme %f GB"

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:94
msgid "No session directories found in configuration."
msgstr "Hittade inga sessionskataloger i inställningarna."

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:125
msgid "No cachedirs found/configured for calculation of free space."
msgstr "Inga cachekataloger hittade/konfigurerade för beräkning av fritt utrymme."

#: src/services/a-rex/grid-manager/mail/send_mail.cpp:29
msgid "Failed reading local information"
msgstr "Misslyckades med att läsa lokal information"

#: src/services/a-rex/grid-manager/mail/send_mail.cpp:79
#, c-format
msgid "Running mailer command (%s)"
msgstr "Kör e-postsändar-kommando (%s)"

#: src/services/a-rex/grid-manager/mail/send_mail.cpp:81
msgid "Failed running mailer"
msgstr "Misslyckades med att köra e-postsändare"

#: src/services/a-rex/grid-manager/run/RunParallel.cpp:34
#, c-format
msgid "%s: Job's helper exited"
msgstr "%s: Jobbets hjälpprogram avslutades"

#: src/services/a-rex/grid-manager/run/RunParallel.cpp:71
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:56
#, c-format
msgid "%s: Failure creating slot for child process"
msgstr "%s: Misslyckades med att skapa slot för barnprocess"

#: src/services/a-rex/grid-manager/run/RunParallel.cpp:120
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:73
#, c-format
msgid "%s: Failure starting child process"
msgstr "%s: Misslyckande med att starta barnprocess"

#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:62
#, c-format
msgid "%s: Failure creating data storage for child process"
msgstr "%s: Misslyckades med att skapa datalagring för barnprocess"

#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:78
#, c-format
msgid "%s: Failure waiting for child process to finish"
msgstr "%s: Misslyckande med att vänta på att barnprocess skall avslutas"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:47
msgid "[job description input]"
msgstr "[jobbeskrivningsinput]"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:48
msgid "Tool for writing the grami file representation of a job description file."
msgstr "Verktyg för att skriva grami-filrepresentationen av en jobbeskrivningsfil."
#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:52
msgid "Name of grami file"
msgstr "Namn på grami-fil"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:57
msgid "Configuration file to load"
msgstr "Inställningsfil att ladda in"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:58
msgid "arc.conf"
msgstr "arc.conf"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:62
msgid "Session directory to use"
msgstr "Sessionskatalog att använda"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:63
msgid "directory"
msgstr "katalog"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:79
msgid "No job description file name provided."
msgstr "Ingen jobbeskrivningsfil tillhandahölls."

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:85
#, c-format
msgid "Unable to parse job description input: %s"
msgstr "Kunde inte tolka jobbeskrivningsinput: %s"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:91
msgid "Unable to load ARC configuration file."
msgstr "Kunde inte ladda in ARC-inställningsfil."

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:111
#, c-format
msgid "Unable to write grami file: %s"
msgstr "Kunde inte skriva grami-fil: %s"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:117
#, c-format
msgid "Unable to write 'output' file: %s"
msgstr "Kunde inte skriva 'output'-fil: %s"

#: src/services/a-rex/information_collector.cpp:53
#, c-format
msgid "Resource information provider: %s"
msgstr "Resursinformationstillhandahållare: %s"

#: src/services/a-rex/information_collector.cpp:56
msgid "Resource information provider failed to start"
msgstr "Resursinformationstillhandahållare misslyckades med att starta"

#: src/services/a-rex/information_collector.cpp:59
msgid "Resource information provider failed to run"
msgstr "Resursinformationstillhandahållare misslyckades med att köra"

#: src/services/a-rex/information_collector.cpp:63
#, c-format
msgid "Resource information provider failed with exit status: %i\n%s"
msgstr "Resursinformationstillhandahållare misslyckades med avslutningsstatus: %i\n%s"

#: src/services/a-rex/information_collector.cpp:65
#, c-format
msgid "Resource information provider log:\n%s"
msgstr "Resursinformationstillhandahållarlogg:\n%s"

#: src/services/a-rex/information_collector.cpp:71
msgid "No new informational document assigned"
msgstr "Inget nytt informationsdokument tilldelat"

#: src/services/a-rex/information_collector.cpp:73
#, c-format
msgid "Obtained XML: %s"
msgstr "Erhållen XML: %s"

#: src/services/a-rex/information_collector.cpp:87
msgid "Informational document is empty"
msgstr "Informationsdokument är tomt"

#: src/services/a-rex/information_collector.cpp:212
msgid "OptimizedInformationContainer failed to create temporary file"
msgstr "OptimizedInformationContainer misslyckades med att skapa temporär fil"

#: src/services/a-rex/information_collector.cpp:215
#, c-format
msgid "OptimizedInformationContainer created temporary file: %s"
msgstr "OptimizedInformationContainer skapade temporär fil: %s"

#: src/services/a-rex/information_collector.cpp:221
msgid "OptimizedInformationContainer failed to store XML document to temporary file"
msgstr "OptimizedInformationContainer misslyckades med att lagra XML-dokument till temporär fil"

#: src/services/a-rex/information_collector.cpp:230
msgid "OptimizedInformationContainer failed to parse XML"
msgstr "OptimizedInformationContainer misslyckades med att tolka XML"
#: src/services/a-rex/information_collector.cpp:242
msgid "OptimizedInformationContainer failed to rename temporary file"
msgstr "OptimizedInformationContainer misslyckades med att byta namn på temporär fil"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:38
msgid "Default INTERNAL client constructor"
msgstr "Förvald INTERNAL-klientkonstruktor"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:41
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:61
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:83
msgid "Failed to load grid-manager configfile"
msgstr "Misslyckades med att ladda in grid-managerns inställningsfil"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:46
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:66
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:88
msgid "Failed to set INTERNAL endpoint"
msgstr "Misslyckades med att sätta INTERNAL-slutpunkt"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:131
msgid "Failed to identify grid-manager config file"
msgstr "Misslyckades med att identifiera grid-managerns inställningsfil"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:150
#, c-format
msgid "Failed to run configuration parser at %s."
msgstr "Misslyckades med att köra inställningstolk på %s."

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:154
#, c-format
msgid "Parser failed with error code %i."
msgstr "Tolk misslyckades med felkod %i."

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:160
#, c-format
msgid "No pid file is found at '%s'. Probably A-REX is not running."
msgstr "Hittade ingen pid-fil på '%s'. Troligen kör inte A-REX."

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:175
#, c-format
msgid "Failed to load grid-manager config file from %s"
msgstr "Misslyckades med att ladda in grid-managerns inställningsfil från %s"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:266
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:372
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:405
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:451
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:505
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:557
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:575
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:625
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:655
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:673
#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:691
msgid "INTERNALClient is not initialized"
msgstr "INTERNALClient är inte initierad"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:456
msgid "Submitting job "
msgstr "Sänder in jobb "

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:517
#, c-format
msgid "Failed to copy input file: %s to path: %s"
msgstr "Misslyckades med att kopiera indatafil: %s till sökväg: %s"

#: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:523
#, c-format
msgid "Failed to set permissions on: %s"
msgstr "Misslyckades med att sätta åtkomsträttigheter på: %s"

#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:51
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:92
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:119
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:145
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:184
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:246
msgid "Failed to load grid-manager config file"
msgstr "Misslyckades med att ladda in grid-managerns inställningsfil"

#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:191
#, c-format
msgid "Job %s does not report a resumable state"
msgstr "Jobb %s rapporterar inte ett tillstånd varifrån det kan återupptas"

#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:196
#, c-format
msgid "Resuming job: %s at state: %s (%s)"
msgstr "Återupptar jobb: %s i tillstånd: %s (%s)"

#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:205
msgid "Job resuming successful"
msgstr "Jobbet återupptogs framgångsrikt"

#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:251
#, c-format
msgid "Failed retrieving information for job: %s"
msgstr "Misslyckades med att hämta information om jobb: %s"

#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:324
msgid "Retrieving job description of INTERNAL jobs is not supported"
msgstr "Att hämta jobbeskrivning för INTERNAL-jobb stöds inte"

#: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:67
#, c-format
msgid "Listing localjobs succeeded, %d localjobs found"
msgstr "Listning av lokala jobb lyckades, hittade %d lokala jobb"

#: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:83
#, c-format
msgid "Skipping retrieved job (%s) because it was submitted via another interface (%s)."
msgstr "Hoppar över hämtat jobb (%s) eftersom det sändes in via ett annat gränssnitt (%s)."

#: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:38
msgid "Failed to delegate credentials to server - no delegation interface found"
msgstr "Misslyckades med att delegera referenser till server - hittade inget delegeringsgränssnitt"

#: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:45
#, c-format
msgid "Failed to delegate credentials to server - %s"
msgstr "Misslyckades med att delegera referenser till server - %s"

#: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:84
msgid "Failed preparing job description"
msgstr "Misslyckades med att förbereda jobbeskrivning"

#: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:127
msgid "Failed submitting job description"
msgstr "Misslyckades med att skicka in jobbeskrivning"

#: src/services/a-rex/job.cpp:78
#, c-format
msgid "Using cached local account '%s'"
msgstr "Använder cachat lokalt konto '%s'"

#: src/services/a-rex/job.cpp:89
msgid "Will not map to 'root' account by default"
msgstr "Kommer ej att mappa till 'root'-konto som förval"

#: src/services/a-rex/job.cpp:102
msgid "No local account name specified"
msgstr "Inget lokalt kontonamn angivet"

#: src/services/a-rex/job.cpp:105
#, c-format
msgid "Using local account '%s'"
msgstr "Använder lokalt konto '%s'"

#: src/services/a-rex/job.cpp:109
msgid "TLS provides no identity, going for OTokens"
msgstr "TLS tillhandahåller ingen identitet, försöker med OTokens"
#: src/services/a-rex/job.cpp:168
msgid "Failed to acquire A-REX's configuration"
msgstr "Misslyckades med att förvärva A-REX:s inställningar"

#: src/services/a-rex/job.cpp:240
#, c-format
msgid "Cannot handle local user %s"
msgstr "Kan inte hantera lokal användare %s"

#: src/services/a-rex/job.cpp:288
#, c-format
msgid "%s: Failed to parse user policy"
msgstr "%s: Misslyckades med att tolka användarpolicy"

#: src/services/a-rex/job.cpp:293
#, c-format
msgid "%s: Failed to load evaluator for user policy "
msgstr "%s: Misslyckades med att ladda in utvärderare för användarpolicy "

#: src/services/a-rex/job.cpp:398
#, c-format
msgid "%s: Unknown user policy '%s'"
msgstr "%s: Okänd användarpolicy '%s'"

#: src/services/a-rex/job.cpp:738 src/services/a-rex/job.cpp:756
#, c-format
msgid "Credential expires at %s"
msgstr "Referensens livslängd går ut %s"

#: src/services/a-rex/job.cpp:740 src/services/a-rex/job.cpp:758
#, c-format
msgid "Credential handling exception: %s"
msgstr "Referenshanteringsundantag: %s"

#: src/services/a-rex/job.cpp:924
#, c-format
msgid "Failed to run external plugin: %s"
msgstr "Misslyckades med att köra extern plugin: %s"

#: src/services/a-rex/job.cpp:928
#, c-format
msgid "Plugin response: %s"
msgstr "Pluginsvar: %s"

#: src/services/a-rex/job.cpp:1138
#, c-format
msgid "Failed to create job in %s"
msgstr "Misslyckades med att skapa jobb i %s"

#: src/services/a-rex/job.cpp:1147
#, c-format
msgid "Out of tries while allocating new job ID in %s"
msgstr "Slut på försök vid allokering av nytt jobb-id i %s"

#: src/services/a-rex/job.cpp:1397
msgid "No non-draining session dirs available"
msgstr "Inga non-draining sessionskataloger tillgängliga"

#: src/services/a-rex/put.cpp:150
#, c-format
msgid "%s: put file %s: there is no payload"
msgstr "%s: put fil %s: det finns ingen nyttolast"

#: src/services/a-rex/put.cpp:156
#, c-format
msgid "%s: put file %s: unrecognized payload"
msgstr "%s: put fil %s: okänd nyttolast"

#: src/services/a-rex/put.cpp:172 src/services/a-rex/rest/rest.cpp:2050
#, c-format
msgid "%s: put file %s: failed to create file: %s"
msgstr "%s: put fil %s: misslyckades med att skapa fil: %s"

#: src/services/a-rex/put.cpp:188
#, c-format
msgid "%s: put file %s: %s"
msgstr "%s: put fil %s: %s"

#: src/services/a-rex/put.cpp:210
#, c-format
msgid "%s: delete file %s: failed to obtain file path: %s"
msgstr "%s: ta bort fil %s: misslyckades med att erhålla filsökväg: %s"

#: src/services/a-rex/put.cpp:221
#, c-format
msgid "%s: delete file %s: failed to open file/dir: %s"
msgstr "%s: ta bort fil %s: misslyckades med att öppna fil/katalog: %s"

#: src/services/a-rex/rest/rest.cpp:749
#, c-format
msgid "REST: process %s at %s"
msgstr "REST: process %s på %s"

#: src/services/a-rex/rest/rest.cpp:797 src/services/a-rex/rest/rest.cpp:813
#: src/services/a-rex/rest/rest.cpp:1094 src/services/a-rex/rest/rest.cpp:1185
#: src/services/a-rex/rest/rest.cpp:1549 src/services/a-rex/rest/rest.cpp:2161
#, c-format
msgid "process: method %s is not supported for subpath %s"
msgstr "process: metod %s stöds inte för subsökväg %s"

#: src/services/a-rex/rest/rest.cpp:819
#, c-format
msgid "process: schema %s is not supported for subpath %s"
msgstr "process: schema %s stöds inte för subsökväg %s"

#: src/services/a-rex/rest/rest.cpp:1182 src/services/a-rex/rest/rest.cpp:1546
#, c-format
msgid "process: action %s is not supported for subpath %s"
msgstr "process: handling %s stöds inte för subsökväg %s"

#: src/services/a-rex/rest/rest.cpp:1558 src/services/a-rex/rest/rest.cpp:1627
#: src/services/a-rex/rest/rest.cpp:1987 src/services/a-rex/rest/rest.cpp:2150
#, c-format
msgid "REST:GET job %s - %s"
msgstr "REST:GET jobb %s - %s"

#: src/services/a-rex/rest/rest.cpp:1674 src/services/a-rex/rest/rest.cpp:1682
#, c-format
msgid "REST:KILL job %s - %s"
msgstr "REST:KILL jobb %s - %s"

#: src/services/a-rex/rest/rest.cpp:1699 src/services/a-rex/rest/rest.cpp:1707
#, c-format
msgid "REST:CLEAN job %s - %s"
msgstr "REST:CLEAN jobb %s - %s"

#: src/services/a-rex/rest/rest.cpp:1724 src/services/a-rex/rest/rest.cpp:1732
#: src/services/a-rex/rest/rest.cpp:1749
#, c-format
msgid "REST:RESTART job %s - %s"
msgstr "REST:RESTART jobb %s - %s"

#: src/services/a-rex/rest/rest.cpp:2040
#, c-format
msgid "REST:PUT job %s: file %s: there is no payload"
msgstr "REST:PUT jobb %s: fil %s: det finns ingen nyttolast"

#: src/services/a-rex/rest/rest.cpp:2063
#, c-format
msgid "HTTP:PUT %s: put file %s: %s"
msgstr "HTTP:PUT %s: put fil %s: %s"

#: src/services/a-rex/test_cache_check.cpp:24
#: src/tests/count/test_client.cpp:20
#: src/tests/echo/echo_test4axis2c/test_client.cpp:20
#: src/tests/echo/test_client.cpp:21
msgid "Creating client side chain"
msgstr "Skapar klientsidokedjan"

#: src/services/a-rex/update_credentials.cpp:29
#, c-format
msgid "UpdateCredentials: request = \n%s"
msgstr "UpdateCredentials: begäran = \n%s"

#: src/services/a-rex/update_credentials.cpp:35
msgid "UpdateCredentials: missing Reference"
msgstr "UpdateCredentials: saknar Reference"

#: src/services/a-rex/update_credentials.cpp:43
msgid "UpdateCredentials: wrong number of Reference"
msgstr "UpdateCredentials: fel antal Reference"

#: src/services/a-rex/update_credentials.cpp:51
msgid "UpdateCredentials: wrong number of elements inside Reference"
msgstr "UpdateCredentials: fel antal element inuti Reference"

#: src/services/a-rex/update_credentials.cpp:60
msgid "UpdateCredentials: EPR contains no JobID"
msgstr "UpdateCredentials: EPR innehåller inget jobb-id"

#: src/services/a-rex/update_credentials.cpp:70
#, c-format
msgid "UpdateCredentials: no job found: %s"
msgstr "UpdateCredentials: hittade inget jobb: %s"

#: src/services/a-rex/update_credentials.cpp:77
msgid "UpdateCredentials: failed to update credentials"
msgstr "UpdateCredentials: misslyckades med att uppdatera referenser"

#: src/services/a-rex/update_credentials.cpp:85
#, c-format
msgid "UpdateCredentials: response = \n%s"
msgstr "UpdateCredentials: svar = \n%s"

#: src/services/candypond/CandyPond.cpp:52
msgid "No A-REX config file found in candypond configuration"
msgstr "Hittade ingen A-REX-inställningsfil i candypond-inställningarna"

#: src/services/candypond/CandyPond.cpp:56
#, c-format
msgid "Using A-REX config file %s"
msgstr "Använder A-REX-inställningsfil %s"

#: src/services/candypond/CandyPond.cpp:60
#, c-format
msgid "Failed to process A-REX configuration in %s"
msgstr "Misslyckades med att processera A-REX-inställningarna i %s"

#: src/services/candypond/CandyPond.cpp:65
msgid "No caches defined in configuration"
msgstr "Inga cacher definierade i inställningar"

#: src/services/candypond/CandyPond.cpp:140
#: src/services/candypond/CandyPond.cpp:347
#, c-format
msgid "Can't handle URL %s"
msgstr "Kan inte hantera URL %s"

#: src/services/candypond/CandyPond.cpp:150
msgid "Empty filename returned from FileCache"
msgstr "Tomt filnamn returnerat från FileCache"

#: src/services/candypond/CandyPond.cpp:162
#, c-format
msgid "Problem accessing cache file %s: %s"
msgstr "Problem att komma åt cachefil %s: %s"
#: src/services/candypond/CandyPond.cpp:210
#: src/services/candypond/CandyPond.cpp:474
msgid "No job ID supplied"
msgstr "Inget jobb-id tillhandahållet"

#: src/services/candypond/CandyPond.cpp:219
#, c-format
msgid "Bad number in priority element: %s"
msgstr "Felaktigt nummer i priority-element: %s"

#: src/services/candypond/CandyPond.cpp:228
msgid "No username supplied"
msgstr "Inget användarnamn tillhandahållet"

#: src/services/candypond/CandyPond.cpp:235
#, c-format
msgid "Supplied username %s does not match mapped username %s"
msgstr "Det tillhandahållna användarnamnet %s matchar inte det mappade användarnamnet %s"

#: src/services/candypond/CandyPond.cpp:249
msgid "No session directory found"
msgstr "Hittade ingen sessionskatalog"

#: src/services/candypond/CandyPond.cpp:253
#, c-format
msgid "Using session dir %s"
msgstr "Använder sessionskatalog %s"

#: src/services/candypond/CandyPond.cpp:257
#, c-format
msgid "Failed to stat session dir %s"
msgstr "Misslyckades med att göra stat på sessionskatalog %s"

#: src/services/candypond/CandyPond.cpp:262
#, c-format
msgid "Session dir %s is owned by %i, but current mapped user is %i"
msgstr "Sessionskatalog %s ägs av %i, men nuvarande mappade användare är %i"

#: src/services/candypond/CandyPond.cpp:289
#, c-format
msgid "Failed to access proxy of given job id %s at %s"
msgstr "Misslyckades med att komma åt proxy för givet jobb-id %s på %s"

#: src/services/candypond/CandyPond.cpp:307
#, c-format
msgid "DN is %s"
msgstr "DN är %s"

#: src/services/candypond/CandyPond.cpp:385
#, c-format
msgid "Permission checking passed for url %s"
msgstr "Åtkomsträttighetskontroll godkänd för URL %s"

#: src/services/candypond/CandyPond.cpp:410
#: src/services/candypond/CandyPondGenerator.cpp:135
#, c-format
msgid "Failed to move %s to %s: %s"
msgstr "Misslyckades med att flytta %s till %s: %s"

#: src/services/candypond/CandyPond.cpp:441
#, c-format
msgid "Starting new DTR for %s"
msgstr "Startar ny DTR för %s"

#: src/services/candypond/CandyPond.cpp:443
#, c-format
msgid "Failed to start new DTR for %s"
msgstr "Misslyckades med att starta ny DTR för %s"

#: src/services/candypond/CandyPond.cpp:487
#, c-format
msgid "Job %s: all files downloaded successfully"
msgstr "Jobb %s: alla filer nerladdade framgångsrikt"

#: src/services/candypond/CandyPond.cpp:494
#, c-format
msgid "Job %s: Some downloads failed"
msgstr "Jobb %s: några nerladdningar misslyckades"

#: src/services/candypond/CandyPond.cpp:499
#, c-format
msgid "Job %s: files still downloading"
msgstr "Jobb %s: filer laddas fortfarande ner"

#: src/services/candypond/CandyPond.cpp:511
msgid "CandyPond: Unauthorized"
msgstr "CandyPond: Oauktoriserad"

#: src/services/candypond/CandyPond.cpp:520
msgid "No local user mapping found"
msgstr "Hittade ingen lokal användarmappning"

#: src/services/candypond/CandyPond.cpp:527
#: src/services/data-staging/DataDeliveryService.cpp:649
#, c-format
msgid "Identity is %s"
msgstr "Identitet är %s"

#: src/services/candypond/CandyPond.cpp:585
#: src/services/data-staging/DataDeliveryService.cpp:721
msgid "Security Handlers processing failed"
msgstr "Säkerhetshanterarprocessering misslyckades"

#: src/services/candypond/CandyPond.cpp:592
msgid "Only POST is supported in CandyPond"
msgstr "Endast POST stöds i CandyPond"

#: src/services/candypond/CandyPondGenerator.cpp:88
#, c-format
msgid "DTR %s finished with state %s"
msgstr "DTR %s avslutades med tillstånd %s"

#: src/services/candypond/CandyPondGenerator.cpp:124
#, c-format
msgid "Could not determine session directory from filename %s"
msgstr "Kunde inte bestämma sessionskatalog från filnamn %s"
#: src/services/candypond/CandyPondGenerator.cpp:164
#, c-format
msgid "Invalid DTR for source %s, destination %s"
msgstr "Ogiltig DTR för källa %s, destination %s"

#: src/services/candypond/CandyPondGenerator.cpp:206
#, c-format
msgid "DTRs still running for job %s"
msgstr "DTRer kör fortfarande för jobb %s"

#: src/services/candypond/CandyPondGenerator.cpp:215
#, c-format
msgid "All DTRs finished for job %s"
msgstr "Alla DTRer avslutade för jobb %s"

#: src/services/candypond/CandyPondGenerator.cpp:222
#, c-format
msgid "Job %s not found"
msgstr "Hittade inte jobb %s"

#: src/services/data-staging/DataDeliveryService.cpp:66
#, c-format
msgid "Archiving DTR %s, state ERROR"
msgstr "Arkiverar DTR %s, tillstånd ERROR"

#: src/services/data-staging/DataDeliveryService.cpp:70
#, c-format
msgid "Archiving DTR %s, state %s"
msgstr "Arkiverar DTR %s, tillstånd %s"

#: src/services/data-staging/DataDeliveryService.cpp:174
msgid "No delegation token in request"
msgstr "Inget delegeringstoken i begäran"

#: src/services/data-staging/DataDeliveryService.cpp:184
msgid "Failed to accept delegation"
msgstr "Misslyckades med att acceptera delegering"

#: src/services/data-staging/DataDeliveryService.cpp:214
#: src/services/data-staging/DataDeliveryService.cpp:221
msgid "ErrorDescription"
msgstr "ErrorDescription"

#: src/services/data-staging/DataDeliveryService.cpp:226
#, c-format
msgid "All %u process slots used"
msgstr "Alla %u processeringsslottar används"

#: src/services/data-staging/DataDeliveryService.cpp:241
#, c-format
msgid "Received retry for DTR %s still in transfer"
msgstr "Mottog försök igen för DTR %s som fortfarande överför"

#: src/services/data-staging/DataDeliveryService.cpp:248
#, c-format
msgid "Replacing DTR %s in state %s with new request"
msgstr "Byter ut DTR %s i tillstånd %s mot ny begäran"

#: src/services/data-staging/DataDeliveryService.cpp:258
#, c-format
msgid "Storing temp proxy at %s"
msgstr "Lagrar temporär proxy på %s"

#: src/services/data-staging/DataDeliveryService.cpp:266
#, c-format
msgid "Failed to create temp proxy at %s: %s"
msgstr "Misslyckades med att skapa temporär proxy på %s: %s"

#: src/services/data-staging/DataDeliveryService.cpp:273
#, c-format
msgid "Failed to change owner of temp proxy at %s to %i:%i: %s"
msgstr "Misslyckades med att ändra ägare på temporär proxy på %s till %i:%i: %s"

#: src/services/data-staging/DataDeliveryService.cpp:302
msgid "Invalid DTR"
msgstr "Ogiltig DTR"

#: src/services/data-staging/DataDeliveryService.cpp:306
#, c-format
msgid "Failed to remove temporary proxy %s: %s"
msgstr "Misslyckades med att ta bort temporär proxy %s: %s"

#: src/services/data-staging/DataDeliveryService.cpp:407
#, c-format
msgid "No such DTR %s"
msgstr "Ingen sådan DTR %s"

#: src/services/data-staging/DataDeliveryService.cpp:425
#, c-format
msgid "DTR %s failed: %s"
msgstr "DTR %s misslyckades: %s"

#: src/services/data-staging/DataDeliveryService.cpp:436
#, c-format
msgid "DTR %s finished successfully"
msgstr "DTR %s avslutades framgångsrikt"

#: src/services/data-staging/DataDeliveryService.cpp:446
#, c-format
msgid "DTR %s still in progress (%lluB transferred)"
msgstr "DTR %s pågår fortfarande (%lluB överförda)"

#: src/services/data-staging/DataDeliveryService.cpp:506
#, c-format
msgid "No active DTR %s"
msgstr "Ingen aktiv DTR %s"

#: src/services/data-staging/DataDeliveryService.cpp:516
#, c-format
msgid "DTR %s was already cancelled"
msgstr "DTR %s har redan avbrutits"
avbrutits" #: src/services/data-staging/DataDeliveryService.cpp:525 #, c-format msgid "DTR %s could not be cancelled" msgstr "DTR %s kunde inte avbrytas" #: src/services/data-staging/DataDeliveryService.cpp:569 #, c-format msgid "Failed to get load average: %s" msgstr "Misslyckades med att fÃ¥ medellast: %s" #: src/services/data-staging/DataDeliveryService.cpp:593 msgid "Invalid configuration - no allowed IP address specified" msgstr "Ogiltiga inställningar - ingen tillÃ¥ten IP-adress angiven" #: src/services/data-staging/DataDeliveryService.cpp:597 msgid "Invalid configuration - no transfer dirs specified" msgstr "Ogiltiga inställningar - inga överföringskataloger angivna" #: src/services/data-staging/DataDeliveryService.cpp:608 msgid "Failed to start archival thread" msgstr "Misslyckades med att starta arkiveringtrÃ¥d" #: src/services/data-staging/DataDeliveryService.cpp:633 msgid "Shutting down data delivery service" msgstr "Stänger ner dataleveranstjänst" #: src/services/data-staging/DataDeliveryService.cpp:642 msgid "Unauthorized" msgstr "Oauktoriserad" #: src/services/data-staging/DataDeliveryService.cpp:728 msgid "Only POST is supported in DataDeliveryService" msgstr "Endast POST stöds i dataleveranstjänst" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "EchoService (python) konstruktor anropad" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "EchoService (python) har prefix %(prefix)s och suffix %(suffix)s" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "EchoService (python) destruktor anropad" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "EchoService (python) trÃ¥dtest startar" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "EchoService (python) trÃ¥dtest, iteration %(iteration)s %(status)s" #: src/services/examples/echo_python/EchoService.py:82 msgid "EchoService (python) 'Process' called" msgstr "EchoService (python) 'Process' anropad" #: src/services/examples/echo_python/EchoService.py:86 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" #: src/services/examples/echo_python/EchoService.py:87 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "inmsg.Attributes().getAll() = %s " #: src/services/examples/echo_python/EchoService.py:88 #, python-format msgid "EchoService (python) got: %s " msgstr "EchoService (python) fick: %s " #: src/services/examples/echo_python/EchoService.py:93 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "EchoService (python) request_namespace: %s" #: src/services/examples/echo_python/EchoService.py:99 #: src/services/examples/echo_python/EchoService.py:171 #, python-format msgid "outpayload %s" msgstr "utnyttolast %s" #: src/services/examples/echo_python/EchoService.py:128 msgid "Calling https://localhost:60000/Echo using ClientSOAP" msgstr "Anropar https://localhost:60000/Echo med ClientSOAP" #: src/services/examples/echo_python/EchoService.py:131 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "Anropar http://localhost:60000/Echo med ClientSOAP" #: src/services/examples/echo_python/EchoService.py:137 #: 
#: src/services/examples/echo_python/EchoService.py:155
#, python-format
msgid "new_payload %s"
msgstr "ny nyttolast %s"

#: src/services/examples/echo_python/EchoService.py:149
msgid "Calling http://localhost:60000/Echo using httplib"
msgstr "Anropar http://localhost:60000/Echo med httplib"

#: src/services/examples/echo_python/EchoService.py:165
msgid "Start waiting 10 sec..."
msgstr "Börjar vänta 10 sek..."

#: src/services/examples/echo_python/EchoService.py:167
msgid "Waiting ends."
msgstr "Väntan slutar."

#: src/services/wrappers/python/pythonwrapper.cpp:103
#, c-format
msgid "Loading %u-th Python service"
msgstr "Laddar %u:e Python-tjänsten"

#: src/services/wrappers/python/pythonwrapper.cpp:107
#, c-format
msgid "Initialized %u-th Python service"
msgstr "Initierade %u:e Python-tjänsten"

#: src/services/wrappers/python/pythonwrapper.cpp:142
msgid "Invalid class name"
msgstr "Ogiltigt klassnamn"

#: src/services/wrappers/python/pythonwrapper.cpp:147
#, c-format
msgid "class name: %s"
msgstr "klassnamn: %s"

#: src/services/wrappers/python/pythonwrapper.cpp:148
#, c-format
msgid "module name: %s"
msgstr "modulnamn: %s"

#: src/services/wrappers/python/pythonwrapper.cpp:205
msgid "Cannot find ARC Config class"
msgstr "Kan inte hitta ARC:s inställningsklass"

#: src/services/wrappers/python/pythonwrapper.cpp:212
msgid "Config class is not an object"
msgstr "Inställningsklass är inte ett objekt"

#: src/services/wrappers/python/pythonwrapper.cpp:220
msgid "Cannot get dictionary of module"
msgstr "Kan inte erhålla ordlista för modulen"

#: src/services/wrappers/python/pythonwrapper.cpp:229
msgid "Cannot find service class"
msgstr "Kan inte hitta tjänsteklass"

#: src/services/wrappers/python/pythonwrapper.cpp:238
msgid "Cannot create config argument"
msgstr "Kan inte skapa inställningsargument"

#: src/services/wrappers/python/pythonwrapper.cpp:245
msgid "Cannot convert config to Python object"
msgstr "Kan inte konvertera inställningar till pythonobjekt"

#: src/services/wrappers/python/pythonwrapper.cpp:268
#, c-format
msgid "%s is not an object"
msgstr "%s är inte ett objekt"

#: src/services/wrappers/python/pythonwrapper.cpp:274
msgid "Message class is not an object"
msgstr "Meddelandeklass är inte ett objekt"

#: src/services/wrappers/python/pythonwrapper.cpp:280
msgid "Python Wrapper constructor succeeded"
msgstr "Python-wrapper-konstruktor lyckades"

#: src/services/wrappers/python/pythonwrapper.cpp:295
#, c-format
msgid "Python Wrapper destructor (%d)"
msgstr "Python-wrapper-destruktor (%d)"

#: src/services/wrappers/python/pythonwrapper.cpp:328
msgid "Python interpreter locked"
msgstr "Pythontolkare låst"

#: src/services/wrappers/python/pythonwrapper.cpp:332
msgid "Python interpreter released"
msgstr "Pythontolkare frigjord"

#: src/services/wrappers/python/pythonwrapper.cpp:403
msgid "Python wrapper process called"
msgstr "Python-wrapper-process anropad"

#: src/services/wrappers/python/pythonwrapper.cpp:412
msgid "Failed to create input SOAP container"
msgstr "Misslyckades med att skapa indata-SOAP-behållare"

#: src/services/wrappers/python/pythonwrapper.cpp:422
msgid "Cannot create inmsg argument"
msgstr "Kan inte skapa inmsg-argument"

#: src/services/wrappers/python/pythonwrapper.cpp:436
msgid "Cannot find ARC Message class"
msgstr "Kan inte hitta ARC:s meddelandeklass"

#: src/services/wrappers/python/pythonwrapper.cpp:442
msgid "Cannot convert inmsg to Python object"
msgstr "Kan inte konvertera inmsg till pythonobjekt"

#: src/services/wrappers/python/pythonwrapper.cpp:451
msgid "Failed to create SOAP containers"
msgstr "Misslyckades med att skapa SOAP-behållare"
containers" msgstr "Misslyckades med att skapa SOAP-behÃ¥llare" #: src/services/wrappers/python/pythonwrapper.cpp:457 msgid "Cannot create outmsg argument" msgstr "Kan inte skapa outmsg-argument" #: src/services/wrappers/python/pythonwrapper.cpp:463 msgid "Cannot convert outmsg to Python object" msgstr "Kan inte konvertera outmsg till pythonobjekt" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:12 msgid "Creating a soap client" msgstr "Skapar en echo-klient" #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:61 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:22 msgid "Creating and sending request" msgstr "Skapar och skickar begäran" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 #: src/tests/echo/test_clientinterface.py:30 msgid "SOAP invocation failed" msgstr "SOAP-anrop misslyckades" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "Skapar en http-klient" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invocation failed" msgstr "HTTP med SAML2SSO-anrop misslyckades" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "Det fanns inget HTTP-svar" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invocation failed" msgstr "SOAP med SAML2SSO-anrop misslyckades" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:77 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "Skapar en delegerings-SOAP-klient" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:52 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "Delegering till ARCs delegeringstjänst misslyckades" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:57 #: src/tests/delegation/test_delegation_client.cpp:89 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "Delegerings-ID: %s" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "Delegerad referens frÃ¥n delegeringstjänst: %s" #: 
src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:84 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "Delegering till gridsites delegeringstjänst misslyckades" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "Indata är inte SOAP" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "echo: Oauktoriserad" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "Begäran stöds inte - %s" #: src/tests/count/test_client.cpp:50 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "Misslyckades med att ladda in klientinställningar" #: src/tests/count/test_client.cpp:54 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "Klientsidans MCCer har laddats in" #: src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr "Klientkedjan har ingen ingÃ¥ngspunkt" #: src/tests/count/test_client.cpp:84 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr "Begäran misslyckades" #: src/tests/count/test_client.cpp:90 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr "Det finns inget svar" #: src/tests/count/test_client.cpp:97 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr "Svaret är inte SOAP" #: src/tests/count/test_service.cpp:22 src/tests/echo/test.cpp:23 #: src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr "Skapar tjänstesidokedjan" #: src/tests/count/test_service.cpp:25 src/tests/echo/test.cpp:26 #: src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr "Misslyckades med att ladda in tjänsteinställningar" #: src/tests/count/test_service.cpp:30 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr "Tjänsten väntar pÃ¥ begärningar" #: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr "Skapar klientgränssnitt" #: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" msgstr "Begäran lyckades!!!" 
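The .po entries above are ordinary GNU gettext data: a "#, c-format" entry maps a printf-style English msgid (the exact string used in the C++ sources referenced by the "#:" comments) to its translation. A minimal sketch of how such an entry is resolved at runtime is shown below; the locale directory and the direct libintl calls are illustrative assumptions rather than this package's own code, though the text domain "nordugrid-arc" matches the template name.

    // Sketch: resolving a "#, c-format" catalog entry with GNU libintl.
    // Assumption: the .mo files compiled from these .po files are installed as
    // <prefix>/<lang>/LC_MESSAGES/nordugrid-arc.mo; /usr/share/locale is a
    // hypothetical prefix.
    #include <libintl.h>
    #include <clocale>
    #include <cstdio>

    int main() {
      std::setlocale(LC_ALL, "");                           // honour e.g. LANG=sv_SE.UTF-8
      bindtextdomain("nordugrid-arc", "/usr/share/locale"); // hypothetical install prefix
      textdomain("nordugrid-arc");
      // msgid copied verbatim from the catalog above; under a Swedish locale
      // this prints "Delegerings-ID: 42".
      std::printf(gettext("Delegation ID: %s"), "42");
      std::printf("\n");
      return 0;
    }

The "c-format" flag is what lets msgfmt -c verify that each translation preserves the conversion specifiers of its msgid, which is why the %s and %d placeholders recur unchanged in the translated strings above.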
nordugrid-arc-7.1.1/po/PaxHeaders/hu.gmo0000644000000000000000000000013215067751432015106 xustar0030 mtime=1759499034.573418608 30 atime=1759499034.572507267 30 ctime=1759499034.639120643 nordugrid-arc-7.1.1/po/hu.gmo0000644000175000002070000001637115067751432017010 0ustar00mockbuildmock00000000000000[hu.gmo: compiled GNU gettext catalog (binary) carrying the Hungarian translations for the ARC client tools; its header records Project-Id-Version: Arc, Report-Msgid-Bugs-To: support@nordugrid.org, PO-Revision-Date: 2010-07-05 12:25+0100, Last-Translator: Gábor Rőczei, Language-Team: Hungarian, Content-Type: text/plain; charset=UTF-8, Plural-Forms: nplurals=2; plural=(n != 1); the binary payload is not reproducible as text]
nordugrid-arc-7.1.1/po/PaxHeaders/nordugrid-arc.pot0000644000000000000000000000013115067751430017247 xustar0030 mtime=1759499032.888481678 29 atime=1759499032.99848335 30 ctime=1759499034.641591703 nordugrid-arc-7.1.1/po/nordugrid-arc.pot0000644000175000002070000163424115067751430021165 0ustar00mockbuildmock00000000000000# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR NorduGrid collaboration # This file is distributed under the same license as the nordugrid-arc package. # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
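# For reference, a completed catalog in this package (the Hungarian one above)
# fills these header placeholders roughly as follows; the values are quoted
# from that catalog and are illustrative here, not part of the template:
#   "Project-Id-Version: Arc\n"
#   "PO-Revision-Date: 2010-07-05 12:25+0100\n"
#   "Last-Translator: Gábor Rőczei\n"
#   "Language-Team: Hungarian\n"
#   "Content-Type: text/plain; charset=UTF-8\n"
#   "Plural-Forms: nplurals=2; plural=(n != 1);\n"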
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: nordugrid-arc 7.1.1\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2025-10-03 15:43+0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" "Language-Team: LANGUAGE <LL@li.org>\n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n" #: src/clients/compute/arccat.cpp:38 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresume.cpp:32 #: src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "" #: src/clients/compute/arccat.cpp:39 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." msgstr "" #: src/clients/compute/arccat.cpp:46 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresume.cpp:37 src/clients/compute/arcstat.cpp:42 #: src/clients/compute/arcsub.cpp:53 src/clients/compute/arcsync.cpp:147 #: src/clients/compute/arctest.cpp:67 src/clients/credentials/arcproxy.cpp:484 #: src/clients/data/arccp.cpp:652 src/clients/data/arcls.cpp:371 #: src/clients/data/arcmkdir.cpp:149 src/clients/data/arcrename.cpp:160 #: src/clients/data/arcrm.cpp:174 src/hed/daemon/unix/main_unix.cpp:345 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1265 #: src/hed/libs/data/DataExternalHelper.cpp:358 #, c-format msgid "%s version %s" msgstr "" #: src/clients/compute/arccat.cpp:55 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresume.cpp:46 src/clients/compute/arcstat.cpp:51 #: src/clients/compute/arcsub.cpp:62 src/clients/compute/arcsync.cpp:156 #: src/clients/compute/arctest.cpp:89 src/clients/credentials/arcproxy.cpp:492 #: src/clients/data/arccp.cpp:659 src/clients/data/arcls.cpp:379 #: src/clients/data/arcmkdir.cpp:157 src/clients/data/arcrename.cpp:168 #: src/clients/data/arcrm.cpp:183 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:192 #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, c-format msgid "Running command: %s" msgstr "" #: src/clients/compute/arccat.cpp:66 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresume.cpp:50 src/clients/compute/arcstat.cpp:62 #: src/clients/compute/arcsub.cpp:66 src/clients/compute/arcsync.cpp:167 #: src/clients/compute/arctest.cpp:93 src/clients/data/arccp.cpp:682 #: src/clients/data/arcls.cpp:401 src/clients/data/arcmkdir.cpp:179 #: src/clients/data/arcrename.cpp:190 src/clients/data/arcrm.cpp:205 msgid "Failed configuration initialization" msgstr "" #: src/clients/compute/arccat.cpp:78 src/clients/compute/arcclean.cpp:73 #: src/clients/compute/arcget.cpp:87 src/clients/compute/arckill.cpp:72 #: src/clients/compute/arcrenew.cpp:69 src/clients/compute/arcresume.cpp:69 #: src/clients/compute/arcstat.cpp:74 #, c-format msgid "Cannot read specified jobid file: %s" msgstr "" #: src/clients/compute/arccat.cpp:108 
src/clients/compute/arcclean.cpp:103 #: src/clients/compute/arcget.cpp:117 src/clients/compute/arckill.cpp:102 #: src/clients/compute/arcrenew.cpp:99 src/clients/compute/arcresume.cpp:99 #: src/clients/compute/arcstat.cpp:127 msgid "No jobs given" msgstr "" #: src/clients/compute/arccat.cpp:121 src/clients/compute/arcclean.cpp:116 #: src/clients/compute/arcget.cpp:130 src/clients/compute/arckill.cpp:115 #: src/clients/compute/arcrenew.cpp:112 src/clients/compute/arcresume.cpp:112 #: src/clients/compute/arcstat.cpp:139 #, c-format msgid "Job list file (%s) doesn't exist" msgstr "" #: src/clients/compute/arccat.cpp:128 src/clients/compute/arcclean.cpp:123 #: src/clients/compute/arcget.cpp:137 src/clients/compute/arckill.cpp:122 #: src/clients/compute/arcrenew.cpp:119 src/clients/compute/arcresume.cpp:119 #: src/clients/compute/arcstat.cpp:146 src/clients/compute/arctest.cpp:296 #, c-format msgid "Unable to read job information from file (%s)" msgstr "" #: src/clients/compute/arccat.cpp:137 src/clients/compute/arcclean.cpp:131 #: src/clients/compute/arcget.cpp:145 src/clients/compute/arckill.cpp:130 #: src/clients/compute/arcrenew.cpp:128 src/clients/compute/arcresume.cpp:128 #: src/clients/compute/arcstat.cpp:155 #, c-format msgid "Warning: Job not found in job list: %s" msgstr "" #: src/clients/compute/arccat.cpp:150 src/clients/compute/arcclean.cpp:186 #: src/clients/compute/arcget.cpp:158 src/clients/compute/arckill.cpp:142 #: src/clients/compute/arcrenew.cpp:140 src/clients/compute/arcresume.cpp:140 msgid "No jobs" msgstr "" #: src/clients/compute/arccat.cpp:165 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "" #: src/clients/compute/arccat.cpp:166 src/clients/compute/arccat.cpp:172 #, c-format msgid "Cannot create output of %s for any jobs" msgstr "" #: src/clients/compute/arccat.cpp:173 #, c-format msgid "Invalid destination URL %s" msgstr "" #: src/clients/compute/arccat.cpp:191 #, c-format msgid "Job deleted: %s" msgstr "" #: src/clients/compute/arccat.cpp:201 #, c-format msgid "Job has not started yet: %s" msgstr "" #: src/clients/compute/arccat.cpp:242 #, c-format msgid "Cannot determine the %s location: %s" msgstr "" #: src/clients/compute/arccat.cpp:247 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "" #: src/clients/compute/arccat.cpp:260 #, c-format msgid "Catting %s for job %s" msgstr "" #: src/clients/compute/arcclean.cpp:35 msgid "The arcclean command removes a job from the computing resource." msgstr "" #: src/clients/compute/arcclean.cpp:155 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" #: src/clients/compute/arcclean.cpp:158 msgid "Are you sure you want to clean jobs missing information?" msgstr "" #: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "y" msgstr "" #: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "n" msgstr "" #: src/clients/compute/arcclean.cpp:164 msgid "Jobs missing information will not be cleaned!" 
msgstr "" #: src/clients/compute/arcclean.cpp:180 src/clients/compute/arctest.cpp:300 #, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "" #: src/clients/compute/arcclean.cpp:181 msgid "" " Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" #: src/clients/compute/arcclean.cpp:190 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "" #: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "" #: src/clients/compute/arcget.cpp:75 #, c-format msgid "Job download directory from user configuration file: %s" msgstr "" #: src/clients/compute/arcget.cpp:78 msgid "Job download directory will be created in present working directory." msgstr "" #: src/clients/compute/arcget.cpp:82 #, c-format msgid "Job download directory: %s" msgstr "" #: src/clients/compute/arcget.cpp:168 #, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "" #: src/clients/compute/arcget.cpp:178 #, c-format msgid "Results stored at: %s" msgstr "" #: src/clients/compute/arcget.cpp:190 src/clients/compute/arckill.cpp:158 msgid "Warning: Some jobs were not removed from server" msgstr "" #: src/clients/compute/arcget.cpp:191 src/clients/compute/arcget.cpp:198 #: src/clients/compute/arckill.cpp:159 msgid " Use arcclean to remove retrieved jobs from job list" msgstr "" #: src/clients/compute/arcget.cpp:197 src/clients/compute/arckill.cpp:165 #, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "" #: src/clients/compute/arcget.cpp:202 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arcget.cpp:206 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "" #: src/clients/compute/arcinfo.cpp:34 msgid "[resource ...]" msgstr "" #: src/clients/compute/arcinfo.cpp:35 msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "" #: src/clients/compute/arcinfo.cpp:141 msgid "Information endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:152 msgid "Submission endpoint" msgstr "" #: src/clients/compute/arcinfo.cpp:154 msgid "status" msgstr "" #: src/clients/compute/arcinfo.cpp:156 msgid "interface" msgstr "" #: src/clients/compute/arcinfo.cpp:175 msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "" #: src/clients/compute/arcinfo.cpp:188 msgid "ERROR: Failed to retrieve information" msgstr "" #: src/clients/compute/arcinfo.cpp:190 msgid "from the following endpoints:" msgstr "" #: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." msgstr "" #: src/clients/compute/arckill.cpp:166 msgid "" " Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" #: src/clients/compute/arckill.cpp:169 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "" #: src/clients/compute/arckill.cpp:171 #, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "" #: src/clients/compute/arcrenew.cpp:146 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "" #: src/clients/compute/arcresume.cpp:146 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "" #: src/clients/compute/arcstat.cpp:35 msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." 
msgstr "" #: src/clients/compute/arcstat.cpp:101 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "" #: src/clients/compute/arcstat.cpp:171 msgid "No jobs found, try later" msgstr "" #: src/clients/compute/arcstat.cpp:215 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "" #: src/clients/compute/arcsub.cpp:45 msgid "[filename ...]" msgstr "" #: src/clients/compute/arcsub.cpp:46 msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." msgstr "" #: src/clients/compute/arcsub.cpp:97 msgid "No job description input specified" msgstr "" #: src/clients/compute/arcsub.cpp:110 #, c-format msgid "Can not open job description file: %s" msgstr "" #: src/clients/compute/arcsub.cpp:138 src/clients/compute/arcsub.cpp:166 msgid "Invalid JobDescription:" msgstr "" #: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:250 msgid "" "Cannot adapt job description to the submission target when information " "discovery is turned off" msgstr "" #: src/clients/compute/arcsync.cpp:66 src/clients/compute/arcsync.cpp:177 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "" #: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "" #: src/clients/compute/arcsync.cpp:113 #, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "" #: src/clients/compute/arcsync.cpp:140 msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given CEs or registry servers." msgstr "" #: src/clients/compute/arcsync.cpp:183 #, c-format msgid "Warning: Unable to read local list of jobs from file (%s)" msgstr "" #: src/clients/compute/arcsync.cpp:188 #, c-format msgid "Warning: Unable to truncate local list of jobs in file (%s)" msgstr "" #: src/clients/compute/arcsync.cpp:194 #, c-format msgid "Warning: Unable to create job list file (%s), jobs list is destroyed" msgstr "" #: src/clients/compute/arcsync.cpp:198 #, c-format msgid "" "Warning: Failed to write local list of jobs into file (%s), jobs list is " "destroyed" msgstr "" #: src/clients/compute/arcsync.cpp:231 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" #: src/clients/compute/arcsync.cpp:236 msgid "Are you sure you want to synchronize your local job list?" msgstr "" #: src/clients/compute/arcsync.cpp:241 msgid "Cancelling synchronization request" msgstr "" #: src/clients/compute/arcsync.cpp:251 msgid "" "No services specified. Please configure default services in the client " "configuration, or specify a cluster or registry (-C or -Y options, see " "arcsync -h)." msgstr "" #: src/clients/compute/arctest.cpp:60 msgid " " msgstr "" #: src/clients/compute/arctest.cpp:61 msgid "The arctest command is used for testing clusters as resources." 
msgstr "" #: src/clients/compute/arctest.cpp:73 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" #: src/clients/compute/arctest.cpp:80 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." msgstr "" #: src/clients/compute/arctest.cpp:118 msgid "Certificate information:" msgstr "" #: src/clients/compute/arctest.cpp:122 msgid "No user-certificate found" msgstr "" #: src/clients/compute/arctest.cpp:125 #, c-format msgid "Certificate: %s" msgstr "" #: src/clients/compute/arctest.cpp:127 #, c-format msgid "Subject name: %s" msgstr "" #: src/clients/compute/arctest.cpp:128 #, c-format msgid "Valid until: %s" msgstr "" #: src/clients/compute/arctest.cpp:132 msgid "Unable to determine certificate information" msgstr "" #: src/clients/compute/arctest.cpp:136 msgid "Proxy certificate information:" msgstr "" #: src/clients/compute/arctest.cpp:138 msgid "No proxy found" msgstr "" #: src/clients/compute/arctest.cpp:141 #, c-format msgid "Proxy: %s" msgstr "" #: src/clients/compute/arctest.cpp:142 #, c-format msgid "Proxy-subject: %s" msgstr "" #: src/clients/compute/arctest.cpp:144 msgid "Valid for: Proxy expired" msgstr "" #: src/clients/compute/arctest.cpp:146 msgid "Valid for: Proxy not valid" msgstr "" #: src/clients/compute/arctest.cpp:148 #, c-format msgid "Valid for: %s" msgstr "" #: src/clients/compute/arctest.cpp:153 #, c-format msgid "Certificate issuer: %s" msgstr "" #: src/clients/compute/arctest.cpp:157 msgid "CA-certificates installed:" msgstr "" #: src/clients/compute/arctest.cpp:179 msgid "Unable to detect if issuer certificate is installed." msgstr "" #: src/clients/compute/arctest.cpp:182 msgid "Your issuer's certificate is not installed" msgstr "" #: src/clients/compute/arctest.cpp:196 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "" #: src/clients/compute/arctest.cpp:267 #, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "" #: src/clients/compute/arctest.cpp:268 #, c-format msgid "Test submitted with jobid: %s" msgstr "" #: src/clients/compute/arctest.cpp:283 #, c-format msgid "Computing service: %s" msgstr "" #: src/clients/compute/arctest.cpp:289 msgid "Test failed, no more possible targets" msgstr "" #: src/clients/compute/arctest.cpp:302 src/clients/compute/submit.cpp:49 msgid "To recover missing jobs, run arcsync" msgstr "" #: src/clients/compute/arctest.cpp:315 src/clients/compute/submit.cpp:159 #, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." 
msgstr "" #: src/clients/compute/arctest.cpp:325 src/clients/compute/submit.cpp:175 #, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "" #: src/clients/compute/arctest.cpp:329 src/clients/compute/submit.cpp:179 #, c-format msgid "Job description to be sent to %s:" msgstr "" #: src/clients/compute/submit.cpp:34 #, c-format msgid "Job submitted with jobid: %s" msgstr "" #: src/clients/compute/submit.cpp:40 #, c-format msgid "Cannot write job IDs to file (%s)" msgstr "" #: src/clients/compute/submit.cpp:45 #, c-format msgid "Unable to open job list file (%s), unknown format" msgstr "" #: src/clients/compute/submit.cpp:47 #, c-format msgid "Failed to write job information to database (%s)" msgstr "" #: src/clients/compute/submit.cpp:51 #, c-format msgid "Record about new job successfully added to the database (%s)" msgstr "" #: src/clients/compute/submit.cpp:57 msgid "Job submission summary:" msgstr "" #: src/clients/compute/submit.cpp:59 #, c-format msgid "%d of %d jobs were submitted" msgstr "" #: src/clients/compute/submit.cpp:61 msgid "The following jobs were not submitted:" msgstr "" #: src/clients/compute/submit.cpp:65 msgid "Job nr." msgstr "" #: src/clients/compute/submit.cpp:75 #, c-format msgid "ERROR: Unable to load broker %s" msgstr "" #: src/clients/compute/submit.cpp:79 msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" #: src/clients/compute/submit.cpp:83 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "" #: src/clients/compute/submit.cpp:100 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" #: src/clients/compute/submit.cpp:129 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" #: src/clients/compute/submit.cpp:130 msgid "Original job description is listed below:" msgstr "" #: src/clients/compute/submit.cpp:142 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" #: src/clients/compute/submit.cpp:197 msgid "" "Unable to prepare job description according to needs of the target resource." 
msgstr "" #: src/clients/compute/submit.cpp:281 src/clients/compute/submit.cpp:311 #, c-format msgid "Service endpoint %s (type %s) added to the list for resource discovery" msgstr "" #: src/clients/compute/submit.cpp:291 msgid "" "There are no endpoints in registry that match requested info endpoint type" msgstr "" #: src/clients/compute/submit.cpp:332 #, c-format msgid "Service endpoint %s (type %s) added to the list for direct submission" msgstr "" #: src/clients/compute/submit.cpp:340 msgid "" "There are no endpoints in registry that match requested submission endpoint " "type" msgstr "" #: src/clients/compute/utils.cpp:111 #, c-format msgid "Types of execution services that %s is able to submit jobs to:" msgstr "" #: src/clients/compute/utils.cpp:114 #, c-format msgid "Types of registry services that %s is able to collect information from:" msgstr "" #: src/clients/compute/utils.cpp:117 #, c-format msgid "" "Types of local information services that %s is able to collect information " "from:" msgstr "" #: src/clients/compute/utils.cpp:120 #, c-format msgid "" "Types of local information services that %s is able to collect job " "information from:" msgstr "" #: src/clients/compute/utils.cpp:123 #, c-format msgid "Types of services that %s is able to manage jobs at:" msgstr "" #: src/clients/compute/utils.cpp:126 #, c-format msgid "Job description languages supported by %s:" msgstr "" #: src/clients/compute/utils.cpp:129 #, c-format msgid "Brokers available to %s:" msgstr "" #: src/clients/compute/utils.cpp:152 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" #: src/clients/compute/utils.cpp:162 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:167 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" #: src/clients/compute/utils.cpp:179 src/clients/data/utils.cpp:28 msgid "" "Cannot find any token. Please run 'oidc-token' or use similar\n" " utility to obtain authentication token!" msgstr "" #: src/clients/compute/utils.cpp:308 #, c-format msgid "Unsupported submission endpoint type: %s" msgstr "" #: src/clients/compute/utils.cpp:327 msgid "" "Requested to skip resource discovery. Will try direct submission to arcrest " "endpoint type." 
msgstr "" #: src/clients/compute/utils.cpp:332 #, c-format msgid "Unsupported information endpoint type: %s" msgstr "" #: src/clients/compute/utils.cpp:385 msgid "Other actions" msgstr "" #: src/clients/compute/utils.cpp:386 msgid "Brokering and filtering" msgstr "" #: src/clients/compute/utils.cpp:387 msgid "Output format modifiers" msgstr "" #: src/clients/compute/utils.cpp:388 msgid "Behaviour tuning" msgstr "" #: src/clients/compute/utils.cpp:389 msgid "Target endpoint selection" msgstr "" #: src/clients/compute/utils.cpp:393 msgid "computing element hostname or a complete endpoint URL" msgstr "" #: src/clients/compute/utils.cpp:394 src/clients/compute/utils.cpp:404 msgid "ce" msgstr "" #: src/clients/compute/utils.cpp:398 msgid "registry service URL with optional specification of protocol" msgstr "" #: src/clients/compute/utils.cpp:399 msgid "registry" msgstr "" #: src/clients/compute/utils.cpp:403 msgid "only select jobs that were submitted to this computing element" msgstr "" #: src/clients/compute/utils.cpp:410 msgid "" "require the specified endpoint type for job submission.\n" "\tAllowed values are: arcrest and internal." msgstr "" #: src/clients/compute/utils.cpp:412 src/clients/compute/utils.cpp:426 #: src/clients/compute/utils.cpp:434 msgid "type" msgstr "" #: src/clients/compute/utils.cpp:418 msgid "skip the service with the given URL during service discovery" msgstr "" #: src/clients/compute/utils.cpp:419 src/clients/compute/utils.cpp:603 #: src/clients/data/arccp.cpp:583 msgid "URL" msgstr "" #: src/clients/compute/utils.cpp:423 msgid "" "require information query using the specified information endpoint type.\n" "\tSpecial value 'NONE' will disable all resource information queries and the " "following brokering.\n" "\tAllowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal." msgstr "" #: src/clients/compute/utils.cpp:432 msgid "" "only get information about execution targets that support this job submission " "endpoint type.\n" "\tAllowed values are: arcrest and internal." 
msgstr "" #: src/clients/compute/utils.cpp:440 msgid "keep the files on the server (do not clean)" msgstr "" #: src/clients/compute/utils.cpp:446 msgid "do not ask for verification" msgstr "" #: src/clients/compute/utils.cpp:450 msgid "truncate the joblist before synchronizing" msgstr "" #: src/clients/compute/utils.cpp:454 msgid "do not collect information, only convert jobs storage format" msgstr "" #: src/clients/compute/utils.cpp:460 src/clients/data/arcls.cpp:277 msgid "long format (more information)" msgstr "" #: src/clients/compute/utils.cpp:466 msgid "show the stdout of the job (default)" msgstr "" #: src/clients/compute/utils.cpp:470 msgid "show the stderr of the job" msgstr "" #: src/clients/compute/utils.cpp:474 msgid "show the CE's error log of the job" msgstr "" #: src/clients/compute/utils.cpp:478 msgid "show the specified file from job's session directory" msgstr "" #: src/clients/compute/utils.cpp:479 msgid "filepath" msgstr "" #: src/clients/compute/utils.cpp:485 msgid "" "download directory (the job directory will be created in this directory)" msgstr "" #: src/clients/compute/utils.cpp:487 msgid "dirname" msgstr "" #: src/clients/compute/utils.cpp:491 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" #: src/clients/compute/utils.cpp:496 msgid "force download (overwrite existing job directory)" msgstr "" #: src/clients/compute/utils.cpp:502 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "" #: src/clients/compute/utils.cpp:506 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:507 src/clients/compute/utils.cpp:510 msgid "order" msgstr "" #: src/clients/compute/utils.cpp:509 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" #: src/clients/compute/utils.cpp:513 msgid "show jobs where status information is unavailable" msgstr "" #: src/clients/compute/utils.cpp:517 msgid "show status information in JSON format" msgstr "" #: src/clients/compute/utils.cpp:523 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" #: src/clients/compute/utils.cpp:530 msgid "submit test job given by the number" msgstr "" #: src/clients/compute/utils.cpp:531 src/clients/compute/utils.cpp:535 msgid "int" msgstr "" #: src/clients/compute/utils.cpp:534 msgid "test job runtime specified by the number" msgstr "" #: src/clients/compute/utils.cpp:541 msgid "only select jobs whose status is statusstr" msgstr "" #: src/clients/compute/utils.cpp:542 msgid "statusstr" msgstr "" #: src/clients/compute/utils.cpp:546 msgid "all jobs" msgstr "" #: src/clients/compute/utils.cpp:552 msgid "jobdescription string describing the job to be submitted" msgstr "" #: src/clients/compute/utils.cpp:554 src/clients/compute/utils.cpp:560 #: src/clients/credentials/arcproxy.cpp:353 #: src/clients/credentials/arcproxy.cpp:360 #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxy.cpp:386 #: src/clients/credentials/arcproxy.cpp:404 #: src/clients/credentials/arcproxy.cpp:408 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:432 #: src/clients/credentials/arcproxy.cpp:436 msgid "string" msgstr "" #: src/clients/compute/utils.cpp:558 msgid "jobdescription file describing the job to be submitted" msgstr "" #: src/clients/compute/utils.cpp:566 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" #: 
src/clients/compute/utils.cpp:567 msgid "broker" msgstr "" #: src/clients/compute/utils.cpp:570 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "" #: src/clients/compute/utils.cpp:571 src/clients/compute/utils.cpp:598 #: src/clients/compute/utils.cpp:625 src/clients/compute/utils.cpp:633 #: src/clients/credentials/arcproxy.cpp:445 src/clients/data/arccp.cpp:603 #: src/clients/data/arcls.cpp:322 src/clients/data/arcmkdir.cpp:100 #: src/clients/data/arcrename.cpp:111 src/clients/data/arcrm.cpp:125 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:53 msgid "filename" msgstr "" #: src/clients/compute/utils.cpp:575 msgid "do not perform any delegation for submitted jobs" msgstr "" #: src/clients/compute/utils.cpp:579 msgid "perform X.509 delegation for submitted jobs" msgstr "" #: src/clients/compute/utils.cpp:583 msgid "perform token delegation for submitted jobs" msgstr "" #: src/clients/compute/utils.cpp:587 msgid "" "request at most this number of job instances submitted in single submit " "request" msgstr "" #: src/clients/compute/utils.cpp:591 msgid "" "request at least this number of job instances submitted in single submit " "request" msgstr "" #: src/clients/compute/utils.cpp:597 msgid "a file containing a list of jobIDs" msgstr "" #: src/clients/compute/utils.cpp:602 msgid "skip jobs that are on a computing element with a given URL" msgstr "" #: src/clients/compute/utils.cpp:608 msgid "submit jobs as dry run (no submission to batch system)" msgstr "" #: src/clients/compute/utils.cpp:612 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" #: src/clients/compute/utils.cpp:618 msgid "prints info about installed user- and CA-certificates" msgstr "" #: src/clients/compute/utils.cpp:619 src/clients/credentials/arcproxy.cpp:469 #: src/clients/data/arccp.cpp:637 src/clients/data/arcls.cpp:356 #: src/clients/data/arcmkdir.cpp:134 src/clients/data/arcrename.cpp:145 #: src/clients/data/arcrm.cpp:159 msgid "allow TLS connection which failed verification" msgstr "" #: src/clients/compute/utils.cpp:624 #, c-format msgid "the file storing information about active jobs (default %s)" msgstr "" #: src/clients/compute/utils.cpp:632 src/clients/credentials/arcproxy.cpp:444 #: src/clients/data/arccp.cpp:602 src/clients/data/arcls.cpp:321 #: src/clients/data/arcmkdir.cpp:99 src/clients/data/arcrename.cpp:110 #: src/clients/data/arcrm.cpp:124 msgid "configuration file (default ~/.arc/client.conf)" msgstr "" #: src/clients/compute/utils.cpp:635 src/clients/credentials/arcproxy.cpp:439 #: src/clients/data/arccp.cpp:597 src/clients/data/arcls.cpp:316 #: src/clients/data/arcmkdir.cpp:94 src/clients/data/arcrename.cpp:105 #: src/clients/data/arcrm.cpp:119 msgid "timeout in seconds (default 20)" msgstr "" #: src/clients/compute/utils.cpp:636 src/clients/credentials/arcproxy.cpp:440 #: src/clients/data/arccp.cpp:598 src/clients/data/arcls.cpp:317 #: src/clients/data/arcmkdir.cpp:95 src/clients/data/arcrename.cpp:106 #: src/clients/data/arcrm.cpp:120 msgid "seconds" msgstr "" #: src/clients/compute/utils.cpp:639 msgid "list the available plugins" msgstr "" #: src/clients/compute/utils.cpp:643 src/clients/credentials/arcproxy.cpp:449 #: src/clients/data/arccp.cpp:642 src/clients/data/arcls.cpp:361 #: src/clients/data/arcmkdir.cpp:139 src/clients/data/arcrename.cpp:150 #: src/clients/data/arcrm.cpp:164 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:190 #: 
src/services/a-rex/grid-manager/inputcheck.cpp:81 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:67 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "" #: src/clients/compute/utils.cpp:644 src/clients/credentials/arcproxy.cpp:450 #: src/clients/data/arccp.cpp:643 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:140 src/clients/data/arcrename.cpp:151 #: src/clients/data/arcrm.cpp:165 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:191 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:68 msgid "debuglevel" msgstr "" #: src/clients/compute/utils.cpp:646 src/clients/credentials/arcproxy.cpp:473 #: src/clients/data/arccp.cpp:646 src/clients/data/arcls.cpp:365 #: src/clients/data/arcmkdir.cpp:143 src/clients/data/arcrename.cpp:154 #: src/clients/data/arcrm.cpp:168 msgid "print version information" msgstr "" #: src/clients/compute/utils.cpp:652 src/clients/data/arccp.cpp:607 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:104 #: src/clients/data/arcrename.cpp:115 src/clients/data/arcrm.cpp:129 msgid "do not perform any authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:656 src/clients/data/arccp.cpp:612 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:109 #: src/clients/data/arcrename.cpp:120 src/clients/data/arcrm.cpp:134 msgid "perform X.509 authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:660 src/clients/data/arccp.cpp:617 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:114 #: src/clients/data/arcrename.cpp:125 src/clients/data/arcrm.cpp:139 msgid "perform token authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:664 src/clients/credentials/arcproxy.cpp:454 #: src/clients/data/arccp.cpp:622 src/clients/data/arcls.cpp:341 #: src/clients/data/arcmkdir.cpp:119 src/clients/data/arcrename.cpp:130 #: src/clients/data/arcrm.cpp:144 msgid "force using CA certificates configuration provided by OpenSSL" msgstr "" #: src/clients/compute/utils.cpp:668 src/clients/credentials/arcproxy.cpp:459 #: src/clients/data/arccp.cpp:627 src/clients/data/arcls.cpp:346 #: src/clients/data/arcmkdir.cpp:124 src/clients/data/arcrename.cpp:135 #: src/clients/data/arcrm.cpp:149 msgid "" "force using CA certificates configuration for Grid services (typically IGTF)" msgstr "" #: src/clients/compute/utils.cpp:672 src/clients/credentials/arcproxy.cpp:464 msgid "" "force using CA certificates configuration for Grid services (typically IGTF) " "and one provided by OpenSSL" msgstr "" #: src/clients/compute/utils.cpp:681 src/clients/compute/utils.cpp:688 #: src/clients/compute/utils.cpp:695 msgid "Conflicting delegation types specified." msgstr "" #: src/clients/compute/utils.cpp:727 src/clients/compute/utils.cpp:734 #: src/clients/compute/utils.cpp:741 src/clients/data/utils.cpp:41 #: src/clients/data/utils.cpp:48 src/clients/data/utils.cpp:55 msgid "Conflicting authentication types specified." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:151 #, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "" #: src/clients/credentials/arcproxy.cpp:167 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "" #: src/clients/credentials/arcproxy.cpp:176 #, c-format msgid " expiration time: %s " msgstr "" #: src/clients/credentials/arcproxy.cpp:180 #, c-format msgid " certificate dn: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:181 #, c-format msgid " issuer dn: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:182 #, c-format msgid " serial number: %d" msgstr "" #: src/clients/credentials/arcproxy.cpp:186 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:251 msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." msgstr "" #: src/clients/credentials/arcproxy.cpp:253 msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start\n" " from now)\n" "\n" " validityEnd=time\n" "\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and\n" " validityEnd not specified, the default is 12 hours for local proxy, and\n" " 168 hours for delegated proxy on myproxy server)\n" "\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, " "the\n" " default is the minimum value of 12 hours and validityPeriod)\n" "\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value\n" " of 12 hours and validityPeriod (which is lifetime of the delegated proxy " "on\n" " myproxy server))\n" "\n" " proxyPolicy=policy content\n" "\n" " proxyPolicyFile=policy file\n" "\n" " keybits=number - length of the key to generate. Default is 2048 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" "\n" " signingAlgorithm=name - signing algorithm to use for signing public key " "of\n" " proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256,\n" " sha384, sha512 and inherit (use algorithm of signing certificate). " "Default\n" " is inherit. 
With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" "\n" " identity - identity subject name of proxy certificate.\n" "\n" " issuer - issuer subject name of proxy certificate.\n" "\n" " ca - subject name of CA which issued initial certificate.\n" "\n" " path - file system path to file containing proxy.\n" "\n" " type - type of proxy certificate.\n" "\n" " validityStart - timestamp when proxy validity starts.\n" "\n" " validityEnd - timestamp when proxy validity ends.\n" "\n" " validityPeriod - duration of proxy validity in seconds.\n" "\n" " validityLeft - duration of proxy validity left in seconds.\n" "\n" " vomsVO - VO name represented by VOMS attribute\n" "\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" "\n" " vomsIssuer - subject of service which issued VOMS certificate\n" "\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" "\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" "\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" "\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" "\n" " proxyPolicy\n" "\n" " keybits - size of proxy certificate key in bits.\n" "\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" "\n" " myproxy - for accessing credentials at MyProxy service\n" "\n" " myproxynew - for creating credentials at MyProxy service\n" "\n" " all - for any purpose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" "\n" " int - interactively request password from console\n" "\n" " stdin - read password from standard input delimited by newline\n" "\n" " file:filename - read password from file named filename\n" "\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported." msgstr "" #: src/clients/credentials/arcproxy.cpp:315 msgid "path to the proxy file" msgstr "" #: src/clients/credentials/arcproxy.cpp:316 #: src/clients/credentials/arcproxy.cpp:320 #: src/clients/credentials/arcproxy.cpp:324 #: src/clients/credentials/arcproxy.cpp:328 #: src/clients/credentials/arcproxy.cpp:332 #: src/clients/credentials/arcproxy.cpp:336 src/clients/data/arccp.cpp:560 msgid "path" msgstr "" #: src/clients/credentials/arcproxy.cpp:319 msgid "" "path to the certificate file, it can be either PEM, DER, or PKCS12 formatted" msgstr "" #: src/clients/credentials/arcproxy.cpp:323 msgid "" "path to the private key file, if the certificate is in PKCS12 format, then " "no need to give private key" msgstr "" #: src/clients/credentials/arcproxy.cpp:327 msgid "" "path to the trusted certificate directory, only needed for the VOMS client " "functionality" msgstr "" #: src/clients/credentials/arcproxy.cpp:331 msgid "" "path to the top directory of VOMS *.lsc files, only needed for the VOMS " "client functionality" msgstr "" #: src/clients/credentials/arcproxy.cpp:335 msgid "path to the VOMS server configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:339 msgid "" "voms<:command>. 
Specify VOMS server\n" " More than one VOMS server can be specified like this:\n" " --voms VOa:command1 --voms VOb:command2.\n" " :command is optional, and is used to ask for specific attributes (e.g. " "roles)\n" " command options are:\n" "\n" " all --- put all of this DN's attributes into AC;\n" "\n" " list --- list all of the DN's attributes; will not create AC " "extension;\n" "\n" " /Role=yourRole --- specify the role, if this DN\n" " has such a role, the role will be put into AC;\n" "\n" " /voname/groupname/Role=yourRole --- specify the VO, group and " "role; if this DN\n" " has such a role, the role will be put into AC.\n" "\n" " If this option is not specified, values from configuration " "files are used.\n" " To avoid anything being used, specify -S with an empty value.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:356 msgid "" "group<:role>. Specify ordering of attributes\n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/" "testers:Tester\n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/" "testers:Tester\n" " Note that it does not make sense to specify the order if you have two or " "more different VOMS servers specified" msgstr "" #: src/clients/credentials/arcproxy.cpp:363 msgid "use GSI communication protocol for contacting VOMS services" msgstr "" #: src/clients/credentials/arcproxy.cpp:366 msgid "" "use HTTP communication protocol for contacting VOMS services that provide " "RESTful access\n" " Note for RESTful access, 'list' command and multiple VOMS " "servers are not supported\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:370 msgid "" "use old communication protocol for contacting VOMS services instead of " "RESTful access\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:373 msgid "" "this option is not functional (old GSI proxies are not supported anymore)" msgstr "" #: src/clients/credentials/arcproxy.cpp:376 msgid "print all information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:379 msgid "print selected information about this proxy." msgstr "" #: src/clients/credentials/arcproxy.cpp:382 msgid "remove proxy" msgstr "" #: src/clients/credentials/arcproxy.cpp:385 msgid "" "username to MyProxy server (if missing, subject of user certificate is used)" msgstr "" #: src/clients/credentials/arcproxy.cpp:390 msgid "" "don't prompt for a credential passphrase when retrieving a credential from " "a MyProxy server.\n" " The precondition of this choice is that the credential was PUT onto\n" " the MyProxy server without a passphrase by using the\n" " -R (--retrievable_by_cert) option.\n" " This option is specific to the GET command when contacting a Myproxy\n" " server." msgstr "" #: src/clients/credentials/arcproxy.cpp:401 msgid "" "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific to the PUT command when contacting a Myproxy\n" " server." msgstr "" #: src/clients/credentials/arcproxy.cpp:407 msgid "hostname[:port] of MyProxy server" msgstr "" #: src/clients/credentials/arcproxy.cpp:412 msgid "" "command to MyProxy server. 
The command can be PUT, GET, INFO, NEWPASS or " "DESTROY.\n" " PUT -- put delegated credentials to the MyProxy server;\n" "\n" " GET -- get delegated credentials from the MyProxy server;\n" "\n" " INFO -- get and present information about credentials stored " "at the MyProxy server;\n" "\n" " NEWPASS -- change password protecting credentials stored at " "the MyProxy server;\n" "\n" " DESTROY -- wipe off credentials stored at the MyProxy server;\n" "\n" " Local credentials (certificate and key) are not necessary " "except in case of PUT.\n" " MyProxy functionality can be used together with VOMS " "functionality.\n" " --voms and --vomses can be used for Get command if VOMS " "attributes\n" " are required to be included in the proxy.\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:427 msgid "" "use NSS credential database in default Mozilla profiles, including Firefox, " "Seamonkey and Thunderbird." msgstr "" #: src/clients/credentials/arcproxy.cpp:431 msgid "proxy constraints" msgstr "" #: src/clients/credentials/arcproxy.cpp:435 msgid "password destination=password source" msgstr "" #: src/clients/credentials/arcproxy.cpp:479 msgid "" "RESTful and old VOMS communication protocols can't be requested " "simultaneously." msgstr "" #: src/clients/credentials/arcproxy.cpp:509 #: src/clients/credentials/arcproxy.cpp:1220 msgid "Failed configuration initialization." msgstr "" #: src/clients/credentials/arcproxy.cpp:544 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" #: src/clients/credentials/arcproxy.cpp:545 #: src/clients/credentials/arcproxy.cpp:557 msgid "You may try to increase verbosity to get more information." msgstr "" #: src/clients/credentials/arcproxy.cpp:553 msgid "Failed to find CA certificates" msgstr "" #: src/clients/credentials/arcproxy.cpp:554 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" #: src/clients/credentials/arcproxy.cpp:558 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" #: src/clients/credentials/arcproxy.cpp:570 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also no vomses location information in the user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" #: src/clients/credentials/arcproxy.cpp:615 msgid "Wrong number of arguments!" msgstr "" #: src/clients/credentials/arcproxy.cpp:623 #: src/clients/credentials/arcproxy.cpp:647 #: src/clients/credentials/arcproxy.cpp:780 msgid "" "Cannot find the path of the proxy file, please set environment " "variable X509_USER_PROXY, or proxypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:630 #, c-format msgid "Cannot remove proxy file at %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:632 #, c-format msgid "Cannot remove proxy file at %s, because it's not there" msgstr "" #: src/clients/credentials/arcproxy.cpp:641 msgid "Bearer token is available. It is preferred for job submission." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:653 #: src/clients/credentials/arcproxy.cpp:786 #, c-format msgid "" "Cannot find file at %s for getting the proxy. Please make sure this file " "exists." msgstr "" #: src/clients/credentials/arcproxy.cpp:659 #: src/clients/credentials/arcproxy.cpp:792 #, c-format msgid "Cannot process proxy file at %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:662 #, c-format msgid "Subject: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:663 #, c-format msgid "Issuer: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:664 #, c-format msgid "Identity: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:666 msgid "Time left for proxy: Proxy expired" msgstr "" #: src/clients/credentials/arcproxy.cpp:668 msgid "Time left for proxy: Proxy not valid yet" msgstr "" #: src/clients/credentials/arcproxy.cpp:670 #, c-format msgid "Time left for proxy: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:671 #, c-format msgid "Proxy path: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:672 #, c-format msgid "Proxy type: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:673 #, c-format msgid "Proxy key length: %i" msgstr "" #: src/clients/credentials/arcproxy.cpp:674 #, c-format msgid "Proxy signature: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:683 msgid "AC extension information for VO " msgstr "" #: src/clients/credentials/arcproxy.cpp:686 msgid "Error detected while parsing this AC" msgstr "" #: src/clients/credentials/arcproxy.cpp:699 msgid "AC is invalid: " msgstr "" #: src/clients/credentials/arcproxy.cpp:729 #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:222 #, c-format msgid "Malformed VOMS AC attribute %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:760 msgid "Time left for AC: AC is not valid yet" msgstr "" #: src/clients/credentials/arcproxy.cpp:762 msgid "Time left for AC: AC has expired" msgstr "" #: src/clients/credentials/arcproxy.cpp:764 #, c-format msgid "Time left for AC: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:871 #, c-format msgid "Information item '%s' is not known" msgstr "" #: src/clients/credentials/arcproxy.cpp:883 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:887 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" #: src/clients/credentials/arcproxy.cpp:911 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" #: src/clients/credentials/arcproxy.cpp:928 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" #: src/clients/credentials/arcproxy.cpp:943 #, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int, stdin, stream, file." msgstr "" #: src/clients/credentials/arcproxy.cpp:957 msgid "Only standard input is currently supported for password source." msgstr "" #: src/clients/credentials/arcproxy.cpp:962 #, c-format msgid "" "Cannot parse password source type %s. Supported source types are int, stdin, " "stream, file." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1001 msgid "The start, end and period can't be set simultaneously" msgstr "" #: src/clients/credentials/arcproxy.cpp:1007 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1014 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1021 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1030 #, c-format msgid "The end time that you set: %s is before start time: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1041 #, c-format msgid "WARNING: The start time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1044 #, c-format msgid "WARNING: The end time that you set: %s is before current time: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1054 #, c-format msgid "The VOMS AC period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1072 #, c-format msgid "The MyProxy period that you set: %s can't be recognized." msgstr "" #: src/clients/credentials/arcproxy.cpp:1087 #, c-format msgid "The keybits constraint is wrong: %s." msgstr "" #: src/clients/credentials/arcproxy.cpp:1101 msgid "The NSS database can not be detected in the Firefox profile" msgstr "" #: src/clients/credentials/arcproxy.cpp:1110 #, c-format msgid "" "There are %d NSS base directories where the certificate, key, and module " "databases live" msgstr "" #: src/clients/credentials/arcproxy.cpp:1112 #, c-format msgid "Number %d is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1114 #, c-format msgid "Please choose the NSS database you would like to use (1-%d): " msgstr "" #: src/clients/credentials/arcproxy.cpp:1130 #, c-format msgid "NSS database to be accessed: %s\n" msgstr "" #: src/clients/credentials/arcproxy.cpp:1201 #, c-format msgid "Certificate to use is: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1252 #: src/clients/credentials/arcproxy.cpp:1366 msgid "Proxy generation succeeded" msgstr "" #: src/clients/credentials/arcproxy.cpp:1253 #: src/clients/credentials/arcproxy.cpp:1367 #, c-format msgid "Your proxy is valid until: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1272 msgid "" "The old GSI proxies are not supported anymore. Please do not use -O/--old " "option." msgstr "" #: src/clients/credentials/arcproxy.cpp:1291 src/hed/mcc/tls/MCCTLS.cpp:182 #: src/hed/mcc/tls/MCCTLS.cpp:215 src/hed/mcc/tls/MCCTLS.cpp:241 msgid "VOMS attribute parsing failed" msgstr "" #: src/clients/credentials/arcproxy.cpp:1293 msgid "Myproxy server did not return proxy with VOMS AC included" msgstr "" #: src/clients/credentials/arcproxy.cpp:1314 msgid "Proxy generation failed: No valid certificate found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1319 msgid "Proxy generation failed: No valid private key found." msgstr "" #: src/clients/credentials/arcproxy.cpp:1323 #, c-format msgid "Your identity: %s" msgstr "" #: src/clients/credentials/arcproxy.cpp:1325 msgid "Proxy generation failed: Certificate has expired." msgstr "" #: src/clients/credentials/arcproxy.cpp:1329 msgid "Proxy generation failed: Certificate is not valid yet." msgstr "" #: src/clients/credentials/arcproxy.cpp:1340 msgid "Proxy generation failed: Failed to create temporary file." 
msgstr "" #: src/clients/credentials/arcproxy.cpp:1348 msgid "Proxy generation failed: Failed to retrieve VOMS information." msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:100 msgid "Succeeded to get info from MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:144 msgid "Succeeded to change password on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:185 msgid "Succeeded to destroy credential on MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:241 #, c-format msgid "Succeeded to get a proxy in %s from MyProxy server %s" msgstr "" #: src/clients/credentials/arcproxy_myproxy.cpp:294 msgid "Succeeded to put a proxy onto MyProxy server" msgstr "" #: src/clients/credentials/arcproxy_proxy.cpp:93 msgid "Failed to add VOMS AC extension. Your proxy may be incomplete." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:63 msgid "" "Failed to process VOMS configuration or no suitable configuration lines " "found." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:75 #, c-format msgid "Failed to parse requested VOMS lifetime: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:93 #, c-format msgid "Cannot get VOMS server address information from vomses line: \"%s\"" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:97 #: src/clients/credentials/arcproxy_voms.cpp:99 #, c-format msgid "Contacting VOMS server (named %s): %s on port: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:105 #, c-format msgid "Failed to parse requested VOMS server port number: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:122 msgid "List functionality is not supported for RESTful VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:132 #: src/clients/credentials/arcproxy_voms.cpp:188 #, c-format msgid "" "The VOMS server with the information:\n" "\t%s\n" "can not be reached, please make sure it is available." msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, c-format msgid "No valid response from VOMS server: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:167 #, c-format msgid "Failed to parse VOMS command: %s" msgstr "" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but none of " "them can be reached, or can return a valid message." 
msgstr "" #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:315 #, c-format msgid "Current transfer FAILED: %s" msgstr "" #: src/clients/data/arccp.cpp:81 src/clients/data/arccp.cpp:119 #: src/clients/data/arccp.cpp:317 src/clients/data/arcls.cpp:214 #: src/clients/data/arcmkdir.cpp:62 src/clients/data/arcrename.cpp:78 #: src/clients/data/arcrm.cpp:83 msgid "This seems like a temporary error, please try again later" msgstr "" #: src/clients/data/arccp.cpp:96 src/clients/data/arccp.cpp:100 #: src/clients/data/arccp.cpp:133 src/clients/data/arccp.cpp:137 #: src/clients/data/arccp.cpp:343 src/clients/data/arccp.cpp:348 #: src/clients/data/arcls.cpp:125 src/clients/data/arcmkdir.cpp:30 #: src/clients/data/arcrename.cpp:31 src/clients/data/arcrename.cpp:35 #: src/clients/data/arcrm.cpp:38 #, c-format msgid "Invalid URL: %s" msgstr "" #: src/clients/data/arccp.cpp:112 msgid "Third party transfer is not supported for these endpoints" msgstr "" #: src/clients/data/arccp.cpp:114 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" #: src/clients/data/arccp.cpp:117 #, c-format msgid "Transfer FAILED: %s" msgstr "" #: src/clients/data/arccp.cpp:145 src/clients/data/arccp.cpp:171 #: src/clients/data/arccp.cpp:359 src/clients/data/arccp.cpp:387 #, c-format msgid "Can't read list of sources from file %s" msgstr "" #: src/clients/data/arccp.cpp:150 src/clients/data/arccp.cpp:186 #: src/clients/data/arccp.cpp:364 src/clients/data/arccp.cpp:403 #, c-format msgid "Can't read list of destinations from file %s" msgstr "" #: src/clients/data/arccp.cpp:155 src/clients/data/arccp.cpp:370 msgid "Numbers of sources and destinations do not match" msgstr "" #: src/clients/data/arccp.cpp:200 msgid "Fileset registration is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:206 src/clients/data/arccp.cpp:279 #: src/clients/data/arccp.cpp:441 #, c-format msgid "Unsupported source url: %s" msgstr "" #: src/clients/data/arccp.cpp:210 src/clients/data/arccp.cpp:283 #, c-format msgid "Unsupported destination url: %s" msgstr "" #: src/clients/data/arccp.cpp:217 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" #: src/clients/data/arccp.cpp:227 #, c-format msgid "Could not obtain information about source: %s" msgstr "" #: src/clients/data/arccp.cpp:234 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" #: src/clients/data/arccp.cpp:246 msgid "Failed to accept new file/destination" msgstr "" #: src/clients/data/arccp.cpp:252 src/clients/data/arccp.cpp:258 #, c-format msgid "Failed to register new file/destination: %s" msgstr "" #: src/clients/data/arccp.cpp:421 msgid "Fileset copy to single object is not supported yet" msgstr "" #: src/clients/data/arccp.cpp:431 msgid "Can't extract object's name from source url" msgstr "" #: src/clients/data/arccp.cpp:450 #, c-format msgid "%s. 
#: src/clients/data/arccp.cpp:450
#, c-format
msgid "%s. Cannot copy fileset"
msgstr ""

#: src/clients/data/arccp.cpp:460 src/hed/libs/compute/ExecutionTarget.cpp:256
#: src/hed/libs/compute/ExecutionTarget.cpp:328
#, c-format
msgid "Name: %s"
msgstr ""

#: src/clients/data/arccp.cpp:463
#, c-format
msgid "Source: %s"
msgstr ""

#: src/clients/data/arccp.cpp:464
#, c-format
msgid "Destination: %s"
msgstr ""

#: src/clients/data/arccp.cpp:470
msgid "Current transfer complete"
msgstr ""

#: src/clients/data/arccp.cpp:473
msgid "Some transfers failed"
msgstr ""

#: src/clients/data/arccp.cpp:483
#, c-format
msgid "Directory: %s"
msgstr ""

#: src/clients/data/arccp.cpp:503
msgid "Transfer complete"
msgstr ""

#: src/clients/data/arccp.cpp:522
msgid "source destination"
msgstr ""

#: src/clients/data/arccp.cpp:523
msgid ""
"The arccp command copies files to, from and between grid storage elements."
msgstr ""

#: src/clients/data/arccp.cpp:528
msgid ""
"use passive transfer (off by default if secure is on, on by default if "
"secure is not requested)"
msgstr ""

#: src/clients/data/arccp.cpp:534
msgid "do not try to force passive transfer"
msgstr ""

#: src/clients/data/arccp.cpp:539
msgid "force overwrite of existing destination"
msgstr ""

#: src/clients/data/arccp.cpp:543
msgid "show progress indicator"
msgstr ""

#: src/clients/data/arccp.cpp:548
msgid ""
"do not transfer, but register source into destination. Destination must be "
"a meta-URL."
msgstr ""

#: src/clients/data/arccp.cpp:554
msgid "use secure transfer (insecure by default)"
msgstr ""

#: src/clients/data/arccp.cpp:559
msgid "path to local cache (use to put file into cache)"
msgstr ""

#: src/clients/data/arccp.cpp:564 src/clients/data/arcls.cpp:290
msgid "operate recursively"
msgstr ""

#: src/clients/data/arccp.cpp:569 src/clients/data/arcls.cpp:295
msgid "operate recursively up to specified level"
msgstr ""

#: src/clients/data/arccp.cpp:570 src/clients/data/arcls.cpp:296
msgid "level"
msgstr ""

#: src/clients/data/arccp.cpp:574
msgid "number of retries before failing file transfer"
msgstr ""

#: src/clients/data/arccp.cpp:575
msgid "number"
msgstr ""

#: src/clients/data/arccp.cpp:579
msgid ""
"physical location to write to when destination is an indexing service. Must "
"be specified for indexing services which do not automatically generate "
"physical locations. Can be specified multiple times - locations will be "
"tried in order until one succeeds."
msgstr ""

#: src/clients/data/arccp.cpp:587
msgid ""
"perform third party transfer, where the destination pulls from the source "
"(only available with GFAL plugin)"
msgstr ""

#: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:312
#: src/clients/data/arcmkdir.cpp:90 src/clients/data/arcrename.cpp:101
#: src/clients/data/arcrm.cpp:115
msgid "list the available plugins (protocols supported)"
msgstr ""

#: src/clients/data/arccp.cpp:632 src/clients/data/arcls.cpp:351
#: src/clients/data/arcmkdir.cpp:129 src/clients/data/arcrename.cpp:140
#: src/clients/data/arcrm.cpp:154
msgid ""
"force using both CA certificates configuration for Grid services (typically "
"IGTF) and those provided by OpenSSL"
msgstr ""

#: src/clients/data/arccp.cpp:667 src/clients/data/arcls.cpp:387
#: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:176
#: src/clients/data/arcrm.cpp:191
msgid "Protocol plugins available:"
msgstr ""

#: src/clients/data/arccp.cpp:715 src/clients/data/arcls.cpp:435
#: src/clients/data/arcmkdir.cpp:212 src/clients/data/arcrename.cpp:222
#: src/clients/data/arcrm.cpp:239
msgid "Wrong number of parameters specified"
msgstr ""

#: src/clients/data/arccp.cpp:720
msgid "Options 'p' and 'n' can't be used simultaneously"
msgstr ""

#: src/clients/data/arcls.cpp:131 src/clients/data/arcmkdir.cpp:36
#: src/clients/data/arcrm.cpp:45
#, c-format
msgid "Can't read list of locations from file %s"
msgstr ""

#: src/clients/data/arcls.cpp:146 src/clients/data/arcmkdir.cpp:51
#: src/clients/data/arcrename.cpp:63
msgid "Unsupported URL given"
msgstr ""

#: src/clients/data/arcls.cpp:217
msgid "Warning: Failed listing files but some information is obtained"
msgstr ""

#: src/clients/data/arcls.cpp:271 src/clients/data/arcmkdir.cpp:79
msgid "url"
msgstr ""

#: src/clients/data/arcls.cpp:272
msgid ""
"The arcls command is used for listing files in grid storage elements and "
"file\n"
"index catalogues."
msgstr ""
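
# Editor's note (context for translators): typical invocations behind the
# arcls option descriptions that follow. The URL is illustrative, and the
# option letter is an assumption; see `arcls --help` for the exact flags.
#
#   arcls https://se.example.org/store/
#   arcls -l https://se.example.org/store/data.dat
#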
msgstr "" #: src/clients/data/arccp.cpp:587 msgid "" "perform third party transfer, where the destination pulls from the source " "(only available with GFAL plugin)" msgstr "" #: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:312 #: src/clients/data/arcmkdir.cpp:90 src/clients/data/arcrename.cpp:101 #: src/clients/data/arcrm.cpp:115 msgid "list the available plugins (protocols supported)" msgstr "" #: src/clients/data/arccp.cpp:632 src/clients/data/arcls.cpp:351 #: src/clients/data/arcmkdir.cpp:129 src/clients/data/arcrename.cpp:140 #: src/clients/data/arcrm.cpp:154 msgid "" "force using both CA certificates configuration for Grid services (typically " "IGTF) and those provided by OpenSSL" msgstr "" #: src/clients/data/arccp.cpp:667 src/clients/data/arcls.cpp:387 #: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:176 #: src/clients/data/arcrm.cpp:191 msgid "Protocol plugins available:" msgstr "" #: src/clients/data/arccp.cpp:715 src/clients/data/arcls.cpp:435 #: src/clients/data/arcmkdir.cpp:212 src/clients/data/arcrename.cpp:222 #: src/clients/data/arcrm.cpp:239 msgid "Wrong number of parameters specified" msgstr "" #: src/clients/data/arccp.cpp:720 msgid "Options 'p' and 'n' can't be used simultaneously" msgstr "" #: src/clients/data/arcls.cpp:131 src/clients/data/arcmkdir.cpp:36 #: src/clients/data/arcrm.cpp:45 #, c-format msgid "Can't read list of locations from file %s" msgstr "" #: src/clients/data/arcls.cpp:146 src/clients/data/arcmkdir.cpp:51 #: src/clients/data/arcrename.cpp:63 msgid "Unsupported URL given" msgstr "" #: src/clients/data/arcls.cpp:217 msgid "Warning: Failed listing files but some information is obtained" msgstr "" #: src/clients/data/arcls.cpp:271 src/clients/data/arcmkdir.cpp:79 msgid "url" msgstr "" #: src/clients/data/arcls.cpp:272 msgid "" "The arcls command is used for listing files in grid storage elements and " "file\n" "index catalogues." msgstr "" #: src/clients/data/arcls.cpp:281 msgid "show URLs of file locations" msgstr "" #: src/clients/data/arcls.cpp:285 msgid "display all available metadata" msgstr "" #: src/clients/data/arcls.cpp:299 msgid "" "show only description of requested object, do not list content of directories" msgstr "" #: src/clients/data/arcls.cpp:303 msgid "treat requested object as directory and always try to list content" msgstr "" #: src/clients/data/arcls.cpp:307 msgid "check readability of object, does not show any information about object" msgstr "" #: src/clients/data/arcls.cpp:440 msgid "Incompatible options --nolist and --forcelist requested" msgstr "" #: src/clients/data/arcls.cpp:445 msgid "Requesting recursion and --nolist has no sense" msgstr "" #: src/clients/data/arcmkdir.cpp:80 msgid "" "The arcmkdir command creates directories on grid storage elements and " "catalogs." msgstr "" #: src/clients/data/arcmkdir.cpp:85 msgid "make parent directories as needed" msgstr "" #: src/clients/data/arcrename.cpp:43 msgid "Both URLs must have the same protocol, host and port" msgstr "" #: src/clients/data/arcrename.cpp:53 msgid "Cannot rename to or from root directory" msgstr "" #: src/clients/data/arcrename.cpp:57 msgid "Cannot rename to the same URL" msgstr "" #: src/clients/data/arcrename.cpp:95 msgid "old_url new_url" msgstr "" #: src/clients/data/arcrename.cpp:96 msgid "The arcrename command renames files on grid storage elements." 
msgstr "" #: src/clients/data/arcrm.cpp:60 #, c-format msgid "Unsupported URL given: %s" msgstr "" #: src/clients/data/arcrm.cpp:103 msgid "url [url ...]" msgstr "" #: src/clients/data/arcrm.cpp:104 msgid "The arcrm command deletes files on grid storage elements." msgstr "" #: src/clients/data/arcrm.cpp:109 msgid "" "remove logical file name registration even if not all physical instances " "were removed" msgstr "" #: src/clients/data/utils.cpp:18 msgid "Proxy expired. Please run 'arcproxy'!" msgstr "" #: src/clients/data/utils.cpp:81 src/clients/data/utils.cpp:90 #, c-format msgid "Unable to handle %s" msgstr "" #: src/clients/data/utils.cpp:82 src/clients/data/utils.cpp:91 msgid "Invalid credentials, please check proxy and/or CA certificates" msgstr "" #: src/clients/data/utils.cpp:88 msgid "Proxy expired" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:53 msgid "Cannot initialize ARCHERY domain name for query" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:60 msgid "Cannot create resolver from /etc/resolv.conf" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:68 msgid "Cannot query service endpoint TXT records from DNS" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:79 msgid "Cannot parse service endpoint TXT records." msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:124 #, c-format msgid "Wrong service record field \"%s\" found in the \"%s\"" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:129 #, c-format msgid "Malformed ARCHERY record found (endpoint url is not defined): %s" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:134 #, c-format msgid "Malformed ARCHERY record found (endpoint type is not defined): %s" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:138 #, c-format msgid "Found service endpoint %s (type %s)" msgstr "" #: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:157 #, c-format msgid "" "Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping." msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:229 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:149 #, c-format msgid "Job %s has no delegation associated. Can't renew such job." msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:241 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:161 #, c-format msgid "Job %s failed to renew delegation %s." 
msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:314 #, c-format msgid "Failed to process jobs - error response: %s" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:316 #, c-format msgid "Failed to process jobs - wrong response: %u" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:318 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:327 #, c-format msgid "Content: %s" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:321 #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:333 #, c-format msgid "Failed to process job: %s" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:331 msgid "Failed to process jobs - failed to parse response" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:344 #, c-format msgid "No response returned: %s" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:368 #, c-format msgid "Failed to process job: %s - %s %s" msgstr "" #: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:455 #, c-format msgid "Failed retrieving job description for job: %s" msgstr "" #: src/hed/acc/ARCREST/JobListRetrieverPluginREST.cpp:29 msgid "Collecting Job (A-REX REST jobs) information." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:49 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:80 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:115 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:149 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:189 msgid "Failed to communicate to delegation endpoint." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:54 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:85 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:120 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:154 #, c-format msgid "Unexpected response code from delegation endpoint - %u" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:56 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:87 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:122 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:156 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:399 #: src/hed/dmc/gridftp/Lister.cpp:223 src/hed/dmc/gridftp/Lister.cpp:243 #: src/hed/dmc/gridftp/Lister.cpp:468 src/hed/dmc/gridftp/Lister.cpp:475 #: src/hed/dmc/gridftp/Lister.cpp:497 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:164 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:197 #, c-format msgid "Response: %s" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:64 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:136 #, c-format msgid "Unexpected delegation location from delegation endpoint - %s." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:92 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:127 #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:161 msgid "Missing response from delegation endpoint." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:193 #, c-format msgid "Unexpected response code from delegation endpoint: %u, %s." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:235 #, c-format msgid "Failed to submit all jobs: %s %s" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:249 msgid "Failed uploading local input files" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:304 msgid "Failed to prepare job description" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:313 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:320 msgid "" "Can't submit multiple instances for multiple job descriptions. 
Not " "implemented yet." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:331 msgid "Unable to submit jobs. Failed to delegate X.509 credentials." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:338 msgid "Unable to submit jobs. Failed to delegate token." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:348 msgid "Unable to submit job. Failed to assign delegation to job description." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:388 msgid "Failed to submit all jobs." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:398 #, c-format msgid "Failed to submit all jobs: %u %s" msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:411 #, c-format msgid "Failed to submit all jobs: %s" msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:27 msgid "Querying WSRF GLUE2 computing REST endpoint." msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:60 #, c-format msgid "CONTENT %u: %s" msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:64 msgid "Response is not XML" msgstr "" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:69 #, c-format msgid "Parsed domains: %u" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:14 msgid "Sorting according to free slots in queue" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:15 msgid "Random sorting" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:16 msgid "Sorting according to specified benchmark (default \"specint2000\")" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:17 msgid "Sorting according to input data availability at target" msgstr "" #: src/hed/acc/Broker/DescriptorsBroker.cpp:18 msgid "Performs neither sorting nor matching" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of waiting " "jobs" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of total slots" msgstr "" #: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30 #, c-format msgid "" "Target %s removed by FastestQueueBroker, doesn't report number of free slots" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:74 #, c-format msgid "[ADLParser] Unsupported EMI ES state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:94 #, c-format msgid "[ADLParser] Unsupported internal state %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:104 #, c-format msgid "[ADLParser] Optional for %s elements are not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:113 #, c-format msgid "[ADLParser] %s element must be boolean." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:125 #, c-format msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:363 msgid "[ADLParser] Root element is not ActivityDescription " msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:410 msgid "[ADLParser] priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:453 #, c-format msgid "[ADLParser] Unsupported URL %s for RemoteLogging." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:472 #, c-format msgid "[ADLParser] Wrong time %s in ExpirationTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502 msgid "[ADLParser] AccessControl isn't valid XML." 
msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:513 msgid "[ADLParser] CredentialService must contain valid URL." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:542 #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:545 msgid "[ADLParser] Only email Prorocol for Notification is supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:603 msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:608 msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:614 msgid "" "[ADLParser] Missing Name element or value in ParallelEnvironment/Option " "element." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:631 msgid "[ADLParser] NetworkInfo is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:645 #, c-format msgid "[ADLParser] NodeAccess value %s is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:653 msgid "[ADLParser] Missing or wrong value in NumberOfSlots." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:660 msgid "" "[ADLParser] The NumberOfSlots element should be specified, when the value of " "useNumberOfSlots attribute of SlotsPerHost element is \"true\"." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:667 msgid "[ADLParser] Missing or wrong value in SlotsPerHost." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:696 msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706 msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:716 msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:730 msgid "[ADLParser] Benchmark is not supported yet." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:738 msgid "[ADLParser] Missing or wrong value in IndividualCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746 msgid "[ADLParser] Missing or wrong value in TotalCPUTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:755 msgid "[ADLParser] Missing or wrong value in WallTime." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:775 msgid "[ADLParser] Missing or empty Name in InputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:786 #, c-format msgid "[ADLParser] Wrong URI specified in Source - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:808 msgid "[ADLParser] Missing or empty Name in OutputFile." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:814 #, c-format msgid "[ADLParser] Wrong URI specified in Target - %s." msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:827 #, c-format msgid "Location URI for file %s is invalid" msgstr "" #: src/hed/acc/JobDescriptionParser/ADLParser.cpp:852 #, c-format msgid "[ADLParser] CreationFlag value %s is not supported." 
msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42 msgid "Left operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50 msgid "Right operand for RSL concatenation does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161 msgid "Multi-request operator only allowed at top level" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186 msgid "RSL substitution is not a sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192 msgid "RSL substitution sequence is not of length 2" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211 msgid "RSL substitution variable name does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220 msgid "RSL substitution variable value does not evaluate to a literal" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313 msgid "End of comment not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324 msgid "Junk at end of RSL" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424 msgid "End of single quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441 msgid "End of double quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460 #, c-format msgid "End of user delimiter (%s) quoted string not found" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625 msgid "')' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528 #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609 msgid "'(' expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536 msgid "Variable name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541 #, c-format msgid "Variable name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557 msgid "Broken string" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570 msgid "No left operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574 msgid "No right operand for concatenation operator" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638 msgid "Attribute name expected" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643 #, c-format msgid "Attribute name (%s) contains invalid character (%s)" msgstr "" #: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649 msgid "Relation operator expected" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86 msgid "Error parsing the internally set executables attribute." 
msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102 #, c-format msgid "" "File '%s' in the 'executables' attribute is not present in the 'inputfiles' " "attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120 msgid "The value of the ftpthreads attribute must be a number from 1 to 10" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177 msgid "'stdout' attribute must be specified when 'join' attribute is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181 msgid "" "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' " "attributes is specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200 msgid "Attributes 'gridtime' and 'cputime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204 msgid "Attributes 'gridtime' and 'walltime' cannot be specified together" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226 msgid "" "When specifying 'countpernode' attribute, 'count' attribute must also be " "specified" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229 msgid "Value of 'countpernode' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287 msgid "No RSL content in job description found" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:295 msgid "'action' attribute not allowed in user-side job description" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304 #, c-format msgid "String successfully parsed as %s." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:313 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:331 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:349 #, c-format msgid "Attribute '%s' multiply defined" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:317 #, c-format msgid "Value of attribute '%s' expected to be single value" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:322 #, c-format msgid "Value of attribute '%s' expected to be a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:338 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368 #, c-format msgid "Value of attribute '%s' is not a string" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:356 #, c-format msgid "Value of attribute '%s' is not sequence" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:360 #, c-format msgid "" "Value of attribute '%s' has wrong sequence length: Expected %d, found %d" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:492 #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1375 msgid "Unexpected RSL type" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:557 msgid "At least two values are needed for the 'inputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:562 msgid "First value of 'inputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:587 #, c-format msgid "Invalid URL '%s' for input file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:596 #, c-format msgid "Invalid URL option syntax in option '%s' for input file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606 #, c-format msgid "Invalid URL: '%s' in input file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:677 msgid "At least two values are needed for the 'outputfiles' attribute" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:682 msgid "First 
value of 'outputfiles' attribute (filename) cannot be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694 #, c-format msgid "Invalid URL '%s' for output file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:704 #, c-format msgid "Invalid URL option syntax in option '%s' for output file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:714 #, c-format msgid "Invalid URL: '%s' in output file '%s'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:746 #, c-format msgid "" "Invalid comparison operator '%s' used at 'delegationid' attribute, only \"=" "\" is allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:764 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' " "dialect, only \"=\" is allowed" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:770 #, c-format msgid "" "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or " "\"=\" are allowed." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:927 #, c-format msgid "Value of attribute '%s' expected not to be empty" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1036 msgid "The value of the acl XRSL attribute isn't valid XML." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1050 msgid "The cluster XRSL attribute is currently unsupported." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1066 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must contain an email " "address" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1074 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it must only contain email " "addresses after state flag(s)" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1077 #, c-format msgid "" "Syntax error in 'notify' attribute value ('%s'), it contains unknown state " "flags" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1125 msgid "priority is too large - using max value 100" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1158 #, c-format msgid "Invalid nodeaccess value: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1201 msgid "Value of 'count' attribute must be an integer" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1231 msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1277 #, c-format msgid "Invalid action value %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1367 #, c-format msgid "The specified Globus attribute (%s) is not supported. %s ignored." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1371 #, c-format msgid "Unknown XRSL attribute: %s - Ignoring it." msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1385 #, c-format msgid "Wrong language requested: %s" msgstr "" #: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1722 msgid "" "Cannot output XRSL representation: The Resources.SlotRequirement." "NumberOfSlots attribute must be specified when the Resources.SlotRequirement." "SlotsPerHost attribute is specified." 
msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65 #: src/services/wrappers/python/pythonwrapper.cpp:92 msgid "Failed to initialize main Python thread" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71 #: src/services/wrappers/python/pythonwrapper.cpp:97 msgid "Main Python thread was not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81 #, c-format msgid "Loading Python broker (%i)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104 #: src/services/wrappers/python/pythonwrapper.cpp:134 msgid "Main Python thread is not initialized" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108 msgid "PythonBroker init" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116 msgid "" "Invalid class name. The broker argument for the PythonBroker should be\n" " Filename.Class.args (args is optional), for example SampleBroker." "MyBroker" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122 #, c-format msgid "Class name: %s" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123 #, c-format msgid "Module name: %s" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132 #: src/services/wrappers/python/pythonwrapper.cpp:178 msgid "Cannot convert ARC module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140 #: src/services/wrappers/python/pythonwrapper.cpp:186 msgid "Cannot import ARC module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149 #: src/services/wrappers/python/pythonwrapper.cpp:196 #: src/services/wrappers/python/pythonwrapper.cpp:429 msgid "Cannot get dictionary of ARC module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158 msgid "Cannot find ARC UserConfig class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166 msgid "UserConfig class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173 msgid "Cannot find ARC JobDescription class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181 msgid "JobDescription class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188 msgid "Cannot find ARC ExecutionTarget class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196 msgid "ExecutionTarget class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207 #: src/services/wrappers/python/pythonwrapper.cpp:157 msgid "Cannot convert module name to Python string" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215 #: src/services/wrappers/python/pythonwrapper.cpp:164 msgid "Cannot import module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224 msgid "Cannot get dictionary of custom broker module" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233 msgid "Cannot find custom broker class" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241 #, c-format msgid "%s class is not an object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247 msgid "Cannot create UserConfig argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255 msgid "Cannot convert UserConfig to Python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263 #: src/services/wrappers/python/pythonwrapper.cpp:253 msgid "Cannot create argument of the constructor" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272 #: src/services/wrappers/python/pythonwrapper.cpp:261 msgid "Cannot create instance of Python class" msgstr "" #: 
src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278 #, c-format msgid "Python broker constructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302 #, c-format msgid "Python broker destructor called (%d)" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361 msgid "Cannot create ExecutionTarget argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336 #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369 #, c-format msgid "Cannot convert ExecutionTarget (%s) to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393 msgid "Cannot create JobDescription argument" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401 msgid "Cannot convert JobDescription to python object" msgstr "" #: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422 msgid "Do sorting using user created python broker" msgstr "" #: src/hed/daemon/unix/daemon.cpp:84 #, c-format msgid "Daemonization fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:95 msgid "Watchdog (re)starting application" msgstr "" #: src/hed/daemon/unix/daemon.cpp:100 #, c-format msgid "Watchdog fork failed: %s" msgstr "" #: src/hed/daemon/unix/daemon.cpp:110 msgid "Watchdog starting monitoring" msgstr "" #: src/hed/daemon/unix/daemon.cpp:136 #, c-format msgid "Watchdog detected application exit due to signal %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:138 #, c-format msgid "Watchdog detected application exited with code %u" msgstr "" #: src/hed/daemon/unix/daemon.cpp:140 msgid "Watchdog detected application exit" msgstr "" #: src/hed/daemon/unix/daemon.cpp:149 msgid "" "Watchdog exiting because application was purposely killed or exited itself" msgstr "" #: src/hed/daemon/unix/daemon.cpp:156 msgid "Watchdog detected application timeout or error - killing process" msgstr "" #: src/hed/daemon/unix/daemon.cpp:167 msgid "Watchdog failed to wait till application exited - sending KILL" msgstr "" #: src/hed/daemon/unix/daemon.cpp:179 msgid "Watchdog failed to kill application - giving up and exiting" msgstr "" #: src/hed/daemon/unix/daemon.cpp:200 msgid "Shutdown daemon" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:47 msgid "shutdown" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:50 msgid "exit" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:88 msgid "No server config part of config file" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:163 #, c-format msgid "Unknown log level %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:173 #, c-format msgid "Failed to open log file: %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:205 msgid "Start foreground" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:254 #, c-format msgid "XML config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:258 src/hed/daemon/unix/main_unix.cpp:273 #, c-format msgid "Failed to load service configuration from file %s" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:264 #, c-format msgid "INI config file %s does not exist" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:269 src/hed/daemon/unix/main_unix.cpp:291 msgid "Error evaluating profile" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:285 msgid "Error loading generated configuration" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:296 msgid "Failed to load service configuration from any default config file" msgstr "" #: 
src/hed/daemon/unix/main_unix.cpp:357 msgid "Schema validation error" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:372 msgid "Configuration root element is not " msgstr "" #: src/hed/daemon/unix/main_unix.cpp:388 #, c-format msgid "Cannot switch to group (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:398 #, c-format msgid "Cannot switch to primary group for user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:403 #, c-format msgid "Cannot switch to user (%s)" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:421 msgid "Failed to load service side MCCs" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:423 src/tests/count/test_service.cpp:29 #: src/tests/echo/test.cpp:30 src/tests/echo/test_service.cpp:29 msgid "Service side MCCs are loaded" msgstr "" #: src/hed/daemon/unix/main_unix.cpp:430 msgid "Unexpected arguments supplied" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:87 #, c-format msgid "Unknown channel %s for stdio protocol" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:94 #, c-format msgid "Failed to open stdio channel %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:95 #, c-format msgid "Failed to open stdio channel %d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:335 #, c-format msgid "fsync of file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:340 #: src/hed/dmc/file/DataPointFile.cpp:348 #, c-format msgid "closing file %s failed: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:367 #, c-format msgid "File is not accessible: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:373 #: src/hed/dmc/file/DataPointFile.cpp:458 #, c-format msgid "Can't stat file: %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:419 #: src/hed/dmc/file/DataPointFile.cpp:425 #, c-format msgid "Can't stat stdio channel %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:473 #, c-format msgid "%s is not a directory" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:488 #, c-format msgid "Failed to read object %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:501 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:534 #, c-format msgid "File is not accessible %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:507 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:540 #, c-format msgid "Can't delete directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:514 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:547 #, c-format msgid "Can't delete file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:524 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:315 #: src/hed/dmc/http/DataPointHTTP.cpp:1658 #: src/hed/dmc/http/DataPointHTTP.cpp:1676 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1466 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:562 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:582 #, c-format msgid "Creating directory %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:532 src/hed/dmc/srm/DataPointSRM.cpp:168 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:596 #, c-format msgid "Renaming %s to %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:534 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:605 #, c-format msgid "Can't rename file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:562 #, c-format msgid "Failed to open %s for reading: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:577 #: src/hed/dmc/file/DataPointFile.cpp:712 #, c-format msgid "Failed to switch user id to %d/%d" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:583 #, c-format msgid "Failed to create/open file %s: %s" msgstr "" #: 
src/hed/dmc/file/DataPointFile.cpp:599 msgid "Failed to create thread" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:679 #, c-format msgid "Invalid url: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:688 src/hed/libs/data/FileCache.cpp:480 #, c-format msgid "Failed to create directory %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:701 #: src/hed/dmc/file/DataPointFile.cpp:720 #, c-format msgid "Failed to create file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:732 #, c-format msgid "setting file %s to size %llu" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:755 #, c-format msgid "Failed to preallocate space for %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:794 src/hed/libs/data/FileCache.cpp:854 #, c-format msgid "Failed to clean up file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:808 #, c-format msgid "Error during file validation. Can't stat file %s: %s" msgstr "" #: src/hed/dmc/file/DataPointFile.cpp:812 #, c-format msgid "" "Error during file validation: Local file size %llu does not match source " "file size %llu for file %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:53 #, c-format msgid "Using proxy %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:54 #, c-format msgid "Using key %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:55 #, c-format msgid "Using cert %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:113 msgid "Locations are missing in destination LFC URL" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:119 #, c-format msgid "Duplicate replica found in LFC: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:121 #, c-format msgid "Adding location: %s - %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:129 #: src/hed/libs/data/DataPointIndex.cpp:161 #, c-format msgid "Add location: url: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:130 #: src/hed/libs/data/DataPointIndex.cpp:162 #, c-format msgid "Add location: metadata: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:150 #: src/hed/dmc/gfal/DataPointGFAL.cpp:310 #, c-format msgid "gfal_open failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:163 #: src/hed/dmc/gfal/DataPointGFAL.cpp:223 #: src/hed/dmc/gfal/DataPointGFAL.cpp:249 #: src/hed/dmc/gfal/DataPointGFAL.cpp:324 #: src/hed/dmc/gfal/DataPointGFAL.cpp:403 #: src/hed/dmc/gfal/DataPointGFAL.cpp:430 #, c-format msgid "gfal_close failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:195 #, c-format msgid "gfal_read failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:237 msgid "StopReading starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:239 msgid "StopReading finished waiting for transfer_condition." 
msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:68 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:73 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:44 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:49 #, c-format msgid "No locations defined for %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, c-format msgid "Failed to set LFC replicas: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:405 msgid "StopWriting starts waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:407 msgid "StopWriting finished waiting for transfer_condition." msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, c-format msgid "gfal_opendir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, c-format msgid "gfal_closedir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 transfer timeout, will use default: %s" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 msgid "Transfer succeeded" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:56 msgid "ftp_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:62 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:78 msgid "ftp_check_callback" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 
#: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:80 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:108 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:285 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:321 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:731 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:764 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:801 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:932 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:996 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1006 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1014 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1022 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1030 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1036 #, c-format msgid "Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:91 msgid "Excessive data received while checking file access" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:107 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained size: %lli" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:147 msgid "check_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:154 msgid "check_ftp: globus_ftp_client_register_read" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:166 msgid "check_ftp: timeout waiting for partial get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:193 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:204 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:210 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:232 msgid "delete_ftp: timeout waiting for delete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:226 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:280 #, c-format msgid "mkdir_ftp: making %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:289 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:325 msgid "Timeout waiting for mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:348 msgid "start_reading_ftp" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:352 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:366 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:382 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:398 msgid "ftp_read_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:402 msgid "ftp_read_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:410 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:664 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:417 msgid "ftp_read_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:438 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:519 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:708 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:129 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:169 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1214 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1248 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1430 #: src/hed/libs/common/Thread.cpp:240 src/hed/libs/common/Thread.cpp:243 #: src/hed/libs/credential/Credential.cpp:1076 #: src/hed/libs/data/DataPointDelegate.cpp:628 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:66 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:82 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:98 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:117 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:127 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:135 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:144 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:69 src/hed/shc/arcpdp/ArcPDP.cpp:234 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:305 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:258 #: src/libs/data-staging/Scheduler.cpp:117 #: src/services/a-rex/delegation/DelegationStore.cpp:36 #: src/services/a-rex/delegation/DelegationStore.cpp:41 #: src/services/a-rex/delegation/DelegationStore.cpp:46 #: src/services/a-rex/delegation/DelegationStore.cpp:75 #: src/services/a-rex/delegation/DelegationStore.cpp:81 #: 
src/services/a-rex/grid-manager/conf/GMConfig.cpp:233 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:408 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:395 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:435 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:487 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:602 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:636 #, c-format msgid "%s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:545 msgid "start_writing_ftp: mkdir" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:547 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:549 msgid "start_writing_ftp: put" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:563 msgid "start_writing_ftp: put failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 #: src/hed/libs/data/DataPointDelegate.cpp:307 msgid "StopWriting: aborting connection" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #: src/hed/dmc/http/DataPointHTTP.cpp:982 #: src/hed/libs/data/DataPointDelegate.cpp:321 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #: src/hed/dmc/http/DataPointHTTP.cpp:986 #: src/hed/libs/data/DataPointDelegate.cpp:325 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:800 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:804 msgid "list_files_ftp: timeout waiting for cksum" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:817 #, c-format msgid "list_files_ftp: checksum %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 #: src/hed/dmc/http/DataPointHTTP.cpp:995 #: src/hed/libs/data/DataPointDelegate.cpp:332 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #: src/hed/dmc/http/DataPointHTTP.cpp:997 #: src/hed/libs/data/DataPointDelegate.cpp:334 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #: src/hed/dmc/http/DataPointHTTP.cpp:999 #: src/hed/libs/data/DataPointDelegate.cpp:337 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:662 msgid "ftp_write_thread: waiting for buffers released" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:669 msgid "ftp_write_thread: exiting" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:688 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:690 #, c-format msgid "ftp_write_callback: success %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:706 msgid "Failed to store ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:711 msgid "ftp_put_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:725 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:729 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:735 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:736 msgid "list_files_ftp: timeout waiting for size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:742 msgid "list_files_ftp: failed to get file's size" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:755 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:761 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:768 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:776 msgid "list_files_ftp: failed to get file's modification time" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:790 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:830 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:835 msgid "No results returned from stat" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:841 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:854 #, c-format msgid "Unexpected 
path %s returned from server" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:887 #, c-format msgid "Failed to obtain listing from FTP: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:930 msgid "Rename: globus_ftp_client_move failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:936 msgid "Rename: timeout waiting for operation to complete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:995 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1004 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1013 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1020 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1028 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1034 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1086 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1115 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1114 msgid "Failed to set credentials for GridFTP transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1120 msgid "Using secure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1125 msgid "Using insecure data transfer" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1152 msgid "~DataPoint: destroy ftp_handle" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1155 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1173 msgid "~DataPoint: failed to destroy ftp_handle - leaking" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." 
msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:390 msgid "ftp_read_thread: failed to register buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:405 msgid "ftp_read_thread: failed to release buffers" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:443 #, c-format msgid "ftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%u" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:457 #, c-format msgid "ftp_read_callback: delayed data chunk: %llu %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:464 #, c-format msgid "ftp_read_callback: unexpected data out of order: %llu != %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:471 msgid "ftp_read_callback: too many unexpected out of order chunks" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:492 #, c-format msgid "ftp_read_callback: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:517 msgid "ftp_get_complete_callback: Failed to get ftp file" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:522 msgid "ftp_get_complete_callback: success" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:577 msgid "start_writing_ftp: waiting for data tag" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:580 msgid "start_writing_ftp: failed to read data tag" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:585 msgid "start_writing_ftp: waiting for data chunk" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:587 msgid "start_writing_ftp: failed to read data chunk" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:598 #, c-format msgid "ftp_write_thread: data out of order in stream mode: %llu != %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:605 msgid "ftp_write_thread: too many out of order chunks in stream mode" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:610 #, c-format msgid "start_writing_ftp: data chunk: %llu %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:616 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:642 #, c-format msgid "ftp_write_thread: Globus error: %s" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:635 #, c-format msgid "start_writing_ftp: delayed data chunk: %llu %llu" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:654 msgid "start_writing_ftp: waiting for some buffers sent" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:660 msgid "ftp_write_thread: waiting for transfer complete" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:811 msgid "list_files_ftp: no checksum information supported" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:813 msgid "list_files_ftp: no checksum information returned" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:908 msgid "Too many failures to obtain checksum - giving up" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1268 msgid "Expecting Command and URL provided" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1275 #: src/hed/libs/data/DataExternalHelper.cpp:376 msgid "Expecting Command among arguments" msgstr "" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1279 #: src/hed/libs/data/DataExternalHelper.cpp:380 msgid "Expecting URL among arguments" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:221 src/hed/dmc/gridftp/Lister.cpp:289 #: src/hed/dmc/gridftp/Lister.cpp:384 src/hed/dmc/gridftp/Lister.cpp:767 #: src/hed/dmc/gridftp/Lister.cpp:812 #, c-format msgid 
"Failure: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:288 msgid "Error getting list of files (in list)" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:290 msgid "Assuming - file not found" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:307 #, c-format msgid "list record: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:362 msgid "Failed reading list of files" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:398 msgid "Failed reading data" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:426 #, c-format msgid "Command: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:430 src/hed/dmc/gridftp/Lister.cpp:471 #: src/hed/mcc/http/PayloadHTTP.cpp:1010 msgid "Memory allocation error" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:438 #, c-format msgid "%s failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:442 msgid "Command is being sent" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:447 msgid "Waiting for response" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:452 msgid "Callback got failure" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:538 msgid "Failed in globus_cond_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:542 msgid "Failed in globus_mutex_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:549 msgid "Failed allocating memory for handle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:554 msgid "Failed in globus_ftp_control_handle_init" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:562 msgid "Failed to enable IPv6" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:573 msgid "Closing connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:580 src/hed/dmc/gridftp/Lister.cpp:595 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:605 msgid "Closed successfully" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:607 msgid "Closing may have failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:634 msgid "Waiting for globus handle to settle" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:639 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:645 msgid "Globus handle is stuck" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:661 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." 
msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:684 #, c-format msgid "EPSV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:688 msgid "EPSV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:695 #, c-format msgid "PASV failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:699 msgid "PASV failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:765 msgid "Failed to apply local address to data connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:788 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:806 #, c-format msgid "Data channel: [%s]:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:810 msgid "Obtained host and address are not acceptable" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:820 msgid "Failed to open data channel" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:838 #, c-format msgid "Unsupported protocol in url %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:850 msgid "Reusing connection" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:874 #, c-format msgid "Failed connecting to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:880 #, c-format msgid "Failed to connect to server %s:%d" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:896 msgid "Missing authentication information" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:905 src/hed/dmc/gridftp/Lister.cpp:919 #, c-format msgid "Bad authentication information: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:928 src/hed/dmc/gridftp/Lister.cpp:943 #, c-format msgid "Failed authenticating: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:935 msgid "Failed authenticating" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:970 src/hed/dmc/gridftp/Lister.cpp:1126 #, c-format msgid "DCAU failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:974 src/hed/dmc/gridftp/Lister.cpp:1131 msgid "DCAU failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:994 msgid "MLST is not supported - trying LIST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1010 #, c-format msgid "Immediate completion expected: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1014 msgid "Immediate completion expected" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1027 #, c-format msgid "Missing information in reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1061 #, c-format msgid "Missing final reply: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1085 #, c-format msgid "Unexpected immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1097 #, c-format msgid "LIST/MLST failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1102 msgid "LIST/MLST failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1152 msgid "MLSD is not supported - trying NLST" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1166 #, c-format msgid "Immediate completion: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1174 #, c-format msgid "NLST/MLSD failed: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1180 msgid "NLST/MLSD failed" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1201 #, c-format msgid "Data transfer aborted: %s" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1206 msgid "Data transfer aborted" msgstr "" #: src/hed/dmc/gridftp/Lister.cpp:1218 msgid "Failed to transfer data" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:409 #: src/hed/dmc/http/DataPointHTTP.cpp:597 #: src/hed/dmc/http/DataPointHTTP.cpp:691 #: src/hed/dmc/http/DataPointHTTP.cpp:1137 #: src/hed/dmc/http/DataPointHTTP.cpp:1282 #: src/hed/dmc/http/DataPointHTTP.cpp:1431 #, 
c-format msgid "Redirecting to %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:461 #, c-format msgid "PROPFIND response: %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:515 #, c-format msgid "Using checksum %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:523 #, c-format msgid "No matching checksum type, using first in list %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:616 #: src/hed/dmc/http/DataPointHTTP.cpp:710 msgid "No information returned by PROPFIND" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:767 #, c-format msgid "Stat: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:771 #, c-format msgid "Stat: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:775 #, c-format msgid "Stat: obtained checksum %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:991 #, c-format msgid "Could not find checksum: %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:993 #, c-format msgid "Checksum of %s is not available" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1037 #, c-format msgid "Check: obtained size %llu" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1039 #, c-format msgid "Check: obtained modification time %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1154 #: src/hed/dmc/http/DataPointHTTP.cpp:1302 #, c-format msgid "HTTP failure %u - %s" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1459 #, c-format msgid "Failed to create %s, trying to create parent directories" msgstr "" #: src/hed/dmc/http/DataPointHTTP.cpp:1648 #, c-format msgid "Error creating directory: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:27 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:40 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:43 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:105 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:108 msgid "Failed to extract VOMS nickname from proxy" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:110 #, c-format msgid "Using Rucio account %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:123 #, c-format msgid "Strange path in Rucio URL: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:133 src/hed/libs/common/FileLock.cpp:42 msgid "Cannot determine hostname from gethostname()" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:171 #, c-format msgid "Bad path for %s: Format should be /replicas//" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:190 #, c-format msgid "Failed to query parent DIDs: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:195 #, c-format msgid "Failed to parse Rucio info: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:242 #: src/hed/dmc/rucio/DataPointRucio.cpp:522 #, c-format msgid "No locations found for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:333 #, c-format msgid "Acquired auth token for %s: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:383 #, c-format msgid "Rucio returned %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:452 #: src/hed/dmc/rucio/DataPointRucio.cpp:543 #, c-format msgid "Failed to parse Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:457 #: src/hed/dmc/rucio/DataPointRucio.cpp:548 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "" #: 
src/hed/dmc/rucio/DataPointRucio.cpp:462 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:467 #, c-format msgid "No pfns returned in Rucio response: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:477 #, c-format msgid "Cannot determine replica type for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:479 #, c-format msgid "%s: replica type %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:482 #, c-format msgid "Skipping %s replica %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:498 #, c-format msgid "Error extracting RSE for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:508 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:511 #, c-format msgid "%s: size %llu" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:515 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:518 #, c-format msgid "%s: checksum %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:553 #, c-format msgid "Parent dataset: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:575 #, c-format msgid "Could not find matching RSE to %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:617 #, c-format msgid "Sending Rucio trace: %s" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:620 #, c-format msgid "Failed to send traces to Rucio: %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:269 #, c-format msgid "Initializing S3 connection to %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:274 #, c-format msgid "Failed to initialize S3 to %s: %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:470 src/hed/dmc/s3/DataPointS3.cpp:592 #, c-format msgid "Failed to read object %s: %s; %s" msgstr "" #: src/hed/dmc/s3/DataPointS3.cpp:669 #, c-format msgid "Failed to write object %s: %s; %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:56 #, c-format msgid "TURL %s cannot be handled" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:83 #, c-format msgid "Check: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:94 #, c-format msgid "Check: obtained size: %lli" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:100 #, c-format msgid "Check: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:104 #, c-format msgid "Check: obtained modification date: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:108 msgid "Check: obtained access latency: low (ONLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:112 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:131 #, c-format msgid "Remove: deleting: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:149 #, c-format msgid "Creating directory: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:197 src/hed/dmc/srm/DataPointSRM.cpp:246 msgid "Calling PrepareReading when request was already prepared!" 
msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:217 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:226 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:231 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:237 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:265 src/hed/dmc/srm/DataPointSRM.cpp:408 msgid "None of the requested transfer protocols are supported" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:465 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:328 src/hed/dmc/srm/DataPointSRM.cpp:507 #, c-format msgid "Redirecting to new URL: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:389 msgid "Calling PrepareWriting when request was already prepared!" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:418 msgid "No space token specified" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:427 #, c-format msgid "Using space token description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:433 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:437 #, c-format msgid "No space tokens found matching description %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:442 #, c-format msgid "Using space token %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:487 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:495 msgid "StartWriting" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:497 msgid "StartWriting: File was not prepared properly" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:556 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:571 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:574 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:577 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:580 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " 
"cannot compare" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:581 src/hed/dmc/srm/DataPointSRM.cpp:582 msgid "No checksum information from server" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:583 src/hed/dmc/srm/DataPointSRM.cpp:584 msgid "No checksum verification possible" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:590 msgid "Failed to release completed request" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:633 src/hed/dmc/srm/DataPointSRM.cpp:700 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "" #: src/hed/dmc/srm/DataPointSRM.cpp:818 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:51 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:90 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:142 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:181 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:221 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:259 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:303 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:365 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:438 msgid "SRM did not return any information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:316 #, c-format msgid "File could not be moved to Running state: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:372 msgid "SRM did not return any useful information" msgstr "" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:450 msgid "File could not be moved to Done state" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:88 msgid "Could not determine version of server" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:94 #, c-format msgid "Server SRM version: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:99 #, c-format msgid "Server implementation: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:136 #, c-format msgid "Adding space token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:163 msgid "No request tokens found" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:176 #, c-format msgid "Adding request token %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:237 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:642 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:828 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1385 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:275 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:327 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:698 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:764 #, c-format msgid "File is ready! TURL is %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:359 #, c-format msgid "Setting userRequestDescription to %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:414 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:457 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1160 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1194 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1228 msgid "No request token specified!" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:524 msgid "Request is reported as ABORTED, but all files are done" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:530 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:536 #, c-format msgid "Request is reported as ABORTED. 
Reason: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:673 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:745 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:678 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:750 #, c-format msgid "Error creating required directories for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:851 msgid "Too many files in one request - please try again with fewer files" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:899 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:936 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:942 #: src/hed/shc/legacy/auth_otokens.cpp:437 #, c-format msgid "%s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:975 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1185 #, c-format msgid "Files associated with request token %s released successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1219 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1254 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1271 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1277 msgid "Type is file, calling srmRm" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is dir, calling srmRmDir" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "File type is not available, attempting file delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1288 msgid "File delete failed, attempting directory delete" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1313 #, c-format msgid "File %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1340 #, c-format msgid "Directory %s removed successfully" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1455 #, c-format msgid "Checking for existence of %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1458 #, c-format msgid "File already exists: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1495 #, c-format msgid "Error creating directory %s: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, c-format msgid "SOAP request: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "" #: 
src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #, c-format msgid "SOAP response: %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:75 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:161 #, c-format msgid "Failed to acquire lock on file %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:80 #, c-format msgid "Error reading info from file %s:%s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:94 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:186 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:99 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:202 #, c-format msgid "Error writing srm info file %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:81 msgid "" "Missing reference to factory and/or module. It is unsafe to use Xrootd in " "non-persistent mode - Xrootd code is disabled. Report to developers." msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:120 #, c-format msgid "Could not handle checksum %s: skip checksum check" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:126 #, c-format msgid "Failed to create xrootd copy job: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:143 #, c-format msgid "Failed to copy %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:194 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:196 #, c-format msgid "Read %i bytes" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:227 #, c-format msgid "Could not open file %s for reading: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:242 #, c-format msgid "Unable to find file size of %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:306 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:329 #, c-format msgid "xrootd write failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:338 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "xrootd close failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:361 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:374 #, c-format msgid "xrootd open failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:388 #, c-format msgid "close failed: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:430 #, c-format msgid "Read access not allowed for %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:449 #, c-format msgid "Could not stat file %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:454 msgid "Not getting checksum of zip constituent" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:458 #, c-format msgid "Could not get checksum of %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:462 #, c-format msgid "Checksum %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:500 #, c-format msgid "Failed to open directory %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:518 #, c-format msgid "Error while reading dir %s: %s" msgstr "" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:568 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:586 #, c-format msgid "Error creating required dirs: %s" msgstr "" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format 
msgid "PDP: %s can not be loaded" msgstr "" #: src/hed/identitymap/IdentityMap.cpp:219 src/hed/shc/legacy/LegacyMap.cpp:221 #, c-format msgid "Grid identity is mapped to local identity '%s'" msgstr "" #: src/hed/libs/common/ArcLocation.cpp:129 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "" msgstr[1] "" #: src/hed/libs/common/FileLock.cpp:92 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:96 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:103 #, c-format msgid "Error creating temporary file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:113 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:124 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "" #: src/hed/libs/common/FileLock.cpp:128 #, c-format msgid "Error creating lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error writing to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:141 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:150 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" #: src/hed/libs/common/FileLock.cpp:158 #, c-format msgid "%li seconds since lock file %s was created" msgstr "" #: src/hed/libs/common/FileLock.cpp:161 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:165 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:178 #, c-format msgid "This process already owns the lock on %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:182 #, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" #: src/hed/libs/common/FileLock.cpp:184 #, c-format msgid "Failed to remove file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:193 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "" #: 
src/hed/libs/common/FileLock.cpp:210 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:222 #, c-format msgid "Lock file %s doesn't exist" msgstr "" #: src/hed/libs/common/FileLock.cpp:224 #, c-format msgid "Error listing lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:230 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" #: src/hed/libs/common/FileLock.cpp:236 #, c-format msgid "Error reading lock file %s: %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:240 #, c-format msgid "Error with formatting in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:250 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "" #: src/hed/libs/common/FileLock.cpp:259 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "" #: src/hed/libs/common/FileLock.cpp:262 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "" #: src/hed/libs/common/Logger.cpp:58 #, c-format msgid "Invalid log level. Using default %s." msgstr "" #: src/hed/libs/common/Logger.cpp:123 #, c-format msgid "Invalid old log level. Using default %s." msgstr "" #: src/hed/libs/common/OptionParser.cpp:106 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "" #: src/hed/libs/common/OptionParser.cpp:309 #: src/hed/libs/common/OptionParser.cpp:442 #, c-format msgid "Options Group %s:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:311 #: src/hed/libs/common/OptionParser.cpp:445 #, c-format msgid "%s:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:313 #, c-format msgid "Show %s help options" msgstr "" #: src/hed/libs/common/OptionParser.cpp:348 msgid "Use -? to get usage description" msgstr "" #: src/hed/libs/common/OptionParser.cpp:425 msgid "Usage:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:428 msgid "OPTION..." msgstr "" #: src/hed/libs/common/OptionParser.cpp:434 msgid "Help Options:" msgstr "" #: src/hed/libs/common/OptionParser.cpp:435 msgid "Show help options" msgstr "" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has an invalid value \"%s\"."
msgstr "" #: src/hed/libs/common/Run_unix.cpp:225 msgid "Child monitoring signal detected" msgstr "" #: src/hed/libs/common/Run_unix.cpp:230 #, c-format msgid "Child monitoring error: %i" msgstr "" #: src/hed/libs/common/Run_unix.cpp:243 msgid "Child monitoring kick detected" msgstr "" #: src/hed/libs/common/Run_unix.cpp:246 msgid "Child monitoring internal communication error" msgstr "" #: src/hed/libs/common/Run_unix.cpp:258 msgid "Child monitoring stdout is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:268 msgid "Child monitoring stderr is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:278 msgid "Child monitoring stdin is closed" msgstr "" #: src/hed/libs/common/Run_unix.cpp:296 #, c-format msgid "Child monitoring child %d exited" msgstr "" #: src/hed/libs/common/Run_unix.cpp:300 #, c-format msgid "Child monitoring lost child %d (%d)" msgstr "" #: src/hed/libs/common/Run_unix.cpp:321 #, c-format msgid "Child monitoring drops abandoned child %d (%d)" msgstr "" #: src/hed/libs/common/Run_unix.cpp:484 msgid "Child was already started" msgstr "" #: src/hed/libs/common/Run_unix.cpp:488 msgid "No arguments are assigned for external process" msgstr "" #: src/hed/libs/common/Run_unix.cpp:621 #, c-format msgid "Excepton while trying to start external process: %s" msgstr "" #: src/hed/libs/common/StringConv.h:31 #, c-format msgid "Conversion failed: %s" msgstr "" #: src/hed/libs/common/StringConv.h:35 #, c-format msgid "Full string not used: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:256 msgid "Maximum number of threads running - putting new request into queue" msgstr "" #: src/hed/libs/common/Thread.cpp:304 #, c-format msgid "Thread exited with Glib error: %s" msgstr "" #: src/hed/libs/common/Thread.cpp:306 #, c-format msgid "Thread exited with generic exception: %s" msgstr "" #: src/hed/libs/common/URL.cpp:137 #, c-format msgid "URL is not valid: %s" msgstr "" #: src/hed/libs/common/URL.cpp:188 #, c-format msgid "Illegal URL - path must be absolute: %s" msgstr "" #: src/hed/libs/common/URL.cpp:193 #, c-format msgid "Illegal URL - no hostname given: %s" msgstr "" #: src/hed/libs/common/URL.cpp:282 #, c-format msgid "Illegal URL - path must be absolute or empty: %s" msgstr "" #: src/hed/libs/common/URL.cpp:298 #, c-format msgid "Illegal URL - no closing ] for IPv6 address found: %s" msgstr "" #: src/hed/libs/common/URL.cpp:306 #, c-format msgid "" "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s" msgstr "" #: src/hed/libs/common/URL.cpp:322 #, c-format msgid "Invalid port number in %s" msgstr "" #: src/hed/libs/common/URL.cpp:455 #, c-format msgid "Unknown LDAP scope %s - using base" msgstr "" #: src/hed/libs/common/URL.cpp:618 msgid "Attempt to assign relative path to URL - making it absolute" msgstr "" #: src/hed/libs/common/URL.cpp:717 #, c-format msgid "URL option %s does not have format name=value" msgstr "" #: src/hed/libs/common/URL.cpp:1186 #, c-format msgid "urllist %s contains invalid URL: %s" msgstr "" #: src/hed/libs/common/URL.cpp:1191 #, c-format msgid "URL protocol is not urllist: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:38 src/hed/libs/common/UserConfig.cpp:831 #: src/hed/libs/common/UserConfig.cpp:840 #: src/hed/libs/common/UserConfig.cpp:846 #: src/hed/libs/common/UserConfig.cpp:872 #: src/hed/libs/common/UserConfig.cpp:884 #: src/hed/libs/common/UserConfig.cpp:896 #: src/hed/libs/common/UserConfig.cpp:916 #, c-format msgid "Multiple %s attributes in configuration file (%s)" msgstr "" #: 
src/hed/libs/common/UserConfig.cpp:139 #, c-format msgid "Wrong ownership of certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:141 #, c-format msgid "Wrong permissions of certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:143 #, c-format msgid "Can not access certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:150 #, c-format msgid "Wrong ownership of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:152 #, c-format msgid "Wrong permissions of key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:154 #, c-format msgid "Can not access key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:161 #, c-format msgid "Wrong ownership of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:163 #, c-format msgid "Wrong permissions of proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:165 #, c-format msgid "Can not access proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:176 msgid "computing" msgstr "" #: src/hed/libs/common/UserConfig.cpp:178 msgid "index" msgstr "" #: src/hed/libs/common/UserConfig.cpp:277 #: src/hed/libs/common/UserConfig.cpp:281 #: src/hed/libs/common/UserConfig.cpp:328 #: src/hed/libs/common/UserConfig.cpp:332 #, c-format msgid "System configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:285 #: src/hed/libs/common/UserConfig.cpp:336 #, c-format msgid "System configuration file (%s or %s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:287 #: src/hed/libs/common/UserConfig.cpp:338 #, c-format msgid "System configuration file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:293 #: src/hed/libs/common/UserConfig.cpp:305 #: src/hed/libs/common/UserConfig.cpp:344 #: src/hed/libs/common/UserConfig.cpp:356 #, c-format msgid "User configuration file (%s) contains errors." msgstr "" #: src/hed/libs/common/UserConfig.cpp:298 #: src/hed/libs/common/UserConfig.cpp:349 msgid "No configuration file could be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:301 #: src/hed/libs/common/UserConfig.cpp:352 #, c-format msgid "User configuration file (%s) does not exist or cannot be loaded." msgstr "" #: src/hed/libs/common/UserConfig.cpp:438 #, c-format msgid "" "Unable to parse the specified verbosity (%s) to one of the allowed levels" msgstr "" #: src/hed/libs/common/UserConfig.cpp:450 #, c-format msgid "" "Unsupported job list type '%s', using 'SQLITE'. Supported types are: SQLITE, " "XML." msgstr "" #: src/hed/libs/common/UserConfig.cpp:511 msgid "Loading OToken failed - ignoring its presence" msgstr "" #: src/hed/libs/common/UserConfig.cpp:652 #, c-format msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:654 #, c-format msgid "" "If the proxy or certificate/key does exist, you can manually specify the " "locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or " "'%s' attributes in the client configuration file (e.g. '%s')" msgstr "" #: src/hed/libs/common/UserConfig.cpp:672 #: src/hed/libs/common/UserConfig.cpp:682 #, c-format msgid "" "Can not access CA certificate directory: %s. The certificates will not be " "verified." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:708 #, c-format msgid "" "Can not find CA certificates directory in default locations:\n" "~/.arc/certificates, ~/.globus/certificates,\n" "%s/etc/certificates, %s/etc/grid-security/certificates,\n" "%s/share/certificates, /etc/grid-security/certificates.\n" "The certificate will not be verified.\n" "If the CA certificates directory does exist, please manually specify the " "locations via env\n" "X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n" msgstr "" #: src/hed/libs/common/UserConfig.cpp:730 #, c-format msgid "Using proxy file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:733 #, c-format msgid "Using certificate file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:734 #, c-format msgid "Using key file: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:738 #, c-format msgid "Using CA certificate directory: %s" msgstr "" #: src/hed/libs/common/UserConfig.cpp:742 msgid "Using OToken" msgstr "" #: src/hed/libs/common/UserConfig.cpp:755 #: src/hed/libs/common/UserConfig.cpp:761 #, c-format msgid "Can not access VOMSES file/directory: %s." msgstr "" #: src/hed/libs/common/UserConfig.cpp:767 #, c-format msgid "Can not access VOMS file/directory: %s." msgstr "" #: src/hed/libs/common/UserConfig.cpp:781 msgid "" "Can not find voms service configuration file (vomses) in default locations: " "~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/" "grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses" msgstr "" #: src/hed/libs/common/UserConfig.cpp:794 #, c-format msgid "Loading configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:828 #, c-format msgid "" "The value of the timeout attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:853 msgid "" "The brokerarguments attribute can only be used in conjunction with the " "brokername attribute" msgstr "" #: src/hed/libs/common/UserConfig.cpp:869 #, c-format msgid "" "The value of the keysize attribute in the configuration file (%s) was only " "partially parsed" msgstr "" #: src/hed/libs/common/UserConfig.cpp:891 #, c-format msgid "" "Could not convert the slcs attribute value (%s) to an URL instance in " "configuration file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:937 #, c-format msgid "Specified overlay file (%s) does not exist." msgstr "" #: src/hed/libs/common/UserConfig.cpp:941 #, c-format msgid "" "Unknown attribute %s in common section of configuration file (%s), ignoring " "it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:982 #, c-format msgid "Unknown section %s, ignoring it" msgstr "" #: src/hed/libs/common/UserConfig.cpp:986 #, c-format msgid "Configuration (%s) loaded" msgstr "" #: src/hed/libs/common/UserConfig.cpp:989 #, c-format msgid "Could not load configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1086 #, c-format msgid "UserConfiguration saved to file (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1099 #, c-format msgid "Unable to create %s directory." 
msgstr "" #: src/hed/libs/common/UserConfig.cpp:1108 #, c-format msgid "Configuration example file created (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1110 #, c-format msgid "Unable to copy example configuration from existing configuration (%s)" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1115 #, c-format msgid "Cannot copy example configuration (%s), it is not a regular file" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1120 #, c-format msgid "Example configuration (%s) not created." msgstr "" #: src/hed/libs/common/UserConfig.cpp:1125 #, c-format msgid "The default configuration file (%s) is not a regular file." msgstr "" #: src/hed/libs/common/UserConfig.cpp:1143 #, c-format msgid "%s directory created" msgstr "" #: src/hed/libs/common/UserConfig.cpp:1145 #: src/hed/libs/common/UserConfig.cpp:1172 src/hed/libs/data/DataMover.cpp:703 #, c-format msgid "Failed to create directory %s" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:58 msgid "This VERBOSE message should not be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:62 msgid "This INFO message should be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:73 msgid "This VERBOSE message should now be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:79 msgid "This INFO message should also be seen" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:93 msgid "This message goes to initial destination" msgstr "" #: src/hed/libs/common/test/LoggerTest.cpp:108 msgid "This message goes to per-thread destination" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:80 msgid "Request failed: No response from SPService" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:84 #: src/hed/libs/communication/ClientSAML2SSO.cpp:137 msgid "Request failed: response from SPService is not as expected" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:92 #, c-format msgid "Authentication Request URL: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:133 msgid "Request failed: No response from IdP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:184 msgid "Request failed: No response from IdP when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:188 msgid "" "Request failed: response from IdP is not as expected when doing redirecting" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:245 msgid "Request failed: No response from IdP when doing authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:249 msgid "" "Request failed: response from IdP is not as expected when doing " "authentication" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:294 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:312 msgid "Succeeded to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:296 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315 msgid "Failed to verify the signature under " msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:310 msgid "" "Request failed: No response from SP Service when sending SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:314 msgid "" "Request failed: response from SP Service is not as expected when sending " "SAML assertion to SP" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:325 #, c-format msgid "IdP return some error message: %s" msgstr "" #: src/hed/libs/communication/ClientSAML2SSO.cpp:353 #: src/hed/libs/communication/ClientSAML2SSO.cpp:398 msgid "SAML2SSO process failed" msgstr "" #: 
src/hed/libs/communication/ClientX509Delegation.cpp:56 msgid "Creating delegation credential to ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:66 #: src/hed/libs/communication/ClientX509Delegation.cpp:269 msgid "DelegateCredentialsInit failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:70 #: src/hed/libs/communication/ClientX509Delegation.cpp:124 #: src/hed/libs/communication/ClientX509Delegation.cpp:159 #: src/hed/libs/communication/ClientX509Delegation.cpp:214 #: src/hed/libs/communication/ClientX509Delegation.cpp:273 msgid "There is no SOAP response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:75 msgid "There is no X509 request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:80 msgid "There is no Format request in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:88 msgid "There is no Id or X509 request value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:101 #: src/hed/libs/communication/ClientX509Delegation.cpp:189 msgid "DelegateProxy failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:120 msgid "UpdateCredentials failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:128 msgid "There is no UpdateCredentialsResponse in response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:136 #: src/hed/libs/communication/ClientX509Delegation.cpp:164 #: src/hed/libs/communication/ClientX509Delegation.cpp:219 #: src/hed/libs/communication/ClientX509Delegation.cpp:304 msgid "There is no SOAP connection chain configured" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:142 msgid "Creating delegation to CREAM delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:155 msgid "Delegation getProxyReq request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:175 msgid "Creating delegation to CREAM delegation service failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:210 msgid "Delegation putProxy request failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:224 msgid "Creating delegation to CREAM delegation failed" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:239 msgid "Getting delegation credential from ARC delegation service" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:278 msgid "There is no Delegated X509 token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:283 msgid "There is no Format delegated token in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:291 msgid "There is no Id or X509 token value in the response" msgstr "" #: src/hed/libs/communication/ClientX509Delegation.cpp:300 #, c-format msgid "" "Get delegated credential from delegation service: \n" " %s" msgstr "" #: src/hed/libs/compute/Broker.cpp:54 #, c-format msgid "Performing matchmaking against target (%s)." msgstr "" #: src/hed/libs/compute/Broker.cpp:64 #, c-format msgid "Matchmaking, ExecutionTarget: %s matches job description" msgstr "" #: src/hed/libs/compute/Broker.cpp:145 #, c-format msgid "" "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:153 #, c-format msgid "ComputingShareName of ExecutionTarget (%s) is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162 #, c-format msgid "ComputingShare (%s) explicitly rejected" msgstr "" #: src/hed/libs/compute/Broker.cpp:171 #, c-format msgid "" "Matchmaking, ComputingShareName of ExecutionTarget (%s) is not defined, but " "requested queue is (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180 #, c-format msgid "" "Matchmaking, ComputingShare (%s) does not match requested queue (%s): " "skipping" msgstr "" #: src/hed/libs/compute/Broker.cpp:184 #, c-format msgid "Matchmaking, ComputingShare (%s) matches requested queue (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:192 #, c-format msgid "" "ProcessingStartTime (%s) specified in job description is inside the targets " "downtime period [ %s - %s ]." msgstr "" #: src/hed/libs/compute/Broker.cpp:197 #, c-format msgid "The downtime of the target (%s) is not published. Keeping target." msgstr "" #: src/hed/libs/compute/Broker.cpp:203 #, c-format msgid "HealthState of ExecutionTarget (%s) is not OK or WARNING (%s)" msgstr "" #: src/hed/libs/compute/Broker.cpp:208 #, c-format msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:215 #, c-format msgid "" "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: " "%s" msgstr "" #: src/hed/libs/compute/Broker.cpp:220 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:246 #, c-format msgid "" "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:275 #, c-format msgid "" "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the " "ExecutionTarget." msgstr "" #: src/hed/libs/compute/Broker.cpp:287 #, c-format msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:302 #, c-format msgid "" "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), " "JobDescription: %d (TotalCPUTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:309 #, c-format msgid "" "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:314 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not " "defined, assuming no CPU time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:320 #, c-format msgid "" "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), " "JobDescription: %d (TotalCPUTime/NumberOfSlots)" msgstr "" #: src/hed/libs/compute/Broker.cpp:325 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU " "time limit" msgstr "" #: src/hed/libs/compute/Broker.cpp:333 #, c-format msgid "" "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:339 #, c-format msgid "" "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), " "JobDescription: %d (IndividualPhysicalMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:344 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:352 #, c-format msgid "" "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d " "(MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)" msgstr "" #: src/hed/libs/compute/Broker.cpp:357 #, c-format msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:365 #, c-format msgid "" "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) " "JobDescription: %s (Platform)" msgstr "" #: src/hed/libs/compute/Broker.cpp:370 #, c-format msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:378 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:383 #, c-format msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:391 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not " "satisfied" msgstr "" #: src/hed/libs/compute/Broker.cpp:396 #, c-format msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:405 #, c-format msgid "" "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not " "support %s, specified in the JobDescription." 
msgstr "" #: src/hed/libs/compute/Broker.cpp:409 #, c-format msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:417 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:424 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:430 src/hed/libs/compute/Broker.cpp:451 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:438 #, c-format msgid "" "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); " "JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:445 #, c-format msgid "" "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB " "(WorkingAreaFree); JobDescription: %d MB (DiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:459 #, c-format msgid "" "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); " "JobDescription: %d MB (CacheDiskSpace)" msgstr "" #: src/hed/libs/compute/Broker.cpp:464 #, c-format msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:472 #, c-format msgid "" "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:478 #, c-format msgid "" "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) " "JobDescription: %d (NumberOfProcesses)" msgstr "" #: src/hed/libs/compute/Broker.cpp:484 #, c-format msgid "" "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not " "defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:492 #, c-format msgid "" "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s " "(WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)" msgstr "" #: src/hed/libs/compute/Broker.cpp:497 #, c-format msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined" msgstr "" #: src/hed/libs/compute/Broker.cpp:505 #, c-format msgid "" "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) " "JobDescription: %s (InBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:512 #, c-format msgid "" "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) " "JobDescription: %s (OutBound)" msgstr "" #: src/hed/libs/compute/Broker.cpp:535 msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded." msgstr "" #: src/hed/libs/compute/Broker.cpp:552 msgid "Unable to match target, marking it as not matching. Broker not valid." msgstr "" #: src/hed/libs/compute/Broker.cpp:588 msgid "Unable to sort ExecutionTarget objects - Invalid Broker object." msgstr "" #: src/hed/libs/compute/Broker.cpp:612 msgid "" "Unable to register job submission. Can't get JobDescription object from " "Broker, Broker is invalid." msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:89 #, c-format msgid "Broker plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:96 #, c-format msgid "Unable to load BrokerPlugin (%s)" msgstr "" #: src/hed/libs/compute/BrokerPlugin.cpp:106 #, c-format msgid "Broker %s loaded" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:27 #, c-format msgid "Uniq is replacing service coming from %s with service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:31 #, c-format msgid "Uniq is ignoring service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:38 #, c-format msgid "Uniq is adding service coming from %s" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:61 #, c-format msgid "Adding endpoint (%s) to TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:64 #, c-format msgid "Adding endpoint (%s) to ServiceEndpointRetriever" msgstr "" #: src/hed/libs/compute/ComputingServiceRetriever.cpp:67 #, c-format msgid "" "Adding endpoint (%s) to both ServiceEndpointRetriever and " "TargetInformationRetriever" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:42 #, c-format msgid "The plugin %s does not support any interfaces, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:47 #, c-format msgid "" "The first supported interface of the plugin %s is an empty string, skipping " "the plugin." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:95 #, c-format msgid "Interface on endpoint (%s) %s." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:101 #: src/hed/libs/compute/EntityRetriever.cpp:133 #: src/hed/libs/compute/EntityRetriever.cpp:425 #, c-format msgid "Ignoring endpoint (%s), it is already registered in retriever." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:110 #, c-format msgid "Service Loop: Endpoint %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:112 #, c-format msgid " This endpoint (%s) is STARTED or SUCCESSFUL" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:115 #, c-format msgid "" "Suspending querying of endpoint (%s) since the service at the endpoint is " "already being queried, or has been queried." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:122 #: src/hed/libs/compute/EntityRetriever.cpp:237 #, c-format msgid " Status of endpoint (%s) is %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:126 #, c-format msgid "Setting status (STARTED) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:145 #, c-format msgid "Starting thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:147 #: src/hed/libs/compute/EntityRetriever.cpp:289 #, c-format msgid "Failed to start querying the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:174 #, c-format msgid "Found a registry, will query it recursively: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:211 #, c-format msgid "Setting status (%s) for endpoint: %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:231 msgid "Checking for suspended endpoints which should be started." 
msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:241 #, c-format msgid "Found started or successful endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:253 #, c-format msgid "Found suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:264 #, c-format msgid "Trying to start suspended endpoint (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:284 #, c-format msgid "" "Starting querying of suspended endpoint (%s) - no other endpoints for this " "service is being queried or has been queried successfully." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:351 #, c-format msgid "Calling plugin %s to query endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:373 #, c-format msgid "" "The interface of this endpoint (%s) is unspecified, will try all possible " "plugins" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:389 #, c-format msgid "Problem loading plugin %s, skipping it." msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:393 #, c-format msgid "The endpoint (%s) is not supported by this plugin (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:414 #, c-format msgid "" "New endpoint is created (%s) from the one with the unspecified interface (%s)" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:432 #, c-format msgid "Starting sub-thread to query the endpoint on %s" msgstr "" #: src/hed/libs/compute/EntityRetriever.cpp:434 #, c-format msgid "" "Failed to start querying the endpoint on %s (unable to create sub-thread)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32 #, c-format msgid "Found %s %s (it was loaded already)" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41 #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49 #: src/hed/libs/compute/JobControllerPlugin.cpp:98 #: src/hed/libs/compute/JobControllerPlugin.cpp:107 #: src/hed/libs/compute/SubmitterPlugin.cpp:167 #: src/hed/libs/compute/SubmitterPlugin.cpp:177 #, c-format msgid "" "Unable to locate the \"%s\" plugin. Please refer to installation " "instructions and check if package providing support for \"%s\" plugin is " "installed" msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42 #, c-format msgid "%s plugin \"%s\" not found." msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50 #, c-format msgid "%s %s could not be created." msgstr "" #: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55 #, c-format msgid "Loaded %s %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:51 #, c-format msgid "" "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of " "the requested '%s'." 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:132 #, c-format msgid "" "Computing endpoint %s (type %s) added to the list for submission brokering" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, c-format msgid "Address: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, c-format msgid "Place: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, c-format msgid "Country: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Postal code: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:243 #, c-format msgid "Latitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:244 #, c-format msgid "Longitude: %f" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:250 #, c-format msgid "Owner: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:257 #, c-format msgid "ID: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:258 #, c-format msgid "Type: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:263 #, c-format msgid "URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, c-format msgid "Interface: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:266 msgid "Interface versions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:271 msgid "Interface extensions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:276 msgid "Capabilities:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, c-format msgid "Technology: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:282 msgid "Supported Profiles:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, c-format msgid "Implementor: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, c-format msgid "Implementation name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, c-format msgid "Quality level: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, c-format msgid "Health state info: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:291 #, c-format msgid "Serving state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:292 #, c-format msgid "Issuer CA: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:294 msgid "Trusted CAs:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Downtime starts: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:299 #, c-format msgid "Downtime ends: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, c-format msgid "Staging: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:302 msgid "Job descriptions:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:314 #, c-format msgid "Scheme: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:317 #, c-format msgid "Rule: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Mapping queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Max wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Max total wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Min wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Default wall-time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Max CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Min CPU time: %s" 
msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Default CPU time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max user running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max slots per job: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Max stage in streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max stage out streams: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Scheduling policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Max virtual memory: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:348 #, c-format msgid "Max disk space: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:349 #, c-format msgid "Default Storage Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:350 msgid "Supports preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:351 msgid "Doesn't support preemption" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Total jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local running jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Local suspended jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Staging jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Estimated average waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:362 #, c-format msgid "Estimated worst waiting time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:363 #, c-format msgid "Free slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:365 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:368 #, c-format msgid " %s: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:369 #, c-format msgid " unspecified: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Used slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:373 #, c-format msgid "Requested slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:374 #, c-format msgid "Reservation policy: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:381 #, c-format msgid "Resource manager: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, 
c-format msgid " (%s)" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:387 #, c-format msgid "Total physical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:388 #, c-format msgid "Total logical CPUs: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:389 #, c-format msgid "Total slots: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Supports advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Doesn't support advance reservations" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:392 msgid "Supports bulk submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Doesn't support bulk Submission" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:394 msgid "Homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:395 msgid "Non-homogeneous resource" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:397 msgid "Network information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:402 msgid "Working area is shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:403 msgid "Working area is not shared among jobs" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Working area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:405 #, c-format msgid "Working area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:406 #, c-format msgid "Working area life time: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:407 #, c-format msgid "Cache area total size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:408 #, c-format msgid "Cache area free size: %i GB" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:414 #, c-format msgid "Platform: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment supports inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment does not support inbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:417 msgid "Execution environment supports outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:418 msgid "Execution environment does not support outbound connections" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:419 msgid "Execution environment is a virtual machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:420 msgid "Execution environment is a physical machine" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "CPU vendor: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "CPU model: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, c-format msgid "CPU version: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "CPU clock speed: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:425 #, c-format msgid "Main memory size: %i" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:426 #, c-format msgid "OS family: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:427 #, c-format msgid "OS name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:428 #, c-format msgid "OS version: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:435 msgid "Computing service:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:459 #, c-format msgid "%d Endpoints" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:464 msgid "Endpoint Information:" msgstr "" #: 
src/hed/libs/compute/ExecutionTarget.cpp:476 #, c-format msgid "%d Batch Systems" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:481 msgid "Batch System Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:487 msgid "Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:500 #, c-format msgid "%d Shares" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:505 msgid "Share Information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, c-format msgid "%d mapping policies" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:515 msgid "Mapping policy:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #, c-format msgid " Computing endpoint URL: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:535 #, c-format msgid " Computing endpoint interface name: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:537 #: src/hed/libs/compute/Job.cpp:579 #, c-format msgid " Queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:540 #, c-format msgid " Mapping queue: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:543 #, c-format msgid " Health state: %s" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:548 msgid "Service information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:553 msgid " Installed application environments:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:560 msgid "Batch system information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:563 msgid "Queue information:" msgstr "" #: src/hed/libs/compute/ExecutionTarget.cpp:570 msgid " Benchmark information:" msgstr "" #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "" #: src/hed/libs/compute/GLUE2.cpp:420 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" #: src/hed/libs/compute/Job.cpp:328 msgid "Unable to detect format of job record." 
msgstr "" #: src/hed/libs/compute/Job.cpp:549 #, c-format msgid "Job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:551 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " State: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:555 #, c-format msgid " Specific state: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:559 src/hed/libs/compute/Job.cpp:583 #, c-format msgid " Waiting Position: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:563 #, c-format msgid " Exit Code: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:567 #, c-format msgid " Job Error: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:572 #, c-format msgid " Owner: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:576 #, c-format msgid " Other Messages: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:581 #, c-format msgid " Requested Slots: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:586 #, c-format msgid " Stdin: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:588 #, c-format msgid " Stdout: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:590 #, c-format msgid " Stderr: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:592 #, c-format msgid " Computing Service Log Directory: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:595 #, c-format msgid " Submitted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:598 #, c-format msgid " End Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:601 #, c-format msgid " Submitted from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:604 #, c-format msgid " Submitting client: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:607 #, c-format msgid " Requested CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:612 #, c-format msgid " Used CPU Time: %s (%s per slot)" msgstr "" #: src/hed/libs/compute/Job.cpp:616 #, c-format msgid " Used CPU Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:622 #, c-format msgid " Used Wall Time: %s (%s per slot)" msgstr "" #: src/hed/libs/compute/Job.cpp:626 #, c-format msgid " Used Wall Time: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:631 #, c-format msgid " Used Memory: %d" msgstr "" #: src/hed/libs/compute/Job.cpp:635 #, c-format msgid " Results were deleted: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:636 #, c-format msgid " Results must be retrieved before: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:640 #, c-format msgid " Proxy valid until: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:644 #, c-format msgid " Entry valid from: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:647 #, c-format msgid " Entry valid for: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:651 msgid " Old job IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:659 #, c-format msgid " ID on service: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:660 #, c-format msgid " Service information URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:661 #, c-format msgid " Job status URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:662 #, c-format msgid " Job management URL: %s (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:663 #, c-format msgid " Stagein directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:664 #, c-format msgid " Stageout directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:665 #, c-format msgid " Session directory URL: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:667 msgid " Delegation IDs:" msgstr "" #: src/hed/libs/compute/Job.cpp:849 #, c-format msgid "Unable to handle job (%s), no interface specified." 
msgstr "" #: src/hed/libs/compute/Job.cpp:854 #, c-format msgid "" "Unable to handle job (%s), no plugin associated with the specified interface " "(%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:876 #, c-format msgid "Invalid download destination path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:881 #, c-format msgid "" "Unable to download job (%s), no JobControllerPlugin plugin was set to handle " "the job." msgstr "" #: src/hed/libs/compute/Job.cpp:885 #, c-format msgid "Downloading job: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:891 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of stage out " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:897 #, c-format msgid "" "Can't retrieve job files for job (%s) - unable to determine URL of log " "directory" msgstr "" #: src/hed/libs/compute/Job.cpp:903 #, c-format msgid "Invalid stage out path specified (%s)" msgstr "" #: src/hed/libs/compute/Job.cpp:911 #, c-format msgid "%s directory exist! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:923 #, c-format msgid "Unable to retrieve list of job files to download for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:944 #, c-format msgid "Unable to retrieve list of log files to download for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:963 #, c-format msgid "No files to retrieve for job %s" msgstr "" #: src/hed/libs/compute/Job.cpp:969 #, c-format msgid "Failed to create directory %s! Skipping job." msgstr "" #: src/hed/libs/compute/Job.cpp:986 #, c-format msgid "Failed downloading %s to %s, destination already exist" msgstr "" #: src/hed/libs/compute/Job.cpp:992 #, c-format msgid "Failed downloading %s to %s, unable to remove existing destination" msgstr "" #: src/hed/libs/compute/Job.cpp:999 #, c-format msgid "Failed downloading %s to %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1012 #, c-format msgid "Unable to initialize handler for %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1017 #, c-format msgid "Unable to list files at %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1060 msgid "Now copying (from -> to)" msgstr "" #: src/hed/libs/compute/Job.cpp:1061 #, c-format msgid " %s -> %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1076 #, c-format msgid "Unable to initialise connection to source: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1087 #, c-format msgid "Unable to initialise connection to destination: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1109 #, c-format msgid "File download failed: %s" msgstr "" #: src/hed/libs/compute/Job.cpp:1148 src/hed/libs/compute/Job.cpp:1177 #: src/hed/libs/compute/Job.cpp:1209 src/hed/libs/compute/Job.cpp:1242 #, c-format msgid "Waiting for lock on file %s" msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:99 #, c-format msgid "JobControllerPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:108 #, c-format msgid "JobControllerPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/JobControllerPlugin.cpp:113 #, c-format msgid "Loaded JobControllerPlugin %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:26 #, c-format msgid ": %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:28 #, c-format msgid ": %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:144 msgid " --- DRY RUN --- " msgstr "" #: src/hed/libs/compute/JobDescription.cpp:154 #, c-format msgid " Annotation: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:160 #, c-format msgid " Old activity ID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:166 #, c-format msgid " Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:177 #, c-format msgid " RemoteLogging (optional): %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:180 #, c-format msgid " RemoteLogging: %s (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:188 #, c-format msgid " Environment.name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:189 #, c-format msgid " Environment: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:202 #, c-format msgid " PreExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:205 #: src/hed/libs/compute/JobDescription.cpp:223 #, c-format msgid " Exit code for successful execution: %d" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:208 #: src/hed/libs/compute/JobDescription.cpp:226 msgid " No exit code for successful execution specified." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:220 #, c-format msgid " PostExecutable.Argument: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:236 #, c-format msgid " Access control: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:240 #, c-format msgid " Processing start time: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:243 msgid " Notify:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:257 #, c-format msgid " Credential service: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:267 msgid " Operating system requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:285 msgid " Computing endpoint requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:298 msgid " Node access: inbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:301 msgid " Node access: outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:304 msgid " Node access: inbound and outbound" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:314 msgid " Job requires exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:317 msgid " Job does not require exclusive execution" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:322 msgid " Run time environment requirements:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:334 msgid " Inputfile element:" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:335 #: src/hed/libs/compute/JobDescription.cpp:357 #, c-format msgid " Name: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:337 msgid " Is executable: true" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:341 #, c-format msgid " Sources: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:343 #, c-format msgid " Sources.DelegationID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:347 #, c-format msgid " Sources.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:356 msgid " Outputfile element:" 
msgstr "" #: src/hed/libs/compute/JobDescription.cpp:360 #, c-format msgid " Targets: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:362 #, c-format msgid " Targets.DelegationID: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:366 #, c-format msgid " Targets.Options: %s = %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:373 #, c-format msgid " DelegationID element: %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:380 #, c-format msgid " Other attributes: [%s], %s" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:446 msgid "Empty job description source string" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:479 msgid "No job description parsers available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:481 #, c-format msgid "" "No job description parsers suitable for handling '%s' language are available" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:489 #, c-format msgid "%s parsing error" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:505 msgid "No job description parser was able to interpret job description" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:515 msgid "" "Job description language is not specified, unable to output description." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:527 #, c-format msgid "Generating %s job description output" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:543 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:556 #, c-format msgid "Two input files have identical name '%s'." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:575 #: src/hed/libs/compute/JobDescription.cpp:588 #, c-format msgid "Cannot stat local input file '%s'" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:608 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:650 msgid "Unable to select runtime environment" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:657 msgid "Unable to select middleware" msgstr "" #: src/hed/libs/compute/JobDescription.cpp:664 msgid "Unable to select operating system." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:683 #, c-format msgid "No test-job with ID %d found." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:695 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" #: src/hed/libs/compute/JobDescription.cpp:699 #, c-format msgid "No jobdescription resulted at %d test" msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59 #, c-format msgid "JobDescriptionParserPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64 #, c-format msgid "Loaded JobDescriptionParserPlugin %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:125 #, c-format msgid "Unable to create data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:133 #, c-format msgid "Unable to create jobs table in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:142 #, c-format msgid "Unable to create jobs_new table in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148 #, c-format msgid "Unable to transfer from jobs to jobs_new in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:154 #, c-format msgid "Unable to drop jobs in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:160 #, c-format msgid "Unable to rename jobs table in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:170 #, c-format msgid "Unable to create index for jobs table in data base (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:178 #, c-format msgid "Failed checking database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:180 #, c-format msgid "Job database connection established successfully (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:202 #, c-format msgid "Error from SQLite: %s: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:205 #, c-format msgid "Error from SQLite: %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:226 #: src/hed/libs/compute/JobInformationStorageXML.cpp:36 #, c-format msgid "" "Job list file cannot be created: The parent directory (%s) doesn't exist." msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:230 #: src/hed/libs/compute/JobInformationStorageXML.cpp:40 #, c-format msgid "Job list file cannot be created: %s is not a directory" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:237 #: src/hed/libs/compute/JobInformationStorageXML.cpp:47 #, c-format msgid "Job list file (%s) is not a regular file" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:367 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:374 #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:381 #, c-format msgid "Unable to write records into job database (%s): Id \"%s\"" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:627 #: src/hed/libs/compute/JobInformationStorageXML.cpp:146 #, c-format msgid "Unable to truncate job database (%s)" msgstr "" #: src/hed/libs/compute/JobInformationStorageSQLite.cpp:660 #, c-format msgid "Unable to determine error (%d)" msgstr "" #: src/hed/libs/compute/JobInformationStorageXML.cpp:60 #: src/hed/libs/compute/JobInformationStorageXML.cpp:232 #: src/hed/libs/compute/JobInformationStorageXML.cpp:273 #, c-format msgid "Waiting for lock on job list file %s" msgstr "" #: src/hed/libs/compute/JobInformationStorageXML.cpp:171 #, c-format msgid "Will remove %s on service %s." 
msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:40 msgid "Ignoring job, the job ID is empty" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:45 #, c-format msgid "Ignoring job (%s), the management interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:50 #, c-format msgid "Ignoring job (%s), the job management URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:55 #, c-format msgid "Ignoring job (%s), the status interface name is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:60 #, c-format msgid "Ignoring job (%s), the job status URL is unknown" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:69 #, c-format msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s" msgstr "" #: src/hed/libs/compute/JobSupervisor.cpp:76 #, c-format msgid "" "Ignoring job (%s), already tried and were unable to load JobControllerPlugin" msgstr "" #: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94 #: src/hed/libs/compute/Software.cpp:113 #, c-format msgid "%s > %s => false" msgstr "" #: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83 #: src/hed/libs/compute/Software.cpp:107 #, c-format msgid "%s > %s => true" msgstr "" #: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102 #, c-format msgid "%s > %s => false: %s contains non numbers in the version part." msgstr "" #: src/hed/libs/compute/Software.cpp:199 src/hed/libs/compute/Software.cpp:210 #, c-format msgid "Requirement \"%s %s\" NOT satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:205 #, c-format msgid "Requirement \"%s %s\" satisfied." msgstr "" #: src/hed/libs/compute/Software.cpp:214 #, c-format msgid "Requirement \"%s %s\" satisfied by \"%s\"." msgstr "" #: src/hed/libs/compute/Software.cpp:219 msgid "All software requirements satisfied." msgstr "" #: src/hed/libs/compute/Submitter.cpp:83 #, c-format msgid "Trying to submit directly to endpoint (%s)" msgstr "" #: src/hed/libs/compute/Submitter.cpp:88 #, c-format msgid "Interface (%s) specified, submitting only to that interface" msgstr "" #: src/hed/libs/compute/Submitter.cpp:106 msgid "Trying all available interfaces" msgstr "" #: src/hed/libs/compute/Submitter.cpp:112 #, c-format msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)." msgstr "" #: src/hed/libs/compute/Submitter.cpp:116 #, c-format msgid "" "Unable to load plugin (%s) for interface (%s) when trying to submit job " "description." msgstr "" #: src/hed/libs/compute/Submitter.cpp:130 #, c-format msgid "No more interfaces to try for endpoint %s." msgstr "" #: src/hed/libs/compute/Submitter.cpp:336 #, c-format msgid "Target %s does not match requested interface(s)." msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:63 msgid "No stagein URL is provided" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:72 #, c-format msgid "Failed reading file %s" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:86 #, c-format msgid "Failed uploading file %s to %s: %s" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:168 #, c-format msgid "SubmitterPlugin plugin \"%s\" not found." 
msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:178 #, c-format msgid "SubmitterPlugin %s could not be created" msgstr "" #: src/hed/libs/compute/SubmitterPlugin.cpp:183 #, c-format msgid "Loaded SubmitterPlugin %s" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:28 msgid "Invalid job description" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:47 msgid "Failed to submit job" msgstr "" #: src/hed/libs/compute/examples/basic_job_submission.cpp:54 #, c-format msgid "Failed to write to local job list %s" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:20 msgid "[job description ...]" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:21 msgid "" "This tiny tool can be used for testing the JobDescription's conversion " "abilities." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:23 msgid "" "The job description also can be a file or a string in ADL or XRSL format." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:27 msgid "define the requested format (nordugrid:xrsl, emies:adl)" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:28 msgid "format" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:33 msgid "show the original job description" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:43 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:72 msgid "Use --help option for detailed usage information" msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:50 msgid " [ JobDescription tester ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:74 msgid " [ Parsing the original text ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:80 msgid "Unable to parse." msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:89 msgid " [ emies:adl ] " msgstr "" #: src/hed/libs/compute/test_jobdescription.cpp:91 msgid " [ nordugrid:xrsl ] " msgstr "" #: src/hed/libs/credential/CertUtil.cpp:127 #, c-format msgid "Error number in store context: %i" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:128 msgid "Self-signed certificate" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:131 #, c-format msgid "The certificate with subject %s is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:134 #, c-format msgid "" "Can not find issuer certificate for the certificate with subject %s and " "hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:137 #, c-format msgid "Certificate with subject %s has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:140 #, c-format msgid "" "Untrusted self-signed certificate in chain with subject %s and hash: %lu" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:142 #, c-format msgid "Certificate verification error: %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:154 msgid "Can not get the certificate type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:194 msgid "Couldn't verify availability of CRL" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:207 msgid "In the available CRL the lastUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:214 msgid "The available CRL is not yet valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:223 msgid "In the available CRL, the nextUpdate field is not valid" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:229 msgid "The available CRL has expired" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:252 #, c-format msgid "Certificate with serial number %s and subject \"%s\" is revoked" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:270 msgid 
"" "Directory of trusted CAs is not specified/found; Using current path as the " "CA direcroty" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:279 msgid "Can't allocate memory for CA policy path" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:325 #, c-format msgid "Certificate has unknown extension with numeric ID %u and SN %s" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:339 #: src/hed/libs/credential/Credential.cpp:1727 msgid "" "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal " "format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:385 msgid "Trying to check X509 cert with check_cert_type" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:424 msgid "Can't convert DER encoded PROXYCERTINFO extension to internal format" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:428 msgid "Can't get policy from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:432 msgid "Can't get policy language from PROXYCERTINFO extension" msgstr "" #: src/hed/libs/credential/CertUtil.cpp:464 msgid "The subject does not match the issuer name + proxy CN entry" msgstr "" #: src/hed/libs/credential/Credential.cpp:48 #, c-format msgid "OpenSSL error string: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:169 msgid "Can't get the first byte of input to determine its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:183 msgid "Can't reset the input" msgstr "" #: src/hed/libs/credential/Credential.cpp:208 #: src/hed/libs/credential/Credential.cpp:244 msgid "Can't get the first byte of input BIO to get its format" msgstr "" #: src/hed/libs/credential/Credential.cpp:220 msgid "Can not read certificate/key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:433 #, c-format msgid "Can not find certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:438 #, c-format msgid "Can not read certificate file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:476 msgid "Can not read certificate string" msgstr "" #: src/hed/libs/credential/Credential.cpp:496 msgid "Certificate format is PEM" msgstr "" #: src/hed/libs/credential/Credential.cpp:523 msgid "Certificate format is DER" msgstr "" #: src/hed/libs/credential/Credential.cpp:552 msgid "Certificate format is PKCS" msgstr "" #: src/hed/libs/credential/Credential.cpp:578 msgid "Certificate format is unknown" msgstr "" #: src/hed/libs/credential/Credential.cpp:586 #, c-format msgid "Can not find key file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:591 #, c-format msgid "Can not open key file %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:610 msgid "Can not read key string" msgstr "" #: src/hed/libs/credential/Credential.cpp:673 #: src/hed/libs/credential/VOMSUtil.cpp:210 msgid "Failed to lock arccredential library in memory" msgstr "" #: src/hed/libs/credential/Credential.cpp:685 msgid "Certificate verification succeeded" msgstr "" #: src/hed/libs/credential/Credential.cpp:689 msgid "Certificate verification failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:702 #: src/hed/libs/credential/Credential.cpp:722 #: src/hed/libs/credential/Credential.cpp:742 #: src/hed/libs/credential/Credential.cpp:1024 #: src/hed/libs/credential/Credential.cpp:2398 #: src/hed/libs/credential/Credential.cpp:2428 msgid "Failed to initialize extensions member for Credential" msgstr "" #: src/hed/libs/credential/Credential.cpp:787 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:799 #, 
c-format msgid "Unsupported proxy version is requested - %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:810 msgid "If you specify a policy you also need to specify a policy language" msgstr "" #: src/hed/libs/credential/Credential.cpp:857 #, c-format msgid "Error: can't open policy file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:870 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "" #: src/hed/libs/credential/Credential.cpp:929 #: src/hed/libs/credential/Credential.cpp:962 #: src/hed/libs/credential/Credential.cpp:1029 msgid "Certificate/Proxy path is empty" msgstr "" #: src/hed/libs/credential/Credential.cpp:1087 #: src/hed/libs/credential/Credential.cpp:2937 msgid "Failed to duplicate extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1091 msgid "Failed to add extension into credential extensions" msgstr "" #: src/hed/libs/credential/Credential.cpp:1104 msgid "Certificate information collection failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1143 #: src/hed/libs/credential/Credential.cpp:1148 msgid "Can not convert string into ASN1_OBJECT" msgstr "" #: src/hed/libs/credential/Credential.cpp:1155 msgid "Can not create ASN1_OCTET_STRING" msgstr "" #: src/hed/libs/credential/Credential.cpp:1164 msgid "Can not allocate memory for extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1174 msgid "Can not create extension for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:1210 #: src/hed/libs/credential/Credential.cpp:1378 msgid "BN_set_word failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1219 #: src/hed/libs/credential/Credential.cpp:1387 msgid "RSA_generate_key_ex failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1228 #: src/hed/libs/credential/Credential.cpp:1395 msgid "BN_new || RSA_new failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1239 msgid "Created RSA key, proceeding with request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1244 msgid "pkey and rsa_key exist!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1247 msgid "Generate new X509 request!" msgstr "" #: src/hed/libs/credential/Credential.cpp:1252 msgid "Setting subject name!" 
msgstr "" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1474 msgid "PEM_write_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1290 #: src/hed/libs/credential/Credential.cpp:1331 #: src/hed/libs/credential/Credential.cpp:1506 #: src/hed/libs/credential/Credential.cpp:1526 msgid "Can not create BIO for request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1308 msgid "Failed to write request into string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1335 #: src/hed/libs/credential/Credential.cpp:1340 #: src/hed/libs/credential/Credential.cpp:1530 msgid "Can not set writable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1346 #: src/hed/libs/credential/Credential.cpp:1535 msgid "Wrote request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1348 #: src/hed/libs/credential/Credential.cpp:1538 msgid "Failed to write request into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1368 msgid "The credential's private key has already been initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:1416 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1426 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1444 #: src/hed/libs/credential/Credential.cpp:1451 #: src/hed/libs/credential/Credential.cpp:2029 #: src/hed/libs/credential/Credential.cpp:2037 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1481 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:1491 msgid "Can not generate X509 request" msgstr "" #: src/hed/libs/credential/Credential.cpp:1493 msgid "Can not set private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1591 msgid "Failed to get private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1610 msgid "Failed to get public key from RSA object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1618 msgid "Failed to get public key from X509 object" msgstr "" #: src/hed/libs/credential/Credential.cpp:1625 msgid "Failed to get public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:1663 #, c-format msgid "Certiticate chain number %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1691 msgid "NULL BIO passed to InquireRequest" msgstr "" #: src/hed/libs/credential/Credential.cpp:1694 msgid "PEM_read_bio_X509_REQ failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1698 msgid "d2i_X509_REQ_bio failed" msgstr "" #: src/hed/libs/credential/Credential.cpp:1720 msgid "Missing data in DER encoded PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1732 msgid "Can not create PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1742 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1746 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:1762 #, c-format msgid "Cert Type: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:1775 #: src/hed/libs/credential/Credential.cpp:1794 msgid "Can not create BIO for parsing request" msgstr "" #: 
src/hed/libs/credential/Credential.cpp:1780 msgid "Read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1783 msgid "Failed to read request from a string" msgstr "" #: src/hed/libs/credential/Credential.cpp:1798 msgid "Can not set readable file for request BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:1803 msgid "Read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Failed to read request from a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:1846 msgid "Can not convert private key to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2010 msgid "Credential is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2016 msgid "Failed to duplicate X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2021 msgid "Failed to initialize X509 structure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2044 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2048 #: src/hed/libs/credential/Credential.cpp:2096 msgid "Can not add X509 extension to proxy cert" msgstr "" #: src/hed/libs/credential/Credential.cpp:2064 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2076 #: src/hed/libs/credential/Credential.cpp:2085 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2092 msgid "Can not create extension for keyUsage" msgstr "" #: src/hed/libs/credential/Credential.cpp:2105 msgid "Can not get extended KeyUsage extension from issuer certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2110 msgid "Can not copy extended KeyUsage extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2115 msgid "Can not add X509 extended KeyUsage extension to new proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2125 msgid "Can not compute digest of public key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2136 msgid "Can not copy the subject name from issuer for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2142 msgid "Can not create name entry CN for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2147 msgid "Can not set CN in proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2155 msgid "Can not set issuer's subject for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2160 msgid "Can not set version number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2168 msgid "Can not set serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2174 msgid "Can not duplicate serial number for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2180 msgid "Can not set the lifetime for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2184 msgid "Can not set pubkey for proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2200 #: src/hed/libs/credential/Credential.cpp:2827 msgid "The credential to be signed is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2204 #: src/hed/libs/credential/Credential.cpp:2831 msgid "The credential to be signed contains no request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2208 #: src/hed/libs/credential/Credential.cpp:2835 msgid "The BIO for output is NULL" msgstr "" #: src/hed/libs/credential/Credential.cpp:2222 #: 
src/hed/libs/credential/Credential.cpp:2842 msgid "Error when extracting public key from request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2227 #: src/hed/libs/credential/Credential.cpp:2846 msgid "Failed to verify the request" msgstr "" #: src/hed/libs/credential/Credential.cpp:2231 msgid "Failed to add issuer's extension into proxy" msgstr "" #: src/hed/libs/credential/Credential.cpp:2255 msgid "Failed to find extension" msgstr "" #: src/hed/libs/credential/Credential.cpp:2267 msgid "Can not get the issuer's private key" msgstr "" #: src/hed/libs/credential/Credential.cpp:2274 #: src/hed/libs/credential/Credential.cpp:2878 msgid "There is no digest in issuer's private key object" msgstr "" #: src/hed/libs/credential/Credential.cpp:2279 #: src/hed/libs/credential/Credential.cpp:2882 #, c-format msgid "%s is an unsupported digest type" msgstr "" #: src/hed/libs/credential/Credential.cpp:2290 #, c-format msgid "" "The signing algorithm %s is not allowed, it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" #: src/hed/libs/credential/Credential.cpp:2296 msgid "Failed to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2298 msgid "Succeeded to sign the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2303 msgid "Failed to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2305 msgid "Succeeded to verify the signed certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2310 #: src/hed/libs/credential/Credential.cpp:2319 msgid "Output the proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2313 msgid "Can not convert signed proxy cert into PEM format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2322 msgid "Can not convert signed proxy cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2338 #: src/hed/libs/credential/Credential.cpp:2361 msgid "Can not create BIO for signed proxy certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2365 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:2370 msgid "Wrote signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2373 msgid "Failed to write signed proxy certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:2408 #: src/hed/libs/credential/Credential.cpp:2447 #, c-format msgid "ERROR: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2455 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2500 #, c-format msgid "unable to load number from: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2505 msgid "error converting number from bin to BIGNUM" msgstr "" #: src/hed/libs/credential/Credential.cpp:2532 msgid "file name too long" msgstr "" #: src/hed/libs/credential/Credential.cpp:2555 msgid "error converting serial to ASN.1 format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2588 #, c-format msgid "load serial from %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2593 msgid "add_word failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2598 #, c-format msgid "save serial to %s failure" msgstr "" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Error initialising X509 store" msgstr "" #: src/hed/libs/credential/Credential.cpp:2625 msgid "Out of memory when generating random serial" msgstr "" #: src/hed/libs/credential/Credential.cpp:2637 msgid
"CA certificate and CA private key do not match" msgstr "" #: src/hed/libs/credential/Credential.cpp:2661 #, c-format msgid "Failed to load extension section: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2698 msgid "malloc error" msgstr "" #: src/hed/libs/credential/Credential.cpp:2702 msgid "Subject does not start with '/'" msgstr "" #: src/hed/libs/credential/Credential.cpp:2718 #: src/hed/libs/credential/Credential.cpp:2739 msgid "escape character at end of string" msgstr "" #: src/hed/libs/credential/Credential.cpp:2730 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2767 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2771 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "" #: src/hed/libs/credential/Credential.cpp:2812 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" #: src/hed/libs/credential/Credential.cpp:2822 msgid "The private key for signing is not initialized" msgstr "" #: src/hed/libs/credential/Credential.cpp:2901 #, c-format msgid "Error when loading the extension config file: %s" msgstr "" #: src/hed/libs/credential/Credential.cpp:2905 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "" #: src/hed/libs/credential/Credential.cpp:2953 msgid "Can not sign a EEC" msgstr "" #: src/hed/libs/credential/Credential.cpp:2957 msgid "Output EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2960 msgid "Can not convert signed EEC cert into DER format" msgstr "" #: src/hed/libs/credential/Credential.cpp:2974 #: src/hed/libs/credential/Credential.cpp:2993 msgid "Can not create BIO for signed EEC certificate" msgstr "" #: src/hed/libs/credential/Credential.cpp:2997 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" #: src/hed/libs/credential/Credential.cpp:3002 msgid "Wrote signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/Credential.cpp:3005 msgid "Failed to write signed EEC certificate into a file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:143 msgid "Error writing raw certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:222 msgid "Failed to add RFC proxy OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:225 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:231 msgid "Failed to add anyLanguage OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:234 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:240 msgid "Failed to add inheritAll OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:243 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:249 msgid "Failed to add Independent OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:252 #, c-format msgid "Succeeded to add Independent OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:258 msgid "Failed to add VOMS AC sequence OID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:261 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:290 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:301 
msgid "Succeeded to initialize NSS" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:323 #, c-format msgid "Failed to read attribute %x from private key." msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:375 msgid "Succeeded to get credential" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:376 msgid "Failed to get credential" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:438 msgid "p12 file is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:448 msgid "Unable to write to p12 file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:464 msgid "Failed to open p12 file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:492 msgid "Failed to allocate p12 context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1200 msgid "Failed to find issuer certificate for proxy certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1351 #, c-format msgid "Failed to authenticate to PKCS11 slot %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1357 #, c-format msgid "Failed to find certificates by nickname: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1362 #, c-format msgid "No user certificate by nickname %s found" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1375 #: src/hed/libs/credential/NSSUtil.cpp:1411 msgid "Certificate does not have a slot" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1381 msgid "Failed to create export context" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1396 msgid "PKCS12 output password not provided" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1403 msgid "PKCS12 add password integrity failed" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1424 msgid "Failed to create key or certificate safe" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1440 msgid "Failed to add certificate and key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1449 #, c-format msgid "Failed to initialize PKCS12 file: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1454 msgid "Failed to encode PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1457 msgid "Succeeded to export PKCS12" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1485 #, c-format msgid "" "There is no certificate named %s found, the certificate could be removed " "when generating CSR" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1491 msgid "Failed to delete certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1505 msgid "The name of the private key to delete is empty" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1510 #: src/hed/libs/credential/NSSUtil.cpp:2939 #: src/hed/libs/credential/NSSUtil.cpp:2956 #, c-format msgid "Failed to authenticate to token %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1517 #, c-format msgid "No private key with nickname %s exist in NSS database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1550 msgid "Failed to delete private key and certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1560 msgid "Failed to delete private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1571 #, c-format msgid "Can not find key with name: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1599 msgid "Can not read PEM private key: probably bad password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1601 msgid "Can not read PEM private key: failed to decrypt" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1603 #: src/hed/libs/credential/NSSUtil.cpp:1605 msgid "Can not read PEM private key: failed to obtain password" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1606 msgid "Can not read PEM private key" msgstr "" #: 
src/hed/libs/credential/NSSUtil.cpp:1613 msgid "Failed to convert EVP_PKEY to PKCS8" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1650 msgid "Failed to load private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1651 msgid "Succeeded to load PrivateKeyInfo" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1654 msgid "Failed to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1655 msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1692 msgid "Failed to import private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1695 msgid "Succeeded to import private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1708 #: src/hed/libs/credential/NSSUtil.cpp:1750 #: src/hed/libs/credential/NSSUtil.cpp:2889 msgid "Failed to authenticate to key database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1717 msgid "Succeeded to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1719 msgid "Failed to generate public/private key pair" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1724 msgid "Failed to export private key" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1791 msgid "Failed to create subject name" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1807 msgid "Failed to create certificate request" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1820 msgid "Failed to call PORT_NewArena" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1828 msgid "Failed to encode the certificate request with DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1835 msgid "Unknown key or hash type" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1841 msgid "Failed to sign the certificate request" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1857 msgid "Failed to output the certificate request as ASCII format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1866 msgid "Failed to output the certificate request as DER format" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1875 #, c-format msgid "Succeeded to output the certificate request into %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1914 #: src/hed/libs/credential/NSSUtil.cpp:1951 msgid "Failed to read data from input file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1930 msgid "Input is without trailer\n" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1941 msgid "Failed to convert ASCII to DER" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:1992 msgid "Certificate request is invalid" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2212 #, c-format msgid "The policy language: %s is not supported" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2220 #: src/hed/libs/credential/NSSUtil.cpp:2245 #: src/hed/libs/credential/NSSUtil.cpp:2268 #: src/hed/libs/credential/NSSUtil.cpp:2290 msgid "Failed to create new arena" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2229 #: src/hed/libs/credential/NSSUtil.cpp:2254 msgid "Failed to create path length" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2232 #: src/hed/libs/credential/NSSUtil.cpp:2257 #: src/hed/libs/credential/NSSUtil.cpp:2277 #: src/hed/libs/credential/NSSUtil.cpp:2299 msgid "Failed to create policy language" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2700 #, c-format msgid "Failed to parse certificate request from CSR file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2707 #, c-format msgid "Can not find certificate with name %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2739 msgid "Can not allocate memory" msgstr
"" #: src/hed/libs/credential/NSSUtil.cpp:2747 #, c-format msgid "Proxy subject: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2764 msgid "Failed to start certificate extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2769 msgid "Failed to add key usage extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2774 msgid "Failed to add proxy certificate information extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2778 msgid "Failed to add voms AC extension" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2798 msgid "Failed to retrieve private key for issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2805 msgid "Unknown key or hash type of issuer" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2811 msgid "Failed to set signature algorithm ID" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2823 msgid "Failed to encode certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2829 msgid "Failed to allocate item for certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2835 msgid "Failed to sign encoded certificate data" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2844 #, c-format msgid "Failed to open file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2855 #, c-format msgid "Succeeded to output certificate to %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2896 #, c-format msgid "Failed to open input certificate file %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2913 msgid "Failed to read input certificate file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2918 msgid "Failed to get certificate from certificate file" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2925 msgid "Failed to allocate certificate trust" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2930 msgid "Failed to decode trust string" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2944 #: src/hed/libs/credential/NSSUtil.cpp:2961 msgid "Failed to add certificate to token or database" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2947 #: src/hed/libs/credential/NSSUtil.cpp:2950 msgid "Succeeded to import certificate" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2964 #: src/hed/libs/credential/NSSUtil.cpp:2967 #, c-format msgid "Succeeded to change trusts to: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2994 #, c-format msgid "Failed to import private key from file: %s" msgstr "" #: src/hed/libs/credential/NSSUtil.cpp:2996 #, c-format msgid "Failed to import certificate from file: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:147 #, c-format msgid "" "ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. " "Line was: %s" msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:163 #, c-format msgid "" "ERROR: file tree is too deep while scanning VOMS configuration. Max allowed " "nesting is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:181 #, c-format msgid "ERROR: failed to read file %s while scanning VOMS configuration." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:186 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too many lines. Max supported " "number is %i." msgstr "" #: src/hed/libs/credential/VOMSConfig.cpp:193 #, c-format msgid "" "ERROR: VOMS configuration file %s contains too long line(s). Max supported " "length is %i characters." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:137 #, c-format msgid "Failed to create OpenSSL object %s %s - %u %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:144 #, c-format msgid "Failed to obtain OpenSSL identifier for %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:302 #: src/hed/libs/credential/VOMSUtil.cpp:571 #, c-format msgid "VOMS: create FQAN: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:340 #: src/hed/libs/credential/VOMSUtil.cpp:619 #, c-format msgid "VOMS: create attribute: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:917 msgid "VOMS: Can not allocate memory for parsing AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:925 msgid "VOMS: Can not allocate memory for storing the order of AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:951 msgid "VOMS: Can not parse AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:981 msgid "" "VOMS: CA directory or CA file must be provided or default setting enabled" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1052 msgid "VOMS: failed to verify AC signature" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1108 #, c-format msgid "VOMS: trust chain to check: %s " msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1116 #, c-format msgid "" "VOMS: the DN in certificate: %s does not match that in trusted DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1122 #, c-format msgid "" "VOMS: the Issuer identity in certificate: %s does not match that in trusted " "DN list: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1157 #, c-format msgid "VOMS: The lsc file %s does not exist" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1163 #, c-format msgid "VOMS: The lsc file %s can not be open" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1215 msgid "" "VOMS: there is no constraints of trusted voms DNs, the certificates stack in " "AC will not be checked." 
msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1248 msgid "VOMS: unable to match certificate chain against VOMS trusted DNs" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1268 msgid "VOMS: AC signature verification failed" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1277 msgid "VOMS: unable to verify certificate chain" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1283 #, c-format msgid "VOMS: cannot validate AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1306 #, c-format msgid "VOMS: directory for trusted service certificates: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1332 #, c-format msgid "VOMS: Cannot find certificate of AC issuer for VO %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1358 #: src/hed/libs/credential/VOMSUtil.cpp:1427 msgid "VOMS: Can not find AC_ATTR with IETFATTR type" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1365 #: src/hed/libs/credential/VOMSUtil.cpp:1434 msgid "VOMS: case of multiple IETFATTR attributes not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1375 #: src/hed/libs/credential/VOMSUtil.cpp:1450 msgid "VOMS: case of multiple policyAuthority not supported" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1391 #: src/hed/libs/credential/VOMSUtil.cpp:1467 msgid "VOMS: the format of policyAuthority is unsupported - expecting URI" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1400 #: src/hed/libs/credential/VOMSUtil.cpp:1478 msgid "" "VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1443 msgid "VOMS: failed to access IETFATTR attribute" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1538 msgid "VOMS: the grantor attribute is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1556 msgid "VOMS: the attribute name is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1562 #, c-format msgid "VOMS: the attribute value for %s is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1567 msgid "VOMS: the attribute qualifier is empty" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1602 #: src/hed/libs/credential/VOMSUtil.cpp:1721 msgid "" "VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions " "must be present" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1636 #: src/hed/libs/credential/VOMSUtil.cpp:1757 #, c-format msgid "VOMS: FQDN of this host %s does not match any target in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1641 #: src/hed/libs/credential/VOMSUtil.cpp:1762 msgid "VOMS: the only supported critical extension of the AC is idceTargets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1656 #: src/hed/libs/credential/VOMSUtil.cpp:1777 msgid "VOMS: failed to parse attributes from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1700 #: src/hed/libs/credential/VOMSUtil.cpp:1829 msgid "VOMS: authorityKey is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1861 #: src/hed/libs/credential/VOMSUtil.cpp:2029 #: src/hed/libs/credential/VOMSUtil.cpp:2037 msgid "VOMS: missing AC parts" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1878 #: src/hed/libs/credential/VOMSUtil.cpp:2054 msgid "VOMS: unsupported time format in AC - expecting GENERALIZED TIME" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1884 #: src/hed/libs/credential/VOMSUtil.cpp:2060 msgid "VOMS: AC is not yet valid" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1891 #: src/hed/libs/credential/VOMSUtil.cpp:2067 msgid "VOMS: AC has expired" msgstr "" #: 
src/hed/libs/credential/VOMSUtil.cpp:1906 #: src/hed/libs/credential/VOMSUtil.cpp:2080 msgid "VOMS: AC is not complete - missing Serial or Issuer information" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1911 #: src/hed/libs/credential/VOMSUtil.cpp:2085 #, c-format msgid "VOMS: the holder serial number is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1912 #: src/hed/libs/credential/VOMSUtil.cpp:2086 #, c-format msgid "VOMS: the serial number in AC is: %lx" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1915 #: src/hed/libs/credential/VOMSUtil.cpp:2089 #, c-format msgid "" "VOMS: the holder serial number %lx is not the same as the serial number in " "AC %lx, the holder certificate that is used to create a voms proxy could be " "a proxy certificate with a different serial number than the original EEC cert" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1924 #: src/hed/libs/credential/VOMSUtil.cpp:2098 msgid "VOMS: the holder information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1946 #: src/hed/libs/credential/VOMSUtil.cpp:2120 #, c-format msgid "VOMS: DN of holder in AC: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1947 #: src/hed/libs/credential/VOMSUtil.cpp:2121 #, c-format msgid "VOMS: DN of holder: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1948 #: src/hed/libs/credential/VOMSUtil.cpp:2122 #, c-format msgid "VOMS: DN of issuer: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1955 #: src/hed/libs/credential/VOMSUtil.cpp:2129 msgid "" "VOMS: the holder name in AC is not related to the distinguished name in " "holder certificate" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1967 #: src/hed/libs/credential/VOMSUtil.cpp:1974 #: src/hed/libs/credential/VOMSUtil.cpp:2141 #: src/hed/libs/credential/VOMSUtil.cpp:2148 msgid "VOMS: the holder issuerUID is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1987 #: src/hed/libs/credential/VOMSUtil.cpp:2160 msgid "VOMS: the holder issuer name is not the same as that in AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:1997 #: src/hed/libs/credential/VOMSUtil.cpp:2169 msgid "VOMS: the issuer information in AC is wrong" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2005 #: src/hed/libs/credential/VOMSUtil.cpp:2177 #, c-format msgid "VOMS: the issuer name %s is not the same as that in AC - %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2013 #: src/hed/libs/credential/VOMSUtil.cpp:2185 msgid "" "VOMS: the serial number of AC INFO is too long - expecting no more than 20 " "octets" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2221 #: src/hed/libs/credential/VOMSUtil.cpp:2233 #: src/hed/libs/credential/VOMSUtil.cpp:2247 #: src/hed/libs/credential/VOMSUtil.cpp:2259 #: src/hed/libs/credential/VOMSUtil.cpp:2282 msgid "VOMS: unable to extract VO name from AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2273 #, c-format msgid "VOMS: unable to determine hostname of AC from VO name: %s" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2292 msgid "VOMS: can not verify the signature of the AC" msgstr "" #: src/hed/libs/credential/VOMSUtil.cpp:2298 msgid "VOMS: problems while parsing information in AC" msgstr "" #: src/hed/libs/credential/test/VOMSUtilTest.cpp:126 #, c-format msgid "Line %d.%d of the attributes returned: %s" msgstr "" #: src/hed/libs/credentialstore/ClientVOMS.cpp:149 msgid "voms" msgstr "" #: src/hed/libs/credentialstore/CredentialStore.cpp:194 #: src/hed/libs/credentialstore/CredentialStore.cpp:245 #:
src/hed/libs/credentialstore/CredentialStore.cpp:273 #: src/hed/libs/credentialstore/CredentialStore.cpp:336 #: src/hed/libs/credentialstore/CredentialStore.cpp:376 #: src/hed/libs/credentialstore/CredentialStore.cpp:406 #, c-format msgid "MyProxy failure: %s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:64 #, c-format msgid "SSL error: %d - %s:%s:%s" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:78 msgid "Failed to lock arccrypto library in memory" msgstr "" #: src/hed/libs/crypto/OpenSSL.cpp:81 msgid "Failed to initialize OpenSSL library" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:157 msgid "failed to read data tag" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:161 msgid "waiting for data chunk" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:163 msgid "failed to read data chunk" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:171 #, c-format msgid "data chunk: %llu %llu" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:242 #, c-format msgid "DataMove::Transfer: using supplied checksum %s" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:361 msgid "Expecting Module, Command and URL provided" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:368 msgid "Expecting Command module path among arguments" msgstr "" #: src/hed/libs/data/DataExternalHelper.cpp:372 msgid "Expecting Command module name among arguments" msgstr "" #: src/hed/libs/data/DataMover.cpp:126 msgid "No locations found - probably no more physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:132 src/hed/libs/data/FileCache.cpp:550 #: src/libs/data-staging/Processor.cpp:394 #: src/libs/data-staging/Processor.cpp:408 #, c-format msgid "Removing %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:145 msgid "This instance was already deleted" msgstr "" #: src/hed/libs/data/DataMover.cpp:151 msgid "Failed to delete physical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:162 #, c-format msgid "Removing metadata in %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:166 msgid "Failed to delete meta-information" msgstr "" #: src/hed/libs/data/DataMover.cpp:180 msgid "Failed to remove all physical instances" msgstr "" #: src/hed/libs/data/DataMover.cpp:184 #, c-format msgid "Removing logical file from metadata %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:187 msgid "Failed to delete logical file" msgstr "" #: src/hed/libs/data/DataMover.cpp:194 msgid "Failed to remove instance" msgstr "" #: src/hed/libs/data/DataMover.cpp:243 msgid "DataMover::Transfer : starting new thread" msgstr "" #: src/hed/libs/data/DataMover.cpp:271 #, c-format msgid "Transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:273 msgid "Not valid source" msgstr "" #: src/hed/libs/data/DataMover.cpp:278 msgid "Not valid destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:300 src/services/candypond/CandyPond.cpp:304 #, c-format msgid "Couldn't handle certificate: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:309 src/hed/libs/data/DataMover.cpp:614 #: src/libs/data-staging/Processor.cpp:123 #, c-format msgid "File %s is cached (%s) - checking permissions" msgstr "" #: src/hed/libs/data/DataMover.cpp:313 src/hed/libs/data/DataMover.cpp:633 #: src/hed/libs/data/DataMover.cpp:691 src/libs/data-staging/Processor.cpp:142 msgid "Permission checking passed" msgstr "" #: src/hed/libs/data/DataMover.cpp:314 src/hed/libs/data/DataMover.cpp:652 #: src/hed/libs/data/DataMover.cpp:1180 msgid "Linking/copying cached file" msgstr "" #: src/hed/libs/data/DataMover.cpp:338 #, c-format msgid "No locations for 
source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:342 #, c-format msgid "Failed to resolve source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:356 src/hed/libs/data/DataMover.cpp:431 #, c-format msgid "No locations for destination found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:361 src/hed/libs/data/DataMover.cpp:435 #, c-format msgid "Failed to resolve destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:378 #, c-format msgid "No locations for destination different from source found: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:400 #, c-format msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:412 #, c-format msgid "Failed to delete %s but will still try to copy" msgstr "" #: src/hed/libs/data/DataMover.cpp:416 #, c-format msgid "Failed to delete %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:447 #, c-format msgid "Deleted but still have locations at %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:459 msgid "DataMover: cycle" msgstr "" #: src/hed/libs/data/DataMover.cpp:461 msgid "DataMover: no retries requested - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:466 msgid "DataMover: source out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:468 msgid "DataMover: destination out of tries - exit" msgstr "" #: src/hed/libs/data/DataMover.cpp:476 #, c-format msgid "Real transfer from %s to %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:502 #, c-format msgid "Creating buffer: %lli x %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:518 #, c-format msgid "DataMove::Transfer: no checksum calculation for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:523 #, c-format msgid "DataMove::Transfer: using supplied checksum %s:%s" msgstr "" #: src/hed/libs/data/DataMover.cpp:547 #, c-format msgid "DataMove::Transfer: will calculate %s checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:552 msgid "Buffer creation failed !" 
msgstr "" #: src/hed/libs/data/DataMover.cpp:575 #, c-format msgid "URL is mapped to: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:603 src/hed/libs/data/DataMover.cpp:661 #: src/libs/data-staging/Processor.cpp:78 msgid "Cached file is locked - should retry" msgstr "" #: src/hed/libs/data/DataMover.cpp:608 src/libs/data-staging/Processor.cpp:96 msgid "Failed to initiate cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:625 src/services/candypond/CandyPond.cpp:379 #, c-format msgid "Permission checking failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:627 src/hed/libs/data/DataMover.cpp:685 #: src/hed/libs/data/DataMover.cpp:705 src/hed/libs/data/DataMover.cpp:716 msgid "source.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:641 src/libs/data-staging/Processor.cpp:147 #, c-format msgid "Source modification date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:642 src/libs/data-staging/Processor.cpp:148 #, c-format msgid "Cache creation date: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:648 src/libs/data-staging/Processor.cpp:153 msgid "Cached file is outdated, will re-download" msgstr "" #: src/hed/libs/data/DataMover.cpp:651 src/libs/data-staging/Processor.cpp:158 msgid "Cached copy is still valid" msgstr "" #: src/hed/libs/data/DataMover.cpp:678 msgid "URL is mapped to local access - checking permissions on original URL" msgstr "" #: src/hed/libs/data/DataMover.cpp:682 #, c-format msgid "Permission checking on original URL failed: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:693 msgid "Linking local file" msgstr "" #: src/hed/libs/data/DataMover.cpp:713 #, c-format msgid "Failed to make symbolic link %s to %s : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:722 #, c-format msgid "Failed to change owner of symbolic link %s to %i" msgstr "" #: src/hed/libs/data/DataMover.cpp:733 #, c-format msgid "cache file: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:759 #, c-format msgid "Failed to stat source %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:761 src/hed/libs/data/DataMover.cpp:776 #: src/hed/libs/data/DataMover.cpp:808 src/hed/libs/data/DataMover.cpp:828 #: src/hed/libs/data/DataMover.cpp:851 src/hed/libs/data/DataMover.cpp:869 #: src/hed/libs/data/DataMover.cpp:1028 src/hed/libs/data/DataMover.cpp:1061 #: src/hed/libs/data/DataMover.cpp:1072 src/hed/libs/data/DataMover.cpp:1146 msgid "(Re)Trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:772 #, c-format msgid "Meta info of source and location do not match for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:786 #, c-format msgid "" "Replica %s has high latency, but no more sources exist so will use this one" msgstr "" #: src/hed/libs/data/DataMover.cpp:790 #, c-format msgid "Replica %s has high latency, trying next source" msgstr "" #: src/hed/libs/data/DataMover.cpp:802 src/hed/libs/data/DataMover.cpp:823 #: src/libs/data-staging/DataStagingDelivery.cpp:376 #: src/libs/data-staging/DataStagingDelivery.cpp:399 #, c-format msgid "Using internal transfer method of %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:815 src/hed/libs/data/DataMover.cpp:833 #: src/libs/data-staging/DataStagingDelivery.cpp:392 #: src/libs/data-staging/DataStagingDelivery.cpp:413 #, c-format msgid "Internal transfer method is not supported for %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:840 msgid "Using buffered transfer method" msgstr "" #: src/hed/libs/data/DataMover.cpp:844 #, c-format msgid "Failed to prepare source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:859 #, c-format msgid "Failed to 
start reading from source: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:879 msgid "Metadata of source and destination are different" msgstr "" #: src/hed/libs/data/DataMover.cpp:899 #, c-format msgid "Failed to preregister destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:904 src/hed/libs/data/DataMover.cpp:1170 msgid "destination.next_location" msgstr "" #: src/hed/libs/data/DataMover.cpp:915 #, c-format msgid "Failed to prepare destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:922 src/hed/libs/data/DataMover.cpp:945 #: src/hed/libs/data/DataMover.cpp:1167 #, c-format msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:926 src/hed/libs/data/DataMover.cpp:948 #: src/hed/libs/data/DataMover.cpp:1037 src/hed/libs/data/DataMover.cpp:1053 #: src/hed/libs/data/DataMover.cpp:1078 src/hed/libs/data/DataMover.cpp:1123 msgid "(Re)Trying next destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:937 #, c-format msgid "Failed to start writing to destination: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:960 msgid "Failed to start writing to cache" msgstr "" #: src/hed/libs/data/DataMover.cpp:968 src/hed/libs/data/DataMover.cpp:1014 #: src/hed/libs/data/DataMover.cpp:1192 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:975 msgid "Waiting for buffer" msgstr "" #: src/hed/libs/data/DataMover.cpp:982 #, c-format msgid "Failed updating timestamp on cache lock file %s for file %s: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:987 #, c-format msgid "buffer: read EOF : %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:988 #, c-format msgid "buffer: write EOF: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:989 #, c-format msgid "buffer: error : %s, read: %s, write: %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:990 msgid "Closing read channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:997 msgid "Closing write channel" msgstr "" #: src/hed/libs/data/DataMover.cpp:1005 msgid "Failed to complete writing to destination" msgstr "" #: src/hed/libs/data/DataMover.cpp:1019 msgid "Transfer cancelled successfully" msgstr "" #: src/hed/libs/data/DataMover.cpp:1066 msgid "Cause of failure unclear - choosing randomly" msgstr "" #: src/hed/libs/data/DataMover.cpp:1110 #, c-format msgid "" "Checksum mismatch between checksum given as meta option (%s:%s) and " "calculated checksum (%s)" msgstr "" #: src/hed/libs/data/DataMover.cpp:1116 msgid "" "Failed to unregister preregistered lfn. You may need to unregister it " "manually" msgstr "" #: src/hed/libs/data/DataMover.cpp:1120 msgid "Failed to delete destination, retry may fail" msgstr "" #: src/hed/libs/data/DataMover.cpp:1130 msgid "Cannot compare empty checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1137 #: src/libs/data-staging/DataStagingDelivery.cpp:570 msgid "Checksum type of source and calculated checksum differ, cannot compare" msgstr "" #: src/hed/libs/data/DataMover.cpp:1139 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/hed/libs/data/DataMover.cpp:1151 #: src/libs/data-staging/DataStagingDelivery.cpp:586 #, c-format msgid "Calculated transfer checksum %s matches source checksum" msgstr "" #: src/hed/libs/data/DataMover.cpp:1157 #: src/libs/data-staging/DataStagingDelivery.cpp:589 msgid "Checksum not computed" msgstr "" #: src/hed/libs/data/DataMover.cpp:1163 #, c-format msgid
"Failed to postregister destination %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:90 #, c-format msgid "Invalid URL option: %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:251 msgid "Checksum types of index and replica are different, skipping comparison" msgstr "" #: src/hed/libs/data/DataPoint.cpp:278 #, c-format msgid "Skipping invalid URL option %s" msgstr "" #: src/hed/libs/data/DataPoint.cpp:293 msgid "" "Third party transfer was requested but the corresponding plugin could\n" " not be loaded. Is the GFAL plugin installed? If not, please install " "the\n" " packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n" " your type of installation the package names might differ." msgstr "" #: src/hed/libs/data/DataPoint.cpp:311 #, c-format msgid "Failed to load plugin for URL %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:75 #: src/hed/libs/data/DataPointDelegate.cpp:76 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2032 #, c-format msgid "Starting helper process: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:180 msgid "start_reading" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:189 msgid "start_reading: helper start failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:197 msgid "start_reading: thread create failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:213 msgid "StopReading: aborting connection" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:218 msgid "stop_reading: waiting for transfer to finish" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:221 #, c-format msgid "stop_reading: exiting: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:231 msgid "read_thread: get and register buffers" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:239 #, c-format msgid "read_thread: for_read failed - aborting: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:247 #, c-format msgid "read_thread: non-data tag '%c' from external process - leaving: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:256 #, c-format msgid "read_thread: data read error from external process - aborting: %s" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:264 msgid "read_thread: exiting" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:285 msgid "start_writing_ftp: helper start failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:293 msgid "start_writing_ftp: thread create failed" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:343 msgid "No checksum information possible" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:359 msgid "write_thread: get and pass buffers" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:366 msgid "write_thread: for_write failed - aborting" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:370 msgid "write_thread: for_write eof" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:384 msgid "write_thread: out failed - aborting" msgstr "" #: src/hed/libs/data/DataPointDelegate.cpp:392 msgid "write_thread: exiting" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:91 #, c-format msgid "Can't handle location %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:187 #, c-format msgid "Replica %s is mapped" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:195 #, c-format msgid "Sorting replicas according to preferred pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:218 #: src/hed/libs/data/DataPointIndex.cpp:236 #, c-format msgid "Excluding replica %s 
matching pattern !%s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:229 #, c-format msgid "Replica %s matches host pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:247 #, c-format msgid "Replica %s matches pattern %s" msgstr "" #: src/hed/libs/data/DataPointIndex.cpp:263 #, c-format msgid "Replica %s doesn't match preferred pattern or URL map" msgstr "" #: src/hed/libs/data/DataStatus.cpp:12 msgid "Operation completed successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:13 msgid "Source is invalid URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:14 msgid "Destination is invalid URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:15 msgid "Resolving of index service for source failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:16 msgid "Resolving of index service for destination failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:17 msgid "Can't read from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:18 msgid "Can't write to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:19 msgid "Failed while reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:20 msgid "Failed while writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:21 msgid "Failed while transferring data" msgstr "" #: src/hed/libs/data/DataStatus.cpp:22 msgid "Failed while finishing reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:23 msgid "Failed while finishing writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:24 msgid "First stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:25 msgid "Last stage of registration to index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:26 msgid "Unregistering from index service failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:27 msgid "Error in caching procedure" msgstr "" #: src/hed/libs/data/DataStatus.cpp:28 msgid "Error due to expiration of provided credentials" msgstr "" #: src/hed/libs/data/DataStatus.cpp:29 msgid "Delete error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:30 msgid "No valid location available" msgstr "" #: src/hed/libs/data/DataStatus.cpp:31 msgid "Location already exists" msgstr "" #: src/hed/libs/data/DataStatus.cpp:32 msgid "Operation not supported for this kind of URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:33 msgid "Feature is not implemented" msgstr "" #: src/hed/libs/data/DataStatus.cpp:34 msgid "Already reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:35 msgid "Already writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:36 msgid "Read access check failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:37 msgid "Directory listing failed" msgstr "" #: src/hed/libs/data/DataStatus.cpp:38 msgid "Object is not suitable for listing" msgstr "" #: src/hed/libs/data/DataStatus.cpp:39 msgid "Failed to obtain information about file" msgstr "" #: src/hed/libs/data/DataStatus.cpp:40 msgid "No such file or directory" msgstr "" #: src/hed/libs/data/DataStatus.cpp:41 msgid "Object not initialized (internal error)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:42 msgid "Operating System error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:43 msgid "Failed to stage file(s)" msgstr "" #: src/hed/libs/data/DataStatus.cpp:44 msgid "Inconsistent metadata" msgstr "" #: src/hed/libs/data/DataStatus.cpp:45 msgid "Failed to prepare source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:46 msgid "Should wait for source to be prepared" msgstr "" #: 
src/hed/libs/data/DataStatus.cpp:47 msgid "Failed to prepare destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:48 msgid "Should wait for destination to be prepared" msgstr "" #: src/hed/libs/data/DataStatus.cpp:49 msgid "Failed to finalize reading from source" msgstr "" #: src/hed/libs/data/DataStatus.cpp:50 msgid "Failed to finalize writing to destination" msgstr "" #: src/hed/libs/data/DataStatus.cpp:51 msgid "Failed to create directory" msgstr "" #: src/hed/libs/data/DataStatus.cpp:52 msgid "Failed to rename URL" msgstr "" #: src/hed/libs/data/DataStatus.cpp:53 msgid "Data was already cached" msgstr "" #: src/hed/libs/data/DataStatus.cpp:54 msgid "Operation cancelled successfully" msgstr "" #: src/hed/libs/data/DataStatus.cpp:55 msgid "Generic error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69 msgid "Unknown error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:60 msgid "No error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:61 msgid "Transfer timed out" msgstr "" #: src/hed/libs/data/DataStatus.cpp:62 msgid "Checksum mismatch" msgstr "" #: src/hed/libs/data/DataStatus.cpp:63 msgid "Bad logic" msgstr "" #: src/hed/libs/data/DataStatus.cpp:64 msgid "All results obtained are invalid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:65 msgid "Temporary service error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:66 msgid "Permanent service error" msgstr "" #: src/hed/libs/data/DataStatus.cpp:67 msgid "Error switching uid" msgstr "" #: src/hed/libs/data/DataStatus.cpp:68 msgid "Request timed out" msgstr "" #: src/hed/libs/data/FileCache.cpp:109 msgid "No cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:126 msgid "No usable caches" msgstr "" #: src/hed/libs/data/FileCache.cpp:135 msgid "No draining cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:153 msgid "No read-only cache directory specified" msgstr "" #: src/hed/libs/data/FileCache.cpp:182 #, c-format msgid "Failed to create cache directory for file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:192 #, c-format msgid "Failed to create any cache directories for %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:199 #, c-format msgid "Failed to change permissions on %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:211 #, c-format msgid "Failed to delete stale cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:214 #, c-format msgid "Failed to release lock on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:232 #, c-format msgid "Failed looking up attributes of cached file: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:238 #, c-format msgid "Failed to obtain lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:247 src/hed/libs/data/FileCache.cpp:307 #, c-format msgid "Error removing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:249 src/hed/libs/data/FileCache.cpp:260 #, c-format msgid "Failed to remove lock on %s. Some manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:279 src/hed/libs/data/FileCache.cpp:313 #, c-format msgid "Failed to unlock file %s: %s. 
Manual intervention may be required" msgstr "" #: src/hed/libs/data/FileCache.cpp:296 #, c-format msgid "Invalid lock on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:302 #, c-format msgid "Failed to remove .meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:367 #, c-format msgid "Cache not found for file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:377 #, c-format msgid "" "Cache file %s was modified in the last second, sleeping 1 second to avoid " "race condition" msgstr "" #: src/hed/libs/data/FileCache.cpp:382 src/hed/libs/data/FileCache.cpp:687 #, c-format msgid "Cache file %s does not exist" msgstr "" #: src/hed/libs/data/FileCache.cpp:387 src/hed/libs/data/FileCache.cpp:689 #, c-format msgid "Error accessing cache file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:393 #, c-format msgid "Cannot create directory %s for per-job hard links" msgstr "" #: src/hed/libs/data/FileCache.cpp:398 #, c-format msgid "Cannot change permission of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:402 #, c-format msgid "Cannot change owner of %s: %s " msgstr "" #: src/hed/libs/data/FileCache.cpp:416 #, c-format msgid "Failed to remove existing hard link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:420 src/hed/libs/data/FileCache.cpp:431 #, c-format msgid "Failed to create hard link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:426 #, c-format msgid "Cache file %s not found" msgstr "" #: src/hed/libs/data/FileCache.cpp:441 #, c-format msgid "Failed to change permissions or set owner of hard link %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:449 #, c-format msgid "Failed to release lock on cache file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:460 #, c-format msgid "Cache file %s was locked during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:465 #, c-format msgid "Cache file %s was deleted during link/copy, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:470 #, c-format msgid "Cache file %s was modified while linking, must start again" msgstr "" #: src/hed/libs/data/FileCache.cpp:488 #, c-format msgid "Failed to copy file %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:494 #, c-format msgid "Failed to set executable bit on file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:499 #, c-format msgid "Failed to set executable bit on file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:513 #, c-format msgid "Failed to remove existing symbolic link at %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:517 src/hed/libs/data/FileCache.cpp:522 #, c-format msgid "Failed to create symbolic link from %s to %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:552 #, c-format msgid "Failed to remove cache per-job dir %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:571 src/hed/libs/data/FileCache.cpp:639 #, c-format msgid "Error reading meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:576 src/hed/libs/data/FileCache.cpp:644 #, c-format msgid "Error opening meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:581 src/hed/libs/data/FileCache.cpp:648 #, c-format msgid "meta file %s is empty" msgstr "" #: src/hed/libs/data/FileCache.cpp:591 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - will not add DN " "to cached list" msgstr "" #: src/hed/libs/data/FileCache.cpp:602 #, c-format msgid "Bad format detected in file %s, in line %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:618 #, c-format msgid "Could not acquire lock on 
meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:622 #, c-format msgid "Error opening meta file for writing %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:658 #, c-format msgid "DN %s is cached and is valid until %s for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:662 #, c-format msgid "DN %s is cached but has expired for URL %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:713 #, c-format msgid "Failed to acquire lock on cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:718 #, c-format msgid "Failed to create cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:733 #, c-format msgid "Failed to read cache meta file %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:738 #, c-format msgid "Cache meta file %s is empty, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:743 #, c-format msgid "Cache meta file %s possibly corrupted, will recreate" msgstr "" #: src/hed/libs/data/FileCache.cpp:747 #, c-format msgid "" "File %s is already cached at %s under a different URL: %s - this file will " "not be cached" msgstr "" #: src/hed/libs/data/FileCache.cpp:757 #, c-format msgid "Error looking up attributes of cache meta file %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:828 #, c-format msgid "Using cache %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:842 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:79 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:112 #, c-format msgid "Error getting info from statvfs for the path %s: %s" msgstr "" #: src/hed/libs/data/FileCache.cpp:848 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:118 #, c-format msgid "Cache %s: Free space %f GB" msgstr "" #: src/hed/libs/data/URLMap.cpp:33 #, c-format msgid "Can't use URL %s" msgstr "" #: src/hed/libs/data/URLMap.cpp:39 #, c-format msgid "file %s is not accessible" msgstr "" #: src/hed/libs/data/URLMap.cpp:49 #, c-format msgid "Mapping %s to %s" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:17 msgid "Usage: copy source destination" msgstr "" #: src/hed/libs/data/examples/simple_copy.cpp:42 #, c-format msgid "Copy failed: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:41 #, c-format msgid "Failed to read proxy file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:49 #, c-format msgid "Failed to read certificate file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:56 #, c-format msgid "Failed to read private key file: %s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:82 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:" "%s" msgstr "" #: src/hed/libs/globusutils/GSSCredential.cpp:94 #, c-format msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:30 msgid "Module Manager Init" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:73 msgid "" "Busy plugins found while unloading Module Manager. Waiting for them to be " "released." 
msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:207 #, c-format msgid "Found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:214 #, c-format msgid "Could not locate module %s in following paths:" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:218 #, c-format msgid "\t%s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:232 #, c-format msgid "Loaded %s" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:276 msgid "Module Manager Init by ModuleManager::setCfg" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:312 #: src/hed/libs/loader/ModuleManager.cpp:325 #, c-format msgid "%s made persistent" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:316 #, c-format msgid "Not found %s in cache" msgstr "" #: src/hed/libs/loader/ModuleManager.cpp:330 msgid "Specified module not found in cache" msgstr "" #: src/hed/libs/loader/Plugin.cpp:364 src/hed/libs/loader/Plugin.cpp:557 #, c-format msgid "Could not find loadable module descriptor by name %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:372 src/hed/libs/loader/Plugin.cpp:567 #, c-format msgid "Could not find loadable module by name %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:378 src/hed/libs/loader/Plugin.cpp:480 #: src/hed/libs/loader/Plugin.cpp:572 #, c-format msgid "Module %s is not an ARC plugin (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:395 src/hed/libs/loader/Plugin.cpp:490 #: src/hed/libs/loader/Plugin.cpp:598 #, c-format msgid "Module %s failed to reload (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:417 #, c-format msgid "Module %s contains no plugin %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:462 #, c-format msgid "Could not find loadable module descriptor by name %s or kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:467 #, c-format msgid "Loadable module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:474 #, c-format msgid "Could not find loadable module by names %s and %s (%s)" msgstr "" #: src/hed/libs/loader/Plugin.cpp:503 #, c-format msgid "Module %s contains no requested plugin %s of kind %s" msgstr "" #: src/hed/libs/loader/Plugin.cpp:588 #, c-format msgid "Module %s does not contain plugin(s) of specified kind(s)" msgstr "" #: src/hed/libs/message/MCC.cpp:76 src/hed/libs/message/Service.cpp:25 #, c-format msgid "No security processing/check requested for '%s'" msgstr "" #: src/hed/libs/message/MCC.cpp:85 #, c-format msgid "Security processing/check failed: %s" msgstr "" #: src/hed/libs/message/MCC.cpp:90 msgid "Security processing/check passed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:17 msgid "Chain(s) configuration failed" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:134 msgid "SecHandler configuration is not defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:157 msgid "SecHandler has no configuration" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:163 msgid "SecHandler has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:173 #, c-format msgid "Security Handler %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:177 #, c-format msgid "SecHandler: %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:189 msgid "Component has no name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:194 msgid "Component has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:203 #, c-format msgid "Component %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:229 #, c-format msgid "Component's %s(%s) next has no ID 
attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:290 #, c-format msgid "Loaded MCC %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:308 #, c-format msgid "Plexer's (%s) next has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:318 #, c-format msgid "Loaded Plexer %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:326 msgid "Service has no Name attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:332 msgid "Service has no ID attribute defined" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:341 #, c-format msgid "Service %s(%s) could not be created" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:348 #, c-format msgid "Loaded Service %s(%s)" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:390 #, c-format msgid "Linking MCC %s(%s) to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:401 #, c-format msgid "Linking MCC %s(%s) to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:410 #, c-format msgid "Linking MCC %s(%s) to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:415 #, c-format msgid "MCC %s(%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:434 #, c-format msgid "Linking Plexer %s to MCC (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:445 #, c-format msgid "Linking Plexer %s to Service (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:454 #, c-format msgid "Linking Plexer %s to Plexer (%s) under %s" msgstr "" #: src/hed/libs/message/MCCLoader.cpp:460 #, c-format msgid "Plexer (%s) - next %s(%s) has no target" msgstr "" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "" #: src/hed/libs/message/Service.cpp:35 #, c-format msgid "Security processing/check for '%s' failed: %s" msgstr "" #: src/hed/libs/message/Service.cpp:41 #, c-format msgid "Security processing/check for '%s' passed" msgstr "" #: src/hed/libs/otokens/jwse.cpp:55 #, c-format msgid "JWSE::Input: token: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:75 #, c-format msgid "JWSE::Input: header: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:101 #, c-format msgid "JWSE::Input: JWS content: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:111 msgid "JWSE::Input: JWS: token too young" msgstr "" #: src/hed/libs/otokens/jwse.cpp:120 msgid "JWSE::Input: JWS: token too old" msgstr "" #: src/hed/libs/otokens/jwse.cpp:131 #, c-format msgid "JWSE::Input: JWS: signature algorithm: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:174 #, c-format msgid "JWSE::Input: JWS: signature algorithn not supported: %s" msgstr "" #: src/hed/libs/otokens/jwse.cpp:192 msgid "JWSE::Input: JWS: signature verification failed" msgstr "" #: src/hed/libs/otokens/jwse.cpp:198 msgid "JWSE::Input: JWE: not supported yet" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:21 msgid "JWSE::VerifyECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:25 msgid "JWSE::VerifyECDSA: wrong signature size" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:34 msgid "JWSE::VerifyECDSA: failed to create ECDSA signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:41 msgid "JWSE::VerifyECDSA: failed to parse signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:47 #, c-format msgid "JWSE::VerifyECDSA: failed to assign ECDSA signature: %i" msgstr "" 
#: src/hed/libs/otokens/jwse_ecdsa.cpp:56 msgid "JWSE::VerifyECDSA: failed to create EVP context" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:61 #, c-format msgid "JWSE::VerifyECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:66 #, c-format msgid "JWSE::VerifyECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:72 #, c-format msgid "JWSE::VerifyECDSA: failed to add message to hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:80 #, c-format msgid "JWSE::VerifyECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:87 #, c-format msgid "JWSE::VerifyECDSA: failed to verify: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:96 msgid "JWSE::SignECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:104 msgid "JWSE::SignECDSA: failed to create EVP context" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:109 #, c-format msgid "JWSE::SignECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:114 #, c-format msgid "JWSE::SignECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:120 #, c-format msgid "JWSE::SignECDSA: failed to add message to hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:128 #, c-format msgid "JWSE::SignECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:135 msgid "JWSE::SignECDSA: failed to create ECDSA signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:143 msgid "JWSE::SignECDSA: failed to parse signature" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:150 #, c-format msgid "JWSE::SignECDSA: wrong signature size: %i + %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:156 msgid "JWSE::SignECDSA: wrong signature size written" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:273 msgid "JWSE::ExtractPublicKey: x5c key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:281 msgid "JWSE::ExtractPublicKey: jwk key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:288 msgid "JWSE::ExtractPublicKey: external jwk key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:315 #, c-format msgid "JWSE::ExtractPublicKey: deleting outdated info: %s" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:344 #, c-format msgid "JWSE::ExtractPublicKey: fetching jws key from %s" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:372 msgid "JWSE::ExtractPublicKey: no supported key" msgstr "" #: src/hed/libs/otokens/jwse_keys.cpp:375 msgid "JWSE::ExtractPublicKey: key parsing error" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:40 #: src/hed/libs/otokens/openid_metadata.cpp:45 #, c-format msgid "Input: metadata: %s" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:438 #, c-format msgid "Fetch: response code: %u %s" msgstr "" #: src/hed/libs/otokens/openid_metadata.cpp:440 #, c-format msgid "Fetch: response body: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:141 #, c-format msgid "Can not load ARC evaluator object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:192 #, c-format msgid "Can not load ARC request object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:233 #, c-format msgid "Can not load policy object: %s" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:281 msgid "Can not load policy object" msgstr "" #: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:329 msgid "Can not load request object" msgstr "" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not 
generate policy object" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists which can deal with type: %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:189 #, c-format msgid "HTTP Error: %d %s" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:270 msgid "Cannot create http payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:353 msgid "No next element in the chain" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:362 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:371 msgid "next element of the chain returned no payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:383 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" #: src/hed/mcc/http/MCCHTTP.cpp:465 msgid "Failed to flush output payload" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:576 msgid "Failed to parse HTTP header" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:969 #, c-format msgid "> %s" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:994 msgid "Failed to write header to output stream" msgstr "" #: src/hed/mcc/http/PayloadHTTP.cpp:1019 src/hed/mcc/http/PayloadHTTP.cpp:1025 #: src/hed/mcc/http/PayloadHTTP.cpp:1031 src/hed/mcc/http/PayloadHTTP.cpp:1041 #: src/hed/mcc/http/PayloadHTTP.cpp:1053 src/hed/mcc/http/PayloadHTTP.cpp:1058 #: src/hed/mcc/http/PayloadHTTP.cpp:1063 src/hed/mcc/http/PayloadHTTP.cpp:1071 #: src/hed/mcc/http/PayloadHTTP.cpp:1078 msgid "Failed to write body to output stream" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212 msgid "Could not create PayloadSOAP!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196 msgid "Empty input payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205 msgid "Could not convert incoming payload!" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232 msgid "Missing schema! Skipping validation..." msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237 msgid "Could not validate message!"
msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245 #: src/hed/mcc/soap/MCCSOAP.cpp:238 src/hed/mcc/soap/MCCSOAP.cpp:252 #: src/hed/mcc/soap/MCCSOAP.cpp:282 msgid "empty next chain element" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257 #: src/hed/mcc/soap/MCCSOAP.cpp:298 msgid "next element of the chain returned empty payload" msgstr "" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265 msgid "next element of the chain returned invalid payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:223 msgid "empty input payload" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:233 #, c-format msgid "MIME is not suitable for SOAP: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:247 msgid "incoming message is not SOAP" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:274 #, c-format msgid "Security check failed in SOAP MCC for incoming message: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:290 #, c-format msgid "next element of the chain returned error status: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:309 msgid "next element of the chain returned unknown payload - passing through" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:314 src/hed/mcc/soap/MCCSOAP.cpp:330 #, c-format msgid "Security check failed in SOAP MCC for outgoing message: %s" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:384 msgid "Security check failed in SOAP MCC for outgoing message" msgstr "" #: src/hed/mcc/soap/MCCSOAP.cpp:437 msgid "Security check failed in SOAP MCC for incoming message" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:82 msgid "Missing Port in Listen element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:91 msgid "Version in Listen element can't be recognized" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:100 #, c-format msgid "Failed to obtain local address for port %s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:102 #, c-format msgid "Failed to obtain local address for %s:%s - %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:109 #, c-format msgid "Trying to listen on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:111 #, c-format msgid "Trying to listen on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:117 #, c-format msgid "Failed to create socket for listening at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:119 #, c-format msgid "Failed to create socket for listening at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:134 #, c-format msgid "" "Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at " "same port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:136 #, c-format msgid "" "Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same " "port" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:144 #, c-format msgid "Failed to bind socket for TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:146 #, c-format msgid "Failed to bind socket for %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:161 #, c-format msgid "Failed to listen at TCP port %s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:163 #, c-format msgid "Failed to listen at %s:%s(%s): %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:180 #, c-format msgid "Listening on TCP port %s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:182 #, c-format msgid "Listening on %s:%s(%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:189 #, c-format msgid "Failed to start listening on any address for %s:%s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:191 #, c-format msgid "Failed to start listening on any address for %s:%s(IPv%s)" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:197 msgid "No listening ports initiated" msgstr "" #: 
src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "dropped" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 msgid "put on hold" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:208 #, c-format msgid "Setting connections limit to %i, connections over limit will be %s" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:212 msgid "Failed to start thread for listening" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:245 msgid "Failed to start thread for communication" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:271 msgid "Failed while waiting for connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:293 msgid "Failed to accept connection request" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:302 msgid "Too many connections - dropping new one" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:309 msgid "Too many connections - waiting for old to close" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:548 msgid "next chain element called" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:563 msgid "Only Raw Buffer payload is supported for output" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:571 src/hed/mcc/tcp/MCCTCP.cpp:670 #: src/hed/mcc/tls/MCCTLS.cpp:561 msgid "Failed to send content of buffer" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:583 msgid "TCP executor is removed" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:585 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:606 msgid "No Connect element specified" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "Missing Port in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:618 msgid "Missing Host in Connect element" msgstr "" #: src/hed/mcc/tcp/MCCTCP.cpp:646 msgid "TCP client process called" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:65 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:81 #, c-format msgid "Failed to resolve %s (%s)" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:91 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:95 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:107 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:132 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:198 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:201 msgid "Using CA default location" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:210 #, c-format msgid "Using CA file: %s" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:212 #, c-format msgid "Using CA dir: %s" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:289 #, c-format msgid "Using DH parameters from file: %s" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:292 msgid "Failed to open file with DH parameters for reading" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:297 msgid "Failed to read file with DH parameters" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:300 msgid "Failed to apply DH parameters" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:302 msgid "DH parameters applied" msgstr "" #: 
src/hed/mcc/tls/ConfigTLSMCC.cpp:316 #, c-format msgid "Using curve with NID: %u" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:319 msgid "Failed to generate EC key" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:322 msgid "Failed to apply ECDH parameters" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:324 msgid "ECDH parameters applied" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:330 #, c-format msgid "Using cipher list: %s" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:354 #, c-format msgid "Using protocol options: 0x%x" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:39 msgid "Independent proxy - no rights granted" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:43 msgid "Proxy with all rights inherited" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:51 msgid "Proxy with empty policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:56 #, c-format msgid "Proxy with specific policy: %s" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:60 msgid "Proxy with ARC Policy" msgstr "" #: src/hed/mcc/tls/DelegationCollector.cpp:62 msgid "Proxy with unknown policy - fail on unrecognized policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116 #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:167 #, c-format msgid "Was expecting %s at the beginning of \"%s\"" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:121 #, c-format msgid "We only support CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:126 #, c-format msgid "We only support X509 CAs in Globus signing policy - %s is not supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:131 msgid "Missing CA subject in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:141 msgid "Negative rights are not supported in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:145 #, c-format msgid "Unknown rights in Globus signing policy - %s" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:150 #, c-format msgid "" "Only globus rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:155 #, c-format msgid "" "Only signing rights are supported in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:172 #, c-format msgid "" "We only support subjects conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:177 #, c-format msgid "" "We only support globus conditions in Globus signing policy - %s is not " "supported" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:183 msgid "Missing condition subjects in Globus signing policy" msgstr "" #: src/hed/mcc/tls/GlobusSigningPolicy.cpp:265 msgid "Unknown element in Globus signing policy" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:237 msgid "Critical VOMS attribute processing failed" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:245 msgid "VOMS attribute validation failed" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:247 msgid "VOMS attribute is ignored due to processing/validation error" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:439 src/hed/mcc/tls/MCCTLS.cpp:578 #: src/hed/mcc/tls/MCCTLS.cpp:597 #, c-format msgid "Failed to establish connection: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:458 src/hed/mcc/tls/MCCTLS.cpp:540 #, c-format msgid "Peer name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:460 src/hed/mcc/tls/MCCTLS.cpp:542 #, c-format msgid "Identity name: %s" msgstr "" #: 
src/hed/mcc/tls/MCCTLS.cpp:462 src/hed/mcc/tls/MCCTLS.cpp:544 #, c-format msgid "CA name: %s" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:469 msgid "Failed to process security attributes in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:477 msgid "Security check failed in TLS MCC for incoming message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:550 msgid "Security check failed for outgoing TLS message" msgstr "" #: src/hed/mcc/tls/MCCTLS.cpp:582 msgid "Security check failed for incoming TLS message" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:64 #, c-format msgid "Ignoring verification error due to insecure connection allowed: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:79 msgid "" "Failed to allocate memory for certificate subject while matching policy." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:83 msgid "" "Failed to retrieve link to TLS stream. Additional policy matching is skipped." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:85 msgid "" "Skipping additional policy matching due to insecure connections allowed." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:109 #, c-format msgid "Certificate %s already expired" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:117 #, c-format msgid "Certificate %s will expire in %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:138 msgid "Failed to store application data" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:166 msgid "Failed to retrieve application data from OpenSSL" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:238 src/hed/mcc/tls/PayloadTLSMCC.cpp:338 msgid "Can not create the SSL Context object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:251 src/hed/mcc/tls/PayloadTLSMCC.cpp:358 msgid "Can't set OpenSSL verify flags" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:372 msgid "Can not create the SSL object" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:280 msgid "Failed to assign hostname extension" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:294 msgid "Failed to establish SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:298 src/hed/mcc/tls/PayloadTLSMCC.cpp:388 #, c-format msgid "Using cipher: %s" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:384 msgid "Failed to accept SSL connection" msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:446 #, c-format msgid "Failed to shut down SSL: %s" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, c-format msgid "There are %d RequestItems" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from
configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:109 msgid "Can not find ArcPDPContext" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:138 src/hed/shc/xacmlpdp/XACMLPDP.cpp:116 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:142 src/hed/shc/xacmlpdp/XACMLPDP.cpp:120 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:154 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:83 #: src/hed/shc/gaclpdp/GACLPDP.cpp:117 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:132 msgid "Can not dynamically produce Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:157 msgid "Evaluator for ArcPDP was not loaded" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:164 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:56 #: src/hed/shc/gaclpdp/GACLPDP.cpp:127 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:88 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:142 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:172 src/hed/shc/arcpdp/ArcPDP.cpp:180 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:136 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:142 #: src/hed/shc/gaclpdp/GACLPDP.cpp:135 src/hed/shc/gaclpdp/GACLPDP.cpp:143 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:96 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:104 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:188 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:149 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:112 #, c-format msgid "ARC Auth. 
request: %s" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:191 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:152 #: src/hed/shc/gaclpdp/GACLPDP.cpp:154 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:115 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:169 msgid "No requested security information was collected" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:198 msgid "Not authorized by arc.pdp - failed to get response from Evaluator" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:244 msgid "Authorized by arc.pdp" msgstr "" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:74 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:128 msgid "No delegation policies in this context and message - passing through" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:94 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:108 msgid "Failed to convert security information to ARC policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:115 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:122 #, c-format msgid "ARC delegation policy: %s" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:160 msgid "No authorization response was returned" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:163 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:182 msgid "Delegation authorization passed" msgstr "" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:184 msgid "Delegation authorization failed" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:478 src/services/candypond/CandyPond.cpp:526 #: src/services/data-staging/DataDeliveryService.cpp:648 msgid "process: POST" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:485 src/services/candypond/CandyPond.cpp:535 #: src/services/data-staging/DataDeliveryService.cpp:657 #: src/services/wrappers/python/pythonwrapper.cpp:416 msgid "input is not SOAP" msgstr "" #: 
src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential obtained from the delegation service is stored at path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delegation service should be configured" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential was obtained from path: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation credential to delegation service: %s" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 msgid "output is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:230 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:353 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:120 msgid "Evaluator for GACLPDP was not loaded" msgstr "" #: src/hed/shc/gaclpdp/GACLPDP.cpp:151 #, c-format msgid "GACL Auth.
request: %s" msgstr "" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:13 msgid "Configuration file not specified" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:28 #: src/hed/shc/legacy/ConfigParser.cpp:33 msgid "Configuration file can not be read" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:43 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "" #: src/hed/shc/legacy/ConfigParser.cpp:47 #, c-format msgid "Configuration file is broken - block name does not end with ]: %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:39 src/hed/shc/legacy/LegacyPDP.cpp:119 msgid "Configuration file not specified in ConfigBlock" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:48 src/hed/shc/legacy/LegacyPDP.cpp:128 msgid "BlockName is empty" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:108 #, c-format msgid "Failed processing user mapping command: %s %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:114 #, c-format msgid "Failed to change mapping stack processing policy in: %s = %s" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:179 msgid "LegacyMap: no configurations blocks defined" msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:201 src/hed/shc/legacy/LegacyPDP.cpp:255 #, c-format msgid "" "LegacyPDP: there is no %s Sec Attribute defined. Probably ARC Legacy Sec " "Handler is not configured or failed." msgstr "" #: src/hed/shc/legacy/LegacyMap.cpp:206 src/hed/shc/legacy/LegacyPDP.cpp:260 msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized." msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:138 #, c-format msgid "Failed to parse configuration file %s" msgstr "" #: src/hed/shc/legacy/LegacyPDP.cpp:144 #, c-format msgid "Block %s not found in configuration file %s" msgstr "" #: src/hed/shc/legacy/LegacySecHandler.cpp:40 #: src/hed/shc/legacy/LegacySecHandler.cpp:118 msgid "LegacySecHandler: configuration file not specified" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:149 src/hed/shc/legacy/arc_lcmaps.cpp:163 #, c-format msgid "" "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:174 src/hed/shc/legacy/arc_lcmaps.cpp:188 msgid "Missing subject name" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:179 src/hed/shc/legacy/arc_lcmaps.cpp:193 msgid "Missing path of credentials file" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:185 msgid "Missing name of LCAS library" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:202 #, c-format msgid "Can't load LCAS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:212 #, c-format msgid "Can't find LCAS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:222 msgid "Failed to initialize LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcas.cpp:237 msgid "Failed to terminate LCAS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:199 msgid "Missing name of LCMAPS library" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:213 msgid "Can't read policy names" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:224 #, c-format msgid "Can't load LCMAPS library %s: %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:236 #, c-format msgid "Can't find LCMAPS functions in a library %s" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:248 msgid "LCMAPS has lcmaps_run" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:249 msgid "LCMAPS has getCredentialData" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:253 msgid "Failed to 
initialize LCMAPS" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:293 #, c-format msgid "LCMAPS returned invalid GID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:296 msgid "LCMAPS did not return any GID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:299 #, c-format msgid "LCMAPS returned UID which has no username: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:302 #, c-format msgid "LCMAPS returned invalid UID: %u" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:305 msgid "LCMAPS did not return any UID" msgstr "" #: src/hed/shc/legacy/arc_lcmaps.cpp:314 msgid "Failed to terminate LCMAPS" msgstr "" #: src/hed/shc/legacy/auth.cpp:35 #, c-format msgid "Unexpected argument for 'all' rule - %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:340 #, c-format msgid "Credentials stored in temporary file %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:349 #, c-format msgid "Assigned to authorization group %s" msgstr "" #: src/hed/shc/legacy/auth.cpp:354 #, c-format msgid "Assigned to userlist %s" msgstr "" #: src/hed/shc/legacy/auth_file.cpp:22 #, c-format msgid "Failed to read file %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:33 msgid "Missing subject in configuration" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:38 msgid "Missing issuer in configuration" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:43 msgid "Missing audience in configuration" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:48 msgid "Missing scope in configuration" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:53 src/hed/shc/legacy/auth_voms.cpp:47 msgid "Missing group in configuration" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:56 #, c-format msgid "Rule: subject: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:57 #, c-format msgid "Rule: issuer: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:58 #, c-format msgid "Rule: audience: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:59 #, c-format msgid "Rule: scope: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:60 src/hed/shc/legacy/auth_voms.cpp:66 #, c-format msgid "Rule: group: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:63 #, c-format msgid "Match issuer: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:69 #, c-format msgid "Matched: %s %s %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:83 src/hed/shc/legacy/auth_voms.cpp:93 msgid "Matched nothing" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:176 #, c-format msgid "Evaluate operator =: left: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:177 #, c-format msgid "Evaluate operator =: right: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:182 #, c-format msgid "Evaluate operator =: left from context: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:239 #, c-format msgid "Operator token: %c" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:268 #, c-format msgid "String token: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:296 #, c-format msgid "Quoted string token: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:304 #, c-format msgid "Sequence token parsing: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:420 #, c-format msgid "Matching tokens expression: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:424 msgid "Failed to parse expression" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:435 #, c-format msgid "%s: " msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:441 #, c-format msgid " %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:446 msgid "Expression matched" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:451 #, 
c-format msgid "Failed to evaluate expression: %s" msgstr "" #: src/hed/shc/legacy/auth_otokens.cpp:454 msgid "Expression failed to matched" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:79 src/hed/shc/legacy/unixmap.cpp:216 #, c-format msgid "Plugin %s returned: %u" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:83 src/hed/shc/legacy/unixmap.cpp:220 #, c-format msgid "Plugin %s timeout after %u seconds" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:86 src/hed/shc/legacy/unixmap.cpp:223 #, c-format msgid "Plugin %s failed to start" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:88 src/hed/shc/legacy/unixmap.cpp:225 #, c-format msgid "Plugin %s printed: %s" msgstr "" #: src/hed/shc/legacy/auth_plugin.cpp:89 src/hed/shc/legacy/unixmap.cpp:213 #: src/hed/shc/legacy/unixmap.cpp:226 #, c-format msgid "Plugin %s error: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:42 msgid "Missing VO in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:52 msgid "Missing role in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:57 msgid "Missing capabilities in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:62 msgid "Too many arguments in configuration" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:65 #, c-format msgid "Rule: vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:67 #, c-format msgid "Rule: role: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:68 #, c-format msgid "Rule: capabilities: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:71 #, c-format msgid "Match vo: %s" msgstr "" #: src/hed/shc/legacy/auth_voms.cpp:78 #, c-format msgid "Matched: %s %s %s %s" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:70 #, c-format msgid "SimpleMap: acquired new unmap time of %u seconds" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:72 msgid "SimpleMap: wrong number in unmaptime command" msgstr "" #: src/hed/shc/legacy/simplemap.cpp:85 src/hed/shc/legacy/simplemap.cpp:90 #, c-format msgid "SimpleMap: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:65 src/hed/shc/legacy/unixmap.cpp:70 msgid "Mapping policy option has empty value" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:80 #, c-format msgid "Unsupported mapping policy action: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:91 #, c-format msgid "Unsupported mapping policy option: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:103 src/hed/shc/legacy/unixmap.cpp:108 msgid "User name mapping command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:116 #, c-format msgid "User name mapping has empty authgroup: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:147 #, c-format msgid "Unknown user name mapping rule %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:156 src/hed/shc/legacy/unixmap.cpp:161 #: src/hed/shc/legacy/unixmap.cpp:177 src/hed/shc/legacy/unixmap.cpp:183 msgid "Plugin (user mapping) command is empty" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:167 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:171 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:204 #, c-format msgid "Plugin %s returned no username" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:209 #, c-format msgid "Plugin %s returned too much: %s" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:212 #, c-format msgid "Plugin %s returned no mapping" msgstr "" #: src/hed/shc/legacy/unixmap.cpp:235 msgid "User subject match is missing user subject." 
msgstr "" #: src/hed/shc/legacy/unixmap.cpp:239 #, c-format msgid "Mapfile at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:263 msgid "User pool mapping is missing user subject." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:268 #, c-format msgid "User pool at %s can't be opened." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:273 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" #: src/hed/shc/legacy/unixmap.cpp:291 #, c-format msgid "User name direct mapping is missing user name: %s." msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:65 msgid "OTokens: Attr: message" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:70 #, c-format msgid "OTokens: Attr: %s = %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:75 #, c-format msgid "OTokens: Attr: token: %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:78 #, c-format msgid "OTokens: Attr: token: bearer: %s" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:193 msgid "OTokens: Handle" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:195 msgid "OTokens: Handle: message" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:198 msgid "Failed to create OTokens security attributes" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:202 msgid "OTokens: Handle: token was not present" msgstr "" #: src/hed/shc/otokens/OTokensSH.cpp:206 #, c-format msgid "OTokens: Handle: attributes created: subject = %s" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:47 msgid "Creating a pdpservice client" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:81 msgid "Arc policy can not been carried by SAML2.0 profile of XACML" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:153 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:186 msgid "Policy Decision Service invocation failed" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:156 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:189 #: src/tests/client/test_ClientInterface.cpp:88 #: src/tests/client/test_ClientSAML2SSO.cpp:81 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100 #: src/tests/echo/test_clientinterface.cpp:82 #: src/tests/echo/test_clientinterface.cpp:149 #: src/tests/echo/test_clientinterface.py:32 msgid "There was no SOAP response" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205 msgid "Authorized from remote pdp service" msgstr "" #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:172 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:206 msgid "Unauthorized from remote pdp service" msgstr "" #: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69 msgid "Can not get SAMLAssertion SecAttr from message context" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:158 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:44 msgid "Missing or empty CertificatePath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:49 msgid "Missing or empty KeyPath element" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:171 msgid "" "Both of CACertificatePath and CACertificatesDir elements missing or empty" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:185 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:61 msgid "" "Missing or empty CertificatePath or CACertificatesDir element; will only " "check the signature, will not do message authentication" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:189 #: 
src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:65 #, c-format msgid "Processing type not supported: %s" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209 msgid "Failed to parse SAML Token from incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:219 msgid "Failed to authenticate SAML Token inside the incoming SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:222 msgid "Succeeded to authenticate SAMLToken" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287 #, c-format msgid "No response from AA service %s" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291 #, c-format msgid "SOAP Request to AA service %s failed" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:299 msgid "Cannot find content under response soap message" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303 msgid "Cannot find under response soap message:" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:320 msgid "The Response is not going to this end" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:327 msgid "The StatusCode is Success" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:333 msgid "Succeeded to verify the signature under " msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:336 msgid "Failed to verify the signature under " msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:347 msgid "Failed to generate SAML Token for outgoing SOAP" msgstr "" #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:357 msgid "SAML Token handler is not configured" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:28 #, c-format msgid "Access list location: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:38 msgid "" "No policy file or DNs specified for simplelist.pdp, please set location " "attribute or at least one DN element for simplelist PDP node in " "configuration." 
msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:41 #, c-format msgid "Subject to match: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:44 #, c-format msgid "Policy subject: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:46 #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:72 #, c-format msgid "Authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:53 msgid "" "The policy file setup for simplelist.pdp does not exist, please check " "location attribute for simplelist PDP node in service configuration" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:60 #, c-format msgid "Policy line: %s" msgstr "" #: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:78 #, c-format msgid "Not authorized from simplelist.pdp: %s" msgstr "" #: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26 #: src/hed/shc/testinterface_xacml.cpp:26 msgid "Start test" msgstr "" #: src/hed/shc/test.cpp:101 msgid "Input request from a file: Request.xml" msgstr "" #: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197 #: src/hed/shc/testinterface_arc.cpp:124 #, c-format msgid "There is %d subjects, which satisfy at least one policy" msgstr "" #: src/hed/shc/test.cpp:121 #, c-format msgid "Attribute Value (1): %s" msgstr "" #: src/hed/shc/test.cpp:132 msgid "Input request from code" msgstr "" #: src/hed/shc/test.cpp:211 #, c-format msgid "Attribute Value (2): %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46 msgid "Can not dynamically produce Policy" msgstr "" #: src/hed/shc/testinterface_arc.cpp:138 #, c-format msgid "Attribute Value inside Subject: %s" msgstr "" #: src/hed/shc/testinterface_arc.cpp:148 msgid "The request has passed the policy evaluation" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43 msgid "Missing or empty PasswordSource element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54 #, c-format msgid "Password encoding type not supported: %s" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59 msgid "Missing or empty Username element" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79 msgid "The payload of incoming message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84 msgid "Failed to cast PayloadSOAP from incoming payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89 msgid "Failed to parse Username Token from incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95 msgid "Failed to authenticate Username Token inside the incoming SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98 msgid "Succeeded to authenticate UsernameToken" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108 msgid "The payload of outgoing message is empty" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113 msgid "Failed to cast PayloadSOAP from outgoing payload" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119 msgid "Failed to generate Username Token for outgoing SOAP" msgstr "" #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127 msgid "Username Token handler is not configured" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:81 msgid "Failed to parse X509 Token from incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:85 msgid "Failed to verify X509 Token inside the incoming SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:89 msgid "Failed to authenticate X509 Token inside the incoming SOAP" msgstr "" #: 
src/hed/shc/x509tokensh/X509TokenSH.cpp:92 msgid "Succeeded to authenticate X509Token" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:102 msgid "Failed to generate X509 Token for outgoing SOAP" msgstr "" #: src/hed/shc/x509tokensh/X509TokenSH.cpp:112 msgid "X509 Token handler is not configured" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:29 msgid "Can not create function: FunctionId does not exist" msgstr "" #: src/hed/shc/xacmlpdp/XACMLApply.cpp:33 #: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40 #, c-format msgid "Can not create function %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:87 msgid "Can not find XACMLPDPContext" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:135 msgid "Evaluator for XACMLPDP was not loaded" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:150 src/hed/shc/xacmlpdp/XACMLPDP.cpp:158 msgid "Failed to convert security information to XACML request" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:166 #, c-format msgid "XACML request: %s" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:178 msgid "Authorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179 msgid "UnAuthorized from xacml.pdp" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132 msgid "No target available inside the policy" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34 msgid "Request is empty" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39 msgid "Can not find element with proper namespace" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:35 msgid "Invalid Effect" msgstr "" #: src/hed/shc/xacmlpdp/XACMLRule.cpp:48 msgid "No target available inside the rule" msgstr "" #: src/libs/data-staging/DTR.cpp:81 src/libs/data-staging/DTR.cpp:85 #, c-format msgid "Could not handle endpoint %s" msgstr "" #: src/libs/data-staging/DTR.cpp:95 msgid "Source is the same as destination" msgstr "" #: src/libs/data-staging/DTR.cpp:175 #, c-format msgid "Invalid ID: %s" msgstr "" #: src/libs/data-staging/DTR.cpp:212 #, c-format msgid "%s->%s" msgstr "" #: src/libs/data-staging/DTR.cpp:320 #, c-format msgid "No callback for %s defined" msgstr "" #: src/libs/data-staging/DTR.cpp:335 #, c-format msgid "NULL callback for %s" msgstr "" #: src/libs/data-staging/DTR.cpp:338 #, c-format msgid "Request to push to unknown owner - %u" msgstr "" #: src/libs/data-staging/DTRList.cpp:216 #, c-format msgid "Boosting priority from %i to %i due to incoming higher priority DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:48 #: src/libs/data-staging/DataDelivery.cpp:72 msgid "Received invalid DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:54 #, c-format msgid "Delivery received new DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:68 msgid "Received no DTR" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:80 #, c-format msgid "Cancelling DTR %s with source: %s, destination: %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:91 #, c-format msgid "DTR %s requested cancel but no active transfer" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:147 #, c-format msgid "Cleaning up after failure: deleting %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:188 #: src/libs/data-staging/DataDelivery.cpp:263 #: src/libs/data-staging/DataDelivery.cpp:303 #: src/libs/data-staging/DataDelivery.cpp:323 msgid "Failed to delete delivery object or deletion timed out" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:254 #, c-format msgid 
"Transfer finished: %llu bytes transferred %s" msgstr "" #: src/libs/data-staging/DataDelivery.cpp:329 msgid "Data delivery loop exited" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:45 msgid "No source defined" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:49 msgid "No destination defined" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:157 #, c-format msgid "Bad checksum format %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:196 #, c-format msgid "Failed to run command: %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:235 #, c-format msgid "DataDelivery: %s" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:247 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:266 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:72 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "Connecting to Delivery service at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:101 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:107 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:185 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:251 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:337 #, c-format msgid "" "Request:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:113 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:343 #, c-format msgid "Could not connect to service %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:121 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:351 #, c-format msgid "No SOAP response from Delivery service %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:126 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:204 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:278 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:357 #, c-format msgid "" "Response:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, c-format msgid "Failed to start transfer request: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:142 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:150 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:155 #, c-format msgid "Started remote Delivery at %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:192 #, c-format msgid "Failed to send cancel request: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:199 msgid "Failed to cancel: No SOAP response" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:213 #, c-format msgid "Failed to cancel transfer request: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:220 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:301 #, c-format msgid "Bad format in XML response: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:227 #, c-format msgid "Failed to cancel: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:271 msgid "No SOAP response from delivery service" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:292 #, c-format msgid "Failed to query state: %s" msgstr "" #: 
src/libs/data-staging/DataDeliveryRemoteComm.cpp:366 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:374 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:382 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:390 #, c-format msgid "Dir %s allowed at service %s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:484 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:498 msgid "Failed locating credentials" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:503 msgid "Failed to initiate client connection" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:509 msgid "Client connection has no entry point" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:518 msgid "Initiating delegation procedure" msgstr "" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:520 msgid "Failed to initiate delegation credentials" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:97 #, c-format msgid "%5u s: %10.1f kB %8.1f kB/s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:156 msgid "Unexpected arguments" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:159 msgid "Source URL missing" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:162 msgid "Destination URL missing" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:166 #, c-format msgid "Source URL not valid: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:170 #, c-format msgid "Destination URL not valid: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #, c-format msgid "Unknown transfer option: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:272 #, c-format msgid "Source URL not supported: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:277 #: src/libs/data-staging/DataStagingDelivery.cpp:299 msgid "No credentials supplied" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:294 #, c-format msgid "Destination URL not supported: %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:348 #, c-format msgid "Will calculate %s checksum" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:359 msgid "Cannot use supplied --size option" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:572 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" #: src/libs/data-staging/DataStagingDelivery.cpp:582 #, c-format msgid "Failed cleaning up destination %s" msgstr "" #: src/libs/data-staging/Processor.cpp:49 #: src/services/candypond/CandyPond.cpp:117 msgid "Error creating cache" msgstr "" #: src/libs/data-staging/Processor.cpp:73 #, c-format msgid "Forcing re-download of file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:90 #, c-format msgid "Will wait around %is" msgstr "" #: src/libs/data-staging/Processor.cpp:109 #, c-format msgid "Force-checking source of cache file %s" msgstr "" #: src/libs/data-staging/Processor.cpp:112 #, c-format msgid "Source check requested but failed: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:132 msgid "Permission checking failed, will try downloading without using cache" msgstr "" #: src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Will download to cache file %s" msgstr "" #: 
src/libs/data-staging/Processor.cpp:183 msgid "Looking up source replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:205 #: src/libs/data-staging/Processor.cpp:432 msgid "Resolving destination replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:222 msgid "No locations for destination different from source found" msgstr "" #: src/libs/data-staging/Processor.cpp:233 msgid "Pre-registering destination in index service" msgstr "" #: src/libs/data-staging/Processor.cpp:259 msgid "Resolving source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:273 #, c-format msgid "No replicas found for %s" msgstr "" #: src/libs/data-staging/Processor.cpp:293 #, c-format msgid "Checking %s" msgstr "" #: src/libs/data-staging/Processor.cpp:302 #: src/libs/data-staging/Processor.cpp:360 msgid "Metadata of replica and index service differ" msgstr "" #: src/libs/data-staging/Processor.cpp:310 #, c-format msgid "Failed checking source replica %s: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:336 msgid "Querying source replicas in bulk" msgstr "" #: src/libs/data-staging/Processor.cpp:348 #, c-format msgid "Failed checking source replica: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:354 msgid "Failed checking source replica" msgstr "" #: src/libs/data-staging/Processor.cpp:391 msgid "Overwrite requested - will pre-clean destination" msgstr "" #: src/libs/data-staging/Processor.cpp:400 msgid "Finding existing destination replicas" msgstr "" #: src/libs/data-staging/Processor.cpp:412 #, c-format msgid "Failed to delete replica %s: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:426 #, c-format msgid "Unregistering %s" msgstr "" #: src/libs/data-staging/Processor.cpp:437 msgid "Pre-registering destination" msgstr "" #: src/libs/data-staging/Processor.cpp:443 #, c-format msgid "Failed to pre-clean destination: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:452 msgid "Destination already exists" msgstr "" #: src/libs/data-staging/Processor.cpp:476 msgid "Preparing to stage source" msgstr "" #: src/libs/data-staging/Processor.cpp:489 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:495 msgid "No physical files found for source" msgstr "" #: src/libs/data-staging/Processor.cpp:513 msgid "Preparing to stage destination" msgstr "" #: src/libs/data-staging/Processor.cpp:526 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "" #: src/libs/data-staging/Processor.cpp:532 msgid "No physical files found for destination" msgstr "" #: src/libs/data-staging/Processor.cpp:558 msgid "Releasing source" msgstr "" #: src/libs/data-staging/Processor.cpp:562 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:567 msgid "Releasing destination" msgstr "" #: src/libs/data-staging/Processor.cpp:571 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:575 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:597 #, c-format msgid "Finalising current replica %s" msgstr "" #: src/libs/data-staging/Processor.cpp:617 msgid "Removing pre-registered destination in index service" msgstr "" #: src/libs/data-staging/Processor.cpp:620 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. 
You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:626 msgid "Registering destination replica" msgstr "" #: src/libs/data-staging/Processor.cpp:629 #, c-format msgid "Failed to register destination replica: %s" msgstr "" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "" "Failed to unregister pre-registered destination %s. You may need to " "unregister it manually" msgstr "" #: src/libs/data-staging/Processor.cpp:662 msgid "Error creating cache. Stale locks may remain." msgstr "" #: src/libs/data-staging/Processor.cpp:695 #, c-format msgid "Linking/copying cached file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:716 #, c-format msgid "Failed linking cache file to %s" msgstr "" #: src/libs/data-staging/Processor.cpp:720 #, c-format msgid "Error linking cache file to %s." msgstr "" #: src/libs/data-staging/Processor.cpp:741 #: src/libs/data-staging/Processor.cpp:748 msgid "Adding to bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:174 #: src/libs/data-staging/Scheduler.cpp:181 msgid "source" msgstr "" #: src/libs/data-staging/Scheduler.cpp:174 #: src/libs/data-staging/Scheduler.cpp:181 msgid "destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:174 #, c-format msgid "Using next %s replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:181 #, c-format msgid "No more %s replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:183 msgid "Will clean up pre-registered destination" msgstr "" #: src/libs/data-staging/Scheduler.cpp:187 msgid "Will release cache locks" msgstr "" #: src/libs/data-staging/Scheduler.cpp:190 msgid "Moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:199 #, c-format msgid "Source is mapped to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:203 msgid "Cannot link to source which can be modified, will copy instead" msgstr "" #: src/libs/data-staging/Scheduler.cpp:212 msgid "Cannot link to a remote destination. Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:215 msgid "Linking mapped file" msgstr "" #: src/libs/data-staging/Scheduler.cpp:222 #, c-format msgid "Failed to create link: %s. 
Will not use mapped URL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:247 #, c-format msgid "" "Scheduler received new DTR %s with source: %s, destination: %s, assigned to " "transfer share %s with priority %d" msgstr "" #: src/libs/data-staging/Scheduler.cpp:255 msgid "" "File is not cacheable, was requested not to be cached or no cache available, " "skipping cache check" msgstr "" #: src/libs/data-staging/Scheduler.cpp:261 msgid "File is cacheable, will check cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:264 #: src/libs/data-staging/Scheduler.cpp:289 #, c-format msgid "File is currently being cached, will wait %is" msgstr "" #: src/libs/data-staging/Scheduler.cpp:283 msgid "Timed out while waiting for cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:293 msgid "Checking cache again" msgstr "" #: src/libs/data-staging/Scheduler.cpp:313 msgid "Destination file is in cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:317 msgid "Source and/or destination is index service, will resolve replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:320 msgid "" "Neither source nor destination are index services, will skip resolving " "replicas" msgstr "" #: src/libs/data-staging/Scheduler.cpp:331 msgid "Problem with index service, will release cache lock" msgstr "" #: src/libs/data-staging/Scheduler.cpp:335 msgid "Problem with index service, will proceed to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:345 msgid "Checking source file is present" msgstr "" #: src/libs/data-staging/Scheduler.cpp:353 msgid "Error with source file, moving to next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:375 #, c-format msgid "Replica %s has long latency, trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:377 #, c-format msgid "No more replicas, will use %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:380 #, c-format msgid "Checking replica %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:392 msgid "Pre-clean failed" msgstr "" #: src/libs/data-staging/Scheduler.cpp:397 msgid "Pre-clean failed, will still try to copy" msgstr "" #: src/libs/data-staging/Scheduler.cpp:405 msgid "Source or destination requires staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:409 msgid "No need to stage source or destination, skipping staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:439 msgid "Staging request timed out, will release request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:443 msgid "Querying status of staging request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:452 msgid "Releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:477 msgid "DTR is ready for transfer, moving to delivery queue" msgstr "" #: src/libs/data-staging/Scheduler.cpp:492 #, c-format msgid "Transfer failed: %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:502 msgid "Releasing request(s) made during staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:505 msgid "Neither source nor destination were staged, skipping releasing requests" msgstr "" #: src/libs/data-staging/Scheduler.cpp:526 msgid "Trying next replica" msgstr "" #: src/libs/data-staging/Scheduler.cpp:531 msgid "unregister" msgstr "" #: src/libs/data-staging/Scheduler.cpp:531 msgid "register" msgstr "" #: src/libs/data-staging/Scheduler.cpp:530 #, c-format msgid "Will %s in destination index service" msgstr "" #: src/libs/data-staging/Scheduler.cpp:534 msgid "Destination is not index service, skipping replica registration" msgstr "" #: 
src/libs/data-staging/Scheduler.cpp:547 msgid "Error registering replica, moving to end of data staging" msgstr "" #: src/libs/data-staging/Scheduler.cpp:556 msgid "Will process cache" msgstr "" #: src/libs/data-staging/Scheduler.cpp:560 msgid "File is not cacheable, skipping cache processing" msgstr "" #: src/libs/data-staging/Scheduler.cpp:574 msgid "Cancellation complete" msgstr "" #: src/libs/data-staging/Scheduler.cpp:588 msgid "Will wait 10s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:594 msgid "Error in cache processing, will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:603 msgid "Will retry without caching" msgstr "" #: src/libs/data-staging/Scheduler.cpp:621 msgid "Proxy has expired" msgstr "" #: src/libs/data-staging/Scheduler.cpp:632 #, c-format msgid "%i retries left, will wait until %s before next attempt" msgstr "" #: src/libs/data-staging/Scheduler.cpp:648 msgid "Out of retries" msgstr "" #: src/libs/data-staging/Scheduler.cpp:650 msgid "Permanent failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:656 msgid "Finished successfully" msgstr "" #: src/libs/data-staging/Scheduler.cpp:666 msgid "Returning to generator" msgstr "" #: src/libs/data-staging/Scheduler.cpp:840 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:894 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:902 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:915 msgid "Could not find any usable delivery service, forcing local transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:931 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "" #: src/libs/data-staging/Scheduler.cpp:958 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "" #: src/libs/data-staging/Scheduler.cpp:968 msgid "No remote delivery services are usable, forcing local delivery" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1172 msgid "Cancelling active transfer" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1182 msgid "Processing thread timed out.
Restarting DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1250 msgid "Will use bulk request" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1272 msgid "No delivery endpoints available, will try later" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1291 msgid "Scheduler received NULL DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1301 msgid "Scheduler received invalid DTR" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1390 msgid "Scheduler starting up" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1391 msgid "Scheduler configuration:" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1392 #, c-format msgid " Pre-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1393 #, c-format msgid " Delivery slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1394 #, c-format msgid " Post-processor slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1395 #, c-format msgid " Emergency slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1396 #, c-format msgid " Prepared slots: %u" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1397 #, c-format msgid "" " Shares configuration:\n" "%s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1400 msgid " Delivery service: LOCAL" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1401 #, c-format msgid " Delivery service: %s" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1406 msgid "Failed to create DTR dump thread" msgstr "" #: src/libs/data-staging/Scheduler.cpp:1423 #: src/services/data-staging/DataDeliveryService.cpp:531 #, c-format msgid "DTR %s cancelled" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:30 msgid "Generator started" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:31 msgid "Starting DTR threads" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:44 msgid "No valid credentials found, exiting" msgstr "" #: src/libs/data-staging/examples/Generator.cpp:55 #, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "" #: src/services/a-rex/arex.cpp:340 src/services/candypond/CandyPond.cpp:569 #: src/services/data-staging/DataDeliveryService.cpp:705 #, c-format msgid "SOAP operation is not supported: %s" msgstr "" #: src/services/a-rex/arex.cpp:358 src/services/a-rex/arex.cpp:403 #, c-format msgid "Security Handlers processing failed: %s" msgstr "" #: src/services/a-rex/arex.cpp:381 msgid "" "Can't obtain configuration. Public information is disallowed for this user." msgstr "" #: src/services/a-rex/arex.cpp:388 msgid "Can't obtain configuration. Only public information is provided." 
msgstr "" #: src/services/a-rex/arex.cpp:416 src/services/a-rex/rest/rest.cpp:740 #, c-format msgid "Connection from %s: %s" msgstr "" #: src/services/a-rex/arex.cpp:419 src/services/a-rex/rest/rest.cpp:744 #, c-format msgid "process: method: %s" msgstr "" #: src/services/a-rex/arex.cpp:420 src/services/a-rex/rest/rest.cpp:745 #, c-format msgid "process: endpoint: %s" msgstr "" #: src/services/a-rex/arex.cpp:445 #, c-format msgid "process: id: %s" msgstr "" #: src/services/a-rex/arex.cpp:446 #, c-format msgid "process: subop: %s" msgstr "" #: src/services/a-rex/arex.cpp:453 #, c-format msgid "process: subpath: %s" msgstr "" #: src/services/a-rex/arex.cpp:491 src/services/candypond/CandyPond.cpp:543 #: src/services/data-staging/DataDeliveryService.cpp:665 #: src/tests/echo/echo.cpp:98 #, c-format msgid "process: request=%s" msgstr "" #: src/services/a-rex/arex.cpp:496 src/services/candypond/CandyPond.cpp:548 #: src/services/data-staging/DataDeliveryService.cpp:670 #: src/tests/count/count.cpp:69 msgid "input does not define operation" msgstr "" #: src/services/a-rex/arex.cpp:499 src/services/candypond/CandyPond.cpp:551 #: src/services/data-staging/DataDeliveryService.cpp:673 #: src/tests/count/count.cpp:72 #, c-format msgid "process: operation: %s" msgstr "" #: src/services/a-rex/arex.cpp:526 msgid "POST request on special path is not supported" msgstr "" #: src/services/a-rex/arex.cpp:531 msgid "process: factory endpoint" msgstr "" #: src/services/a-rex/arex.cpp:575 src/services/candypond/CandyPond.cpp:580 #: src/services/data-staging/DataDeliveryService.cpp:716 #: src/tests/echo/echo.cpp:158 #, c-format msgid "process: response=%s" msgstr "" #: src/services/a-rex/arex.cpp:580 msgid "Per-job POST/SOAP requests are not supported" msgstr "" #: src/services/a-rex/arex.cpp:589 msgid "process: GET" msgstr "" #: src/services/a-rex/arex.cpp:590 #, c-format msgid "GET: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:623 msgid "process: HEAD" msgstr "" #: src/services/a-rex/arex.cpp:624 #, c-format msgid "HEAD: id %s path %s" msgstr "" #: src/services/a-rex/arex.cpp:657 msgid "process: PUT" msgstr "" #: src/services/a-rex/arex.cpp:690 msgid "process: DELETE" msgstr "" #: src/services/a-rex/arex.cpp:723 #, c-format msgid "process: method %s is not supported" msgstr "" #: src/services/a-rex/arex.cpp:726 msgid "process: method is not defined" msgstr "" #: src/services/a-rex/arex.cpp:836 msgid "Failed to run Grid Manager thread" msgstr "" #: src/services/a-rex/arex.cpp:889 #, c-format msgid "Failed to process configuration in %s" msgstr "" #: src/services/a-rex/arex.cpp:894 msgid "No control directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:898 msgid "No session directory set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:902 msgid "No LRMS set in configuration" msgstr "" #: src/services/a-rex/arex.cpp:961 #, c-format msgid "Failed to create control directory %s" msgstr "" #: src/services/a-rex/arex.cpp:965 #, c-format msgid "Failed to update control directory %s" msgstr "" #: src/services/a-rex/arex.cpp:972 msgid "Failed to start GM threads" msgstr "" #: src/services/a-rex/arex.cpp:1008 #, c-format msgid "Created entry for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1010 #, c-format msgid "Failed to create entry for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1013 #, c-format msgid "Empty data for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1016 #, c-format msgid "Failed to read data for JWT issuer %s" msgstr "" #: 
src/services/a-rex/authop.cpp:26 msgid "CheckOperationAllowed: missing configuration" msgstr "" #: src/services/a-rex/authop.cpp:80 msgid "CheckOperationAllowed: allowed due to missing configuration scopes" msgstr "" #: src/services/a-rex/authop.cpp:83 #, c-format msgid "CheckOperationAllowed: token scopes: %s" msgstr "" #: src/services/a-rex/authop.cpp:84 #, c-format msgid "CheckOperationAllowed: configuration scopes: %s" msgstr "" #: src/services/a-rex/authop.cpp:87 msgid "CheckOperationAllowed: allowed due to matching scopes" msgstr "" #: src/services/a-rex/authop.cpp:91 msgid "CheckOperationAllowed: token scopes do not match required scopes" msgstr "" #: src/services/a-rex/authop.cpp:97 msgid "CheckOperationAllowed: allowed for TLS connection" msgstr "" #: src/services/a-rex/authop.cpp:101 msgid "CheckOperationAllowed: no supported identity found" msgstr "" #: src/services/a-rex/cachecheck.cpp:37 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:710 #, c-format msgid "Error with cache configuration: %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:53 #: src/services/candypond/CandyPond.cpp:318 msgid "Error with cache configuration" msgstr "" #: src/services/a-rex/cachecheck.cpp:78 #: src/services/candypond/CandyPond.cpp:146 #: src/services/candypond/CandyPond.cpp:343 #, c-format msgid "Looking up URL %s" msgstr "" #: src/services/a-rex/cachecheck.cpp:80 #: src/services/candypond/CandyPond.cpp:155 #, c-format msgid "Cache file is %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:22 #: src/services/a-rex/put.cpp:163 src/services/a-rex/put.cpp:204 #, c-format msgid "%s: there is no such job: %s" msgstr "" #: src/services/a-rex/change_activity_status.cpp:30 #, c-format msgid "%s: put log %s: there is no payload" msgstr "" #: src/services/a-rex/change_activity_status.cpp:36 #, c-format msgid "%s: put log %s: unrecognized payload" msgstr "" #: src/services/a-rex/change_activity_status.cpp:75 msgid "A-REX REST: Failed to resume job" msgstr "" #: src/services/a-rex/change_activity_status.cpp:79 #, c-format msgid "A-REX REST: State change not allowed: from %s to %s" msgstr "" #: src/services/a-rex/create_activity.cpp:24 msgid "NEW: put new job: there is no payload" msgstr "" #: src/services/a-rex/create_activity.cpp:28 msgid "NEW: put new job: max jobs total limit reached" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:47 msgid "Wiping and re-creating whole storage" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:207 #: src/services/a-rex/delegation/DelegationStore.cpp:309 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:269 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" #: src/services/a-rex/delegation/DelegationStore.cpp:289 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" #: src/services/a-rex/get.cpp:172 src/services/a-rex/get.cpp:227 #: src/services/a-rex/get.cpp:313 #, c-format msgid "Get: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:380 #, c-format msgid "Head: there is no job %s - %s" msgstr "" #: src/services/a-rex/get.cpp:436 msgid "Failed to extract credential information" msgstr "" #: src/services/a-rex/get.cpp:439 #, c-format msgid "Checking cache permissions: DN: %s" msgstr "" #: src/services/a-rex/get.cpp:440 #, c-format msgid "Checking cache permissions: VO: %s" msgstr "" #: src/services/a-rex/get.cpp:442 #, c-format 
msgid "Checking cache permissions: VOMS attr: %s" msgstr "" #: src/services/a-rex/get.cpp:452 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "" #: src/services/a-rex/get.cpp:455 #, c-format msgid "DN %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:458 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "" #: src/services/a-rex/get.cpp:461 #, c-format msgid "VO %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:467 src/services/a-rex/get.cpp:486 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "" #: src/services/a-rex/get.cpp:475 src/services/a-rex/get.cpp:494 #, c-format msgid "VOMS attr %s matches %s" msgstr "" #: src/services/a-rex/get.cpp:476 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "" #: src/services/a-rex/get.cpp:479 src/services/a-rex/get.cpp:498 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "" #: src/services/a-rex/get.cpp:495 #, c-format msgid "Cache access allowed to %s by VO %s and group %s" msgstr "" #: src/services/a-rex/get.cpp:501 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "" #: src/services/a-rex/get.cpp:507 #, c-format msgid "No match found in cache access rules for %s" msgstr "" #: src/services/a-rex/get.cpp:517 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "" #: src/services/a-rex/get.cpp:520 #, c-format msgid "Get from cache: Invalid URL %s" msgstr "" #: src/services/a-rex/get.cpp:537 msgid "Get from cache: Error in cache configuration" msgstr "" #: src/services/a-rex/get.cpp:546 msgid "Get from cache: File not in cache" msgstr "" #: src/services/a-rex/get.cpp:549 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" #: src/services/a-rex/get.cpp:559 msgid "Get from cache: Cached file is locked" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:114 msgid "Failed to start cache clean script" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:115 msgid "Cache cleaning script failed" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:183 #, c-format msgid "External request for attention %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:201 #, c-format msgid "Failed to open heartbeat file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:223 msgid "Starting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:224 #, c-format msgid "Used configuration file %s" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:232 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:244 msgid "Failed to start new thread: cache won't be cleaned" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:251 msgid "Failed to activate Jobs Processing object, exiting Grid Manager thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:260 #, c-format msgid "" "Error adding communication interface in %s. Maybe another instance of A-REX " "is already running." 
msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:263 #, c-format msgid "" "Error adding communication interface in %s. Maybe permissions are not " "suitable." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:270 msgid "Failed to start new thread for monitoring job requests" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:276 msgid "Picking up left jobs" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:279 msgid "Starting data staging threads" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:283 msgid "Starting jobs' monitoring" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:291 #, c-format msgid "" "SSHFS mount point of session directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:295 #, c-format msgid "" "SSHFS mount point of runtime directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:300 #, c-format msgid "" "SSHFS mount point of cache directory (%s) is broken - waiting for " "reconnect ..." msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:349 #, c-format msgid "Orphan delegation lock detected (%s) - cleaning" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:354 msgid "Failed to obtain delegation locks for cleaning orphaned locks" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:368 msgid "Waking up" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:371 msgid "Stopping jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:373 msgid "Exiting jobs processing thread" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:391 msgid "Requesting to stop job processing" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:399 msgid "Waiting for main job processing thread to exit" msgstr "" #: src/services/a-rex/grid-manager/GridManager.cpp:401 msgid "Stopped job processing" msgstr "" #: src/services/a-rex/grid-manager/accounting/AAR.cpp:73 msgid "Cannot find information abouto job submission endpoint" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:58 #, c-format msgid "Failed to read database schema file at %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:68 msgid "Accounting database initialized successfully" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:70 msgid "Accounting database connection has been established" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:80 #, c-format msgid "%s. SQLite database error: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:82 #, c-format msgid "SQLite database error: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:110 #, c-format msgid "Directory %s to store accounting database has been created." msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:112 #, c-format msgid "" "Accounting database cannot be created. Faile to create parent directory %s." 
msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:116 #, c-format msgid "Accounting database cannot be created: %s is not a directory" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:123 msgid "Failed to initialize accounting database" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:130 #, c-format msgid "Accounting database file (%s) is not a regular file" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:136 msgid "Error opening accounting database" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:154 msgid "Closing connection to SQLite accounting database" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:243 #, c-format msgid "Failed to fetch data from %s accounting database table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:260 #, c-format msgid "Failed to add '%s' into the accounting database %s table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:327 msgid "Failed to fetch data from accounting database Endpoints table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:344 #, c-format msgid "" "Failed to add '%s' URL (interface type %s) into the accounting database " "Endpoints table" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:370 #, c-format msgid "Failed to query AAR database ID for job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:431 #, c-format msgid "Failed to insert AAR into the database for job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:432 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:481 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:512 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:528 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:544 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:565 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:581 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:596 #, c-format msgid "SQL statement used: %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:437 #, c-format msgid "Failed to write authtoken attributes for job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:441 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:498 #, c-format msgid "Failed to write event records for job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:452 #, c-format msgid "" "Cannot to update AAR. Cannot find registered AAR for job %s in accounting " "database." 
msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:480 #, c-format msgid "Failed to update AAR in the database for job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:486 #, c-format msgid "Failed to write RTEs information for the job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:490 #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:494 #, c-format msgid "Failed to write data transfers information for the job %s" msgstr "" #: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:590 #, c-format msgid "Unable to add event: cannot find AAR for job %s in accounting database." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:73 #, c-format msgid "Unknown option %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80 msgid "Job ID argument is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86 msgid "Path to user's proxy file should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92 msgid "User name should be specified." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:98 msgid "Path to .local job status file is required." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:106 msgid "Generating ceID prefix from hostname automatically" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:109 msgid "" "Cannot determine hostname from gethostname() to generate ceID automatically." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:118 #, c-format msgid "ceID prefix is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:126 #, c-format msgid "Getting currect timestamp for BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:135 msgid "Parsing .local file to obtain job-specific identifiers and info" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145 #, c-format msgid "globalid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:148 #, c-format msgid "headnode is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:151 #, c-format msgid "interface is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155 msgid "There is no local LRMS ID. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158 #, c-format msgid "localid is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:161 #, c-format msgid "queue name is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:164 #, c-format msgid "owner subject is set to %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:166 msgid "" "Job did not finished successfully. Message will not be written to BLAH log." msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:174 #, c-format msgid "Job timestamp successfully parsed as %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:178 msgid "Can not read information from the local job status file" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:194 #, c-format msgid "" "Unsupported submission interface %s. Seems arc-blahp-logger need to be " "updated accordingly. Please submit the bug to bugzilla." 
msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:204 msgid "Parsing VOMS AC to get FQANs information" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:217 #, c-format msgid "Found VOMS AC attribute: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:230 msgid "VOMS AC attribute is a tag" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237 msgid "Skipping policyAuthority VOMS AC attribute" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:241 msgid "VOMS AC attribute is the FQAN" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:249 msgid "No FQAN found. Using None as userFQAN value" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:263 #, c-format msgid "Assembling BLAH parser log entry: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:268 #, c-format msgid "Writing the info to the BLAH parser log: %s" msgstr "" #: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:276 #, c-format msgid "Cannot open BLAH log file '%s'" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:36 #, c-format msgid "Missing cancel-%s-job - job cancellation may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:40 #, c-format msgid "Missing submit-%s-job - job submission to LRMS may not work" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:44 #, c-format msgid "Missing scan-%s-job - may miss when job finished executing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:58 #, c-format msgid "Wrong option in %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:69 #, c-format msgid "Can't read configuration file at %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:79 #, c-format msgid "Can't recognize type of configuration file at %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:82 msgid "Could not determine configuration type or configuration is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:163 msgid "lrms is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:196 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:205 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:214 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:223 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:232 msgid "Missing number in maxjobs" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:199 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:208 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:217 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:226 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:235 #, c-format msgid "Wrong number in maxjobs: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:245 #, c-format msgid "Wrong number in wakeupperiod: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:251 msgid "mail parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:257 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:261 msgid "Wrong number in defaultttl command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:267 msgid "Wrong number in maxrerun command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:274 msgid "State name for plugin is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:278 msgid "Options for plugin are missing" msgstr "" #: 
src/services/a-rex/grid-manager/conf/CoreConfig.cpp:281 #, c-format msgid "Failed to register plugin for state %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:287 msgid "Session root directory is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:290 msgid "Junk in sessiondir command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:302 msgid "Missing directory in controldir command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:307 msgid "" "'control' configuration option is no longer supported, please use " "'controldir' instead" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:312 msgid "User for helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:315 msgid "Only user '.' for helper program is supported" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:318 msgid "Helper program is missing" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:339 msgid "Wrong option in fixdirectories" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366 msgid "Wrong option in delegationdb" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:375 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608 msgid "forcedefaultvoms parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:486 msgid "Wrong number in maxjobdesc command" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:535 msgid "Missing file name in [arex/jura] logfile" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:546 #, c-format msgid "Wrong number in urdelivery_frequency: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:601 msgid "No queue name given in queue block name" msgstr "" #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:617 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652 msgid "advertisedvo parameter is empty" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:117 #, c-format msgid "\tSession root dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:118 #, c-format msgid "\tControl dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:119 #, c-format msgid "\tdefault LRMS : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120 #, c-format msgid "\tdefault queue : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:121 #, c-format msgid "\tdefault ttl : %u" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126 msgid "No valid caches found in configuration, caching is disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131 #, c-format msgid "\tCache : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:133 #, c-format msgid "\tCache link dir : %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:136 #, c-format msgid "\tCache (read-only): %s" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:138 msgid "\tCache cleaning enabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:139 msgid "\tCache cleaning disabled" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:256 msgid "Starting controldir update tool." msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:258 msgid "Failed to start controldir update tool." msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:261 #, c-format msgid "Failed to run controldir update tool. 
Exit code: %i" msgstr "" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:381 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:35 msgid "Can't read configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:41 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:29 msgid "Can't recognize type of configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:47 msgid "Configuration error" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:77 msgid "Bad number in maxdelivery" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:83 msgid "Bad number in maxemergency" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:89 msgid "Bad number in maxprocessor" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxprepared" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxtransfertries" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:112 msgid "Bad number in speedcontrol" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:123 #, c-format msgid "Bad number in definedshare %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:132 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:143 msgid "Bad number in remotesizelimit" msgstr "" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:168 msgid "Bad value for loglevel" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:24 msgid "Can't open configuration file" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:45 msgid "Not enough parameters in copyurl" msgstr "" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:54 msgid "Not enough parameters in linkurl" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:185 #, c-format msgid "Wrong directory in %s" msgstr "" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:104 #, c-format msgid "Failed setting file owner: %s" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:36 #, c-format msgid "Could not read data staging configuration from %s" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:44 #, c-format msgid "Can't read transfer states from %s. Perhaps A-REX is not running?" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:100 msgid "gm-jobs displays information on current jobs in the system." 
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:105 msgid "display more information on each job" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:110 #: src/services/a-rex/grid-manager/gm_kick.cpp:24 msgid "use specified configuration file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:111 #: src/services/a-rex/grid-manager/gm_kick.cpp:25 msgid "file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:115 msgid "read information from specified control directory" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:116 msgid "dir" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:120 msgid "print summary of jobs in each transfer share" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:125 msgid "do not print list of jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:130 msgid "do not print number of jobs in each state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:135 msgid "print state of the service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:140 msgid "show only jobs of user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:141 #: src/services/a-rex/grid-manager/gm_jobs.cpp:151 #: src/services/a-rex/grid-manager/gm_jobs.cpp:161 msgid "dn" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:145 msgid "request to cancel job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:146 #: src/services/a-rex/grid-manager/gm_jobs.cpp:156 #: src/services/a-rex/grid-manager/gm_jobs.cpp:166 #: src/services/a-rex/grid-manager/gm_jobs.cpp:176 #: src/services/a-rex/grid-manager/gm_kick.cpp:30 msgid "id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:150 msgid "" "request to cancel jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:155 msgid "request to clean job(s) with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:160 msgid "" "request to clean jobs belonging to user(s) with specified subject name(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:165 msgid "show only jobs with specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:170 msgid "print list of available delegation IDs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:175 msgid "print delegation token of specified ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:180 msgid "print main delegation token of specified Job ID(s)" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:181 msgid "job id" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:185 msgid "" "output requested elements (jobs list, delegation ids and tokens) to file" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:186 msgid "file name" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:209 #, c-format msgid "Using configuration at %s" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:232 #, c-format msgid "Failed to open output file '%s'" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:241 msgid "Looking for current jobs" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:278 #, c-format msgid "Job: %s : ERROR : Unrecognizable state" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:287 #, c-format msgid "Job: %s : ERROR : No local information." 
msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:461 #, c-format msgid "Job: %s : ERROR : Failed to put cancel mark" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:465 #, c-format msgid "Job: %s : Cancel request put but failed to communicate to service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:467 #, c-format msgid "Job: %s : Cancel request put and communicated to service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:478 #, c-format msgid "Job: %s : ERROR : Failed to put clean mark" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:482 #, c-format msgid "Job: %s : Clean request put but failed to communicate to service" msgstr "" #: src/services/a-rex/grid-manager/gm_jobs.cpp:484 #, c-format msgid "Job: %s : Clean request put and communicated to service" msgstr "" #: src/services/a-rex/grid-manager/gm_kick.cpp:18 msgid "" "gm-kick wakes up the A-REX corresponding to the given control directory. If " "no directory is given it uses the control directory found in the " "configuration file." msgstr "" #: src/services/a-rex/grid-manager/gm_kick.cpp:29 msgid "inform about changes in particular job (can be used multiple times)" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:39 #, c-format msgid "Failed to acquire source: %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:44 #, c-format msgid "Failed to resolve %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:61 #, c-format msgid "Failed to check %s" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:75 msgid "job_description_file [proxy_file]" msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:76 msgid "" "inputcheck checks that input files specified in the job description are " "available and accessible using the credentials in the given proxy file." msgstr "" #: src/services/a-rex/grid-manager/inputcheck.cpp:88 msgid "Wrong number of arguments given" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:75 #, c-format msgid "" "DTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:89 #, c-format msgid "%s: Job cancel request from DTR generator to scheduler" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:94 #, c-format msgid "%s: Returning canceled job from DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:135 #, c-format msgid "%s: Re-requesting attention from DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:145 #, c-format msgid "DTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:164 msgid "Exiting Generator thread" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:236 msgid "Shutting down data staging threads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:246 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:259 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:287 msgid "DTRGenerator is not running!" 
msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:249 #, c-format msgid "Received DTR %s during Generator shutdown - may not be processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:263 msgid "DTRGenerator was sent null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:272 #, c-format msgid "%s: Received job in DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:275 #, c-format msgid "%s: Failed to receive job in DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:282 msgid "DTRGenerator got request to cancel null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:297 msgid "DTRGenerator is queried about null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:327 msgid "DTRGenerator is asked about null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:355 msgid "DTRGenerator is requested to remove null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:362 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:370 #, c-format msgid "%s: Trying to remove job from data staging which is still active" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:378 #, c-format msgid "%s: Trying remove job from data staging which does not exist" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:389 #, c-format msgid "%s: Invalid DTR" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:406 #, c-format msgid "%s: Received DTR %s to copy file %s in state %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:410 #, c-format msgid "%s: Received DTR belongs to inactive job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:427 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1065 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:474 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:532 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:646 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:856 #, c-format msgid "%s: Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:436 #, c-format msgid "%s: DTR %s to copy file %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:442 #, c-format msgid "%s: Cancelling other DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:452 #, c-format msgid "%s: DTR %s to copy to %s failed but is not mandatory" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:462 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:727 #, c-format msgid "%s: Failed to read list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:476 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:617 #, c-format msgid "%s: Failed to read dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:478 #, c-format msgid "%s: Going through files in list %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:482 #, c-format msgid "%s: Removing %s from dynamic output file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:486 #, c-format msgid "%s: Failed to write back dynamic output files in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502 #, c-format msgid "%s: Failed to write list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:506 
#, c-format msgid "%s: Failed to write list of output status files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:518 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:739 #, c-format msgid "%s: Failed to read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:537 #, c-format msgid "%s: Failed to write list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:549 #, c-format msgid "%s: Received DTR with two remote endpoints!" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:561 #: src/services/candypond/CandyPondGenerator.cpp:105 #, c-format msgid "No active job id %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:605 #, c-format msgid "%s: Failed to read list of output files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:631 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:650 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:777 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:909 #, c-format msgid "%s: Failed to clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:641 #, c-format msgid "%s: Failed to read list of input files, can't clean up session dir" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "uploads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667 msgid "downloads" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "cancelled" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664 msgid "finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:662 #, c-format msgid "%s: All %s %s successfully" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:666 #, c-format msgid "%s: Some %s failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:670 #, c-format msgid "%s: Requesting attention from DTR generator" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:681 msgid "DTRGenerator is requested to process null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "download" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687 msgid "upload" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:686 #, c-format msgid "%s: Received data staging request to %s files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:748 #, c-format msgid "%s: Duplicate file in list of input files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:801 #, c-format msgid "%s: Reading output files from user generated list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:803 #, c-format msgid "%s: Error reading user generated output file list in %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:834 #, c-format msgid "%s: Failed to list output directory %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:852 #, c-format msgid "%s: Adding new output file %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:875 #, c-format msgid "%s: Two identical output destinations: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:888 #, c-format msgid "%s: Cannot upload two different files %s and %s to 
same LFN: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:920 #, c-format msgid "%s: Received job in a bad state: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:928 #, c-format msgid "%s: Session directory processing takes too long - %u.%06u seconds" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:976 #, c-format msgid "" "%s: Destination file %s was possibly left unfinished from previous A-REX " "run, will overwrite" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1071 #, c-format msgid "%s: Failed writing local information" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1089 #, c-format msgid "%s: Cancelling active DTRs" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1096 msgid "DTRGenerator is asked to check files for null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1116 #, c-format msgid "%s: Can't read list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1131 #, c-format msgid "%s: Checking user uploadable file: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1136 #, c-format msgid "%s: User has uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1143 #, c-format msgid "%s: Failed writing changed input file." msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1147 #, c-format msgid "%s: Critical error for uploadable file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153 #, c-format msgid "%s: User has NOT uploaded file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1165 #, c-format msgid "%s: Uploadable files timed out" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1221 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1247 #, c-format msgid "%s: Can't convert checksum %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1228 #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1242 #, c-format msgid "%s: Can't convert filesize %s to int for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1237 #, c-format msgid "%s: Invalid size/checksum information (%s) for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1259 #, c-format msgid "%s: Invalid file: %s is too big." msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1275 #, c-format msgid "%s: Failed to switch user ID to %d/%d to read file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1281 #, c-format msgid "%s: Failed to open file %s for reading" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1289 #, c-format msgid "%s: Error accessing file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1301 #, c-format msgid "%s: Error reading file %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1316 #, c-format msgid "%s: File %s has wrong checksum: %llu. Expected %lli" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1322 #, c-format msgid "%s: Checksum %llu verified for %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1334 msgid "" "Found unfinished DTR transfers. 
It is possible the previous A-REX process " "did not shut down normally" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1341 #, c-format msgid "Found DTR %s for file %s left in transferring state from previous run" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1350 msgid "DTRGenerator is requested to clean links for null job" msgstr "" #: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1366 #, c-format msgid "%s: Cache cleaning takes too long - %u.%06u seconds" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:108 #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:190 #, c-format msgid "%s: Job monitoring counter is broken" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:115 #, c-format msgid "%s: Job monitoring is unintentionally lost" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:124 #, c-format msgid "%s: Job monitoring stopped successfully" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:129 #, c-format msgid "" "%s: Job monitoring stop requested with %u active references and %s queue " "associated" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:131 #, c-format msgid "%s: Job monitoring stop requested with %u active references" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:195 #, c-format msgid "%s: Job monitoring is lost due to removal from queue" msgstr "" #: src/services/a-rex/grid-manager/jobs/GMJob.cpp:278 #, c-format msgid "%s: PushSorted failed to find job where expected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:161 #, c-format msgid "Replacing queue '%s' with '%s'" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:255 #, c-format msgid "Bad name for stdout: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:263 #, c-format msgid "Bad name for stderr: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:326 #, c-format msgid "Bad name for runtime environment: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:371 msgid "Job description file could not be read."
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:422 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:436 #, c-format msgid "Bad name for executable: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:89 msgid "Failed to start data staging threads" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:190 #, c-format msgid "" "%s: Failed reading .local and changing state, job and A-REX may be left in " "an inconsistent state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:195 #, c-format msgid "%s: unexpected failed job add request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:206 #, c-format msgid "%s: unexpected job add request: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:259 #, c-format msgid "%s: job for attention" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:269 msgid "all for attention" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:286 #, c-format msgid "%s: job found while scanning" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:314 #, c-format msgid "%s: job will wait for external process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331 #, c-format msgid "%s: job assigned for slow polling" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349 #, c-format msgid "%s: job being processed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:384 #, c-format msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:386 #, c-format msgid "%s: %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:398 #, c-format msgid "%s: Failed storing failure reason: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404 #, c-format msgid "%s: Failed reading job description: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:416 #, c-format msgid "%s: Failed parsing job request." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:466 #, c-format msgid "%s: Failed writing list of output files: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:492 #, c-format msgid "%s: Failed obtaining lrms id" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:506 #, c-format msgid "%s: Failed writing local information: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:538 #, c-format msgid "%s: Failed creating grami file" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:542 #, c-format msgid "%s: Failed setting executable permissions" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550 #, c-format msgid "%s: state SUBMIT: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:557 #, c-format msgid "%s: Failed running submission process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:562 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:669 #, c-format msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:578 #, c-format msgid "" "%s: Job submission to LRMS takes too long, but ID is already obtained. " "Pretending submission is done." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:585 #, c-format msgid "%s: Job submission to LRMS takes too long. Failing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:594 #, c-format msgid "%s: state SUBMIT: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:599 #, c-format msgid "%s: Job submission to LRMS failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:620 #, c-format msgid "%s: state CANCELING: timeout waiting for cancellation" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:626 #, c-format msgid "%s: state CANCELING: job diagnostics collected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:654 #, c-format msgid "%s: state CANCELING: starting child: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:656 #, c-format msgid "%s: Job has completed already. No action taken to cancel" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:664 #, c-format msgid "%s: Failed running cancellation process" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:683 #, c-format msgid "" "%s: Job cancellation takes too long, but diagnostic collection seems to be " "done. Pretending cancellation succeeded." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: Job cancellation takes too long. Failing." msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:699 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:705 #, c-format msgid "%s: Failed to cancel running job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:724 #, c-format msgid "%s: State: %s: data staging finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:759 #, c-format msgid "%s: State: %s: still in data staging" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:772 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:782 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:803 #, c-format msgid "%s: Reprocessing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:810 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:814 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:898 #, c-format msgid "%s: Reading status of new job failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:911 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:913 #, c-format msgid "%s: Processing job description failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:952 #, c-format msgid "%s: new job is accepted" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:964 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:969 #, c-format msgid "%s: old job is accepted" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:980 #, c-format msgid "%s: State: ACCEPTED" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:986 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1009 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1015 #, c-format msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1031 #, c-format msgid "%s: State: PREPARING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1038 #, c-format msgid "%s: Failed obtaining local job information." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1091 #, c-format msgid "%s: State: SUBMIT" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1111 #, c-format msgid "%s: State: CANCELING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1131 #, c-format msgid "%s: State: INLRMS" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: INLRMS - checking for pending(%u) and mark" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1138 #, c-format msgid "%s: State: INLRMS - checking for not pending" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1140 #, c-format msgid "%s: Job finished" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1144 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1157 #, c-format msgid "%s: State: INLRMS - no mark found" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1169 #, c-format msgid "%s: State: FINISHING" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1190 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1207 #, c-format msgid "%s: restarted PREPARING job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1223 #, c-format msgid "%s: restarted INLRMS job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1232 #, c-format msgid "%s: restarted FINISHING job" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1237 #, c-format msgid "%s: Can't rerun on request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1239 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1250 #, c-format msgid "%s: Job is too old - deleting" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1295 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1313 #, c-format msgid "%s: Canceling job because of user request" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1327 #, c-format msgid "%s: Failed to turn job into failed during cancel processing." 
msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1359 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1367 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1373 #, c-format msgid "%s: Plugin execution failed" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1480 #, c-format msgid "%s: State: %s from %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1529 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1556 #, c-format msgid "%s: Delete request due to internal problems" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1591 #, c-format msgid "%s: Job failure detected" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1651 #, c-format msgid "Failed to move file %s to %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1659 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1769 #, c-format msgid "Failed reading control directory: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1729 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2043 #, c-format msgid "Helper process start failed: %s" msgstr "" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2050 #, c-format msgid "Stopping helper process %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:61 #, c-format msgid "Error with hearbeatfile: %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:73 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:139 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:136 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr "" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:107 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:186 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:178 msgid "" "gmetric_bin_path empty in arc.conf (should never happen the default value " "should be used)" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:114 msgid ": Accounting records reporter tool is not specified" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for accounting reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting accounting reporter child process" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:176 msgid ": Failure creating accounting database connection" msgstr "" #: src/services/a-rex/grid-manager/log/JobLog.cpp:202 #, c-format msgid ": writing accounting record took %llu ms" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:74 #, c-format msgid "Session dir '%s' contains user specific substitutions - skipping it" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:86 #, c-format msgid "Sessiondir %s: Free space %f GB" msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:94 msgid "No session directories found in configuration." msgstr "" #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:125 msgid "No cachedirs found/configured for calculation of free space." 
msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:29 msgid "Failed reading local information" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:79 #, c-format msgid "Running mailer command (%s)" msgstr "" #: src/services/a-rex/grid-manager/mail/send_mail.cpp:81 msgid "Failed running mailer" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:34 #, c-format msgid "%s: Job's helper exited" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:71 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:56 #, c-format msgid "%s: Failure creating slot for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunParallel.cpp:120 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:73 #, c-format msgid "%s: Failure starting child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:62 #, c-format msgid "%s: Failure creating data storage for child process" msgstr "" #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46 #: src/services/a-rex/grid-manager/run/RunRedirected.cpp:78 #, c-format msgid "%s: Failure waiting for child process to finish" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:47 msgid "[job description input]" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:48 msgid "" "Tool for writing the grami file representation of a job description file." msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:52 msgid "Name of grami file" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:57 msgid "Configuration file to load" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:58 msgid "arc.conf" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:62 msgid "Session directory to use" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:63 msgid "directory" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:79 msgid "No job description file name provided." msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:85 #, c-format msgid "Unable to parse job description input: %s" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:91 msgid "Unable to load ARC configuration file." 
msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:111 #, c-format msgid "Unable to write grami file: %s" msgstr "" #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:117 #, c-format msgid "Unable to write 'output' file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:53 #, c-format msgid "Resource information provider: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:56 msgid "Resource information provider failed to start" msgstr "" #: src/services/a-rex/information_collector.cpp:59 msgid "Resource information provider failed to run" msgstr "" #: src/services/a-rex/information_collector.cpp:63 #, c-format msgid "" "Resource information provider failed with exit status: %i\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:65 #, c-format msgid "" "Resource information provider log:\n" "%s" msgstr "" #: src/services/a-rex/information_collector.cpp:71 msgid "No new informational document assigned" msgstr "" #: src/services/a-rex/information_collector.cpp:73 #, c-format msgid "Obtained XML: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:87 msgid "Informational document is empty" msgstr "" #: src/services/a-rex/information_collector.cpp:212 msgid "OptimizedInformationContainer failed to create temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:215 #, c-format msgid "OptimizedInformationContainer created temporary file: %s" msgstr "" #: src/services/a-rex/information_collector.cpp:221 msgid "" "OptimizedInformationContainer failed to store XML document to temporary file" msgstr "" #: src/services/a-rex/information_collector.cpp:230 msgid "OptimizedInformationContainer failed to parse XML" msgstr "" #: src/services/a-rex/information_collector.cpp:242 msgid "OptimizedInformationContainer failed to rename temporary file" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:38 msgid "Default INTERNAL client constructor" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:41 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:61 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:83 msgid "Failed to load grid-manager configfile" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:46 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:66 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:88 msgid "Failed to set INTERNAL endpoint" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:131 msgid "Failed to identify grid-manager config file" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:150 #, c-format msgid "Failed to run configuration parser at %s." msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:154 #, c-format msgid "Parser failed with error code %i." msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:160 #, c-format msgid "No pid file is found at '%s'. Probably A-REX is not running." 
msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:175 #, c-format msgid "Failed to load grid-manager config file from %s" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:266 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:372 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:405 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:451 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:505 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:557 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:575 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:625 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:655 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:673 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:691 msgid "INTERNALClient is not initialized" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:456 msgid "Submitting job " msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:517 #, c-format msgid "Failed to copy input file: %s to path: %s" msgstr "" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:523 #, c-format msgid "Failed to set permissions on: %s" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:51 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:92 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:119 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:145 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:184 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:246 msgid "Failed to load grid-manager config file" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:191 #, c-format msgid "Job %s does not report a resumable state" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:196 #, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:205 msgid "Job resuming successful" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:251 #, c-format msgid "Failed retrieving information for job: %s" msgstr "" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:324 msgid "Retrieving job description of INTERNAL jobs is not supported" msgstr "" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:67 #, c-format msgid "Listing localjobs succeeded, %d localjobs found" msgstr "" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:83 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface " "(%s)." 
msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:38 msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:45 #, c-format msgid "Failed to delegate credentials to server - %s" msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:84 msgid "Failed preparing job description" msgstr "" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:127 msgid "Failed submitting job description" msgstr "" #: src/services/a-rex/job.cpp:78 #, c-format msgid "Using cached local account '%s'" msgstr "" #: src/services/a-rex/job.cpp:89 msgid "Will not map to 'root' account by default" msgstr "" #: src/services/a-rex/job.cpp:102 msgid "No local account name specified" msgstr "" #: src/services/a-rex/job.cpp:105 #, c-format msgid "Using local account '%s'" msgstr "" #: src/services/a-rex/job.cpp:109 msgid "TLS provides no identity, going for OTokens" msgstr "" #: src/services/a-rex/job.cpp:168 msgid "Failed to acquire A-REX's configuration" msgstr "" #: src/services/a-rex/job.cpp:240 #, c-format msgid "Cannot handle local user %s" msgstr "" #: src/services/a-rex/job.cpp:288 #, c-format msgid "%s: Failed to parse user policy" msgstr "" #: src/services/a-rex/job.cpp:293 #, c-format msgid "%s: Failed to load evaluator for user policy " msgstr "" #: src/services/a-rex/job.cpp:398 #, c-format msgid "%s: Unknown user policy '%s'" msgstr "" #: src/services/a-rex/job.cpp:738 src/services/a-rex/job.cpp:756 #, c-format msgid "Credential expires at %s" msgstr "" #: src/services/a-rex/job.cpp:740 src/services/a-rex/job.cpp:758 #, c-format msgid "Credential handling exception: %s" msgstr "" #: src/services/a-rex/job.cpp:924 #, c-format msgid "Failed to run external plugin: %s" msgstr "" #: src/services/a-rex/job.cpp:928 #, c-format msgid "Plugin response: %s" msgstr "" #: src/services/a-rex/job.cpp:1138 #, c-format msgid "Failed to create job in %s" msgstr "" #: src/services/a-rex/job.cpp:1147 #, c-format msgid "Out of tries while allocating new job ID in %s" msgstr "" #: src/services/a-rex/job.cpp:1397 msgid "No non-draining session dirs available" msgstr "" #: src/services/a-rex/put.cpp:150 #, c-format msgid "%s: put file %s: there is no payload" msgstr "" #: src/services/a-rex/put.cpp:156 #, c-format msgid "%s: put file %s: unrecognized payload" msgstr "" #: src/services/a-rex/put.cpp:172 src/services/a-rex/rest/rest.cpp:2050 #, c-format msgid "%s: put file %s: failed to create file: %s" msgstr "" #: src/services/a-rex/put.cpp:188 #, c-format msgid "%s: put file %s: %s" msgstr "" #: src/services/a-rex/put.cpp:210 #, c-format msgid "%s: delete file %s: failed to obtain file path: %s" msgstr "" #: src/services/a-rex/put.cpp:221 #, c-format msgid "%s: delete file %s: failed to open file/dir: %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:749 #, c-format msgid "REST: process %s at %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:797 src/services/a-rex/rest/rest.cpp:813 #: src/services/a-rex/rest/rest.cpp:1094 src/services/a-rex/rest/rest.cpp:1185 #: src/services/a-rex/rest/rest.cpp:1549 src/services/a-rex/rest/rest.cpp:2161 #, c-format msgid "process: method %s is not supported for subpath %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:819 #, c-format msgid "process: schema %s is not supported for subpath %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1182 src/services/a-rex/rest/rest.cpp:1546 #, c-format msgid "process: action %s is not 
supported for subpath %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1558 src/services/a-rex/rest/rest.cpp:1627 #: src/services/a-rex/rest/rest.cpp:1987 src/services/a-rex/rest/rest.cpp:2150 #, c-format msgid "REST:GET job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1674 src/services/a-rex/rest/rest.cpp:1682 #, c-format msgid "REST:KILL job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1699 src/services/a-rex/rest/rest.cpp:1707 #, c-format msgid "REST:CLEAN job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:1724 src/services/a-rex/rest/rest.cpp:1732 #: src/services/a-rex/rest/rest.cpp:1749 #, c-format msgid "REST:RESTART job %s - %s" msgstr "" #: src/services/a-rex/rest/rest.cpp:2040 #, c-format msgid "REST:PUT job %s: file %s: there is no payload" msgstr "" #: src/services/a-rex/rest/rest.cpp:2063 #, c-format msgid "HTTP:PUT %s: put file %s: %s" msgstr "" #: src/services/a-rex/test_cache_check.cpp:24 #: src/tests/count/test_client.cpp:20 #: src/tests/echo/echo_test4axis2c/test_client.cpp:20 #: src/tests/echo/test_client.cpp:21 msgid "Creating client side chain" msgstr "" #: src/services/a-rex/update_credentials.cpp:29 #, c-format msgid "" "UpdateCredentials: request = \n" "%s" msgstr "" #: src/services/a-rex/update_credentials.cpp:35 msgid "UpdateCredentials: missing Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:43 msgid "UpdateCredentials: wrong number of Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:51 msgid "UpdateCredentials: wrong number of elements inside Reference" msgstr "" #: src/services/a-rex/update_credentials.cpp:60 msgid "UpdateCredentials: EPR contains no JobID" msgstr "" #: src/services/a-rex/update_credentials.cpp:70 #, c-format msgid "UpdateCredentials: no job found: %s" msgstr "" #: src/services/a-rex/update_credentials.cpp:77 msgid "UpdateCredentials: failed to update credentials" msgstr "" #: src/services/a-rex/update_credentials.cpp:85 #, c-format msgid "" "UpdateCredentials: response = \n" "%s" msgstr "" #: src/services/candypond/CandyPond.cpp:52 msgid "No A-REX config file found in candypond configuration" msgstr "" #: src/services/candypond/CandyPond.cpp:56 #, c-format msgid "Using A-REX config file %s" msgstr "" #: src/services/candypond/CandyPond.cpp:60 #, c-format msgid "Failed to process A-REX configuration in %s" msgstr "" #: src/services/candypond/CandyPond.cpp:65 msgid "No caches defined in configuration" msgstr "" #: src/services/candypond/CandyPond.cpp:140 #: src/services/candypond/CandyPond.cpp:347 #, c-format msgid "Can't handle URL %s" msgstr "" #: src/services/candypond/CandyPond.cpp:150 msgid "Empty filename returned from FileCache" msgstr "" #: src/services/candypond/CandyPond.cpp:162 #, c-format msgid "Problem accessing cache file %s: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:210 #: src/services/candypond/CandyPond.cpp:474 msgid "No job ID supplied" msgstr "" #: src/services/candypond/CandyPond.cpp:219 #, c-format msgid "Bad number in priority element: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:228 msgid "No username supplied" msgstr "" #: src/services/candypond/CandyPond.cpp:235 #, c-format msgid "Supplied username %s does not match mapped username %s" msgstr "" #: src/services/candypond/CandyPond.cpp:249 msgid "No session directory found" msgstr "" #: src/services/candypond/CandyPond.cpp:253 #, c-format msgid "Using session dir %s" msgstr "" #: src/services/candypond/CandyPond.cpp:257 #, c-format msgid "Failed to stat session dir %s" msgstr "" #: 
src/services/candypond/CandyPond.cpp:262 #, c-format msgid "Session dir %s is owned by %i, but current mapped user is %i" msgstr "" #: src/services/candypond/CandyPond.cpp:289 #, c-format msgid "Failed to access proxy of given job id %s at %s" msgstr "" #: src/services/candypond/CandyPond.cpp:307 #, c-format msgid "DN is %s" msgstr "" #: src/services/candypond/CandyPond.cpp:385 #, c-format msgid "Permission checking passed for url %s" msgstr "" #: src/services/candypond/CandyPond.cpp:410 #: src/services/candypond/CandyPondGenerator.cpp:135 #, c-format msgid "Failed to move %s to %s: %s" msgstr "" #: src/services/candypond/CandyPond.cpp:441 #, c-format msgid "Starting new DTR for %s" msgstr "" #: src/services/candypond/CandyPond.cpp:443 #, c-format msgid "Failed to start new DTR for %s" msgstr "" #: src/services/candypond/CandyPond.cpp:487 #, c-format msgid "Job %s: all files downloaded successfully" msgstr "" #: src/services/candypond/CandyPond.cpp:494 #, c-format msgid "Job %s: Some downloads failed" msgstr "" #: src/services/candypond/CandyPond.cpp:499 #, c-format msgid "Job %s: files still downloading" msgstr "" #: src/services/candypond/CandyPond.cpp:511 msgid "CandyPond: Unauthorized" msgstr "" #: src/services/candypond/CandyPond.cpp:520 msgid "No local user mapping found" msgstr "" #: src/services/candypond/CandyPond.cpp:527 #: src/services/data-staging/DataDeliveryService.cpp:649 #, c-format msgid "Identity is %s" msgstr "" #: src/services/candypond/CandyPond.cpp:585 #: src/services/data-staging/DataDeliveryService.cpp:721 msgid "Security Handlers processing failed" msgstr "" #: src/services/candypond/CandyPond.cpp:592 msgid "Only POST is supported in CandyPond" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:88 #, c-format msgid "DTR %s finished with state %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:124 #, c-format msgid "Could not determine session directory from filename %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:164 #, c-format msgid "Invalid DTR for source %s, destination %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:206 #, c-format msgid "DTRs still running for job %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:215 #, c-format msgid "All DTRs finished for job %s" msgstr "" #: src/services/candypond/CandyPondGenerator.cpp:222 #, c-format msgid "Job %s not found" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:66 #, c-format msgid "Archiving DTR %s, state ERROR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:70 #, c-format msgid "Archiving DTR %s, state %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:174 msgid "No delegation token in request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:184 msgid "Failed to accept delegation" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:214 #: src/services/data-staging/DataDeliveryService.cpp:221 msgid "ErrorDescription" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:226 #, c-format msgid "All %u process slots used" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:241 #, c-format msgid "Received retry for DTR %s still in transfer" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:248 #, c-format msgid "Replacing DTR %s in state %s with new request" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:258 #, c-format msgid "Storing temp proxy at %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:266 #, c-format 
msgid "Failed to create temp proxy at %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:273 #, c-format msgid "Failed to change owner of temp proxy at %s to %i:%i: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:302 msgid "Invalid DTR" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:306 #, c-format msgid "Failed to remove temporary proxy %s: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:407 #, c-format msgid "No such DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:425 #, c-format msgid "DTR %s failed: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:436 #, c-format msgid "DTR %s finished successfully" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:446 #, c-format msgid "DTR %s still in progress (%lluB transferred)" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:506 #, c-format msgid "No active DTR %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:516 #, c-format msgid "DTR %s was already cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:525 #, c-format msgid "DTR %s could not be cancelled" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:569 #, c-format msgid "Failed to get load average: %s" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:593 msgid "Invalid configuration - no allowed IP address specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:597 msgid "Invalid configuration - no transfer dirs specified" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:608 msgid "Failed to start archival thread" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:633 msgid "Shutting down data delivery service" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:642 msgid "Unauthorized" msgstr "" #: src/services/data-staging/DataDeliveryService.cpp:728 msgid "Only POST is supported in DataDeliveryService" msgstr "" #: src/services/examples/echo_python/EchoService.py:12 msgid "EchoService (python) constructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:17 #, python-format msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:24 msgid "EchoService (python) destructor called" msgstr "" #: src/services/examples/echo_python/EchoService.py:54 msgid "EchoService (python) thread test starting" msgstr "" #: src/services/examples/echo_python/EchoService.py:65 #, python-format msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s" msgstr "" #: src/services/examples/echo_python/EchoService.py:82 msgid "EchoService (python) 'Process' called" msgstr "" #: src/services/examples/echo_python/EchoService.py:86 #, python-format msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:87 #, python-format msgid "inmsg.Attributes().getAll() = %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:88 #, python-format msgid "EchoService (python) got: %s " msgstr "" #: src/services/examples/echo_python/EchoService.py:93 #, python-format msgid "EchoService (python) request_namespace: %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:99 #: src/services/examples/echo_python/EchoService.py:171 #, python-format msgid "outpayload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:128 msgid "Calling https://localhost:60000/Echo using 
ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:131 msgid "Calling http://localhost:60000/Echo using ClientSOAP" msgstr "" #: src/services/examples/echo_python/EchoService.py:137 #: src/services/examples/echo_python/EchoService.py:155 #, python-format msgid "new_payload %s" msgstr "" #: src/services/examples/echo_python/EchoService.py:149 msgid "Calling http://localhost:60000/Echo using httplib" msgstr "" #: src/services/examples/echo_python/EchoService.py:165 msgid "Start waiting 10 sec..." msgstr "" #: src/services/examples/echo_python/EchoService.py:167 msgid "Waiting ends." msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:103 #, c-format msgid "Loading %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:107 #, c-format msgid "Initialized %u-th Python service" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:142 msgid "Invalid class name" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:147 #, c-format msgid "class name: %s" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:148 #, c-format msgid "module name: %s" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:205 msgid "Cannot find ARC Config class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:212 msgid "Config class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:220 msgid "Cannot get dictionary of module" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:229 msgid "Cannot find service class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:238 msgid "Cannot create config argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:245 msgid "Cannot convert config to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:268 #, c-format msgid "%s is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:274 msgid "Message class is not an object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:280 msgid "Python Wrapper constructor succeeded" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:295 #, c-format msgid "Python Wrapper destructor (%d)" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:328 msgid "Python interpreter locked" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:332 msgid "Python interpreter released" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:403 msgid "Python wrapper process called" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:412 msgid "Failed to create input SOAP container" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:422 msgid "Cannot create inmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:436 msgid "Cannot find ARC Message class" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:442 msgid "Cannot convert inmsg to Python object" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:451 msgid "Failed to create SOAP containers" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:457 msgid "Cannot create outmsg argument" msgstr "" #: src/services/wrappers/python/pythonwrapper.cpp:463 msgid "Cannot convert outmsg to Python object" msgstr "" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:12 msgid "Creating a 
soap client" msgstr "" #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:61 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:22 msgid "Creating and sending request" msgstr "" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 #: src/tests/echo/test_clientinterface.py:30 msgid "SOAP invocation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invocation failed" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invocation failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:77 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:52 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49 #: src/tests/delegation/test_delegation_client.cpp:57 #: src/tests/delegation/test_delegation_client.cpp:89 #: src/tests/echo/test_clientinterface.cpp:182 #: src/tests/echo/test_clientinterface.cpp:205 #, c-format msgid "Delegation ID: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:58 #, c-format msgid "Delegated credential from delegation service: %s" msgstr "" #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45 #: src/tests/delegation/test_delegation_client.cpp:84 #: src/tests/echo/test_clientinterface.cpp:201 msgid "Delegation to gridsite delegation service failed" msgstr "" #: src/tests/count/count.cpp:58 msgid "Input is not SOAP" msgstr "" #: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83 msgid "echo: Unauthorized" msgstr "" #: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104 #, c-format msgid "Request is not supported - %s" msgstr "" #: src/tests/count/test_client.cpp:50 #: src/tests/echo/echo_test4axis2c/test_client.cpp:43 #: src/tests/echo/test_client.cpp:59 msgid "Failed to load client configuration" msgstr "" #: src/tests/count/test_client.cpp:54 #: src/tests/echo/echo_test4axis2c/test_client.cpp:47 #: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63 msgid "Client side MCCs are loaded" msgstr "" #: src/tests/count/test_client.cpp:57 #: src/tests/echo/echo_test4axis2c/test_client.cpp:50 #: 
src/tests/echo/test_client.cpp:66 msgid "Client chain does not have entry point" msgstr ""
#: src/tests/count/test_client.cpp:84 #: src/tests/echo/echo_test4axis2c/test_client.cpp:74 #: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90 msgid "Request failed" msgstr ""
#: src/tests/count/test_client.cpp:90 #: src/tests/echo/echo_test4axis2c/test_client.cpp:80 #: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96 msgid "There is no response" msgstr ""
#: src/tests/count/test_client.cpp:97 #: src/tests/echo/echo_test4axis2c/test_client.cpp:87 #: src/tests/echo/test_client.cpp:103 msgid "Response is not SOAP" msgstr ""
#: src/tests/count/test_service.cpp:22 src/tests/echo/test.cpp:23 #: src/tests/echo/test_service.cpp:22 msgid "Creating service side chain" msgstr ""
#: src/tests/count/test_service.cpp:25 src/tests/echo/test.cpp:26 #: src/tests/echo/test_service.cpp:25 msgid "Failed to load service configuration" msgstr ""
#: src/tests/count/test_service.cpp:30 src/tests/echo/test_service.cpp:30 msgid "Service is waiting for requests" msgstr ""
#: src/tests/echo/test.cpp:32 msgid "Creating client interface" msgstr ""
#: src/tests/echo/test.cpp:82 msgid "Request succeed!!!" msgstr ""
nordugrid-arc-7.1.1/po/PaxHeaders/Makevars0000644000000000000000000000013215067751327015465 xustar0030 mtime=1759498967.644063541 30 atime=1759498967.808492785 30 ctime=1759499034.625048793 nordugrid-arc-7.1.1/po/Makevars0000644000175000002070000000034615067751327017372 0ustar00mockbuildmock00000000000000DOMAIN = $(PACKAGE) subdir = po top_builddir = .. XGETTEXT_OPTIONS = -kmsg:2 -kIString:1 -kistring:1 -kFindNTrans:1,2 COPYRIGHT_HOLDER = NorduGrid collaboration MSGID_BUGS_ADDRESS = support@nordugrid.org EXTRA_LOCALE_CATEGORIES =
nordugrid-arc-7.1.1/po/PaxHeaders/ru.po0000644000000000000000000000013215067751431014753 xustar0030 mtime=1759499033.390489307 30 atime=1759499034.289502967 30 ctime=1759499034.627624518 nordugrid-arc-7.1.1/po/ru.po0000644000175000002070000442357015067751431016674 0ustar00mockbuildmock00000000000000# translation of Arc.po to Russian
# Oxana Smirnova , 2007.
# Translation file for the Advanced Resource Connector (Arc)
msgid "" msgstr "" "Project-Id-Version: Arc\n" "Report-Msgid-Bugs-To: support@nordugrid.org\n" "POT-Creation-Date: 2025-10-03 15:43+0200\n" "PO-Revision-Date: 2021-11-26 12:09+0100\n" "Last-Translator: Oxana Smirnova \n" "Language-Team: Russian\n" "Language: ru\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: Poedit 2.3\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" "X-Poedit-KeywordsList: msg:2;IString:1;istring:1;FindNTrans:1,2\n" "X-Poedit-Basepath: /home/oxana/GITROOT/arc6\n" "X-Poedit-SearchPath-0: src\n"
#: src/clients/compute/arccat.cpp:38 src/clients/compute/arcclean.cpp:34 #: src/clients/compute/arcget.cpp:35 src/clients/compute/arckill.cpp:33 #: src/clients/compute/arcrenew.cpp:32 src/clients/compute/arcresume.cpp:32 #: src/clients/compute/arcstat.cpp:34 msgid "[job ...]" msgstr "[задача ...]"
#: src/clients/compute/arccat.cpp:39 msgid "" "The arccat command performs the cat command on the stdout, stderr or grid\n" "manager's error log of the job." msgstr "" "Команда arccat предназначена для вывода на экран сообщений стандартного\n" "выхода, стандартной ошибки или ошибок системы при исполнении задачи."
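# -- Editor's note (added; not part of the upstream catalogue) ---------------
# The Plural-Forms header above is a C expression picking one of nplurals=3
# Russian forms. A few worked values of the n%10 / n%100 arithmetic:
#   n=1  -> n%10==1 && n%100!=11            -> form 0 (e.g. "задача")
#   n=3  -> n%10 in 2..4, n%100 not 10..19  -> form 1 (e.g. "задачи")
#   n=5 or n=11 -> neither condition holds  -> form 2 (e.g. "задач")
#   n=21 -> n%10==1 && n%100!=11            -> form 0 again
# The XGETTEXT_OPTIONS keywords in po/Makevars tell xgettext which argument
# of each call carries the translatable string. Illustrative C++ calls
# (hypothetical snippets matching the keyword spec, not quotes from the ARC
# sources):
#   logger.msg(Arc::ERROR, "Request failed");   // -kmsg:2 -> 2nd argument
#   IString("Creating client interface");       // -kIString:1 -> 1st argument
#   FindNTrans("%d job", "%d jobs", n);         // -kFindNTrans:1,2 -> singular/plural pair
# ----------------------------------------------------------------------------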
#: src/clients/compute/arccat.cpp:46 src/clients/compute/arcclean.cpp:41 #: src/clients/compute/arcget.cpp:42 src/clients/compute/arcinfo.cpp:45 #: src/clients/compute/arckill.cpp:40 src/clients/compute/arcrenew.cpp:37 #: src/clients/compute/arcresume.cpp:37 src/clients/compute/arcstat.cpp:42 #: src/clients/compute/arcsub.cpp:53 src/clients/compute/arcsync.cpp:147 #: src/clients/compute/arctest.cpp:67 src/clients/credentials/arcproxy.cpp:484 #: src/clients/data/arccp.cpp:652 src/clients/data/arcls.cpp:371 #: src/clients/data/arcmkdir.cpp:149 src/clients/data/arcrename.cpp:160 #: src/clients/data/arcrm.cpp:174 src/hed/daemon/unix/main_unix.cpp:345 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1265 #: src/hed/libs/data/DataExternalHelper.cpp:358 #, c-format msgid "%s version %s" msgstr "%s, версия %s"
#: src/clients/compute/arccat.cpp:55 src/clients/compute/arcclean.cpp:50 #: src/clients/compute/arcget.cpp:51 src/clients/compute/arcinfo.cpp:53 #: src/clients/compute/arckill.cpp:49 src/clients/compute/arcrenew.cpp:46 #: src/clients/compute/arcresume.cpp:46 src/clients/compute/arcstat.cpp:51 #: src/clients/compute/arcsub.cpp:62 src/clients/compute/arcsync.cpp:156 #: src/clients/compute/arctest.cpp:89 src/clients/credentials/arcproxy.cpp:492 #: src/clients/data/arccp.cpp:659 src/clients/data/arcls.cpp:379 #: src/clients/data/arcmkdir.cpp:157 src/clients/data/arcrename.cpp:168 #: src/clients/data/arcrm.cpp:183 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:192 #: src/services/a-rex/grid-manager/GridManager.cpp:110 #: src/services/a-rex/grid-manager/log/JobLog.cpp:139 #, c-format msgid "Running command: %s" msgstr "Выполняется команда %s"
#: src/clients/compute/arccat.cpp:66 src/clients/compute/arcclean.cpp:61 #: src/clients/compute/arcget.cpp:62 src/clients/compute/arcinfo.cpp:65 #: src/clients/compute/arckill.cpp:60 src/clients/compute/arcrenew.cpp:57 #: src/clients/compute/arcresume.cpp:50 src/clients/compute/arcstat.cpp:62 #: src/clients/compute/arcsub.cpp:66 src/clients/compute/arcsync.cpp:167 #: src/clients/compute/arctest.cpp:93 src/clients/data/arccp.cpp:682 #: src/clients/data/arcls.cpp:401 src/clients/data/arcmkdir.cpp:179 #: src/clients/data/arcrename.cpp:190 src/clients/data/arcrm.cpp:205 msgid "Failed configuration initialization" msgstr "Не удалось загрузить настройки"
#: src/clients/compute/arccat.cpp:78 src/clients/compute/arcclean.cpp:73 #: src/clients/compute/arcget.cpp:87 src/clients/compute/arckill.cpp:72 #: src/clients/compute/arcrenew.cpp:69 src/clients/compute/arcresume.cpp:69 #: src/clients/compute/arcstat.cpp:74 #, c-format msgid "Cannot read specified jobid file: %s" msgstr "Не удаётся прочесть указанный файл, содержащий ярлыки задач: %s"
#: src/clients/compute/arccat.cpp:108 src/clients/compute/arcclean.cpp:103 #: src/clients/compute/arcget.cpp:117 src/clients/compute/arckill.cpp:102 #: src/clients/compute/arcrenew.cpp:99 src/clients/compute/arcresume.cpp:99 #: src/clients/compute/arcstat.cpp:127 msgid "No jobs given" msgstr "Задачи не указаны"
#: src/clients/compute/arccat.cpp:121 src/clients/compute/arcclean.cpp:116 #: src/clients/compute/arcget.cpp:130 src/clients/compute/arckill.cpp:115 #: src/clients/compute/arcrenew.cpp:112 src/clients/compute/arcresume.cpp:112 #: src/clients/compute/arcstat.cpp:139 #, c-format msgid "Job list file (%s) doesn't exist" msgstr "Файл списка задач (%s) не существует"
#: src/clients/compute/arccat.cpp:128 src/clients/compute/arcclean.cpp:123 #: src/clients/compute/arcget.cpp:137 src/clients/compute/arckill.cpp:122 #: src/clients/compute/arcrenew.cpp:119 src/clients/compute/arcresume.cpp:119 #: src/clients/compute/arcstat.cpp:146 src/clients/compute/arctest.cpp:296 #, c-format msgid "Unable to read job information from file (%s)" msgstr "Невозможно прочитать информацию о задаче из файла (%s)"
#: src/clients/compute/arccat.cpp:137 src/clients/compute/arcclean.cpp:131 #: src/clients/compute/arcget.cpp:145 src/clients/compute/arckill.cpp:130 #: src/clients/compute/arcrenew.cpp:128 src/clients/compute/arcresume.cpp:128 #: src/clients/compute/arcstat.cpp:155 #, c-format msgid "Warning: Job not found in job list: %s" msgstr "Предупреждение: Задача не обнаружена в списке задач: %s"
#: src/clients/compute/arccat.cpp:150 src/clients/compute/arcclean.cpp:186 #: src/clients/compute/arcget.cpp:158 src/clients/compute/arckill.cpp:142 #: src/clients/compute/arcrenew.cpp:140 src/clients/compute/arcresume.cpp:140 msgid "No jobs" msgstr "Задач нет"
#: src/clients/compute/arccat.cpp:165 #, c-format msgid "Could not create temporary file \"%s\"" msgstr "Не удалось создать временный файл \"%s\""
#: src/clients/compute/arccat.cpp:166 src/clients/compute/arccat.cpp:172 #, c-format msgid "Cannot create output of %s for any jobs" msgstr "Невозможно создать выход %s ни для одной задачи"
#: src/clients/compute/arccat.cpp:173 #, c-format msgid "Invalid destination URL %s" msgstr "Неверный URL цели %s"
#: src/clients/compute/arccat.cpp:191 #, c-format msgid "Job deleted: %s" msgstr "Задача удалена: %s"
#: src/clients/compute/arccat.cpp:201 #, c-format msgid "Job has not started yet: %s" msgstr "Исполнение задачи ещё не началось: %s"
#: src/clients/compute/arccat.cpp:242 #, c-format msgid "Cannot determine the %s location: %s" msgstr "Не удаётся определить расположение %s: %s"
#: src/clients/compute/arccat.cpp:247 #, c-format msgid "Cannot create output of %s for job (%s): Invalid source %s" msgstr "Невозможно создать вывод %s для задачи (%s): Недопустимый источник %s"
#: src/clients/compute/arccat.cpp:260 #, c-format msgid "Catting %s for job %s" msgstr "Подцепляется %s для задачи %s"
#: src/clients/compute/arcclean.cpp:35 msgid "The arcclean command removes a job from the computing resource." msgstr "" "Команда arcclean используется для удаления результатов работы задач\n" "с удалённого компьютера."
#: src/clients/compute/arcclean.cpp:155 msgid "" "You are about to remove jobs from the job list for which no information " "could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the " "information\n" "system, and this action will also remove such jobs." msgstr "" "Из списка задач будут удалены задачи, о которых не обнаружена информация.\n" "ВНИМАНИЕ: задачи, запущенные недавно, могли ещё не появиться в " "информационной\n" "системе, и эта операция удалит также эти задачи."
#: src/clients/compute/arcclean.cpp:158 msgid "Are you sure you want to clean jobs missing information?" msgstr "Вы уверены, что хотите вычистить задачи с отсутствующей информацией?"
#: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "y" msgstr "y"
#: src/clients/compute/arcclean.cpp:159 src/clients/compute/arcsync.cpp:237 msgid "n" msgstr "n"
#: src/clients/compute/arcclean.cpp:164 msgid "Jobs missing information will not be cleaned!" msgstr "Задачи с отсутствующей информацией не будут вычищены!"
#: src/clients/compute/arcclean.cpp:180 src/clients/compute/arctest.cpp:300 #, c-format msgid "Warning: Failed to write job information to file (%s)" msgstr "Предупреждение: Сбой записи информации о задаче в файл (%s)"
#: src/clients/compute/arcclean.cpp:181 msgid "" "         Run 'arcclean -s Undefined' to remove cleaned jobs from job list" msgstr "" "         Запустите 'arcclean -s Undefined' для удаления вычищенных задач из " "списка"
#: src/clients/compute/arcclean.cpp:190 #, c-format msgid "Jobs processed: %d, deleted: %d" msgstr "Обработано задач: %d, уничтожено: %d"
#: src/clients/compute/arcget.cpp:36 msgid "The arcget command is used for retrieving the results from a job." msgstr "Команда arcget используется для получения результатов работы задач."
#: src/clients/compute/arcget.cpp:75 #, c-format msgid "Job download directory from user configuration file: %s" msgstr "Каталог для загрузки задач из пользовательских настроек: %s"
#: src/clients/compute/arcget.cpp:78 msgid "Job download directory will be created in present working directory." msgstr "Каталог для загрузки задачи будет создан в текущей рабочей директории."
#: src/clients/compute/arcget.cpp:82 #, c-format msgid "Job download directory: %s" msgstr "Каталог для загрузки задач: %s"
#: src/clients/compute/arcget.cpp:168 #, c-format msgid "Unable to create directory for storing results (%s) - %s" msgstr "Не удалось создать каталог для сохранения результатов (%s) - %s"
#: src/clients/compute/arcget.cpp:178 #, c-format msgid "Results stored at: %s" msgstr "Результаты сохранены в: %s"
#: src/clients/compute/arcget.cpp:190 src/clients/compute/arckill.cpp:158 msgid "Warning: Some jobs were not removed from server" msgstr "Предупреждение: некоторые задачи не были удалены с сервера"
#: src/clients/compute/arcget.cpp:191 src/clients/compute/arcget.cpp:198 #: src/clients/compute/arckill.cpp:159 msgid "    Use arcclean to remove retrieved jobs from job list" msgstr "    Используйте arcclean для удаления полученных задач из списка"
#: src/clients/compute/arcget.cpp:197 src/clients/compute/arckill.cpp:165 #, c-format msgid "Warning: Failed removing jobs from file (%s)" msgstr "Предупреждение: Сбой удаления информации о задачах из файла (%s)"
#: src/clients/compute/arcget.cpp:202 #, c-format msgid "" "Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d" msgstr "Обработано задач: %d, успешно получено: %d, успешно очищено: %d"
#: src/clients/compute/arcget.cpp:206 #, c-format msgid "Jobs processed: %d, successfully retrieved: %d" msgstr "Обработано задач: %d, успешно получено: %d"
#: src/clients/compute/arcinfo.cpp:34 msgid "[resource ...]" msgstr "[ресурс ...]"
#: src/clients/compute/arcinfo.cpp:35 msgid "" "The arcinfo command is used for obtaining the status of computing resources " "on the Grid." msgstr "" "Команда arcinfo используется для проверки состояния вычислительных ресурсов " "на Гриде."
#: src/clients/compute/arcinfo.cpp:141 msgid "Information endpoint" msgstr "Точка входа для информации"
#: src/clients/compute/arcinfo.cpp:152 msgid "Submission endpoint" msgstr "Точка входа для засылки задач"
#: src/clients/compute/arcinfo.cpp:154 msgid "status" msgstr "состояние"
#: src/clients/compute/arcinfo.cpp:156 msgid "interface" msgstr "интерфейс"
#: src/clients/compute/arcinfo.cpp:175 msgid "ERROR: Failed to retrieve information from the following endpoints:" msgstr "ОШИБКА: Не удалось получить информацию через следующие точки входа:"
#: src/clients/compute/arcinfo.cpp:188 msgid "ERROR: Failed to retrieve information" msgstr "ОШИБКА: не удалось получить информацию"
#: src/clients/compute/arcinfo.cpp:190 msgid "from the following endpoints:" msgstr "через следующие точки входа:"
#: src/clients/compute/arckill.cpp:34 msgid "The arckill command is used to kill running jobs." msgstr "Команда arckill используется для прерывания исполняющихся задач."
#: src/clients/compute/arckill.cpp:166 msgid "" "         Run 'arcclean -s Undefined' to remove killed jobs from job list" msgstr "" "         Запустите 'arcclean -s Undefined' для удаления оборванных задач из " "списка"
#: src/clients/compute/arckill.cpp:169 #, c-format msgid "Jobs processed: %d, successfully killed: %d, successfully cleaned: %d" msgstr "Обработано задач: %d, успешно оборвано: %d, успешно очищено: %d"
#: src/clients/compute/arckill.cpp:171 #, c-format msgid "Jobs processed: %d, successfully killed: %d" msgstr "Обработано задач: %d, успешно оборвано: %d"
#: src/clients/compute/arcrenew.cpp:146 #, c-format msgid "Jobs processed: %d, renewed: %d" msgstr "Обработано задач: %d, обновлено: %d"
#: src/clients/compute/arcresume.cpp:146 #, c-format msgid "Jobs processed: %d, resumed: %d" msgstr "Обработано задач: %d, возобновлено: %d"
#: src/clients/compute/arcstat.cpp:35 msgid "" "The arcstat command is used for obtaining the status of jobs that have\n" "been submitted to Grid enabled resources." msgstr "" "Команда arcstat используется для вывода информации о состоянии\n" "задач, отправленных на Грид ."
#: src/clients/compute/arcstat.cpp:101 msgid "The 'sort' and 'rsort' flags cannot be specified at the same time." msgstr "Опции 'sort' и 'rsort' не могут быть указаны одновременно."
#: src/clients/compute/arcstat.cpp:171 msgid "No jobs found, try later" msgstr "Не найдено ни одной задачи, попробуйте позже"
#: src/clients/compute/arcstat.cpp:215 #, c-format msgid "Status of %d jobs was queried, %d jobs returned information" msgstr "Опрошено состояние %d задач, %d задач отозвались"
#: src/clients/compute/arcsub.cpp:45 msgid "[filename ...]" msgstr "[файл ...]"
#: src/clients/compute/arcsub.cpp:46 msgid "" "The arcsub command is used for submitting jobs to Grid enabled computing\n" "resources." msgstr "" "Команда arcsub используется для запуска задач на вычислительные\n" "ресурсы Грид."
#: src/clients/compute/arcsub.cpp:97 msgid "No job description input specified" msgstr "Не задано описание задачи"
#: src/clients/compute/arcsub.cpp:110 #, c-format msgid "Can not open job description file: %s" msgstr "Невозможно открыть файл с описанием задачи: %s"
#: src/clients/compute/arcsub.cpp:138 src/clients/compute/arcsub.cpp:166 msgid "Invalid JobDescription:" msgstr "Неверный элемент JobDescription:"
#: src/clients/compute/arcsub.cpp:208 src/clients/compute/arctest.cpp:250 msgid "" "Cannot adapt job description to the submission target when information " "discovery is turned off" msgstr "" "Невозможно адаптировать описание задачи ни к одному ресурсу когда отключён " "сбор информации"
#: src/clients/compute/arcsync.cpp:66 src/clients/compute/arcsync.cpp:177 #, c-format msgid "Warning: Unable to open job list file (%s), unknown format" msgstr "" "Предупреждение: Невозможно открыть файл списка задач (%s), формат неизвестен"
#: src/clients/compute/arcsync.cpp:76 msgid "Found the following jobs:" msgstr "Обнаружены следующие задачи:"
#: src/clients/compute/arcsync.cpp:86 msgid "Total number of jobs found: " msgstr "Количество всех обнаруженных задач: "
#: src/clients/compute/arcsync.cpp:98 msgid "Found the following new jobs:" msgstr "Обнаружены следующие новые задачи:"
#: src/clients/compute/arcsync.cpp:108 msgid "Total number of new jobs found: " msgstr "Количество обнаруженных новых задач: "
#: src/clients/compute/arcsync.cpp:113 #, c-format msgid "ERROR: Failed to write job information to file (%s)" msgstr "ОШИБКА: Сбой записи информации о задаче в файл (%s)"
#: src/clients/compute/arcsync.cpp:140 #, fuzzy msgid "" "The arcsync command synchronizes your local job list with the information " "at\n" "the given CEs or registry servers." msgstr "" "Команда arcsync синхронизирует Ваш локальный список задач с информацией\n" "на заданных кластерах или каталогах ресурсов."
#: src/clients/compute/arcsync.cpp:183 #, c-format msgid "Warning: Unable to read local list of jobs from file (%s)" msgstr "Предупреждение: Сбой чтения локального списка задач из файла (%s)"
#: src/clients/compute/arcsync.cpp:188 #, c-format msgid "Warning: Unable to truncate local list of jobs in file (%s)" msgstr "Предупреждение: Сбой сокращения локального списка задач в файле (%s)"
#: src/clients/compute/arcsync.cpp:194 #, c-format msgid "Warning: Unable to create job list file (%s), jobs list is destroyed" msgstr "" "Предупреждение: Невозможно создать файл списка задач (%s), список задач " "уничтожен"
#: src/clients/compute/arcsync.cpp:198 #, c-format msgid "" "Warning: Failed to write local list of jobs into file (%s), jobs list is " "destroyed" msgstr "" "Предупреждение: Сбой записи списка локальных задач в файл (%s), список задач " "уничтожен"
#: src/clients/compute/arcsync.cpp:231 msgid "" "Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently " "submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." msgstr "" "Синхронизация локального списка активных задач с информацией в системе Грид\n" "может привести к некоторым несоответствиям: только что запущенные задачи\n" "могут быть ещё не зарегистрированы в системе, тогда как только что " "удалённые\n" "задачи могут всё ещё присутствовать."
#: src/clients/compute/arcsync.cpp:236 msgid "Are you sure you want to synchronize your local job list?" msgstr "Вы уверены, что хотите синхронизировать список локальных задач?"
#: src/clients/compute/arcsync.cpp:241 msgid "Cancelling synchronization request" msgstr "Запрос о синхронизации отменяется"
#: src/clients/compute/arcsync.cpp:251 #, fuzzy msgid "" "No services specified. Please configure default services in the client " "configuration, or specify a cluster or registry (-C or -Y options, see " "arcsync -h)." msgstr "" "Не задано ни одного сервиса. Пожалуйста, настройте сервисы по умолчанию в " "файле настроек клиента, либо укажите ресурс или каталог ресурсов (опции -c " "или -g, см. arcsync -h)."
#: src/clients/compute/arctest.cpp:60 msgid " " msgstr " "
#: src/clients/compute/arctest.cpp:61 msgid "The arctest command is used for testing clusters as resources." msgstr "" "Команда arctest используется для проверки кластеров как вычислительных " "ресурсов."
#: src/clients/compute/arctest.cpp:73 msgid "" "Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n" msgstr "" "Задание не указано:\n" "Вы должны либо указать номер тестового задания, используя опцию -J (--job),\n" "либо запросить информацию о сертификатах, используя опцию -E (--" "certificate)\n"
#: src/clients/compute/arctest.cpp:80 msgid "" "For the 1st test job you also have to specify a runtime value with -r (--" "runtime) option." msgstr "" "Для тестовой задачи номер 1 необходимо задать время исполнения с помощью " "опции -r (--runtime)."
#: src/clients/compute/arctest.cpp:118 msgid "Certificate information:" msgstr "Информация о сертификате:"
#: src/clients/compute/arctest.cpp:122 msgid "No user-certificate found" msgstr "Сертификат пользователя не обнаружен"
#: src/clients/compute/arctest.cpp:125 #, c-format msgid "Certificate: %s" msgstr "Сертификат: %s"
#: src/clients/compute/arctest.cpp:127 #, c-format msgid "Subject name: %s" msgstr "Имя субъекта: %s"
#: src/clients/compute/arctest.cpp:128 #, c-format msgid "Valid until: %s" msgstr "Действует по: %s"
#: src/clients/compute/arctest.cpp:132 msgid "Unable to determine certificate information" msgstr "Не удалось получить информацию о сертификате"
#: src/clients/compute/arctest.cpp:136 msgid "Proxy certificate information:" msgstr "Информация о доверенности:"
#: src/clients/compute/arctest.cpp:138 msgid "No proxy found" msgstr "Не удалось обнаружить доверенность"
#: src/clients/compute/arctest.cpp:141 #, c-format msgid "Proxy: %s" msgstr "Доверенность: %s"
#: src/clients/compute/arctest.cpp:142 #, c-format msgid "Proxy-subject: %s" msgstr "Имя субъекта доверенности: %s"
#: src/clients/compute/arctest.cpp:144 msgid "Valid for: Proxy expired" msgstr "Доверенность действительна на: Срок действия доверенности вышел"
#: src/clients/compute/arctest.cpp:146 msgid "Valid for: Proxy not valid" msgstr "Доверенность действительна на: Доверенность недействительна"
#: src/clients/compute/arctest.cpp:148 #, c-format msgid "Valid for: %s" msgstr "Срок действия истекает через: %s"
#: src/clients/compute/arctest.cpp:153 #, c-format msgid "Certificate issuer: %s" msgstr "Сертификат выдан: %s"
#: src/clients/compute/arctest.cpp:157 msgid "CA-certificates installed:" msgstr "Установленные сертификаты CA:"
#: src/clients/compute/arctest.cpp:179 msgid "Unable to detect if issuer certificate is installed." msgstr "Не удалось определить, установлены ли ключи центра сертификации."
#: src/clients/compute/arctest.cpp:182 msgid "Your issuer's certificate is not installed" msgstr "Не установлен сертификат Вашего центра сертификации"
#: src/clients/compute/arctest.cpp:196 #, c-format msgid "No test-job, with ID \"%d\"" msgstr "Тестовая задача под номером \"%d\" не существует"
#: src/clients/compute/arctest.cpp:267 #, c-format msgid "Cannot write jobid (%s) to file (%s)" msgstr "Невозможно записать ярлык задачи (%s) в файл (%s)"
#: src/clients/compute/arctest.cpp:268 #, c-format msgid "Test submitted with jobid: %s" msgstr "Тест запущен с ярлыком: %s"
#: src/clients/compute/arctest.cpp:283 #, c-format msgid "Computing service: %s" msgstr "Вычислительный сервис: %s"
#: src/clients/compute/arctest.cpp:289 msgid "Test failed, no more possible targets" msgstr "Не удалось заслать тест, возможные назначения отсутствуют"
#: src/clients/compute/arctest.cpp:302 src/clients/compute/submit.cpp:49 msgid "To recover missing jobs, run arcsync" msgstr "Для восполнения недостающих задач, запустите arcsync"
#: src/clients/compute/arctest.cpp:315 src/clients/compute/submit.cpp:159 #, c-format msgid "" "Unable to prepare job description according to needs of the target resource " "(%s)." msgstr "" "Невозможно адаптировать описание задачи в соответствии с требованиями " "назначения (%s)."
#: src/clients/compute/arctest.cpp:325 src/clients/compute/submit.cpp:175 #, c-format msgid "" "An error occurred during the generation of job description to be sent to %s" msgstr "Возникла ошибка при составлении описания задачи для засылки на %s"
#: src/clients/compute/arctest.cpp:329 src/clients/compute/submit.cpp:179 #, c-format msgid "Job description to be sent to %s:" msgstr "Описание задачи для отправки на %s:"
#: src/clients/compute/submit.cpp:34 #, c-format msgid "Job submitted with jobid: %s" msgstr "Задача запущена с ярлыком: %s"
#: src/clients/compute/submit.cpp:40 #, c-format msgid "Cannot write job IDs to file (%s)" msgstr "Невозможно записать ярлыки задач в файл (%s)"
#: src/clients/compute/submit.cpp:45 #, c-format msgid "Unable to open job list file (%s), unknown format" msgstr "Невозможно открыть файл списка задач (%s), формат неизвестен"
#: src/clients/compute/submit.cpp:47 #, c-format msgid "Failed to write job information to database (%s)" msgstr "Сбой записи информации о задаче в базу данных (%s)"
#: src/clients/compute/submit.cpp:51 #, c-format msgid "Record about new job successfully added to the database (%s)" msgstr "Запись о новой задаче успешно добавлена в базу данных (%s)"
#: src/clients/compute/submit.cpp:57 msgid "Job submission summary:" msgstr "Сводка засылки задач:"
#: src/clients/compute/submit.cpp:59 #, c-format msgid "%d of %d jobs were submitted" msgstr "%d из %d задач были засланы"
#: src/clients/compute/submit.cpp:61 msgid "The following jobs were not submitted:" msgstr "Следующие задачи не были засланы:"
#: src/clients/compute/submit.cpp:65 msgid "Job nr." msgstr "Задача номер"
#: src/clients/compute/submit.cpp:75 #, c-format msgid "ERROR: Unable to load broker %s" msgstr "ОШИБКА: не удалось подгрузить планировщик %s"
#: src/clients/compute/submit.cpp:79 msgid "" "ERROR: Job submission aborted because no resource returned any information" msgstr "" "ОШИБКА: Обрыв засылки задачи, так как ни один из ресурсов не предоставил " "информацию"
#: src/clients/compute/submit.cpp:83 msgid "ERROR: One or multiple job descriptions was not submitted." msgstr "ОШИБКА: Одна или несколько задач не были запущены."
#: src/clients/compute/submit.cpp:100 #, c-format msgid "" "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ." msgstr "" "Запрошен вычислительный ресурс, использующий интерфейс GridFTP, но " "необходимый\n" "%sподключаемый модуль не был подгружен. Устанавливали ли Вы этот модуль?\n" "%sЕсли нет, пожалуйста, установите пакет 'nordugrid-arc-plugins-globus'.\n" "%sНазвание пакета может зависеть от типа вашего дистрибутива."
#: src/clients/compute/submit.cpp:129 msgid "" "Unable to adapt job description to any resource, no resource information " "could be obtained." msgstr "" "Не удалось адаптировать описание задачи ни к одному ресурсу, т.к. не " "получено никакой информации."
#: src/clients/compute/submit.cpp:130 msgid "Original job description is listed below:" msgstr "Изначальное описание задачи приведено ниже:"
#: src/clients/compute/submit.cpp:142 #, c-format msgid "Dumping job description aborted: Unable to load broker %s" msgstr "" "Распечатка описания задачи оборвана: Невозможно подгрузить планировщик %s"
#: src/clients/compute/submit.cpp:197 msgid "" "Unable to prepare job description according to needs of the target resource." msgstr "" "Невозможно адаптировать описание задачи в соответствии с требованиями " "назначения."
#: src/clients/compute/submit.cpp:281 src/clients/compute/submit.cpp:311 #, c-format msgid "Service endpoint %s (type %s) added to the list for resource discovery" msgstr "Точка входа сервиса %s (тип %s) добавлена в список для поиска ресурсов"
#: src/clients/compute/submit.cpp:291 msgid "" "There are no endpoints in registry that match requested info endpoint type" msgstr "" "В учётном списке нет точек входа, соответствующих запрошенному типу точки " "входа информации"
#: src/clients/compute/submit.cpp:332 #, c-format msgid "Service endpoint %s (type %s) added to the list for direct submission" msgstr "" "Точка входа сервиса %s (тип %s) добавлена в список для непосредственной " "засылки"
#: src/clients/compute/submit.cpp:340 msgid "" "There are no endpoints in registry that match requested submission endpoint " "type" msgstr "" "В учётном списке нет точек входа, соответствующих запрошенному типу точки " "входа засылки"
#: src/clients/compute/utils.cpp:111 #, c-format msgid "Types of execution services that %s is able to submit jobs to:" msgstr "Типы служб выполнения задач, на которые %s может засылать задачи:"
#: src/clients/compute/utils.cpp:114 #, c-format msgid "Types of registry services that %s is able to collect information from:" msgstr "Типы служб регистрации, в которых %s может получить информацию:"
#: src/clients/compute/utils.cpp:117 #, c-format msgid "" "Types of local information services that %s is able to collect information " "from:" msgstr "" "Типы локальных служб информации, с которых %s может получить информацию:"
#: src/clients/compute/utils.cpp:120 #, c-format msgid "" "Types of local information services that %s is able to collect job " "information from:" msgstr "" "Типы локальных служб информации, с которых %s может получить информацию о " "задачах:"
#: src/clients/compute/utils.cpp:123 #, c-format msgid "Types of services that %s is able to manage jobs at:" msgstr "Типы служб, на которых %s может управлять задачами:"
#: src/clients/compute/utils.cpp:126 #, c-format msgid "Job description languages supported by %s:" msgstr "Следующие языки описания задач поддерживаются %s:"
#: src/clients/compute/utils.cpp:129 #, c-format msgid "Brokers available to %s:" msgstr "Следующие планировщики доступны для %s:"
#: src/clients/compute/utils.cpp:152 #, c-format msgid "" "Default broker (%s) is not available. When using %s a broker should be " "specified explicitly (-b option)." msgstr "" "Планировщик по умолчанию (%s) недоступен. При использовании %s планировщик " "должен быть указан явным образом (опция -b)."
#: src/clients/compute/utils.cpp:162 msgid "Proxy expired. Job submission aborted. Please run 'arcproxy'!" msgstr "" "Срок действия доверенности вышел. Засылка задачи оборвана. Пожалуйста, " "запустите 'arcproxy'!"
#: src/clients/compute/utils.cpp:167 msgid "" "Cannot find any proxy. This application currently cannot run without a " "proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!" msgstr "" "Не удалось обнаружить доверенность. Это приложение не работает без " "доверенности.\n" " Если Ваша доверенность хранится в нестандартном месте, пожалуйста,\n" " убедитесь, что в настройках клиента указан правильный путь.\n" " Если же Вы пока не создали доверенность, запустите 'arcproxy'!"
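# -- Editor's note (added; not part of the upstream catalogue) ---------------
# Several messages above tell the user to "run 'arcproxy'" before submitting.
# A minimal sketch of such a session; the -c constraint syntax follows the
# arcproxy help text further below, the -C option is taken from the arcsync
# help above, and the hostname, lifetime and job file are illustrative
# assumptions only:
#   arcproxy -c validityPeriod=12h
#   arcsub -C ce.example.org job.xrsl
# ----------------------------------------------------------------------------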
#: src/clients/compute/utils.cpp:179 src/clients/data/utils.cpp:28 msgid "" "Cannot find any token. Please run 'oidc-token' or use similar\n" " utility to obtain authentication token!" msgstr ""
#: src/clients/compute/utils.cpp:308 #, c-format msgid "Unsupported submission endpoint type: %s" msgstr "Неподдерживаемый тип точки входа засылки: %s"
#: src/clients/compute/utils.cpp:327 #, fuzzy msgid "" "Requested to skip resource discovery. Will try direct submission to arcrest " "endpoint type." msgstr "" "Поступил запрос пропустить поиск ресурсов. Будет произведена попытка " "непосредственной засылки на точки входа засылки типа %s и %s"
#: src/clients/compute/utils.cpp:332 #, c-format msgid "Unsupported information endpoint type: %s" msgstr "Неподдерживаемый тип точки входа информации: %s"
#: src/clients/compute/utils.cpp:385 msgid "Other actions" msgstr "Другие действия"
#: src/clients/compute/utils.cpp:386 msgid "Brokering and filtering" msgstr "Планировка и выборка"
#: src/clients/compute/utils.cpp:387 msgid "Output format modifiers" msgstr "Варианты форматирования выдачи"
#: src/clients/compute/utils.cpp:388 msgid "Behaviour tuning" msgstr "Настройки поведения"
#: src/clients/compute/utils.cpp:389 #, fuzzy msgid "Target endpoint selection" msgstr "Выбор точки входа для засылки задач ARC6"
#: src/clients/compute/utils.cpp:393 #, fuzzy msgid "computing element hostname or a complete endpoint URL" msgstr "указать имя сервера вычислительного ресурса или полный URL точки входа"
#: src/clients/compute/utils.cpp:394 src/clients/compute/utils.cpp:404 msgid "ce" msgstr "ce"
#: src/clients/compute/utils.cpp:398 msgid "registry service URL with optional specification of protocol" msgstr "URL службы учёта с необязательным указанием протокола"
#: src/clients/compute/utils.cpp:399 msgid "registry" msgstr "учётный список"
#: src/clients/compute/utils.cpp:403 #, fuzzy msgid "only select jobs that were submitted to this computing element" msgstr "выбрать лишь задачи, засланные на этот ресурс"
#: src/clients/compute/utils.cpp:410 #, fuzzy msgid "" "require the specified endpoint type for job submission.\n" "\tAllowed values are: arcrest and internal." msgstr "" "потребовать указанный тип точки входа для запуска задачи.\n" "\tДопустимые типы: arcrest, emies, gridftp или gridftpjob и internal."
#: src/clients/compute/utils.cpp:412 src/clients/compute/utils.cpp:426 #: src/clients/compute/utils.cpp:434 msgid "type" msgstr "тип"
#: src/clients/compute/utils.cpp:418 msgid "skip the service with the given URL during service discovery" msgstr "пропустить службу с этим URL при обнаружении служб"
#: src/clients/compute/utils.cpp:419 src/clients/compute/utils.cpp:603 #: src/clients/data/arccp.cpp:583 msgid "URL" msgstr "URL"
#: src/clients/compute/utils.cpp:423 #, fuzzy msgid "" "require information query using the specified information endpoint type.\n" "\tSpecial value 'NONE' will disable all resource information queries and the " "following brokering.\n" "\tAllowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal." msgstr "" "потребовать поиск информации используя указанный тип точки входа " "информации.\n" "\tСпециальное значение 'NONE' предотвратит любой поиск информации и " "последующую планировку\n" "\tДопустимые значения: ldap.nordugrid, ldap.glue2, emies, arcrest и internal."
#: src/clients/compute/utils.cpp:432 #, fuzzy msgid "" "only get information about executon targets that support this job submission " "endpoint type.\n" "\tAllowed values are: arcrest and internal." msgstr "" "получить информацию только о тех вычислительных ресурсах, которые " "поддерживают указанный интерфейс для засылки задач.\n" "\tДопустимые значения: org.nordugrid.gridftpjob или org.nordugrid.gridftp, " "org.ogf.glue.emies.activitycreation и org.nordugrid.internal"
#: src/clients/compute/utils.cpp:440 msgid "keep the files on the server (do not clean)" msgstr "сохранять файлы на сервере (не удалять)"
#: src/clients/compute/utils.cpp:446 msgid "do not ask for verification" msgstr "не запрашивать подтверждения"
#: src/clients/compute/utils.cpp:450 msgid "truncate the joblist before synchronizing" msgstr "сжать список задач перед синхронизацией"
#: src/clients/compute/utils.cpp:454 msgid "do not collect information, only convert jobs storage format" msgstr "не собирать информацию, а лишь конвертировать формат хранения задач"
#: src/clients/compute/utils.cpp:460 src/clients/data/arcls.cpp:277 msgid "long format (more information)" msgstr "расширенный формат (дополнительная информация)"
#: src/clients/compute/utils.cpp:466 msgid "show the stdout of the job (default)" msgstr "вывести стандартный выход задачи (по умолчанию)"
#: src/clients/compute/utils.cpp:470 msgid "show the stderr of the job" msgstr "вывести стандартную ошибку задачи"
#: src/clients/compute/utils.cpp:474 msgid "show the CE's error log of the job" msgstr "вывести ошибки системы при исполнении задачи"
#: src/clients/compute/utils.cpp:478 msgid "show the specified file from job's session directory" msgstr "показать заданный файл из рабочего каталога задачи"
#: src/clients/compute/utils.cpp:479 msgid "filepath" msgstr "путь к файлу"
#: src/clients/compute/utils.cpp:485 msgid "" "download directory (the job directory will be created in this directory)" msgstr "каталог загрузки (подкаталог задачи будет создан в этом каталоге)"
#: src/clients/compute/utils.cpp:487 msgid "dirname" msgstr "каталог"
#: src/clients/compute/utils.cpp:491 msgid "use the jobname instead of the short ID as the job directory name" msgstr "" "использовать имя задачи вместо краткого идентификатора в качестве названия " "каталога"
#: src/clients/compute/utils.cpp:496 msgid "force download (overwrite existing job directory)" msgstr "принудительная загрузка (перезаписать существующий каталог задачи)"
#: src/clients/compute/utils.cpp:502 msgid "instead of the status only the IDs of the selected jobs will be printed" msgstr "вместо состояния будут выведены только ярлыки указанных задач"
#: src/clients/compute/utils.cpp:506 msgid "sort jobs according to jobid, submissiontime or jobname" msgstr "сортировать задачи по идентификатору, времени запуска или имени"
#: src/clients/compute/utils.cpp:507 src/clients/compute/utils.cpp:510 msgid "order" msgstr "порядок"
#: src/clients/compute/utils.cpp:509 msgid "reverse sorting of jobs according to jobid, submissiontime or jobname" msgstr "" "сортировать задачи в обратном порядке по идентификатору, времени запуска или " "имени"
#: src/clients/compute/utils.cpp:513 msgid "show jobs where status information is unavailable" msgstr "перечислить задачи, для которых отсутствует информация о состоянии"
#: src/clients/compute/utils.cpp:517 msgid "show status information in JSON format" msgstr "вывести информацию о состоянии в формате JSON"
#: src/clients/compute/utils.cpp:523 msgid "" "remove the job from the local list of jobs even if the job is not found in " "the infosys" msgstr "" "удалить задачу из локального списка, даже если информация о ней \n" "отсутствует"
#: src/clients/compute/utils.cpp:530 msgid "submit test job given by the number" msgstr "запустить тестовую задачу под соответствующим номером"
#: src/clients/compute/utils.cpp:531 src/clients/compute/utils.cpp:535 msgid "int" msgstr "число"
#: src/clients/compute/utils.cpp:534 msgid "test job runtime specified by the number" msgstr "время исполнения тестовой задачи"
#: src/clients/compute/utils.cpp:541 msgid "only select jobs whose status is statusstr" msgstr "выполнить действие лишь над задачами в указанном состоянии"
#: src/clients/compute/utils.cpp:542 msgid "statusstr" msgstr "состояние"
#: src/clients/compute/utils.cpp:546 msgid "all jobs" msgstr "все задачи"
#: src/clients/compute/utils.cpp:552 msgid "jobdescription string describing the job to be submitted" msgstr "строка, содержащая описание запускаемой задачи"
#: src/clients/compute/utils.cpp:554 src/clients/compute/utils.cpp:560 #: src/clients/credentials/arcproxy.cpp:353 #: src/clients/credentials/arcproxy.cpp:360 #: src/clients/credentials/arcproxy.cpp:379 #: src/clients/credentials/arcproxy.cpp:386 #: src/clients/credentials/arcproxy.cpp:404 #: src/clients/credentials/arcproxy.cpp:408 #: src/clients/credentials/arcproxy.cpp:423 #: src/clients/credentials/arcproxy.cpp:432 #: src/clients/credentials/arcproxy.cpp:436 msgid "string" msgstr "строка"
#: src/clients/compute/utils.cpp:558 msgid "jobdescription file describing the job to be submitted" msgstr "файл, содержащий описание запускаемой задачи"
#: src/clients/compute/utils.cpp:566 msgid "select broker method (list available brokers with --listplugins flag)" msgstr "" "выбрать способ планировки (список доступных планировщиков выводится опцией --" "listplugins)"
#: src/clients/compute/utils.cpp:567 msgid "broker" msgstr "планировщик"
#: src/clients/compute/utils.cpp:570 msgid "the IDs of the submitted jobs will be appended to this file" msgstr "ярлыки запущенных задач будут занесены в этот файл"
#: src/clients/compute/utils.cpp:571 src/clients/compute/utils.cpp:598 #: src/clients/compute/utils.cpp:625 src/clients/compute/utils.cpp:633 #: src/clients/credentials/arcproxy.cpp:445 src/clients/data/arccp.cpp:603 #: src/clients/data/arcls.cpp:322 src/clients/data/arcmkdir.cpp:100 #: src/clients/data/arcrename.cpp:111 src/clients/data/arcrm.cpp:125 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:53 msgid "filename" msgstr "файл"
#: src/clients/compute/utils.cpp:575 msgid "do not perform any delegation for submitted jobs" msgstr ""
#: src/clients/compute/utils.cpp:579 msgid "perform X.509 delegation for submitted jobs" msgstr ""
#: src/clients/compute/utils.cpp:583 msgid "perform token delegation for submitted jobs" msgstr ""
#: src/clients/compute/utils.cpp:587 msgid "" "request at most this number of job instances submitted in single submit " "request" msgstr ""
#: src/clients/compute/utils.cpp:591 msgid "" "request at least this number of job instances submitted in single submit " "request" msgstr ""
#: src/clients/compute/utils.cpp:597 msgid "a file containing a list of jobIDs" msgstr "файл, содержащий ярлыки задач"
#: src/clients/compute/utils.cpp:602 msgid "skip jobs that are on a computing element with a given URL" msgstr "" "пропустить задачи, находящиеся на вычислительном " "ресурсе с заданным URL"
#: src/clients/compute/utils.cpp:608 msgid "submit jobs as dry run (no submission to batch system)" msgstr "запуск задач в режиме холостой прогонки (без засылки на счёт)"
#: src/clients/compute/utils.cpp:612 msgid "" "do not submit - dump job description in the language accepted by the target" msgstr "" "не выполнять засылку: распечатка описания задачи на языке, приемлемом " "назначением"
#: src/clients/compute/utils.cpp:618 msgid "prints info about installed user- and CA-certificates" msgstr "" "вывести информацию об установленных сертификатах пользователя и " "сертификационных агентств"
#: src/clients/compute/utils.cpp:619 src/clients/credentials/arcproxy.cpp:469 #: src/clients/data/arccp.cpp:637 src/clients/data/arcls.cpp:356 #: src/clients/data/arcmkdir.cpp:134 src/clients/data/arcrename.cpp:145 #: src/clients/data/arcrm.cpp:159 msgid "allow TLS connection which failed verification" msgstr ""
#: src/clients/compute/utils.cpp:624 #, c-format msgid "the file storing information about active jobs (default %s)" msgstr "файл с записью информации о задачах на счёте (по умолчанию %s)"
#: src/clients/compute/utils.cpp:632 src/clients/credentials/arcproxy.cpp:444 #: src/clients/data/arccp.cpp:602 src/clients/data/arcls.cpp:321 #: src/clients/data/arcmkdir.cpp:99 src/clients/data/arcrename.cpp:110 #: src/clients/data/arcrm.cpp:124 msgid "configuration file (default ~/.arc/client.conf)" msgstr "файл настроек (по умолчанию ~/.arc/client.conf)"
#: src/clients/compute/utils.cpp:635 src/clients/credentials/arcproxy.cpp:439 #: src/clients/data/arccp.cpp:597 src/clients/data/arcls.cpp:316 #: src/clients/data/arcmkdir.cpp:94 src/clients/data/arcrename.cpp:105 #: src/clients/data/arcrm.cpp:119 msgid "timeout in seconds (default 20)" msgstr "время ожидания в секундах (по умолчанию 20)"
#: src/clients/compute/utils.cpp:636 src/clients/credentials/arcproxy.cpp:440 #: src/clients/data/arccp.cpp:598 src/clients/data/arcls.cpp:317 #: src/clients/data/arcmkdir.cpp:95 src/clients/data/arcrename.cpp:106 #: src/clients/data/arcrm.cpp:120 msgid "seconds" msgstr "секунд(а/ы)"
#: src/clients/compute/utils.cpp:639 msgid "list the available plugins" msgstr "перечисление доступных подключаемых модулей"
#: src/clients/compute/utils.cpp:643 src/clients/credentials/arcproxy.cpp:449 #: src/clients/data/arccp.cpp:642 src/clients/data/arcls.cpp:361 #: src/clients/data/arcmkdir.cpp:139 src/clients/data/arcrename.cpp:150 #: src/clients/data/arcrm.cpp:164 #: src/hed/libs/compute/test_jobdescription.cpp:38 #: src/services/a-rex/grid-manager/gm_jobs.cpp:190 #: src/services/a-rex/grid-manager/inputcheck.cpp:81 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:67 msgid "FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG" msgstr "FATAL, ERROR, WARNING, INFO, VERBOSE или DEBUG"
#: src/clients/compute/utils.cpp:644 src/clients/credentials/arcproxy.cpp:450 #: src/clients/data/arccp.cpp:643 src/clients/data/arcls.cpp:362 #: src/clients/data/arcmkdir.cpp:140 src/clients/data/arcrename.cpp:151 #: src/clients/data/arcrm.cpp:165 #: src/hed/libs/compute/test_jobdescription.cpp:39 #: src/services/a-rex/grid-manager/gm_jobs.cpp:191 #: src/services/a-rex/grid-manager/inputcheck.cpp:82 #: src/services/a-rex/grid-manager/test_write_grami_file.cpp:68 msgid "debuglevel" msgstr "уровень"
#: src/clients/compute/utils.cpp:646 src/clients/credentials/arcproxy.cpp:473 #: src/clients/data/arccp.cpp:646 src/clients/data/arcls.cpp:365 #: src/clients/data/arcmkdir.cpp:143 src/clients/data/arcrename.cpp:154 #: src/clients/data/arcrm.cpp:168 msgid "print version information" msgstr "вывести информацию о версии"
src/clients/data/arcrename.cpp:154 #: src/clients/data/arcrm.cpp:168 msgid "print version information" msgstr "вывеÑти информацию о верÑии" #: src/clients/compute/utils.cpp:652 src/clients/data/arccp.cpp:607 #: src/clients/data/arcls.cpp:326 src/clients/data/arcmkdir.cpp:104 #: src/clients/data/arcrename.cpp:115 src/clients/data/arcrm.cpp:129 msgid "do not perform any authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:656 src/clients/data/arccp.cpp:612 #: src/clients/data/arcls.cpp:331 src/clients/data/arcmkdir.cpp:109 #: src/clients/data/arcrename.cpp:120 src/clients/data/arcrm.cpp:134 msgid "perform X.509 authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:660 src/clients/data/arccp.cpp:617 #: src/clients/data/arcls.cpp:336 src/clients/data/arcmkdir.cpp:114 #: src/clients/data/arcrename.cpp:125 src/clients/data/arcrm.cpp:139 msgid "perform token authentication for opened connections" msgstr "" #: src/clients/compute/utils.cpp:664 src/clients/credentials/arcproxy.cpp:454 #: src/clients/data/arccp.cpp:622 src/clients/data/arcls.cpp:341 #: src/clients/data/arcmkdir.cpp:119 src/clients/data/arcrename.cpp:130 #: src/clients/data/arcrm.cpp:144 msgid "force using CA certificates configuration provided by OpenSSL" msgstr "" #: src/clients/compute/utils.cpp:668 src/clients/credentials/arcproxy.cpp:459 #: src/clients/data/arccp.cpp:627 src/clients/data/arcls.cpp:346 #: src/clients/data/arcmkdir.cpp:124 src/clients/data/arcrename.cpp:135 #: src/clients/data/arcrm.cpp:149 msgid "" "force using CA certificates configuration for Grid services (typically IGTF)" msgstr "" #: src/clients/compute/utils.cpp:672 src/clients/credentials/arcproxy.cpp:464 msgid "" "force using CA certificates configuration for Grid services (typically IGTF) " "and one provided by OpenSSL" msgstr "" #: src/clients/compute/utils.cpp:681 src/clients/compute/utils.cpp:688 #: src/clients/compute/utils.cpp:695 #, fuzzy msgid "Conflicting delegation types specified." msgstr "вывеÑти токен Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ð¾Ð³Ð¾ идентификатора" #: src/clients/compute/utils.cpp:727 src/clients/compute/utils.cpp:734 #: src/clients/compute/utils.cpp:741 src/clients/data/utils.cpp:41 #: src/clients/data/utils.cpp:48 src/clients/data/utils.cpp:55 #, fuzzy msgid "Conflicting authentication types specified." msgstr "Ðе указан файл наÑтроек" #: src/clients/credentials/arcproxy.cpp:151 #, c-format msgid "There are %d user certificates existing in the NSS database" msgstr "Ð’ базе данных NSS обнаружено %d Ñертификата пользователÑ" #: src/clients/credentials/arcproxy.cpp:167 #, c-format msgid "Number %d is with nickname: %s%s" msgstr "Ðомер %d Ñ ÐºÑ€Ð°Ñ‚ÐºÐ¸Ð¼ именем: %s%s" #: src/clients/credentials/arcproxy.cpp:176 #, c-format msgid " expiration time: %s " msgstr " дейÑтвителен до: %s " #: src/clients/credentials/arcproxy.cpp:180 #, c-format msgid " certificate dn: %s" msgstr " DN Ñертификата: %s" #: src/clients/credentials/arcproxy.cpp:181 #, c-format msgid " issuer dn: %s" msgstr " DN Ñмитента: %s" #: src/clients/credentials/arcproxy.cpp:182 #, c-format msgid " serial number: %d" msgstr " Серийный номер: %d" #: src/clients/credentials/arcproxy.cpp:186 #, c-format msgid "Please choose the one you would use (1-%d): " msgstr "ПожалуйÑта, выберите то, что будет иÑпользоватьÑÑ (1-%d): " #: src/clients/credentials/arcproxy.cpp:251 msgid "" "The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources." 
msgstr "" "Команда arcproxy Ñоздаёт доверенноÑть из пары закрытый/открытый ключ\n" "Ð´Ð»Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ñтупа к гриду." #: src/clients/credentials/arcproxy.cpp:253 #, fuzzy msgid "" "Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start\n" " from now)\n" "\n" " validityEnd=time\n" "\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and\n" " validityEnd not specified, the default is 12 hours for local proxy, and\n" " 168 hours for delegated proxy on myproxy server)\n" "\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, " "the\n" " default is the minimum value of 12 hours and validityPeriod)\n" "\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " "server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum " "value\n" " of 12 hours and validityPeriod (which is lifetime of the delegated proxy " "on\n" " myproxy server))\n" "\n" " proxyPolicy=policy content\n" "\n" " proxyPolicyFile=policy file\n" "\n" " keybits=number - length of the key to generate. Default is 2048 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n" "\n" " signingAlgorithm=name - signing algorithm to use for signing public key " "of\n" " proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256,\n" " sha384, sha512 and inherit (use algorithm of signing certificate). " "Default\n" " is inherit. With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n" "\n" " identity - identity subject name of proxy certificate.\n" "\n" " issuer - issuer subject name of proxy certificate.\n" "\n" " ca - subject name of CA which issued initial certificate.\n" "\n" " path - file system path to file containing proxy.\n" "\n" " type - type of proxy certificate.\n" "\n" " validityStart - timestamp when proxy validity starts.\n" "\n" " validityEnd - timestamp when proxy validity ends.\n" "\n" " validityPeriod - duration of proxy validity in seconds.\n" "\n" " validityLeft - duration of proxy validity left in seconds.\n" "\n" " vomsVO - VO name represented by VOMS attribute\n" "\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n" "\n" " vomsIssuer - subject of service which issued VOMS certificate\n" "\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n" "\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n" "\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n" "\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n" "\n" " proxyPolicy\n" "\n" " keybits - size of proxy certificate key in bits.\n" "\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n" "\n" "Items are printed in requested order and are separated by newline.\n" "If item has multiple values they are printed in same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n" "\n" " myproxy - for accessing credentials at MyProxy service\n" "\n" " myproxynew - for creating credentials at MyProxy service\n" "\n" " all - for any purspose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n" "\n" " int - interactively request password from console\n" "\n" " stdin - read password from standard input delimited by newline\n" "\n" " file:filename - read password from file 
" stream:# - read password from input stream number #.\n"
"   Currently only 0 (standard input) is supported."
msgstr ""
"Поддерживаемые ограничения:\n"
" validityStart=время (например, 2008-05-29T10:20:30Z; если не указано, то "
"начинается немедленно)\n"
" validityEnd=время\n"
" validityPeriod=время (например, 43200, или 12h, или 12H; если не указаны "
"ни validityPeriod,\n"
" ни validityEnd, то срок действия по умолчанию составляет 12 часов для "
"локальной доверенности,\n"
" и 168 часов для делегированной доверенности на сервере MyProxy)\n"
" vomsACvalidityPeriod=время (например, 43200, или 12h, или 12H; если не "
"указано, то используется\n"
" наименьшее между 12 часами и значением validityPeriod)\n"
" myproxyvalidityPeriod=время (срок годности доверенности, делегированной "
"через сервер MyProxy\n"
" например, 43200, или 12h, или 12H; если не указано, то используется "
"наименьшее между 12 часами\n"
" и значением validityPeriod - сроком годности доверенности, "
"делегированной через сервер MyProxy)\n"
" proxyPolicy=содержимое политики\n"
" proxyPolicyFile=файл политики\n"
" keybits=число - длина генерируемого ключа. По умолчанию - 2048 бит.\n"
" Специальное значение 'inherit' означает использование длины ключа "
"подписывающего сертификата.\n"
" signingAlgorithm=название - алгоритм, используемый для подписания "
"открытого ключа или доверенности.\n"
" По умолчанию - sha1. Возможные значения: sha1, sha2 (сокращение от "
"sha256), sha224, sha256, sha384,\n"
" sha512 и inherit (использовать алгоритм подписывающего сертификата). По "
"умолчанию используется inherit.\n"
" Старые системы поддерживают лишь sha1.\n"
"\n"
"Поддерживаемые поля информации:\n"
" subject - имя субъекта доверенности.\n"
" identity - идентифицируемое имя субъекта доверенности.\n"
" issuer - имя субъекта, выдавшего доверенность.\n"
" ca - имя субъекта агентства, выдавшего исходный сертификат\n"
" path - локальный путь к файлу, содержащему доверенность.\n"
" type - тип доверенности.\n"
" validityStart - время начала действия доверенности.\n"
" validityEnd - время окончания действия доверенности.\n"
" validityPeriod - продолжительность годности доверенности в секундах.\n"
" validityLeft - оставшаяся продолжительность годности доверенности в "
"секундах.\n"
" vomsVO - имя виртуальной организации, указанное в атрибуте VOMS.\n"
" vomsSubject - субъект сертификата, которому был присвоен атрибут VOMS.\n"
" vomsIssuer - субъект службы, выдавшей сертификат VOMS.\n"
" vomsACvalidityStart - время начала действия атрибута VOMS.\n"
" vomsACvalidityEnd - время окончания действия атрибута VOMS.\n"
" vomsACvalidityPeriod - продолжительность годности атрибута VOMS в "
"секундах.\n"
" vomsACvalidityLeft - оставшаяся продолжительность годности атрибута VOMS "
"в секундах.\n"
" proxyPolicy - содержимое политики\n"
" keybits - длина ключа доверенности в битах.\n"
" signingAlgorithm - алгоритм, используемый при подписи сертификата.\n"
"Значения выводятся в порядке запроса, каждое с новой строки.\n"
"Если полю соответствуют несколько значений, они выводятся в строку и "
"разделяются |.\n"
"\n"
"Поддерживаемые назначения паролей:\n"
" key - для чтения закрытых ключей\n"
" myproxy - для доступа к сертификатам на сервере MyProxy\n"
" myproxynew - для создания сертификатов на сервере MyProxy\n"
" all - с любой целью.\n"
"\n"
"Поддерживаемые источники паролей:\n"
" quoted string (\"password\") - явно указанный пароль\n"
" int - интерактивный запрос на ввод пароля с терминала\n"
" stdin - чтение пароля со стандартного ввода по переводу строки\n"
" file:filename - чтение пароля из файла filename\n"
" stream:# - чтение пароля из входного потока номер #.\n"
" На текущий момент поддерживается только 0 (стандартный ввод).\n"

#: src/clients/credentials/arcproxy.cpp:315
msgid "path to the proxy file"
msgstr "путь к файлу доверенности"

#: src/clients/credentials/arcproxy.cpp:316
#: src/clients/credentials/arcproxy.cpp:320
#: src/clients/credentials/arcproxy.cpp:324
#: src/clients/credentials/arcproxy.cpp:328
#: src/clients/credentials/arcproxy.cpp:332
#: src/clients/credentials/arcproxy.cpp:336 src/clients/data/arccp.cpp:560
msgid "path"
msgstr "путь"

#: src/clients/credentials/arcproxy.cpp:319
msgid ""
"path to the certificate file, it can be either PEM, DER, or PKCS12 formatted"
msgstr ""
"путь к файлу сертификата, который может быть в формате PEM, DER, или PKCS12"

#: src/clients/credentials/arcproxy.cpp:323
msgid ""
"path to the private key file, if the certificate is in PKCS12 format, then "
"no need to give private key"
msgstr ""
"путь к закрытому ключу; если сертификат указан в формате PKCS12, закрытый "
"ключ не нужен"

#: src/clients/credentials/arcproxy.cpp:327
msgid ""
"path to the trusted certificate directory, only needed for the VOMS client "
"functionality"
msgstr ""
"путь к каталогу с доверяемыми сертификатами, используется только клиентом "
"VOMS"

#: src/clients/credentials/arcproxy.cpp:331
msgid ""
"path to the top directory of VOMS *.lsc files, only needed for the VOMS "
"client functionality"
msgstr ""
"путь к корневому каталогу с файлами VOMS *.lsc, используется только "
"клиентом VOMS"

#: src/clients/credentials/arcproxy.cpp:335
msgid "path to the VOMS server configuration file"
msgstr "путь к файлу настроек серверов VOMS"

#: src/clients/credentials/arcproxy.cpp:339
#, fuzzy
msgid ""
"voms<:command>. Specify VOMS server\n"
" More than one VOMS server can be specified like this:\n"
" --voms VOa:command1 --voms VOb:command2.\n"
" :command is optional, and is used to ask for specific attributes (e.g: "
"roles)\n"
" command options are:\n"
"\n"
" all --- put all of this DN's attributes into AC;\n"
"\n"
" list --- list all of the DN's attribute, will not create AC extension;\n"
"\n"
" /Role=yourRole --- specify the role, if this DN\n"
" has such a role, the role will be put into AC;\n"
"\n"
" /voname/groupname/Role=yourRole --- specify the VO, group and role; if "
"this DN\n"
" has such a role, the role will be put into AC.\n"
"\n"
" If this option is not specified values from configuration files are "
"used.\n"
" To avoid anything to be used specify -S with empty value.\n"
msgstr ""
"voms<:инструкция>. Описание сервера VOMS (несколько серверов задаются\n"
" следующим образом: --voms VOa:инструкция1 --voms VOb:инструкция2).\n"
" <:инструкция> не обязательна и служит для запроса дополнительных\n"
" атрибутов (например, ролей)\n"
" Инструкции:\n"
" all --- добавить все атрибуты, доступные данному пользователю;\n"
" list --- перечислить все атрибуты, доступные данному пользователю,\n"
" без создания расширения AC; \n"
" /Role=вашаРоль --- указать желаемую роль; если данный пользователь\n"
" может играть такую роль, она будет добавлена в AC;\n"
" /voname/groupname/Role=вашаРоль --- указать ВО, группу и роль; если\n"
" данный пользователь может играть такую роль, она\n"
" будет добавлена.\n"
" Если эта опция не задана, будут использоваться значения из файлов "
"настроек.\n"
" Для предотвращения использования чего-либо, укажите -S без значения.\n"

#: src/clients/credentials/arcproxy.cpp:356
#, fuzzy
msgid ""
"group<:role>. Specify ordering of attributes\n"
" Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/"
"testers:Tester\n"
" or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/"
"testers:Tester\n"
" Note that it does not make sense to specify the order if you have two or "
"more different VOMS servers specified"
msgstr ""
"group<:role>. Указанная последовательность атрибутов \n"
" Пример: --order /knowarc.eu/coredev:Developer,/knowarc.eu/"
"testers:Tester \n"
" или: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/"
"testers:Tester \n"
" Имейте в виду, что при использовании нескольких серверов VOMS не имеет "
"смысла указывать последовательность атрибутов"

#: src/clients/credentials/arcproxy.cpp:363
msgid "use GSI communication protocol for contacting VOMS services"
msgstr "использовать протокол GSI для контакта служб VOMS"

#: src/clients/credentials/arcproxy.cpp:366
#, fuzzy
msgid ""
"use HTTP communication protocol for contacting VOMS services that provide "
"RESTful access\n"
" Note for RESTful access, 'list' command and multiple VOMS servers are "
"not supported\n"
msgstr ""
"использовать протокол HTTP для связи со службами VOMS, поддерживающими "
"доступ типа REST \n"
" Внимание: для доступа REST, команда 'list' и множественный сервер VOMS "
"не поддерживаются\n"

#: src/clients/credentials/arcproxy.cpp:370
msgid ""
"use old communication protocol for contacting VOMS services instead of "
"RESTful access\n"
msgstr ""
"использовать устаревший протокол связи со службами VOMS вместо доступа по "
"протоколу REST\n"

#: src/clients/credentials/arcproxy.cpp:373
msgid ""
"this option is not functional (old GSI proxies are not supported anymore)"
msgstr "опция недоступна (старые доверенности GSI более не поддерживаются)"

#: src/clients/credentials/arcproxy.cpp:376
msgid "print all information about this proxy."
msgstr "вывести всю информацию об этой доверенности."

#: src/clients/credentials/arcproxy.cpp:379
msgid "print selected information about this proxy."
msgstr "вывести избранную информацию об этой доверенности."
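
# NOTE: a minimal usage sketch combining the VOMS options documented above.
# The VO name "knowarc.eu" and the role "Developer" are illustrative values
# taken from the examples already embedded in the msgid texts, not a
# recommendation for any real VO:
#   arcproxy --voms knowarc.eu:/knowarc.eu/Role=Developer \
#            --order /knowarc.eu/coredev:Developer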

#: src/clients/credentials/arcproxy.cpp:382
msgid "remove proxy"
msgstr "удаление доверенности"

#: src/clients/credentials/arcproxy.cpp:385
msgid ""
"username to MyProxy server (if missing subject of user certificate is used)"
msgstr ""
"имя пользователя сервера MyProxy (при отсутствии имени субъекта, или при "
"применении сертификата пользователя)"

#: src/clients/credentials/arcproxy.cpp:390
#, fuzzy
msgid ""
"don't prompt for a credential passphrase, when retrieving a credential "
"from a MyProxy server.\n"
" The precondition of this choice is that the credential was PUT onto\n"
" the MyProxy server without a passphrase by using the\n"
" -R (--retrievable_by_cert) option.\n"
" This option is specific to the GET command when contacting a Myproxy\n"
" server."
msgstr ""
"не запрашивать пароль учётных данных при получении этих \n"
" данных с сервера MyProxy. \n"
" Это возможно при условии, если данные были сохранены методом PUT\n"
" на сервере MyProxy без пароля, используя опцию -R (--"
"retrievable_by_cert) \n"
" при выполнении операции PUT в отношении сервера Myproxy. \n"
" Эта опция используется только командой GET в отношении сервера Myproxy."

#: src/clients/credentials/arcproxy.cpp:401
#, fuzzy
msgid ""
"Allow specified entity to retrieve credential without passphrase.\n"
" This option is specific to the PUT command when contacting a Myproxy\n"
" server."
msgstr ""
"Разрешить указанному клиенту получать учётные данные без пароля.\n"
" Эта опция используется только командой PUT в отношении сервера Myproxy."

#: src/clients/credentials/arcproxy.cpp:407
msgid "hostname[:port] of MyProxy server"
msgstr "hostname[:port] сервера MyProxy"

#: src/clients/credentials/arcproxy.cpp:412
#, fuzzy
msgid ""
"command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or "
"DESTROY.\n"
" PUT -- put a delegated credentials to the MyProxy server;\n"
"\n"
" GET -- get a delegated credentials from the MyProxy server;\n"
"\n"
" INFO -- get and present information about credentials stored at the "
"MyProxy server;\n"
"\n"
" NEWPASS -- change password protecting credentials stored at the MyProxy "
"server;\n"
"\n"
" DESTROY -- wipe off credentials stored at the MyProxy server;\n"
"\n"
" Local credentials (certificate and key) are not necessary except in "
"case of PUT.\n"
" MyProxy functionality can be used together with VOMS functionality.\n"
" --voms and --vomses can be used for Get command if VOMS attributes\n"
" is required to be included in the proxy.\n"
msgstr ""
"инструкция серверу MyProxy. Возможны следующие инструкции: PUT, GET, INFO, "
"NEWPASS или DESTROY.\n"
" PUT -- сохранить делегированный сертификат на сервере MyProxy;\n"
" GET -- получить делегированный сертификат с сервера MyProxy,\n"
" INFO -- вывести информацию о сертификатах, хранящихся на сервере "
"MyProxy; \n"
" NEWPASS -- изменить пароль, защищающий сертификаты, хранящиеся на "
"сервере MyProxy; \n"
" DESTROY -- удалить сертификаты, хранящиеся на сервере MyProxy; \n"
" Личные сертификаты и ключи не требуются, за исключением инструкции PUT.\n"
" Инструкции MyProxy и VOMS могут использоваться одновременно.\n"
" Опции --voms и --vomses могут быть использованы с командой Get, если\n"
" в доверенность необходимо включить атрибуты VOMS.\n"

#: src/clients/credentials/arcproxy.cpp:427
#, fuzzy
msgid ""
"use NSS credential database in default Mozilla profiles, including "
"Firefox, Seamonkey and Thunderbird."
msgstr "" "иÑпользовать базу данных параметров доÑтупа NSS из профилей Mozilla \n" " по умолчанию, Ð²ÐºÐ»ÑŽÑ‡Ð°Ñ Firefox, Seamonkey и Thunderbird.\n" #: src/clients/credentials/arcproxy.cpp:431 msgid "proxy constraints" msgstr "Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñти" #: src/clients/credentials/arcproxy.cpp:435 msgid "password destination=password source" msgstr "назначение паролÑ=иÑточник паролÑ" #: src/clients/credentials/arcproxy.cpp:479 msgid "" "RESTful and old VOMS communication protocols can't be requested " "simultaneously." msgstr "Протоколы REST и уÑтаревший VOMS не могут быть запрошены одновременно." #: src/clients/credentials/arcproxy.cpp:509 #: src/clients/credentials/arcproxy.cpp:1220 msgid "Failed configuration initialization." msgstr "Ðе удалоÑÑŒ загрузить наÑтройки." #: src/clients/credentials/arcproxy.cpp:544 msgid "" "Failed to find certificate and/or private key or files have improper " "permissions or ownership." msgstr "" "Ðе удалоÑÑŒ обнаружить Ñертификат и/или закрытый ключ, либо у файлов " "неподходÑщие параметры доÑтупа." #: src/clients/credentials/arcproxy.cpp:545 #: src/clients/credentials/arcproxy.cpp:557 msgid "You may try to increase verbosity to get more information." msgstr "" "Ð’Ñ‹ можете попытатьÑÑ ÑƒÐ²ÐµÐ»Ð¸Ñ‡Ð¸Ñ‚ÑŒ уровень детальноÑти Ð´Ð»Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ " "дополнительной информации." #: src/clients/credentials/arcproxy.cpp:553 msgid "Failed to find CA certificates" msgstr "Ðевозможно найти Ñертификаты CA" #: src/clients/credentials/arcproxy.cpp:554 msgid "" "Cannot find the CA certificates directory path, please set environment " "variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file." msgstr "" "Ðе удалоÑÑŒ найти каталог Ñ Ñертификатами агентÑтв CA. ПожалуйÑта, задайте " "переменную Ñреды X509_CERT_DIR, или значение cacertificatesdirectory в файле " "наÑтроек." #: src/clients/credentials/arcproxy.cpp:558 msgid "" "The CA certificates directory is required for contacting VOMS and MyProxy " "servers." msgstr "" "Каталог Ñертификатов агентÑтв CA необходим Ð´Ð»Ñ ÑвÑзи Ñ Ñерверами VOMS и " "MyProxy." #: src/clients/credentials/arcproxy.cpp:570 msgid "" "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also not vomses location information in user's configuration file;\n" "Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the " "corresponding sub-directory" msgstr "" "$X509_VOMS_FILE и $X509_VOMSES не наÑтроены;\n" "Пользователь не указал раÑположение файла vomses;\n" "РаÑположение файла vomses не найдено в файле наÑтроек пользователÑ;\n" "Файл vomses не обнаружен в ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" "vomses,\n" "/etc/vomses, /etc/grid-security/vomses, а также в ÑоответÑтвующих " "подкаталогах" #: src/clients/credentials/arcproxy.cpp:615 msgid "Wrong number of arguments!" msgstr "ÐедопуÑтимое чиÑло аргументов!" #: src/clients/credentials/arcproxy.cpp:623 #: src/clients/credentials/arcproxy.cpp:647 #: src/clients/credentials/arcproxy.cpp:780 msgid "" "Cannot find the path of the proxy file, please setup environment " "X509_USER_PROXY, or proxypath in a configuration file" msgstr "" "Ðе удалоÑÑŒ найти доверенноÑть пользователÑ. 
"Пожалуйста, задайте переменную среды X509_USER_PROXY, или значение "
"proxypath в файле настроек"

#: src/clients/credentials/arcproxy.cpp:630
#, c-format
msgid "Cannot remove proxy file at %s"
msgstr "Невозможно удалить файл доверенности в %s"

#: src/clients/credentials/arcproxy.cpp:632
#, c-format
msgid "Cannot remove proxy file at %s, because it's not there"
msgstr "Невозможно удалить файл доверенности в %s, потому что его там нет"

#: src/clients/credentials/arcproxy.cpp:641
msgid "Bearer token is available. It is preferred for job submission."
msgstr "Присутствует маркер доступа. Предпочтителен для засылки задач."

#: src/clients/credentials/arcproxy.cpp:653
#: src/clients/credentials/arcproxy.cpp:786
#, c-format
msgid ""
"Cannot find file at %s for getting the proxy. Please make sure this file "
"exists."
msgstr ""
"Не удалось найти файл по адресу %s, содержащий доверенность. Пожалуйста, "
"убедитесь, что файл существует."

#: src/clients/credentials/arcproxy.cpp:659
#: src/clients/credentials/arcproxy.cpp:792
#, c-format
msgid "Cannot process proxy file at %s."
msgstr "Невозможно обработать файл доверенности в %s."

#: src/clients/credentials/arcproxy.cpp:662
#, c-format
msgid "Subject: %s"
msgstr "Субъект: %s"

#: src/clients/credentials/arcproxy.cpp:663
#, c-format
msgid "Issuer: %s"
msgstr "Кем выдана: %s"

#: src/clients/credentials/arcproxy.cpp:664
#, c-format
msgid "Identity: %s"
msgstr "Личные данные: %s"

#: src/clients/credentials/arcproxy.cpp:666
msgid "Time left for proxy: Proxy expired"
msgstr "Доверенность действительна на: Срок действия доверенности вышел"

#: src/clients/credentials/arcproxy.cpp:668
msgid "Time left for proxy: Proxy not valid yet"
msgstr "Доверенность действительна на: Доверенность пока недействительна"

#: src/clients/credentials/arcproxy.cpp:670
#, c-format
msgid "Time left for proxy: %s"
msgstr "Доверенность действительна на: %s"

#: src/clients/credentials/arcproxy.cpp:671
#, c-format
msgid "Proxy path: %s"
msgstr "Расположение доверенности: %s"

#: src/clients/credentials/arcproxy.cpp:672
#, c-format
msgid "Proxy type: %s"
msgstr "Тип доверенности: %s"

#: src/clients/credentials/arcproxy.cpp:673
#, c-format
msgid "Proxy key length: %i"
msgstr "Длина ключа доверенности: %i"

#: src/clients/credentials/arcproxy.cpp:674
#, c-format
msgid "Proxy signature: %s"
msgstr "Подпись доверенности: %s"

#: src/clients/credentials/arcproxy.cpp:683
msgid "AC extension information for VO "
msgstr "Информация о расширении AC для VO "

#: src/clients/credentials/arcproxy.cpp:686
msgid "Error detected while parsing this AC"
msgstr "Обнаружена ошибка при разборе сертификата атрибута"

#: src/clients/credentials/arcproxy.cpp:699
msgid "AC is invalid: "
msgstr "Сертификат атрибута недействителен: "

#: src/clients/credentials/arcproxy.cpp:729
#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:222
#, c-format
msgid "Malformed VOMS AC attribute %s"
msgstr "Неверный атрибут VOMS AC %s"

#: src/clients/credentials/arcproxy.cpp:760
msgid "Time left for AC: AC is not valid yet"
msgstr "Сертификат атрибута действителен на: Сертификат пока недействителен"

#: src/clients/credentials/arcproxy.cpp:762
msgid "Time left for AC: AC has expired"
msgstr ""
"Сертификат атрибута действителен на: Срок действия сертификата закончился"

#: src/clients/credentials/arcproxy.cpp:764
#, c-format
msgid "Time left for AC: %s"
msgstr "Сертификат атрибута действителен на: %s"

#: src/clients/credentials/arcproxy.cpp:871
#, c-format
msgid "Information item '%s' is not known"
msgstr "ÐеизвеÑтный тип информации '%s'" #: src/clients/credentials/arcproxy.cpp:883 msgid "" "Cannot find the user certificate path, please setup environment " "X509_USER_CERT, or certificatepath in a configuration file" msgstr "" "Ðе удалоÑÑŒ найти путь к открытому ключу пользователÑ. ПожалуйÑта, задайте " "переменную Ñреды X509_USER_CERT, или значение certificatepath в файле " "наÑтроек" #: src/clients/credentials/arcproxy.cpp:887 msgid "" "Cannot find the user private key path, please setup environment " "X509_USER_KEY, or keypath in a configuration file" msgstr "" "Ðе удалоÑÑŒ найти закрытый ключ пользователÑ. ПожалуйÑта, задайте переменную " "Ñреды X509_USER_KEY, или значение keypath в файле наÑтроек" #: src/clients/credentials/arcproxy.cpp:911 #, c-format msgid "" "Cannot parse password source expression %s it must be of type=source format" msgstr "" "Ðе удалоÑÑŒ разобрать выражение %s Ð´Ð»Ñ Ð¸Ñточника паролÑ: формат должен быть " "type=source" #: src/clients/credentials/arcproxy.cpp:928 #, c-format msgid "" "Cannot parse password type %s. Currently supported values are " "'key','myproxy','myproxynew' and 'all'." msgstr "" "Ðе удалоÑÑŒ разобрать тип Ð¿Ð°Ñ€Ð¾Ð»Ñ %s. Ð’ наÑтоÑщий момент поддерживаютÑÑ " "Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ 'key','myproxy','myproxynew' и 'all'." #: src/clients/credentials/arcproxy.cpp:943 #, fuzzy, c-format msgid "" "Cannot parse password source %s it must be of source_type or source_type:" "data format. Supported source types are int, stdin, stream, file." msgstr "" "Ðе удалоÑÑŒ разобрать иÑточник Ð¿Ð°Ñ€Ð¾Ð»Ñ %s. Формат должен быть source_type или " "source_type:data format. ПоддерживаютÑÑ Ñледующие типы иÑточников: int,stdin," "stream,file." #: src/clients/credentials/arcproxy.cpp:957 msgid "Only standard input is currently supported for password source." msgstr "" "Ðа наÑтоÑщий момент единÑтвенным поддерживаемым иÑточником Ð¿Ð°Ñ€Ð¾Ð»Ñ ÑвлÑетÑÑ " "Ñтандартный вход." #: src/clients/credentials/arcproxy.cpp:962 #, fuzzy, c-format msgid "" "Cannot parse password source type %s. Supported source types are int, stdin, " "stream, file." msgstr "" "Ðе удалоÑÑŒ разобрать тип иÑточника Ð¿Ð°Ñ€Ð¾Ð»Ñ %s. ПоддерживаютÑÑ Ñледующие типы " "иÑточников: int,stdin,stream,file." #: src/clients/credentials/arcproxy.cpp:1001 msgid "The start, end and period can't be set simultaneously" msgstr "Опции start, end и period не могут быть заданы одновременно" #: src/clients/credentials/arcproxy.cpp:1007 #, c-format msgid "The start time that you set: %s can't be recognized." msgstr "Ðевозможно раÑпознать заданное Вами Ð²Ñ€ÐµÐ¼Ñ Ð½Ð°Ñ‡Ð°Ð»Ð°: %s." #: src/clients/credentials/arcproxy.cpp:1014 #, c-format msgid "The period that you set: %s can't be recognized." msgstr "Ðевозможно раÑпознать заданный Вами интервал: %s." #: src/clients/credentials/arcproxy.cpp:1021 #, c-format msgid "The end time that you set: %s can't be recognized." msgstr "Ðевозможно раÑпознать заданное Вами Ð²Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ: %s." #: src/clients/credentials/arcproxy.cpp:1030 #, c-format msgid "The end time that you set: %s is before start time: %s." msgstr "Заданное Вами Ð²Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ: %s предшеÑтвует времени начала: %s." 

#: src/clients/credentials/arcproxy.cpp:1041
#, c-format
msgid "WARNING: The start time that you set: %s is before current time: %s"
msgstr ""
"ПРЕДУПРЕЖДЕНИЕ: Заданное Вами время начала: %s предшествует текущему "
"времени: %s"

#: src/clients/credentials/arcproxy.cpp:1044
#, c-format
msgid "WARNING: The end time that you set: %s is before current time: %s"
msgstr ""
"ПРЕДУПРЕЖДЕНИЕ: Заданное Вами время окончания: %s предшествует текущему "
"времени: %s"

#: src/clients/credentials/arcproxy.cpp:1054
#, c-format
msgid "The VOMS AC period that you set: %s can't be recognized."
msgstr "Невозможно распознать заданный Вами период VOMS AC: %s."

#: src/clients/credentials/arcproxy.cpp:1072
#, c-format
msgid "The MyProxy period that you set: %s can't be recognized."
msgstr "Невозможно распознать заданный Вами период MyProxy: %s."

#: src/clients/credentials/arcproxy.cpp:1087
#, c-format
msgid "The keybits constraint is wrong: %s."
msgstr "Недопустимое значение ограничения keybits: %s."

#: src/clients/credentials/arcproxy.cpp:1101
msgid "The NSS database can not be detected in the Firefox profile"
msgstr "База данных NSS в профиле Firefox не обнаружена"

#: src/clients/credentials/arcproxy.cpp:1110
#, c-format
msgid ""
"There are %d NSS base directories where the certificate, key, and module "
"databases live"
msgstr ""
"Обнаружено %d основных директорий NSS, содержащих базы данных "
"сертификатов, ключей и модулей"

#: src/clients/credentials/arcproxy.cpp:1112
#, c-format
msgid "Number %d is: %s"
msgstr "Номер %d: %s"

#: src/clients/credentials/arcproxy.cpp:1114
#, c-format
msgid "Please choose the NSS database you would like to use (1-%d): "
msgstr "Пожалуйста, выберите базу данных NSS для использования (1-%d): "

#: src/clients/credentials/arcproxy.cpp:1130
#, c-format
msgid "NSS database to be accessed: %s\n"
msgstr "Будет использоваться база данных NSS %s\n"

#: src/clients/credentials/arcproxy.cpp:1201
#, c-format
msgid "Certificate to use is: %s"
msgstr "Используемый сертификат: %s"

#: src/clients/credentials/arcproxy.cpp:1252
#: src/clients/credentials/arcproxy.cpp:1366
msgid "Proxy generation succeeded"
msgstr "Доверенность успешно создана"

#: src/clients/credentials/arcproxy.cpp:1253
#: src/clients/credentials/arcproxy.cpp:1367
#, c-format
msgid "Your proxy is valid until: %s"
msgstr "Ваша доверенность действительна до: %s"

#: src/clients/credentials/arcproxy.cpp:1272
msgid ""
"The old GSI proxies are not supported anymore. Please do not use -O/--old "
"option."
msgstr ""
"Старые доверенности GSI более не поддерживаются. Пожалуйста, не "
"используйте опцию -O/--old."

#: src/clients/credentials/arcproxy.cpp:1291 src/hed/mcc/tls/MCCTLS.cpp:182
#: src/hed/mcc/tls/MCCTLS.cpp:215 src/hed/mcc/tls/MCCTLS.cpp:241
msgid "VOMS attribute parsing failed"
msgstr "Сбой обработки атрибутов VOMS"

#: src/clients/credentials/arcproxy.cpp:1293
msgid "Myproxy server did not return proxy with VOMS AC included"
msgstr "Сервер Myproxy не прислал сертификат с расширением VOMS AC"

#: src/clients/credentials/arcproxy.cpp:1314
msgid "Proxy generation failed: No valid certificate found."
msgstr "Сбой создания доверенности: Не обнаружено действительных сертификатов."

#: src/clients/credentials/arcproxy.cpp:1319
msgid "Proxy generation failed: No valid private key found."
msgstr ""
"Сбой создания доверенности: Не обнаружено действительных закрытых ключей."

#: src/clients/credentials/arcproxy.cpp:1323
#, c-format
msgid "Your identity: %s"
msgstr "Ваши личные данные: %s"

#: src/clients/credentials/arcproxy.cpp:1325
msgid "Proxy generation failed: Certificate has expired."
msgstr "Сбой создания доверенности: Срок действия сертификата истёк."

#: src/clients/credentials/arcproxy.cpp:1329
msgid "Proxy generation failed: Certificate is not valid yet."
msgstr "Сбой создания доверенности: Срок действия сертификата ещё не начался."

#: src/clients/credentials/arcproxy.cpp:1340
msgid "Proxy generation failed: Failed to create temporary file."
msgstr "Сбой создания доверенности: Сбой создания временного файла."

#: src/clients/credentials/arcproxy.cpp:1348
msgid "Proxy generation failed: Failed to retrieve VOMS information."
msgstr "Сбой создания доверенности: Сбой получения информации VOMS."

#: src/clients/credentials/arcproxy_myproxy.cpp:100
msgid "Succeeded to get info from MyProxy server"
msgstr "Удалось получить информацию с сервера MyProxy"

#: src/clients/credentials/arcproxy_myproxy.cpp:144
msgid "Succeeded to change password on MyProxy server"
msgstr "Удалось поменять пароль на сервере MyProxy"

#: src/clients/credentials/arcproxy_myproxy.cpp:185
msgid "Succeeded to destroy credential on MyProxy server"
msgstr "Удалось уничтожить доверенность на сервере MyProxy"

#: src/clients/credentials/arcproxy_myproxy.cpp:241
#, c-format
msgid "Succeeded to get a proxy in %s from MyProxy server %s"
msgstr "Удалось получить доверенность в %s с сервера MyProxy %s"

#: src/clients/credentials/arcproxy_myproxy.cpp:294
msgid "Succeeded to put a proxy onto MyProxy server"
msgstr "Удалось делегировать доверенность серверу MyProxy"

#: src/clients/credentials/arcproxy_proxy.cpp:93
msgid "Failed to add VOMS AC extension. Your proxy may be incomplete."
msgstr ""
"Сбой добавления расширения VOMS AC. Ваша доверенность может быть неполной."

#: src/clients/credentials/arcproxy_voms.cpp:63
msgid ""
"Failed to process VOMS configuration or no suitable configuration lines "
"found."
msgstr ""
"Не удалось обработать настройки VOMS, или не найдены приемлемые строки "
"конфигурации."

#: src/clients/credentials/arcproxy_voms.cpp:75
#, c-format
msgid "Failed to parse requested VOMS lifetime: %s"
msgstr "Сбой разборки указанного времени действия VOMS: %s"

#: src/clients/credentials/arcproxy_voms.cpp:93
#, c-format
msgid "Cannot get VOMS server address information from vomses line: \"%s\""
msgstr "Информация об адресе сервера VOMS отсутствует в строке: \"%s\""

#: src/clients/credentials/arcproxy_voms.cpp:97
#: src/clients/credentials/arcproxy_voms.cpp:99
#, c-format
msgid "Contacting VOMS server (named %s): %s on port: %s"
msgstr "Устанавливается связь с сервером VOMS (по имени %s): %s по порту: %s"

#: src/clients/credentials/arcproxy_voms.cpp:105
#, c-format
msgid "Failed to parse requested VOMS server port number: %s"
msgstr "Сбой разборки указанного номера порта сервера VOMS: %s"

#: src/clients/credentials/arcproxy_voms.cpp:122
msgid "List functionality is not supported for RESTful VOMS interface"
msgstr "Перечисление не поддерживается для REST-интерфейса VOMS"

#: src/clients/credentials/arcproxy_voms.cpp:132
#: src/clients/credentials/arcproxy_voms.cpp:188
#, c-format
msgid ""
"The VOMS server with the information:\n"
"\t%s\n"
"can not be reached, please make sure it is available."
msgstr "" "Ðевозможно ÑвÑзатьÑÑ Ñ Ñервером VOMS Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸ÐµÐ¹:\n" "\t%s\n" "ПожалуйÑта, проверьте, доÑтупен ли Ñтот Ñервер." #: src/clients/credentials/arcproxy_voms.cpp:133 #: src/clients/credentials/arcproxy_voms.cpp:138 #: src/clients/credentials/arcproxy_voms.cpp:189 #: src/clients/credentials/arcproxy_voms.cpp:194 #, c-format msgid "" "Collected error is:\n" "\t%s" msgstr "" "ÐŸÐ¾Ð»ÑƒÑ‡ÐµÐ½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°:\n" "\t%s" #: src/clients/credentials/arcproxy_voms.cpp:137 #: src/clients/credentials/arcproxy_voms.cpp:193 #, c-format msgid "No valid response from VOMS server: %s" msgstr "Ðе получено приемлемого отзыва от Ñервера VOMS: %s" #: src/clients/credentials/arcproxy_voms.cpp:155 msgid "List functionality is not supported for legacy VOMS interface" msgstr "ПеречиÑление не поддерживаетÑÑ Ð´Ð»Ñ Ñ‚Ñ€Ð°Ð´Ð¸Ñ†Ð¸Ð¾Ð½Ð½Ð¾Ð³Ð¾ интерфейÑа VOMS" #: src/clients/credentials/arcproxy_voms.cpp:167 #, c-format msgid "Failed to parse VOMS command: %s" msgstr "Ðе удалоÑÑŒ разобрать команду VOMS: %s" #: src/clients/credentials/arcproxy_voms.cpp:204 #, c-format msgid "" "There are %d servers with the same name: %s in your vomses file, but none of " "them can be reached, or can return a valid message." msgstr "" "Ð’ Вашем файле vomses указаны %d Ñерверов Ñ Ð¾Ð´Ð¸Ð½Ð°ÐºÐ¾Ð²Ñ‹Ð¼ именем %s, но ни один " "из них не доÑтупен или не отзываетÑÑ Ð¿Ñ€Ð°Ð²Ð¸Ð»ÑŒÐ½Ð¾." #: src/clients/data/arccp.cpp:79 src/clients/data/arccp.cpp:315 #, c-format msgid "Current transfer FAILED: %s" msgstr "Ð¢ÐµÐºÑƒÑ‰Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° ÐЕ СОСТОЯЛÐСЬ: %s" #: src/clients/data/arccp.cpp:81 src/clients/data/arccp.cpp:119 #: src/clients/data/arccp.cpp:317 src/clients/data/arcls.cpp:214 #: src/clients/data/arcmkdir.cpp:62 src/clients/data/arcrename.cpp:78 #: src/clients/data/arcrm.cpp:83 msgid "This seems like a temporary error, please try again later" msgstr "Похоже на временный Ñбой - пожалуйÑта, попытайтеÑÑŒ Ñнова попозже" #: src/clients/data/arccp.cpp:96 src/clients/data/arccp.cpp:100 #: src/clients/data/arccp.cpp:133 src/clients/data/arccp.cpp:137 #: src/clients/data/arccp.cpp:343 src/clients/data/arccp.cpp:348 #: src/clients/data/arcls.cpp:125 src/clients/data/arcmkdir.cpp:30 #: src/clients/data/arcrename.cpp:31 src/clients/data/arcrename.cpp:35 #: src/clients/data/arcrm.cpp:38 #, c-format msgid "Invalid URL: %s" msgstr "Ðеверный URL: %s" #: src/clients/data/arccp.cpp:112 msgid "Third party transfer is not supported for these endpoints" msgstr "Ð”Ð»Ñ Ñтих точек входа ÑтороннÑÑ Ð¿ÐµÑ€ÐµÑылка не поддерживаетÑÑ" #: src/clients/data/arccp.cpp:114 msgid "" "Protocol(s) not supported - please check that the relevant gfal2\n" " plugins are installed (gfal2-plugin-* packages)" msgstr "" "Протокол не поддерживаетÑÑ - пожалуйÑта, убедитеÑÑŒ что\n" " уÑтановлены необходимые подключаемые модули gfal2 (пакеты gfal2-" "plugin-*)" #: src/clients/data/arccp.cpp:117 #, c-format msgid "Transfer FAILED: %s" msgstr "Передача ÐЕ УДÐЛÐСЬ: %s" #: src/clients/data/arccp.cpp:145 src/clients/data/arccp.cpp:171 #: src/clients/data/arccp.cpp:359 src/clients/data/arccp.cpp:387 #, c-format msgid "Can't read list of sources from file %s" msgstr "Ðевозможно прочеÑть ÑпиÑок иÑточников из файла %s" #: src/clients/data/arccp.cpp:150 src/clients/data/arccp.cpp:186 #: src/clients/data/arccp.cpp:364 src/clients/data/arccp.cpp:403 #, c-format msgid "Can't read list of destinations from file %s" msgstr "Ðевозможно прочеÑтьÑпиÑок назначений из файла %s" #: src/clients/data/arccp.cpp:155 src/clients/data/arccp.cpp:370 msgid "Numbers of sources and destinations do 
not match" msgstr "ЧиÑло иÑточников и чиÑло назначений не ÑоответÑтвуют друг другу" #: src/clients/data/arccp.cpp:200 msgid "Fileset registration is not supported yet" msgstr "РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð°Ð±Ð¾Ñ€Ð¾Ð² файлов пока не поддерживаетÑÑ" #: src/clients/data/arccp.cpp:206 src/clients/data/arccp.cpp:279 #: src/clients/data/arccp.cpp:441 #, c-format msgid "Unsupported source url: %s" msgstr "Ðеподдерживаемый URL иÑточника: %s" #: src/clients/data/arccp.cpp:210 src/clients/data/arccp.cpp:283 #, c-format msgid "Unsupported destination url: %s" msgstr "Ðеподдерживаемый URL назначениÑ: %s" #: src/clients/data/arccp.cpp:217 msgid "" "For registration source must be ordinary URL and destination must be " "indexing service" msgstr "" "Ð”Ð»Ñ Ñ€ÐµÐ³Ð¸Ñтрации, иÑточник должен быть задан обычным URL, а назначением " "должен быть каталог реÑурÑов" #: src/clients/data/arccp.cpp:227 #, c-format msgid "Could not obtain information about source: %s" msgstr "Ðе удалоÑÑŒ получить информацию об иÑточнике: %s" #: src/clients/data/arccp.cpp:234 msgid "" "Metadata of source does not match existing destination. Use the --force " "option to override this." msgstr "" "Метаданные иÑточника и цели не Ñовпадают. ИÑпользуйте опцию --force Ð´Ð»Ñ " "принудительного копированиÑ." #: src/clients/data/arccp.cpp:246 msgid "Failed to accept new file/destination" msgstr "Сбой при приёме нового файла/направлениÑ" #: src/clients/data/arccp.cpp:252 src/clients/data/arccp.cpp:258 #, c-format msgid "Failed to register new file/destination: %s" msgstr "Сбой при региÑтрации нового файла/цели: %s" #: src/clients/data/arccp.cpp:421 msgid "Fileset copy to single object is not supported yet" msgstr "Копирование набора файлов в отдельный объект пока не поддерживаетÑÑ" #: src/clients/data/arccp.cpp:431 msgid "Can't extract object's name from source url" msgstr "Ðевозможно извлечь Ð¸Ð¼Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° из URL иÑточника" #: src/clients/data/arccp.cpp:450 #, c-format msgid "%s. Cannot copy fileset" msgstr "%s. Ðевозможно Ñкопировать набор файлов" #: src/clients/data/arccp.cpp:460 src/hed/libs/compute/ExecutionTarget.cpp:256 #: src/hed/libs/compute/ExecutionTarget.cpp:328 #, c-format msgid "Name: %s" msgstr "ИмÑ: %s" #: src/clients/data/arccp.cpp:463 #, c-format msgid "Source: %s" msgstr "ИÑточник: %s" #: src/clients/data/arccp.cpp:464 #, c-format msgid "Destination: %s" msgstr "Ðазначение: %s" #: src/clients/data/arccp.cpp:470 msgid "Current transfer complete" msgstr "Ð¢ÐµÐºÑƒÑ‰Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° завершена" #: src/clients/data/arccp.cpp:473 msgid "Some transfers failed" msgstr "Ðекоторые загрузки не удалиÑÑŒ" #: src/clients/data/arccp.cpp:483 #, c-format msgid "Directory: %s" msgstr "Каталог: %s" #: src/clients/data/arccp.cpp:503 msgid "Transfer complete" msgstr "Передача данных завершена" #: src/clients/data/arccp.cpp:522 msgid "source destination" msgstr "иÑточник назначение" #: src/clients/data/arccp.cpp:523 msgid "" "The arccp command copies files to, from and between grid storage elements." msgstr "" "Команда arccp копирует файлы на, Ñ Ð¸ между запоминающими уÑтройÑтвами Грид." 

#: src/clients/data/arccp.cpp:528
msgid ""
"use passive transfer (off by default if secure is on, on by default if "
"secure is not requested)"
msgstr ""
"использовать пассивную передачу данных (по умолчанию, опция отключена при "
"защищённой передаче, и включена при незащищённой)"

#: src/clients/data/arccp.cpp:534
msgid "do not try to force passive transfer"
msgstr "не пытаться форсировать пассивный способ передачи данных"

#: src/clients/data/arccp.cpp:539
#, fuzzy
msgid "force overwrite of existing destination"
msgstr "Предварительная регистрация назначения"

#: src/clients/data/arccp.cpp:543
msgid "show progress indicator"
msgstr "показать индикатор выполнения"

#: src/clients/data/arccp.cpp:548
msgid ""
"do not transfer, but register source into destination. destination must "
"be a meta-url."
msgstr ""
"зарегистрировать файл, не передавая его - назначением должен быть мета-URL."

#: src/clients/data/arccp.cpp:554
msgid "use secure transfer (insecure by default)"
msgstr ""
"использовать защищённую передачу данных (передача не защищена по умолчанию)"

#: src/clients/data/arccp.cpp:559
msgid "path to local cache (use to put file into cache)"
msgstr "путь к локальному кэшу (используется для записи файла в кэш)"

#: src/clients/data/arccp.cpp:564 src/clients/data/arcls.cpp:290
msgid "operate recursively"
msgstr "обработать рекурсивно"

#: src/clients/data/arccp.cpp:569 src/clients/data/arcls.cpp:295
msgid "operate recursively up to specified level"
msgstr "рекурсивное исполнение до указанного уровня"

#: src/clients/data/arccp.cpp:570 src/clients/data/arcls.cpp:296
msgid "level"
msgstr "уровень"

#: src/clients/data/arccp.cpp:574
msgid "number of retries before failing file transfer"
msgstr "количество попыток передачи файла"

#: src/clients/data/arccp.cpp:575
msgid "number"
msgstr "число"

#: src/clients/data/arccp.cpp:579
msgid ""
"physical location to write to when destination is an indexing service. "
"Must be specified for indexing services which do not automatically "
"generate physical locations. Can be specified multiple times - locations "
"will be tried in order until one succeeds."
msgstr ""
"физический адрес для записи, если в качестве назначения указан каталог "
"ресурсов. Должен быть указан для каталогов, не генерирующих физические "
"адреса автоматически. Несколько значений может быть указано - адреса "
"будут перебираться, пока не будет достигнут успех."

#: src/clients/data/arccp.cpp:587
msgid ""
"perform third party transfer, where the destination pulls from the source "
"(only available with GFAL plugin)"
msgstr ""
"выполнить стороннюю пересылку, когда назначение закачивает файл из "
"источника (доступно только с модулем GFAL)"

#: src/clients/data/arccp.cpp:593 src/clients/data/arcls.cpp:312
#: src/clients/data/arcmkdir.cpp:90 src/clients/data/arcrename.cpp:101
#: src/clients/data/arcrm.cpp:115
msgid "list the available plugins (protocols supported)"
msgstr "показать список доступных модулей (поддерживаемые протоколы)"

#: src/clients/data/arccp.cpp:632 src/clients/data/arcls.cpp:351
#: src/clients/data/arcmkdir.cpp:129 src/clients/data/arcrename.cpp:140
#: src/clients/data/arcrm.cpp:154
msgid ""
"force using both CA certificates configuration for Grid services "
"(typically IGTF) and those provided by OpenSSL"
msgstr ""

#: src/clients/data/arccp.cpp:667 src/clients/data/arcls.cpp:387
#: src/clients/data/arcmkdir.cpp:165 src/clients/data/arcrename.cpp:176
#: src/clients/data/arcrm.cpp:191
msgid "Protocol plugins available:"
msgstr "Доступны модули для следующих протоколов:"

#: src/clients/data/arccp.cpp:715 src/clients/data/arcls.cpp:435
#: src/clients/data/arcmkdir.cpp:212 src/clients/data/arcrename.cpp:222
#: src/clients/data/arcrm.cpp:239
msgid "Wrong number of parameters specified"
msgstr "Задано неверное количество параметров"

#: src/clients/data/arccp.cpp:720
msgid "Options 'p' and 'n' can't be used simultaneously"
msgstr "Опции 'p' и 'n' не могут быть использованы одновременно"

#: src/clients/data/arcls.cpp:131 src/clients/data/arcmkdir.cpp:36
#: src/clients/data/arcrm.cpp:45
#, c-format
msgid "Can't read list of locations from file %s"
msgstr "Невозможно прочесть список адресов из файла %s"

#: src/clients/data/arcls.cpp:146 src/clients/data/arcmkdir.cpp:51
#: src/clients/data/arcrename.cpp:63
msgid "Unsupported URL given"
msgstr "Заданный URL не поддерживается"

#: src/clients/data/arcls.cpp:217
msgid "Warning: Failed listing files but some information is obtained"
msgstr ""
"Предупреждение: Не удалось вывести список файлов, но некоторая информация "
"была получена"

#: src/clients/data/arcls.cpp:271 src/clients/data/arcmkdir.cpp:79
msgid "url"
msgstr "URL"

#: src/clients/data/arcls.cpp:272
msgid ""
"The arcls command is used for listing files in grid storage elements and "
"file\n"
"index catalogues."
msgstr ""
"Команда arcls используется для просмотра информации о файлах,\n"
"хранящихся на накопительных устройствах Грид, а также в занесённых\n"
"в каталоги данных."
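
# NOTE: a minimal sketch of listing a storage URL as documented above;
# the host and path are placeholders:
#   arcls gsiftp://se.example.org/data/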

#: src/clients/data/arcls.cpp:281
msgid "show URLs of file locations"
msgstr "вывести адреса физических файлов"

#: src/clients/data/arcls.cpp:285
msgid "display all available metadata"
msgstr "показать все доступные метаданные"

#: src/clients/data/arcls.cpp:299
msgid ""
"show only description of requested object, do not list content of "
"directories"
msgstr ""
"показывать только описание запрашиваемого объекта, не выводить содержимое "
"каталогов"

#: src/clients/data/arcls.cpp:303
msgid "treat requested object as directory and always try to list content"
msgstr ""
"интерпретировать запрошенный объект как каталог, и всегда пытаться "
"вывести его содержимое"

#: src/clients/data/arcls.cpp:307
msgid ""
"check readability of object, does not show any information about object"
msgstr "проверить читаемость объекта, не показывать информацию об объекте"

#: src/clients/data/arcls.cpp:440
msgid "Incompatible options --nolist and --forcelist requested"
msgstr "Запрошены несовместимые опции --nolist и --forcelist"

#: src/clients/data/arcls.cpp:445
msgid "Requesting recursion and --nolist has no sense"
msgstr "Запрос рекурсивного просмотра и --nolist не имеет смысла"

#: src/clients/data/arcmkdir.cpp:80
msgid ""
"The arcmkdir command creates directories on grid storage elements and "
"catalogs."
msgstr ""
"Команда arcmkdir создаёт директории на грид-хранилищах и в каталогах "
"данных."

#: src/clients/data/arcmkdir.cpp:85
msgid "make parent directories as needed"
msgstr "создавать родительские директории по мере необходимости"

#: src/clients/data/arcrename.cpp:43
msgid "Both URLs must have the same protocol, host and port"
msgstr "Оба URL должны содержать одинаковый протокол, адрес сервера и порт"

#: src/clients/data/arcrename.cpp:53
msgid "Cannot rename to or from root directory"
msgstr "Невозможно переместить в корневую директорию или из неё"

#: src/clients/data/arcrename.cpp:57
msgid "Cannot rename to the same URL"
msgstr "Невозможно переименовать в идентичный URL"

#: src/clients/data/arcrename.cpp:95
msgid "old_url new_url"
msgstr "old_url new_url"

#: src/clients/data/arcrename.cpp:96
msgid "The arcrename command renames files on grid storage elements."
msgstr "Команда arcrename переименовывает файлы на запоминающих устройствах."

#: src/clients/data/arcrm.cpp:60
#, c-format
msgid "Unsupported URL given: %s"
msgstr "Заданный URL не поддерживается: %s"

#: src/clients/data/arcrm.cpp:103
msgid "url [url ...]"
msgstr "url [url ...]"

#: src/clients/data/arcrm.cpp:104
msgid "The arcrm command deletes files on grid storage elements."
msgstr "Команда arcrm удаляет файлы с запоминающих устройств."

#: src/clients/data/arcrm.cpp:109
msgid ""
"remove logical file name registration even if not all physical instances "
"were removed"
msgstr ""
"удалить логическое имя файла, даже если не все физические копии удалены"

#: src/clients/data/utils.cpp:18
#, fuzzy
msgid "Proxy expired. Please run 'arcproxy'!"
msgstr ""
"Срок действия доверенности вышел. Засылка задачи оборвана. Пожалуйста, "
"запустите 'arcproxy'!"

#: src/clients/data/utils.cpp:81 src/clients/data/utils.cpp:90
#, fuzzy, c-format
msgid "Unable to handle %s"
msgstr "Не удалось переименовать %s"

#: src/clients/data/utils.cpp:82 src/clients/data/utils.cpp:91
msgid "Invalid credentials, please check proxy and/or CA certificates"
msgstr ""
"Недействительные реквизиты доступа, пожалуйста, проверьте сертификат "
"доверенности и/или реквизиты органа сертификации"

#: src/clients/data/utils.cpp:88
msgid "Proxy expired"
msgstr "Срок действия доверенности вышел"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:53
msgid "Cannot initialize ARCHERY domain name for query"
msgstr "Не удалось инициализировать доменное имя ARCHERY для запроса"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:60
msgid "Cannot create resolver from /etc/resolv.conf"
msgstr "Не удалось создать преобразователь из /etc/resolv.conf"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:68
msgid "Cannot query service endpoint TXT records from DNS"
msgstr "Не удалось запросить TXT-записи конечных точек службы из DNS"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:79
msgid "Cannot parse service endpoint TXT records."
msgstr "Не удалось разобрать TXT-записи конечных точек службы."

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:124
#, c-format
msgid "Wrong service record field \"%s\" found in the \"%s\""
msgstr "Обнаружено недопустимое поле записи \"%s\" в \"%s\""

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:129
#, c-format
msgid "Malformed ARCHERY record found (endpoint url is not defined): %s"
msgstr "Обнаружена неверная запись ARCHERY (не задан URL конечной точки): %s"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:134
#, c-format
msgid "Malformed ARCHERY record found (endpoint type is not defined): %s"
msgstr "Обнаружена неверная запись ARCHERY (не задан тип конечной точки): %s"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:138
#, c-format
msgid "Found service endpoint %s (type %s)"
msgstr "Обнаружена конечная точка службы %s (тип %s)"

#: src/hed/acc/ARCHERY/ServiceEndpointRetrieverPluginARCHERY.cpp:157
#, c-format
msgid ""
"Status for service endpoint \"%s\" is set to inactive in ARCHERY. Skipping."
msgstr ""
"Состояние точки доступа службы \"%s\" задано как неактивное в ARCHERY. "
"Пропускается."

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:229
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:149
#, c-format
msgid "Job %s has no delegation associated. Can't renew such job."
msgstr ""
"С задачей %s не ассоциировано никакого делегирования. Задача не может "
"быть обновлена."

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:241
#: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:161
#, c-format
msgid "Job %s failed to renew delegation %s."
msgstr "Задача %s не смогла обновить делегирование %s."
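
# NOTE: a sketch of the kind of DNS TXT record the ARCHERY messages above
# validate. The u= (endpoint URL), t= (endpoint type) and s= (status)
# fields mirror the url/type/status checks in those messages; the record
# name and all values are assumptions for illustration only:
#   _archery.example.org. IN TXT "u=https://ce.example.org:443/arex t=org.nordugrid.arcrest s=1"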

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:314
#, fuzzy, c-format
msgid "Failed to process jobs - error response: %s"
msgstr "Сбой обработки задач - неверный отклик: %u"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:316
#, c-format
msgid "Failed to process jobs - wrong response: %u"
msgstr "Сбой обработки задач - неверный отклик: %u"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:318
#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:327
#, c-format
msgid "Content: %s"
msgstr "Содержимое: %s"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:321
#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:333
#, c-format
msgid "Failed to process job: %s"
msgstr "Сбой обработки задачи: %s"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:331
msgid "Failed to process jobs - failed to parse response"
msgstr "Сбой обработки задач - сбой разборки отклика"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:344
#, c-format
msgid "No response returned: %s"
msgstr "Не получен отклик: %s"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:368
#, c-format
msgid "Failed to process job: %s - %s %s"
msgstr "Сбой разборки задачи: %s - %s %s"

#: src/hed/acc/ARCREST/JobControllerPluginREST.cpp:455
#, c-format
msgid "Failed retrieving job description for job: %s"
msgstr "Сбой извлечения описания задачи: %s"

#: src/hed/acc/ARCREST/JobListRetrieverPluginREST.cpp:29
msgid "Collecting Job (A-REX REST jobs) information."
msgstr "Собирается информация о задаче (задачи на A-REX REST)"

#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:49
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:80
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:115
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:149
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:189
msgid "Failed to communicate to delegation endpoint."
msgstr "Сбой сообщения со службой делегирования."

#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:54
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:85
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:120
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:154
#, c-format
msgid "Unexpected response code from delegation endpoint - %u"
msgstr "Неверный код отклика службы делегирования - %u"

#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:56
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:87
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:122
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:156
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:399
#: src/hed/dmc/gridftp/Lister.cpp:223 src/hed/dmc/gridftp/Lister.cpp:243
#: src/hed/dmc/gridftp/Lister.cpp:468 src/hed/dmc/gridftp/Lister.cpp:475
#: src/hed/dmc/gridftp/Lister.cpp:497
#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:164
#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:197
#, c-format
msgid "Response: %s"
msgstr "Ответ: %s"

#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:64
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:136
#, c-format
msgid "Unexpected delegation location from delegation endpoint - %s."
msgstr "Неверное расположение делегирования от службы делегирования - %s."

#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:92
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:127
#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:161
msgid "Missing response from delegation endpoint."
msgstr "Отсутствует отклик службы делегирования."

#: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:193
#, c-format
msgid "Unexpected response code from delegation endpoint: %u, %s."
msgstr "Ðеверный код отклика Ñлужбы делегированиÑ: %u, %s." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:235 #, c-format msgid "Failed to submit all jobs: %s %s" msgstr "Сбой заÑылки вÑех задач: %s %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:249 msgid "Failed uploading local input files" msgstr "Сбой выгрузки локальных входных файлов" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:304 msgid "Failed to prepare job description" msgstr "Сбой подготовки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:313 #, c-format msgid "Unable to submit job. Job description is not valid in the %s format: %s" msgstr "" "Ðевозможно заÑлать задачу. ОпиÑание задачи в формате %s недейÑтвительно: %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:320 msgid "" "Can't submit multiple instances for multiple job descriptions. Not " "implemented yet." msgstr "" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:331 #, fuzzy msgid "Unable to submit jobs. Failed to delegate X.509 credentials." msgstr "ЗаÑылка задач не удалаÑÑŒ. Сбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:338 #, fuzzy msgid "Unable to submit jobs. Failed to delegate token." msgstr "ЗаÑылка задач не удалаÑÑŒ. Сбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:348 msgid "Unable to submit job. Failed to assign delegation to job description." msgstr "" "ЗаÑылка задачи не удалаÑÑŒ. Сбой приÑÐ²Ð¾ÐµÐ½Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¾Ð¿Ð¸Ñанию задачи." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:388 msgid "Failed to submit all jobs." msgstr "Сбой заÑылки вÑех задач." #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:398 #, c-format msgid "Failed to submit all jobs: %u %s" msgstr "Сбой заÑылки вÑех задач: %u %s" #: src/hed/acc/ARCREST/SubmitterPluginREST.cpp:411 #, c-format msgid "Failed to submit all jobs: %s" msgstr "Сбой заÑылки вÑех задач: %s" #: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:27 msgid "Querying WSRF GLUE2 computing REST endpoint." msgstr "" "ОпрашиваетÑÑ Ñ‚Ð¾Ñ‡ÐºÐ° доÑтупа WSRF GLUE2 к информации о вычиÑлительном реÑурÑе " "REST." 
#: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:60
#, c-format
msgid "CONTENT %u: %s"
msgstr "СОДЕРЖИМОЕ %u: %s"

#: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:64
msgid "Response is not XML"
msgstr "Отклик не в формате XML"

#: src/hed/acc/ARCREST/TargetInformationRetrieverPluginREST.cpp:69
#, c-format
msgid "Parsed domains: %u"
msgstr "Разобрано доменов: %u"

#: src/hed/acc/Broker/DescriptorsBroker.cpp:14
msgid "Sorting according to free slots in queue"
msgstr "Сортировка в соответствии с наличием свободных мест в очереди"

#: src/hed/acc/Broker/DescriptorsBroker.cpp:15
msgid "Random sorting"
msgstr "Случайная сортировка"

#: src/hed/acc/Broker/DescriptorsBroker.cpp:16
msgid "Sorting according to specified benchmark (default \"specint2000\")"
msgstr "Сортировка в соответствии с указанным эталонным тестом (по умолчанию - \"specint2000\")"

#: src/hed/acc/Broker/DescriptorsBroker.cpp:17
msgid "Sorting according to input data availability at target"
msgstr "Сортировка в соответствии с доступностью входных данных в пункте назначения"

#: src/hed/acc/Broker/DescriptorsBroker.cpp:18
msgid "Performs neither sorting nor matching"
msgstr "Не производится ни сортировки, ни поиска соответствия"

#: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:24
#, c-format
msgid "Target %s removed by FastestQueueBroker, doesn't report number of waiting jobs"
msgstr "Назначение %s отброшено алгоритмом FastestQueueBroker, т.к. не сообщает число ожидающих задач"

#: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:27
#, c-format
msgid "Target %s removed by FastestQueueBroker, doesn't report number of total slots"
msgstr "Назначение %s отброшено алгоритмом FastestQueueBroker, т.к. не сообщает общее число ячеек"

#: src/hed/acc/Broker/FastestQueueBrokerPlugin.cpp:30
#, c-format
msgid "Target %s removed by FastestQueueBroker, doesn't report number of free slots"
msgstr "Назначение %s отброшено алгоритмом FastestQueueBroker, т.к. не сообщает число свободных ячеек"

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:74
#, c-format
msgid "[ADLParser] Unsupported EMI ES state %s."
msgstr "[ADLParser] Неподдерживаемое состояние EMI ES %s."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:94
#, c-format
msgid "[ADLParser] Unsupported internal state %s."
msgstr "[ADLParser] Неподдерживаемое внутреннее состояние %s."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:104
#, c-format
msgid "[ADLParser] Optional for %s elements are not supported yet."
msgstr "[ADLParser] Атрибут optional для элементов %s пока не поддерживается."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:113
#, c-format
msgid "[ADLParser] %s element must be boolean."
msgstr "[ADLParser] элемент %s должен быть логическим."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:125
#, c-format
msgid "[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number."
msgstr "[ADLParser] Код в FailIfExitCodeNotEqualTo в %s не является допустимым числом."
#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:363
msgid "[ADLParser] Root element is not ActivityDescription "
msgstr "[ADLParser] Корневой элемент не является ActivityDescription "

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:410
msgid "[ADLParser] priority is too large - using max value 100"
msgstr "[ADLParser] слишком высокий приоритет - используется максимальное значение 100"

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:453
#, c-format
msgid "[ADLParser] Unsupported URL %s for RemoteLogging."
msgstr "[ADLParser] Неподдерживаемый URL %s в RemoteLogging."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:472
#, c-format
msgid "[ADLParser] Wrong time %s in ExpirationTime."
msgstr "[ADLParser] ExpirationTime содержит недопустимое время %s."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:502
msgid "[ADLParser] AccessControl isn't valid XML."
msgstr "[ADLParser] AccessControl не является допустимым XML."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:513
msgid "[ADLParser] CredentialService must contain valid URL."
msgstr "[ADLParser] CredentialService должен содержать допустимый URL."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:542
#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:545
msgid "[ADLParser] Only email Prorocol for Notification is supported yet."
msgstr "[ADLParser] Пока что поддерживается только email Prorocol для Notification."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:603
msgid "[ADLParser] Missing or wrong value in ProcessesPerSlot."
msgstr "[ADLParser] Значение ProcessesPerSlot отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:608
msgid "[ADLParser] Missing or wrong value in ThreadsPerProcess."
msgstr "[ADLParser] Значение ThreadsPerProcess отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:614
msgid "[ADLParser] Missing Name element or value in ParallelEnvironment/Option element."
msgstr "[ADLParser] Отсутствует элемент Name или значение элемента ParallelEnvironment/Option."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:631
msgid "[ADLParser] NetworkInfo is not supported yet."
msgstr "[ADLParser] NetworkInfo пока что не поддерживается."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:645
#, c-format
msgid "[ADLParser] NodeAccess value %s is not supported yet."
msgstr "[ADLParser] Значение NodeAccess %s пока что не поддерживается."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:653
msgid "[ADLParser] Missing or wrong value in NumberOfSlots."
msgstr "[ADLParser] Значение NumberOfSlots отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:660
msgid "[ADLParser] The NumberOfSlots element should be specified, when the value of useNumberOfSlots attribute of SlotsPerHost element is \"true\"."
msgstr "[ADLParser] Значение элемента NumberOfSlots должно быть указано, если значение атрибута useNumberOfSlots элемента SlotsPerHost - \"true\"."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:667
msgid "[ADLParser] Missing or wrong value in SlotsPerHost."
msgstr "[ADLParser] Значение SlotsPerHost отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:696
msgid "[ADLParser] Missing or wrong value in IndividualPhysicalMemory."
msgstr "[ADLParser] Значение IndividualPhysicalMemory отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:706
msgid "[ADLParser] Missing or wrong value in IndividualVirtualMemory."
msgstr "[ADLParser] Значение IndividualVirtualMemory отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:716
msgid "[ADLParser] Missing or wrong value in DiskSpaceRequirement."
msgstr "[ADLParser] Значение DiskSpaceRequirement отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:730
msgid "[ADLParser] Benchmark is not supported yet."
msgstr "[ADLParser] Benchmark пока что не поддерживается."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:738
msgid "[ADLParser] Missing or wrong value in IndividualCPUTime."
msgstr "[ADLParser] Значение IndividualCPUTime отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:746
msgid "[ADLParser] Missing or wrong value in TotalCPUTime."
msgstr "[ADLParser] Значение TotalCPUTime отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:755
msgid "[ADLParser] Missing or wrong value in WallTime."
msgstr "[ADLParser] Значение WallTime отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:775
msgid "[ADLParser] Missing or empty Name in InputFile."
msgstr "[ADLParser] Значение Name в InputFile отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:786
#, c-format
msgid "[ADLParser] Wrong URI specified in Source - %s."
msgstr "[ADLParser] Указан неверный URI в Source - %s."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:808
msgid "[ADLParser] Missing or empty Name in OutputFile."
msgstr "[ADLParser] Значение Name в OutputFile отсутствует или неверно."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:814
#, c-format
msgid "[ADLParser] Wrong URI specified in Target - %s."
msgstr "[ADLParser] Указан неверный URI в Target - %s."

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:827
#, c-format
msgid "Location URI for file %s is invalid"
msgstr "Недопустимый URI в Location для файла %s"

#: src/hed/acc/JobDescriptionParser/ADLParser.cpp:852
#, c-format
msgid "[ADLParser] CreationFlag value %s is not supported."
msgstr "[ADLParser] Значение CreationFlag %s не поддерживается."
#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:42
msgid "Left operand for RSL concatenation does not evaluate to a literal"
msgstr "Левый операнд для сцепления RSL не приводится к буквенной константе"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:50
msgid "Right operand for RSL concatenation does not evaluate to a literal"
msgstr "Правый операнд для сцепления RSL не приводится к буквенной константе"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:161
msgid "Multi-request operator only allowed at top level"
msgstr "Оператор множественности RSL допускается лишь в начале документа"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:186
msgid "RSL substitution is not a sequence"
msgstr "Замена в RSL не является последовательностью"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:192
msgid "RSL substitution sequence is not of length 2"
msgstr "Замена в RSL не является последовательностью из двух элементов"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:211
msgid "RSL substitution variable name does not evaluate to a literal"
msgstr "Имя переменной для замены RSL не приводится к буквенной константе"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:220
msgid "RSL substitution variable value does not evaluate to a literal"
msgstr "Значение переменной для замены RSL не приводится к буквенной константе"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:313
msgid "End of comment not found"
msgstr "Не найдено окончание комментария"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:324
msgid "Junk at end of RSL"
msgstr "Неразборчивые фрагменты в конце RSL"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:424
msgid "End of single quoted string not found"
msgstr "Не обнаружено конца строки в одиночных кавычках"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:441
msgid "End of double quoted string not found"
msgstr "Не обнаружено конца строки в двойных кавычках"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:460
#, c-format
msgid "End of user delimiter (%s) quoted string not found"
msgstr "Не обнаружено конца строки, выделенной пользовательским ограничителем (%s)"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:518
#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:546
#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:625
msgid "')' expected"
msgstr "ожидается ')'"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:528
#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:609
msgid "'(' expected"
msgstr "ожидается '('"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:536
msgid "Variable name expected"
msgstr "Ожидается имя переменной"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:541
#, c-format
msgid "Variable name (%s) contains invalid character (%s)"
msgstr "Имя переменной (%s) содержит неверный символ (%s)"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:557
msgid "Broken string"
msgstr "Недопустимая строка"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:570
msgid "No left operand for concatenation operator"
msgstr "Отсутствует левый операнд оператора подцепления"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:574
msgid "No right operand for concatenation operator"
msgstr "Отсутствует правый операнд оператора подцепления"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:638
msgid "Attribute name expected"
msgstr "Ожидается имя атрибута"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:643
#, c-format
msgid "Attribute name (%s) contains invalid character (%s)"
msgstr "Имя атрибута (%s) содержит неверный символ (%s)"

#: src/hed/acc/JobDescriptionParser/RSLParser.cpp:649
msgid "Relation operator expected"
msgstr "Ожидается использование реляционного оператора"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:86
msgid "Error parsing the internally set executables attribute."
msgstr "Ошибка разбора переопределённого системой атрибута executables."

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:102
#, c-format
msgid "File '%s' in the 'executables' attribute is not present in the 'inputfiles' attribute"
msgstr "Файл '%s', перечисленный в атрибуте 'executables', отсутствует в атрибуте 'inputfiles'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:120
msgid "The value of the ftpthreads attribute must be a number from 1 to 10"
msgstr "Значение атрибута 'ftpthreads' должно быть целым числом от 1 до 10"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:177
msgid "'stdout' attribute must be specified when 'join' attribute is specified"
msgstr "Необходимо задать значение атрибута 'stdout', если задано значение атрибута 'join'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:181
msgid "Attribute 'join' cannot be specified when both 'stdout' and 'stderr' attributes is specified"
msgstr "Атрибут 'join' не может использоваться, если заданы оба атрибута 'stdout' и 'stderr'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:200
msgid "Attributes 'gridtime' and 'cputime' cannot be specified together"
msgstr "Атрибуты 'gridtime' и 'cputime' не могут быть заданы одновременно"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:204
msgid "Attributes 'gridtime' and 'walltime' cannot be specified together"
msgstr "Атрибуты 'gridtime' и 'walltime' не могут быть заданы одновременно"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:226
msgid "When specifying 'countpernode' attribute, 'count' attribute must also be specified"
msgstr "При задании атрибута 'countpernode' атрибут 'count' также должен быть задан"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:229
msgid "Value of 'countpernode' attribute must be an integer"
msgstr "Значение атрибута 'countpernode' должно быть целочисленным"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:287
msgid "No RSL content in job description found"
msgstr "В описании задачи не найдено структуры RSL"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:295
msgid "'action' attribute not allowed in user-side job description"
msgstr "Использование атрибута 'action' в пользовательском описании задачи не допускается"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:304
#, c-format
msgid "String successfully parsed as %s."
msgstr "Строка успешно разобрана как %s."
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:313
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:331
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:349
#, c-format
msgid "Attribute '%s' multiply defined"
msgstr "Атрибут '%s' задан несколько раз"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:317
#, c-format
msgid "Value of attribute '%s' expected to be single value"
msgstr "Значение атрибута '%s' неоднозначно"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:322
#, c-format
msgid "Value of attribute '%s' expected to be a string"
msgstr "Значение атрибута '%s' не является строкой"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:338
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:368
#, c-format
msgid "Value of attribute '%s' is not a string"
msgstr "Значение атрибута '%s' не является строкой"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:356
#, c-format
msgid "Value of attribute '%s' is not sequence"
msgstr "Значение атрибута '%s' не является последовательностью"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:360
#, c-format
msgid "Value of attribute '%s' has wrong sequence length: Expected %d, found %d"
msgstr "Значение атрибута '%s' содержит последовательность недопустимой длины: ожидается %d, получено %d"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:492
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1375
msgid "Unexpected RSL type"
msgstr "Неожиданный тип RSL"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:557
msgid "At least two values are needed for the 'inputfiles' attribute"
msgstr "Для атрибута 'inputfiles' необходимы как минимум два значения"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:562
msgid "First value of 'inputfiles' attribute (filename) cannot be empty"
msgstr "Первая часть значения атрибута 'inputfiles' (filename) не может быть пустой"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:587
#, c-format
msgid "Invalid URL '%s' for input file '%s'"
msgstr "Недопустимый URL '%s' для входного файла '%s'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:596
#, c-format
msgid "Invalid URL option syntax in option '%s' for input file '%s'"
msgstr "Недопустимый синтаксис опции URL в опции '%s' для входного файла '%s'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:606
#, c-format
msgid "Invalid URL: '%s' in input file '%s'"
msgstr "Недопустимый URL: '%s' во входном файле '%s'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:677
msgid "At least two values are needed for the 'outputfiles' attribute"
msgstr "Для атрибута 'outputfiles' необходимы как минимум два значения"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:682
msgid "First value of 'outputfiles' attribute (filename) cannot be empty"
msgstr "Первая часть значения атрибута 'outputfiles' (filename) не может быть пустой"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:694
#, c-format
msgid "Invalid URL '%s' for output file '%s'"
msgstr "Недопустимый URL '%s' для выходного файла '%s'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:704
#, c-format
msgid "Invalid URL option syntax in option '%s' for output file '%s'"
msgstr "Недопустимый синтаксис опции URL в опции '%s' для выходного файла '%s'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:714
#, c-format
msgid "Invalid URL: '%s' in output file '%s'"
msgstr "Недопустимый URL: '%s' в выходном файле '%s'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:746
#, c-format
msgid "Invalid comparison operator '%s' used at 'delegationid' attribute, only \"=\" is allowed."
msgstr "Недопустимый оператор сравнения '%s' используется с атрибутом 'delegationid', допускается только \"=\"."

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:764
#, c-format
msgid "Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' dialect, only \"=\" is allowed"
msgstr "Недопустимый оператор сравнения '%s' используется с атрибутом 'queue' в диалекте GRIDMANAGER, допускается лишь \"=\""

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:770
#, c-format
msgid "Invalid comparison operator '%s' used at 'queue' attribute, only \"!=\" or \"=\" are allowed."
msgstr "Недопустимый оператор сравнения '%s' используется с атрибутом 'queue', допускаются только \"!=\" или \"=\"."

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:927
#, c-format
msgid "Value of attribute '%s' expected not to be empty"
msgstr "Значение атрибута '%s' не должно быть пустым"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1036
msgid "The value of the acl XRSL attribute isn't valid XML."
msgstr "Значение атрибута XRSL acl не является действительным кодом XML."

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1050
msgid "The cluster XRSL attribute is currently unsupported."
msgstr "Атрибут XRSL cluster пока что не поддерживается."

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1066
#, c-format
msgid "Syntax error in 'notify' attribute value ('%s'), it must contain an email address"
msgstr "Синтаксическая ошибка в значении атрибута 'notify' ('%s'), он должен содержать адрес электронной почты"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1074
#, c-format
msgid "Syntax error in 'notify' attribute value ('%s'), it must only contain email addresses after state flag(s)"
msgstr "Синтаксическая ошибка в значении атрибута 'notify' ('%s'), он должен содержать лишь адреса электронной почты после меток статуса"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1077
#, c-format
msgid "Syntax error in 'notify' attribute value ('%s'), it contains unknown state flags"
msgstr "Синтаксическая ошибка в значении атрибута 'notify' ('%s'), он содержит неизвестные метки статуса"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1125
msgid "priority is too large - using max value 100"
msgstr "слишком высокий приоритет - используется максимальное значение 100"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1158
#, c-format
msgid "Invalid nodeaccess value: %s"
msgstr "Недопустимое значение nodeaccess: %s"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1201
msgid "Value of 'count' attribute must be an integer"
msgstr "Значение атрибута 'count' должно быть целочисленным"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1231
msgid "Value of 'exclusiveexecution' attribute must either be 'yes' or 'no'"
msgstr "Значением атрибута 'exclusiveexecution' может быть либо 'yes', либо 'no'"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1277
#, c-format
msgid "Invalid action value %s"
msgstr "Недопустимое значение action %s"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1367
#, c-format
msgid "The specified Globus attribute (%s) is not supported. %s ignored."
msgstr "Указанный атрибут Globus (%s) не поддерживается. %s игнорируется."
#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1371
#, c-format
msgid "Unknown XRSL attribute: %s - Ignoring it."
msgstr "Неизвестный атрибут XRSL: %s - игнорируется."

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1385
#, c-format
msgid "Wrong language requested: %s"
msgstr "Запрошен неверный язык: %s"

#: src/hed/acc/JobDescriptionParser/XRSLParser.cpp:1722
msgid "Cannot output XRSL representation: The Resources.SlotRequirement.NumberOfSlots attribute must be specified when the Resources.SlotRequirement.SlotsPerHost attribute is specified."
msgstr "Невозможно вывести представление XRSL: атрибут Resources.SlotRequirement.NumberOfSlots должен быть задан, если задан атрибут Resources.SlotRequirement.SlotsPerHost."

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:65
#: src/services/wrappers/python/pythonwrapper.cpp:92
msgid "Failed to initialize main Python thread"
msgstr "Сбой запуска головного потока Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:71
#: src/services/wrappers/python/pythonwrapper.cpp:97
msgid "Main Python thread was not initialized"
msgstr "Головной поток Python не был запущен"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:81
#, c-format
msgid "Loading Python broker (%i)"
msgstr "Подгрузка Python broker (%i)"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:104
#: src/services/wrappers/python/pythonwrapper.cpp:134
msgid "Main Python thread is not initialized"
msgstr "Головной процесс Python не был запущен"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:108
msgid "PythonBroker init"
msgstr "Инициализация PythonBroker"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:116
msgid ""
"Invalid class name. The broker argument for the PythonBroker should be\n"
"          Filename.Class.args (args is optional), for example SampleBroker."
"MyBroker"
msgstr ""
"Недопустимое имя класса. Аргумент брокера для PythonBroker должен быть\n"
"          Filename.Class.args (args не обязательно), например: SampleBroker."
"MyBroker"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:122
#, c-format
msgid "Class name: %s"
msgstr "Название класса: %s"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:123
#, c-format
msgid "Module name: %s"
msgstr "Название модуля: %s"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:132
#: src/services/wrappers/python/pythonwrapper.cpp:178
msgid "Cannot convert ARC module name to Python string"
msgstr "Невозможно перевести название модуля ARC в строку Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:140
#: src/services/wrappers/python/pythonwrapper.cpp:186
msgid "Cannot import ARC module"
msgstr "Не удалось импортировать модуль ARC"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:149
#: src/services/wrappers/python/pythonwrapper.cpp:196
#: src/services/wrappers/python/pythonwrapper.cpp:429
msgid "Cannot get dictionary of ARC module"
msgstr "Ошибка доступа к словарю модуля ARC"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:158
msgid "Cannot find ARC UserConfig class"
msgstr "Не удалось найти класс ARC UserConfig"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:166
msgid "UserConfig class is not an object"
msgstr "Класс UserConfig не является объектом"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:173
msgid "Cannot find ARC JobDescription class"
msgstr "Не удалось найти класс ARC JobDescription"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:181
msgid "JobDescription class is not an object"
msgstr "Класс JobDescription не является объектом"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:188
msgid "Cannot find ARC ExecutionTarget class"
msgstr "Не удалось найти класс ARC ExecutionTarget"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:196
msgid "ExecutionTarget class is not an object"
msgstr "Класс ExecutionTarget не является объектом"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:207
#: src/services/wrappers/python/pythonwrapper.cpp:157
msgid "Cannot convert module name to Python string"
msgstr "Невозможно перевести название модуля в строку Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:215
#: src/services/wrappers/python/pythonwrapper.cpp:164
msgid "Cannot import module"
msgstr "Не удалось импортировать модуль"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:224
msgid "Cannot get dictionary of custom broker module"
msgstr "Невозможно обнаружить словарь пользовательского модуля планировщика"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:233
msgid "Cannot find custom broker class"
msgstr "Не обнаружен класс пользовательского планировщика"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:241
#, c-format
msgid "%s class is not an object"
msgstr "Класс %s не является объектом"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:247
msgid "Cannot create UserConfig argument"
msgstr "Не удалось создать аргумент UserConfig"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:255
msgid "Cannot convert UserConfig to Python object"
msgstr "Не удалось преобразовать UserConfig в объект Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:263
#: src/services/wrappers/python/pythonwrapper.cpp:253
msgid "Cannot create argument of the constructor"
msgstr "Не удалось создать аргумент конструктора"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:272
#: src/services/wrappers/python/pythonwrapper.cpp:261
msgid "Cannot create instance of Python class"
msgstr "Не удалось реализовать класс Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:278
#, c-format
msgid "Python broker constructor called (%d)"
msgstr "Вызван Python-конструктор планировщика (%d)"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:302
#, c-format
msgid "Python broker destructor called (%d)"
msgstr "Вызван Python-деструктор планировщика (%d)"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:311
#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:328
#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:361
msgid "Cannot create ExecutionTarget argument"
msgstr "Невозможно создать аргумент ExecutionTarget"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:319
#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:336
#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:369
#, c-format
msgid "Cannot convert ExecutionTarget (%s) to python object"
msgstr "Невозможно преобразовать ExecutionTarget (%s) в объект Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:393
msgid "Cannot create JobDescription argument"
msgstr "Невозможно создать аргумент JobDescription"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:401
msgid "Cannot convert JobDescription to python object"
msgstr "Невозможно преобразовать JobDescription в объект Python"

#: src/hed/acc/PythonBroker/PythonBrokerPlugin.cpp:422
msgid "Do sorting using user created python broker"
msgstr "Сортировка с использованием пользовательского python-скрипта планировщика"

#: src/hed/daemon/unix/daemon.cpp:84
#, c-format
msgid "Daemonization fork failed: %s"
msgstr "Не удалось создать дочерний демон: %s"

#: src/hed/daemon/unix/daemon.cpp:95
msgid "Watchdog (re)starting application"
msgstr "Самоконтроль (пере)запускает приложение"

#: src/hed/daemon/unix/daemon.cpp:100
#, c-format
msgid "Watchdog fork failed: %s"
msgstr "Не удалось создать дочерний сторожевой процесс: %s"

#: src/hed/daemon/unix/daemon.cpp:110
msgid "Watchdog starting monitoring"
msgstr "Самоконтроль запускает мониторинг"

#: src/hed/daemon/unix/daemon.cpp:136
#, c-format
msgid "Watchdog detected application exit due to signal %u"
msgstr "Самоконтроль обнаружил завершение приложения по сигналу %u"

#: src/hed/daemon/unix/daemon.cpp:138
#, c-format
msgid "Watchdog detected application exited with code %u"
msgstr "Самоконтроль обнаружил приложение, завершившееся с кодом %u"

#: src/hed/daemon/unix/daemon.cpp:140
msgid "Watchdog detected application exit"
msgstr "Самоконтроль обнаружил завершение приложения"

#: src/hed/daemon/unix/daemon.cpp:149
msgid "Watchdog exiting because application was purposely killed or exited itself"
msgstr "Самоконтроль останавливается, потому что приложение было прервано намеренно, или завершилось"

#: src/hed/daemon/unix/daemon.cpp:156
msgid "Watchdog detected application timeout or error - killing process"
msgstr "Самоконтроль обнаружил превышение времени ожидания приложения или сбой - процесс прерывается"

#: src/hed/daemon/unix/daemon.cpp:167
msgid "Watchdog failed to wait till application exited - sending KILL"
msgstr "Самоконтроль не дождался завершения приложения - посылается сигнал KILL"

#: src/hed/daemon/unix/daemon.cpp:179
msgid "Watchdog failed to kill application - giving up and exiting"
msgstr "Самоконтроль не смог оборвать приложение - отказ и завершение"

#: src/hed/daemon/unix/daemon.cpp:200
msgid "Shutdown daemon"
msgstr "Останов демона"

#: src/hed/daemon/unix/main_unix.cpp:47
msgid "shutdown"
msgstr "Выключение"

#: src/hed/daemon/unix/main_unix.cpp:50
msgid "exit"
msgstr "выход"
#: src/hed/daemon/unix/main_unix.cpp:88
msgid "No server config part of config file"
msgstr "В файле настроек отсутствуют настройки сервера"

#: src/hed/daemon/unix/main_unix.cpp:163
#, c-format
msgid "Unknown log level %s"
msgstr "Неизвестный уровень журналирования %s"

#: src/hed/daemon/unix/main_unix.cpp:173
#, c-format
msgid "Failed to open log file: %s"
msgstr "Не удалось открыть журнальный файл: %s"

#: src/hed/daemon/unix/main_unix.cpp:205
msgid "Start foreground"
msgstr "Запуск в нефоновом режиме"

#: src/hed/daemon/unix/main_unix.cpp:254
#, c-format
msgid "XML config file %s does not exist"
msgstr "Файл настроек XML %s не существует"

#: src/hed/daemon/unix/main_unix.cpp:258 src/hed/daemon/unix/main_unix.cpp:273
#, c-format
msgid "Failed to load service configuration from file %s"
msgstr "Не удалось загрузить настройки сервиса из файла %s"

#: src/hed/daemon/unix/main_unix.cpp:264
#, c-format
msgid "INI config file %s does not exist"
msgstr "Файл настроек INI %s не существует"

#: src/hed/daemon/unix/main_unix.cpp:269 src/hed/daemon/unix/main_unix.cpp:291
msgid "Error evaluating profile"
msgstr "Ошибка проверки профиля"

#: src/hed/daemon/unix/main_unix.cpp:285
msgid "Error loading generated configuration"
msgstr "Ошибка загрузки сгенерированных настроек"

#: src/hed/daemon/unix/main_unix.cpp:296
msgid "Failed to load service configuration from any default config file"
msgstr "Не удалось загрузить настройки сервиса ни из какого файла настроек"

#: src/hed/daemon/unix/main_unix.cpp:357
msgid "Schema validation error"
msgstr "Ошибка проверки схемы"

#: src/hed/daemon/unix/main_unix.cpp:372
msgid "Configuration root element is not "
msgstr "Корневой элемент настроек не является "

#: src/hed/daemon/unix/main_unix.cpp:388
#, c-format
msgid "Cannot switch to group (%s)"
msgstr "Невозможно перейти к группе (%s)"

#: src/hed/daemon/unix/main_unix.cpp:398
#, c-format
msgid "Cannot switch to primary group for user (%s)"
msgstr "Невозможно переключиться на основную группу для пользователя (%s)"

#: src/hed/daemon/unix/main_unix.cpp:403
#, c-format
msgid "Cannot switch to user (%s)"
msgstr "Невозможно перейти к пользователю (%s)"

#: src/hed/daemon/unix/main_unix.cpp:421
msgid "Failed to load service side MCCs"
msgstr "Не удалось загрузить компоненты MCC сервера"

#: src/hed/daemon/unix/main_unix.cpp:423 src/tests/count/test_service.cpp:29
#: src/tests/echo/test.cpp:30 src/tests/echo/test_service.cpp:29
msgid "Service side MCCs are loaded"
msgstr "Подгружены сервисные компоненты цепи сообщений"

#: src/hed/daemon/unix/main_unix.cpp:430
msgid "Unexpected arguments supplied"
msgstr "Заданы непредусмотренные аргументы"

#: src/hed/dmc/file/DataPointFile.cpp:87
#, c-format
msgid "Unknown channel %s for stdio protocol"
msgstr "Неизвестный канал %s для протокола stdio"

#: src/hed/dmc/file/DataPointFile.cpp:94
#, c-format
msgid "Failed to open stdio channel %s"
msgstr "Не удалось открыть канал stdio %s"

#: src/hed/dmc/file/DataPointFile.cpp:95
#, c-format
msgid "Failed to open stdio channel %d"
msgstr "Не удалось открыть канал stdio %d"

#: src/hed/dmc/file/DataPointFile.cpp:335
#, c-format
msgid "fsync of file %s failed: %s"
msgstr "Сбой операции fsync на файле %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:340
#: src/hed/dmc/file/DataPointFile.cpp:348
#, c-format
msgid "closing file %s failed: %s"
msgstr "сбой при закрытии файла %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:367
#, c-format
msgid "File is not accessible: %s"
msgstr "Файл недоступен: %s"

#: src/hed/dmc/file/DataPointFile.cpp:373
#: src/hed/dmc/file/DataPointFile.cpp:458
#, c-format
msgid "Can't stat file: %s: %s"
msgstr "Невозможно получить статус файла: %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:419
#: src/hed/dmc/file/DataPointFile.cpp:425
#, c-format
msgid "Can't stat stdio channel %s"
msgstr "Невозможно выполнить операцию stat для канала stdio %s"

#: src/hed/dmc/file/DataPointFile.cpp:473
#, c-format
msgid "%s is not a directory"
msgstr "%s не является каталогом"

#: src/hed/dmc/file/DataPointFile.cpp:488
#, c-format
msgid "Failed to read object %s: %s"
msgstr "Сбой чтения объекта %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:501
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:534
#, c-format
msgid "File is not accessible %s: %s"
msgstr "Файл недоступен %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:507
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:540
#, c-format
msgid "Can't delete directory %s: %s"
msgstr "Невозможно удалить каталог %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:514
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:547
#, c-format
msgid "Can't delete file %s: %s"
msgstr "Невозможно удалить файл %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:524
#: src/hed/dmc/gridftp/DataPointGridFTP.cpp:335
#: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:315
#: src/hed/dmc/http/DataPointHTTP.cpp:1658
#: src/hed/dmc/http/DataPointHTTP.cpp:1676
#: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1466
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:562
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:582
#, c-format
msgid "Creating directory %s"
msgstr "Создается директория %s"

#: src/hed/dmc/file/DataPointFile.cpp:532 src/hed/dmc/srm/DataPointSRM.cpp:168
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:596
#, c-format
msgid "Renaming %s to %s"
msgstr "%s переименовывается в %s"

#: src/hed/dmc/file/DataPointFile.cpp:534
#: src/hed/dmc/xrootd/DataPointXrootd.cpp:605
#, c-format
msgid "Can't rename file %s: %s"
msgstr "Не удалось переименовать файл %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:562
#, c-format
msgid "Failed to open %s for reading: %s"
msgstr "Невозможно открыть %s для чтения: %s"

#: src/hed/dmc/file/DataPointFile.cpp:577
#: src/hed/dmc/file/DataPointFile.cpp:712
#, c-format
msgid "Failed to switch user id to %d/%d"
msgstr "Не удалось изменить идентификатор пользователя на %d/%d"

#: src/hed/dmc/file/DataPointFile.cpp:583
#, c-format
msgid "Failed to create/open file %s: %s"
msgstr "Сбой при создании/открытии файла %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:599
msgid "Failed to create thread"
msgstr "Не удалось создать поток"

#: src/hed/dmc/file/DataPointFile.cpp:679
#, c-format
msgid "Invalid url: %s"
msgstr "Неверный URL: %s"

#: src/hed/dmc/file/DataPointFile.cpp:688 src/hed/libs/data/FileCache.cpp:480
#, c-format
msgid "Failed to create directory %s: %s"
msgstr "Сбой создания каталога %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:701
#: src/hed/dmc/file/DataPointFile.cpp:720
#, c-format
msgid "Failed to create file %s: %s"
msgstr "Сбой при создании файла %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:732
#, c-format
msgid "setting file %s to size %llu"
msgstr "файлу %s присваивается размер %llu"

#: src/hed/dmc/file/DataPointFile.cpp:755
#, c-format
msgid "Failed to preallocate space for %s"
msgstr "Сбой предварительного резервирования места для %s"

#: src/hed/dmc/file/DataPointFile.cpp:794 src/hed/libs/data/FileCache.cpp:854
#, c-format
msgid "Failed to clean up file %s: %s"
msgstr "Сбой при очистке файла %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:808
#, c-format
msgid "Error during file validation. Can't stat file %s: %s"
msgstr "Ошибка при проверке файла. Невозможно выполнить операцию stat для файла %s: %s"

#: src/hed/dmc/file/DataPointFile.cpp:812
#, c-format
msgid "Error during file validation: Local file size %llu does not match source file size %llu for file %s"
msgstr "Ошибка при сверке: размер локального файла %llu не соответствует размеру файла-источника %llu для файла %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:53
#, c-format
msgid "Using proxy %s"
msgstr "Используется прокси %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:54
#, c-format
msgid "Using key %s"
msgstr "Используется ключ %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:55
#, c-format
msgid "Using cert %s"
msgstr "Используется сертификат %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:113
msgid "Locations are missing in destination LFC URL"
msgstr "В URL назначения LFC отсутствуют местоположения"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:119
#, c-format
msgid "Duplicate replica found in LFC: %s"
msgstr "В LFC обнаружена идентичная копия: %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:121
#, c-format
msgid "Adding location: %s - %s"
msgstr "Добавляется адрес: %s - %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:129
#: src/hed/libs/data/DataPointIndex.cpp:161
#, c-format
msgid "Add location: url: %s"
msgstr "Добавление расположения: url: %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:130
#: src/hed/libs/data/DataPointIndex.cpp:162
#, c-format
msgid "Add location: metadata: %s"
msgstr "Добавление расположения: metadata: %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:150
#: src/hed/dmc/gfal/DataPointGFAL.cpp:310
#, c-format
msgid "gfal_open failed: %s"
msgstr "Сбой gfal_open: %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:163
#: src/hed/dmc/gfal/DataPointGFAL.cpp:223
#: src/hed/dmc/gfal/DataPointGFAL.cpp:249
#: src/hed/dmc/gfal/DataPointGFAL.cpp:324
#: src/hed/dmc/gfal/DataPointGFAL.cpp:403
#: src/hed/dmc/gfal/DataPointGFAL.cpp:430
#, c-format
msgid "gfal_close failed: %s"
msgstr "Сбой gfal_close: %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:195
#, c-format
msgid "gfal_read failed: %s"
msgstr "Сбой gfal_read: %s"

#: src/hed/dmc/gfal/DataPointGFAL.cpp:237
msgid "StopReading starts waiting for transfer_condition."
msgstr "StopReading начинает ожидание transfer_condition."

#: src/hed/dmc/gfal/DataPointGFAL.cpp:239
msgid "StopReading finished waiting for transfer_condition."
msgstr "StopReading закончил ожидание transfer_condition."
#: src/hed/dmc/gfal/DataPointGFAL.cpp:271 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:68 #: src/libs/data-staging/DataDeliveryLocalComm.cpp:73 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:44 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:49 #, c-format msgid "No locations defined for %s" msgstr "Ðе найдено ни одного меÑÑ‚Ð¾Ð½Ð°Ñ…Ð¾Ð¶Ð´ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:278 #, c-format msgid "Failed to set LFC replicas: %s" msgstr "Сбой Ð·Ð°Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¹ в LFC: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:304 #, c-format msgid "gfal_mkdir failed (%s), trying to write anyway" msgstr "Сбой в gfal_mkdir (%s), вÑÑ‘ же попытаемÑÑ Ð·Ð°Ð¿Ð¸Ñать" #: src/hed/dmc/gfal/DataPointGFAL.cpp:359 #, c-format msgid "DataPointGFAL::write_file got position %d and offset %d, has to seek" msgstr "" "DataPointGFAL::write_file получил на входе Ð°Ð´Ñ€ÐµÑ %d и Ñдвиг %d, проводитÑÑ " "поиÑк" #: src/hed/dmc/gfal/DataPointGFAL.cpp:388 #, c-format msgid "gfal_write failed: %s" msgstr "Сбой gfal_write: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:418 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:405 msgid "StopWriting starts waiting for transfer_condition." msgstr "StopWriting начинает ожидание transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:420 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:407 msgid "StopWriting finished waiting for transfer_condition." msgstr "StopWriting закончил ожидание transfer_condition." #: src/hed/dmc/gfal/DataPointGFAL.cpp:451 #, c-format msgid "gfal_stat failed: %s" msgstr "Сбой gfal_stat: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:496 #, c-format msgid "gfal_listxattr failed, no replica information can be obtained: %s" msgstr "Сбой в gfal_listxattr, невозможно получить информацию о копиÑÑ…: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:537 #, c-format msgid "gfal_opendir failed: %s" msgstr "Сбой gfal_opendir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:549 #, c-format msgid "List will stat the URL %s" msgstr "ПеречиÑление запроÑит информацию stat об URL %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:556 #, c-format msgid "gfal_closedir failed: %s" msgstr "Сбой gfal_closedir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:584 #, c-format msgid "gfal_rmdir failed: %s" msgstr "Сбой gfal_rmdir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:587 #, c-format msgid "gfal_unlink failed: %s" msgstr "Сбой gfal_unlink: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:604 #, c-format msgid "gfal_mkdir failed: %s" msgstr "Сбой gfal_mkdir: %s" #: src/hed/dmc/gfal/DataPointGFAL.cpp:619 #, c-format msgid "gfal_rename failed: %s" msgstr "Сбой gfal_rename: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:19 #, c-format msgid "Failed to obtain bytes transferred: %s" msgstr "Сбой Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ ÐºÐ¾Ð»Ð¸Ñ‡ÐµÑтва переданных байтов: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:42 #, c-format msgid "Failed to get initiate GFAL2 parameter handle: %s" msgstr "Ðе удалоÑÑŒ получить ÑÑылку параметра GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:49 #, c-format msgid "Failed to get initiate new GFAL2 context: %s" msgstr "Ðе удалоÑÑŒ получить новый контекÑÑ‚ GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:56 #, c-format msgid "Failed to set GFAL2 monitor callback: %s" msgstr "Сбой уÑтановки обратного вызова монитора GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:64 #, c-format msgid "Failed to set overwrite option in GFAL2: %s" msgstr "Сбой уÑтановки опции перезапиÑи в GFAL2: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:72 #, c-format msgid "Failed to set GFAL2 
transfer timeout, will use default: %s" msgstr "" "Сбой уÑтановки времени Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ GFAL2, будет иÑпользоватьÑÑ " "значение по умолчанию: %s" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:84 msgid "Transfer failed" msgstr "Передача не удалаÑÑŒ" #: src/hed/dmc/gfal/GFALTransfer3rdParty.cpp:92 msgid "Transfer succeeded" msgstr "Передача удалаÑÑŒ" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:38 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:56 msgid "ftp_complete_callback: success" msgstr "ftp_complete_callback: уÑпех" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:44 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:62 #, c-format msgid "ftp_complete_callback: error: %s" msgstr "ftp_complete_callback: ошибка: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:60 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:78 msgid "ftp_check_callback" msgstr "ftp_check_callback" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:62 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:90 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:116 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:135 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:305 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:340 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:678 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:847 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:879 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:913 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1052 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1104 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1114 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1122 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1130 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1138 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1144 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:80 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:108 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:285 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:321 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:731 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:764 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:801 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:932 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:996 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1006 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1014 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1022 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1030 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1036 #, c-format msgid "Globus error: %s" msgstr "Ошибка Globus: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:73 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:91 msgid "Excessive data received while checking file access" msgstr "При проверке прав доÑтупа к файлу получены избыточные данные" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:89 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:107 msgid "Registration of Globus FTP buffer failed - cancel check" msgstr "Сбой региÑтрации буфера Globus FTP - проверка прерываетÑÑ" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:115 msgid "check_ftp: globus_ftp_client_size failed" msgstr "check_ftp: Ñбой в globus_ftp_client_size" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:119 msgid "check_ftp: timeout waiting for size" msgstr "check_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ size" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:124 msgid "check_ftp: failed to get file's size" msgstr "check_ftp: не удалоÑÑŒ определить размер файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:127 #, c-format msgid "check_ftp: obtained 
size: %lli" msgstr "check_ftp: получен размер: %lli" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:134 msgid "check_ftp: globus_ftp_client_modification_time failed" msgstr "check_ftp: Ñбой в globus_ftp_client_modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:138 msgid "check_ftp: timeout waiting for modification_time" msgstr "check_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:143 msgid "check_ftp: failed to get file's modification time" msgstr "check_ftp: Ñбой при определении времени Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:148 #, c-format msgid "check_ftp: obtained modification date: %s" msgstr "check_ftp: получена дата изменениÑ: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:167 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:147 msgid "check_ftp: globus_ftp_client_get failed" msgstr "check_ftp: Ñбой в globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:174 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:154 msgid "check_ftp: globus_ftp_client_register_read" msgstr "check_ftp: globus_ftp_client_register_read" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:185 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:166 msgid "check_ftp: timeout waiting for partial get" msgstr "check_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ñ‡Ð°Ñтичной загрузки" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:215 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:193 #, c-format msgid "File delete failed, attempting directory delete for %s" msgstr "Сбой при удалении файла, попытка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° Ð´Ð»Ñ %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:225 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:204 msgid "delete_ftp: globus_ftp_client_delete failed" msgstr "delete_ftp: Ñбой в globus_ftp_client_delete" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:231 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:252 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:210 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:232 msgid "delete_ftp: timeout waiting for delete" msgstr "delete_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ delete" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:246 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:226 msgid "delete_ftp: globus_ftp_client_rmdir failed" msgstr "delete_ftp: Ñбой в globus_ftp_client_rmdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:301 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:280 #, c-format msgid "mkdir_ftp: making %s" msgstr "mkdir_ftp: ÑоздаётÑÑ %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:309 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:289 msgid "mkdir_ftp: timeout waiting for mkdir" msgstr "mkdir_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:344 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:325 msgid "Timeout waiting for mkdir" msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:370 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:348 msgid "start_reading_ftp" msgstr "start_reading_ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:374 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:352 msgid "start_reading_ftp: globus_ftp_client_get" msgstr "start_reading_ftp: globus_ftp_client_get" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:388 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:366 msgid "start_reading_ftp: globus_ftp_client_get failed" msgstr "start_reading_ftp: Ñбой в globus_ftp_client_get" #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:399 msgid "start_reading_ftp: globus_thread_create failed" msgstr "start_reading_ftp: Ñбой в globus_thread_create" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:418 msgid "stop_reading_ftp: aborting connection" msgstr "stop_reading_ftp: отменÑетÑÑ Ñоединение" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:425 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:647 #, c-format msgid "Failed to abort transfer of ftp file: %s" msgstr "Сбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ файла по ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:426 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:648 msgid "Assuming transfer is already aborted or failed." msgstr "Предполагаем, что переÑылка уже отменена, либо оборвалаÑÑŒ." #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:433 msgid "stop_reading_ftp: waiting for transfer to finish" msgstr "stop_reading_ftp: ожидание Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð¿ÐµÑ€ÐµÑылки" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:435 #, c-format msgid "stop_reading_ftp: exiting: %s" msgstr "stop_reading_ftp: выход: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:449 msgid "ftp_read_thread: get and register buffers" msgstr "ftp_read_thread: получение и региÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÑ„ÐµÑ€Ð¾Ð²" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:455 #, c-format msgid "ftp_read_thread: for_read failed - aborting: %s" msgstr "ftp_read_thread: Ñбой for_read - прерывание: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:465 #, c-format msgid "ftp_read_thread: data callback failed - aborting: %s" msgstr "ftp_read_thread: Ñбой обратного вызова данных - прерывание: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:477 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:382 #, c-format msgid "ftp_read_thread: Globus error: %s" msgstr "ftp_read_thread: ошибка Globus: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:490 #, c-format msgid "ftp_read_thread: too many registration failures - abort: %s" msgstr "ftp_read_thread: Ñлишком много Ñбоев региÑтрации - отмена: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:495 #, c-format msgid "ftp_read_thread: failed to register Globus buffer - will try later: %s" msgstr "" "ftp_read_thread: Ñбой при региÑтрации буфера Globus - попробуем попозже: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:508 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:398 msgid "ftp_read_thread: waiting for eof" msgstr "ftp_read_thread: ожидание конца файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:512 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:402 msgid "ftp_read_thread: waiting for buffers released" msgstr "ftp_read_thread: ожидание разблокировки буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:516 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:410 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:664 msgid "ftp_read_thread: failed to release buffers - leaking" msgstr "ftp_read_thread: Ñбой ÑброÑа буферов - утечка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:521 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:417 msgid "ftp_read_thread: exiting" msgstr "ftp_read_thread: выход" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:539 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:438 #, c-format msgid "ftp_read_callback: failure: %s" msgstr "ftp_read_callback: Ñбой: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:542 msgid "ftp_read_callback: success" msgstr "ftp_read_callback: уÑпех" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:558 msgid "Failed to get ftp file" msgstr "Ðе удалоÑÑŒ получить файл ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:560 #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:819 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:519 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:708 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:129 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:169 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1214 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1248 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1430 #: src/hed/libs/common/Thread.cpp:240 src/hed/libs/common/Thread.cpp:243 #: src/hed/libs/credential/Credential.cpp:1076 #: src/hed/libs/data/DataPointDelegate.cpp:628 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:66 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:82 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:98 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:117 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:127 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:135 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:144 #: src/hed/mcc/tls/PayloadTLSMCC.cpp:69 src/hed/shc/arcpdp/ArcPDP.cpp:234 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:305 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:258 #: src/libs/data-staging/Scheduler.cpp:117 #: src/services/a-rex/delegation/DelegationStore.cpp:36 #: src/services/a-rex/delegation/DelegationStore.cpp:41 #: src/services/a-rex/delegation/DelegationStore.cpp:46 #: src/services/a-rex/delegation/DelegationStore.cpp:75 #: src/services/a-rex/delegation/DelegationStore.cpp:81 #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:233 #: src/services/a-rex/grid-manager/inputcheck.cpp:33 #: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:408 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:395 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:435 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:487 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:602 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:636 #, c-format msgid "%s" msgstr "%s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:594 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:545 msgid "start_writing_ftp: mkdir" msgstr "start_writing_ftp: mkdir" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:597 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:547 msgid "start_writing_ftp: mkdir failed - still trying to write" msgstr "start_writing_ftp: Ñбой mkdir - вÑÑ‘ же пытаемÑÑ Ð·Ð°Ð¿Ð¸Ñать" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:599 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:549 msgid "start_writing_ftp: put" msgstr "start_writing_ftp: put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:613 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:563 msgid "start_writing_ftp: put failed" msgstr "start_writing_ftp: Ñбой в put" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:623 msgid "start_writing_ftp: globus_thread_create failed" msgstr "start_writing_ftp: Ñбой в globus_thread_create" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:640 #: src/hed/libs/data/DataPointDelegate.cpp:307 msgid "StopWriting: aborting connection" msgstr "StopWriting: прерывание ÑвÑзи" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:664 #: src/hed/dmc/http/DataPointHTTP.cpp:982 #: src/hed/libs/data/DataPointDelegate.cpp:321 #, c-format msgid "StopWriting: Calculated checksum %s" msgstr "StopWriting: ВычиÑлена ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:668 #: src/hed/dmc/http/DataPointHTTP.cpp:986 #: src/hed/libs/data/DataPointDelegate.cpp:325 #, c-format msgid "StopWriting: looking for checksum of %s" msgstr "StopWriting: поиÑк контрольной Ñуммы %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:677 #: 
src/hed/dmc/gridftp/DataPointGridFTP.cpp:912 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:800 msgid "list_files_ftp: globus_ftp_client_cksm failed" msgstr "list_files_ftp: сбой globus_ftp_client_cksm" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:681 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:916 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:804 msgid "list_files_ftp: timeout waiting for cksum" msgstr "list_files_ftp: истекло время ожидания проверочной суммы" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:688 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:923 msgid "list_files_ftp: no checksum information possible" msgstr "list_files_ftp: информация о контрольных суммах недоступна" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:691 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:926 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:817 #, c-format msgid "list_files_ftp: checksum %s" msgstr "list_files_ftp: проверочная сумма %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:694 #: src/hed/dmc/http/DataPointHTTP.cpp:995 #: src/hed/libs/data/DataPointDelegate.cpp:332 msgid "" "Checksum type returned by server is different to requested type, cannot " "compare" msgstr "" "Тип контрольной суммы на сервере отличается от запрошенного, сравнение " "невозможно" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:696 #: src/hed/dmc/http/DataPointHTTP.cpp:997 #: src/hed/libs/data/DataPointDelegate.cpp:334 #, c-format msgid "Calculated checksum %s matches checksum reported by server" msgstr "" "Вычисленная проверочная сумма %s совпадает с проверочной суммой сервера" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:699 #: src/hed/dmc/http/DataPointHTTP.cpp:999 #: src/hed/libs/data/DataPointDelegate.cpp:337 #, c-format msgid "" "Checksum mismatch between calculated checksum %s and checksum reported by " "server %s" msgstr "" "Несовпадение между вычисленной контрольной суммой %s и контрольной суммой, " "выданной сервером %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:721 msgid "ftp_write_thread: get and register buffers" msgstr "ftp_write_thread: получение и регистрация буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:725 msgid "ftp_write_thread: for_write failed - aborting" msgstr "ftp_write_thread: сбой for_write - прерывание" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:743 msgid "ftp_write_thread: data callback failed - aborting" msgstr "ftp_write_thread: сбой обратного вызова данных - прерывание" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:759 msgid "ftp_write_thread: waiting for eof" msgstr "ftp_write_thread: ожидание конца файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:763 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:662 msgid "ftp_write_thread: waiting for buffers released" msgstr "ftp_write_thread: ожидание разблокировки буферов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:771 msgid "ftp_write_thread: failed to release buffers - leaking" msgstr "ftp_write_thread: сбой сброса буферов - утечка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:776 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:669 msgid "ftp_write_thread: exiting" msgstr "ftp_write_thread: выход" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:799 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:688 #, c-format msgid "ftp_write_callback: failure: %s" msgstr "ftp_write_callback: сбой: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:802 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:690 #, c-format msgid "ftp_write_callback: success %s" msgstr "ftp_write_callback:
уÑпех %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:817 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:706 msgid "Failed to store ftp file" msgstr "Ðе удалоÑÑŒ Ñохранить файл ftp" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:825 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:711 msgid "ftp_put_complete_callback: success" msgstr "ftp_put_complete_callback: уÑпех" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:841 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:725 #, c-format msgid "list_files_ftp: looking for size of %s" msgstr "list_files_ftp: поиÑк размера %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:845 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:729 msgid "list_files_ftp: globus_ftp_client_size failed" msgstr "list_files_ftp: Ñбой в globus_ftp_client_size" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:851 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:852 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:735 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:736 msgid "list_files_ftp: timeout waiting for size" msgstr "list_files_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°Ð·Ð¼ÐµÑ€Ð°" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:858 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:742 msgid "list_files_ftp: failed to get file's size" msgstr "list_files_ftp: не удалоÑÑŒ определить размер файла" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:870 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:755 #, c-format msgid "list_files_ftp: looking for modification time of %s" msgstr "list_files_ftp: определение времени Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:876 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:761 msgid "list_files_ftp: globus_ftp_client_modification_time failed" msgstr "list_files_ftp: Ñбой globus_ftp_client_modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:883 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:768 msgid "list_files_ftp: timeout waiting for modification_time" msgstr "list_files_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ modification_time" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:891 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:776 msgid "list_files_ftp: failed to get file's modification time" msgstr "list_files_ftp: не удалоÑÑŒ определить Ð²Ñ€ÐµÐ¼Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:903 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:790 #, c-format msgid "list_files_ftp: looking for checksum of %s" msgstr "list_files_ftp: поиÑк проверочной Ñуммы %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:942 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:830 #, c-format msgid "Failed to obtain stat from FTP: %s" msgstr "Ðе удалоÑÑŒ получить ÑпиÑок ÑÑ‚Ð°Ñ‚ÑƒÑ Ñ‡ÐµÑ€ÐµÐ· FTP: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:948 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:835 msgid "No results returned from stat" msgstr "Вызов stat не возвратил никаких результатов" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:954 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:841 #, c-format msgid "Wrong number of objects (%i) for stat from ftp: %s" msgstr "Ðеверное количеÑтво объектов (%i) Ð´Ð»Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸ stat от ftp: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:968 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:854 #, c-format msgid "Unexpected path %s returned from server" msgstr "Сервер возвратил неожиданный путь %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1007 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:887 #, c-format msgid "Failed to obtain listing from FTP: %s" 
msgstr "Ðе удалоÑÑŒ получить ÑпиÑок файлов через FTP: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1050 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:930 msgid "Rename: globus_ftp_client_move failed" msgstr "Переименование: Ñбой в globus_ftp_client_move" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1056 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:936 msgid "Rename: timeout waiting for operation to complete" msgstr "Переименование: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1103 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:995 msgid "init_handle: globus_ftp_client_handleattr_init failed" msgstr "init_handle: Ñбой в globus_ftp_client_handleattr_init" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1112 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1004 msgid "init_handle: globus_ftp_client_handleattr_set_gridftp2 failed" msgstr "init_handle: Ñбой в globus_ftp_client_handleattr_set_gridftp2" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1121 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1013 msgid "init_handle: globus_ftp_client_handle_init failed" msgstr "init_handle: Ñбой в globus_ftp_client_handlea_init" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1128 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1020 msgid "init_handle: globus_ftp_client_operationattr_init failed" msgstr "init_handle: Ñбой в globus_ftp_client_operationattr_init" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1136 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1028 msgid "init_handle: globus_ftp_client_operationattr_set_allow_ipv6 failed" msgstr "init_handle: Ñбой globus_ftp_client_operationattr_set_allow_ipv6" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1142 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1034 msgid "init_handle: globus_ftp_client_operationattr_set_delayed_pasv failed" msgstr "init_handle: Ñбой globus_ftp_client_operationattr_set_delayed_pasv" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1190 #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1218 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1086 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1115 #, c-format msgid "globus_ftp_client_operationattr_set_authorization: error: %s" msgstr "globus_ftp_client_operationattr_set_authorization: ошибка: %s" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1217 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1114 msgid "Failed to set credentials for GridFTP transfer" msgstr "Ðе удалоÑÑŒ уÑтановить параметры доÑтупа Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ данных по GridFTP" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1223 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1120 msgid "Using secure data transfer" msgstr "ИÑпользуетÑÑ Ð·Ð°Ñ‰Ð¸Ñ‰Ñ‘Ð½Ð½Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° данных" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1228 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1125 msgid "Using insecure data transfer" msgstr "ИÑпользуетÑÑ Ð½ÐµÐ·Ð°Ñ‰Ð¸Ñ‰Ñ‘Ð½Ð½Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° данных" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1255 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1152 msgid "~DataPoint: destroy ftp_handle" msgstr "~DataPoint: уничтожение ftp_handle" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1258 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1155 msgid "~DataPoint: destroy ftp_handle failed - retrying" msgstr "~DataPoint: уничтожение ftp_handle не удалоÑÑŒ - Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1276 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1173 msgid "~DataPoint: failed to 
destroy ftp_handle - leaking" msgstr "~DataPoint: уничтожение ftp_handle не удалось - утечка" #: src/hed/dmc/gridftp/DataPointGridFTP.cpp:1296 msgid "" "Missing reference to factory and/or module. It is unsafe to use Globus in " "non-persistent mode - (Grid)FTP code is disabled. Report to developers." msgstr "" "Отсутствует указание на фабрику и/или модуль. Использование Globus в " "неопределённом режиме небезопасно - вызов (Grid)FTP заблокирован. Свяжитесь " "с разработчиками." #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:390 msgid "ftp_read_thread: failed to register buffers" msgstr "ftp_read_thread: сбой регистрации буферов" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:405 msgid "ftp_read_thread: failed to release buffers" msgstr "ftp_read_thread: сбой сброса буферов" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:443 #, c-format msgid "ftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%u" msgstr "ftp_read_callback: успех - offset=%u, length=%u, eof=%u, allow oof=%u" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:457 #, c-format msgid "ftp_read_callback: delayed data chunk: %llu %llu" msgstr "ftp_read_callback: задержанный блок данных: %llu %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:464 #, c-format msgid "ftp_read_callback: unexpected data out of order: %llu != %llu" msgstr "ftp_read_callback: неверные неупорядоченные данные: %llu != %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:471 msgid "ftp_read_callback: too many unexpected out of order chunks" msgstr "ftp_read_callback: избыток неверных неупорядоченных блоков" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:492 #, c-format msgid "ftp_read_callback: Globus error: %s" msgstr "ftp_read_callback: ошибка Globus: %s" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:517 msgid "ftp_get_complete_callback: Failed to get ftp file" msgstr "ftp_get_complete_callback: Сбой получения файла ftp" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:522 msgid "ftp_get_complete_callback: success" msgstr "ftp_get_complete_callback: успех" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:577 msgid "start_writing_ftp: waiting for data tag" msgstr "start_writing_ftp: ожидание метки данных" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:580 msgid "start_writing_ftp: failed to read data tag" msgstr "start_writing_ftp: сбой чтения метки данных" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:585 msgid "start_writing_ftp: waiting for data chunk" msgstr "start_writing_ftp: ожидание куска данных" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:587 msgid "start_writing_ftp: failed to read data chunk" msgstr "start_writing_ftp: сбой чтения куска данных" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:598 #, c-format msgid "ftp_write_thread: data out of order in stream mode: %llu != %llu" msgstr "" "ftp_write_thread: неупорядоченные данные в поточном режиме: %llu != %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:605 msgid "ftp_write_thread: too many out of order chunks in stream mode" msgstr "ftp_write_thread: избыток неупорядоченных блоков в поточном режиме" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:610 #, c-format msgid "start_writing_ftp: data chunk: %llu %llu" msgstr "start_writing_ftp: кусок данных: %llu %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:616 #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:642 #, c-format msgid "ftp_write_thread: Globus error: %s" msgstr "ftp_write_thread: ошибка Globus: %s" #:
src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:635 #, c-format msgid "start_writing_ftp: delayed data chunk: %llu %llu" msgstr "start_writing_ftp: задержанный блок данных: %llu %llu" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:654 msgid "start_writing_ftp: waiting for some buffers sent" msgstr "start_writing_ftp: ожидание отправки буферов" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:660 msgid "ftp_write_thread: waiting for transfer complete" msgstr "ftp_write_thread: ожидание завершения передачи" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:811 msgid "list_files_ftp: no checksum information supported" msgstr "list_files_ftp: информация о контрольных суммах не поддерживается" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:813 msgid "list_files_ftp: no checksum information returned" msgstr "list_files_ftp: не получена информация о контрольных суммах" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:908 msgid "Too many failures to obtain checksum - giving up" msgstr "Слишком много сбоев при получении контрольной суммы - прерывание" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1268 msgid "Expecting Command and URL provided" msgstr "Задайте команду и URL" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1275 #: src/hed/libs/data/DataExternalHelper.cpp:376 msgid "Expecting Command among arguments" msgstr "Одним из аргументов должна быть команда" #: src/hed/dmc/gridftp/DataPointGridFTPHelper.cpp:1279 #: src/hed/libs/data/DataExternalHelper.cpp:380 msgid "Expecting URL among arguments" msgstr "Одним из аргументов должен быть URL" #: src/hed/dmc/gridftp/Lister.cpp:221 src/hed/dmc/gridftp/Lister.cpp:289 #: src/hed/dmc/gridftp/Lister.cpp:384 src/hed/dmc/gridftp/Lister.cpp:767 #: src/hed/dmc/gridftp/Lister.cpp:812 #, c-format msgid "Failure: %s" msgstr "Ошибка: %s" #: src/hed/dmc/gridftp/Lister.cpp:288 msgid "Error getting list of files (in list)" msgstr "Не удалось получить список файлов (в list)" #: src/hed/dmc/gridftp/Lister.cpp:290 msgid "Assuming - file not found" msgstr "Предполагается, что файл не найден" #: src/hed/dmc/gridftp/Lister.cpp:307 #, c-format msgid "list record: %s" msgstr "запись списка: %s" #: src/hed/dmc/gridftp/Lister.cpp:362 msgid "Failed reading list of files" msgstr "Ошибка чтения списка файлов" #: src/hed/dmc/gridftp/Lister.cpp:398 msgid "Failed reading data" msgstr "Ошибка чтения данных" #: src/hed/dmc/gridftp/Lister.cpp:426 #, c-format msgid "Command: %s" msgstr "Команда: %s" #: src/hed/dmc/gridftp/Lister.cpp:430 src/hed/dmc/gridftp/Lister.cpp:471 #: src/hed/mcc/http/PayloadHTTP.cpp:1010 msgid "Memory allocation error" msgstr "Ошибка выделения памяти" #: src/hed/dmc/gridftp/Lister.cpp:438 #, c-format msgid "%s failed" msgstr "%s не удалось" #: src/hed/dmc/gridftp/Lister.cpp:442 msgid "Command is being sent" msgstr "Посылается команда" #: src/hed/dmc/gridftp/Lister.cpp:447 msgid "Waiting for response" msgstr "Ожидание отклика" #: src/hed/dmc/gridftp/Lister.cpp:452 msgid "Callback got failure" msgstr "Сбой обратного вызова" #: src/hed/dmc/gridftp/Lister.cpp:538 msgid "Failed in globus_cond_init" msgstr "Сбой в globus_cond_init" #: src/hed/dmc/gridftp/Lister.cpp:542 msgid "Failed in globus_mutex_init" msgstr "Сбой в globus_mutex_init" #: src/hed/dmc/gridftp/Lister.cpp:549 msgid "Failed allocating memory for handle" msgstr "Не удалось зарезервировать память под ссылку" #: src/hed/dmc/gridftp/Lister.cpp:554 msgid "Failed in
globus_ftp_control_handle_init" msgstr "Сбой в globus_ftp_control_handle_init" #: src/hed/dmc/gridftp/Lister.cpp:562 msgid "Failed to enable IPv6" msgstr "Ðе удалоÑÑŒ включить IPv6" #: src/hed/dmc/gridftp/Lister.cpp:573 msgid "Closing connection" msgstr "Прекращение ÑвÑзи" #: src/hed/dmc/gridftp/Lister.cpp:580 src/hed/dmc/gridftp/Lister.cpp:595 msgid "Timeout waiting for Globus callback - leaking connection" msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð½Ð¾Ð³Ð¾ ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Globus - утечка ÑоединениÑ" #: src/hed/dmc/gridftp/Lister.cpp:605 msgid "Closed successfully" msgstr "УÑпешное прекращение" #: src/hed/dmc/gridftp/Lister.cpp:607 msgid "Closing may have failed" msgstr "Возможно, был Ñбой про закрытии" #: src/hed/dmc/gridftp/Lister.cpp:634 msgid "Waiting for globus handle to settle" msgstr "Ждём пока ÑÑылка globus уÑтаканитÑÑ" #: src/hed/dmc/gridftp/Lister.cpp:639 #, c-format msgid "Handle is not in proper state %u/%u" msgstr "СÑылка в недопуÑтимом ÑоÑтоÑии %u/%u" #: src/hed/dmc/gridftp/Lister.cpp:645 msgid "Globus handle is stuck" msgstr "СÑылка globus заÑтрÑла" #: src/hed/dmc/gridftp/Lister.cpp:661 #, c-format msgid "Failed destroying handle: %s. Can't handle such situation." msgstr "" "Ðе удалоÑÑŒ уничтожить ÑÑылку: %s. Ðевозможно ÑправитьÑÑ Ñ Ñ‚Ð°ÐºÐ¸Ð¼ положением." #: src/hed/dmc/gridftp/Lister.cpp:684 #, c-format msgid "EPSV failed: %s" msgstr "Сбой EPSV: %s" #: src/hed/dmc/gridftp/Lister.cpp:688 msgid "EPSV failed" msgstr "Сбой EPSV" #: src/hed/dmc/gridftp/Lister.cpp:695 #, c-format msgid "PASV failed: %s" msgstr "ИнÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ PASV не прошла: %s" #: src/hed/dmc/gridftp/Lister.cpp:699 msgid "PASV failed" msgstr "ИнÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ PASV не прошла" #: src/hed/dmc/gridftp/Lister.cpp:765 msgid "Failed to apply local address to data connection" msgstr "Ðе удалоÑÑŒ применить локальный Ð°Ð´Ñ€ÐµÑ Ðº Ñоединению передачи данных" #: src/hed/dmc/gridftp/Lister.cpp:783 msgid "Can't parse host and/or port in response to EPSV/PASV" msgstr "" "Ðе удалоÑÑŒ извлечь Ð°Ð´Ñ€ÐµÑ ÑƒÐ·Ð»Ð° и/или номер порта из ответа на Ð·Ð°Ð¿Ñ€Ð¾Ñ EPSV/PASV" #: src/hed/dmc/gridftp/Lister.cpp:788 #, c-format msgid "Data channel: %d.%d.%d.%d:%d" msgstr "Канал передачи данных: %d.%d.%d.%d:%d" #: src/hed/dmc/gridftp/Lister.cpp:806 #, c-format msgid "Data channel: [%s]:%d" msgstr "Канал передачи данных: [%s]:%d" #: src/hed/dmc/gridftp/Lister.cpp:810 msgid "Obtained host and address are not acceptable" msgstr "Полученные Ð°Ð´Ñ€ÐµÑ Ð¸ номер порта неприемлемы" #: src/hed/dmc/gridftp/Lister.cpp:820 msgid "Failed to open data channel" msgstr "Ðе удалоÑÑŒ открыть канал передачи данных" #: src/hed/dmc/gridftp/Lister.cpp:838 #, c-format msgid "Unsupported protocol in url %s" msgstr "Ðеподдерживаемый протокол в URL %s" #: src/hed/dmc/gridftp/Lister.cpp:850 msgid "Reusing connection" msgstr "Повторное иÑпользование ÑоединениÑ" #: src/hed/dmc/gridftp/Lister.cpp:874 #, c-format msgid "Failed connecting to server %s:%d" msgstr "Сбой уÑÑ‚Ð°Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ ÑвÑзи Ñ Ñервером %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:880 #, c-format msgid "Failed to connect to server %s:%d" msgstr "Ðе удалоÑÑŒ уÑтановить ÑвÑзь Ñ Ñервером %s:%d" #: src/hed/dmc/gridftp/Lister.cpp:896 msgid "Missing authentication information" msgstr "ОтÑутÑтвует Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ подлинноÑти" #: src/hed/dmc/gridftp/Lister.cpp:905 src/hed/dmc/gridftp/Lister.cpp:919 #, c-format msgid "Bad authentication information: %s" msgstr "ÐÐµÐ¿Ñ€Ð¸ÐµÐ¼Ð»ÐµÐ¼Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ подлинноÑти: %s" #: 
src/hed/dmc/gridftp/Lister.cpp:928 src/hed/dmc/gridftp/Lister.cpp:943 #, c-format msgid "Failed authenticating: %s" msgstr "Ошибка проверки подлинности: %s" #: src/hed/dmc/gridftp/Lister.cpp:935 msgid "Failed authenticating" msgstr "Ошибка проверки подлинности" #: src/hed/dmc/gridftp/Lister.cpp:970 src/hed/dmc/gridftp/Lister.cpp:1126 #, c-format msgid "DCAU failed: %s" msgstr "Инструкция DCAU не прошла: %s" #: src/hed/dmc/gridftp/Lister.cpp:974 src/hed/dmc/gridftp/Lister.cpp:1131 msgid "DCAU failed" msgstr "Инструкция DCAU не прошла" #: src/hed/dmc/gridftp/Lister.cpp:994 msgid "MLST is not supported - trying LIST" msgstr "MLST не поддерживается - пробуем LIST" #: src/hed/dmc/gridftp/Lister.cpp:1010 #, c-format msgid "Immediate completion expected: %s" msgstr "Ожидается немедленное завершение: %s" #: src/hed/dmc/gridftp/Lister.cpp:1014 msgid "Immediate completion expected" msgstr "Ожидается немедленное завершение" #: src/hed/dmc/gridftp/Lister.cpp:1027 #, c-format msgid "Missing information in reply: %s" msgstr "Неполная информация в отклике: %s" #: src/hed/dmc/gridftp/Lister.cpp:1061 #, c-format msgid "Missing final reply: %s" msgstr "Отсутствует заключительный отклик: %s" #: src/hed/dmc/gridftp/Lister.cpp:1085 #, c-format msgid "Unexpected immediate completion: %s" msgstr "Неожиданное немедленное завершение: %s" #: src/hed/dmc/gridftp/Lister.cpp:1097 #, c-format msgid "LIST/MLST failed: %s" msgstr "Сбой LIST/MLST: %s" #: src/hed/dmc/gridftp/Lister.cpp:1102 msgid "LIST/MLST failed" msgstr "Сбой LIST/MLST" #: src/hed/dmc/gridftp/Lister.cpp:1152 msgid "MLSD is not supported - trying NLST" msgstr "MLSD не поддерживается - пробуем NLST" #: src/hed/dmc/gridftp/Lister.cpp:1166 #, c-format msgid "Immediate completion: %s" msgstr "Немедленное завершение: %s" #: src/hed/dmc/gridftp/Lister.cpp:1174 #, c-format msgid "NLST/MLSD failed: %s" msgstr "Инструкция NLST/MLSD не прошла: %s" #: src/hed/dmc/gridftp/Lister.cpp:1180 msgid "NLST/MLSD failed" msgstr "Инструкция NLST/MLSD не прошла" #: src/hed/dmc/gridftp/Lister.cpp:1201 #, c-format msgid "Data transfer aborted: %s" msgstr "Передача данных прервана: %s" #: src/hed/dmc/gridftp/Lister.cpp:1206 msgid "Data transfer aborted" msgstr "Передача данных прервана" #: src/hed/dmc/gridftp/Lister.cpp:1218 msgid "Failed to transfer data" msgstr "Не удалось передать данные" #: src/hed/dmc/http/DataPointHTTP.cpp:409 #: src/hed/dmc/http/DataPointHTTP.cpp:597 #: src/hed/dmc/http/DataPointHTTP.cpp:691 #: src/hed/dmc/http/DataPointHTTP.cpp:1137 #: src/hed/dmc/http/DataPointHTTP.cpp:1282 #: src/hed/dmc/http/DataPointHTTP.cpp:1431 #, c-format msgid "Redirecting to %s" msgstr "Перенаправление к %s" #: src/hed/dmc/http/DataPointHTTP.cpp:461 #, c-format msgid "PROPFIND response: %s" msgstr "Ответ PROPFIND: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:515 #, c-format msgid "Using checksum %s" msgstr "Используется контрольная сумма %s" #: src/hed/dmc/http/DataPointHTTP.cpp:523 #, c-format msgid "No matching checksum type, using first in list %s" msgstr "Нет совпадающего типа контрольной суммы, используется первый в списке %s" #: src/hed/dmc/http/DataPointHTTP.cpp:616 #: src/hed/dmc/http/DataPointHTTP.cpp:710 msgid "No information returned by PROPFIND" msgstr "PROPFIND не возвратил никакой информации" #: src/hed/dmc/http/DataPointHTTP.cpp:767 #, c-format msgid "Stat: obtained size %llu" msgstr "Stat: получен размер %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:771 #, c-format msgid "Stat: obtained modification time %s" msgstr "Stat: получено время
Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ %s" #: src/hed/dmc/http/DataPointHTTP.cpp:775 #, fuzzy, c-format msgid "Stat: obtained checksum %s" msgstr "Проверка: получена ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:991 #, fuzzy, c-format msgid "Could not find checksum: %s" msgstr "Ðе удалоÑÑŒ получить контрольную Ñумму %s: %s" #: src/hed/dmc/http/DataPointHTTP.cpp:993 #, fuzzy, c-format msgid "Checksum of %s is not available" msgstr "Сервер ISIS (%s) недоÑтупен." #: src/hed/dmc/http/DataPointHTTP.cpp:1037 #, c-format msgid "Check: obtained size %llu" msgstr "Проверка: получен размер %llu" #: src/hed/dmc/http/DataPointHTTP.cpp:1039 #, c-format msgid "Check: obtained modification time %s" msgstr "Check: получено Ð²Ñ€ÐµÐ¼Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1154 #: src/hed/dmc/http/DataPointHTTP.cpp:1302 #, c-format msgid "HTTP failure %u - %s" msgstr "Ошибка HTTP %u - %s" #: src/hed/dmc/http/DataPointHTTP.cpp:1459 #, fuzzy, c-format msgid "Failed to create %s, trying to create parent directories" msgstr "Сбой Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ %s, попытка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€Ð¾Ð´Ð¸Ñ‚ÐµÐ»ÑŒÑких каталогов" #: src/hed/dmc/http/DataPointHTTP.cpp:1648 #, fuzzy, c-format msgid "Error creating directory: %s" msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:27 #, c-format msgid "Replacing existing token for %s in Rucio token cache" msgstr "ЗаменÑетÑÑ ÑущеÑтвующий маркер Ð´Ð»Ñ %s в кÑше маркеров Rucio" #: src/hed/dmc/rucio/DataPointRucio.cpp:40 #, c-format msgid "Found existing token for %s in Rucio token cache with expiry time %s" msgstr "" "Обнаружен ÑущеÑтвующий маркер Ð´Ð»Ñ %s в кÑше маркеров Rucio, иÑтекающий %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:43 #, c-format msgid "Rucio token for %s has expired or is about to expire" msgstr "Срок дейÑÑ‚Ð²Ð¸Ñ Ð¼Ð°Ñ€ÐºÐµÑ€Ð° Rucio Ð´Ð»Ñ %s иÑтёк, или вÑкоре иÑтечёт" #: src/hed/dmc/rucio/DataPointRucio.cpp:105 #, c-format msgid "Extracted nickname %s from credentials to use for RUCIO_ACCOUNT" msgstr "" "Выделен пÑевдоним %s Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа, иÑпользуемых в RUCIO_ACCOUNT" #: src/hed/dmc/rucio/DataPointRucio.cpp:108 msgid "Failed to extract VOMS nickname from proxy" msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¿Ñевдонима VOMS из Ñертификата доверенноÑти" #: src/hed/dmc/rucio/DataPointRucio.cpp:110 #, c-format msgid "Using Rucio account %s" msgstr "ИÑпользуетÑÑ ÑƒÑ‡Ñ‘Ñ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ Rucio %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:123 #, fuzzy, c-format msgid "Strange path in Rucio URL: %s" msgstr " URL каталога Ð´Ð»Ñ Ð·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ¸: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:133 src/hed/libs/common/FileLock.cpp:42 msgid "Cannot determine hostname from gethostname()" msgstr "Ðевозможно извлечь Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð° иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ gethostname()" #: src/hed/dmc/rucio/DataPointRucio.cpp:171 #, c-format msgid "Bad path for %s: Format should be /replicas//" msgstr "" #: src/hed/dmc/rucio/DataPointRucio.cpp:190 #, fuzzy, c-format msgid "Failed to query parent DIDs: %s" msgstr "Сбой опроÑа ÑоÑтоÑниÑ: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:195 #, fuzzy, c-format msgid "Failed to parse Rucio info: %s" msgstr "Ðе удалоÑÑŒ разобрать отзыв Rucio: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:242 #: src/hed/dmc/rucio/DataPointRucio.cpp:522 #, c-format msgid "No locations found for %s" msgstr "Ðе найдено раÑположений Ð´Ð»Ñ %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:333 #, c-format msgid "Acquired auth token for %s: %s" msgstr "Получен маркер доÑтупа Ð´Ð»Ñ %s: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:383 #, 
c-format msgid "Rucio returned %s" msgstr "Rucio возвратил %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:452 #: src/hed/dmc/rucio/DataPointRucio.cpp:543 #, c-format msgid "Failed to parse Rucio response: %s" msgstr "Ðе удалоÑÑŒ разобрать отзыв Rucio: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:457 #: src/hed/dmc/rucio/DataPointRucio.cpp:548 #, c-format msgid "Filename not returned in Rucio response: %s" msgstr "Ð’ отзыве Rucio отÑутÑтвует Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð°: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:462 #, c-format msgid "Unexpected name returned in Rucio response: %s" msgstr "Отзыв Rucio Ñодержит недопуÑтимое имÑ: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:467 #, c-format msgid "No pfns returned in Rucio response: %s" msgstr "Ð’ отзыве Rucio отÑутÑтвуeÑ‚ pnfs: %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:477 #, c-format msgid "Cannot determine replica type for %s" msgstr "Ðе удаётÑÑ Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»Ð¸Ñ‚ÑŒ тип копии Ð´Ð»Ñ %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:479 #, c-format msgid "%s: replica type %s" msgstr "%s: тип копии %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:482 #, c-format msgid "Skipping %s replica %s" msgstr "ПропуÑкаетÑÑ %s ÐºÐ¾Ð¿Ð¸Ñ %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:498 #, fuzzy, c-format msgid "Error extracting RSE for %s" msgstr "Ошибка Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:508 #, c-format msgid "No filesize information returned in Rucio response for %s" msgstr "Ð’ отзыве Rucio Ð´Ð»Ñ %s отÑутÑтвует Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ размере файла" #: src/hed/dmc/rucio/DataPointRucio.cpp:511 #, c-format msgid "%s: size %llu" msgstr "%s: размер %llu" #: src/hed/dmc/rucio/DataPointRucio.cpp:515 #, c-format msgid "No checksum information returned in Rucio response for %s" msgstr "Ð’ отзыве Rucio Ð´Ð»Ñ %s отÑутÑтвует Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ контрольной Ñумме" #: src/hed/dmc/rucio/DataPointRucio.cpp:518 #, c-format msgid "%s: checksum %s" msgstr "%s: ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:553 #, fuzzy, c-format msgid "Parent dataset: %s" msgstr "ОбрабатываетÑÑ Ð¾Ð±ÑŠÐµÐºÑ‚ данных %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:575 #, fuzzy, c-format msgid "Could not find matching RSE to %s" msgstr "невозможно найти начало опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ в формате XRSL" #: src/hed/dmc/rucio/DataPointRucio.cpp:617 #, fuzzy, c-format msgid "Sending Rucio trace: %s" msgstr "ИÑпользуетÑÑ ÑƒÑ‡Ñ‘Ñ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ Rucio %s" #: src/hed/dmc/rucio/DataPointRucio.cpp:620 #, fuzzy, c-format msgid "Failed to send traces to Rucio: %s" msgstr "Сбой отправки запроÑа на прерывание: %s" #: src/hed/dmc/s3/DataPointS3.cpp:269 #, fuzzy, c-format msgid "Initializing S3 connection to %s" msgstr "LDAPQuery: уÑтанавливаетÑÑ Ñоединение Ñ %s:%d" #: src/hed/dmc/s3/DataPointS3.cpp:274 #, fuzzy, c-format msgid "Failed to initialize S3 to %s: %s" msgstr "Ðе удалоÑÑŒ инициализировать файл PKCS12: %s" #: src/hed/dmc/s3/DataPointS3.cpp:470 src/hed/dmc/s3/DataPointS3.cpp:592 #, fuzzy, c-format msgid "Failed to read object %s: %s; %s" msgstr "Сбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° %s: %s" #: src/hed/dmc/s3/DataPointS3.cpp:669 #, fuzzy, c-format msgid "Failed to write object %s: %s; %s" msgstr "Сбой запиÑи объекта %s: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:56 #, c-format msgid "TURL %s cannot be handled" msgstr "TURL %s не может быть обработан" #: src/hed/dmc/srm/DataPointSRM.cpp:83 #, c-format msgid "Check: looking for metadata: %s" msgstr "Проверка: поиÑк метаданных: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:94 #, c-format msgid "Check: obtained size: %lli" msgstr "Проверка: 
получен размер: %lli" #: src/hed/dmc/srm/DataPointSRM.cpp:100 #, c-format msgid "Check: obtained checksum: %s" msgstr "Проверка: получена контрольная сумма: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:104 #, c-format msgid "Check: obtained modification date: %s" msgstr "Проверка: получено время изменения: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:108 msgid "Check: obtained access latency: low (ONLINE)" msgstr "Проверка: получена задержка доступа: короткая (ONLINE)" #: src/hed/dmc/srm/DataPointSRM.cpp:112 msgid "Check: obtained access latency: high (NEARLINE)" msgstr "Проверка: получена задержка доступа: длинная (NEARLINE)" #: src/hed/dmc/srm/DataPointSRM.cpp:131 #, c-format msgid "Remove: deleting: %s" msgstr "Remove: удаляется: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:149 #, c-format msgid "Creating directory: %s" msgstr "Создаётся каталог: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:197 src/hed/dmc/srm/DataPointSRM.cpp:246 msgid "Calling PrepareReading when request was already prepared!" msgstr "Вызов PrepareReading, когда запрос был уже подготовлен!" #: src/hed/dmc/srm/DataPointSRM.cpp:217 #, c-format msgid "File %s is NEARLINE, will make request to bring online" msgstr "" "Файл %s в состоянии NEARLINE, будет сделан запрос о размещении на диске" #: src/hed/dmc/srm/DataPointSRM.cpp:226 #, c-format msgid "Bring online request %s is still in queue, should wait" msgstr "Запрос %s на размещение на диске всё ещё в очереди, следует подождать" #: src/hed/dmc/srm/DataPointSRM.cpp:231 #, c-format msgid "Bring online request %s finished successfully, file is now ONLINE" msgstr "" "Запрос %s на размещение на диске успешно выполнен, файл теперь в состоянии " "ONLINE" #: src/hed/dmc/srm/DataPointSRM.cpp:237 #, c-format msgid "" "Bad logic for %s - bringOnline returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Неверная логика в %s - bringOnline завершился успешно, но запрос SRM не " "завершился успехом, либо ещё в процессе" #: src/hed/dmc/srm/DataPointSRM.cpp:265 src/hed/dmc/srm/DataPointSRM.cpp:408 msgid "None of the requested transfer protocols are supported" msgstr "" "Не поддерживается ни один из запрошенных протоколов транспортного уровня" #: src/hed/dmc/srm/DataPointSRM.cpp:278 #, c-format msgid "Get request %s is still in queue, should wait %i seconds" msgstr "Запрос на получение %s всё ещё в очереди, следует подождать %i секунд" #: src/hed/dmc/srm/DataPointSRM.cpp:286 src/hed/dmc/srm/DataPointSRM.cpp:465 #, c-format msgid "Checking URL returned by SRM: %s" msgstr "Проверяется URL, выданный SRM: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:301 src/hed/dmc/srm/DataPointSRM.cpp:480 #, c-format msgid "SRM returned no useful Transfer URLs: %s" msgstr "SRM не выдал пригодных для передачи URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:308 #, c-format msgid "" "Bad logic for %s - getTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Неверная логика в %s - getTURLs завершился успешно, но запрос SRM не " "завершился успехом, либо ещё в процессе" #: src/hed/dmc/srm/DataPointSRM.cpp:316 msgid "StartReading" msgstr "Начало чтения" #: src/hed/dmc/srm/DataPointSRM.cpp:318 msgid "StartReading: File was not prepared properly" msgstr "StartReading: Файл не был подготовлен должным образом" #: src/hed/dmc/srm/DataPointSRM.cpp:328 src/hed/dmc/srm/DataPointSRM.cpp:507 #, c-format msgid "Redirecting to new URL: %s" msgstr "Перенаправление
к новому URL: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:389 msgid "Calling PrepareWriting when request was already prepared!" msgstr "Вызов PrepareWriting, когда запрос был уже подготовлен!" #: src/hed/dmc/srm/DataPointSRM.cpp:418 msgid "No space token specified" msgstr "Не указан маркёр пространства памяти" #: src/hed/dmc/srm/DataPointSRM.cpp:424 msgid "Warning: Using SRM protocol v1 which does not support space tokens" msgstr "" "Предупреждение: Используется версия v1 протокола SRM, которая не " "поддерживает маркёры пространства памяти" #: src/hed/dmc/srm/DataPointSRM.cpp:427 #, c-format msgid "Using space token description %s" msgstr "Используется описание маркёра пространства памяти %s" #: src/hed/dmc/srm/DataPointSRM.cpp:433 #, c-format msgid "Error looking up space tokens matching description %s" msgstr "" "Ошибка поиска маркёров пространства памяти, соответствующих описанию %s" #: src/hed/dmc/srm/DataPointSRM.cpp:437 #, c-format msgid "No space tokens found matching description %s" msgstr "Не найдены маркёры пространства памяти, соответствующие описанию %s" #: src/hed/dmc/srm/DataPointSRM.cpp:442 #, c-format msgid "Using space token %s" msgstr "Используется маркёр пространства памяти %s" #: src/hed/dmc/srm/DataPointSRM.cpp:457 #, c-format msgid "Put request %s is still in queue, should wait %i seconds" msgstr "" "Запрос на размещение %s всё ещё в очереди, следует подождать %i секунд" #: src/hed/dmc/srm/DataPointSRM.cpp:487 #, c-format msgid "" "Bad logic for %s - putTURLs returned ok but SRM request is not finished " "successfully or on going" msgstr "" "Неверная логика в %s - putTURLs завершился успешно, но запрос SRM не " "завершился успехом, либо ещё в процессе" #: src/hed/dmc/srm/DataPointSRM.cpp:495 msgid "StartWriting" msgstr "Начало записи" #: src/hed/dmc/srm/DataPointSRM.cpp:497 msgid "StartWriting: File was not prepared properly" msgstr "StartWriting: Файл не был подготовлен должным образом" #: src/hed/dmc/srm/DataPointSRM.cpp:556 #, c-format msgid "FinishWriting: looking for metadata: %s" msgstr "FinishWriting: поиск метаданных: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:571 #, c-format msgid "FinishWriting: obtained checksum: %s" msgstr "FinishWriting: получена контрольная сумма: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:574 #, c-format msgid "" "Calculated/supplied transfer checksum %s matches checksum reported by SRM " "destination %s" msgstr "" "Вычисленная/указанная контрольная сумма %s совпадает с контрольной суммой, " "заявленной точкой назначения SRM %s" #: src/hed/dmc/srm/DataPointSRM.cpp:577 #, c-format msgid "" "Checksum mismatch between calculated/supplied checksum (%s) and checksum " "reported by SRM destination (%s)" msgstr "" "Несовпадение между вычисленной/указанной контрольной суммой (%s) и " "контрольной суммой, заявленной точкой назначения SRM (%s)" #: src/hed/dmc/srm/DataPointSRM.cpp:580 #, c-format msgid "" "Checksum type of SRM (%s) and calculated/supplied checksum (%s) differ, " "cannot compare" msgstr "" "Типы контрольной суммы в SRM (%s) и вычисленной/указанной контрольной суммы " "(%s) различаются, сравнение невозможно" #: src/hed/dmc/srm/DataPointSRM.cpp:581 src/hed/dmc/srm/DataPointSRM.cpp:582 msgid "No checksum information from server" msgstr "Сервер не выдал информацию о контрольной сумме" #: src/hed/dmc/srm/DataPointSRM.cpp:583 src/hed/dmc/srm/DataPointSRM.cpp:584 msgid "No checksum verification possible" msgstr "Невозможно подтвердить контрольную сумму" #:
src/hed/dmc/srm/DataPointSRM.cpp:590 msgid "Failed to release completed request" msgstr "Сбой сброса завершившегося запроса" #: src/hed/dmc/srm/DataPointSRM.cpp:633 src/hed/dmc/srm/DataPointSRM.cpp:700 #, c-format msgid "ListFiles: looking for metadata: %s" msgstr "ListFiles: поиск метаданных: %s" #: src/hed/dmc/srm/DataPointSRM.cpp:818 #, c-format msgid "plugin for transport protocol %s is not installed" msgstr "" "не установлен подключаемый модуль для протокола транспортного уровня %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:51 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:90 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:142 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:181 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:221 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:259 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:303 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:365 #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:438 msgid "SRM did not return any information" msgstr "SRM не возвратил никакой информации" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:316 #, c-format msgid "File could not be moved to Running state: %s" msgstr "Файл не может быть переведён в состояние Running: %s" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:372 msgid "SRM did not return any useful information" msgstr "SRM не возвратил никакой полезной информации" #: src/hed/dmc/srm/srmclient/SRM1Client.cpp:450 msgid "File could not be moved to Done state" msgstr "Файл не может быть переведён в состояние Done" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:88 msgid "Could not determine version of server" msgstr "Не удалось определить версию сервера" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:94 #, c-format msgid "Server SRM version: %s" msgstr "Версия SRM на сервере: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:99 #, c-format msgid "Server implementation: %s" msgstr "Реализация сервера: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:136 #, c-format msgid "Adding space token %s" msgstr "Добавляется маркёр пространства памяти %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:163 msgid "No request tokens found" msgstr "Не найдены маркёры запроса" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:176 #, c-format msgid "Adding request token %s" msgstr "Добавляется маркёр запроса %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:237 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:642 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:828 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1385 #, c-format msgid "%s: File request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Запрос файла %s в очереди SRM. Ожидание %i секунд" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:275 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:327 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:698 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:764 #, c-format msgid "File is ready! TURL is %s" msgstr "Файл готов! TURL: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:359 #, c-format msgid "Setting userRequestDescription to %s" msgstr "Установка userRequestDescription в %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:414 #, c-format msgid "%s: Bring online request %s in SRM queue. Sleeping for %i seconds" msgstr "%s: Запрос %s на размещение на диске в очереди SRM. Ожидание %i секунд" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:457 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1160 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1194 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1228 msgid "No request token specified!"
msgstr "Ðе указан маркёр запроÑа!" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:524 msgid "Request is reported as ABORTED, but all files are done" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¿Ñ€ÐµÑ€Ð²Ð°Ð½ (ABORTED), но вÑе файлы готовы" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:530 msgid "Request is reported as ABORTED, since it was cancelled" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¿Ñ€ÐµÑ€Ð²Ð°Ð½ (ABORTED), так как он был отменён" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:536 #, c-format msgid "Request is reported as ABORTED. Reason: %s" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¿Ñ€ÐµÑ€Ð²Ð°Ð½ (ABORTED). Причина: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:673 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:745 #, c-format msgid "Path %s is invalid, creating required directories" msgstr "Путь %s недейÑтвителен, ÑоздаютÑÑ Ð½ÐµÐ´Ð¾Ñтающие директории" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:678 #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:750 #, c-format msgid "Error creating required directories for %s" msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð½ÐµÐ¾Ð±Ñ…Ð¾Ð´Ð¸Ð¼Ñ‹Ñ… каталогов Ð´Ð»Ñ %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:851 msgid "Too many files in one request - please try again with fewer files" msgstr "" "Слишком много файлов на один Ð·Ð°Ð¿Ñ€Ð¾Ñ - пожалуйÑта, попробуйте Ñнова, Ñ " "меньшим количеÑтвом файлов" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:899 msgid "" "Directory size is too large to list in one call, will have to call multiple " "times" msgstr "" "Размер директории Ñлишком велик Ð´Ð»Ñ Ñ€Ð°Ñпечатки в одном запроÑе, придётÑÑ " "делать неÑколько запроÑов" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:936 msgid "" "Failure in parsing response from server - some information may be inaccurate" msgstr "" "Ошибка при разборе отзыва Ñ Ñервера - Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¼Ð¾Ð¶ÐµÑ‚ быть чаÑтично неверной" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:942 #: src/hed/shc/legacy/auth_otokens.cpp:437 #, c-format msgid "%s: %s" msgstr "%s: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:975 #, c-format msgid "" "Directory size is larger than %i files, will have to call multiple times" msgstr "" "Размер директории превышает %i файлов, придётÑÑ Ð´ÐµÐ»Ð°Ñ‚ÑŒ неÑколько запроÑов" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1185 #, c-format msgid "Files associated with request token %s released successfully" msgstr "Файлы, аÑÑоциированные Ñ Ð¼Ð°Ñ€ÐºÑ‘Ñ€Ð¾Ð¼ запроÑа %s, уÑпешно разблокированы" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1219 #, c-format msgid "Files associated with request token %s put done successfully" msgstr "Файлы, аÑÑоциированные Ñ Ð¼Ð°Ñ€ÐºÑ‘Ñ€Ð¾Ð¼ запроÑа %s, уÑпешно отгружены" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1254 #, c-format msgid "Files associated with request token %s aborted successfully" msgstr "Файлы, аÑÑоциированные Ñ Ð¼Ð°Ñ€ÐºÑ‘Ñ€Ð¾Ð¼ запроÑа %s, уÑпешно прерваны" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1271 #, c-format msgid "" "Failed to find metadata info on %s for determining file or directory delete" msgstr "" "Ðе удалоÑÑŒ найти информацию о типе %s, чтобы определить, ÑтираетÑÑ Ñ„Ð°Ð¹Ð» или " "каталог" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1277 msgid "Type is file, calling srmRm" msgstr "Тип file, вызываетÑÑ srmRm" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1281 msgid "Type is dir, calling srmRmDir" msgstr "Тип dir, вызываетÑÑ srmRmDir" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1285 msgid "File type is not available, attempting file delete" msgstr "Тип файла недоÑтупен, попытка Ñтереть файл" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1288 msgid "File delete failed, attempting directory 
delete" msgstr "Сбой при удалении файла, попытка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð°" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1313 #, c-format msgid "File %s removed successfully" msgstr "УÑпешно удалён файл %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1340 #, c-format msgid "Directory %s removed successfully" msgstr "УÑпешно удалён каталог %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1455 #, c-format msgid "Checking for existence of %s" msgstr "Проверка ÑущеÑÑ‚Ð²Ð¾Ð²Ð°Ð½Ð¸Ñ %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1458 #, c-format msgid "File already exists: %s" msgstr "Файл уже ÑущеÑтвует: %s" #: src/hed/dmc/srm/srmclient/SRM22Client.cpp:1495 #, c-format msgid "Error creating directory %s: %s" msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:82 #, c-format msgid "Attempting to contact %s on port %i" msgstr "Попытка ÑоединитьÑÑ Ñ %s по порту %i" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:88 #, c-format msgid "Storing port %i for %s" msgstr "СохранÑетÑÑ Ð¿Ð¾Ñ€Ñ‚ %i Ð´Ð»Ñ %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:102 #, c-format msgid "No port succeeded for %s" msgstr "Ðе найдено подходÑщего порта Ð´Ð»Ñ %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:112 #, c-format msgid "URL %s disagrees with stored SRM info, testing new info" msgstr "" "URL %s не ÑоответÑтвует информации, хранÑщейÑÑ Ð² SRM info; проверÑетÑÑ Ð½Ð¾Ð²Ð°Ñ " "информациÑ" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:118 #, c-format msgid "Replacing old SRM info with new for URL %s" msgstr "Замена Ñтарой информации в SRM на новую Ð´Ð»Ñ URL %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:140 #, c-format msgid "SOAP request: %s" msgstr "ЗапроÑа SOAP: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:147 #: src/hed/dmc/srm/srmclient/SRMClient.cpp:176 #, c-format msgid "SOAP fault: %s" msgstr "Ошибка SOAP: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:148 msgid "Reconnecting" msgstr "ПереÑоединение" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:158 #, c-format msgid "SRM Client status: %s" msgstr "СоÑтоÑние клиента SRM: %s" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:164 msgid "No SOAP response" msgstr "Ðет ответа SOAP" #: src/hed/dmc/srm/srmclient/SRMClient.cpp:171 #, c-format msgid "SOAP response: %s" msgstr "Ответ SOAP: %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:75 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:161 #, c-format msgid "Failed to acquire lock on file %s" msgstr "Сбой уÑтановки блокировки на файл %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:80 #, c-format msgid "Error reading info from file %s:%s" msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ из файла %s:%s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:94 #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:186 #, c-format msgid "Bad or old format detected in file %s, in line %s" msgstr "Обнаружен неверный или уÑтаревший формат в файле %s, Ñтроке %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:99 #, c-format msgid "Cannot convert string %s to int in line %s" msgstr "" "Ðевозможно преобразовать Ñтроку %s в целочиÑленное значение в Ñтроке %s" #: src/hed/dmc/srm/srmclient/SRMInfo.cpp:202 #, c-format msgid "Error writing srm info file %s" msgstr "Ошибка запиÑи файла информации SRM %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:81 msgid "" "Missing reference to factory and/or module. It is unsafe to use Xrootd in " "non-persistent mode - Xrootd code is disabled. Report to developers." msgstr "" "ОтÑутÑтвует указание на фабрику и/или модуль. ИÑпользование Xrootd в " "неопределённом режиме небезопаÑно - Xrootd заблокирован. 
Свяжитесь с " "разработчиками." #: src/hed/dmc/xrootd/DataPointXrootd.cpp:120 #, c-format msgid "Could not handle checksum %s: skip checksum check" msgstr "" "Невозможно обработать контрольную сумму %s: пропускается проверка " "контрольной суммы" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:126 #, c-format msgid "Failed to create xrootd copy job: %s" msgstr "Сбой создания задачи копирования xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:143 #, c-format msgid "Failed to copy %s: %s" msgstr "Сбой копирования %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:194 #, c-format msgid "Reading %u bytes from byte %llu" msgstr "Чтение %u байтов, начиная с байта %llu" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:196 #, c-format msgid "Read %i bytes" msgstr "Прочитано %i байт" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:227 #, c-format msgid "Could not open file %s for reading: %s" msgstr "Невозможно открыть файл %s для чтения: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:242 #, c-format msgid "Unable to find file size of %s" msgstr "Не удалось определить размер файла %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:306 #, c-format msgid "DataPointXrootd::write_file got position %d and offset %d, has to seek" msgstr "" "DataPointXrootd::write_file получил позицию %d и смещение %d, необходимо " "выполнить seek" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:329 #, c-format msgid "xrootd write failed: %s" msgstr "Сбой при записи xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:338 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:412 #, c-format msgid "xrootd close failed: %s" msgstr "Сбой при закрытии xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:361 #, c-format msgid "Failed to open %s, trying to create parent directories" msgstr "Сбой открытия %s, попытка создания родительских каталогов" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:374 #, c-format msgid "xrootd open failed: %s" msgstr "Сбой при открытии xrootd: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:388 #, c-format msgid "close failed: %s" msgstr "Сбой при закрытии: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:430 #, c-format msgid "Read access not allowed for %s: %s" msgstr "Закрыт доступ на чтение для %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:449 #, c-format msgid "Could not stat file %s: %s" msgstr "Не удалось определить состояние файла %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:454 msgid "Not getting checksum of zip constituent" msgstr "Не получена контрольная сумма для zip-составляющей" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:458 #, c-format msgid "Could not get checksum of %s: %s" msgstr "Не удалось получить контрольную сумму %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:462 #, c-format msgid "Checksum %s" msgstr "Контрольная сумма %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:500 #, c-format msgid "Failed to open directory %s: %s" msgstr "Не удалось открыть каталог %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:518 #, c-format msgid "Error while reading dir %s: %s" msgstr "Ошибка при чтении каталога %s: %s" #: src/hed/dmc/xrootd/DataPointXrootd.cpp:568 #: src/hed/dmc/xrootd/DataPointXrootd.cpp:586 #, c-format msgid "Error creating required dirs: %s" msgstr "Ошибка создания требуемых каталогов: %s" #: src/hed/identitymap/IdentityMap.cpp:196 #, c-format msgid "PDP: %s can not be loaded" msgstr "PDP: %s не может быть подгружен" #: src/hed/identitymap/IdentityMap.cpp:219 src/hed/shc/legacy/LegacyMap.cpp:221 #, c-format msgid "Grid
identity is mapped to local identity '%s'" msgstr "" "Опознавательные признаки Грид поставлены в соответствие местной учётной " "записи '%s'" #: src/hed/libs/common/ArcLocation.cpp:129 #, c-format msgid "" "Can not determine the install location. Using %s. Please set ARC_LOCATION if " "this is not correct." msgstr "" "Невозможно определить место установки. Используется %s. Если это не " "соответствует действительности, задайте, пожалуйста, переменную ARC_LOCATION." #: src/hed/libs/common/DateTime.cpp:86 src/hed/libs/common/DateTime.cpp:631 #: src/hed/libs/common/StringConv.h:25 msgid "Empty string" msgstr "Пустая строка" #: src/hed/libs/common/DateTime.cpp:107 #, c-format msgid "Can not parse date: %s" msgstr "Невозможно определить дату: %s" #: src/hed/libs/common/DateTime.cpp:130 #, c-format msgid "Can not parse time: %s" msgstr "Невозможно определить время: %s" #: src/hed/libs/common/DateTime.cpp:160 #, c-format msgid "Can not parse time zone offset: %s" msgstr "Невозможно определить смещение часового пояса: %s" #: src/hed/libs/common/DateTime.cpp:180 src/hed/libs/common/DateTime.cpp:199 #: src/hed/libs/common/DateTime.cpp:252 src/hed/libs/common/DateTime.cpp:291 #, c-format msgid "Illegal time format: %s" msgstr "Недопустимый формат времени: %s" #: src/hed/libs/common/DateTime.cpp:230 src/hed/libs/common/DateTime.cpp:283 #, c-format msgid "Can not parse month: %s" msgstr "Невозможно определить месяц: %s" #: src/hed/libs/common/DateTime.cpp:647 src/hed/libs/common/DateTime.cpp:688 #, c-format msgid "Invalid ISO duration format: %s" msgstr "Неверный ISO-формат продолжительности: %s" #: src/hed/libs/common/DateTime.cpp:752 #, c-format msgid "Invalid period string: %s" msgstr "Недопустимый интервал времени: %s" #: src/hed/libs/common/DateTime.cpp:874 msgid "hour" msgid_plural "hours" msgstr[0] "час" msgstr[1] "часа" msgstr[2] "часов" #: src/hed/libs/common/DateTime.cpp:880 msgid "minute" msgid_plural "minutes" msgstr[0] "минута" msgstr[1] "минуты" msgstr[2] "минут" #: src/hed/libs/common/DateTime.cpp:886 msgid "second" msgid_plural "seconds" msgstr[0] "секунда" msgstr[1] "секунды" msgstr[2] "секунд" #: src/hed/libs/common/FileLock.cpp:92 #, c-format msgid "EACCES Error opening lock file %s: %s" msgstr "EACCES Ошибка открытия файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:96 #, c-format msgid "Error opening lock file %s in initial check: %s" msgstr "" "Ошибка при открытии файла блокировки %s при предварительной проверке: %s" #: src/hed/libs/common/FileLock.cpp:103 #, c-format msgid "Error creating temporary file %s: %s" msgstr "Ошибка создания временного файла %s: %s" #: src/hed/libs/common/FileLock.cpp:113 #, c-format msgid "Could not create link to lock file %s as it already exists" msgstr "" "Невозможно создать ссылку на файл блокировки %s, потому что она уже " "существует" #: src/hed/libs/common/FileLock.cpp:124 #, c-format msgid "Could not create lock file %s as it already exists" msgstr "Невозможно создать файл блокировки %s, потому что он уже существует" #: src/hed/libs/common/FileLock.cpp:128 #, c-format msgid "Error creating lock file %s: %s" msgstr "Ошибка создания файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:133 #, c-format msgid "Error writing to lock file %s: %s" msgstr "Ошибка записи в файл блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:141 #, c-format msgid "Error linking tmp file %s to lock file %s: %s" msgstr "Не удалось связать временный файл %s с файлом блокировки %s: %s" #:
src/hed/libs/common/FileLock.cpp:150 #, c-format msgid "Error in lock file %s, even though linking did not return an error" msgstr "" "Ошибка в файле блокировки %s, несмотря на то, что создание ссылки прошло без " "сбоев" #: src/hed/libs/common/FileLock.cpp:158 #, c-format msgid "%li seconds since lock file %s was created" msgstr "%li секунд(ы) с момента создания файла блокировки %s" #: src/hed/libs/common/FileLock.cpp:161 #, c-format msgid "Timeout has expired, will remove lock file %s" msgstr "Время ожидания истекло, файл блокировки %s будет удалён" #: src/hed/libs/common/FileLock.cpp:165 #, c-format msgid "Failed to remove stale lock file %s: %s" msgstr "Сбой удаления устаревшего файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:178 #, c-format msgid "This process already owns the lock on %s" msgstr "У этого процесса уже существует блокировка в %s" #: src/hed/libs/common/FileLock.cpp:182 #, c-format msgid "" "The process owning the lock on %s is no longer running, will remove lock" msgstr "" "Процесс, которому принадлежит блокировка %s, больше не выполняется, " "блокировка будет удалена" #: src/hed/libs/common/FileLock.cpp:184 #, c-format msgid "Failed to remove file %s: %s" msgstr "Не удалось удалить файл %s: %s" #: src/hed/libs/common/FileLock.cpp:193 #, c-format msgid "The file %s is currently locked with a valid lock" msgstr "Файл %s в настоящий момент заблокирован действительной блокировкой" #: src/hed/libs/common/FileLock.cpp:210 #, c-format msgid "Failed to unlock file with lock %s: %s" msgstr "Сбой разблокирования файла с блокировкой %s: %s" #: src/hed/libs/common/FileLock.cpp:222 #, c-format msgid "Lock file %s doesn't exist" msgstr "Файл блокировки %s не существует" #: src/hed/libs/common/FileLock.cpp:224 #, c-format msgid "Error listing lock file %s: %s" msgstr "Ошибка перечисления файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:230 #, c-format msgid "Found unexpected empty lock file %s. Must go back to acquire()" msgstr "" "Найден непредвиденный пустой файл блокировки %s. Необходимо вернуться в " "acquire()" #: src/hed/libs/common/FileLock.cpp:236 #, c-format msgid "Error reading lock file %s: %s" msgstr "Ошибка чтения файла блокировки %s: %s" #: src/hed/libs/common/FileLock.cpp:240 #, c-format msgid "Error with formatting in lock file %s" msgstr "Ошибка формата в файле блокировки %s" #: src/hed/libs/common/FileLock.cpp:250 #, c-format msgid "Lock %s is owned by a different host (%s)" msgstr "Блокировка %s принадлежит другому узлу (%s)" #: src/hed/libs/common/FileLock.cpp:259 #, c-format msgid "Badly formatted pid %s in lock file %s" msgstr "Неверно сформированный pid %s в файле блокировки %s" #: src/hed/libs/common/FileLock.cpp:262 #, c-format msgid "Another process (%s) owns the lock on file %s" msgstr "Другой процесс (%s) владеет блокировкой файла %s" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(empty)" msgstr "(пусто)" #: src/hed/libs/common/IString.cpp:32 src/hed/libs/common/IString.cpp:41 #: src/hed/libs/common/IString.cpp:42 msgid "(null)" msgstr "(нулевой)" #: src/hed/libs/common/Logger.cpp:58 #, c-format msgid "Invalid log level. Using default %s." msgstr "Неверный уровень отладки. Используется уровень по умолчанию %s." #: src/hed/libs/common/Logger.cpp:123 #, c-format msgid "Invalid old log level. Using default %s." msgstr "" "Неверный старый уровень отладки.
ИÑпользуетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ по умолчанию %s." #: src/hed/libs/common/OptionParser.cpp:106 #, c-format msgid "Cannot parse integer value '%s' for -%c" msgstr "Ðе удаётÑÑ Ñ€Ð°Ð·Ð¾Ð±Ñ€Ð°Ñ‚ÑŒ целое значение '%s' Ð´Ð»Ñ -%c" #: src/hed/libs/common/OptionParser.cpp:309 #: src/hed/libs/common/OptionParser.cpp:442 #, c-format msgid "Options Group %s:" msgstr "Группа опций %s:" #: src/hed/libs/common/OptionParser.cpp:311 #: src/hed/libs/common/OptionParser.cpp:445 #, c-format msgid "%s:" msgstr "%s:" #: src/hed/libs/common/OptionParser.cpp:313 #, c-format msgid "Show %s help options" msgstr "Показать %s параметров Ñправки" #: src/hed/libs/common/OptionParser.cpp:348 msgid "Use -? to get usage description" msgstr "Ð”Ð»Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ñправки иÑпользуйте \"-?\"" #: src/hed/libs/common/OptionParser.cpp:425 msgid "Usage:" msgstr "ИÑпользование:" #: src/hed/libs/common/OptionParser.cpp:428 msgid "OPTION..." msgstr "ПÐРÐМЕТР..." #: src/hed/libs/common/OptionParser.cpp:434 msgid "Help Options:" msgstr "Параметры Ñправки:" #: src/hed/libs/common/OptionParser.cpp:435 msgid "Show help options" msgstr "Показать параметры Ñправки" #: src/hed/libs/common/Profile.cpp:199 src/hed/libs/common/Profile.cpp:273 #: src/hed/libs/common/Profile.cpp:404 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"inisections\" " "attribute cannot be the empty string." msgstr "" "Элемент \"%s\" в профиле игнорируетÑÑ: значение атрибута \"inisections\" не " "может быть пуÑтой Ñтрокой." #: src/hed/libs/common/Profile.cpp:205 src/hed/libs/common/Profile.cpp:279 #: src/hed/libs/common/Profile.cpp:411 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initag\" attribute " "cannot be the empty string." msgstr "" "Элемент \"%s\" в профиле игнорируетÑÑ: значение атрибута \"initag\" не может " "быть пуÑтой Ñтрокой." #: src/hed/libs/common/Profile.cpp:419 #, c-format msgid "" "Element \"%s\" in the profile ignored: the value of the \"initype\" " "attribute cannot be the empty string." msgstr "" "Элемент \"%s\" в профиле игнорируетÑÑ: значение атрибута \"initype\" не " "может быть пуÑтой Ñтрокой." #: src/hed/libs/common/Profile.cpp:422 #, c-format msgid "" "Element \"%s\" in the profile ignored: the \"inidefaultvalue\" attribute " "cannot be specified when the \"inisections\" and \"initag\" attributes have " "not been specified." msgstr "" "Элемент \"%s\" в профиле игнорируетÑÑ: значение атрибута \"inidefaultvalue\" " "не может быть задано, когда не заданы Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð¾Ð² \"inisections\" и " "\"initag\"." #: src/hed/libs/common/Profile.cpp:497 #, c-format msgid "" "In the configuration profile the 'initype' attribute on the \"%s\" element " "has a invalid value \"%s\"." msgstr "" "Ð’ профиле наÑтроек атрибут 'initype' Ñлемента \"%s\" имеет ÑобÑтвенное " "значение \"%s\"." 
#: src/hed/libs/common/Run_unix.cpp:225
msgid "Child monitoring signal detected"
msgstr "Мониторинг дочерних процессов: обнаружен сигнал"

#: src/hed/libs/common/Run_unix.cpp:230
#, c-format
msgid "Child monitoring error: %i"
msgstr "Мониторинг дочерних процессов: ошибка: %i"

#: src/hed/libs/common/Run_unix.cpp:243
msgid "Child monitoring kick detected"
msgstr "Мониторинг дочерних процессов: обнаружен запуск"

#: src/hed/libs/common/Run_unix.cpp:246
msgid "Child monitoring internal communication error"
msgstr "Мониторинг дочерних процессов: внутренний сбой взаимодействия"

#: src/hed/libs/common/Run_unix.cpp:258
msgid "Child monitoring stdout is closed"
msgstr "Мониторинг дочерних процессов: stdout закрыт"

#: src/hed/libs/common/Run_unix.cpp:268
msgid "Child monitoring stderr is closed"
msgstr "Мониторинг дочерних процессов: stderr закрыт"

#: src/hed/libs/common/Run_unix.cpp:278
msgid "Child monitoring stdin is closed"
msgstr "Мониторинг дочерних процессов: stdin закрыт"

#: src/hed/libs/common/Run_unix.cpp:296
#, c-format
msgid "Child monitoring child %d exited"
msgstr "Мониторинг дочерних процессов: процесс %d завершился"

#: src/hed/libs/common/Run_unix.cpp:300
#, c-format
msgid "Child monitoring lost child %d (%d)"
msgstr "Мониторинг дочерних процессов: потерян процесс %d (%d)"

#: src/hed/libs/common/Run_unix.cpp:321
#, c-format
msgid "Child monitoring drops abandoned child %d (%d)"
msgstr "Мониторинг дочерних процессов: игнорируется заброшенный процесс %d (%d)"

#: src/hed/libs/common/Run_unix.cpp:484
msgid "Child was already started"
msgstr "Дочерний процесс уже запущен"

#: src/hed/libs/common/Run_unix.cpp:488
msgid "No arguments are assigned for external process"
msgstr "Внешнему процессу не присвоены аргументы"

#: src/hed/libs/common/Run_unix.cpp:621
#, c-format
msgid "Excepton while trying to start external process: %s"
msgstr "Сбой при попытке запуска внешнего процесса: %s"

#: src/hed/libs/common/StringConv.h:31
#, c-format
msgid "Conversion failed: %s"
msgstr "Преобразование не удалось: %s"

#: src/hed/libs/common/StringConv.h:35
#, c-format
msgid "Full string not used: %s"
msgstr "Строка использована не полностью: %s"

#: src/hed/libs/common/Thread.cpp:256
msgid "Maximum number of threads running - putting new request into queue"
msgstr "Запущено максимальное количество потоков - новый запрос помещён в очередь"

#: src/hed/libs/common/Thread.cpp:304
#, c-format
msgid "Thread exited with Glib error: %s"
msgstr "Поток завершился с ошибкой Glib: %s"

#: src/hed/libs/common/Thread.cpp:306
#, c-format
msgid "Thread exited with generic exception: %s"
msgstr "Поток завершился общим прерыванием: %s"

#: src/hed/libs/common/URL.cpp:137
#, c-format
msgid "URL is not valid: %s"
msgstr "Недействительный адрес: %s"

#: src/hed/libs/common/URL.cpp:188
#, c-format
msgid "Illegal URL - path must be absolute: %s"
msgstr "Недопустимый URL - путь должен быть абсолютным: %s"

#: src/hed/libs/common/URL.cpp:193
#, c-format
msgid "Illegal URL - no hostname given: %s"
msgstr "Недопустимый адрес - не содержится имя узла: %s"

#: src/hed/libs/common/URL.cpp:282
#, c-format
msgid "Illegal URL - path must be absolute or empty: %s"
msgstr "Недопустимый URL - путь должен быть абсолютным или пустым: %s"

#: src/hed/libs/common/URL.cpp:298
#, c-format
msgid "Illegal URL - no closing ] for IPv6 address found: %s"
msgstr "Недопустимый URL - отсутствует закрывающая скобка ] для адреса IPv6: %s"

#: src/hed/libs/common/URL.cpp:306
#, c-format
msgid "Illegal URL - closing ] for IPv6 address is followed by illegal token: %s"
msgstr "Недопустимый URL - за закрывающей скобкой ] для адреса IPv6 следует недопустимый маркёр: %s"

#: src/hed/libs/common/URL.cpp:322
#, c-format
msgid "Invalid port number in %s"
msgstr "Недопустимый номер порта в %s"

#: src/hed/libs/common/URL.cpp:455
#, c-format
msgid "Unknown LDAP scope %s - using base"
msgstr "Неизвестный контекст LDAP %s - используется base"

#: src/hed/libs/common/URL.cpp:618
msgid "Attempt to assign relative path to URL - making it absolute"
msgstr "Попытка интерпретации относительного пути как URL - заменяется на абсолютный"

#: src/hed/libs/common/URL.cpp:717
#, c-format
msgid "URL option %s does not have format name=value"
msgstr "Опция URL %s не задана в формате имя=значение"

#: src/hed/libs/common/URL.cpp:1186
#, c-format
msgid "urllist %s contains invalid URL: %s"
msgstr "urllist %s содержит недопустимый URL: %s"

#: src/hed/libs/common/URL.cpp:1191
#, c-format
msgid "URL protocol is not urllist: %s"
msgstr "Протокол URL не является urllist: %s"

#: src/hed/libs/common/UserConfig.cpp:38 src/hed/libs/common/UserConfig.cpp:831
#: src/hed/libs/common/UserConfig.cpp:840
#: src/hed/libs/common/UserConfig.cpp:846
#: src/hed/libs/common/UserConfig.cpp:872
#: src/hed/libs/common/UserConfig.cpp:884
#: src/hed/libs/common/UserConfig.cpp:896
#: src/hed/libs/common/UserConfig.cpp:916
#, c-format
msgid "Multiple %s attributes in configuration file (%s)"
msgstr "Множественные атрибуты %s в файле настроек (%s)"

#: src/hed/libs/common/UserConfig.cpp:139
#, c-format
msgid "Wrong ownership of certificate file: %s"
msgstr "Неправильная принадлежность файла сертификата: %s"

#: src/hed/libs/common/UserConfig.cpp:141
#, c-format
msgid "Wrong permissions of certificate file: %s"
msgstr "Неправильные права доступа к файлу сертификата: %s"

#: src/hed/libs/common/UserConfig.cpp:143
#, c-format
msgid "Can not access certificate file: %s"
msgstr "Нет доступа к файлу сертификата: %s"

#: src/hed/libs/common/UserConfig.cpp:150
#, c-format
msgid "Wrong ownership of key file: %s"
msgstr "Неправильная принадлежность файла личного ключа: %s"

#: src/hed/libs/common/UserConfig.cpp:152
#, c-format
msgid "Wrong permissions of key file: %s"
msgstr "Неправильные права доступа к файлу личного ключа: %s"

#: src/hed/libs/common/UserConfig.cpp:154
#, c-format
msgid "Can not access key file: %s"
msgstr "Нет доступа к файлу личного ключа: %s"

#: src/hed/libs/common/UserConfig.cpp:161
#, c-format
msgid "Wrong ownership of proxy file: %s"
msgstr "Неправильная принадлежность файла доверенности: %s"

#: src/hed/libs/common/UserConfig.cpp:163
#, c-format
msgid "Wrong permissions of proxy file: %s"
msgstr "Неправильные права доступа к файлу доверенности: %s"

#: src/hed/libs/common/UserConfig.cpp:165
#, c-format
msgid "Can not access proxy file: %s"
msgstr "Нет доступа к файлу доверенности: %s"

#: src/hed/libs/common/UserConfig.cpp:176
msgid "computing"
msgstr "computing"

#: src/hed/libs/common/UserConfig.cpp:178
msgid "index"
msgstr "index"

#: src/hed/libs/common/UserConfig.cpp:277
#: src/hed/libs/common/UserConfig.cpp:281
#: src/hed/libs/common/UserConfig.cpp:328
#: src/hed/libs/common/UserConfig.cpp:332
#, c-format
msgid "System configuration file (%s) contains errors."
msgstr "Файл системных настроек (%s) содержит ошибки."
#: src/hed/libs/common/UserConfig.cpp:285
#: src/hed/libs/common/UserConfig.cpp:336
#, c-format
msgid "System configuration file (%s or %s) does not exist."
msgstr "Файл системных настроек (%s или %s) не существует."

#: src/hed/libs/common/UserConfig.cpp:287
#: src/hed/libs/common/UserConfig.cpp:338
#, c-format
msgid "System configuration file (%s) does not exist."
msgstr "Файл системных настроек (%s) не существует."

#: src/hed/libs/common/UserConfig.cpp:293
#: src/hed/libs/common/UserConfig.cpp:305
#: src/hed/libs/common/UserConfig.cpp:344
#: src/hed/libs/common/UserConfig.cpp:356
#, c-format
msgid "User configuration file (%s) contains errors."
msgstr "Файл настроек пользователя (%s) содержит ошибки."

#: src/hed/libs/common/UserConfig.cpp:298
#: src/hed/libs/common/UserConfig.cpp:349
msgid "No configuration file could be loaded."
msgstr "Файл настроек не может быть подгружен."

#: src/hed/libs/common/UserConfig.cpp:301
#: src/hed/libs/common/UserConfig.cpp:352
#, c-format
msgid "User configuration file (%s) does not exist or cannot be loaded."
msgstr "Файл настроек пользователя (%s) не существует или не может быть подгружен."

#: src/hed/libs/common/UserConfig.cpp:438
#, c-format
msgid "Unable to parse the specified verbosity (%s) to one of the allowed levels"
msgstr "Невозможно сопоставить запрашиваемый уровень отладки (%s) ни с одним из допустимых"

#: src/hed/libs/common/UserConfig.cpp:450
#, c-format
msgid "Unsupported job list type '%s', using 'SQLITE'. Supported types are: SQLITE, XML."
msgstr "Тип списка задач '%s' не поддерживается, будет использоваться 'SQLITE'. Поддерживаются следующие типы: SQLITE, XML."

#: src/hed/libs/common/UserConfig.cpp:511
msgid "Loading OToken failed - ignoring its presence"
msgstr "Сбой загрузки OToken - токен игнорируется"

#: src/hed/libs/common/UserConfig.cpp:652
#, c-format
msgid "Certificate and key ('%s' and '%s') not found in any of the paths: %s"
msgstr "Сертификат и ключ ('%s' и '%s') не обнаружены ни в одном из расположений: %s"

#: src/hed/libs/common/UserConfig.cpp:654
#, c-format
msgid "If the proxy or certificate/key does exist, you can manually specify the locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%s' attributes in the client configuration file (e.g. '%s')"
msgstr "Если файл доверенности или пара сертификат/ключ существуют, Вы можете вручную указать их расположение с помощью переменных среды '%s'/'%s' или '%s', или с помощью атрибутов '%s'/'%s' или '%s' в файле настроек клиента (например, '%s')"

#: src/hed/libs/common/UserConfig.cpp:672
#: src/hed/libs/common/UserConfig.cpp:682
#, c-format
msgid "Can not access CA certificate directory: %s. The certificates will not be verified."
msgstr "Не удалось открыть каталог сертификатов CA: %s. Сертификаты не будут проверены."
#: src/hed/libs/common/UserConfig.cpp:708
#, c-format
msgid ""
"Can not find CA certificates directory in default locations:\n"
"~/.arc/certificates, ~/.globus/certificates,\n"
"%s/etc/certificates, %s/etc/grid-security/certificates,\n"
"%s/share/certificates, /etc/grid-security/certificates.\n"
"The certificate will not be verified.\n"
"If the CA certificates directory does exist, please manually specify the "
"locations via env\n"
"X509_CERT_DIR, or the cacertificatesdirectory item in client.conf\n"
msgstr ""
"Каталог сертификатов CA не обнаружен ни в одном из стандартных мест:\n"
"~/.arc/certificates, ~/.globus/certificates,\n"
"%s/etc/certificates, %s/etc/grid-security/certificates,\n"
"%s/share/certificates, /etc/grid-security/certificates.\n"
"Сертификат не будет подтверждён.\n"
"Если каталог сертификатов CA существует, пожалуйста, укажите вручную\n"
"его расположение с помощью переменной X509_CERT_DIR, или задайте\n"
"cacertificatesdirectory в файле настроек клиента client.conf\n"

#: src/hed/libs/common/UserConfig.cpp:730
#, c-format
msgid "Using proxy file: %s"
msgstr "Используется файл доверенности: %s"

#: src/hed/libs/common/UserConfig.cpp:733
#, c-format
msgid "Using certificate file: %s"
msgstr "Используется файл сертификата: %s"

#: src/hed/libs/common/UserConfig.cpp:734
#, c-format
msgid "Using key file: %s"
msgstr "Используется файл личного ключа: %s"

#: src/hed/libs/common/UserConfig.cpp:738
#, c-format
msgid "Using CA certificate directory: %s"
msgstr "Используется каталог доверенных сертификатов CA: %s"

#: src/hed/libs/common/UserConfig.cpp:742
msgid "Using OToken"
msgstr "Используется OToken"

#: src/hed/libs/common/UserConfig.cpp:755
#: src/hed/libs/common/UserConfig.cpp:761
#, c-format
msgid "Can not access VOMSES file/directory: %s."
msgstr "Невозможно открыть каталог или файл VOMSES: %s."

#: src/hed/libs/common/UserConfig.cpp:767
#, c-format
msgid "Can not access VOMS file/directory: %s."
msgstr "Невозможно открыть каталог или файл VOMS: %s."
#: src/hed/libs/common/UserConfig.cpp:781
msgid "Can not find voms service configuration file (vomses) in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses"
msgstr "Конфигурация серверов VOMS не обнаружена ни в одном из стандартных расположений: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses"

#: src/hed/libs/common/UserConfig.cpp:794
#, c-format
msgid "Loading configuration (%s)"
msgstr "Чтение файла настроек (%s)"

#: src/hed/libs/common/UserConfig.cpp:828
#, c-format
msgid "The value of the timeout attribute in the configuration file (%s) was only partially parsed"
msgstr "Значение атрибута timeout в файле настроек (%s) разобрано не полностью"

#: src/hed/libs/common/UserConfig.cpp:853
msgid "The brokerarguments attribute can only be used in conjunction with the brokername attribute"
msgstr "Атрибут brokerarguments может быть использован только в связи с атрибутом brokername"

#: src/hed/libs/common/UserConfig.cpp:869
#, c-format
msgid "The value of the keysize attribute in the configuration file (%s) was only partially parsed"
msgstr "Значение атрибута keysize в файле настроек (%s) разобрано не полностью"

#: src/hed/libs/common/UserConfig.cpp:891
#, c-format
msgid "Could not convert the slcs attribute value (%s) to an URL instance in configuration file (%s)"
msgstr "Не удалось преобразовать значение атрибута slcs (%s) в файле настроек (%s) в URL"

#: src/hed/libs/common/UserConfig.cpp:937
#, c-format
msgid "Specified overlay file (%s) does not exist."
msgstr "Указанный файл с трафаретом (%s) не существует."

#: src/hed/libs/common/UserConfig.cpp:941
#, c-format
msgid "Unknown attribute %s in common section of configuration file (%s), ignoring it"
msgstr "Игнорируется неизвестный атрибут %s в разделе common файла настроек (%s)"

#: src/hed/libs/common/UserConfig.cpp:982
#, c-format
msgid "Unknown section %s, ignoring it"
msgstr "Игнорируется неизвестный раздел %s"

#: src/hed/libs/common/UserConfig.cpp:986
#, c-format
msgid "Configuration (%s) loaded"
msgstr "Настройки (%s) подгружены"

#: src/hed/libs/common/UserConfig.cpp:989
#, c-format
msgid "Could not load configuration (%s)"
msgstr "Не удалось подгрузить настройки (%s)"

#: src/hed/libs/common/UserConfig.cpp:1086
#, c-format
msgid "UserConfiguration saved to file (%s)"
msgstr "Настройки UserConfiguration сохранены в файле (%s)"

#: src/hed/libs/common/UserConfig.cpp:1099
#, c-format
msgid "Unable to create %s directory."
msgstr "Не удалось создать каталог %s."

#: src/hed/libs/common/UserConfig.cpp:1108
#, c-format
msgid "Configuration example file created (%s)"
msgstr "Создан шаблонный файл настроек (%s)"

#: src/hed/libs/common/UserConfig.cpp:1110
#, c-format
msgid "Unable to copy example configuration from existing configuration (%s)"
msgstr "Не удалось скопировать шаблон настроек из существующих настроек (%s)"

#: src/hed/libs/common/UserConfig.cpp:1115
#, c-format
msgid "Cannot copy example configuration (%s), it is not a regular file"
msgstr "Не удалось скопировать шаблон настроек (%s), т.к. это не обычный файл"

#: src/hed/libs/common/UserConfig.cpp:1120
#, c-format
msgid "Example configuration (%s) not created."
msgstr "Шаблон настроек (%s) не создан."
#: src/hed/libs/common/UserConfig.cpp:1125
#, c-format
msgid "The default configuration file (%s) is not a regular file."
msgstr "Файл настроек по умолчанию (%s) не является обычным файлом."

#: src/hed/libs/common/UserConfig.cpp:1143
#, c-format
msgid "%s directory created"
msgstr "создан каталог %s"

#: src/hed/libs/common/UserConfig.cpp:1145
#: src/hed/libs/common/UserConfig.cpp:1172 src/hed/libs/data/DataMover.cpp:703
#, c-format
msgid "Failed to create directory %s"
msgstr "Не удалось создать каталог %s"

#: src/hed/libs/common/test/LoggerTest.cpp:58
msgid "This VERBOSE message should not be seen"
msgstr "Этого сообщения VERBOSE не должно быть видно"

#: src/hed/libs/common/test/LoggerTest.cpp:62
msgid "This INFO message should be seen"
msgstr "Это сообщение INFO должно быть видно"

#: src/hed/libs/common/test/LoggerTest.cpp:73
msgid "This VERBOSE message should now be seen"
msgstr "Это сообщение VERBOSE теперь должно быть видно"

#: src/hed/libs/common/test/LoggerTest.cpp:79
msgid "This INFO message should also be seen"
msgstr "Это сообщение INFO тоже должно быть видно"

#: src/hed/libs/common/test/LoggerTest.cpp:93
msgid "This message goes to initial destination"
msgstr "Это сообщение выводится в изначальное назначение"

#: src/hed/libs/common/test/LoggerTest.cpp:108
msgid "This message goes to per-thread destination"
msgstr "Это сообщение направляется в назначение отдельного потока"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:80
msgid "Request failed: No response from SPService"
msgstr "Сбой запроса: нет ответа от службы SPService"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:84
#: src/hed/libs/communication/ClientSAML2SSO.cpp:137
msgid "Request failed: response from SPService is not as expected"
msgstr "Сбой запроса: неверный ответ от службы SPService"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:92
#, c-format
msgid "Authentication Request URL: %s"
msgstr "Адрес URL запроса подтверждения подлинности: %s"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:133
msgid "Request failed: No response from IdP"
msgstr "Сбой запроса: нет ответа от службы IdP"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:184
msgid "Request failed: No response from IdP when doing redirecting"
msgstr "Сбой запроса: нет ответа от службы IdP при перенаправлении"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:188
msgid "Request failed: response from IdP is not as expected when doing redirecting"
msgstr "Сбой запроса: неверный ответ от службы IdP при перенаправлении"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:245
msgid "Request failed: No response from IdP when doing authentication"
msgstr "Сбой запроса: нет ответа от службы IdP при проверке подлинности"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:249
msgid "Request failed: response from IdP is not as expected when doing authentication"
msgstr "Сбой запроса: неверный ответ от службы IdP при проверке подлинности"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:294
#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:312
msgid "Succeeded to verify the signature under "
msgstr "Подпись успешно подтверждена"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:296
#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:315
msgid "Failed to verify the signature under "
msgstr "Подпись не подтверждена"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:310
msgid "Request failed: No response from SP Service when sending SAML assertion to SP"
msgstr "Сбой запроса: нет ответа от службы SP при отсылке утверждения SAML на SP"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:314
msgid "Request failed: response from SP Service is not as expected when sending SAML assertion to SP"
msgstr "Сбой запроса: неприемлемый ответ от службы SP при отсылке утверждения SAML на SP"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:325
#, c-format
msgid "IdP return some error message: %s"
msgstr "Служба IdP выдала сообщение об ошибке: %s"

#: src/hed/libs/communication/ClientSAML2SSO.cpp:353
#: src/hed/libs/communication/ClientSAML2SSO.cpp:398
msgid "SAML2SSO process failed"
msgstr "Сбой процесса SAML2SSO"

#: src/hed/libs/communication/ClientX509Delegation.cpp:56
msgid "Creating delegation credential to ARC delegation service"
msgstr "Создание делегируемых параметров доступа для службы делегирования ARC"

#: src/hed/libs/communication/ClientX509Delegation.cpp:66
#: src/hed/libs/communication/ClientX509Delegation.cpp:269
msgid "DelegateCredentialsInit failed"
msgstr "Сбой в DelegateCredentialsInit"

#: src/hed/libs/communication/ClientX509Delegation.cpp:70
#: src/hed/libs/communication/ClientX509Delegation.cpp:124
#: src/hed/libs/communication/ClientX509Delegation.cpp:159
#: src/hed/libs/communication/ClientX509Delegation.cpp:214
#: src/hed/libs/communication/ClientX509Delegation.cpp:273
msgid "There is no SOAP response"
msgstr "Нет ответа SOAP"

#: src/hed/libs/communication/ClientX509Delegation.cpp:75
msgid "There is no X509 request in the response"
msgstr "В ответе отсутствует запрос X509"

#: src/hed/libs/communication/ClientX509Delegation.cpp:80
msgid "There is no Format request in the response"
msgstr "В ответе отсутствует запрос Format"

#: src/hed/libs/communication/ClientX509Delegation.cpp:88
msgid "There is no Id or X509 request value in the response"
msgstr "Ответ не содержит Id или значение запроса X509"

#: src/hed/libs/communication/ClientX509Delegation.cpp:101
#: src/hed/libs/communication/ClientX509Delegation.cpp:189
msgid "DelegateProxy failed"
msgstr "Сбой в DelegateProxy"

#: src/hed/libs/communication/ClientX509Delegation.cpp:120
msgid "UpdateCredentials failed"
msgstr "Сбой в UpdateCredentials"

#: src/hed/libs/communication/ClientX509Delegation.cpp:128
msgid "There is no UpdateCredentialsResponse in response"
msgstr "В ответе отсутствует UpdateCredentialsResponse"

#: src/hed/libs/communication/ClientX509Delegation.cpp:136
#: src/hed/libs/communication/ClientX509Delegation.cpp:164
#: src/hed/libs/communication/ClientX509Delegation.cpp:219
#: src/hed/libs/communication/ClientX509Delegation.cpp:304
msgid "There is no SOAP connection chain configured"
msgstr "Не настроена цепочка соединения SOAP"

#: src/hed/libs/communication/ClientX509Delegation.cpp:142
msgid "Creating delegation to CREAM delegation service"
msgstr "Создание делегирования для службы делегирования CREAM"

#: src/hed/libs/communication/ClientX509Delegation.cpp:155
msgid "Delegation getProxyReq request failed"
msgstr "Сбой запроса делегирования getProxyReq"

#: src/hed/libs/communication/ClientX509Delegation.cpp:175
msgid "Creating delegation to CREAM delegation service failed"
msgstr "Сбой создания делегирования для службы делегирования CREAM"

#: src/hed/libs/communication/ClientX509Delegation.cpp:210
msgid "Delegation putProxy request failed"
msgstr "Сбой запроса делегирования putProxy"

#: src/hed/libs/communication/ClientX509Delegation.cpp:224
msgid "Creating delegation to CREAM delegation failed"
msgstr "Сбой создания делегирования для делегирования CREAM"

#: src/hed/libs/communication/ClientX509Delegation.cpp:239
msgid "Getting delegation credential from ARC delegation service"
msgstr "Получение делегированных параметров доступа от службы делегирования ARC"

#: src/hed/libs/communication/ClientX509Delegation.cpp:278
msgid "There is no Delegated X509 token in the response"
msgstr "Ответ не содержит делегированный токен X509"

#: src/hed/libs/communication/ClientX509Delegation.cpp:283
msgid "There is no Format delegated token in the response"
msgstr "Ответ не содержит делегированный токен в нужном формате"

#: src/hed/libs/communication/ClientX509Delegation.cpp:291
msgid "There is no Id or X509 token value in the response"
msgstr "Ответ не содержит Id или значение маркёра X509"

#: src/hed/libs/communication/ClientX509Delegation.cpp:300
#, c-format
msgid ""
"Get delegated credential from delegation service: \n"
" %s"
msgstr ""
"Получение делегированных параметров доступа от службы делегирования: \n"
" %s"

#: src/hed/libs/compute/Broker.cpp:54
#, c-format
msgid "Performing matchmaking against target (%s)."
msgstr "Производится сравнение с назначением (%s)."

#: src/hed/libs/compute/Broker.cpp:64
#, c-format
msgid "Matchmaking, ExecutionTarget: %s matches job description"
msgstr "Сравнение; ExecutionTarget: %s соответствует описанию задачи"

#: src/hed/libs/compute/Broker.cpp:145
#, c-format
msgid "The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s)."
msgstr "Агентство (%s), выдавшее сертификат (%s), не относится к доверяемым целью (%s)."

#: src/hed/libs/compute/Broker.cpp:153
#, c-format
msgid "ComputingShareName of ExecutionTarget (%s) is not defined"
msgstr "Не определён параметр ComputingShareName атрибута ExecutionTarget (%s)"

#: src/hed/libs/compute/Broker.cpp:157 src/hed/libs/compute/Broker.cpp:162
#, c-format
msgid "ComputingShare (%s) explicitly rejected"
msgstr "Цель ComputingShare (%s) явно отклонена"

#: src/hed/libs/compute/Broker.cpp:171
#, c-format
msgid "Matchmaking, ComputingShareName of ExecutionTarget (%s) is not defined, but requested queue is (%s)"
msgstr "Сравнение; параметр ComputingShareName объекта ExecutionTarget (%s) не определён, хотя запрошена очередь (%s)"

#: src/hed/libs/compute/Broker.cpp:175 src/hed/libs/compute/Broker.cpp:180
#, c-format
msgid "Matchmaking, ComputingShare (%s) does not match requested queue (%s): skipping"
msgstr "Сравнение; ComputingShare (%s) не соответствует запрошенной очереди (%s): пропускается"

#: src/hed/libs/compute/Broker.cpp:184
#, c-format
msgid "Matchmaking, ComputingShare (%s) matches requested queue (%s)"
msgstr "Сравнение; ComputingShare (%s) соответствует запрошенной очереди (%s)"

#: src/hed/libs/compute/Broker.cpp:192
#, c-format
msgid "ProcessingStartTime (%s) specified in job description is inside the targets downtime period [ %s - %s ]."
msgstr "Время начала счёта (%s), указанное в описании задачи, приходится на период недоступности цели [ %s - %s ]."

#: src/hed/libs/compute/Broker.cpp:197
#, c-format
msgid "The downtime of the target (%s) is not published. Keeping target."
msgstr "Период недоступности цели (%s) не объявлен. Цель сохраняется."
#: src/hed/libs/compute/Broker.cpp:203
#, c-format
msgid "HealthState of ExecutionTarget (%s) is not OK or WARNING (%s)"
msgstr "Состояние здоровья назначения для исполнения (%s) неудовлетворительное (%s)"

#: src/hed/libs/compute/Broker.cpp:208
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, HealthState is not defined"
msgstr "Сравнение; назначение для исполнения: %s, состояние здоровья не определено"

#: src/hed/libs/compute/Broker.cpp:215
#, c-format
msgid "Matchmaking, Computing endpoint requirement not satisfied. ExecutionTarget: %s"
msgstr "Сравнение; не удовлетворено требование к вычислительному ресурсу. Назначение для исполнения: %s"

#: src/hed/libs/compute/Broker.cpp:220
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, ImplementationName is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение ImplementationName не определено"

#: src/hed/libs/compute/Broker.cpp:246
#, c-format
msgid "Matchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget."
msgstr "Сравнение; %s (%d) не соответствует (%s) значению %s (%d), публикуемому назначением для исполнения."

#: src/hed/libs/compute/Broker.cpp:275
#, c-format
msgid "Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the ExecutionTarget."
msgstr "Сравнение; приведённое к значению %s значение %s (%d) не соответствует (%s) значению %s (%d), публикуемому назначением для исполнения."

#: src/hed/libs/compute/Broker.cpp:287
#, c-format
msgid "Matchmaking, Benchmark %s is not published by the ExecutionTarget."
msgstr "Сравнение; значение эталонного теста %s не публикуется назначением для исполнения."

#: src/hed/libs/compute/Broker.cpp:302
#, c-format
msgid "Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)"
msgstr "Сравнение; проблема с MaxTotalCPUTime, ExecutionTarget: %d (MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)"

#: src/hed/libs/compute/Broker.cpp:309
#, c-format
msgid "Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)"
msgstr "Сравнение; проблема с MaxCPUTime, ExecutionTarget: %d (MaxCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)"

#: src/hed/libs/compute/Broker.cpp:314
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not defined, assuming no CPU time limit"
msgstr "Сравнение; ExecutionTarget: %s, не задано MaxTotalCPUTime или MaxCPUTime, предполагается отсутствие ограничений на процессорное время"

#: src/hed/libs/compute/Broker.cpp:320
#, c-format
msgid "Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)"
msgstr "Сравнение; проблема с MinCPUTime, ExecutionTarget: %d (MinCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)"

#: src/hed/libs/compute/Broker.cpp:325
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU time limit"
msgstr "Сравнение; ExecutionTarget: %s, не задано MinCPUTime, предполагается отсутствие ограничений на процессорное время"

#: src/hed/libs/compute/Broker.cpp:333
#, c-format
msgid "Matchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), JobDescription: %d (IndividualPhysicalMemory)"
msgstr "Сравнение; несовпадение MainMemorySize: у назначения для исполнения: %d (MainMemorySize), в описании задачи: %d (IndividualPhysicalMemory)"

#: src/hed/libs/compute/Broker.cpp:339
#, c-format
msgid "Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), JobDescription: %d (IndividualPhysicalMemory)"
msgstr "Сравнение; несовпадение MaxMainMemory: у назначения для исполнения: %d (MaxMainMemory), в описании задачи: %d (IndividualPhysicalMemory)"

#: src/hed/libs/compute/Broker.cpp:344
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not defined"
msgstr "Сравнение; назначение для исполнения: %s, значения MaxMainMemory и MainMemorySize не определены"

#: src/hed/libs/compute/Broker.cpp:352
#, c-format
msgid "Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d (MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)"
msgstr "Сравнение; несовпадение MaxVirtualMemory: у назначения для исполнения: %d (MaxVirtualMemory), в описании задачи: %d (IndividualVirtualMemory)"

#: src/hed/libs/compute/Broker.cpp:357
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, MaxVirtualMemory is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение MaxVirtualMemory не определено"

#: src/hed/libs/compute/Broker.cpp:365
#, c-format
msgid "Matchmaking, Platform problem, ExecutionTarget: %s (Platform) JobDescription: %s (Platform)"
msgstr "Сравнение; несовпадение платформ: ExecutionTarget: %s (Platform) JobDescription: %s (Platform)"

#: src/hed/libs/compute/Broker.cpp:370
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, Platform is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение Platform не определено"

#: src/hed/libs/compute/Broker.cpp:378
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfied"
msgstr "Сравнение; не удовлетворены требования OperatingSystem к ExecutionTarget: %s"

#: src/hed/libs/compute/Broker.cpp:383
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, OperatingSystem is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение OperatingSystem не определено"

#: src/hed/libs/compute/Broker.cpp:391
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not satisfied"
msgstr "Сравнение; не удовлетворены требования RunTimeEnvironment к ExecutionTarget: %s"

#: src/hed/libs/compute/Broker.cpp:396
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, ApplicationEnvironments not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение ApplicationEnvironments не определено"

#: src/hed/libs/compute/Broker.cpp:405
#, c-format
msgid "Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not support %s, specified in the JobDescription."
msgstr "Сравнение; не удовлетворено требование NetworkInfo, назначение для исполнения не поддерживает %s, указанное в описании задачи."
#: src/hed/libs/compute/Broker.cpp:409
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, NetworkInfo is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение NetworkInfo не определено"

#: src/hed/libs/compute/Broker.cpp:417
#, c-format
msgid "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (SessionDiskSpace)"
msgstr "Сравнение; несовпадение MaxDiskSpace: у назначения для исполнения: %d MB (MaxDiskSpace), в описании задачи: %d MB (SessionDiskSpace)"

#: src/hed/libs/compute/Broker.cpp:424
#, c-format
msgid "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)"
msgstr "Сравнение; несовпадение WorkingAreaFree: у назначения для исполнения: %d MB (WorkingAreaFree), в описании задачи: %d MB (SessionDiskSpace)"

#: src/hed/libs/compute/Broker.cpp:430 src/hed/libs/compute/Broker.cpp:451
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not defined"
msgstr "Сравнение; назначение для исполнения: %s, значения MaxDiskSpace и WorkingAreaFree не определены"

#: src/hed/libs/compute/Broker.cpp:438
#, c-format
msgid "Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace)"
msgstr "Сравнение; несовпадение MaxDiskSpace: у назначения для исполнения: %d MB (MaxDiskSpace), в описании задачи: %d MB (DiskSpace)"

#: src/hed/libs/compute/Broker.cpp:445
#, c-format
msgid "Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (DiskSpace)"
msgstr "Сравнение; несовпадение WorkingAreaFree: у назначения для исполнения: %d MB (WorkingAreaFree), в описании задачи: %d MB (DiskSpace)"

#: src/hed/libs/compute/Broker.cpp:459
#, c-format
msgid "Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); JobDescription: %d MB (CacheDiskSpace)"
msgstr "Сравнение; несовпадение CacheTotal: у назначения для исполнения: %d MB (CacheTotal), в описании задачи: %d MB (CacheDiskSpace)"

#: src/hed/libs/compute/Broker.cpp:464
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, CacheTotal is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение CacheTotal не определено"

#: src/hed/libs/compute/Broker.cpp:472
#, c-format
msgid "Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) JobDescription: %d (NumberOfProcesses)"
msgstr "Сравнение; несовпадение TotalSlots: у назначения для исполнения: %d (TotalSlots), в описании задачи: %d (NumberOfProcesses)"

#: src/hed/libs/compute/Broker.cpp:478
#, c-format
msgid "Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) JobDescription: %d (NumberOfProcesses)"
msgstr "Сравнение; несовпадение MaxSlotsPerJob: у назначения для исполнения: %d (MaxSlotsPerJob), в описании задачи: %d (NumberOfProcesses)"

#: src/hed/libs/compute/Broker.cpp:484
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not defined"
msgstr "Сравнение; назначение для исполнения: %s, значения TotalSlots и MaxSlotsPerJob не определены"

#: src/hed/libs/compute/Broker.cpp:492
#, c-format
msgid "Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s (WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)"
msgstr "Сравнение; несовпадение WorkingAreaLifeTime: у назначения для исполнения: %s (WorkingAreaLifeTime), в описании задачи: %s (SessionLifeTime)"

#: src/hed/libs/compute/Broker.cpp:497
#, c-format
msgid "Matchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not defined"
msgstr "Сравнение; назначение для исполнения: %s, значение WorkingAreaLifeTime не определено"

#: src/hed/libs/compute/Broker.cpp:505
#, c-format
msgid "Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) JobDescription: %s (InBound)"
msgstr "Сравнение; несовпадение ConnectivityIn: у назначения для исполнения: %s (ConnectivityIn), в описании задачи: %s (InBound)"

#: src/hed/libs/compute/Broker.cpp:512
#, c-format
msgid "Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) JobDescription: %s (OutBound)"
msgstr "Сравнение; несовпадение ConnectivityOut: у назначения для исполнения: %s (ConnectivityOut), в описании задачи: %s (OutBound)"

#: src/hed/libs/compute/Broker.cpp:535
msgid "Unable to sort added jobs. The BrokerPlugin plugin has not been loaded."
msgstr "Невозможно упорядочить добавленные задачи. Подключаемый модуль BrokerPlugin не был подгружен."

#: src/hed/libs/compute/Broker.cpp:552
msgid "Unable to match target, marking it as not matching. Broker not valid."
msgstr "Невозможно выполнить сравнение с ресурсом, он помечается как несоответствующий. Планировщик недействителен."

#: src/hed/libs/compute/Broker.cpp:588
msgid "Unable to sort ExecutionTarget objects - Invalid Broker object."
msgstr "Невозможно упорядочить объекты ExecutionTarget - недопустимый объект Broker."

#: src/hed/libs/compute/Broker.cpp:612
msgid "Unable to register job submission. Can't get JobDescription object from Broker, Broker is invalid."
msgstr "Невозможно зарегистрировать засылку задачи. Невозможно получить объект JobDescription из планировщика, планировщик недействителен."

#: src/hed/libs/compute/BrokerPlugin.cpp:89
#, c-format
msgid "Broker plugin \"%s\" not found."
msgstr "Подключаемый модуль брокера \"%s\" не обнаружен."
#: src/hed/libs/compute/BrokerPlugin.cpp:96
#, c-format
msgid "Unable to load BrokerPlugin (%s)"
msgstr "Невозможно загрузить модуль BrokerPlugin (%s)"

#: src/hed/libs/compute/BrokerPlugin.cpp:106
#, c-format
msgid "Broker %s loaded"
msgstr "Подгружен планировщик %s"

#: src/hed/libs/compute/ComputingServiceRetriever.cpp:27
#, c-format
msgid "Uniq is replacing service coming from %s with service coming from %s"
msgstr "Uniq заменяет сервис, обнаруженный через %s, на сервис, обнаруженный через %s"

#: src/hed/libs/compute/ComputingServiceRetriever.cpp:31
#, c-format
msgid "Uniq is ignoring service coming from %s"
msgstr "Uniq игнорирует сервис, обнаруженный через %s"

#: src/hed/libs/compute/ComputingServiceRetriever.cpp:38
#, c-format
msgid "Uniq is adding service coming from %s"
msgstr "Uniq добавляет сервис, обнаруженный через %s"

#: src/hed/libs/compute/ComputingServiceRetriever.cpp:61
#, c-format
msgid "Adding endpoint (%s) to TargetInformationRetriever"
msgstr "Добавление точки входа (%s) в TargetInformationRetriever"

#: src/hed/libs/compute/ComputingServiceRetriever.cpp:64
#, c-format
msgid "Adding endpoint (%s) to ServiceEndpointRetriever"
msgstr "Добавление точки входа (%s) в ServiceEndpointRetriever"

#: src/hed/libs/compute/ComputingServiceRetriever.cpp:67
#, c-format
msgid "Adding endpoint (%s) to both ServiceEndpointRetriever and TargetInformationRetriever"
msgstr "Точка входа (%s) добавляется как к ServiceEndpointRetriever, так и к TargetInformationRetriever"

#: src/hed/libs/compute/EntityRetriever.cpp:42
#, c-format
msgid "The plugin %s does not support any interfaces, skipping it."
msgstr "Подключаемый модуль %s не поддерживает никаких интерфейсов, пропускается."

#: src/hed/libs/compute/EntityRetriever.cpp:47
#, c-format
msgid "The first supported interface of the plugin %s is an empty string, skipping the plugin."
msgstr "Первый поддерживаемый интерфейс подключаемого модуля %s оказался пустой строкой, модуль пропускается."

#: src/hed/libs/compute/EntityRetriever.cpp:95
#, c-format
msgid "Interface on endpoint (%s) %s."
msgstr "Интерфейс точки входа (%s) %s."

#: src/hed/libs/compute/EntityRetriever.cpp:101
#: src/hed/libs/compute/EntityRetriever.cpp:133
#: src/hed/libs/compute/EntityRetriever.cpp:425
#, c-format
msgid "Ignoring endpoint (%s), it is already registered in retriever."
msgstr "Игнорируется точка входа (%s), т.к. она уже зарегистрирована в загрузчике."

#: src/hed/libs/compute/EntityRetriever.cpp:110
#, c-format
msgid "Service Loop: Endpoint %s"
msgstr "Цикл по сервисам: точка входа %s"

#: src/hed/libs/compute/EntityRetriever.cpp:112
#, c-format
msgid " This endpoint (%s) is STARTED or SUCCESSFUL"
msgstr " Состояние точки входа (%s) - STARTED или SUCCESSFUL"

#: src/hed/libs/compute/EntityRetriever.cpp:115
#, c-format
msgid "Suspending querying of endpoint (%s) since the service at the endpoint is already being queried, or has been queried."
msgstr "Приостанавливается опрос точки входа (%s), т.к. сервис по этому адресу уже опрашивается или опрошен."
#: src/hed/libs/compute/EntityRetriever.cpp:122
#: src/hed/libs/compute/EntityRetriever.cpp:237
#, c-format
msgid " Status of endpoint (%s) is %s"
msgstr " Состояние точки входа (%s): %s"

#: src/hed/libs/compute/EntityRetriever.cpp:126
#, c-format
msgid "Setting status (STARTED) for endpoint: %s"
msgstr "Задаётся состояние (STARTED) для точки входа: %s"

#: src/hed/libs/compute/EntityRetriever.cpp:145
#, c-format
msgid "Starting thread to query the endpoint on %s"
msgstr "Запускается поток для опроса точки входа на %s"

#: src/hed/libs/compute/EntityRetriever.cpp:147
#: src/hed/libs/compute/EntityRetriever.cpp:289
#, c-format
msgid "Failed to start querying the endpoint on %s"
msgstr "Не удалось начать опрос точки входа на %s"

#: src/hed/libs/compute/EntityRetriever.cpp:174
#, c-format
msgid "Found a registry, will query it recursively: %s"
msgstr "Найден реестр, который будет опрошен рекурсивно: %s"

#: src/hed/libs/compute/EntityRetriever.cpp:211
#, c-format
msgid "Setting status (%s) for endpoint: %s"
msgstr "Присваивается состояние (%s) точке входа: %s"

#: src/hed/libs/compute/EntityRetriever.cpp:231
msgid "Checking for suspended endpoints which should be started."
msgstr "Проверка отложенных точек входа на предмет повторного опроса."

#: src/hed/libs/compute/EntityRetriever.cpp:241
#, c-format
msgid "Found started or successful endpoint (%s)"
msgstr "Найдена точка входа в состоянии STARTED или SUCCESSFUL (%s)"

#: src/hed/libs/compute/EntityRetriever.cpp:253
#, c-format
msgid "Found suspended endpoint (%s)"
msgstr "Обнаружена временно исключённая точка входа (%s)"

#: src/hed/libs/compute/EntityRetriever.cpp:264
#, c-format
msgid "Trying to start suspended endpoint (%s)"
msgstr "Попытка активации временно исключённой точки входа (%s)"

#: src/hed/libs/compute/EntityRetriever.cpp:284
#, c-format
msgid "Starting querying of suspended endpoint (%s) - no other endpoints for this service is being queried or has been queried successfully."
msgstr "Начинается опрос отложенной точки входа (%s) - другие точки входа этого сервиса не опрашиваются, либо были уже успешно опрошены."

#: src/hed/libs/compute/EntityRetriever.cpp:351
#, c-format
msgid "Calling plugin %s to query endpoint on %s"
msgstr "Вызывается подключаемый модуль %s для опроса точки входа на %s"

#: src/hed/libs/compute/EntityRetriever.cpp:373
#, c-format
msgid "The interface of this endpoint (%s) is unspecified, will try all possible plugins"
msgstr "Интерфейс этой точки входа (%s) не задан, пробуются все возможные подключаемые модули"

#: src/hed/libs/compute/EntityRetriever.cpp:389
#, c-format
msgid "Problem loading plugin %s, skipping it."
msgstr "Проблемы при подключении модуля %s, модуль пропускается."
#: src/hed/libs/compute/EntityRetriever.cpp:393
#, c-format
msgid "The endpoint (%s) is not supported by this plugin (%s)"
msgstr "Точка входа (%s) не поддерживается этим подключаемым модулем (%s)"

#: src/hed/libs/compute/EntityRetriever.cpp:414
#, c-format
msgid "New endpoint is created (%s) from the one with the unspecified interface (%s)"
msgstr "Создана новая точка входа (%s) из точки с неизвестным интерфейсом (%s)"

#: src/hed/libs/compute/EntityRetriever.cpp:432
#, c-format
msgid "Starting sub-thread to query the endpoint on %s"
msgstr "Запускается подпоток для опроса точки входа на %s"

#: src/hed/libs/compute/EntityRetriever.cpp:434
#, c-format
msgid "Failed to start querying the endpoint on %s (unable to create sub-thread)"
msgstr "Сбой начала опроса точки входа на %s (не удалось создать подпоток)"

#: src/hed/libs/compute/EntityRetrieverPlugin.cpp:32
#, c-format
msgid "Found %s %s (it was loaded already)"
msgstr "Найден подключаемый модуль %s %s (уже подгружен)"

#: src/hed/libs/compute/EntityRetrieverPlugin.cpp:41
#: src/hed/libs/compute/EntityRetrieverPlugin.cpp:49
#: src/hed/libs/compute/JobControllerPlugin.cpp:98
#: src/hed/libs/compute/JobControllerPlugin.cpp:107
#: src/hed/libs/compute/SubmitterPlugin.cpp:167
#: src/hed/libs/compute/SubmitterPlugin.cpp:177
#, c-format
msgid "Unable to locate the \"%s\" plugin. Please refer to installation instructions and check if package providing support for \"%s\" plugin is installed"
msgstr "Не удалось обнаружить подключаемый модуль \"%s\". Пожалуйста, проконсультируйтесь с инструкцией по установке и проверьте, установлен ли пакет, содержащий модуль \"%s\"."

#: src/hed/libs/compute/EntityRetrieverPlugin.cpp:42
#, c-format
msgid "%s plugin \"%s\" not found."
msgstr "Не найден подключаемый модуль %s \"%s\"."

#: src/hed/libs/compute/EntityRetrieverPlugin.cpp:50
#, c-format
msgid "%s %s could not be created."
msgstr "%s %s не может быть создан."

#: src/hed/libs/compute/EntityRetrieverPlugin.cpp:55
#, c-format
msgid "Loaded %s %s"
msgstr "Загружен %s %s"

#: src/hed/libs/compute/ExecutionTarget.cpp:51
#, c-format
msgid "Skipping ComputingEndpoint '%s', because it has '%s' interface instead of the requested '%s'."
msgstr "Пропускается ComputingEndpoint '%s', потому что объявлен интерфейс '%s' вместо запрошенного '%s'."
#: src/hed/libs/compute/ExecutionTarget.cpp:132 #, c-format msgid "" "Computing endpoint %s (type %s) added to the list for submission brokering" msgstr "" "ВычиÑÐ»Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° входа %s (тип %s) добавлена в ÑпиÑок Ð´Ð»Ñ Ð¿Ð»Ð°Ð½Ð¸Ñ€Ð¾Ð²ÐºÐ¸ " "заÑылки" #: src/hed/libs/compute/ExecutionTarget.cpp:239 #, c-format msgid "Address: %s" msgstr "ÐдреÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:240 #, c-format msgid "Place: %s" msgstr "МеÑто: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:241 #, c-format msgid "Country: %s" msgstr "Страна: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:242 #, c-format msgid "Postal code: %s" msgstr "Почтовый индекÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:243 #, c-format msgid "Latitude: %f" msgstr "Широта: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:244 #, c-format msgid "Longitude: %f" msgstr "Долгота: %f" #: src/hed/libs/compute/ExecutionTarget.cpp:250 #, c-format msgid "Owner: %s" msgstr "Владелец: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:257 #, c-format msgid "ID: %s" msgstr "ID: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:258 #, c-format msgid "Type: %s" msgstr "Тип: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:263 #, c-format msgid "URL: %s" msgstr "URL: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:264 #, c-format msgid "Interface: %s" msgstr "Ð˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ %s" #: src/hed/libs/compute/ExecutionTarget.cpp:266 msgid "Interface versions:" msgstr "ВерÑии интерфейÑа:" #: src/hed/libs/compute/ExecutionTarget.cpp:271 msgid "Interface extensions:" msgstr "РаÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñа:" #: src/hed/libs/compute/ExecutionTarget.cpp:276 msgid "Capabilities:" msgstr "ВозможноÑти:" #: src/hed/libs/compute/ExecutionTarget.cpp:280 #, c-format msgid "Technology: %s" msgstr "ТехнологиÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:282 msgid "Supported Profiles:" msgstr "Поддерживаемые профили:" #: src/hed/libs/compute/ExecutionTarget.cpp:286 #, c-format msgid "Implementor: %s" msgstr "Внедритель: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:287 #, c-format msgid "Implementation name: %s" msgstr "Ð˜Ð¼Ñ Ñ€ÐµÐ°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ð¸: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:288 #, c-format msgid "Quality level: %s" msgstr "Уровень качеÑтва: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:289 #, c-format msgid "Health state: %s" msgstr "СоÑтоÑние здоровьÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:290 #, c-format msgid "Health state info: %s" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ ÑоÑтоÑнии здоровьÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:291 #, c-format msgid "Serving state: %s" msgstr "СоÑтоÑние обÑлуживаниÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:292 #, c-format msgid "Issuer CA: %s" msgstr "Сертификат выдан CA: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:294 msgid "Trusted CAs:" msgstr "Доверенные центры Ñертификации:" #: src/hed/libs/compute/ExecutionTarget.cpp:298 #, c-format msgid "Downtime starts: %s" msgstr "Ðачало проÑтоÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:299 #, c-format msgid "Downtime ends: %s" msgstr "Конец проÑтоÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:300 #, c-format msgid "Staging: %s" msgstr "РазмещаетÑÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:302 msgid "Job descriptions:" msgstr "ОпиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡:" #: src/hed/libs/compute/ExecutionTarget.cpp:314 #, c-format msgid "Scheme: %s" msgstr "Схема: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:317 #, c-format msgid "Rule: %s" msgstr "Правило: %s" #: 
src/hed/libs/compute/ExecutionTarget.cpp:329 #, c-format msgid "Mapping queue: %s" msgstr "ÐазначаетÑÑ Ð¾Ñ‡ÐµÑ€ÐµÐ´ÑŒ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:330 #, c-format msgid "Max wall-time: %s" msgstr "ДлительноÑть, Ð½Ð°Ð¸Ð±Ð¾Ð»ÑŒÑˆÐ°Ñ (по чаÑам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:331 #, c-format msgid "Max total wall-time: %s" msgstr "Предел общего времени (по чаÑам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:332 #, c-format msgid "Min wall-time: %s" msgstr "ДлительноÑть, Ð½Ð°Ð¸Ð¼ÐµÐ½ÑŒÑˆÐ°Ñ (по чаÑам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:333 #, c-format msgid "Default wall-time: %s" msgstr "ДлительноÑть по умолчанию (по чаÑам): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:334 #, c-format msgid "Max CPU time: %s" msgstr "ДлительноÑть, Ð½Ð°Ð¸Ð±Ð¾Ð»ÑŒÑˆÐ°Ñ (процеÑÑорнаÑ): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:335 #, c-format msgid "Min CPU time: %s" msgstr "ДлительноÑть, Ð½Ð°Ð¸Ð¼ÐµÐ½ÑŒÑˆÐ°Ñ (процеÑÑорнаÑ): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:336 #, c-format msgid "Default CPU time: %s" msgstr "ДлительноÑть по умолчанию (процеÑÑорнаÑ): %s" #: src/hed/libs/compute/ExecutionTarget.cpp:337 #, c-format msgid "Max total jobs: %i" msgstr "Ð’Ñего заданий (предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:338 #, c-format msgid "Max running jobs: %i" msgstr "Задачи в Ñчёте (предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:339 #, c-format msgid "Max waiting jobs: %i" msgstr "Предел задач в очереди: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:340 #, c-format msgid "Max pre-LRMS waiting jobs: %i" msgstr "Предел задач в очереди до СУПО: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:341 #, c-format msgid "Max user running jobs: %i" msgstr "Задачи Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² Ñчёте (предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:342 #, c-format msgid "Max slots per job: %i" msgstr "Предел Ñегментов на задачу: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:343 #, c-format msgid "Max stage in streams: %i" msgstr "Предел потоков размещениÑ: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:344 #, c-format msgid "Max stage out streams: %i" msgstr "Потоки отгрузки (верхний предел): %i" #: src/hed/libs/compute/ExecutionTarget.cpp:345 #, c-format msgid "Scheduling policy: %s" msgstr "Правила планировки: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:346 #, c-format msgid "Max memory: %i" msgstr "МакÑ. 
памÑть: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:347 #, c-format msgid "Max virtual memory: %i" msgstr "Предел виртуальной памÑти: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:348 #, c-format msgid "Max disk space: %i" msgstr "Предел диÑкового проÑтранÑтва: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:349 #, c-format msgid "Default Storage Service: %s" msgstr "Хранилище по умолчанию: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:350 msgid "Supports preemption" msgstr "Поддержка упреждениÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:351 msgid "Doesn't support preemption" msgstr "Упреждение не поддерживаетÑÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:352 #, c-format msgid "Total jobs: %i" msgstr "Ð’Ñего задач: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:353 #, c-format msgid "Running jobs: %i" msgstr "Задачи в Ñчёте: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:354 #, c-format msgid "Local running jobs: %i" msgstr "Внутренние задачи в Ñчёте: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:355 #, c-format msgid "Waiting jobs: %i" msgstr "Задачи в очереди: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:356 #, c-format msgid "Local waiting jobs: %i" msgstr "Внутренние задачи в очереди: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:357 #, c-format msgid "Suspended jobs: %i" msgstr "ПриоÑтановленные задачи: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:358 #, c-format msgid "Local suspended jobs: %i" msgstr "Внутренние приоÑтановленные задачи: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:359 #, c-format msgid "Staging jobs: %i" msgstr "Задачи, выполнÑющие размещение данных: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:360 #, c-format msgid "Pre-LRMS waiting jobs: %i" msgstr "Задачи в очереди до СУПО: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:361 #, c-format msgid "Estimated average waiting time: %s" msgstr "Оценка уÑреднённого времени ожиданиÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:362 #, c-format msgid "Estimated worst waiting time: %s" msgstr "Оценка худшего времени ожиданиÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:363 #, c-format msgid "Free slots: %i" msgstr "Свободные Ñдра: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:365 msgid "Free slots grouped according to time limits (limit: free slots):" msgstr "" "ДоÑтупные меÑта Ñгруппированы по предельному времени (предел: доÑтупные " "меÑта):" #: src/hed/libs/compute/ExecutionTarget.cpp:368 #, c-format msgid " %s: %i" msgstr " %s: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:369 #, c-format msgid " unspecified: %i" msgstr " непределённых: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:372 #, c-format msgid "Used slots: %i" msgstr "ИÑпользованные Ñдра: %d" #: src/hed/libs/compute/ExecutionTarget.cpp:373 #, c-format msgid "Requested slots: %i" msgstr "Запрошено Ñегментов Ñдер: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:374 #, c-format msgid "Reservation policy: %s" msgstr "Политика бронированиÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:381 #, c-format msgid "Resource manager: %s" msgstr "СиÑтема управлениÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:383 #, c-format msgid " (%s)" msgstr " (%s)" #: src/hed/libs/compute/ExecutionTarget.cpp:387 #, c-format msgid "Total physical CPUs: %i" msgstr "Общее количеÑтво физичеÑких процеÑÑоров: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:388 #, c-format msgid "Total logical CPUs: %i" msgstr "Общее количеÑтво логичеÑких процеÑÑоров: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:389 #, c-format msgid "Total slots: %i" msgstr 
"Общее количеÑтво Ñдер: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:390 msgid "Supports advance reservations" msgstr "Поддержка предварительного бронированиÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:391 msgid "Doesn't support advance reservations" msgstr "Ðет поддержки предварительного бронированиÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:392 msgid "Supports bulk submission" msgstr "Поддерживает групповую заÑылку" #: src/hed/libs/compute/ExecutionTarget.cpp:393 msgid "Doesn't support bulk Submission" msgstr "Ðе поддерживает групповую заÑылку" #: src/hed/libs/compute/ExecutionTarget.cpp:394 msgid "Homogeneous resource" msgstr "Однородный реÑурÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:395 msgid "Non-homogeneous resource" msgstr "Ðеоднородный реÑурÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:397 msgid "Network information:" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ Ñети:" #: src/hed/libs/compute/ExecutionTarget.cpp:402 msgid "Working area is shared among jobs" msgstr "Рабочее проÑтранÑтво иÑпользуетÑÑ Ñ€Ð°Ð·Ð½Ñ‹Ð¼Ð¸ задачами" #: src/hed/libs/compute/ExecutionTarget.cpp:403 msgid "Working area is not shared among jobs" msgstr "Рабочее проÑтранÑтво иÑпользуетÑÑ Ð¾Ð´Ð½Ð¾Ð¹ задачей" #: src/hed/libs/compute/ExecutionTarget.cpp:404 #, c-format msgid "Working area total size: %i GB" msgstr "Общий объём рабочего проÑтранÑтва: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:405 #, c-format msgid "Working area free size: %i GB" msgstr "Свободное рабочее проÑтранÑтво: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:406 #, c-format msgid "Working area life time: %s" msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¶Ð¸Ð·Ð½Ð¸ рабочего проÑтранÑтва: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:407 #, c-format msgid "Cache area total size: %i GB" msgstr "Общий объём проÑтранÑтва кÑша: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:408 #, c-format msgid "Cache area free size: %i GB" msgstr "Свободное проÑтранÑтво кÑша: %i GB" #: src/hed/libs/compute/ExecutionTarget.cpp:414 #, c-format msgid "Platform: %s" msgstr "Платформа: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:415 msgid "Execution environment supports inbound connections" msgstr "Среда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ входÑщие ÑоединениÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:416 msgid "Execution environment does not support inbound connections" msgstr "Среда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ поддерживает входÑщие ÑоединениÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:417 msgid "Execution environment supports outbound connections" msgstr "Среда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ иÑходÑщие ÑоединениÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:418 msgid "Execution environment does not support outbound connections" msgstr "Среда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ поддерживает иÑходÑщие ÑоединениÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:419 msgid "Execution environment is a virtual machine" msgstr "Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда - Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð°" #: src/hed/libs/compute/ExecutionTarget.cpp:420 msgid "Execution environment is a physical machine" msgstr "Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда - Ñ€ÐµÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð°" #: src/hed/libs/compute/ExecutionTarget.cpp:421 #, c-format msgid "CPU vendor: %s" msgstr "Производитель процеÑÑора: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:422 #, c-format msgid "CPU model: %s" msgstr "Модель процеÑÑора: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:423 #, c-format msgid "CPU version: %s" msgstr "ВерÑÐ¸Ñ Ð¿Ñ€Ð¾Ñ†ÐµÑÑора: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:424 #, c-format msgid "CPU clock speed: %i" 
msgstr "Ð¢Ð°ÐºÑ‚Ð¾Ð²Ð°Ñ Ñ‡Ð°Ñтота процеÑÑора: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:425 #, c-format msgid "Main memory size: %i" msgstr "Объём оÑновной памÑти: %i" #: src/hed/libs/compute/ExecutionTarget.cpp:426 #, c-format msgid "OS family: %s" msgstr "СемейÑтво ОС: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:427 #, c-format msgid "OS name: %s" msgstr "Ðазвание ОС: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:428 #, c-format msgid "OS version: %s" msgstr "ВерÑÐ¸Ñ ÐžÐ¡: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:435 msgid "Computing service:" msgstr "ВычиÑлительный ÑервиÑ:" #: src/hed/libs/compute/ExecutionTarget.cpp:459 #, c-format msgid "%d Endpoints" msgstr "%d точки входа" #: src/hed/libs/compute/ExecutionTarget.cpp:464 msgid "Endpoint Information:" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ точке входа:" #: src/hed/libs/compute/ExecutionTarget.cpp:476 #, c-format msgid "%d Batch Systems" msgstr "%d ÑиÑтемы ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ð°ÐºÐµÑ‚Ð½Ð¾Ð¹ обработкой" #: src/hed/libs/compute/ExecutionTarget.cpp:481 msgid "Batch System Information:" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ СУПО:" #: src/hed/libs/compute/ExecutionTarget.cpp:487 msgid "Installed application environments:" msgstr "УÑтановленные рабочие Ñреды:" #: src/hed/libs/compute/ExecutionTarget.cpp:500 #, c-format msgid "%d Shares" msgstr "%d СовмеÑтные реÑурÑÑ‹" #: src/hed/libs/compute/ExecutionTarget.cpp:505 msgid "Share Information:" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ ÑовмеÑтном реÑурÑе:" #: src/hed/libs/compute/ExecutionTarget.cpp:511 #, c-format msgid "%d mapping policies" msgstr "%d правила приÑвоениÑ" #: src/hed/libs/compute/ExecutionTarget.cpp:515 msgid "Mapping policy:" msgstr "Правило приÑвоениÑ:" #: src/hed/libs/compute/ExecutionTarget.cpp:531 #, c-format msgid "Execution Target on Computing Service: %s" msgstr "ИÑполнÑющий реÑÑƒÑ€Ñ Ð²Ñ‹Ñ‡Ð¸Ñлительного ÑервиÑа: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:533 #, c-format msgid " Computing endpoint URL: %s" msgstr " URL точки входа Ð´Ð»Ñ Ð²Ñ‹Ñ‡Ð¸Ñлений: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:535 #, c-format msgid " Computing endpoint interface name: %s" msgstr " Ðазвание интерфейÑа точки входа Ð´Ð»Ñ Ð²Ñ‹Ñ‡Ð¸Ñлений: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:537 #: src/hed/libs/compute/Job.cpp:579 #, c-format msgid " Queue: %s" msgstr " Очередь: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:540 #, c-format msgid " Mapping queue: %s" msgstr " Очередь приÑвоениÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:543 #, c-format msgid " Health state: %s" msgstr " СоÑтоÑние здоровьÑ: %s" #: src/hed/libs/compute/ExecutionTarget.cpp:548 msgid "Service information:" msgstr "Ð¡Ð²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾ Ñлужбе:" #: src/hed/libs/compute/ExecutionTarget.cpp:553 msgid " Installed application environments:" msgstr " УÑтановленные рабочие Ñреды:" #: src/hed/libs/compute/ExecutionTarget.cpp:560 msgid "Batch system information:" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ СУПО:" #: src/hed/libs/compute/ExecutionTarget.cpp:563 msgid "Queue information:" msgstr "Ð¡Ð²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾Ð± очереди:" #: src/hed/libs/compute/ExecutionTarget.cpp:570 msgid " Benchmark information:" msgstr " Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾Ð± Ñталонных теÑтах:" #: src/hed/libs/compute/GLUE2.cpp:53 msgid "The Service doesn't advertise its Type." msgstr "Служба не Ñообщает о Ñвоём типе." #: src/hed/libs/compute/GLUE2.cpp:58 msgid "The ComputingService doesn't advertise its Quality Level." msgstr "Служба ComputingService не Ñообщает о Ñвоём уровне качеÑтва." #: src/hed/libs/compute/GLUE2.cpp:99 msgid "The ComputingEndpoint has no URL." 
msgstr "У ComputingEndpoint отÑутÑтвует URL." #: src/hed/libs/compute/GLUE2.cpp:104 msgid "The Service advertises no Health State." msgstr "Служба не предоÑтавлÑет информации о ÑоÑтоÑнии здоровьÑ." #: src/hed/libs/compute/GLUE2.cpp:117 msgid "The ComputingEndpoint doesn't advertise its Quality Level." msgstr "Служба ComputingEndpoint не Ñообщает о Ñвоём уровне качеÑтва." #: src/hed/libs/compute/GLUE2.cpp:128 msgid "The ComputingService doesn't advertise its Interface." msgstr "Служба ComputingService не Ñообщает о Ñвоём интерфейÑе." #: src/hed/libs/compute/GLUE2.cpp:160 msgid "The ComputingEndpoint doesn't advertise its Serving State." msgstr "Служба ComputingEndpoint не Ñообщает о Ñвоём ÑоÑтоÑнии обÑлуживаниÑ." #: src/hed/libs/compute/GLUE2.cpp:247 #, c-format msgid "" "The \"FreeSlotsWithDuration\" attribute published by \"%s\" is wrongly " "formatted. Ignoring it." msgstr "" "Ðеверно отформатирован атрибут \"FreeSlotsWithDuration\", публикуемый \"%s" "\", - игнорируетÑÑ." #: src/hed/libs/compute/GLUE2.cpp:248 #, c-format msgid "Wrong format of the \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" msgstr "Ðеверный формат \"FreeSlotsWithDuration\" = \"%s\" (\"%s\")" #: src/hed/libs/compute/GLUE2.cpp:420 #, c-format msgid "" "Couldn't parse benchmark XML:\n" "%s" msgstr "" "Ðевозможно разобрать Ñталонный XML:\n" "%s" #: src/hed/libs/compute/Job.cpp:328 msgid "Unable to detect format of job record." msgstr "Ðевозможно определить формат учётной запиÑи о задаче." #: src/hed/libs/compute/Job.cpp:549 #, c-format msgid "Job: %s" msgstr "Задача: %s" #: src/hed/libs/compute/Job.cpp:551 #, c-format msgid " Name: %s" msgstr " ИмÑ: %s" #: src/hed/libs/compute/Job.cpp:552 #, c-format msgid " State: %s" msgstr " СоÑтоÑние: %s" #: src/hed/libs/compute/Job.cpp:555 #, c-format msgid " Specific state: %s" msgstr " СпецифичеÑкое ÑоÑтоÑние: %s" #: src/hed/libs/compute/Job.cpp:559 src/hed/libs/compute/Job.cpp:583 #, c-format msgid " Waiting Position: %d" msgstr " Положение в очереди: %d" #: src/hed/libs/compute/Job.cpp:563 #, c-format msgid " Exit Code: %d" msgstr " Код выхода: %d" #: src/hed/libs/compute/Job.cpp:567 #, c-format msgid " Job Error: %s" msgstr " Ошибка задачи: %s" #: src/hed/libs/compute/Job.cpp:572 #, c-format msgid " Owner: %s" msgstr " Владелец: %s" #: src/hed/libs/compute/Job.cpp:576 #, c-format msgid " Other Messages: %s" msgstr " Другие ÑообщениÑ: %s" #: src/hed/libs/compute/Job.cpp:581 #, c-format msgid " Requested Slots: %d" msgstr " Запрошено ваканÑий: %i" #: src/hed/libs/compute/Job.cpp:586 #, c-format msgid " Stdin: %s" msgstr " Стандартный вход: %s" #: src/hed/libs/compute/Job.cpp:588 #, c-format msgid " Stdout: %s" msgstr " Стандартный выход: %s" #: src/hed/libs/compute/Job.cpp:590 #, c-format msgid " Stderr: %s" msgstr " Ð¡Ñ‚Ð°Ð½Ð´Ð°Ñ€Ñ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°: %s" #: src/hed/libs/compute/Job.cpp:592 #, c-format msgid " Computing Service Log Directory: %s" msgstr " Каталог, Ñодержащий журнальную запиÑÑŒ вычиÑлительного ÑервиÑа: %s" #: src/hed/libs/compute/Job.cpp:595 #, c-format msgid " Submitted: %s" msgstr " ЗаÑлана: %s" #: src/hed/libs/compute/Job.cpp:598 #, c-format msgid " End Time: %s" msgstr " Ð’Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ: %s" #: src/hed/libs/compute/Job.cpp:601 #, c-format msgid " Submitted from: %s" msgstr " ЗаÑылающий клиент: %s" #: src/hed/libs/compute/Job.cpp:604 #, c-format msgid " Submitting client: %s" msgstr " ВерÑÐ¸Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð°: %s" #: src/hed/libs/compute/Job.cpp:607 #, c-format msgid " Requested CPU Time: %s" msgstr " Запрошенное процеÑÑорное времÑ: %s" #: 
#: src/hed/libs/compute/Job.cpp:612
#, c-format
msgid " Used CPU Time: %s (%s per slot)"
msgstr " Использованное процессорное время: %s (%s на слот)"

#: src/hed/libs/compute/Job.cpp:616
#, c-format
msgid " Used CPU Time: %s"
msgstr " Использованное процессорное время: %s"

#: src/hed/libs/compute/Job.cpp:622
#, c-format
msgid " Used Wall Time: %s (%s per slot)"
msgstr " Использованное время: %s (%s на слот)"

#: src/hed/libs/compute/Job.cpp:626
#, c-format
msgid " Used Wall Time: %s"
msgstr " Использованное время: %s"

#: src/hed/libs/compute/Job.cpp:631
#, c-format
msgid " Used Memory: %d"
msgstr " Использование ОЗУ: %d"

#: src/hed/libs/compute/Job.cpp:635
#, c-format
msgid " Results were deleted: %s"
msgstr " Результаты были удалены: %s"

#: src/hed/libs/compute/Job.cpp:636
#, c-format
msgid " Results must be retrieved before: %s"
msgstr " Результаты должны быть востребованы до: %s"

#: src/hed/libs/compute/Job.cpp:640
#, c-format
msgid " Proxy valid until: %s"
msgstr " Доверенность действительна до: %s"

#: src/hed/libs/compute/Job.cpp:644
#, c-format
msgid " Entry valid from: %s"
msgstr " Запись действительна с: %s"

#: src/hed/libs/compute/Job.cpp:647
#, c-format
msgid " Entry valid for: %s"
msgstr " Запись действительна на: %s"

#: src/hed/libs/compute/Job.cpp:651
msgid " Old job IDs:"
msgstr " Старые ярлыки задачи:"

#: src/hed/libs/compute/Job.cpp:659
#, c-format
msgid " ID on service: %s"
msgstr " ID на сервисе: %s"

#: src/hed/libs/compute/Job.cpp:660
#, c-format
msgid " Service information URL: %s (%s)"
msgstr " URL информации о сервисе: %s (%s)"

#: src/hed/libs/compute/Job.cpp:661
#, c-format
msgid " Job status URL: %s (%s)"
msgstr " URL состояния задачи: %s (%s)"

#: src/hed/libs/compute/Job.cpp:662
#, c-format
msgid " Job management URL: %s (%s)"
msgstr " URL управления задачей: %s (%s)"

#: src/hed/libs/compute/Job.cpp:663
#, c-format
msgid " Stagein directory URL: %s"
msgstr " URL каталога для загрузки: %s"

#: src/hed/libs/compute/Job.cpp:664
#, c-format
msgid " Stageout directory URL: %s"
msgstr " URL каталога для отгрузки: %s"

#: src/hed/libs/compute/Job.cpp:665
#, c-format
msgid " Session directory URL: %s"
msgstr " URL каталога Грид-сессии: %s"

#: src/hed/libs/compute/Job.cpp:667
msgid " Delegation IDs:"
msgstr " Идентификаторы делегирования:"

#: src/hed/libs/compute/Job.cpp:849
#, c-format
msgid "Unable to handle job (%s), no interface specified."
msgstr "Невозможно обработать задачу (%s), не указан интерфейс."

#: src/hed/libs/compute/Job.cpp:854
#, c-format
msgid "Unable to handle job (%s), no plugin associated with the specified interface (%s)"
msgstr "Невозможно обработать задачу (%s), для указанного интерфейса (%s) нет подключаемых модулей"

#: src/hed/libs/compute/Job.cpp:876
#, c-format
msgid "Invalid download destination path specified (%s)"
msgstr "Указан неверный путь к каталогу загрузки (%s)"

#: src/hed/libs/compute/Job.cpp:881
#, c-format
msgid "Unable to download job (%s), no JobControllerPlugin plugin was set to handle the job."
msgstr "Невозможно загрузить задачу (%s), не был задан модуль JobControllerPlugin для работы с задачей."
#: src/hed/libs/compute/Job.cpp:885
#, c-format
msgid "Downloading job: %s"
msgstr "Загружается задача: %s"

#: src/hed/libs/compute/Job.cpp:891
#, fuzzy, c-format
msgid "Can't retrieve job files for job (%s) - unable to determine URL of stage out directory"
msgstr "Не удалось получить выходные файлы задачи (%s) - невозможно определить URL для записи"

#: src/hed/libs/compute/Job.cpp:897
#, fuzzy, c-format
msgid "Can't retrieve job files for job (%s) - unable to determine URL of log directory"
msgstr "Не удалось получить выходные файлы задачи (%s) - невозможно определить URL для записи"

#: src/hed/libs/compute/Job.cpp:903
#, c-format
msgid "Invalid stage out path specified (%s)"
msgstr "Указан неверный путь для отгрузки (%s)"

#: src/hed/libs/compute/Job.cpp:911
#, c-format
msgid "%s directory exist! Skipping job."
msgstr "Каталог %s уже существует! Задача пропускается."

#: src/hed/libs/compute/Job.cpp:923
#, c-format
msgid "Unable to retrieve list of job files to download for job %s"
msgstr "Невозможно получить список загружаемых файлов для задачи %s"

#: src/hed/libs/compute/Job.cpp:944
#, fuzzy, c-format
msgid "Unable to retrieve list of log files to download for job %s"
msgstr "Невозможно получить список загружаемых файлов для задачи %s"

#: src/hed/libs/compute/Job.cpp:963
#, c-format
msgid "No files to retrieve for job %s"
msgstr "Отсутствуют загружаемые файлы для задачи %s"

#: src/hed/libs/compute/Job.cpp:969
#, c-format
msgid "Failed to create directory %s! Skipping job."
msgstr "Сбой создания каталога %s! Задача пропускается."

#: src/hed/libs/compute/Job.cpp:986
#, c-format
msgid "Failed downloading %s to %s, destination already exist"
msgstr "Ошибка загрузки %s в %s, файл-приёмник уже существует"

#: src/hed/libs/compute/Job.cpp:992
#, c-format
msgid "Failed downloading %s to %s, unable to remove existing destination"
msgstr "Ошибка загрузки %s в %s, сбой удаления существующего файла-приёмника"

#: src/hed/libs/compute/Job.cpp:999
#, c-format
msgid "Failed downloading %s to %s"
msgstr "Ошибка загрузки %s в %s"

#: src/hed/libs/compute/Job.cpp:1012
#, c-format
msgid "Unable to initialize handler for %s"
msgstr "Невозможно инициализировать обработчик для %s"

#: src/hed/libs/compute/Job.cpp:1017
#, c-format
msgid "Unable to list files at %s"
msgstr "Невозможно перечислить файлы на %s"

#: src/hed/libs/compute/Job.cpp:1060
msgid "Now copying (from -> to)"
msgstr "Производится копирование (из -> в)"

#: src/hed/libs/compute/Job.cpp:1061
#, c-format
msgid " %s -> %s"
msgstr " %s -> %s"

#: src/hed/libs/compute/Job.cpp:1076
#, c-format
msgid "Unable to initialise connection to source: %s"
msgstr "Невозможно инициализировать соединение с источником: %s"

#: src/hed/libs/compute/Job.cpp:1087
#, c-format
msgid "Unable to initialise connection to destination: %s"
msgstr "Невозможно инициализировать соединение с назначением: %s"

#: src/hed/libs/compute/Job.cpp:1109
#, c-format
msgid "File download failed: %s"
msgstr "Невозможно загрузить файл: %s"

#: src/hed/libs/compute/Job.cpp:1148 src/hed/libs/compute/Job.cpp:1177
#: src/hed/libs/compute/Job.cpp:1209 src/hed/libs/compute/Job.cpp:1242
#, c-format
msgid "Waiting for lock on file %s"
msgstr "Ожидание разблокирования файла %s"

#: src/hed/libs/compute/JobControllerPlugin.cpp:99
#, c-format
msgid "JobControllerPlugin plugin \"%s\" not found."
msgstr "Подключаемый модуль JobControllerPlugin \"%s\" не обнаружен."
#: src/hed/libs/compute/JobControllerPlugin.cpp:108
#, c-format
msgid "JobControllerPlugin %s could not be created"
msgstr "Подключаемый модуль JobControllerPlugin %s не может быть создан"

#: src/hed/libs/compute/JobControllerPlugin.cpp:113
#, c-format
msgid "Loaded JobControllerPlugin %s"
msgstr "Подгружен JobControllerPlugin %s"

#: src/hed/libs/compute/JobDescription.cpp:26
#, c-format
msgid ": %d"
msgstr ": %d"

#: src/hed/libs/compute/JobDescription.cpp:28
#, c-format
msgid ": %s"
msgstr ": %s"

#: src/hed/libs/compute/JobDescription.cpp:144
msgid " --- DRY RUN --- "
msgstr " --- ХОЛОСТАЯ ПРОГОНКА --- "

#: src/hed/libs/compute/JobDescription.cpp:154
#, c-format
msgid " Annotation: %s"
msgstr " Аннотация: %s"

#: src/hed/libs/compute/JobDescription.cpp:160
#, c-format
msgid " Old activity ID: %s"
msgstr " Старый ярлык задания: %s"

#: src/hed/libs/compute/JobDescription.cpp:166
#, c-format
msgid " Argument: %s"
msgstr " Argument: %s"

#: src/hed/libs/compute/JobDescription.cpp:177
#, c-format
msgid " RemoteLogging (optional): %s (%s)"
msgstr " Удалённое журналирование (по выбору): %s (%s)"

#: src/hed/libs/compute/JobDescription.cpp:180
#, c-format
msgid " RemoteLogging: %s (%s)"
msgstr " Удалённое журналирование: %s (%s)"

#: src/hed/libs/compute/JobDescription.cpp:188
#, c-format
msgid " Environment.name: %s"
msgstr " Environment.name: %s"

#: src/hed/libs/compute/JobDescription.cpp:189
#, c-format
msgid " Environment: %s"
msgstr " Environment: %s"

#: src/hed/libs/compute/JobDescription.cpp:202
#, c-format
msgid " PreExecutable.Argument: %s"
msgstr " PreExecutable.Argument: %s"

#: src/hed/libs/compute/JobDescription.cpp:205
#: src/hed/libs/compute/JobDescription.cpp:223
#, c-format
msgid " Exit code for successful execution: %d"
msgstr " Код выхода успешного исполнения: %d"

#: src/hed/libs/compute/JobDescription.cpp:208
#: src/hed/libs/compute/JobDescription.cpp:226
msgid " No exit code for successful execution specified."
msgstr " Код выхода для успешного исполнения не указан."
#: src/hed/libs/compute/JobDescription.cpp:220
#, c-format
msgid " PostExecutable.Argument: %s"
msgstr " PostExecutable.Argument: %s"

#: src/hed/libs/compute/JobDescription.cpp:236
#, c-format
msgid " Access control: %s"
msgstr " Контроль доступа: %s"

#: src/hed/libs/compute/JobDescription.cpp:240
#, c-format
msgid " Processing start time: %s"
msgstr " Время начала обработки: %s"

#: src/hed/libs/compute/JobDescription.cpp:243
msgid " Notify:"
msgstr " Уведомить:"

#: src/hed/libs/compute/JobDescription.cpp:257
#, c-format
msgid " Credential service: %s"
msgstr " Служба параметров доступа: %s"

#: src/hed/libs/compute/JobDescription.cpp:267
msgid " Operating system requirements:"
msgstr " Требования к операционной системе:"

#: src/hed/libs/compute/JobDescription.cpp:285
msgid " Computing endpoint requirements:"
msgstr " Требования к вычислительному ресурсу:"

#: src/hed/libs/compute/JobDescription.cpp:298
msgid " Node access: inbound"
msgstr " Доступ к узлу: входящий"

#: src/hed/libs/compute/JobDescription.cpp:301
msgid " Node access: outbound"
msgstr " Доступ к узлу: исходящий"

#: src/hed/libs/compute/JobDescription.cpp:304
msgid " Node access: inbound and outbound"
msgstr " Доступ к узлу: входящий и исходящий"

#: src/hed/libs/compute/JobDescription.cpp:314
msgid " Job requires exclusive execution"
msgstr " Задача требует эксклюзивного исполнения"

#: src/hed/libs/compute/JobDescription.cpp:317
msgid " Job does not require exclusive execution"
msgstr " Задача не требует эксклюзивного исполнения"

#: src/hed/libs/compute/JobDescription.cpp:322
msgid " Run time environment requirements:"
msgstr " Требования среды выполнения:"

#: src/hed/libs/compute/JobDescription.cpp:334
msgid " Inputfile element:"
msgstr " Элемент Inputfile:"

#: src/hed/libs/compute/JobDescription.cpp:335
#: src/hed/libs/compute/JobDescription.cpp:357
#, c-format
msgid " Name: %s"
msgstr " Name: %s"

#: src/hed/libs/compute/JobDescription.cpp:337
msgid " Is executable: true"
msgstr " Исполняемый: верно"

#: src/hed/libs/compute/JobDescription.cpp:341
#, c-format
msgid " Sources: %s"
msgstr " Источники: %s"

#: src/hed/libs/compute/JobDescription.cpp:343
#, c-format
msgid " Sources.DelegationID: %s"
msgstr " Sources.DelegationID: %s"

#: src/hed/libs/compute/JobDescription.cpp:347
#, c-format
msgid " Sources.Options: %s = %s"
msgstr " Sources.Options: %s = %s"

#: src/hed/libs/compute/JobDescription.cpp:356
msgid " Outputfile element:"
msgstr " Элемент Outputfile:"

#: src/hed/libs/compute/JobDescription.cpp:360
#, c-format
msgid " Targets: %s"
msgstr " Назначения: %s"

#: src/hed/libs/compute/JobDescription.cpp:362
#, c-format
msgid " Targets.DelegationID: %s"
msgstr " Targets.DelegationID: %s"

#: src/hed/libs/compute/JobDescription.cpp:366
#, c-format
msgid " Targets.Options: %s = %s"
msgstr " Targets.Options: %s = %s"

#: src/hed/libs/compute/JobDescription.cpp:373
#, c-format
msgid " DelegationID element: %s"
msgstr " Элемент DelegationID: %s"

#: src/hed/libs/compute/JobDescription.cpp:380
#, c-format
msgid " Other attributes: [%s], %s"
msgstr " Другие атрибуты: [%s], %s"

#: src/hed/libs/compute/JobDescription.cpp:446
msgid "Empty job description source string"
msgstr "Пустое исходное описание задачи"

#: src/hed/libs/compute/JobDescription.cpp:479
msgid "No job description parsers available"
msgstr "Отсутствуют разборщики описания задания"

#: src/hed/libs/compute/JobDescription.cpp:481
#, c-format
msgid "No job description parsers suitable for handling '%s' language are available"
msgstr "Нет разборщиков описания задачи, подходящих для обработки языка '%s'"
msgstr "Ðет разборщиков опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸, подходÑщих Ð´Ð»Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚ÐºÐ¸ Ñзыка '%s'" #: src/hed/libs/compute/JobDescription.cpp:489 #, c-format msgid "%s parsing error" msgstr "%s ошибка разборки" #: src/hed/libs/compute/JobDescription.cpp:505 msgid "No job description parser was able to interpret job description" msgstr "Ðи один разборщик не Ñмог обработать опиÑание задачи" #: src/hed/libs/compute/JobDescription.cpp:515 msgid "" "Job description language is not specified, unable to output description." msgstr "Язык опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ не указан, невозможно вывеÑти опиÑание." #: src/hed/libs/compute/JobDescription.cpp:527 #, c-format msgid "Generating %s job description output" msgstr "СоздаётÑÑ Ð¾Ð¿Ð¸Ñание задачи в формате %s" #: src/hed/libs/compute/JobDescription.cpp:543 #, c-format msgid "Language (%s) not recognized by any job description parsers." msgstr "Язык (%s) не опознан ни одним из модулей разборки опиÑаний задач." #: src/hed/libs/compute/JobDescription.cpp:556 #, c-format msgid "Two input files have identical name '%s'." msgstr "Два входных файла Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ‡Ð½Ñ‹Ð¼Ð¸ именами '%s'." #: src/hed/libs/compute/JobDescription.cpp:575 #: src/hed/libs/compute/JobDescription.cpp:588 #, c-format msgid "Cannot stat local input file '%s'" msgstr "Ðевозможно определить ÑÑ‚Ð°Ñ‚ÑƒÑ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð¾Ð³Ð¾ входного файла '%s'" #: src/hed/libs/compute/JobDescription.cpp:608 #, c-format msgid "Cannot find local input file '%s' (%s)" msgstr "Ðевозможно обнаружить локальный входной файл '%s' (%s)" #: src/hed/libs/compute/JobDescription.cpp:650 msgid "Unable to select runtime environment" msgstr "Ðевозможно выбрать Ñреду выполнениÑ" #: src/hed/libs/compute/JobDescription.cpp:657 msgid "Unable to select middleware" msgstr "Ðевозможно выбрать подпрограммное обеÑпечение" #: src/hed/libs/compute/JobDescription.cpp:664 msgid "Unable to select operating system." msgstr "Ðевозможно выбрать операционную ÑиÑтему." #: src/hed/libs/compute/JobDescription.cpp:683 #, c-format msgid "No test-job with ID %d found." msgstr "ТеÑÑ‚Ð¾Ð²Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° под номером %d не найдена." #: src/hed/libs/compute/JobDescription.cpp:695 #, c-format msgid "Test was defined with ID %d, but some error occurred during parsing it." msgstr "" "ТеÑÑ‚ был Ñоздан Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€Ð¾Ð¼ %d, но при обработке возникла ошибка." #: src/hed/libs/compute/JobDescription.cpp:699 #, c-format msgid "No jobdescription resulted at %d test" msgstr "Ð”Ð»Ñ Ñ‚ÐµÑта %d отÑутÑтвует опиÑание задачи" #: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:52 #, c-format msgid "JobDescriptionParserPlugin plugin \"%s\" not found." msgstr "Подключаемый модуль JobDescriptionParserPlugin \"%s\" не обнаружен." 
#: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:59
#, c-format
msgid "JobDescriptionParserPlugin %s could not be created"
msgstr "Подключаемый модуль JobDescriptionParserPlugin %s не может быть создан"

#: src/hed/libs/compute/JobDescriptionParserPlugin.cpp:64
#, c-format
msgid "Loaded JobDescriptionParserPlugin %s"
msgstr "Подгружен JobDescriptionParserPlugin %s"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:125
#, c-format
msgid "Unable to create data base (%s)"
msgstr "Не удалось создать базу данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:133
#, c-format
msgid "Unable to create jobs table in data base (%s)"
msgstr "Не удалось создать таблицу задач в базе данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:142
#, c-format
msgid "Unable to create jobs_new table in data base (%s)"
msgstr "Не удалось создать таблицу jobs_new в базе данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:148
#, c-format
msgid "Unable to transfer from jobs to jobs_new in data base (%s)"
msgstr "Не удалось перенести из таблицы jobs в jobs_new в базе данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:154
#, c-format
msgid "Unable to drop jobs in data base (%s)"
msgstr "Не удалось сбросить таблицу jobs в базе данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:160
#, c-format
msgid "Unable to rename jobs table in data base (%s)"
msgstr "Не удалось переименовать таблицу jobs в базе данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:170
#, c-format
msgid "Unable to create index for jobs table in data base (%s)"
msgstr "Не удалось создать индекс для таблицы задач в базе данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:178
#, c-format
msgid "Failed checking database (%s)"
msgstr "Сбой проверки базы данных (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:180
#, c-format
msgid "Job database connection established successfully (%s)"
msgstr "Связь с базой данных задач успешно установлена (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:202
#, c-format
msgid "Error from SQLite: %s: %s"
msgstr "Ошибка SQLite: %s: %s"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:205
#, c-format
msgid "Error from SQLite: %s"
msgstr "Ошибка SQLite: %s"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:226
#: src/hed/libs/compute/JobInformationStorageXML.cpp:36
#, c-format
msgid "Job list file cannot be created: The parent directory (%s) doesn't exist."
msgstr "Файл списка задач не может быть создан: родительский каталог (%s) не существует."
#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:230
#: src/hed/libs/compute/JobInformationStorageXML.cpp:40
#, c-format
msgid "Job list file cannot be created: %s is not a directory"
msgstr "Файл списка задач не может быть создан: %s не является каталогом"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:237
#: src/hed/libs/compute/JobInformationStorageXML.cpp:47
#, c-format
msgid "Job list file (%s) is not a regular file"
msgstr "Список задач (%s) не является стандартным файлом"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:367
#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:374
#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:381
#, c-format
msgid "Unable to write records into job database (%s): Id \"%s\""
msgstr "Не удалось внести записи в базу данных задач (%s): Id \"%s\""

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:627
#: src/hed/libs/compute/JobInformationStorageXML.cpp:146
#, c-format
msgid "Unable to truncate job database (%s)"
msgstr "Не удалось укоротить базу данных задач (%s)"

#: src/hed/libs/compute/JobInformationStorageSQLite.cpp:660
#, c-format
msgid "Unable to determine error (%d)"
msgstr "Невозможно распознать ошибку (%d)"

#: src/hed/libs/compute/JobInformationStorageXML.cpp:60
#: src/hed/libs/compute/JobInformationStorageXML.cpp:232
#: src/hed/libs/compute/JobInformationStorageXML.cpp:273
#, c-format
msgid "Waiting for lock on job list file %s"
msgstr "Ожидание разблокирования файла списка задач %s"

#: src/hed/libs/compute/JobInformationStorageXML.cpp:171
#, c-format
msgid "Will remove %s on service %s."
msgstr "Задача %s будет удалена с сервиса %s."

#: src/hed/libs/compute/JobSupervisor.cpp:40
msgid "Ignoring job, the job ID is empty"
msgstr "Задача игнорируется, так как её ярлык пуст"

#: src/hed/libs/compute/JobSupervisor.cpp:45
#, c-format
msgid "Ignoring job (%s), the management interface name is unknown"
msgstr "Игнорируется задача (%s), отсутствует название интерфейса управления"

#: src/hed/libs/compute/JobSupervisor.cpp:50
#, c-format
msgid "Ignoring job (%s), the job management URL is unknown"
msgstr "Игнорируется задача (%s), отсутствует URL интерфейса управления"

#: src/hed/libs/compute/JobSupervisor.cpp:55
#, c-format
msgid "Ignoring job (%s), the status interface name is unknown"
msgstr "Игнорируется задача (%s), отсутствует название интерфейса состояния"

#: src/hed/libs/compute/JobSupervisor.cpp:60
#, c-format
msgid "Ignoring job (%s), the job status URL is unknown"
msgstr "Игнорируется задача (%s), отсутствует URL состояния задачи"

#: src/hed/libs/compute/JobSupervisor.cpp:69
#, c-format
msgid "Ignoring job (%s), unable to load JobControllerPlugin for %s"
msgstr "Игнорируется задача (%s), невозможно подгрузить JobControllerPlugin для %s"

#: src/hed/libs/compute/JobSupervisor.cpp:76
#, c-format
msgid "Ignoring job (%s), already tried and were unable to load JobControllerPlugin"
msgstr "Игнорируется задача (%s), предыдущая попытка подгрузить JobControllerPlugin завершилась неудачей"

#: src/hed/libs/compute/Software.cpp:65 src/hed/libs/compute/Software.cpp:94
#: src/hed/libs/compute/Software.cpp:113
#, c-format
msgid "%s > %s => false"
msgstr "%s > %s => неверно"

#: src/hed/libs/compute/Software.cpp:70 src/hed/libs/compute/Software.cpp:83
#: src/hed/libs/compute/Software.cpp:107
#, c-format
msgid "%s > %s => true"
msgstr "%s > %s => верно"
#: src/hed/libs/compute/Software.cpp:90 src/hed/libs/compute/Software.cpp:102
#, c-format
msgid "%s > %s => false: %s contains non numbers in the version part."
msgstr "%s > %s => неверно: %s содержит нецифровые символы в номере версии."

#: src/hed/libs/compute/Software.cpp:199 src/hed/libs/compute/Software.cpp:210
#, c-format
msgid "Requirement \"%s %s\" NOT satisfied."
msgstr "Требование \"%s %s\" НЕ удовлетворено."

#: src/hed/libs/compute/Software.cpp:205
#, c-format
msgid "Requirement \"%s %s\" satisfied."
msgstr "Требование \"%s %s\" удовлетворено."

#: src/hed/libs/compute/Software.cpp:214
#, c-format
msgid "Requirement \"%s %s\" satisfied by \"%s\"."
msgstr "Требование \"%s %s\" удовлетворено \"%s\"."

#: src/hed/libs/compute/Software.cpp:219
#, fuzzy
msgid "All software requirements satisfied."
msgstr "Все требования удовлетворены."

#: src/hed/libs/compute/Submitter.cpp:83
#, c-format
msgid "Trying to submit directly to endpoint (%s)"
msgstr "Попытка засылки задачи напрямую к точке входа (%s)"

#: src/hed/libs/compute/Submitter.cpp:88
#, c-format
msgid "Interface (%s) specified, submitting only to that interface"
msgstr "Задан интерфейс (%s), засылка производится только через него"

#: src/hed/libs/compute/Submitter.cpp:106
msgid "Trying all available interfaces"
msgstr "Пробуются все доступные интерфейсы"

#: src/hed/libs/compute/Submitter.cpp:112
#, c-format
msgid "Trying to submit endpoint (%s) using interface (%s) with plugin (%s)."
msgstr "Попытка засылки на точку входа (%s) используя интерфейс (%s) с подключаемым модулем (%s)."

#: src/hed/libs/compute/Submitter.cpp:116
#, c-format
msgid "Unable to load plugin (%s) for interface (%s) when trying to submit job description."
msgstr "Невозможно подгрузить модуль (%s) для интерфейса (%s) при попытке засылки описания задачи."

#: src/hed/libs/compute/Submitter.cpp:130
#, c-format
msgid "No more interfaces to try for endpoint %s."
msgstr "Опробованы все интерфейсы для точки входа %s."

#: src/hed/libs/compute/Submitter.cpp:336
#, c-format
msgid "Target %s does not match requested interface(s)."
msgstr "Назначение %s не соответствует запрошенному интерфейсу."

#: src/hed/libs/compute/SubmitterPlugin.cpp:63
msgid "No stagein URL is provided"
msgstr "Не указан URL для загрузки"

#: src/hed/libs/compute/SubmitterPlugin.cpp:72
#, fuzzy, c-format
msgid "Failed reading file %s"
msgstr "Сбой при чтении файла %s"

#: src/hed/libs/compute/SubmitterPlugin.cpp:86
#, c-format
msgid "Failed uploading file %s to %s: %s"
msgstr "Не удалось отгрузить файл %s в %s: %s"

#: src/hed/libs/compute/SubmitterPlugin.cpp:168
#, c-format
msgid "SubmitterPlugin plugin \"%s\" not found."
msgstr "Подключаемый модуль SubmitterPlugin \"%s\" не обнаружен."
#: src/hed/libs/compute/SubmitterPlugin.cpp:178
#, c-format
msgid "SubmitterPlugin %s could not be created"
msgstr "Подключаемый модуль SubmitterPlugin %s не может быть создан"

#: src/hed/libs/compute/SubmitterPlugin.cpp:183
#, c-format
msgid "Loaded SubmitterPlugin %s"
msgstr "Подгружен SubmitterPlugin %s"

#: src/hed/libs/compute/examples/basic_job_submission.cpp:28
msgid "Invalid job description"
msgstr "Недопустимое описание задачи"

#: src/hed/libs/compute/examples/basic_job_submission.cpp:47
msgid "Failed to submit job"
msgstr "Ошибка запуска задачи"

#: src/hed/libs/compute/examples/basic_job_submission.cpp:54
#, c-format
msgid "Failed to write to local job list %s"
msgstr "Ошибка записи в локальный файл списка задач %s"

#: src/hed/libs/compute/test_jobdescription.cpp:20
msgid "[job description ...]"
msgstr "[описание задачи...]"

#: src/hed/libs/compute/test_jobdescription.cpp:21
msgid "This tiny tool can be used for testing the JobDescription's conversion abilities."
msgstr "Эта программулечка может быть использована для проверки способностей преобразования JobDescription."

#: src/hed/libs/compute/test_jobdescription.cpp:23
msgid "The job description also can be a file or a string in ADL or XRSL format."
msgstr "Описание задачи может быть также задано файлом или строкой в формате ADL или XRSL."

#: src/hed/libs/compute/test_jobdescription.cpp:27
msgid "define the requested format (nordugrid:xrsl, emies:adl)"
msgstr "укажите запрашиваемый формат (nordugrid:xrsl, emies:adl)"

#: src/hed/libs/compute/test_jobdescription.cpp:28
msgid "format"
msgstr "формат"

#: src/hed/libs/compute/test_jobdescription.cpp:33
msgid "show the original job description"
msgstr "показать изначальное описание задачи"

#: src/hed/libs/compute/test_jobdescription.cpp:43
#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:72
msgid "Use --help option for detailed usage information"
msgstr "Используйте опцию --help для подробного описания"

#: src/hed/libs/compute/test_jobdescription.cpp:50
msgid " [ JobDescription tester ] "
msgstr " [ тестировщик JobDescription ] "

#: src/hed/libs/compute/test_jobdescription.cpp:74
msgid " [ Parsing the original text ] "
msgstr " [ Обрабатывается исходный текст ] "

#: src/hed/libs/compute/test_jobdescription.cpp:80
msgid "Unable to parse."
msgstr "Не удалось обработать."
#: src/hed/libs/compute/test_jobdescription.cpp:89
msgid " [ emies:adl ] "
msgstr " [ emies:adl ] "

#: src/hed/libs/compute/test_jobdescription.cpp:91
msgid " [ nordugrid:xrsl ] "
msgstr " [ nordugrid:xrsl ] "

#: src/hed/libs/credential/CertUtil.cpp:127
#, c-format
msgid "Error number in store context: %i"
msgstr "Номер ошибки в контексте хранилища: %i"

#: src/hed/libs/credential/CertUtil.cpp:128
msgid "Self-signed certificate"
msgstr "Самостоятельно подписанный сертификат"

#: src/hed/libs/credential/CertUtil.cpp:131
#, c-format
msgid "The certificate with subject %s is not valid"
msgstr "Сертификат с субъектом %s недействителен"

#: src/hed/libs/credential/CertUtil.cpp:134
#, c-format
msgid "Can not find issuer certificate for the certificate with subject %s and hash: %lu"
msgstr "Невозможно найти сертификат агентства, выдавшего сертификат с субъектом %s и отпечатком: %lu"

#: src/hed/libs/credential/CertUtil.cpp:137
#, c-format
msgid "Certificate with subject %s has expired"
msgstr "Срок действия сертификата с субъектом %s истёк"

#: src/hed/libs/credential/CertUtil.cpp:140
#, c-format
msgid "Untrusted self-signed certificate in chain with subject %s and hash: %lu"
msgstr "Цепочка содержит недоверяемый самоподписанный сертификат с субъектом %s и отпечатком: %lu"

#: src/hed/libs/credential/CertUtil.cpp:142
#, c-format
msgid "Certificate verification error: %s"
msgstr "Ошибка проверки сертификата: %s"

#: src/hed/libs/credential/CertUtil.cpp:154
msgid "Can not get the certificate type"
msgstr "Не удалось определить тип сертификата"

#: src/hed/libs/credential/CertUtil.cpp:194
msgid "Couldn't verify availability of CRL"
msgstr "Невозможно подтвердить доступность списков отзыва сертификатов (CRL)"

#: src/hed/libs/credential/CertUtil.cpp:207
msgid "In the available CRL the lastUpdate field is not valid"
msgstr "В доступном списке отзыва сертификатов (CRL) значение lastUpdate недействительно"

#: src/hed/libs/credential/CertUtil.cpp:214
msgid "The available CRL is not yet valid"
msgstr "Доступный список отзыва сертификатов (CRL) пока недействителен"

#: src/hed/libs/credential/CertUtil.cpp:223
msgid "In the available CRL, the nextUpdate field is not valid"
msgstr "В доступном списке отзыва сертификатов (CRL) значение nextUpdate недействительно"

#: src/hed/libs/credential/CertUtil.cpp:229
msgid "The available CRL has expired"
msgstr "Доступный список отзыва сертификатов (CRL) просрочен"

#: src/hed/libs/credential/CertUtil.cpp:252
#, c-format
msgid "Certificate with serial number %s and subject \"%s\" is revoked"
msgstr "Сертификат с серийным номером %s и субъектом \"%s\" отозван"

#: src/hed/libs/credential/CertUtil.cpp:270
msgid "Directory of trusted CAs is not specified/found; Using current path as the CA direcroty"
msgstr "Каталог доверяемых агентств не указан/найден; в качестве такового используется текущий путь"

#: src/hed/libs/credential/CertUtil.cpp:279
msgid "Can't allocate memory for CA policy path"
msgstr "Невозможно выделить память для пути к файлу политик агентства"

#: src/hed/libs/credential/CertUtil.cpp:325
#, c-format
msgid "Certificate has unknown extension with numeric ID %u and SN %s"
msgstr "Сертификат содержит неизвестное расширение с численным идентификатором %u и именем субъекта %s"

#: src/hed/libs/credential/CertUtil.cpp:339
#: src/hed/libs/credential/Credential.cpp:1727
msgid "Can not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal format"
msgstr "Невозможно преобразовать расширение PROXY_CERT_INFO_EXTENSION в кодировке DER во внутренний формат"
#: src/hed/libs/credential/CertUtil.cpp:385
msgid "Trying to check X509 cert with check_cert_type"
msgstr "Попытка проверки сертификата X509 с помощью check_cert_type"

#: src/hed/libs/credential/CertUtil.cpp:424
msgid "Can't convert DER encoded PROXYCERTINFO extension to internal format"
msgstr "Невозможно преобразовать расширение PROXYCERTINFO в кодировке DER во внутренний формат"

#: src/hed/libs/credential/CertUtil.cpp:428
msgid "Can't get policy from PROXYCERTINFO extension"
msgstr "Невозможно извлечь политику из расширения PROXYCERTINFO"

#: src/hed/libs/credential/CertUtil.cpp:432
msgid "Can't get policy language from PROXYCERTINFO extension"
msgstr "Невозможно извлечь язык политики из расширения PROXYCERTINFO"

#: src/hed/libs/credential/CertUtil.cpp:464
msgid "The subject does not match the issuer name + proxy CN entry"
msgstr "Субъект не соответствует имени выдавшего агентства и атрибуту доверенности CN"

#: src/hed/libs/credential/Credential.cpp:48
#, c-format
msgid "OpenSSL error string: %s"
msgstr "Ошибка OpenSSL: %s"

#: src/hed/libs/credential/Credential.cpp:169
msgid "Can't get the first byte of input to determine its format"
msgstr "Невозможно получить первый байт сертификата, чтобы определить его формат"

#: src/hed/libs/credential/Credential.cpp:183
msgid "Can't reset the input"
msgstr "Невозможно сбросить ввод"

#: src/hed/libs/credential/Credential.cpp:208
#: src/hed/libs/credential/Credential.cpp:244
msgid "Can't get the first byte of input BIO to get its format"
msgstr "Невозможно получить первый байт сертификата, чтобы определить его формат"

#: src/hed/libs/credential/Credential.cpp:220
msgid "Can not read certificate/key string"
msgstr "Невозможно прочесть строку сертификата/ключа"

#: src/hed/libs/credential/Credential.cpp:433
#, c-format
msgid "Can not find certificate file: %s"
msgstr "Не найден файл сертификата: %s"

#: src/hed/libs/credential/Credential.cpp:438
#, c-format
msgid "Can not read certificate file: %s"
msgstr "Не удалось прочитать файл сертификата: %s"

#: src/hed/libs/credential/Credential.cpp:476
msgid "Can not read certificate string"
msgstr "Не удалось прочитать сертификат"

#: src/hed/libs/credential/Credential.cpp:496
msgid "Certificate format is PEM"
msgstr "Сертификат в формате PEM"

#: src/hed/libs/credential/Credential.cpp:523
msgid "Certificate format is DER"
msgstr "Сертификат в формате DER"

#: src/hed/libs/credential/Credential.cpp:552
msgid "Certificate format is PKCS"
msgstr "Сертификат в формате PKCS"

#: src/hed/libs/credential/Credential.cpp:578
msgid "Certificate format is unknown"
msgstr "Формат сертификата неизвестен"

#: src/hed/libs/credential/Credential.cpp:586
#, c-format
msgid "Can not find key file: %s"
msgstr "Не удалось обнаружить файл личного ключа: %s"

#: src/hed/libs/credential/Credential.cpp:591
#, c-format
msgid "Can not open key file %s"
msgstr "Не удалось открыть файл личного ключа %s"

#: src/hed/libs/credential/Credential.cpp:610
msgid "Can not read key string"
msgstr "Не удалось прочитать личный ключ"

#: src/hed/libs/credential/Credential.cpp:673
#: src/hed/libs/credential/VOMSUtil.cpp:210
msgid "Failed to lock arccredential library in memory"
msgstr "Невозможно заблокировать библиотеку arccredential в памяти"

#: src/hed/libs/credential/Credential.cpp:685
msgid "Certificate verification succeeded"
msgstr "Успешное подтверждение сертификата"

#: src/hed/libs/credential/Credential.cpp:689
msgid "Certificate verification failed"
msgstr "Сертификат не подтверждён"
verification failed" msgstr "Сертификат не подтверждён" #: src/hed/libs/credential/Credential.cpp:702 #: src/hed/libs/credential/Credential.cpp:722 #: src/hed/libs/credential/Credential.cpp:742 #: src/hed/libs/credential/Credential.cpp:1024 #: src/hed/libs/credential/Credential.cpp:2398 #: src/hed/libs/credential/Credential.cpp:2428 msgid "Failed to initialize extensions member for Credential" msgstr "Сбой инициализации раздела раÑширений параметров доÑтупа" #: src/hed/libs/credential/Credential.cpp:787 #, c-format msgid "Unsupported proxy policy language is requested - %s" msgstr "Запрошен неподдерживаемый Ñзык политик доверенноÑти - %s" #: src/hed/libs/credential/Credential.cpp:799 #, c-format msgid "Unsupported proxy version is requested - %s" msgstr "Запрошена Ð½ÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ Ð²ÐµÑ€ÑÐ¸Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñти - %s" #: src/hed/libs/credential/Credential.cpp:810 msgid "If you specify a policy you also need to specify a policy language" msgstr "Ð£ÐºÐ°Ð·Ñ‹Ð²Ð°Ñ Ð¿Ð¾Ð»Ð¸Ñ‚Ð¸ÐºÑƒ, указывайте также её Ñзык" #: src/hed/libs/credential/Credential.cpp:857 #, c-format msgid "Error: can't open policy file: %s" msgstr "Ошибка: невозможно открыть файл политик: %s" #: src/hed/libs/credential/Credential.cpp:870 #, c-format msgid "Error: policy location: %s is not a regular file" msgstr "Ошибка: меÑтонахождение политик: %s не ÑвлÑетÑÑ Ñтандартным файлом" #: src/hed/libs/credential/Credential.cpp:929 #: src/hed/libs/credential/Credential.cpp:962 #: src/hed/libs/credential/Credential.cpp:1029 msgid "Certificate/Proxy path is empty" msgstr "Путь к Ñертификату/доверенноÑти не задан" #: src/hed/libs/credential/Credential.cpp:1087 #: src/hed/libs/credential/Credential.cpp:2937 msgid "Failed to duplicate extension" msgstr "Ðе удалоÑÑŒ Ñкопировать раÑширение" #: src/hed/libs/credential/Credential.cpp:1091 msgid "Failed to add extension into credential extensions" msgstr "Ðе удалоÑÑŒ добавить раÑширение к раÑширениÑм параметров доÑтупа" #: src/hed/libs/credential/Credential.cpp:1104 msgid "Certificate information collection failed" msgstr "Сбой Ñбора информации о Ñертификате" #: src/hed/libs/credential/Credential.cpp:1143 #: src/hed/libs/credential/Credential.cpp:1148 msgid "Can not convert string into ASN1_OBJECT" msgstr "Ðевозможно преобразовать Ñтроку в ASN1_OBJECT" #: src/hed/libs/credential/Credential.cpp:1155 msgid "Can not create ASN1_OCTET_STRING" msgstr "Ðевозможно Ñоздать ASN1_OCTET_STRING" #: src/hed/libs/credential/Credential.cpp:1164 msgid "Can not allocate memory for extension for proxy certificate" msgstr "Ðевозможно зарезервировать памÑть Ð´Ð»Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñти" #: src/hed/libs/credential/Credential.cpp:1174 msgid "Can not create extension for proxy certificate" msgstr "Ðевозможно Ñоздать раÑширение Ð´Ð»Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñти" #: src/hed/libs/credential/Credential.cpp:1210 #: src/hed/libs/credential/Credential.cpp:1378 msgid "BN_set_word failed" msgstr "Сбой метода BN_set_word" #: src/hed/libs/credential/Credential.cpp:1219 #: src/hed/libs/credential/Credential.cpp:1387 msgid "RSA_generate_key_ex failed" msgstr "Сбой метода RSA_generate_key_ex" #: src/hed/libs/credential/Credential.cpp:1228 #: src/hed/libs/credential/Credential.cpp:1395 msgid "BN_new || RSA_new failed" msgstr "Сбой метода BN_new или RSA_new" #: src/hed/libs/credential/Credential.cpp:1239 msgid "Created RSA key, proceeding with request" msgstr "Создан ключ RSA, теперь обрабатываетÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ" #: src/hed/libs/credential/Credential.cpp:1244 msgid "pkey and rsa_key exist!" 
msgstr "pkey и rsa_key ÑущеÑтвуют!" #: src/hed/libs/credential/Credential.cpp:1247 msgid "Generate new X509 request!" msgstr "Создайте новый Ð·Ð°Ð¿Ñ€Ð¾Ñ X509!" #: src/hed/libs/credential/Credential.cpp:1252 msgid "Setting subject name!" msgstr "ЗадаётÑÑ Ð¸Ð¼Ñ Ñубъекта!" #: src/hed/libs/credential/Credential.cpp:1260 #: src/hed/libs/credential/Credential.cpp:1474 msgid "PEM_write_bio_X509_REQ failed" msgstr "Сбой PEM_write_bio_X509_REQ" #: src/hed/libs/credential/Credential.cpp:1290 #: src/hed/libs/credential/Credential.cpp:1331 #: src/hed/libs/credential/Credential.cpp:1506 #: src/hed/libs/credential/Credential.cpp:1526 msgid "Can not create BIO for request" msgstr "Ðевозможно Ñоздать BIO Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа" #: src/hed/libs/credential/Credential.cpp:1308 msgid "Failed to write request into string" msgstr "Ðе удалоÑÑŒ запиÑать Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð² Ñтроку" #: src/hed/libs/credential/Credential.cpp:1335 #: src/hed/libs/credential/Credential.cpp:1340 #: src/hed/libs/credential/Credential.cpp:1530 msgid "Can not set writable file for request BIO" msgstr "Ðевозможно Ñоздать запиÑываемый файл Ð´Ð»Ñ BIO запроÑа" #: src/hed/libs/credential/Credential.cpp:1346 #: src/hed/libs/credential/Credential.cpp:1535 msgid "Wrote request into a file" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð·Ð°Ð¿Ð¸Ñан в файл" #: src/hed/libs/credential/Credential.cpp:1348 #: src/hed/libs/credential/Credential.cpp:1538 msgid "Failed to write request into a file" msgstr "Ðе удалоÑÑŒ запиÑать Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð² файл" #: src/hed/libs/credential/Credential.cpp:1368 msgid "The credential's private key has already been initialized" msgstr "Закрытый ключ параметров доÑтупа уже инициализирован" #: src/hed/libs/credential/Credential.cpp:1416 msgid "" "Can not duplicate the subject name for the self-signing proxy certificate " "request" msgstr "" "Ðевозможно дублировать Ð¸Ð¼Ñ Ñубъекта Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа ÑамозаверÑющей доверенноÑти" #: src/hed/libs/credential/Credential.cpp:1426 msgid "Can not create a new X509_NAME_ENTRY for the proxy certificate request" msgstr "" "Ðевозможно Ñоздать новую переменную X509_NAME_ENTRY Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа доверенноÑти" #: src/hed/libs/credential/Credential.cpp:1444 #: src/hed/libs/credential/Credential.cpp:1451 #: src/hed/libs/credential/Credential.cpp:2029 #: src/hed/libs/credential/Credential.cpp:2037 msgid "" "Can not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER " "encoded format" msgstr "" "Ðевозможно преобразовать Ñтруктуру PROXY_CERT_INFO_EXTENSION из внутреннего " "формата в DER" #: src/hed/libs/credential/Credential.cpp:1481 msgid "Can't convert X509 request from internal to DER encoded format" msgstr "Ðевозможно преобразовать Ð·Ð°Ð¿Ñ€Ð¾Ñ X509 из внутреннего формата в DER" #: src/hed/libs/credential/Credential.cpp:1491 msgid "Can not generate X509 request" msgstr "Ðе удалоÑÑŒ Ñоздать Ð·Ð°Ð¿Ñ€Ð¾Ñ X509" #: src/hed/libs/credential/Credential.cpp:1493 msgid "Can not set private key" msgstr "Ðе удалоÑÑŒ задать закрытый ключ" #: src/hed/libs/credential/Credential.cpp:1591 msgid "Failed to get private key" msgstr "Ðе удалоÑÑŒ получить закрытый ключ" #: src/hed/libs/credential/Credential.cpp:1610 msgid "Failed to get public key from RSA object" msgstr "Ðевозможно извлечь открытый ключ из объекта RSA" #: src/hed/libs/credential/Credential.cpp:1618 msgid "Failed to get public key from X509 object" msgstr "Ðевозможно извлечь открытый ключ из объекта X509" #: src/hed/libs/credential/Credential.cpp:1625 msgid "Failed to get public key" msgstr "Ðе удалоÑÑŒ получить открытый ключ" #: src/hed/libs/credential/Credential.cpp:1663 
#, c-format msgid "Certiticate chain number %d" msgstr "Ðомер цепочки Ñертификатов %d" #: src/hed/libs/credential/Credential.cpp:1691 msgid "NULL BIO passed to InquireRequest" msgstr "NULL BIO передан в InquireRequest" #: src/hed/libs/credential/Credential.cpp:1694 msgid "PEM_read_bio_X509_REQ failed" msgstr "Сбой PEM_read_bio_X509_REQ" #: src/hed/libs/credential/Credential.cpp:1698 msgid "d2i_X509_REQ_bio failed" msgstr "Сбой d2i_X509_REQ_bio" #: src/hed/libs/credential/Credential.cpp:1720 msgid "Missing data in DER encoded PROXY_CERT_INFO_EXTENSION extension" msgstr "" "ÐедоÑтаточно данных в раÑширении PROXY_CERT_INFO_EXTENSION в кодировке DER" #: src/hed/libs/credential/Credential.cpp:1732 msgid "Can not create PROXY_CERT_INFO_EXTENSION extension" msgstr "Ðевозможно Ñоздать раÑширение PROXY_CERT_INFO_EXTENSION" #: src/hed/libs/credential/Credential.cpp:1742 msgid "Can not get policy from PROXY_CERT_INFO_EXTENSION extension" msgstr "Ðевозможно извлечь политику из раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ PROXY_CERT_INFO_EXTENSION" #: src/hed/libs/credential/Credential.cpp:1746 msgid "Can not get policy language from PROXY_CERT_INFO_EXTENSION extension" msgstr "" "Ðевозможно извлечь Ñзык политик из раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ PROXY_CERT_INFO_EXTENSION" #: src/hed/libs/credential/Credential.cpp:1762 #, c-format msgid "Cert Type: %d" msgstr "Тип Ñертификата: %d" #: src/hed/libs/credential/Credential.cpp:1775 #: src/hed/libs/credential/Credential.cpp:1794 msgid "Can not create BIO for parsing request" msgstr "Ðевозможно Ñоздать BIO Ð´Ð»Ñ Ñ€Ð°Ð·Ð±Ð¾Ñ€Ð° запроÑа" #: src/hed/libs/credential/Credential.cpp:1780 msgid "Read request from a string" msgstr "Чтение запроÑа из Ñтроки" #: src/hed/libs/credential/Credential.cpp:1783 msgid "Failed to read request from a string" msgstr "Сбой при чтении запроÑа из Ñтроки" #: src/hed/libs/credential/Credential.cpp:1798 msgid "Can not set readable file for request BIO" msgstr "Ðевозможно открыть на чтение файл Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа BIO" #: src/hed/libs/credential/Credential.cpp:1803 msgid "Read request from a file" msgstr "ПрочеÑть Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¸Ð· файла" #: src/hed/libs/credential/Credential.cpp:1806 msgid "Failed to read request from a file" msgstr "Произошёл Ñбой при чтении запроÑа из файла" #: src/hed/libs/credential/Credential.cpp:1846 msgid "Can not convert private key to DER format" msgstr "Ðевозможно преобразовать закрытый ключ в формат DER" #: src/hed/libs/credential/Credential.cpp:2010 msgid "Credential is not initialized" msgstr "Параметры доÑтупа не инициализированы" #: src/hed/libs/credential/Credential.cpp:2016 msgid "Failed to duplicate X509 structure" msgstr "Ðе удалоÑÑŒ Ñкопировать Ñтруктуру X509" #: src/hed/libs/credential/Credential.cpp:2021 msgid "Failed to initialize X509 structure" msgstr "Ðе удалоÑÑŒ инициализировать Ñтруктуру X509" #: src/hed/libs/credential/Credential.cpp:2044 msgid "Can not create extension for PROXY_CERT_INFO" msgstr "Ðевозможно Ñоздать раÑширение Ð´Ð»Ñ PROXY_CERT_INFO" #: src/hed/libs/credential/Credential.cpp:2048 #: src/hed/libs/credential/Credential.cpp:2096 msgid "Can not add X509 extension to proxy cert" msgstr "Ðевозможно добавить раÑширение X509 к доверенноÑти" #: src/hed/libs/credential/Credential.cpp:2064 msgid "Can not convert keyUsage struct from DER encoded format" msgstr "Ðевозможно преобразовать Ñтруктуру keyUsage из формата кодировки DER" #: src/hed/libs/credential/Credential.cpp:2076 #: src/hed/libs/credential/Credential.cpp:2085 msgid "Can not convert keyUsage struct from internal to DER format" msgstr "" "Ðевозможно преобразовать 
#: src/hed/libs/credential/Credential.cpp:2092
msgid "Can not create extension for keyUsage"
msgstr "Невозможно создать расширение для keyUsage"

#: src/hed/libs/credential/Credential.cpp:2105
msgid "Can not get extended KeyUsage extension from issuer certificate"
msgstr "Невозможно получить расширенное расширение KeyUsage из сертификата агентства"

#: src/hed/libs/credential/Credential.cpp:2110
msgid "Can not copy extended KeyUsage extension"
msgstr "Невозможно скопировать расширенное расширение KeyUsage"

#: src/hed/libs/credential/Credential.cpp:2115
msgid "Can not add X509 extended KeyUsage extension to new proxy certificate"
msgstr "Невозможно добавить расширенное X509 расширение KeyUsage к новой доверенности"

#: src/hed/libs/credential/Credential.cpp:2125
msgid "Can not compute digest of public key"
msgstr "Не удалось вычислить профиль открытого ключа"

#: src/hed/libs/credential/Credential.cpp:2136
msgid "Can not copy the subject name from issuer for proxy certificate"
msgstr "Невозможно скопировать имя субъекта выдающего агентства в доверенность"

#: src/hed/libs/credential/Credential.cpp:2142
msgid "Can not create name entry CN for proxy certificate"
msgstr "Невозможно создать компонент названия CN для доверенности"

#: src/hed/libs/credential/Credential.cpp:2147
msgid "Can not set CN in proxy certificate"
msgstr "Невозможно задать элемент CN в доверенности"

#: src/hed/libs/credential/Credential.cpp:2155
msgid "Can not set issuer's subject for proxy certificate"
msgstr "Невозможно задать имя выдающего агентства в доверенности"

#: src/hed/libs/credential/Credential.cpp:2160
msgid "Can not set version number for proxy certificate"
msgstr "Невозможно задать номер версии в доверенности"

#: src/hed/libs/credential/Credential.cpp:2168
msgid "Can not set serial number for proxy certificate"
msgstr "Невозможно задать серийный номер в доверенности"

#: src/hed/libs/credential/Credential.cpp:2174
msgid "Can not duplicate serial number for proxy certificate"
msgstr "Невозможно скопировать серийный номер для сертификата доверенности"

#: src/hed/libs/credential/Credential.cpp:2180
msgid "Can not set the lifetime for proxy certificate"
msgstr "Невозможно задать срок годности доверенности"

#: src/hed/libs/credential/Credential.cpp:2184
msgid "Can not set pubkey for proxy certificate"
msgstr "Невозможно задать открытый ключ доверенности"

#: src/hed/libs/credential/Credential.cpp:2200
#: src/hed/libs/credential/Credential.cpp:2827
msgid "The credential to be signed is NULL"
msgstr "Параметры доступа для подписи имеют значение NULL"

#: src/hed/libs/credential/Credential.cpp:2204
#: src/hed/libs/credential/Credential.cpp:2831
msgid "The credential to be signed contains no request"
msgstr "Параметры доступа для подписи не содержат запроса"

#: src/hed/libs/credential/Credential.cpp:2208
#: src/hed/libs/credential/Credential.cpp:2835
msgid "The BIO for output is NULL"
msgstr "BIO для выхода: NULL"

#: src/hed/libs/credential/Credential.cpp:2222
#: src/hed/libs/credential/Credential.cpp:2842
msgid "Error when extracting public key from request"
msgstr "Ошибка при извлечении открытого ключа из запроса"

#: src/hed/libs/credential/Credential.cpp:2227
#: src/hed/libs/credential/Credential.cpp:2846
msgid "Failed to verify the request"
msgstr "Не удалось подтвердить запрос"

#: src/hed/libs/credential/Credential.cpp:2231
msgid "Failed to add issuer's extension into proxy"
msgstr "Сбой добавления расширения выдающего агентства в доверенность"
Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð²Ñ‹Ð´Ð°ÑŽÑ‰ÐµÐ³Ð¾ агентÑтва в доверенноÑть" #: src/hed/libs/credential/Credential.cpp:2255 msgid "Failed to find extension" msgstr "Ðе удалоÑÑŒ найти раÑширение" #: src/hed/libs/credential/Credential.cpp:2267 msgid "Can not get the issuer's private key" msgstr "Ðевозможно извлечь закрытый ключ выдающего агентÑтва" #: src/hed/libs/credential/Credential.cpp:2274 #: src/hed/libs/credential/Credential.cpp:2878 msgid "There is no digest in issuer's private key object" msgstr "Ð’ объекте закрытого ключа Ð¸Ð·Ð´Ð°Ñ‚ÐµÐ»Ñ Ð¾Ñ‚ÑутÑтвует профиль" #: src/hed/libs/credential/Credential.cpp:2279 #: src/hed/libs/credential/Credential.cpp:2882 #, c-format msgid "%s is an unsupported digest type" msgstr "%s не ÑвлÑетÑÑ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ñ‹Ð¼ типом профилÑ" #: src/hed/libs/credential/Credential.cpp:2290 #, c-format msgid "" "The signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign " "certificate requests" msgstr "" "ÐедопуÑтимый алгоритм подпиÑи %s: запроÑÑ‹ Ñертификата должны подпиÑыватьÑÑ " "SHA1 или SHA2" #: src/hed/libs/credential/Credential.cpp:2296 msgid "Failed to sign the proxy certificate" msgstr "Ðе удалоÑÑŒ подпиÑать доверенноÑть" #: src/hed/libs/credential/Credential.cpp:2298 msgid "Succeeded to sign the proxy certificate" msgstr "ДоверенноÑть уÑпешно подпиÑана" #: src/hed/libs/credential/Credential.cpp:2303 msgid "Failed to verify the signed certificate" msgstr "Сбой проверки подпиÑанного Ñертификата" #: src/hed/libs/credential/Credential.cpp:2305 msgid "Succeeded to verify the signed certificate" msgstr "ПодпиÑанный Ñертификат уÑпешно проверен" #: src/hed/libs/credential/Credential.cpp:2310 #: src/hed/libs/credential/Credential.cpp:2319 msgid "Output the proxy certificate" msgstr "Вывод доверенноÑти" #: src/hed/libs/credential/Credential.cpp:2313 msgid "Can not convert signed proxy cert into PEM format" msgstr "Ðевозможно преобразовать подпиÑанную доверенноÑть в формат PEM" #: src/hed/libs/credential/Credential.cpp:2322 msgid "Can not convert signed proxy cert into DER format" msgstr "Ðевозможно преобразовать подпиÑанную доверенноÑть в формат DER" #: src/hed/libs/credential/Credential.cpp:2338 #: src/hed/libs/credential/Credential.cpp:2361 msgid "Can not create BIO for signed proxy certificate" msgstr "" "Ðевозможно Ñоздать неформатированный ввод/вывод BIO Ð´Ð»Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñанной " "доверенноÑти" #: src/hed/libs/credential/Credential.cpp:2365 msgid "Can not set writable file for signed proxy certificate BIO" msgstr "" "Ðевозможно открыть на запиÑÑŒ файл Ð´Ð»Ñ Ð½ÐµÑ„Ð¾Ñ€Ð¼Ð°Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð¾Ð³Ð¾ ввода/вывода " "подпиÑанной доверенноÑти" #: src/hed/libs/credential/Credential.cpp:2370 msgid "Wrote signed proxy certificate into a file" msgstr "ПодпиÑÐ°Ð½Ð½Ð°Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñть запиÑана в файл" #: src/hed/libs/credential/Credential.cpp:2373 msgid "Failed to write signed proxy certificate into a file" msgstr "Сбой запиÑи подпиÑанной доверенноÑти в файл" #: src/hed/libs/credential/Credential.cpp:2408 #: src/hed/libs/credential/Credential.cpp:2447 #, c-format msgid "ERROR: %s" msgstr "Ошибка: %s" #: src/hed/libs/credential/Credential.cpp:2455 #, c-format msgid "SSL error: %s, libs: %s, func: %s, reason: %s" msgstr "Ошибка SSL: %s, libs: %s, func: %s, причина: %s" #: src/hed/libs/credential/Credential.cpp:2500 #, c-format msgid "unable to load number from: %s" msgstr "невозможно прочеÑть номер из: %s" #: src/hed/libs/credential/Credential.cpp:2505 msgid "error converting number from bin to BIGNUM" msgstr "ошибка Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ‡Ð¸Ñла из bin в 
BIGNUM" #: src/hed/libs/credential/Credential.cpp:2532 msgid "file name too long" msgstr "Ñлишком длинное Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð°" #: src/hed/libs/credential/Credential.cpp:2555 msgid "error converting serial to ASN.1 format" msgstr "ошибка Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ Ñерийного номера в формат ASN.1" #: src/hed/libs/credential/Credential.cpp:2588 #, c-format msgid "load serial from %s failure" msgstr "Ñбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñерийного номера из %s" #: src/hed/libs/credential/Credential.cpp:2593 msgid "add_word failure" msgstr "Сбой add_word" #: src/hed/libs/credential/Credential.cpp:2598 #, c-format msgid "save serial to %s failure" msgstr "Ñбой запиÑи Ñерийного номера в %s" #: src/hed/libs/credential/Credential.cpp:2618 msgid "Error initialising X509 store" msgstr "Ошибка при инициализации хранилища X509" #: src/hed/libs/credential/Credential.cpp:2625 msgid "Out of memory when generate random serial" msgstr "ÐедоÑтаточно памÑти Ð´Ð»Ñ ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñлучайного Ñерийного номера" #: src/hed/libs/credential/Credential.cpp:2637 msgid "CA certificate and CA private key do not match" msgstr "Сертификат и закрытый ключ агентÑтва не Ñовпадают" #: src/hed/libs/credential/Credential.cpp:2661 #, c-format msgid "Failed to load extension section: %s" msgstr "Сбой загрузки раздела раÑширений: %s" #: src/hed/libs/credential/Credential.cpp:2698 msgid "malloc error" msgstr "ошибка malloc" #: src/hed/libs/credential/Credential.cpp:2702 msgid "Subject does not start with '/'" msgstr "Субъект не начинаетÑÑ Ñ '/'" #: src/hed/libs/credential/Credential.cpp:2718 #: src/hed/libs/credential/Credential.cpp:2739 msgid "escape character at end of string" msgstr "Ñимвол выхода в конце Ñтроки" #: src/hed/libs/credential/Credential.cpp:2730 #, c-format msgid "" "end of string encountered while processing type of subject name element #%d" msgstr "доÑтигнут конец Ñтроки при обработке типа Ñлемента имени Ñубъекта #%d" #: src/hed/libs/credential/Credential.cpp:2767 #, c-format msgid "Subject Attribute %s has no known NID, skipped" msgstr "Ðтрибут Ñубъекта %s не Ñодержит извеÑтного NID, пропуÑкаетÑÑ" #: src/hed/libs/credential/Credential.cpp:2771 #, c-format msgid "No value provided for Subject Attribute %s skipped" msgstr "Ðе задана значение атрибута Ñубъекта %s, пропуÑкаетÑÑ" #: src/hed/libs/credential/Credential.cpp:2812 msgid "Failed to set the pubkey for X509 object by using pubkey from X509_REQ" msgstr "" "Ðе удалоÑÑŒ задать открытый ключ Ð´Ð»Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° X509 иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ñ‹Ð¹ ключ из " "X509_REQ" #: src/hed/libs/credential/Credential.cpp:2822 msgid "The private key for signing is not initialized" msgstr "Закрытый ключ Ð´Ð»Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñи не инициализирован" #: src/hed/libs/credential/Credential.cpp:2901 #, c-format msgid "Error when loading the extension config file: %s" msgstr "Ошибка при загрузке файла наÑтроек раÑширений: %s" #: src/hed/libs/credential/Credential.cpp:2905 #, c-format msgid "Error when loading the extension config file: %s on line: %d" msgstr "Ошибка при загрузке файла наÑтроек раÑширений: %s в Ñтроке: %d" #: src/hed/libs/credential/Credential.cpp:2953 msgid "Can not sign a EEC" msgstr "Ðевозможно подпиÑать EEC" #: src/hed/libs/credential/Credential.cpp:2957 msgid "Output EEC certificate" msgstr "Вывод Ñертификата EEC" #: src/hed/libs/credential/Credential.cpp:2960 msgid "Can not convert signed EEC cert into DER format" msgstr "Ðевозможно преобразовать подпиÑанный Ñертификат EEC в формат DER" #: src/hed/libs/credential/Credential.cpp:2974 #: src/hed/libs/credential/Credential.cpp:2993 msgid 
"Can not create BIO for signed EEC certificate" msgstr "" "Ðевозможно Ñоздать неформатированный ввод/вывод BIO Ð´Ð»Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñанного " "Ñертификата EEC" #: src/hed/libs/credential/Credential.cpp:2997 msgid "Can not set writable file for signed EEC certificate BIO" msgstr "" "Ðевозможно открыть на запиÑÑŒ файл Ð´Ð»Ñ Ð½ÐµÑ„Ð¾Ñ€Ð¼Ð°Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð¾Ð³Ð¾ ввода/вывода " "подпиÑанного Ñертификата EEC" #: src/hed/libs/credential/Credential.cpp:3002 msgid "Wrote signed EEC certificate into a file" msgstr "ПодпиÑанный Ñертификат EEC запиÑан в файл" #: src/hed/libs/credential/Credential.cpp:3005 msgid "Failed to write signed EEC certificate into a file" msgstr "Сбой запиÑи подпиÑанного Ñертификата EEC в файл" #: src/hed/libs/credential/NSSUtil.cpp:143 msgid "Error writing raw certificate" msgstr "Ошибка запиÑи иÑходного Ñертификата" #: src/hed/libs/credential/NSSUtil.cpp:222 msgid "Failed to add RFC proxy OID" msgstr "Ðе удалоÑÑŒ добавить OID доверенноÑти RFC" #: src/hed/libs/credential/NSSUtil.cpp:225 #, c-format msgid "Succeeded to add RFC proxy OID, tag %d is returned" msgstr "УÑпешно добавлен OID доверенноÑти RFC, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:231 msgid "Failed to add anyLanguage OID" msgstr "Ðе удалоÑÑŒ добавить anyLanguage OID" #: src/hed/libs/credential/NSSUtil.cpp:234 #, c-format msgid "Succeeded to add anyLanguage OID, tag %d is returned" msgstr "УÑпешно добавлен OID anyLanguage, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:240 msgid "Failed to add inheritAll OID" msgstr "Ðе удалоÑÑŒ добавить inheritAll OID" #: src/hed/libs/credential/NSSUtil.cpp:243 #, c-format msgid "Succeeded to add inheritAll OID, tag %d is returned" msgstr "УÑпешно добавлен OID inheritAll, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:249 msgid "Failed to add Independent OID" msgstr "Ðе удалоÑÑŒ добавить Independent OID" #: src/hed/libs/credential/NSSUtil.cpp:252 #, c-format msgid "Succeeded to add Independent OID, tag %d is returned" msgstr "УÑпешно добавлен Independent OID, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:258 msgid "Failed to add VOMS AC sequence OID" msgstr "Ðе удалоÑÑŒ добавить OID поÑледовательноÑти VOMS AC" #: src/hed/libs/credential/NSSUtil.cpp:261 #, c-format msgid "Succeeded to add VOMS AC sequence OID, tag %d is returned" msgstr "УÑпешно добавлен OID поÑледовательноÑти VOMS AC, возвращена метка %d" #: src/hed/libs/credential/NSSUtil.cpp:290 #, c-format msgid "NSS initialization failed on certificate database: %s" msgstr "Ð˜Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ NSS оборвалаÑÑŒ на базе данных Ñертификатов: %s" #: src/hed/libs/credential/NSSUtil.cpp:301 msgid "Succeeded to initialize NSS" msgstr "NSS уÑпешно инициализирован" #: src/hed/libs/credential/NSSUtil.cpp:323 #, c-format msgid "Failed to read attribute %x from private key." msgstr "Ðе удалоÑÑŒ прочеÑть атрибут %x из закрытого ключа." 
#: src/hed/libs/credential/NSSUtil.cpp:375
msgid "Succeeded to get credential"
msgstr "Параметры доступа получены"

#: src/hed/libs/credential/NSSUtil.cpp:376
msgid "Failed to get credential"
msgstr "Не удалось получить параметры доступа"

#: src/hed/libs/credential/NSSUtil.cpp:438
msgid "p12 file is empty"
msgstr "Файл сертификата p12 пуст"

#: src/hed/libs/credential/NSSUtil.cpp:448
msgid "Unable to write to p12 file"
msgstr "Сбой записи в файл p12"

#: src/hed/libs/credential/NSSUtil.cpp:464
msgid "Failed to open p12 file"
msgstr "Сбой при открытии файла p12"

#: src/hed/libs/credential/NSSUtil.cpp:492
msgid "Failed to allocate p12 context"
msgstr "Не удалось зарезервировать контекст p12"

#: src/hed/libs/credential/NSSUtil.cpp:1200
msgid "Failed to find issuer certificate for proxy certificate"
msgstr "Не удалось обнаружить агентство, выдавшее сертификат доверенности"

#: src/hed/libs/credential/NSSUtil.cpp:1351
#, c-format
msgid "Failed to authenticate to PKCS11 slot %s"
msgstr "Сбой проверки подлинности для ячейки PKCS11 %s"

#: src/hed/libs/credential/NSSUtil.cpp:1357
#, c-format
msgid "Failed to find certificates by nickname: %s"
msgstr "Не удалось обнаружить сертификаты по краткому имени: %s"

#: src/hed/libs/credential/NSSUtil.cpp:1362
#, c-format
msgid "No user certificate by nickname %s found"
msgstr "Не удалось обнаружить сертификат пользователя с кратким именем %s"

#: src/hed/libs/credential/NSSUtil.cpp:1375
#: src/hed/libs/credential/NSSUtil.cpp:1411
msgid "Certificate does not have a slot"
msgstr "У сертификата нет ячейки"

#: src/hed/libs/credential/NSSUtil.cpp:1381
msgid "Failed to create export context"
msgstr "Не удалось создать контекст для экспорта"

#: src/hed/libs/credential/NSSUtil.cpp:1396
msgid "PKCS12 output password not provided"
msgstr "Не задан пароль для нового сертификата PKCS12"

#: src/hed/libs/credential/NSSUtil.cpp:1403
msgid "PKCS12 add password integrity failed"
msgstr "Не удалось задать способ проверки целостности PKCS12 и пароля"

#: src/hed/libs/credential/NSSUtil.cpp:1424
msgid "Failed to create key or certificate safe"
msgstr ""
"Не удалось создать безопасное хранилище для закрытого ключа или сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:1440
msgid "Failed to add certificate and key"
msgstr "Не удалось добавить закрытый ключ и сертификат"

#: src/hed/libs/credential/NSSUtil.cpp:1449
#, c-format
msgid "Failed to initialize PKCS12 file: %s"
msgstr "Не удалось инициализировать файл PKCS12: %s"

#: src/hed/libs/credential/NSSUtil.cpp:1454
msgid "Failed to encode PKCS12"
msgstr "Не удалось закодировать в формат PKCS12"

#: src/hed/libs/credential/NSSUtil.cpp:1457
msgid "Succeeded to export PKCS12"
msgstr "Удалось извлечь сертификат в формате PKCS12"

#: src/hed/libs/credential/NSSUtil.cpp:1485
#, c-format
msgid ""
"There is no certificate named %s found, the certificate could be removed "
"when generating CSR"
msgstr ""
"Не найден сертификат с именем %s, сертификат мог быть удалён при создании CSR"

#: src/hed/libs/credential/NSSUtil.cpp:1491
msgid "Failed to delete certificate"
msgstr "Не удалось уничтожить сертификат"

#: src/hed/libs/credential/NSSUtil.cpp:1505
msgid "The name of the private key to delete is empty"
msgstr "Имя закрытого ключа для уничтожения пусто"

#: src/hed/libs/credential/NSSUtil.cpp:1510
#: src/hed/libs/credential/NSSUtil.cpp:2939
#: src/hed/libs/credential/NSSUtil.cpp:2956
#, c-format
msgid "Failed to authenticate to token %s"
msgstr "Не удалось аутентифицироваться к маркёру %s"

#: src/hed/libs/credential/NSSUtil.cpp:1517
#, c-format
msgid "No private key with nickname %s exist in NSS database"
msgstr "Закрытый ключ с именем %s отсутствует в базе данных NSS"

#: src/hed/libs/credential/NSSUtil.cpp:1550
msgid "Failed to delete private key and certificate"
msgstr "Не удалось уничтожить закрытый ключ и сертификат"

#: src/hed/libs/credential/NSSUtil.cpp:1560
msgid "Failed to delete private key"
msgstr "Не удалось уничтожить закрытый ключ"

#: src/hed/libs/credential/NSSUtil.cpp:1571
#, c-format
msgid "Can not find key with name: %s"
msgstr "Не удалось найти закрытый ключ по имени: %s"

#: src/hed/libs/credential/NSSUtil.cpp:1599
msgid "Can not read PEM private key: probably bad password"
msgstr ""
"Невозможно прочесть закрытый ключ PEM: возможно, введён неверный пароль"

#: src/hed/libs/credential/NSSUtil.cpp:1601
msgid "Can not read PEM private key: failed to decrypt"
msgstr "Сбой при чтении файла личного ключа PEM: не удалось расшифровать"

#: src/hed/libs/credential/NSSUtil.cpp:1603
#: src/hed/libs/credential/NSSUtil.cpp:1605
msgid "Can not read PEM private key: failed to obtain password"
msgstr "Сбой при чтении файла личного ключа PEM: не был введён пароль"

#: src/hed/libs/credential/NSSUtil.cpp:1606
msgid "Can not read PEM private key"
msgstr "Не удалось прочесть закрытый ключ PEM"

#: src/hed/libs/credential/NSSUtil.cpp:1613
msgid "Failed to convert EVP_PKEY to PKCS8"
msgstr "Не удалось преобразовать EVP_PKEY в PKCS8"

#: src/hed/libs/credential/NSSUtil.cpp:1650
msgid "Failed to load private key"
msgstr "Не удалось загрузить закрытый ключ"

#: src/hed/libs/credential/NSSUtil.cpp:1651
msgid "Succeeded to load PrivateKeyInfo"
msgstr "Успешно подгружен PrivateKeyInfo"

#: src/hed/libs/credential/NSSUtil.cpp:1654
msgid "Failed to convert PrivateKeyInfo to EVP_PKEY"
msgstr "Сбой преобразования PrivateKeyInfo в EVP_PKEY"

#: src/hed/libs/credential/NSSUtil.cpp:1655
msgid "Succeeded to convert PrivateKeyInfo to EVP_PKEY"
msgstr "Успешное преобразование PrivateKeyInfo в EVP_PKEY"

#: src/hed/libs/credential/NSSUtil.cpp:1692
msgid "Failed to import private key"
msgstr "Не удалось импортировать закрытый ключ"

#: src/hed/libs/credential/NSSUtil.cpp:1695
msgid "Succeeded to import private key"
msgstr "Закрытый ключ успешно импортирован"

#: src/hed/libs/credential/NSSUtil.cpp:1708
#: src/hed/libs/credential/NSSUtil.cpp:1750
#: src/hed/libs/credential/NSSUtil.cpp:2889
msgid "Failed to authenticate to key database"
msgstr "Сбой проверки подлинности на базе данных ключей"

#: src/hed/libs/credential/NSSUtil.cpp:1717
msgid "Succeeded to generate public/private key pair"
msgstr "Успешное создание пары открытого/закрытого ключей"

#: src/hed/libs/credential/NSSUtil.cpp:1719
msgid "Failed to generate public/private key pair"
msgstr "Сбой создания пары открытого/закрытого ключей"

#: src/hed/libs/credential/NSSUtil.cpp:1724
msgid "Failed to export private key"
msgstr "Не удалось экспортировать закрытый ключ"

#: src/hed/libs/credential/NSSUtil.cpp:1791
msgid "Failed to create subject name"
msgstr "Не удалось сформировать имя субъекта"

#: src/hed/libs/credential/NSSUtil.cpp:1807
msgid "Failed to create certificate request"
msgstr "Не удалось создать запрос сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:1820
msgid "Failed to call PORT_NewArena"
msgstr "Не удалось вызвать PORT_NewArena"

#: src/hed/libs/credential/NSSUtil.cpp:1828
msgid "Failed to encode the certificate request with DER format"
msgstr "Сбой кодирования запроса сертификата в формате DER"

#: src/hed/libs/credential/NSSUtil.cpp:1835
msgid "Unknown key or hash type"
msgstr "Неизвестный ключ или тип хеширования"

#: src/hed/libs/credential/NSSUtil.cpp:1841
msgid "Failed to sign the certificate request"
msgstr "Не удалось подписать запрос сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:1857
msgid "Failed to output the certificate request as ASCII format"
msgstr "Сбой вывода запроса сертификата в формате ASCII"

#: src/hed/libs/credential/NSSUtil.cpp:1866
msgid "Failed to output the certificate request as DER format"
msgstr "Сбой вывода запроса сертификата в формате DER"

#: src/hed/libs/credential/NSSUtil.cpp:1875
#, c-format
msgid "Succeeded to output the certificate request into %s"
msgstr "Успешный вывод запроса сертификата в %s"

#: src/hed/libs/credential/NSSUtil.cpp:1914
#: src/hed/libs/credential/NSSUtil.cpp:1951
msgid "Failed to read data from input file"
msgstr "Невозможно прочитать данные из входного файла"

#: src/hed/libs/credential/NSSUtil.cpp:1930
msgid "Input is without trailer\n"
msgstr "Входные данные не содержат строки окончания\n"

#: src/hed/libs/credential/NSSUtil.cpp:1941
msgid "Failed to convert ASCII to DER"
msgstr "Не удалось преобразовать ASCII в DER"

#: src/hed/libs/credential/NSSUtil.cpp:1992
msgid "Certificate request is invalid"
msgstr "Недопустимый запрос сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2212
#, c-format
msgid "The policy language: %s is not supported"
msgstr "Язык политик %s не поддерживается"

#: src/hed/libs/credential/NSSUtil.cpp:2220
#: src/hed/libs/credential/NSSUtil.cpp:2245
#: src/hed/libs/credential/NSSUtil.cpp:2268
#: src/hed/libs/credential/NSSUtil.cpp:2290
msgid "Failed to new arena"
msgstr "Сбой выделения новой области"

#: src/hed/libs/credential/NSSUtil.cpp:2229
#: src/hed/libs/credential/NSSUtil.cpp:2254
msgid "Failed to create path length"
msgstr "Сбой создания длины пути"

#: src/hed/libs/credential/NSSUtil.cpp:2232
#: src/hed/libs/credential/NSSUtil.cpp:2257
#: src/hed/libs/credential/NSSUtil.cpp:2277
#: src/hed/libs/credential/NSSUtil.cpp:2299
msgid "Failed to create policy language"
msgstr "Сбой создания языка политик"

#: src/hed/libs/credential/NSSUtil.cpp:2700
#, c-format
msgid "Failed to parse certificate request from CSR file %s"
msgstr "Сбой обработки запроса сертификата из файла CSR %s"

#: src/hed/libs/credential/NSSUtil.cpp:2707
#, c-format
msgid "Can not find certificate with name %s"
msgstr "Не удалось найти сертификат с именем %s"

#: src/hed/libs/credential/NSSUtil.cpp:2739
msgid "Can not allocate memory"
msgstr "Не удалось зарезервировать память"

#: src/hed/libs/credential/NSSUtil.cpp:2747
#, c-format
msgid "Proxy subject: %s"
msgstr "Имя субъекта доверенности: %s"

#: src/hed/libs/credential/NSSUtil.cpp:2764
msgid "Failed to start certificate extension"
msgstr "Сбой начала создания расширения сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2769
msgid "Failed to add key usage extension"
msgstr "Сбой добавления расширения об использовании ключа"

#: src/hed/libs/credential/NSSUtil.cpp:2774
msgid "Failed to add proxy certificate information extension"
msgstr "Сбой добавления расширения об информации сертификата доверенности"

#: src/hed/libs/credential/NSSUtil.cpp:2778
msgid "Failed to add voms AC extension"
msgstr "Сбой добавления расширения VOMS AC"

#: src/hed/libs/credential/NSSUtil.cpp:2798
msgid "Failed to retrieve private key for issuer"
msgstr "Сбой извлечения закрытого ключа издателя"

#: src/hed/libs/credential/NSSUtil.cpp:2805
msgid "Unknown key or hash type of issuer"
msgstr "Неизвестный ключ или тип хеширования издателя сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2811
msgid "Failed to set signature algorithm ID"
msgstr "Сбой задания ID алгоритма подписи"

#: src/hed/libs/credential/NSSUtil.cpp:2823
msgid "Failed to encode certificate"
msgstr "Ошибка кодирования сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2829
msgid "Failed to allocate item for certificate data"
msgstr "Не удалось зарезервировать элемент для данных о сертификате"

#: src/hed/libs/credential/NSSUtil.cpp:2835
msgid "Failed to sign encoded certificate data"
msgstr "Сбой подписи закодированных данных сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2844
#, c-format
msgid "Failed to open file %s"
msgstr "Не удалось открыть файл %s"

#: src/hed/libs/credential/NSSUtil.cpp:2855
#, c-format
msgid "Succeeded to output certificate to %s"
msgstr "Успешный вывод сертификата в %s"

#: src/hed/libs/credential/NSSUtil.cpp:2896
#, c-format
msgid "Failed to open input certificate file %s"
msgstr "Сбой открытия файла входного сертификата %s"

#: src/hed/libs/credential/NSSUtil.cpp:2913
msgid "Failed to read input certificate file"
msgstr "Сбой чтения файла входного сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2918
msgid "Failed to get certificate from certificate file"
msgstr "Сбой извлечения сертификата из файла"

#: src/hed/libs/credential/NSSUtil.cpp:2925
msgid "Failed to allocate certificate trust"
msgstr "Сбой резервирования доверительных отношений сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2930
msgid "Failed to decode trust string"
msgstr "Сбой расшифровки описания доверительных отношений"

#: src/hed/libs/credential/NSSUtil.cpp:2944
#: src/hed/libs/credential/NSSUtil.cpp:2961
msgid "Failed to add certificate to token or database"
msgstr "Сбой добавления сертификата к маркёру или базе данных"

#: src/hed/libs/credential/NSSUtil.cpp:2947
#: src/hed/libs/credential/NSSUtil.cpp:2950
msgid "Succeeded to import certificate"
msgstr "Успешное импортирование сертификата"

#: src/hed/libs/credential/NSSUtil.cpp:2964
#: src/hed/libs/credential/NSSUtil.cpp:2967
#, c-format
msgid "Succeeded to change trusts to: %s"
msgstr "Успешная смена доверительных отношений на: %s"

#: src/hed/libs/credential/NSSUtil.cpp:2994
#, c-format
msgid "Failed to import private key from file: %s"
msgstr "Сбой импортирования закрытого ключа из файла: %s"

#: src/hed/libs/credential/NSSUtil.cpp:2996
#, c-format
msgid "Failed to import certificate from file: %s"
msgstr "Сбой импортирования сертификата из файла: %s"

#: src/hed/libs/credential/VOMSConfig.cpp:147
#, c-format
msgid ""
"ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. "
"Line was: %s"
msgstr ""
"ERROR: строка настройки VOMS содержит избыточное число элементов. Ожидается "
"5 или 6. Строка: %s"

#: src/hed/libs/credential/VOMSConfig.cpp:163
#, c-format
msgid ""
"ERROR: file tree is too deep while scanning VOMS configuration. Max allowed "
"nesting is %i."
msgstr ""
"ERROR: каталог содержит слишком много уровней для сканирования настроек "
"VOMS. Максимально допустимое число уровней: %i."

#: src/hed/libs/credential/VOMSConfig.cpp:181
#, c-format
msgid "ERROR: failed to read file %s while scanning VOMS configuration."
msgstr "ERROR: сбой чтения файла %s при сканировании настроек VOMS."

#: src/hed/libs/credential/VOMSConfig.cpp:186
#, c-format
msgid ""
"ERROR: VOMS configuration file %s contains too many lines. Max supported "
"number is %i."
msgstr ""
"ERROR: файл настроек VOMS %s содержит слишком много строк. Максимально "
"допустимое количество: %i."

#: src/hed/libs/credential/VOMSConfig.cpp:193
#, c-format
msgid ""
"ERROR: VOMS configuration file %s contains too long line(s). Max supported "
"length is %i characters."
msgstr ""
"ERROR: файл настроек VOMS %s содержит слишком длинную строку. Максимально "
"допустимая длина: %i знаков."

#: src/hed/libs/credential/VOMSUtil.cpp:137
#, c-format
msgid "Failed to create OpenSSL object %s %s - %u %s"
msgstr "Сбой создания объекта OpenSSL %s %s - %u %s"

#: src/hed/libs/credential/VOMSUtil.cpp:144
#, c-format
msgid "Failed to obtain OpenSSL identifier for %s"
msgstr "Сбой извлечения идентификатора OpenSSL для %s"

#: src/hed/libs/credential/VOMSUtil.cpp:302
#: src/hed/libs/credential/VOMSUtil.cpp:571
#, c-format
msgid "VOMS: create FQAN: %s"
msgstr "VOMS: составление FQAN: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:340
#: src/hed/libs/credential/VOMSUtil.cpp:619
#, c-format
msgid "VOMS: create attribute: %s"
msgstr "VOMS: создание атрибута: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:917
msgid "VOMS: Can not allocate memory for parsing AC"
msgstr "VOMS: Не удалось зарезервировать память для разбора AC"

#: src/hed/libs/credential/VOMSUtil.cpp:925
msgid "VOMS: Can not allocate memory for storing the order of AC"
msgstr ""
"VOMS: Не удалось зарезервировать память для хранения последовательности AC"

#: src/hed/libs/credential/VOMSUtil.cpp:951
msgid "VOMS: Can not parse AC"
msgstr "VOMS: Не удалось обработать AC"

#: src/hed/libs/credential/VOMSUtil.cpp:981
#, fuzzy
msgid ""
"VOMS: CA directory or CA file must be provided or default setting enabled"
msgstr "VOMS: Необходимо задать каталог или файл сертификационного агентства"

#: src/hed/libs/credential/VOMSUtil.cpp:1052
msgid "VOMS: failed to verify AC signature"
msgstr "VOMS: не удалось подтвердить подпись сертификата атрибута"

#: src/hed/libs/credential/VOMSUtil.cpp:1108
#, c-format
msgid "VOMS: trust chain to check: %s "
msgstr "VOMS: подтверждается цепочка сертификатов: %s "

#: src/hed/libs/credential/VOMSUtil.cpp:1116
#, c-format
msgid ""
"VOMS: the DN in certificate: %s does not match that in trusted DN list: %s"
msgstr ""
"VOMS: Отличительное имя (DN) в сертификате %s не соответствует таковому в "
"доверяемом списке: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1122
#, c-format
msgid ""
"VOMS: the Issuer identity in certificate: %s does not match that in trusted "
"DN list: %s"
msgstr ""
"VOMS: Отличительный признак агентства, выдавшего сертификат %s, не "
"соответствует таковому в доверяемом списке: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1157
#, c-format
msgid "VOMS: The lsc file %s does not exist"
msgstr "VOMS: Файл lsc %s не существует"

#: src/hed/libs/credential/VOMSUtil.cpp:1163
#, c-format
msgid "VOMS: The lsc file %s can not be open"
msgstr "VOMS: Файл lsc %s не может быть открыт"

#: src/hed/libs/credential/VOMSUtil.cpp:1215
msgid ""
"VOMS: there is no constraints of trusted voms DNs, the certificates stack in "
"AC will not be checked."
msgstr ""
"VOMS: отсутствуют ограничения по отличительным признакам доверяемых VOMS, "
"цепочка сертификатов в сертификате атрибута (AC) не будет проверена."

#: src/hed/libs/credential/VOMSUtil.cpp:1248
msgid "VOMS: unable to match certificate chain against VOMS trusted DNs"
msgstr ""
"VOMS: невозможно найти цепочку сертификатов, соответствующую доверяемым "
"отличительным признакам VOMS"

#: src/hed/libs/credential/VOMSUtil.cpp:1268
msgid "VOMS: AC signature verification failed"
msgstr "VOMS: сбой подтверждения подписи сертификата атрибута"

#: src/hed/libs/credential/VOMSUtil.cpp:1277
msgid "VOMS: unable to verify certificate chain"
msgstr "VOMS: невозможно подтвердить цепочку сертификатов"

#: src/hed/libs/credential/VOMSUtil.cpp:1283
#, c-format
msgid "VOMS: cannot validate AC issuer for VO %s"
msgstr ""
"VOMS: невозможно удостоверить лицо, выдавшее сертификат атрибута для "
"виртуальной организации %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1306
#, c-format
msgid "VOMS: directory for trusted service certificates: %s"
msgstr "VOMS: директория, содержащая сертификаты доверяемых служб: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1332
#, c-format
msgid "VOMS: Cannot find certificate of AC issuer for VO %s"
msgstr ""
"VOMS: невозможно найти сертификат лица, выдавшего сертификат атрибута для "
"виртуальной организации %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1358
#: src/hed/libs/credential/VOMSUtil.cpp:1427
msgid "VOMS: Can not find AC_ATTR with IETFATTR type"
msgstr "VOMS: Невозможно найти AC_ATTR типа IETFATTR"

#: src/hed/libs/credential/VOMSUtil.cpp:1365
#: src/hed/libs/credential/VOMSUtil.cpp:1434
msgid "VOMS: case of multiple IETFATTR attributes not supported"
msgstr "VOMS: использование множественных атрибутов IETFATTR не поддерживается"

#: src/hed/libs/credential/VOMSUtil.cpp:1375
#: src/hed/libs/credential/VOMSUtil.cpp:1450
msgid "VOMS: case of multiple policyAuthority not supported"
msgstr ""
"VOMS: использование множественных атрибутов policyAuthority не поддерживается"

#: src/hed/libs/credential/VOMSUtil.cpp:1391
#: src/hed/libs/credential/VOMSUtil.cpp:1467
msgid "VOMS: the format of policyAuthority is unsupported - expecting URI"
msgstr "VOMS: недопустимый формат атрибута policyAuthority - ожидается URI"

#: src/hed/libs/credential/VOMSUtil.cpp:1400
#: src/hed/libs/credential/VOMSUtil.cpp:1478
msgid ""
"VOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRING"
msgstr "VOMS: недопустимый формат IETFATTRVAL - ожидается OCTET STRING"

#: src/hed/libs/credential/VOMSUtil.cpp:1443
#, fuzzy
msgid "VOMS: failed to access IETFATTR attribute"
msgstr "VOMS: сбой при разборе атрибутов в сертификате атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:1538
msgid "VOMS: the grantor attribute is empty"
msgstr "VOMS: атрибут grantor пуст"

#: src/hed/libs/credential/VOMSUtil.cpp:1556
msgid "VOMS: the attribute name is empty"
msgstr "VOMS: отсутствует имя атрибута"

#: src/hed/libs/credential/VOMSUtil.cpp:1562
#, c-format
msgid "VOMS: the attribute value for %s is empty"
msgstr "VOMS: отсутствует значение атрибута для %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1567
msgid "VOMS: the attribute qualifier is empty"
msgstr "VOMS: атрибут qualifier пуст"

#: src/hed/libs/credential/VOMSUtil.cpp:1602
#: src/hed/libs/credential/VOMSUtil.cpp:1721
msgid ""
"VOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions "
"must be present"
msgstr ""
"VOMS: должны присутствовать оба расширения сертификата, idcenoRevAvail и "
"authorityKeyIdentifier"

#: src/hed/libs/credential/VOMSUtil.cpp:1636
#: src/hed/libs/credential/VOMSUtil.cpp:1757
#, c-format
msgid "VOMS: FQDN of this host %s does not match any target in AC"
msgstr ""
"VOMS: FQDN узла %s не соответствует ни одному из назначений в сертификате "
"атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:1641
#: src/hed/libs/credential/VOMSUtil.cpp:1762
msgid "VOMS: the only supported critical extension of the AC is idceTargets"
msgstr ""
"VOMS: единственным поддерживаемым критическим расширением сертификата "
"атрибута (AC) является idceTargets"

#: src/hed/libs/credential/VOMSUtil.cpp:1656
#: src/hed/libs/credential/VOMSUtil.cpp:1777
msgid "VOMS: failed to parse attributes from AC"
msgstr "VOMS: сбой при разборе атрибутов в сертификате атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:1700
#: src/hed/libs/credential/VOMSUtil.cpp:1829
msgid "VOMS: authorityKey is wrong"
msgstr "VOMS: неверный authorityKey"

#: src/hed/libs/credential/VOMSUtil.cpp:1861
#: src/hed/libs/credential/VOMSUtil.cpp:2029
#: src/hed/libs/credential/VOMSUtil.cpp:2037
msgid "VOMS: missing AC parts"
msgstr "VOMS: отсутствуют части AC"

#: src/hed/libs/credential/VOMSUtil.cpp:1878
#: src/hed/libs/credential/VOMSUtil.cpp:2054
msgid "VOMS: unsupported time format in AC - expecting GENERALIZED TIME"
msgstr ""
"VOMS: неверный формат времени в сертификате атрибута (AC) - ожидается "
"GENERALIZED TIME"

#: src/hed/libs/credential/VOMSUtil.cpp:1884
#: src/hed/libs/credential/VOMSUtil.cpp:2060
msgid "VOMS: AC is not yet valid"
msgstr "VOMS: сертификат атрибута ещё не действителен"

#: src/hed/libs/credential/VOMSUtil.cpp:1891
#: src/hed/libs/credential/VOMSUtil.cpp:2067
msgid "VOMS: AC has expired"
msgstr "VOMS: срок годности AC вышел"

#: src/hed/libs/credential/VOMSUtil.cpp:1906
#: src/hed/libs/credential/VOMSUtil.cpp:2080
msgid "VOMS: AC is not complete - missing Serial or Issuer information"
msgstr ""
"VOMS: Сертификат атрибута (AC) неполон - отсутствует информация об атрибутах "
"Serial и/или Issuer"

#: src/hed/libs/credential/VOMSUtil.cpp:1911
#: src/hed/libs/credential/VOMSUtil.cpp:2085
#, c-format
msgid "VOMS: the holder serial number is: %lx"
msgstr "VOMS: серийный номер владельца: %lx"

#: src/hed/libs/credential/VOMSUtil.cpp:1912
#: src/hed/libs/credential/VOMSUtil.cpp:2086
#, c-format
msgid "VOMS: the serial number in AC is: %lx"
msgstr "VOMS: серийный номер в сертификате атрибута (AC): %lx"

#: src/hed/libs/credential/VOMSUtil.cpp:1915
#: src/hed/libs/credential/VOMSUtil.cpp:2089
#, c-format
msgid ""
"VOMS: the holder serial number %lx is not the same as the serial number in "
"AC %lx, the holder certificate that is used to create a voms proxy could be "
"a proxy certificate with a different serial number as the original EEC cert"
msgstr ""
"VOMS: серийный номер владельца %lx не совпадает с таковым в сертификате "
"атрибута (AC) %lx; сертификат, используемый для создания доверенности VOMS, "
"может быть доверенностью с серийным номером, отличным от изначального "
"сертификата"

#: src/hed/libs/credential/VOMSUtil.cpp:1924
#: src/hed/libs/credential/VOMSUtil.cpp:2098
msgid "VOMS: the holder information in AC is wrong"
msgstr "VOMS: неверная информация о владельце в сертификате атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:1946
#: src/hed/libs/credential/VOMSUtil.cpp:2120
#, c-format
msgid "VOMS: DN of holder in AC: %s"
msgstr "VOMS: DN владельца в сертификате атрибута (AC): %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1947
#: src/hed/libs/credential/VOMSUtil.cpp:2121
#, c-format
msgid "VOMS: DN of holder: %s"
msgstr "VOMS: DN владельца: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1948
#: src/hed/libs/credential/VOMSUtil.cpp:2122
#, c-format
msgid "VOMS: DN of issuer: %s"
msgstr "VOMS: DN эмитента: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:1955
#: src/hed/libs/credential/VOMSUtil.cpp:2129
msgid ""
"VOMS: the holder name in AC is not related to the distinguished name in "
"holder certificate"
msgstr ""
"VOMS: имя владельца в сертификате атрибута (AC) не имеет отношения к "
"отличительному имени в сертификате владельца"

#: src/hed/libs/credential/VOMSUtil.cpp:1967
#: src/hed/libs/credential/VOMSUtil.cpp:1974
#: src/hed/libs/credential/VOMSUtil.cpp:2141
#: src/hed/libs/credential/VOMSUtil.cpp:2148
msgid "VOMS: the holder issuerUID is not the same as that in AC"
msgstr ""
"VOMS: атрибут issuerUID в сертификате владельца не совпадает с таковым в "
"сертификате атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:1987
#: src/hed/libs/credential/VOMSUtil.cpp:2160
msgid "VOMS: the holder issuer name is not the same as that in AC"
msgstr ""
"VOMS: имя агентства, выдавшего сертификат, не совпадает с таковым в "
"сертификате атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:1997
#: src/hed/libs/credential/VOMSUtil.cpp:2169
msgid "VOMS: the issuer information in AC is wrong"
msgstr ""
"VOMS: неверная информация об агентстве, выдавшем сертификат, в сертификате "
"атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:2005
#: src/hed/libs/credential/VOMSUtil.cpp:2177
#, c-format
msgid "VOMS: the issuer name %s is not the same as that in AC - %s"
msgstr ""
"VOMS: имя агентства, выдавшего сертификат - %s - не совпадает с таковым в "
"сертификате атрибута (AC) - %s"

#: src/hed/libs/credential/VOMSUtil.cpp:2013
#: src/hed/libs/credential/VOMSUtil.cpp:2185
msgid ""
"VOMS: the serial number of AC INFO is too long - expecting no more than 20 "
"octets"
msgstr ""
"VOMS: слишком длинный серийный номер AC INFO - ожидается не более 20-и "
"октетов"

#: src/hed/libs/credential/VOMSUtil.cpp:2221
#: src/hed/libs/credential/VOMSUtil.cpp:2233
#: src/hed/libs/credential/VOMSUtil.cpp:2247
#: src/hed/libs/credential/VOMSUtil.cpp:2259
#: src/hed/libs/credential/VOMSUtil.cpp:2282
msgid "VOMS: unable to extract VO name from AC"
msgstr ""
"VOMS: невозможно извлечь название виртуальной организации из сертификата "
"атрибута (AC)"

#: src/hed/libs/credential/VOMSUtil.cpp:2273
#, c-format
msgid "VOMS: unable to determine hostname of AC from VO name: %s"
msgstr ""
"VOMS: невозможно определить название узла сертификата атрибута (AC) из "
"названия виртуальной организации: %s"

#: src/hed/libs/credential/VOMSUtil.cpp:2292
msgid "VOMS: can not verify the signature of the AC"
msgstr "VOMS: не удалось подтвердить подпись сертификата атрибута"

#: src/hed/libs/credential/VOMSUtil.cpp:2298
msgid "VOMS: problems while parsing information in AC"
msgstr "VOMS: проблемы при разборе информации в AC"

#: src/hed/libs/credential/test/VOMSUtilTest.cpp:126
#, c-format
msgid "Line %d.%d of the attributes returned: %s"
msgstr "Строка %d.%d атрибутов выдала: %s"

#: src/hed/libs/credentialstore/ClientVOMS.cpp:149
msgid "voms"
msgstr "VOMS"

#: src/hed/libs/credentialstore/CredentialStore.cpp:194
#: src/hed/libs/credentialstore/CredentialStore.cpp:245
#: src/hed/libs/credentialstore/CredentialStore.cpp:273
#: src/hed/libs/credentialstore/CredentialStore.cpp:336
#: src/hed/libs/credentialstore/CredentialStore.cpp:376
#: src/hed/libs/credentialstore/CredentialStore.cpp:406
#, c-format
msgid "MyProxy failure: %s"
msgstr "Сбой MyProxy: %s"

#: src/hed/libs/crypto/OpenSSL.cpp:64
#, c-format
msgid "SSL error: %d - %s:%s:%s"
msgstr "Ошибка SSL: %d - %s:%s:%s"

#: src/hed/libs/crypto/OpenSSL.cpp:78
msgid "Failed to lock arccrypto library in memory"
msgstr "Невозможно заблокировать библиотеку arccrypto в памяти"

#: src/hed/libs/crypto/OpenSSL.cpp:81
msgid "Failed to initialize OpenSSL library"
msgstr "Ошибка инициализации библиотеки OpenSSL"

#: src/hed/libs/data/DataExternalHelper.cpp:157
msgid "failed to read data tag"
msgstr "сбой чтения метки данных"

#: src/hed/libs/data/DataExternalHelper.cpp:161
msgid "waiting for data chunk"
msgstr "ожидание куска данных"

#: src/hed/libs/data/DataExternalHelper.cpp:163
msgid "failed to read data chunk"
msgstr "сбой чтения куска данных"

#: src/hed/libs/data/DataExternalHelper.cpp:171
#, c-format
msgid "data chunk: %llu %llu"
msgstr "кусок данных: %llu %llu"

#: src/hed/libs/data/DataExternalHelper.cpp:242
#, c-format
msgid "DataMove::Transfer: using supplied checksum %s"
msgstr "DataMove::Transfer: используется заданная контрольная сумма %s"

#: src/hed/libs/data/DataExternalHelper.cpp:361
msgid "Expecting Module, Command and URL provided"
msgstr "Ожидается указание модуля, команды и URL"

#: src/hed/libs/data/DataExternalHelper.cpp:368
msgid "Expecting Command module path among arguments"
msgstr "Одним из аргументов должен быть путь к Command module"

#: src/hed/libs/data/DataExternalHelper.cpp:372
msgid "Expecting Command module name among arguments"
msgstr "Одним из аргументов должно быть название Command module"

#: src/hed/libs/data/DataMover.cpp:126
msgid "No locations found - probably no more physical instances"
msgstr "Не найдено расположений - возможно, копий больше нет"

#: src/hed/libs/data/DataMover.cpp:132 src/hed/libs/data/FileCache.cpp:550
#: src/libs/data-staging/Processor.cpp:394
#: src/libs/data-staging/Processor.cpp:408
#, c-format
msgid "Removing %s"
msgstr "Удаляется %s"

#: src/hed/libs/data/DataMover.cpp:145
msgid "This instance was already deleted"
msgstr "Эта копия уже удалена"

#: src/hed/libs/data/DataMover.cpp:151
msgid "Failed to delete physical file"
msgstr "Сбой при удалении физического файла"

#: src/hed/libs/data/DataMover.cpp:162
#, c-format
msgid "Removing metadata in %s"
msgstr "Удаляются метаданные в %s"

#: src/hed/libs/data/DataMover.cpp:166
msgid "Failed to delete meta-information"
msgstr "Сбой при удалении мета-информации"

#: src/hed/libs/data/DataMover.cpp:180
msgid "Failed to remove all physical instances"
msgstr "Сбой удаления всех фактических копий"

#: src/hed/libs/data/DataMover.cpp:184
#, c-format
msgid "Removing logical file from metadata %s"
msgstr "Удаляется логический файл из метаданных %s"

#: src/hed/libs/data/DataMover.cpp:187
msgid "Failed to delete logical file"
msgstr "Сбой при удалении логического файла"

#: src/hed/libs/data/DataMover.cpp:194
msgid "Failed to remove instance"
msgstr "Не удалось удалить копию"

#: src/hed/libs/data/DataMover.cpp:243
msgid "DataMover::Transfer : starting new thread"
msgstr "DataMover::Transfer : запуск нового потока"

#: src/hed/libs/data/DataMover.cpp:271
#, c-format
msgid "Transfer from %s to %s"
msgstr "Передача из %s в %s"

#: src/hed/libs/data/DataMover.cpp:273
msgid "Not valid source"
msgstr "Недействительный источник"

#: src/hed/libs/data/DataMover.cpp:278
msgid "Not valid destination"
msgstr "Цель недействительна"

#: src/hed/libs/data/DataMover.cpp:300 src/services/candypond/CandyPond.cpp:304
#, c-format
msgid "Couldn't handle certificate: %s"
msgstr "Не удалось использовать сертификат: %s"

#: src/hed/libs/data/DataMover.cpp:309 src/hed/libs/data/DataMover.cpp:614
#: src/libs/data-staging/Processor.cpp:123
#, c-format
msgid "File %s is cached (%s) - checking permissions"
msgstr "Файл %s присутствует в кэше (%s) - проверяется допуск"

#: src/hed/libs/data/DataMover.cpp:313 src/hed/libs/data/DataMover.cpp:633
#: src/hed/libs/data/DataMover.cpp:691 src/libs/data-staging/Processor.cpp:142
msgid "Permission checking passed"
msgstr "Проверка допуска пройдена"

#: src/hed/libs/data/DataMover.cpp:314 src/hed/libs/data/DataMover.cpp:652
#: src/hed/libs/data/DataMover.cpp:1180
msgid "Linking/copying cached file"
msgstr "Подцепление/копирование файла из кэша"

#: src/hed/libs/data/DataMover.cpp:338
#, c-format
msgid "No locations for source found: %s"
msgstr "Не найдено расположений для файла источника: %s"

#: src/hed/libs/data/DataMover.cpp:342
#, c-format
msgid "Failed to resolve source: %s"
msgstr "Не удалось определить источник: %s"

#: src/hed/libs/data/DataMover.cpp:356 src/hed/libs/data/DataMover.cpp:431
#, c-format
msgid "No locations for destination found: %s"
msgstr "Не найдено физических адресов для назначения: %s"

#: src/hed/libs/data/DataMover.cpp:361 src/hed/libs/data/DataMover.cpp:435
#, c-format
msgid "Failed to resolve destination: %s"
msgstr "Не удалось определить назначение: %s"

#: src/hed/libs/data/DataMover.cpp:378
#, c-format
msgid "No locations for destination different from source found: %s"
msgstr "Не найдено расположений для назначения, отличающихся от источника: %s"

#: src/hed/libs/data/DataMover.cpp:400
#, c-format
msgid "DataMover::Transfer: trying to destroy/overwrite destination: %s"
msgstr "DataMover::Transfer: попытка стереть/перезаписать назначение: %s"

#: src/hed/libs/data/DataMover.cpp:412
#, c-format
msgid "Failed to delete %s but will still try to copy"
msgstr "Сбой удаления %s, всё равно попытаемся скопировать"

#: src/hed/libs/data/DataMover.cpp:416
#, c-format
msgid "Failed to delete %s"
msgstr "Сбой при удалении %s"

#: src/hed/libs/data/DataMover.cpp:447
#, c-format
msgid "Deleted but still have locations at %s"
msgstr "Удалён, но остались копии в %s"

#: src/hed/libs/data/DataMover.cpp:459
msgid "DataMover: cycle"
msgstr "DataMover: цикл"

#: src/hed/libs/data/DataMover.cpp:461
msgid "DataMover: no retries requested - exit"
msgstr "DataMover: не запрошено повторных попыток, выход"

#: src/hed/libs/data/DataMover.cpp:466
msgid "DataMover: source out of tries - exit"
msgstr "DataMover: закончились попытки поиска источника - завершение"

#: src/hed/libs/data/DataMover.cpp:468
msgid "DataMover: destination out of tries - exit"
msgstr "DataMover: закончились попытки поиска назначений - завершение"

#: src/hed/libs/data/DataMover.cpp:476
#, c-format
msgid "Real transfer from %s to %s"
msgstr "Фактическая передача из %s в %s"

#: src/hed/libs/data/DataMover.cpp:502
#, c-format
msgid "Creating buffer: %lli x %i"
msgstr "Создаётся буфер: %lli x %i"

#: src/hed/libs/data/DataMover.cpp:518
#, c-format
msgid "DataMove::Transfer: no checksum calculation for %s"
msgstr "DataMove::Transfer: контрольная сумма для %s не будет вычислена"

#: src/hed/libs/data/DataMover.cpp:523
#, c-format
msgid "DataMove::Transfer: using supplied checksum %s:%s"
msgstr "DataMove::Transfer: используется заданная контрольная сумма %s:%s"

#: src/hed/libs/data/DataMover.cpp:547
#, c-format
msgid "DataMove::Transfer: will calculate %s checksum"
msgstr "DataMove::Transfer: будет вычислена контрольная сумма для %s"

#: src/hed/libs/data/DataMover.cpp:552
msgid "Buffer creation failed !"
msgstr "Невозможно создать буфер!"

#: src/hed/libs/data/DataMover.cpp:575
#, c-format
msgid "URL is mapped to: %s"
msgstr "URL поставлен в соответствие: %s"

#: src/hed/libs/data/DataMover.cpp:603 src/hed/libs/data/DataMover.cpp:661
#: src/libs/data-staging/Processor.cpp:78
msgid "Cached file is locked - should retry"
msgstr "Файл в кэше заблокирован - попытаемся заново"

#: src/hed/libs/data/DataMover.cpp:608 src/libs/data-staging/Processor.cpp:96
msgid "Failed to initiate cache"
msgstr "Сбой при инициализации кэша"

#: src/hed/libs/data/DataMover.cpp:625 src/services/candypond/CandyPond.cpp:379
#, c-format
msgid "Permission checking failed: %s"
msgstr "Проверка прав доступа не удалась: %s"

#: src/hed/libs/data/DataMover.cpp:627 src/hed/libs/data/DataMover.cpp:685
#: src/hed/libs/data/DataMover.cpp:705 src/hed/libs/data/DataMover.cpp:716
msgid "source.next_location"
msgstr "source.next_location"

#: src/hed/libs/data/DataMover.cpp:641 src/libs/data-staging/Processor.cpp:147
#, c-format
msgid "Source modification date: %s"
msgstr "Дата изменения источника: %s"

#: src/hed/libs/data/DataMover.cpp:642 src/libs/data-staging/Processor.cpp:148
#, c-format
msgid "Cache creation date: %s"
msgstr "Дата создания кэша: %s"

#: src/hed/libs/data/DataMover.cpp:648 src/libs/data-staging/Processor.cpp:153
msgid "Cached file is outdated, will re-download"
msgstr "Файл в кэше устарел, будет загружен заново"

#: src/hed/libs/data/DataMover.cpp:651 src/libs/data-staging/Processor.cpp:158
msgid "Cached copy is still valid"
msgstr "Копия в кэше ещё действительна"

#: src/hed/libs/data/DataMover.cpp:678
msgid "URL is mapped to local access - checking permissions on original URL"
msgstr ""
"URL сопоставлен локальному файлу - проверка прав доступа к исходному URL"

#: src/hed/libs/data/DataMover.cpp:682
#, c-format
msgid "Permission checking on original URL failed: %s"
msgstr "Сбой проверки прав доступа к исходному URL: %s"

#: src/hed/libs/data/DataMover.cpp:693
msgid "Linking local file"
msgstr "Подцепляется локальный файл"

#: src/hed/libs/data/DataMover.cpp:713
#, c-format
msgid "Failed to make symbolic link %s to %s : %s"
msgstr "Сбой при создании символической ссылки %s на %s : %s"

#: src/hed/libs/data/DataMover.cpp:722
#, c-format
msgid "Failed to change owner of symbolic link %s to %i"
msgstr "Невозможно заменить владельца символьной ссылки %s на %i"

#: src/hed/libs/data/DataMover.cpp:733
#, c-format
msgid "cache file: %s"
msgstr "кэш-файл: %s"

#: src/hed/libs/data/DataMover.cpp:759
#, c-format
msgid "Failed to stat source %s"
msgstr "Сбой проверки статуса источника %s"

#: src/hed/libs/data/DataMover.cpp:761 src/hed/libs/data/DataMover.cpp:776
#: src/hed/libs/data/DataMover.cpp:808 src/hed/libs/data/DataMover.cpp:828
#: src/hed/libs/data/DataMover.cpp:851 src/hed/libs/data/DataMover.cpp:869
#: src/hed/libs/data/DataMover.cpp:1028 src/hed/libs/data/DataMover.cpp:1061
#: src/hed/libs/data/DataMover.cpp:1072 src/hed/libs/data/DataMover.cpp:1146
msgid "(Re)Trying next source"
msgstr "Следующий источник"

#: src/hed/libs/data/DataMover.cpp:772
#, c-format
msgid "Meta info of source and location do not match for %s"
msgstr "Мета-информация источника и адрес не соответствуют друг другу для %s"

#: src/hed/libs/data/DataMover.cpp:786
#, c-format
msgid ""
"Replica %s has high latency, but no more sources exist so will use this one"
msgstr ""
"Копия %s доступна с большой задержкой, но всё равно будет использоваться в "
"связи с отсутствием других источников"

#: src/hed/libs/data/DataMover.cpp:790
#, c-format
msgid "Replica %s has high latency, trying next source"
msgstr "Копия %s доступна с большой задержкой, пробуется другой источник"

#: src/hed/libs/data/DataMover.cpp:802 src/hed/libs/data/DataMover.cpp:823
#: src/libs/data-staging/DataStagingDelivery.cpp:376
#: src/libs/data-staging/DataStagingDelivery.cpp:399
#, c-format
msgid "Using internal transfer method of %s"
msgstr "Используется внутренний метод передачи данных %s"

#: src/hed/libs/data/DataMover.cpp:815 src/hed/libs/data/DataMover.cpp:833
#: src/libs/data-staging/DataStagingDelivery.cpp:392
#: src/libs/data-staging/DataStagingDelivery.cpp:413
#, c-format
msgid "Internal transfer method is not supported for %s"
msgstr "Внутренний метод передачи данных не поддерживается для %s"

#: src/hed/libs/data/DataMover.cpp:840
msgid "Using buffered transfer method"
msgstr "Используется буферированный метод передачи данных"

#: src/hed/libs/data/DataMover.cpp:844
#, c-format
msgid "Failed to prepare source: %s"
msgstr "Не удалось подготовить источник: %s"

#: src/hed/libs/data/DataMover.cpp:859
#, c-format
msgid "Failed to start reading from source: %s"
msgstr "Не удалось начать чтение из источника: %s"

#: src/hed/libs/data/DataMover.cpp:879
msgid "Metadata of source and destination are different"
msgstr "Метаданные источника и назначения не совпадают"

#: src/hed/libs/data/DataMover.cpp:899
#, c-format
msgid "Failed to preregister destination: %s"
msgstr "Не удалось предварительно зарегистрировать назначение: %s"

#: src/hed/libs/data/DataMover.cpp:904 src/hed/libs/data/DataMover.cpp:1170
msgid "destination.next_location"
msgstr "destination.next_location"

#: src/hed/libs/data/DataMover.cpp:915
#, c-format
msgid "Failed to prepare destination: %s"
msgstr "Не удалось подготовить назначение: %s"

#: src/hed/libs/data/DataMover.cpp:922 src/hed/libs/data/DataMover.cpp:945
#: src/hed/libs/data/DataMover.cpp:1167
#, c-format
msgid ""
"Failed to unregister preregistered lfn. You may need to unregister it "
"manually: %s"
msgstr ""
"Сбой удаления предварительной записи LFN. Возможно, необходимо удалить её "
"вручную: %s"

#: src/hed/libs/data/DataMover.cpp:926 src/hed/libs/data/DataMover.cpp:948
#: src/hed/libs/data/DataMover.cpp:1037 src/hed/libs/data/DataMover.cpp:1053
#: src/hed/libs/data/DataMover.cpp:1078 src/hed/libs/data/DataMover.cpp:1123
msgid "(Re)Trying next destination"
msgstr "Следующее назначение"

#: src/hed/libs/data/DataMover.cpp:937
#, c-format
msgid "Failed to start writing to destination: %s"
msgstr "Сбой начала записи в назначение: %s"

#: src/hed/libs/data/DataMover.cpp:960
msgid "Failed to start writing to cache"
msgstr "Сбой начала записи в кэш"

#: src/hed/libs/data/DataMover.cpp:968 src/hed/libs/data/DataMover.cpp:1014
#: src/hed/libs/data/DataMover.cpp:1192
msgid ""
"Failed to unregister preregistered lfn. You may need to unregister it "
"manually"
msgstr ""
"Сбой удаления предварительной записи LFN. Возможно, необходимо удалить её "
"вручную"

#: src/hed/libs/data/DataMover.cpp:975
msgid "Waiting for buffer"
msgstr "Ожидание буфера"

#: src/hed/libs/data/DataMover.cpp:982
#, c-format
msgid "Failed updating timestamp on cache lock file %s for file %s: %s"
msgstr ""
"Сбой обновления метки времени файла блокировки кэша %s для файла %s: %s"

#: src/hed/libs/data/DataMover.cpp:987
#, c-format
msgid "buffer: read EOF : %s"
msgstr "буфер: чтение конца файла : %s"

#: src/hed/libs/data/DataMover.cpp:988
#, c-format
msgid "buffer: write EOF: %s"
msgstr "буфер: запись конца файла: %s"

#: src/hed/libs/data/DataMover.cpp:989
#, c-format
msgid "buffer: error : %s, read: %s, write: %s"
msgstr "буфер: ошибка: %s, чтение: %s, запись: %s"

#: src/hed/libs/data/DataMover.cpp:990
msgid "Closing read channel"
msgstr "Закрывается канал чтения"

#: src/hed/libs/data/DataMover.cpp:997
msgid "Closing write channel"
msgstr "Закрывается канал передачи"

#: src/hed/libs/data/DataMover.cpp:1005
msgid "Failed to complete writing to destination"
msgstr "Сбой завершения записи в назначение"

#: src/hed/libs/data/DataMover.cpp:1019
msgid "Transfer cancelled successfully"
msgstr "Передача файлов успешно отменена"

#: src/hed/libs/data/DataMover.cpp:1066
msgid "Cause of failure unclear - choosing randomly"
msgstr "Причина сбоя не установлена - выбирается случайная копия"

#: src/hed/libs/data/DataMover.cpp:1110
#, c-format
msgid ""
"Checksum mismatch between checksum given as meta option (%s:%s) and "
"calculated checksum (%s)"
msgstr ""
"Несовпадение контрольной суммы, указанной в метаданных (%s:%s), с "
"вычисленной (%s)"

#: src/hed/libs/data/DataMover.cpp:1116
msgid ""
"Failed to unregister preregistered lfn, You may need to unregister it "
"manually"
msgstr ""
"Сбой удаления предварительной записи LFN. Возможно, необходимо удалить её "
"вручную"

#: src/hed/libs/data/DataMover.cpp:1120
msgid "Failed to delete destination, retry may fail"
msgstr ""
"Не удалось уничтожить назначение, новые попытки могут быть безуспешными"

#: src/hed/libs/data/DataMover.cpp:1130
msgid "Cannot compare empty checksum"
msgstr "Невозможно сравнить пустую контрольную сумму"

#: src/hed/libs/data/DataMover.cpp:1137
#: src/libs/data-staging/DataStagingDelivery.cpp:570
msgid "Checksum type of source and calculated checksum differ, cannot compare"
msgstr ""
"Тип контрольной суммы источника отличается от вычисленной, сравнение "
"невозможно"

#: src/hed/libs/data/DataMover.cpp:1139
#, c-format
msgid "Checksum mismatch between calcuated checksum %s and source checksum %s"
msgstr ""
"Несовпадение вычисленной контрольной суммы %s и контрольной суммы источника "
"%s"

#: src/hed/libs/data/DataMover.cpp:1151
#: src/libs/data-staging/DataStagingDelivery.cpp:586
#, c-format
msgid "Calculated transfer checksum %s matches source checksum"
msgstr ""
"Вычисленная контрольная сумма передачи %s совпадает с контрольной суммой "
"источника"

#: src/hed/libs/data/DataMover.cpp:1157
#: src/libs/data-staging/DataStagingDelivery.cpp:589
msgid "Checksum not computed"
msgstr "Контрольная сумма не вычислена"

#: src/hed/libs/data/DataMover.cpp:1163
#, c-format
msgid "Failed to postregister destination %s"
msgstr "Не удалось зарегистрировать назначение: %s"

#: src/hed/libs/data/DataPoint.cpp:90
#, c-format
msgid "Invalid URL option: %s"
msgstr "Недопустимая опция URL: %s"

#: src/hed/libs/data/DataPoint.cpp:251
#, fuzzy
msgid "Checksum types of index and replica are different, skipping comparison"
msgstr ""
"Тип контрольной суммы источника отличается от вычисленной, сравнение "
"невозможно"

#: src/hed/libs/data/DataPoint.cpp:278
#, c-format
msgid "Skipping invalid URL option %s"
msgstr "Пропускается недопустимая опция URL %s"

#: src/hed/libs/data/DataPoint.cpp:293
msgid ""
"Third party transfer was requested but the corresponding plugin could\n"
" not be loaded. Is the GFAL plugin installed? If not, please install "
"the\n"
" packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on\n"
" your type of installation the package names might differ."
msgstr ""
"Запрошена пересылка файла третьим лицом, но необходимый\n"
" подключаемый модуль не был подгружен. Устанавливали ли\n"
" Вы модуль GFAL? Если нет, пожалуйста, установите пакеты\n"
" 'nordugrid-arc-plugins-gfal' и 'gfal2-all'. Эти названия могут "
"зависеть\n"
" от типа вашего дистрибутива."

#: src/hed/libs/data/DataPoint.cpp:311 #, c-format msgid "Failed to load plugin for URL %s" msgstr "Сбой подгрузки подключаемого Ð¼Ð¾Ð´ÑƒÐ»Ñ Ð´Ð»Ñ URL %s" #: src/hed/libs/data/DataPointDelegate.cpp:75 #: src/hed/libs/data/DataPointDelegate.cpp:76 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2032 #, c-format msgid "Starting helper process: %s" msgstr "ЗапуÑкаетÑÑ Ð²Ñпомогательный процеÑÑ: %s" #: src/hed/libs/data/DataPointDelegate.cpp:180 msgid "start_reading" msgstr "start_reading" #: src/hed/libs/data/DataPointDelegate.cpp:189 msgid "start_reading: helper start failed" msgstr "start_reading: Ñбой запуÑка аÑÑиÑтента" #: src/hed/libs/data/DataPointDelegate.cpp:197 msgid "start_reading: thread create failed" msgstr "start_reading: Ñбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¿Ð¾Ñ‚Ð¾ÐºÐ°" #: src/hed/libs/data/DataPointDelegate.cpp:213 msgid "StopReading: aborting connection" msgstr "StopReading: прерывание ÑвÑзи" #: src/hed/libs/data/DataPointDelegate.cpp:218 msgid "stop_reading: waiting for transfer to finish" msgstr "stop_reading: ожидание Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð¿ÐµÑ€ÐµÑылки" #: src/hed/libs/data/DataPointDelegate.cpp:221 #, c-format msgid "stop_reading: exiting: %s" msgstr "stop_reading: выход: %s" #: src/hed/libs/data/DataPointDelegate.cpp:231 msgid "read_thread: get and register buffers" msgstr "read_thread: получение и региÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÑ„ÐµÑ€Ð¾Ð²" #: src/hed/libs/data/DataPointDelegate.cpp:239 #, c-format msgid "read_thread: for_read failed - aborting: %s" msgstr "read_thread: Ñбой for_read - прерывание: %s" #: src/hed/libs/data/DataPointDelegate.cpp:247 #, c-format msgid "read_thread: non-data tag '%c' from external process - leaving: %s" msgstr "" "read_thread: неÑоответÑÑ‚Ð²ÑƒÑŽÑ‰Ð°Ñ Ð´Ð°Ð½Ð½Ñ‹Ð¼ метка '%c' из внешнего процеÑÑа - " "выход: %s" #: src/hed/libs/data/DataPointDelegate.cpp:256 #, c-format msgid "read_thread: data read error from external process - aborting: %s" msgstr "" "read_thread: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… из внешнего процеÑÑа - прерывание: %s" #: src/hed/libs/data/DataPointDelegate.cpp:264 msgid "read_thread: exiting" msgstr "read_thread: выход" #: src/hed/libs/data/DataPointDelegate.cpp:285 msgid "start_writing_ftp: helper start failed" msgstr "start_writing_ftp: Ñбой запуÑка аÑÑиÑтента" #: src/hed/libs/data/DataPointDelegate.cpp:293 msgid "start_writing_ftp: thread create failed" msgstr "start_writing_ftp: Ñбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¿Ð¾Ñ‚Ð¾ÐºÐ°" #: src/hed/libs/data/DataPointDelegate.cpp:343 msgid "No checksum information possible" msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ контрольных Ñуммах недоÑтупна" #: src/hed/libs/data/DataPointDelegate.cpp:359 msgid "write_thread: get and pass buffers" msgstr "write_thread: получение и передача буферов" #: src/hed/libs/data/DataPointDelegate.cpp:366 msgid "write_thread: for_write failed - aborting" msgstr "write_thread: Ñбой for_write - прерывание" #: src/hed/libs/data/DataPointDelegate.cpp:370 msgid "write_thread: for_write eof" msgstr "write_thread: конец файла for_write" #: src/hed/libs/data/DataPointDelegate.cpp:384 msgid "write_thread: out failed - aborting" msgstr "write_thread: Ñбой вывода - прерывание" #: src/hed/libs/data/DataPointDelegate.cpp:392 msgid "write_thread: exiting" msgstr "write_thread: выход" #: src/hed/libs/data/DataPointIndex.cpp:91 #, c-format msgid "Can't handle location %s" msgstr "Ðевозможно иÑпользовать Ð°Ð´Ñ€ÐµÑ %s" #: src/hed/libs/data/DataPointIndex.cpp:183 msgid "Sorting replicas according to URL map" msgstr "Копии ÑортируютÑÑ Ð² ÑоответÑтвии Ñ Ñ€Ð°Ñположением URL" #: src/hed/libs/data/DataPointIndex.cpp:187 #, 
#, c-format
msgid "Replica %s is mapped"
msgstr "Копия %s локализована"

#: src/hed/libs/data/DataPointIndex.cpp:195
#, c-format
msgid "Sorting replicas according to preferred pattern %s"
msgstr "Копии сортируются в соответствии с предпочитаемым шаблоном %s"

#: src/hed/libs/data/DataPointIndex.cpp:218
#: src/hed/libs/data/DataPointIndex.cpp:236
#, c-format
msgid "Excluding replica %s matching pattern !%s"
msgstr "Отбрасывается копия %s, соответствующая шаблону !%s"

#: src/hed/libs/data/DataPointIndex.cpp:229
#, c-format
msgid "Replica %s matches host pattern %s"
msgstr "Копия %s соответствует шаблону узла %s"

#: src/hed/libs/data/DataPointIndex.cpp:247
#, c-format
msgid "Replica %s matches pattern %s"
msgstr "Копия %s соответствует шаблону %s"

#: src/hed/libs/data/DataPointIndex.cpp:263
#, c-format
msgid "Replica %s doesn't match preferred pattern or URL map"
msgstr "Копия %s не соответствует предпочитаемому шаблону или расположению URL"

#: src/hed/libs/data/DataStatus.cpp:12
msgid "Operation completed successfully"
msgstr "Операция завершена успешно"

#: src/hed/libs/data/DataStatus.cpp:13
msgid "Source is invalid URL"
msgstr "Недопустимый URL источника"

#: src/hed/libs/data/DataStatus.cpp:14
msgid "Destination is invalid URL"
msgstr "Недопустимый URL цели"

#: src/hed/libs/data/DataStatus.cpp:15
msgid "Resolving of index service for source failed"
msgstr "Сбой обнаружения каталога для источника"

#: src/hed/libs/data/DataStatus.cpp:16
msgid "Resolving of index service for destination failed"
msgstr "Сбой обнаружения каталога для назначения"

#: src/hed/libs/data/DataStatus.cpp:17
msgid "Can't read from source"
msgstr "Не удалось считать с источника"

#: src/hed/libs/data/DataStatus.cpp:18
msgid "Can't write to destination"
msgstr "Не удалось записать в цель"

#: src/hed/libs/data/DataStatus.cpp:19
msgid "Failed while reading from source"
msgstr "Ошибка чтения из источника"

#: src/hed/libs/data/DataStatus.cpp:20
msgid "Failed while writing to destination"
msgstr "Ошибка при записи в цель"

#: src/hed/libs/data/DataStatus.cpp:21
msgid "Failed while transferring data"
msgstr "Сбой при передаче данных"

#: src/hed/libs/data/DataStatus.cpp:22
msgid "Failed while finishing reading from source"
msgstr "Сбой завершения чтения из источника"

#: src/hed/libs/data/DataStatus.cpp:23
msgid "Failed while finishing writing to destination"
msgstr "Сбой при завершении записи в назначение"

#: src/hed/libs/data/DataStatus.cpp:24
msgid "First stage of registration to index service failed"
msgstr "Сбой первого шага регистрации в каталоге"

#: src/hed/libs/data/DataStatus.cpp:25
msgid "Last stage of registration to index service failed"
msgstr "Сбой последнего шага регистрации в каталоге"

#: src/hed/libs/data/DataStatus.cpp:26
msgid "Unregistering from index service failed"
msgstr "Сбой удаления регистрации из каталога"

#: src/hed/libs/data/DataStatus.cpp:27
msgid "Error in caching procedure"
msgstr "Ошибка кэширования"

#: src/hed/libs/data/DataStatus.cpp:28
msgid "Error due to expiration of provided credentials"
msgstr "Ошибка в связи с истечением срока годности предоставленных параметров доступа"

#: src/hed/libs/data/DataStatus.cpp:29
msgid "Delete error"
msgstr "Ошибка удаления"

#: src/hed/libs/data/DataStatus.cpp:30
msgid "No valid location available"
msgstr "Нет допустимых адресов"

#: src/hed/libs/data/DataStatus.cpp:31
msgid "Location already exists"
msgstr "Такой файл уже существует"
#: src/hed/libs/data/DataStatus.cpp:32
msgid "Operation not supported for this kind of URL"
msgstr "Эта операция не поддерживается для данного типа URL"

#: src/hed/libs/data/DataStatus.cpp:33
msgid "Feature is not implemented"
msgstr "Эта функция не реализована"

#: src/hed/libs/data/DataStatus.cpp:34
msgid "Already reading from source"
msgstr "Чтение из источника уже в процессе"

#: src/hed/libs/data/DataStatus.cpp:35
msgid "Already writing to destination"
msgstr "Запись в цель уже в процессе"

#: src/hed/libs/data/DataStatus.cpp:36
msgid "Read access check failed"
msgstr "Не удалось подтвердить наличие доступа на чтение"

#: src/hed/libs/data/DataStatus.cpp:37
msgid "Directory listing failed"
msgstr "Не удалось вывести список каталога"

#: src/hed/libs/data/DataStatus.cpp:38
msgid "Object is not suitable for listing"
msgstr "Объект не подходит для перечисления"

#: src/hed/libs/data/DataStatus.cpp:39
msgid "Failed to obtain information about file"
msgstr "Сбой получения информации о файле"

#: src/hed/libs/data/DataStatus.cpp:40
msgid "No such file or directory"
msgstr "Нет такого файла или каталога"

#: src/hed/libs/data/DataStatus.cpp:41
msgid "Object not initialized (internal error)"
msgstr "Объект не инициализирован (внутренняя ошибка)"

#: src/hed/libs/data/DataStatus.cpp:42
msgid "Operating System error"
msgstr "Ошибка операционной системы"

#: src/hed/libs/data/DataStatus.cpp:43
msgid "Failed to stage file(s)"
msgstr "Не удалось разместить файл(ы)"

#: src/hed/libs/data/DataStatus.cpp:44
msgid "Inconsistent metadata"
msgstr "Противоречивые метаданные"

#: src/hed/libs/data/DataStatus.cpp:45
msgid "Failed to prepare source"
msgstr "Не удалось подготовить источник"

#: src/hed/libs/data/DataStatus.cpp:46
msgid "Should wait for source to be prepared"
msgstr "Следует подождать, когда источник будет готов"

#: src/hed/libs/data/DataStatus.cpp:47
msgid "Failed to prepare destination"
msgstr "Не удалось подготовить назначение"

#: src/hed/libs/data/DataStatus.cpp:48
msgid "Should wait for destination to be prepared"
msgstr "Следует подождать, когда назначение будет готово"

#: src/hed/libs/data/DataStatus.cpp:49
msgid "Failed to finalize reading from source"
msgstr "Сбой завершения чтения из источника"

#: src/hed/libs/data/DataStatus.cpp:50
msgid "Failed to finalize writing to destination"
msgstr "Сбой завершения записи в цель"

#: src/hed/libs/data/DataStatus.cpp:51
msgid "Failed to create directory"
msgstr "Не удалось создать каталог"

#: src/hed/libs/data/DataStatus.cpp:52
msgid "Failed to rename URL"
msgstr "Не удалось переименовать URL"

#: src/hed/libs/data/DataStatus.cpp:53
msgid "Data was already cached"
msgstr "Данные уже записаны в кэш"

#: src/hed/libs/data/DataStatus.cpp:54
msgid "Operation cancelled successfully"
msgstr "Операция успешно прервана"

#: src/hed/libs/data/DataStatus.cpp:55
msgid "Generic error"
msgstr "Неспецифическая ошибка"

#: src/hed/libs/data/DataStatus.cpp:56 src/hed/libs/data/DataStatus.cpp:69
msgid "Unknown error"
msgstr "Неизвестная ошибка"

#: src/hed/libs/data/DataStatus.cpp:60
msgid "No error"
msgstr "Нет ошибок"

#: src/hed/libs/data/DataStatus.cpp:61
msgid "Transfer timed out"
msgstr "Истечение времени ожидания передачи"

#: src/hed/libs/data/DataStatus.cpp:62
msgid "Checksum mismatch"
msgstr "Несовпадение контрольной суммы"

#: src/hed/libs/data/DataStatus.cpp:63
msgid "Bad logic"
msgstr "Неверная логика"
#: src/hed/libs/data/DataStatus.cpp:64
msgid "All results obtained are invalid"
msgstr "Все полученные результаты неверны"

#: src/hed/libs/data/DataStatus.cpp:65
msgid "Temporary service error"
msgstr "Временная ошибка службы"

#: src/hed/libs/data/DataStatus.cpp:66
msgid "Permanent service error"
msgstr "Постоянная ошибка службы"

#: src/hed/libs/data/DataStatus.cpp:67
msgid "Error switching uid"
msgstr "Ошибка смены uid"

#: src/hed/libs/data/DataStatus.cpp:68
msgid "Request timed out"
msgstr "Истекло время ожидания запроса"

#: src/hed/libs/data/FileCache.cpp:109
msgid "No cache directory specified"
msgstr "Не указан каталог кэша"

#: src/hed/libs/data/FileCache.cpp:126
msgid "No usable caches"
msgstr "Нет подходящих кэшей"

#: src/hed/libs/data/FileCache.cpp:135
msgid "No draining cache directory specified"
msgstr "Не указан каталог кэша для опорожнения"

#: src/hed/libs/data/FileCache.cpp:153
msgid "No read-only cache directory specified"
msgstr "Не указан доступный только для чтения каталог кэша"

#: src/hed/libs/data/FileCache.cpp:182
#, c-format
msgid "Failed to create cache directory for file %s: %s"
msgstr "Не удалось создать каталог кэша для файла %s: %s"

#: src/hed/libs/data/FileCache.cpp:192
#, c-format
msgid "Failed to create any cache directories for %s"
msgstr "Не удалось создать каталоги кэша для %s"

#: src/hed/libs/data/FileCache.cpp:199
#, c-format
msgid "Failed to change permissions on %s: %s"
msgstr "Невозможно изменить права доступа к %s: %s"

#: src/hed/libs/data/FileCache.cpp:211
#, c-format
msgid "Failed to delete stale cache file %s: %s"
msgstr "Не удалось удалить устаревший файл кэша %s: %s"

#: src/hed/libs/data/FileCache.cpp:214
#, c-format
msgid "Failed to release lock on file %s"
msgstr "Невозможно разблокировать файл %s"

#: src/hed/libs/data/FileCache.cpp:232
#, c-format
msgid "Failed looking up attributes of cached file: %s"
msgstr "Ошибка поиска атрибутов кэшированного файла: %s"

#: src/hed/libs/data/FileCache.cpp:238
#, c-format
msgid "Failed to obtain lock on cache file %s"
msgstr "Невозможно заблокировать файл в кэше %s"

#: src/hed/libs/data/FileCache.cpp:247 src/hed/libs/data/FileCache.cpp:307
#, c-format
msgid "Error removing cache file %s: %s"
msgstr "Ошибка удаления кэшированного файла %s: %s"

#: src/hed/libs/data/FileCache.cpp:249 src/hed/libs/data/FileCache.cpp:260
#, c-format
msgid "Failed to remove lock on %s. Some manual intervention may be required"
msgstr "Сбой разблокирования файла на %s. Возможно, необходимо ручное вмешательство"

#: src/hed/libs/data/FileCache.cpp:279 src/hed/libs/data/FileCache.cpp:313
#, c-format
msgid "Failed to unlock file %s: %s. Manual intervention may be required"
msgstr "Не удалось разблокировать файл %s: %s. Возможно, необходимо ручное вмешательство"
#: src/hed/libs/data/FileCache.cpp:296
#, c-format
msgid "Invalid lock on file %s"
msgstr "Недопустимая блокировка файла %s"

#: src/hed/libs/data/FileCache.cpp:302
#, c-format
msgid "Failed to remove .meta file %s: %s"
msgstr "Сбой удаления файла .meta %s: %s"

#: src/hed/libs/data/FileCache.cpp:367
#, c-format
msgid "Cache not found for file %s"
msgstr "Не обнаружен кэш файла %s"

#: src/hed/libs/data/FileCache.cpp:377
#, c-format
msgid ""
"Cache file %s was modified in the last second, sleeping 1 second to avoid "
"race condition"
msgstr "Кэшированный файл %s был изменён за последнюю секунду, приостановка процесса на 1 секунду для предотвращения гонки"

#: src/hed/libs/data/FileCache.cpp:382 src/hed/libs/data/FileCache.cpp:687
#, c-format
msgid "Cache file %s does not exist"
msgstr "Кэшированный файл %s не существует"

#: src/hed/libs/data/FileCache.cpp:387 src/hed/libs/data/FileCache.cpp:689
#, c-format
msgid "Error accessing cache file %s: %s"
msgstr "Ошибка доступа к кэшированному файлу %s: %s"

#: src/hed/libs/data/FileCache.cpp:393
#, c-format
msgid "Cannot create directory %s for per-job hard links"
msgstr "Невозможно создать каталог %s для жёстких ссылок задач"

#: src/hed/libs/data/FileCache.cpp:398
#, c-format
msgid "Cannot change permission of %s: %s "
msgstr "Не удалось изменить права доступа к %s: %s "

#: src/hed/libs/data/FileCache.cpp:402
#, c-format
msgid "Cannot change owner of %s: %s "
msgstr "Невозможно изменить владельца %s: %s "

#: src/hed/libs/data/FileCache.cpp:416
#, c-format
msgid "Failed to remove existing hard link at %s: %s"
msgstr "Невозможно удалить существующую жёсткую ссылку на %s: %s"

#: src/hed/libs/data/FileCache.cpp:420 src/hed/libs/data/FileCache.cpp:431
#, c-format
msgid "Failed to create hard link from %s to %s: %s"
msgstr "Невозможно создать жёсткую ссылку с %s на %s: %s"

#: src/hed/libs/data/FileCache.cpp:426
#, c-format
msgid "Cache file %s not found"
msgstr "Не обнаружен кэшированный файл %s"

#: src/hed/libs/data/FileCache.cpp:441
#, c-format
msgid "Failed to change permissions or set owner of hard link %s: %s"
msgstr "Не удалось сменить права доступа или владельца жёсткой ссылки %s: %s"

#: src/hed/libs/data/FileCache.cpp:449
#, c-format
msgid "Failed to release lock on cache file %s"
msgstr "Невозможно разблокировать файл в кэше %s"

#: src/hed/libs/data/FileCache.cpp:460
#, c-format
msgid "Cache file %s was locked during link/copy, must start again"
msgstr "Кэшированный файл %s был заблокирован во время создания ссылки или копии, новая попытка"

#: src/hed/libs/data/FileCache.cpp:465
#, c-format
msgid "Cache file %s was deleted during link/copy, must start again"
msgstr "Кэшированный файл %s был удалён во время создания ссылки или копии, новая попытка"

#: src/hed/libs/data/FileCache.cpp:470
#, c-format
msgid "Cache file %s was modified while linking, must start again"
msgstr "Кэшированный файл %s был изменён во время создания ссылки или копии, новая попытка"

#: src/hed/libs/data/FileCache.cpp:488
#, c-format
msgid "Failed to copy file %s to %s: %s"
msgstr "Сбой копирования файла %s в %s: %s"

#: src/hed/libs/data/FileCache.cpp:494
#, c-format
msgid "Failed to set executable bit on file %s"
msgstr "Невозможно выставить исполняемый бит для файла %s"

#: src/hed/libs/data/FileCache.cpp:499
#, c-format
msgid "Failed to set executable bit on file %s: %s"
msgstr "Невозможно выставить исполняемый бит для файла %s: %s"
#: src/hed/libs/data/FileCache.cpp:513
#, c-format
msgid "Failed to remove existing symbolic link at %s: %s"
msgstr "Невозможно удалить существующую символьную ссылку на %s: %s"

#: src/hed/libs/data/FileCache.cpp:517 src/hed/libs/data/FileCache.cpp:522
#, c-format
msgid "Failed to create symbolic link from %s to %s: %s"
msgstr "Невозможно создать символьную ссылку с %s на %s: %s"

#: src/hed/libs/data/FileCache.cpp:552
#, c-format
msgid "Failed to remove cache per-job dir %s: %s"
msgstr "Сбой удаления каталога кэша задач %s: %s"

#: src/hed/libs/data/FileCache.cpp:571 src/hed/libs/data/FileCache.cpp:639
#, c-format
msgid "Error reading meta file %s: %s"
msgstr "Ошибка чтения мета-файла %s: %s"

#: src/hed/libs/data/FileCache.cpp:576 src/hed/libs/data/FileCache.cpp:644
#, c-format
msgid "Error opening meta file %s"
msgstr "Ошибка открытия мета-файла %s"

#: src/hed/libs/data/FileCache.cpp:581 src/hed/libs/data/FileCache.cpp:648
#, c-format
msgid "meta file %s is empty"
msgstr "Мета-файл %s пуст"

#: src/hed/libs/data/FileCache.cpp:591
#, c-format
msgid ""
"File %s is already cached at %s under a different URL: %s - will not add DN "
"to cached list"
msgstr "Файл %s уже кэширован в %s с другим URL: %s - выделенное имя не будет добавлено в кэшированный список"

#: src/hed/libs/data/FileCache.cpp:602
#, c-format
msgid "Bad format detected in file %s, in line %s"
msgstr "Обнаружен недопустимый формат в файле %s, строке %s"

#: src/hed/libs/data/FileCache.cpp:618
#, c-format
msgid "Could not acquire lock on meta file %s"
msgstr "Невозможно установить блокировку на мета-файл %s"

#: src/hed/libs/data/FileCache.cpp:622
#, c-format
msgid "Error opening meta file for writing %s"
msgstr "Ошибка открытия мета-файла для записи %s"

#: src/hed/libs/data/FileCache.cpp:658
#, c-format
msgid "DN %s is cached and is valid until %s for URL %s"
msgstr "Выделенное имя %s для URL %s кэшировано и действительно до %s"

#: src/hed/libs/data/FileCache.cpp:662
#, c-format
msgid "DN %s is cached but has expired for URL %s"
msgstr "Выделенное имя %s для URL %s кэшировано, но уже просрочено"

#: src/hed/libs/data/FileCache.cpp:713
#, c-format
msgid "Failed to acquire lock on cache meta file %s"
msgstr "Сбой установки блокировки на кэшированный мета-файл %s"

#: src/hed/libs/data/FileCache.cpp:718
#, c-format
msgid "Failed to create cache meta file %s"
msgstr "Сбой создания мета-файла кэша %s"

#: src/hed/libs/data/FileCache.cpp:733
#, c-format
msgid "Failed to read cache meta file %s"
msgstr "Сбой чтения мета-файла кэша %s"

#: src/hed/libs/data/FileCache.cpp:738
#, c-format
msgid "Cache meta file %s is empty, will recreate"
msgstr "Мета-файл кэша %s пуст, будет воссоздан"

#: src/hed/libs/data/FileCache.cpp:743
#, c-format
msgid "Cache meta file %s possibly corrupted, will recreate"
msgstr "Мета-файл кэша %s, возможно, повреждён, будет воссоздан"

#: src/hed/libs/data/FileCache.cpp:747
#, c-format
msgid ""
"File %s is already cached at %s under a different URL: %s - this file will "
"not be cached"
msgstr "Файл %s уже находится в кэше %s с другим URL: %s - этот файл не будет кэширован"

#: src/hed/libs/data/FileCache.cpp:757
#, c-format
msgid "Error looking up attributes of cache meta file %s: %s"
msgstr "Ошибка поиска атрибутов мета-файла кэша %s: %s"

#: src/hed/libs/data/FileCache.cpp:828
#, c-format
msgid "Using cache %s"
msgstr "Используется кэш %s"
#: src/hed/libs/data/FileCache.cpp:842
#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:79
#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:112
#, c-format
msgid "Error getting info from statvfs for the path %s: %s"
msgstr "Ошибка получения информации от statvfs для пути %s: %s"

#: src/hed/libs/data/FileCache.cpp:848
#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:118
#, c-format
msgid "Cache %s: Free space %f GB"
msgstr "Кэш %s: Свободное пространство %f GB"

#: src/hed/libs/data/URLMap.cpp:33
#, c-format
msgid "Can't use URL %s"
msgstr "Невозможно использовать URL %s"

#: src/hed/libs/data/URLMap.cpp:39
#, c-format
msgid "file %s is not accessible"
msgstr "файл %s недоступен"

#: src/hed/libs/data/URLMap.cpp:49
#, c-format
msgid "Mapping %s to %s"
msgstr "%s ставится в соответствие %s"

#: src/hed/libs/data/examples/simple_copy.cpp:17
msgid "Usage: copy source destination"
msgstr "Использование: copy источник назначение"

#: src/hed/libs/data/examples/simple_copy.cpp:42
#, c-format
msgid "Copy failed: %s"
msgstr "Сбой копирования: %s"

#: src/hed/libs/globusutils/GSSCredential.cpp:41
#, c-format
msgid "Failed to read proxy file: %s"
msgstr "Сбой при чтении файла доверенности: %s"

#: src/hed/libs/globusutils/GSSCredential.cpp:49
#, c-format
msgid "Failed to read certificate file: %s"
msgstr "Сбой при чтении файла сертификата: %s"

#: src/hed/libs/globusutils/GSSCredential.cpp:56
#, c-format
msgid "Failed to read private key file: %s"
msgstr "Сбой при чтении файла личного ключа: %s"

#: src/hed/libs/globusutils/GSSCredential.cpp:82
#, c-format
msgid ""
"Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:"
"%s"
msgstr "Не удалось преобразовать параметры доступа GSI в GSS (major: %d, minor: %d):%s:%s"

#: src/hed/libs/globusutils/GSSCredential.cpp:94
#, c-format
msgid "Failed to release GSS credential (major: %d, minor: %d):%s:%s"
msgstr "Не удалось освободить параметры доступа GSS (major: %d, minor: %d):%s:%s"

#: src/hed/libs/loader/ModuleManager.cpp:30
msgid "Module Manager Init"
msgstr "Запуск управления модулями"

#: src/hed/libs/loader/ModuleManager.cpp:73
msgid ""
"Busy plugins found while unloading Module Manager. Waiting for them to be "
"released."
msgstr "В процессе отключения менеджера модулей обнаружены занятые подключаемые модули. Ожидается их завершение."
#: src/hed/libs/loader/ModuleManager.cpp:207
#, c-format
msgid "Found %s in cache"
msgstr "%s обнаружен в кэше"

#: src/hed/libs/loader/ModuleManager.cpp:214
#, c-format
msgid "Could not locate module %s in following paths:"
msgstr "Невозможно найти модуль %s в следующих местах:"

#: src/hed/libs/loader/ModuleManager.cpp:218
#, c-format
msgid "\t%s"
msgstr "\t%s"

#: src/hed/libs/loader/ModuleManager.cpp:232
#, c-format
msgid "Loaded %s"
msgstr "Подгружен модуль %s"

#: src/hed/libs/loader/ModuleManager.cpp:276
msgid "Module Manager Init by ModuleManager::setCfg"
msgstr "Инициализация менеджера модулей в ModuleManager::setCfg"

#: src/hed/libs/loader/ModuleManager.cpp:312
#: src/hed/libs/loader/ModuleManager.cpp:325
#, c-format
msgid "%s made persistent"
msgstr "Модуль %s сделан постоянным"

#: src/hed/libs/loader/ModuleManager.cpp:316
#, c-format
msgid "Not found %s in cache"
msgstr "Модуль %s не найден в кэше"

#: src/hed/libs/loader/ModuleManager.cpp:330
msgid "Specified module not found in cache"
msgstr "Указанный модуль не найден в кэше"

#: src/hed/libs/loader/Plugin.cpp:364 src/hed/libs/loader/Plugin.cpp:557
#, c-format
msgid "Could not find loadable module descriptor by name %s"
msgstr "Не удалось найти дескриптор подгружаемого модуля по имени %s"

#: src/hed/libs/loader/Plugin.cpp:372 src/hed/libs/loader/Plugin.cpp:567
#, c-format
msgid "Could not find loadable module by name %s (%s)"
msgstr "Не удалось найти подгружаемый модуль %s (%s)"

#: src/hed/libs/loader/Plugin.cpp:378 src/hed/libs/loader/Plugin.cpp:480
#: src/hed/libs/loader/Plugin.cpp:572
#, c-format
msgid "Module %s is not an ARC plugin (%s)"
msgstr "Модуль %s не является подключаемым модулем ARC (%s)"

#: src/hed/libs/loader/Plugin.cpp:395 src/hed/libs/loader/Plugin.cpp:490
#: src/hed/libs/loader/Plugin.cpp:598
#, c-format
msgid "Module %s failed to reload (%s)"
msgstr "Не удалось перезагрузить модуль %s (%s)"

#: src/hed/libs/loader/Plugin.cpp:417
#, c-format
msgid "Module %s contains no plugin %s"
msgstr "Модуль %s не содержит подключаемый модуль %s"

#: src/hed/libs/loader/Plugin.cpp:462
#, c-format
msgid "Could not find loadable module descriptor by name %s or kind %s"
msgstr "Не удалось найти дескриптор подгружаемого модуля по имени %s или типу %s"

#: src/hed/libs/loader/Plugin.cpp:467
#, c-format
msgid "Loadable module %s contains no requested plugin %s of kind %s"
msgstr "Подгружаемый модуль %s не содержит запрашиваемого модуля %s типа %s"

#: src/hed/libs/loader/Plugin.cpp:474
#, c-format
msgid "Could not find loadable module by names %s and %s (%s)"
msgstr "Невозможно найти подгружаемые модули по имени %s и %s (%s)"

#: src/hed/libs/loader/Plugin.cpp:503
#, c-format
msgid "Module %s contains no requested plugin %s of kind %s"
msgstr "Модуль %s не содержит запрашиваемого подключаемого модуля %s типа %s"

#: src/hed/libs/loader/Plugin.cpp:588
#, c-format
msgid "Module %s does not contain plugin(s) of specified kind(s)"
msgstr "Модуль %s не содержит подключаемых модулей указанных типов"

#: src/hed/libs/message/MCC.cpp:76 src/hed/libs/message/Service.cpp:25
#, c-format
msgid "No security processing/check requested for '%s'"
msgstr "Обработка/проверка параметров доступа не запрошена для '%s'"

#: src/hed/libs/message/MCC.cpp:85
#, c-format
msgid "Security processing/check failed: %s"
msgstr "Сбой обработки/проверки безопасности: %s"

#: src/hed/libs/message/MCC.cpp:90
msgid "Security processing/check passed"
msgstr "Обработка/проверка параметров доступа завершилась успехом"
#: src/hed/libs/message/MCCLoader.cpp:17
msgid "Chain(s) configuration failed"
msgstr "Не удалось настроить цепочку/и"

#: src/hed/libs/message/MCCLoader.cpp:134
msgid "SecHandler configuration is not defined"
msgstr "Настройки SecHandler не заданы"

#: src/hed/libs/message/MCCLoader.cpp:157
msgid "SecHandler has no configuration"
msgstr "Настройки SecHandler отсутствуют"

#: src/hed/libs/message/MCCLoader.cpp:163
msgid "SecHandler has no name attribute defined"
msgstr "Не задан атрибут name для SecHandler"

#: src/hed/libs/message/MCCLoader.cpp:173
#, c-format
msgid "Security Handler %s(%s) could not be created"
msgstr "Обработчик безопасности %s(%s) не может быть создан"

#: src/hed/libs/message/MCCLoader.cpp:177
#, c-format
msgid "SecHandler: %s(%s)"
msgstr "SecHandler: %s(%s)"

#: src/hed/libs/message/MCCLoader.cpp:189
msgid "Component has no name attribute defined"
msgstr "Для компонента не задан атрибут name"

#: src/hed/libs/message/MCCLoader.cpp:194
msgid "Component has no ID attribute defined"
msgstr "Для компонента не задан атрибут ID"

#: src/hed/libs/message/MCCLoader.cpp:203
#, c-format
msgid "Component %s(%s) could not be created"
msgstr "Компонент %s(%s) не может быть создан"

#: src/hed/libs/message/MCCLoader.cpp:229
#, c-format
msgid "Component's %s(%s) next has no ID attribute defined"
msgstr "Для компонента %s(%s) отсутствует атрибут ID следующей цели"

#: src/hed/libs/message/MCCLoader.cpp:290
#, c-format
msgid "Loaded MCC %s(%s)"
msgstr "Подгружен MCC %s(%s)"

#: src/hed/libs/message/MCCLoader.cpp:308
#, c-format
msgid "Plexer's (%s) next has no ID attribute defined"
msgstr "Для следующего после %s компонента Plexer не задан атрибут ID"

#: src/hed/libs/message/MCCLoader.cpp:318
#, c-format
msgid "Loaded Plexer %s"
msgstr "Подгружен Plexer %s"

#: src/hed/libs/message/MCCLoader.cpp:326
msgid "Service has no Name attribute defined"
msgstr "Для службы не задан атрибут Name"

#: src/hed/libs/message/MCCLoader.cpp:332
msgid "Service has no ID attribute defined"
msgstr "Для службы не задан атрибут ID"

#: src/hed/libs/message/MCCLoader.cpp:341
#, c-format
msgid "Service %s(%s) could not be created"
msgstr "Служба %s(%s) не может быть создана"

#: src/hed/libs/message/MCCLoader.cpp:348
#, c-format
msgid "Loaded Service %s(%s)"
msgstr "Подгружена служба %s(%s)"

#: src/hed/libs/message/MCCLoader.cpp:390
#, c-format
msgid "Linking MCC %s(%s) to MCC (%s) under %s"
msgstr "Подцепление MCC %s(%s) к MCC (%s) в %s"

#: src/hed/libs/message/MCCLoader.cpp:401
#, c-format
msgid "Linking MCC %s(%s) to Service (%s) under %s"
msgstr "Подцепление MCC %s(%s) к службе (%s) в %s"

#: src/hed/libs/message/MCCLoader.cpp:410
#, c-format
msgid "Linking MCC %s(%s) to Plexer (%s) under %s"
msgstr "Подцепление MCC %s(%s) к коммутатору (%s) в %s"

#: src/hed/libs/message/MCCLoader.cpp:415
#, c-format
msgid "MCC %s(%s) - next %s(%s) has no target"
msgstr "MCC %s(%s) - следующий %s(%s) не содержит назначения"

#: src/hed/libs/message/MCCLoader.cpp:434
#, c-format
msgid "Linking Plexer %s to MCC (%s) under %s"
msgstr "Подцепление коммутатора %s к MCC (%s) в %s"

#: src/hed/libs/message/MCCLoader.cpp:445
#, c-format
msgid "Linking Plexer %s to Service (%s) under %s"
msgstr "Подцепление коммутатора %s к службе (%s) в %s"

#: src/hed/libs/message/MCCLoader.cpp:454
#, c-format
msgid "Linking Plexer %s to Plexer (%s) under %s"
msgstr "Подцепление коммутатора %s к коммутатору (%s) в %s"

#: src/hed/libs/message/MCCLoader.cpp:460
#, c-format
msgid "Plexer (%s) - next %s(%s) has no target"
msgstr "Коммутатор (%s) - следующий %s(%s) не содержит назначения"
target" msgstr "Коммутатор (%s) - Ñледующий %s(%s) не Ñодержит назначениÑ" #: src/hed/libs/message/Plexer.cpp:31 #, c-format msgid "Bad label: \"%s\"" msgstr "ÐŸÐ»Ð¾Ñ…Ð°Ñ Ð¼ÐµÑ‚ÐºÐ°: \"%s\"" #: src/hed/libs/message/Plexer.cpp:47 #, c-format msgid "Operation on path \"%s\"" msgstr "ДейÑтвие над путём \"%s\"" #: src/hed/libs/message/Plexer.cpp:60 #, c-format msgid "No next MCC or Service at path \"%s\"" msgstr "Ðе найдено больше MCC или Ñлужб в пути \"%s\"" #: src/hed/libs/message/Service.cpp:35 #, c-format msgid "Security processing/check for '%s' failed: %s" msgstr "Сбой обработки/проверки безопаÑноÑти Ð´Ð»Ñ '%s': %s" #: src/hed/libs/message/Service.cpp:41 #, c-format msgid "Security processing/check for '%s' passed" msgstr "Обработка/проверка параметров доÑтупа '%s' завершилаÑÑŒ уÑпехом" #: src/hed/libs/otokens/jwse.cpp:55 #, c-format msgid "JWSE::Input: token: %s" msgstr "JWSE::Input: токен: %s" #: src/hed/libs/otokens/jwse.cpp:75 #, c-format msgid "JWSE::Input: header: %s" msgstr "JWSE::Input: заголовок: %s" #: src/hed/libs/otokens/jwse.cpp:101 #, c-format msgid "JWSE::Input: JWS content: %s" msgstr "JWSE::Input: Ñодержимое JWS: %s" #: src/hed/libs/otokens/jwse.cpp:111 msgid "JWSE::Input: JWS: token too young" msgstr "JWSE::Input: JWS: токен Ñлишком Ñвежий" #: src/hed/libs/otokens/jwse.cpp:120 msgid "JWSE::Input: JWS: token too old" msgstr "JWSE::Input: JWS: токен Ñлишком Ñтарый" #: src/hed/libs/otokens/jwse.cpp:131 #, c-format msgid "JWSE::Input: JWS: signature algorithm: %s" msgstr "JWSE::Input: JWS: алгоритм подпиÑи: %s" #: src/hed/libs/otokens/jwse.cpp:174 #, fuzzy, c-format msgid "JWSE::Input: JWS: signature algorithn not supported: %s" msgstr "JWSE::Input: JWS: алгоритм подпиÑи: %s" #: src/hed/libs/otokens/jwse.cpp:192 msgid "JWSE::Input: JWS: signature verification failed" msgstr "JWSE::Input: JWS: Ñбой Ð¿Ð¾Ð´Ñ‚Ð²ÐµÑ€Ð¶Ð´ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñи" #: src/hed/libs/otokens/jwse.cpp:198 msgid "JWSE::Input: JWE: not supported yet" msgstr "JWSE::Input: JWE: пока не поддерживаетÑÑ" #: src/hed/libs/otokens/jwse_ecdsa.cpp:21 msgid "JWSE::VerifyECDSA: missing key" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:25 msgid "JWSE::VerifyECDSA: wrong signature size" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:34 #, fuzzy msgid "JWSE::VerifyECDSA: failed to create ECDSA signature" msgstr "VOMS: не удалоÑÑŒ подтвердить подпиÑÑŒ Ñертификата атрибута" #: src/hed/libs/otokens/jwse_ecdsa.cpp:41 #, fuzzy msgid "JWSE::VerifyECDSA: failed to parse signature" msgstr "VOMS: не удалоÑÑŒ подтвердить подпиÑÑŒ Ñертификата атрибута" #: src/hed/libs/otokens/jwse_ecdsa.cpp:47 #, fuzzy, c-format msgid "JWSE::VerifyECDSA: failed to assign ECDSA signature: %i" msgstr "VOMS: не удалоÑÑŒ подтвердить подпиÑÑŒ Ñертификата атрибута" #: src/hed/libs/otokens/jwse_ecdsa.cpp:56 #, fuzzy msgid "JWSE::VerifyECDSA: failed to create EVP context" msgstr "Ðе удалоÑÑŒ зарезервировать памÑть Ð´Ð»Ñ ÐºÐ¾Ð½Ñ‚ÐµÐºÑта анализатора" #: src/hed/libs/otokens/jwse_ecdsa.cpp:61 #, c-format msgid "JWSE::VerifyECDSA: failed to recognize digest: %s" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:66 #, c-format msgid "JWSE::VerifyECDSA: failed to initialize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:72 #, c-format msgid "JWSE::VerifyECDSA: failed to add message to hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:80 #, c-format msgid "JWSE::VerifyECDSA: failed to finalize hash: %i" msgstr "" #: src/hed/libs/otokens/jwse_ecdsa.cpp:87 #, c-format msgid "JWSE::VerifyECDSA: failed to verify: %i" msgstr "" #: 
#: src/hed/libs/otokens/jwse_ecdsa.cpp:96
msgid "JWSE::SignECDSA: missing key"
msgstr ""

#: src/hed/libs/otokens/jwse_ecdsa.cpp:104
#, fuzzy
msgid "JWSE::SignECDSA: failed to create EVP context"
msgstr "Не удалось создать контекст для экспорта"

#: src/hed/libs/otokens/jwse_ecdsa.cpp:109
#, c-format
msgid "JWSE::SignECDSA: failed to recognize digest: %s"
msgstr ""

#: src/hed/libs/otokens/jwse_ecdsa.cpp:114
#, c-format
msgid "JWSE::SignECDSA: failed to initialize hash: %i"
msgstr ""

#: src/hed/libs/otokens/jwse_ecdsa.cpp:120
#, fuzzy, c-format
msgid "JWSE::SignECDSA: failed to add message to hash: %i"
msgstr "Не удалось добавить LFN-GUID в RLS: %s"

#: src/hed/libs/otokens/jwse_ecdsa.cpp:128
#, c-format
msgid "JWSE::SignECDSA: failed to finalize hash: %i"
msgstr ""

#: src/hed/libs/otokens/jwse_ecdsa.cpp:135
#, fuzzy
msgid "JWSE::SignECDSA: failed to create ECDSA signature"
msgstr "VOMS: не удалось подтвердить подпись сертификата атрибута"

#: src/hed/libs/otokens/jwse_ecdsa.cpp:143
#, fuzzy
msgid "JWSE::SignECDSA: failed to parse signature"
msgstr "VOMS: не удалось подтвердить подпись сертификата атрибута"

#: src/hed/libs/otokens/jwse_ecdsa.cpp:150
#, c-format
msgid "JWSE::SignECDSA: wrong signature size: %i + %i"
msgstr ""

#: src/hed/libs/otokens/jwse_ecdsa.cpp:156
msgid "JWSE::SignECDSA: wrong signature size written"
msgstr ""

#: src/hed/libs/otokens/jwse_keys.cpp:273
msgid "JWSE::ExtractPublicKey: x5c key"
msgstr "JWSE::ExtractPublicKey: ключ x5c"

#: src/hed/libs/otokens/jwse_keys.cpp:281
msgid "JWSE::ExtractPublicKey: jwk key"
msgstr "JWSE::ExtractPublicKey: ключ jwk"

#: src/hed/libs/otokens/jwse_keys.cpp:288
msgid "JWSE::ExtractPublicKey: external jwk key"
msgstr "JWSE::ExtractPublicKey: внешний ключ jwk"

#: src/hed/libs/otokens/jwse_keys.cpp:315
#, fuzzy, c-format
msgid "JWSE::ExtractPublicKey: deleting outdated info: %s"
msgstr "JWSE::ExtractPublicKey: извлечение ключа jwk из: %s"

#: src/hed/libs/otokens/jwse_keys.cpp:344
#, fuzzy, c-format
msgid "JWSE::ExtractPublicKey: fetching jws key from %s"
msgstr "JWSE::ExtractPublicKey: извлечение ключа jwk из: %s"

#: src/hed/libs/otokens/jwse_keys.cpp:372
msgid "JWSE::ExtractPublicKey: no supported key"
msgstr "JWSE::ExtractPublicKey: нет поддерживаемого ключа"

#: src/hed/libs/otokens/jwse_keys.cpp:375
msgid "JWSE::ExtractPublicKey: key parsing error"
msgstr "JWSE::ExtractPublicKey: ошибка разбора ключа"

#: src/hed/libs/otokens/openid_metadata.cpp:40
#: src/hed/libs/otokens/openid_metadata.cpp:45
#, c-format
msgid "Input: metadata: %s"
msgstr "Ввод: метаданные: %s"

#: src/hed/libs/otokens/openid_metadata.cpp:438
#, c-format
msgid "Fetch: response code: %u %s"
msgstr "Извлечение: код отклика: %u %s"

#: src/hed/libs/otokens/openid_metadata.cpp:440
#, c-format
msgid "Fetch: response body: %s"
msgstr "Извлечение: тело отклика: %s"

#: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:141
#, c-format
msgid "Can not load ARC evaluator object: %s"
msgstr "Невозможно подгрузить объект интерпретатора ARC: %s"

#: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:192
#, c-format
msgid "Can not load ARC request object: %s"
msgstr "Невозможно подгрузить объект запроса ARC: %s"

#: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:233
#, c-format
msgid "Can not load policy object: %s"
msgstr "Невозможно подгрузить объект политик: %s"

#: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:281
msgid "Can not load policy object"
msgstr "Невозможно подгрузить объект политик"

#: src/hed/libs/security/ArcPDP/EvaluatorLoader.cpp:329
msgid "Can not load request object"
msgstr "Невозможно подгрузить объект запроса"
object" msgstr "Ðевозможно подгрузить объект запроÑа" #: src/hed/libs/security/ArcPDP/PolicyParser.cpp:119 msgid "Can not generate policy object" msgstr "Ðевозможно Ñоздать объект правил доÑтупа" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:37 #, c-format msgid "Id= %s,Type= %s,Issuer= %s,Value= %s" msgstr "Id= %s,Тип= %s,Издатель= %s,Значение= %s" #: src/hed/libs/security/ArcPDP/attr/RequestAttribute.cpp:40 #, c-format msgid "No Attribute exists, which can deal with type: %s" msgstr "Ðе ÑущеÑтвует атрибутов, ÑпоÑобных трактовать Ñтот тип: %s" #: src/hed/mcc/http/MCCHTTP.cpp:189 #, c-format msgid "HTTP Error: %d %s" msgstr "Ошибка HTTP: %d %s" #: src/hed/mcc/http/MCCHTTP.cpp:270 msgid "Cannot create http payload" msgstr "Ðе удалоÑÑŒ Ñоздать нагрузку http" #: src/hed/mcc/http/MCCHTTP.cpp:353 msgid "No next element in the chain" msgstr "ОтÑутÑтвует Ñледующий Ñлемент цепи" #: src/hed/mcc/http/MCCHTTP.cpp:362 #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:253 msgid "next element of the chain returned error status" msgstr "Ñледующий Ñлемент цепи возвратил ÑÑ‚Ð°Ñ‚ÑƒÑ Ð¾ÑˆÐ¸Ð±ÐºÐ¸" #: src/hed/mcc/http/MCCHTTP.cpp:371 msgid "next element of the chain returned no payload" msgstr "Ñледующий Ñлемент в цепочке возвратил пуÑтую нагрузку" #: src/hed/mcc/http/MCCHTTP.cpp:383 msgid "next element of the chain returned invalid/unsupported payload" msgstr "" "Ñледующий Ñлемент в цепи возвратил недопуÑтимую или неподдерживаемую нагрузку" #: src/hed/mcc/http/MCCHTTP.cpp:465 msgid "Error to flush output payload" msgstr "Ошибка ÑброÑа иÑходÑщей нагрузки" #: src/hed/mcc/http/PayloadHTTP.cpp:306 #, c-format msgid "<< %s" msgstr "<< %s" #: src/hed/mcc/http/PayloadHTTP.cpp:355 src/hed/mcc/http/PayloadHTTP.cpp:457 #, c-format msgid "< %s" msgstr "< %s" #: src/hed/mcc/http/PayloadHTTP.cpp:576 msgid "Failed to parse HTTP header" msgstr "Сбой разбора заголовка HTTP" #: src/hed/mcc/http/PayloadHTTP.cpp:837 msgid "Invalid HTTP object can't produce result" msgstr "ÐедопуÑтимый объект HTTP не может дать результат" #: src/hed/mcc/http/PayloadHTTP.cpp:969 #, c-format msgid "> %s" msgstr "> %s" #: src/hed/mcc/http/PayloadHTTP.cpp:994 msgid "Failed to write header to output stream" msgstr "Сбой при запиÑи заголовка в выходной поток" #: src/hed/mcc/http/PayloadHTTP.cpp:1019 src/hed/mcc/http/PayloadHTTP.cpp:1025 #: src/hed/mcc/http/PayloadHTTP.cpp:1031 src/hed/mcc/http/PayloadHTTP.cpp:1041 #: src/hed/mcc/http/PayloadHTTP.cpp:1053 src/hed/mcc/http/PayloadHTTP.cpp:1058 #: src/hed/mcc/http/PayloadHTTP.cpp:1063 src/hed/mcc/http/PayloadHTTP.cpp:1071 #: src/hed/mcc/http/PayloadHTTP.cpp:1078 msgid "Failed to write body to output stream" msgstr "Сбой при запиÑи тела в выходной поток" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:31 msgid "Skipping service: no ServicePath found!" msgstr "Ð¡ÐµÑ€Ð²Ð¸Ñ Ð¿Ñ€Ð¾Ð¿ÑƒÑкаетÑÑ: отÑутÑтвует ServicePath!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:37 msgid "Skipping service: no SchemaPath found!" msgstr "Ð¡ÐµÑ€Ð²Ð¸Ñ Ð¿Ñ€Ð¾Ð¿ÑƒÑкаетÑÑ: отÑутÑтвует SchemaPath!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:89 msgid "Parser Context creation failed!" msgstr "Ðе удалоÑÑŒ Ñоздать контекÑÑ‚ анализатора!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:98 msgid "Cannot parse schema!" msgstr "Ðевозможно интерпретировать Ñхему!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:110 msgid "Empty payload!" msgstr "ПуÑÑ‚Ð°Ñ Ð½Ð°Ð³Ñ€ÑƒÐ·ÐºÐ°!" #: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:119 msgid "Could not convert payload!" msgstr "Ðевозможно преобразовать нагрузку!" 
#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:125
#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:212
msgid "Could not create PayloadSOAP!"
msgstr "Не удалось создать PayloadSOAP!"

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:196
msgid "Empty input payload!"
msgstr "Пустая нагрузка на входе!"

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:205
msgid "Could not convert incoming payload!"
msgstr "Не удалось преобразовать входную информацию!"

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:232
msgid "Missing schema! Skipping validation..."
msgstr "Схема отсутствует! Сверка пропускается..."

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:237
msgid "Could not validate message!"
msgstr "Не удалось подтвердить достоверность сообщения!"

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:245
#: src/hed/mcc/soap/MCCSOAP.cpp:238 src/hed/mcc/soap/MCCSOAP.cpp:252
#: src/hed/mcc/soap/MCCSOAP.cpp:282
msgid "empty next chain element"
msgstr "следующий элемент в цепи пустой"

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:257
#: src/hed/mcc/soap/MCCSOAP.cpp:298
msgid "next element of the chain returned empty payload"
msgstr "следующий элемент в цепи возвратил пустую нагрузку"

#: src/hed/mcc/msgvalidator/MCCMsgValidator.cpp:265
msgid "next element of the chain returned invalid payload"
msgstr "следующий элемент в цепи возвратил недопустимую нагрузку"

#: src/hed/mcc/soap/MCCSOAP.cpp:223
msgid "empty input payload"
msgstr "пустая нагрузка на входе"

#: src/hed/mcc/soap/MCCSOAP.cpp:233
#, c-format
msgid "MIME is not suitable for SOAP: %s"
msgstr "MIME не подходит для SOAP: %s"

#: src/hed/mcc/soap/MCCSOAP.cpp:247
msgid "incoming message is not SOAP"
msgstr "входящее сообщение не в формате SOAP"

#: src/hed/mcc/soap/MCCSOAP.cpp:274
#, c-format
msgid "Security check failed in SOAP MCC for incoming message: %s"
msgstr "Сбой проверки безопасности в SOAP MCC для входящего сообщения: %s"

#: src/hed/mcc/soap/MCCSOAP.cpp:290
#, c-format
msgid "next element of the chain returned error status: %s"
msgstr "следующий элемент цепи возвратил статус ошибки: %s"

#: src/hed/mcc/soap/MCCSOAP.cpp:309
msgid "next element of the chain returned unknown payload - passing through"
msgstr "следующий элемент в цепи возвратил неопознанную нагрузку - пропускается"

#: src/hed/mcc/soap/MCCSOAP.cpp:314 src/hed/mcc/soap/MCCSOAP.cpp:330
#, c-format
msgid "Security check failed in SOAP MCC for outgoing message: %s"
msgstr "Сбой проверки безопасности в SOAP MCC для исходящего сообщения: %s"

#: src/hed/mcc/soap/MCCSOAP.cpp:384
msgid "Security check failed in SOAP MCC for outgoing message"
msgstr "Не прошла проверка безопасности в SOAP MCC для исходящего сообщения"

#: src/hed/mcc/soap/MCCSOAP.cpp:437
msgid "Security check failed in SOAP MCC for incoming message"
msgstr "Не прошла проверка безопасности в SOAP MCC для входящего сообщения"

#: src/hed/mcc/tcp/MCCTCP.cpp:82
msgid "Missing Port in Listen element"
msgstr "В элементе Listen отсутствует номер порта (Port)"

#: src/hed/mcc/tcp/MCCTCP.cpp:91
msgid "Version in Listen element can't be recognized"
msgstr "Версия в элементе Listen не опознана"

#: src/hed/mcc/tcp/MCCTCP.cpp:100
#, c-format
msgid "Failed to obtain local address for port %s - %s"
msgstr "Не удалось получить локальный адрес для порта %s - %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:102
#, c-format
msgid "Failed to obtain local address for %s:%s - %s"
msgstr "Не удалось получить локальный адрес для %s:%s - %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:109
#, c-format
msgid "Trying to listen on TCP port %s(%s)"
msgstr "Попытка прослушать порт TCP %s(%s)"
#: src/hed/mcc/tcp/MCCTCP.cpp:111
#, c-format
msgid "Trying to listen on %s:%s(%s)"
msgstr "Попытка прослушать %s:%s(%s)"

#: src/hed/mcc/tcp/MCCTCP.cpp:117
#, c-format
msgid "Failed to create socket for listening at TCP port %s(%s): %s"
msgstr "Не удалось создать сокет для прослушки порта TCP %s(%s): %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:119
#, c-format
msgid "Failed to create socket for listening at %s:%s(%s): %s"
msgstr "Не удалось создать сокет для прослушки %s:%s(%s): %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:134
#, c-format
msgid ""
"Failed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at "
"same port"
msgstr "Не удалось ограничить сокет под IPv6 на порте TCP %s - может привести к ошибкам для IPv4 по этому же порту"

#: src/hed/mcc/tcp/MCCTCP.cpp:136
#, c-format
msgid ""
"Failed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same "
"port"
msgstr "Не удалось ограничить сокет под IPv6 на %s:%s - может привести к ошибкам для IPv4 по этому же порту"

#: src/hed/mcc/tcp/MCCTCP.cpp:144
#, c-format
msgid "Failed to bind socket for TCP port %s(%s): %s"
msgstr "Не удалось связать сокет с портом TCP %s(%s): %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:146
#, c-format
msgid "Failed to bind socket for %s:%s(%s): %s"
msgstr "Не удалось связать сокет с %s:%s(%s): %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:161
#, c-format
msgid "Failed to listen at TCP port %s(%s): %s"
msgstr "Не удалось прослушать порт TCP %s(%s): %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:163
#, c-format
msgid "Failed to listen at %s:%s(%s): %s"
msgstr "Не удалось прослушать %s:%s(%s): %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:180
#, c-format
msgid "Listening on TCP port %s(%s)"
msgstr "Прослушивается порт TCP %s(%s)"

#: src/hed/mcc/tcp/MCCTCP.cpp:182
#, c-format
msgid "Listening on %s:%s(%s)"
msgstr "Прослушивается %s:%s(%s)"

#: src/hed/mcc/tcp/MCCTCP.cpp:189
#, c-format
msgid "Failed to start listening on any address for %s:%s"
msgstr "Не удалось начать прослушивание ни по какому адресу для %s:%s"

#: src/hed/mcc/tcp/MCCTCP.cpp:191
#, c-format
msgid "Failed to start listening on any address for %s:%s(IPv%s)"
msgstr "Не удалось начать прослушивание ни по какому адресу для %s:%s(IPv%s)"

#: src/hed/mcc/tcp/MCCTCP.cpp:197
msgid "No listening ports initiated"
msgstr "Не инициализированы прослушивающие порты"

#: src/hed/mcc/tcp/MCCTCP.cpp:208
msgid "dropped"
msgstr "игнорируется"

#: src/hed/mcc/tcp/MCCTCP.cpp:208
msgid "put on hold"
msgstr "приостановлен"

#: src/hed/mcc/tcp/MCCTCP.cpp:208
#, c-format
msgid "Setting connections limit to %i, connections over limit will be %s"
msgstr "Предельное количество соединений выставляется на %i, соединения сверх предела будут переведены в состояние %s"

#: src/hed/mcc/tcp/MCCTCP.cpp:212
msgid "Failed to start thread for listening"
msgstr "Не удалось запустить поток для прослушивания"

#: src/hed/mcc/tcp/MCCTCP.cpp:245
msgid "Failed to start thread for communication"
msgstr "Не удалось запустить поток для обмена информацией"

#: src/hed/mcc/tcp/MCCTCP.cpp:271
msgid "Failed while waiting for connection request"
msgstr "Сбой при ожидании запроса на соединение"

#: src/hed/mcc/tcp/MCCTCP.cpp:293
msgid "Failed to accept connection request"
msgstr "Не удалось принять запрос на соединение"

#: src/hed/mcc/tcp/MCCTCP.cpp:302
msgid "Too many connections - dropping new one"
msgstr "Слишком много соединений - новое отклонено"

#: src/hed/mcc/tcp/MCCTCP.cpp:309
"Too many connections - waiting for old to close" msgstr "Слишком много Ñоединений - ожидание Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ñтарых" #: src/hed/mcc/tcp/MCCTCP.cpp:548 msgid "next chain element called" msgstr "вызван Ñледующий Ñлемент в цепи" #: src/hed/mcc/tcp/MCCTCP.cpp:563 msgid "Only Raw Buffer payload is supported for output" msgstr "Ð”Ð»Ñ Ð²Ñ‹Ð²Ð¾Ð´Ð° поддерживаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ неформатированный буфер" #: src/hed/mcc/tcp/MCCTCP.cpp:571 src/hed/mcc/tcp/MCCTCP.cpp:670 #: src/hed/mcc/tls/MCCTLS.cpp:561 msgid "Failed to send content of buffer" msgstr "Ðе удалоÑÑŒ отправить Ñодержимое буфера" #: src/hed/mcc/tcp/MCCTCP.cpp:583 msgid "TCP executor is removed" msgstr "ИÑполнитель TCP удалён" #: src/hed/mcc/tcp/MCCTCP.cpp:585 #, c-format msgid "Sockets do not match on exit %i != %i" msgstr "ÐеÑовпадение Ñокетов при завершении %i != %i" #: src/hed/mcc/tcp/MCCTCP.cpp:606 msgid "No Connect element specified" msgstr "Ðе задан Ñлемент Connect" #: src/hed/mcc/tcp/MCCTCP.cpp:612 msgid "Missing Port in Connect element" msgstr "Ð’ Ñлементе Connect отÑутÑтвует номер порта (Port)" #: src/hed/mcc/tcp/MCCTCP.cpp:618 msgid "Missing Host in Connect element" msgstr "Ð’ Ñлементе Connect отÑутÑтвует название узла (Host)" #: src/hed/mcc/tcp/MCCTCP.cpp:646 msgid "TCP client process called" msgstr "Вызван процеÑÑ TCP клиента" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:65 #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:81 #, c-format msgid "Failed to resolve %s (%s)" msgstr "Сбой при разрешении %s (%s)" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:91 #, c-format msgid "Trying to connect %s(%s):%d" msgstr "Попытка ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%d" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:95 #, c-format msgid "Failed to create socket for connecting to %s(%s):%d - %s" msgstr "Ðе удалоÑÑŒ Ñоздать Ñокет Ð´Ð»Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%d - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:107 #, c-format msgid "" "Failed to get TCP socket options for connection to %s(%s):%d - timeout won't " "work - %s" msgstr "" "Ðе удалоÑÑŒ получить параметры TCP-Ñокета Ð´Ð»Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%d - " "прерывание по времени не будет работать - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:114 #, c-format msgid "Failed to connect to %s(%s):%i - %s" msgstr "Ðе удалоÑÑŒ уÑтановить Ñоединение Ñ %s(%s):%i - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:124 #, c-format msgid "Timeout connecting to %s(%s):%i - %i s" msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%i - %i Ñ" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:132 #, c-format msgid "Failed while waiting for connection to %s(%s):%i - %s" msgstr "Сбой при ожидании ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%i - %s" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:142 #, c-format msgid "Failed to connect to %s(%s):%i" msgstr "Ðе удалоÑÑŒ уÑтановить Ñоединение Ñ %s(%s):%i" #: src/hed/mcc/tcp/PayloadTCPSocket.cpp:198 msgid "" "Received message out-of-band (not critical, ERROR level is just for " "debugging purposes)" msgstr "" "Получено Ñообщение вне полоÑÑ‹ (некритично, уровень ERROR лишь Ð´Ð»Ñ Ð¾Ñ‚Ð»Ð°Ð´ÐºÐ¸)" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:201 msgid "Using CA default location" msgstr "" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:210 #, fuzzy, c-format msgid "Using CA file: %s" msgstr "ИÑпользуетÑÑ Ñ„Ð°Ð¹Ð» личного ключа: %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:212 #, fuzzy, c-format msgid "Using CA dir: %s" msgstr "ИÑпользуетÑÑ Ð°Ð»Ð³Ð¾Ñ€Ð¸Ñ‚Ð¼ ÑˆÐ¸Ñ„Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %s" #: src/hed/mcc/tls/ConfigTLSMCC.cpp:289 #, c-format msgid "Using DH parameters from file: %s" msgstr "ИÑпользуютÑÑ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ñ‹ DH из файла %s" #: 
#: src/hed/mcc/tls/ConfigTLSMCC.cpp:292
msgid "Failed to open file with DH parameters for reading"
msgstr "Не удалось открыть на чтение файл с параметрами DH"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:297
msgid "Failed to read file with DH parameters"
msgstr "Сбой при чтении файла с параметрами DH"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:300
msgid "Failed to apply DH parameters"
msgstr "Не удалось применить параметры DH"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:302
msgid "DH parameters applied"
msgstr "Применены параметры DH"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:316
#, c-format
msgid "Using curve with NID: %u"
msgstr "Используется кривая с NID %u"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:319
msgid "Failed to generate EC key"
msgstr "Сбой при создании ключа EC"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:322
msgid "Failed to apply ECDH parameters"
msgstr "Сбой применения параметров ECDH"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:324
msgid "ECDH parameters applied"
msgstr "Применены параметры ECDH"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:330
#, c-format
msgid "Using cipher list: %s"
msgstr "Используемый список шифров: %s"

#: src/hed/mcc/tls/ConfigTLSMCC.cpp:354
#, c-format
msgid "Using protocol options: 0x%x"
msgstr "Используемые опции протокола: 0x%x"

#: src/hed/mcc/tls/DelegationCollector.cpp:39
msgid "Independent proxy - no rights granted"
msgstr "Независимая доверенность - права не выделены"

#: src/hed/mcc/tls/DelegationCollector.cpp:43
msgid "Proxy with all rights inherited"
msgstr "Доверенность со всеми унаследованными правами"

#: src/hed/mcc/tls/DelegationCollector.cpp:51
msgid "Proxy with empty policy - fail on unrecognized policy"
msgstr "Доверенность с незаполненной политикой - отказ по неизвестной политике"

#: src/hed/mcc/tls/DelegationCollector.cpp:56
#, c-format
msgid "Proxy with specific policy: %s"
msgstr "Доверенность с ограниченной политикой: %s"

#: src/hed/mcc/tls/DelegationCollector.cpp:60
msgid "Proxy with ARC Policy"
msgstr "Доверенность с политикой ARC"

#: src/hed/mcc/tls/DelegationCollector.cpp:62
msgid "Proxy with unknown policy - fail on unrecognized policy"
msgstr "Доверенность с неизвестной политикой - отказ по неизвестной политике"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:116
#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:167
#, c-format
msgid "Was expecting %s at the beginning of \"%s\""
msgstr "Ожидалось %s в начале \"%s\""

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:121
#, c-format
msgid "We only support CAs in Globus signing policy - %s is not supported"
msgstr "Мы поддерживаем только CA в политике подписи Globus - %s не поддерживается"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:126
#, c-format
msgid "We only support X509 CAs in Globus signing policy - %s is not supported"
msgstr "Мы поддерживаем только центры сертификации X509 в политике подписи Globus - %s не поддерживается"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:131
msgid "Missing CA subject in Globus signing policy"
msgstr "Субъект центра сертификации отсутствует в политике подписи Globus"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:141
msgid "Negative rights are not supported in Globus signing policy"
msgstr "Отрицательные права не поддерживаются политикой подписи Globus"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:145
#, c-format
msgid "Unknown rights in Globus signing policy - %s"
msgstr "Неизвестные права в политике подписи Globus - %s"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:150
#, c-format
msgid ""
"Only globus rights are supported in Globus signing policy - %s is not "
"supported"
msgstr "Мы поддерживаем только права globus в политике подписи Globus - %s не поддерживается"
#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:155
#, c-format
msgid ""
"Only signing rights are supported in Globus signing policy - %s is not "
"supported"
msgstr "Мы поддерживаем только права подписи в политике подписи Globus - %s не поддерживается"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:172
#, c-format
msgid ""
"We only support subjects conditions in Globus signing policy - %s is not "
"supported"
msgstr "Мы поддерживаем только условия субъекта в политике подписи Globus - %s не поддерживается"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:177
#, c-format
msgid ""
"We only support globus conditions in Globus signing policy - %s is not "
"supported"
msgstr "Мы поддерживаем только условия globus в политике подписи Globus - %s не поддерживается"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:183
msgid "Missing condition subjects in Globus signing policy"
msgstr "Условия субъекта отсутствуют в политике подписи Globus"

#: src/hed/mcc/tls/GlobusSigningPolicy.cpp:265
msgid "Unknown element in Globus signing policy"
msgstr "Неизвестный элемент в политике подписи Globus"

#: src/hed/mcc/tls/MCCTLS.cpp:237
msgid "Critical VOMS attribute processing failed"
msgstr "Сбой обработки критического атрибута VOMS"

#: src/hed/mcc/tls/MCCTLS.cpp:245
msgid "VOMS attribute validation failed"
msgstr "Сбой проверки атрибутов VOMS"

#: src/hed/mcc/tls/MCCTLS.cpp:247
msgid "VOMS attribute is ignored due to processing/validation error"
msgstr "Атрибут VOMS игнорируется из-за ошибки обработки или проверки"

#: src/hed/mcc/tls/MCCTLS.cpp:439 src/hed/mcc/tls/MCCTLS.cpp:578
#: src/hed/mcc/tls/MCCTLS.cpp:597
#, c-format
msgid "Failed to establish connection: %s"
msgstr "Сбой установления соединения: %s"

#: src/hed/mcc/tls/MCCTLS.cpp:458 src/hed/mcc/tls/MCCTLS.cpp:540
#, c-format
msgid "Peer name: %s"
msgstr "Имя контакта: %s"

#: src/hed/mcc/tls/MCCTLS.cpp:460 src/hed/mcc/tls/MCCTLS.cpp:542
#, c-format
msgid "Identity name: %s"
msgstr "Выделенное имя: %s"

#: src/hed/mcc/tls/MCCTLS.cpp:462 src/hed/mcc/tls/MCCTLS.cpp:544
#, c-format
msgid "CA name: %s"
msgstr "Имя сертификационного агентства: %s"

#: src/hed/mcc/tls/MCCTLS.cpp:469
msgid "Failed to process security attributes in TLS MCC for incoming message"
msgstr "Не удалось обработать атрибуты безопасности в TLS MCC для входящего сообщения"

#: src/hed/mcc/tls/MCCTLS.cpp:477
msgid "Security check failed in TLS MCC for incoming message"
msgstr "Не прошла проверка безопасности в TLS MCC для входящего сообщения"

#: src/hed/mcc/tls/MCCTLS.cpp:550
msgid "Security check failed for outgoing TLS message"
msgstr "Не прошла проверка безопасности для исходящего сообщения TLS"

#: src/hed/mcc/tls/MCCTLS.cpp:582
msgid "Security check failed for incoming TLS message"
msgstr "Не прошла проверка безопасности для входящего сообщения TLS"

#: src/hed/mcc/tls/PayloadTLSMCC.cpp:64
#, c-format
msgid "Ignoring verification error due to insecure connection allowed: %s"
msgstr ""

#: src/hed/mcc/tls/PayloadTLSMCC.cpp:79
msgid ""
"Failed to allocate memory for certificate subject while matching policy."
msgstr "Не удалось зарезервировать память для имени субъекта сертификата при сверке с политиками."

#: src/hed/mcc/tls/PayloadTLSMCC.cpp:83
msgid ""
"Failed to retrieve link to TLS stream. Additional policy matching is skipped."
msgstr "" "Ðе удалоÑÑŒ получить ÑÑылку на поток TLS. Ð”Ð¾Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñверка политики " "пропуÑкаетÑÑ." #: src/hed/mcc/tls/PayloadTLSMCC.cpp:85 msgid "" "Skipping additional policy matching due to insecure connections allowed." msgstr "" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:109 #, c-format msgid "Certificate %s already expired" msgstr "Срок дейÑÑ‚Ð²Ð¸Ñ Ñертификата %s уже иÑтёк" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:117 #, c-format msgid "Certificate %s will expire in %s" msgstr "Срок дейÑÑ‚Ð²Ð¸Ñ Ñертификата %s иÑтечёт через %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:138 msgid "Failed to store application data" msgstr "Ðе удалоÑÑŒ запиÑать данные приложениÑ" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:166 msgid "Failed to retrieve application data from OpenSSL" msgstr "Ðе удалоÑÑŒ получить данные о приложении через OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:238 src/hed/mcc/tls/PayloadTLSMCC.cpp:338 msgid "Can not create the SSL Context object" msgstr "Ðе удалоÑÑŒ Ñоздать объект SSL Context" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:251 src/hed/mcc/tls/PayloadTLSMCC.cpp:358 msgid "Can't set OpenSSL verify flags" msgstr "Ðе удалоÑÑŒ выÑтавить метки Ð¿Ð¾Ð´Ñ‚Ð²ÐµÑ€Ð¶Ð´ÐµÐ½Ð¸Ñ OpenSSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:270 src/hed/mcc/tls/PayloadTLSMCC.cpp:372 msgid "Can not create the SSL object" msgstr "Ðе удалоÑÑŒ Ñоздать объект SSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:280 msgid "Faile to assign hostname extension" msgstr "Ðе удалоÑÑŒ приÑвоить раÑширение hostname" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:294 msgid "Failed to establish SSL connection" msgstr "Ðе удалоÑÑŒ уÑтановить Ñоединение SSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:298 src/hed/mcc/tls/PayloadTLSMCC.cpp:388 #, c-format msgid "Using cipher: %s" msgstr "ИÑпользуетÑÑ Ð°Ð»Ð³Ð¾Ñ€Ð¸Ñ‚Ð¼ ÑˆÐ¸Ñ„Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %s" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:384 msgid "Failed to accept SSL connection" msgstr "Ðе удалоÑÑŒ принÑть Ñоединение SSL" #: src/hed/mcc/tls/PayloadTLSMCC.cpp:446 #, c-format msgid "Failed to shut down SSL: %s" msgstr "Ðе удалоÑÑŒ прервать Ñоединение SSL: %s" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:47 msgid "" "ArcAuthZ: failed to initiate all PDPs - this instance will be non-functional" msgstr "" "ArcAuthZ: не удалоÑÑŒ инициализировать вÑе PDP - Ñтот процеÑÑ Ð±ÑƒÐ´ÐµÑ‚ нерабочим" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:70 msgid "PDP: missing name attribute" msgstr "PDP: отÑутÑтвует атрибут имени" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:74 #, c-format msgid "PDP: %s (%s)" msgstr "PDP: %s (%s)" #: src/hed/shc/arcauthzsh/ArcAuthZ.cpp:79 #, c-format msgid "PDP: %s (%s) can not be loaded" msgstr "PDP: %s (%s) не может быть подгружен" #: src/hed/shc/arcpdp/ArcEvaluationCtx.cpp:251 #, c-format msgid "There are %d RequestItems" msgstr "Обнаружено %d Ñлементов запроÑа" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:60 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:55 msgid "Can not parse classname for FunctionFactory from configuration" msgstr "Ðе удалоÑÑŒ определить Ð¸Ð¼Ñ ÐºÐ»Ð°ÑÑа Ð´Ð»Ñ FunctionFactory из наÑтроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:68 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:63 msgid "Can not parse classname for AttributeFactory from configuration" msgstr "Ðе удалоÑÑŒ определить Ð¸Ð¼Ñ ÐºÐ»Ð°ÑÑа Ð´Ð»Ñ AttributeFactory из наÑтроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:76 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:71 msgid "" "Can not parse classname for CombiningAlgorithmFactory from configuration" msgstr "" "Ðе удалоÑÑŒ определить Ð¸Ð¼Ñ ÐºÐ»Ð°ÑÑа Ð´Ð»Ñ CombiningAlgorithmFactory из наÑтроек" #: 
src/hed/shc/arcpdp/ArcEvaluator.cpp:84 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:79 msgid "Can not parse classname for Request from configuration" msgstr "Ðе удалоÑÑŒ определить Ð¸Ð¼Ñ ÐºÐ»Ð°ÑÑа Ð´Ð»Ñ Request из наÑтроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:93 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:88 msgid "Can not parse classname for Policy from configuration" msgstr "Ðе удалоÑÑŒ определить Ð¸Ð¼Ñ ÐºÐ»Ð°ÑÑа Ð´Ð»Ñ Policy из наÑтроек" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:105 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:100 msgid "Can not dynamically produce AttributeFactory" msgstr "Ðе удалоÑÑŒ динамичеÑки Ñоздать AttributeFactory" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:110 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:105 msgid "Can not dynamically produce FnFactory" msgstr "Ðе удалоÑÑŒ динамичеÑки Ñоздать FnFactory" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:115 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:110 msgid "Can not dynamically produce AlgFacroty" msgstr "Ðе удалоÑÑŒ динамичеÑки Ñоздать AlgFacroty" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:126 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:31 #: src/hed/shc/gaclpdp/GACLEvaluator.cpp:37 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:121 msgid "Can not create PolicyStore object" msgstr "Ðе удалоÑÑŒ Ñоздать объект PolicyStore" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:177 src/hed/shc/test.cpp:183 #: src/hed/shc/testinterface_arc.cpp:102 src/hed/shc/testinterface_xacml.cpp:54 #: src/hed/shc/xacmlpdp/XACMLEvaluator.cpp:172 msgid "Can not dynamically produce Request" msgstr "Ðе удалоÑÑŒ динамичеÑки Ñоздать Request" #: src/hed/shc/arcpdp/ArcEvaluator.cpp:261 #, c-format msgid "Result value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %d" msgstr "Результат (0=ДопуÑк, 1=Отказ, 2=Ðеопределённый, 3=Ðеприменим): %d" #: src/hed/shc/arcpdp/ArcPDP.cpp:109 msgid "Can not find ArcPDPContext" msgstr "Ðе обнаружен ArcPDPContext" #: src/hed/shc/arcpdp/ArcPDP.cpp:138 src/hed/shc/xacmlpdp/XACMLPDP.cpp:116 msgid "Evaluator does not support loadable Combining Algorithms" msgstr "Обработчик не поддерживает подгружаемые алгоритмы комбинированиÑ" #: src/hed/shc/arcpdp/ArcPDP.cpp:142 src/hed/shc/xacmlpdp/XACMLPDP.cpp:120 #, c-format msgid "Evaluator does not support specified Combining Algorithm - %s" msgstr "Обработчик не поддерживает указанный алгоритм ÐºÐ¾Ð¼Ð±Ð¸Ð½Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ - %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:154 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:83 #: src/hed/shc/gaclpdp/GACLPDP.cpp:117 src/hed/shc/test.cpp:94 #: src/hed/shc/testinterface_arc.cpp:37 src/hed/shc/testinterface_xacml.cpp:37 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:132 msgid "Can not dynamically produce Evaluator" msgstr "Ðе удалоÑÑŒ динамичеÑки Ñоздать анализатор" #: src/hed/shc/arcpdp/ArcPDP.cpp:157 msgid "Evaluator for ArcPDP was not loaded" msgstr "Обработчик Ð´Ð»Ñ ArcPDP не был загружен" #: src/hed/shc/arcpdp/ArcPDP.cpp:164 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:56 #: src/hed/shc/gaclpdp/GACLPDP.cpp:127 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:88 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:142 src/tests/echo/echo.cpp:108 msgid "Missing security object in message" msgstr "Ð’ Ñообщении отÑутÑтвует объект авторизации" #: src/hed/shc/arcpdp/ArcPDP.cpp:172 src/hed/shc/arcpdp/ArcPDP.cpp:180 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:136 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:142 #: src/hed/shc/gaclpdp/GACLPDP.cpp:135 src/hed/shc/gaclpdp/GACLPDP.cpp:143 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:96 #: 
src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:104 #: src/tests/echo/echo.cpp:116 src/tests/echo/echo.cpp:123 msgid "Failed to convert security information to ARC request" msgstr "Ðе удалоÑÑŒ преобразовать информацию о защите в Ð·Ð°Ð¿Ñ€Ð¾Ñ ARC" #: src/hed/shc/arcpdp/ArcPDP.cpp:188 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:149 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:112 #, c-format msgid "ARC Auth. request: %s" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð°Ð²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ð¸ ARC: %s" #: src/hed/shc/arcpdp/ArcPDP.cpp:191 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:152 #: src/hed/shc/gaclpdp/GACLPDP.cpp:154 #: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:115 #: src/hed/shc/xacmlpdp/XACMLPDP.cpp:169 msgid "No requested security information was collected" msgstr "Ðе удалоÑÑŒ Ñобрать запрошенную информацию о безопаÑноÑти" #: src/hed/shc/arcpdp/ArcPDP.cpp:198 msgid "Not authorized by arc.pdp - failed to get response from Evaluator" msgstr "arc.pdp запретил доÑтуп - не удалоÑÑŒ получить отклик обработчика" #: src/hed/shc/arcpdp/ArcPDP.cpp:244 msgid "Authorized by arc.pdp" msgstr "Допущен через arc.pdp" #: src/hed/shc/arcpdp/ArcPDP.cpp:245 msgid "" "Not authorized by arc.pdp - some of the RequestItem elements do not satisfy " "Policy" msgstr "" "Ðет допуÑка от arc.pdp - некоторые Ñлементы RequestItem не удовлетворÑÑŽÑ‚ " "политике" #: src/hed/shc/arcpdp/ArcPolicy.cpp:56 src/hed/shc/arcpdp/ArcPolicy.cpp:70 #: src/hed/shc/gaclpdp/GACLPolicy.cpp:46 src/hed/shc/gaclpdp/GACLPolicy.cpp:59 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:48 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:64 msgid "Policy is empty" msgstr "ПуÑтые правила" #: src/hed/shc/arcpdp/ArcPolicy.cpp:114 #: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:115 #, c-format msgid "PolicyId: %s Alg inside this policy is:-- %s" msgstr "PolicyId: %s Внутренний алгоритм политики:-- %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:74 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:128 msgid "No delegation policies in this context and message - passing through" msgstr "" "Ð’ данном контекÑте и Ñообщении отÑутÑтвуют политики Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ - " "пропуÑкаетÑÑ" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:94 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:108 msgid "Failed to convert security information to ARC policy" msgstr "Ðе удалоÑÑŒ преобразовать информацию о безопаÑноÑти в политику ARC" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:115 #: src/hed/shc/delegationpdp/DelegationPDP.cpp:122 #, c-format msgid "ARC delegation policy: %s" msgstr "Политика Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ARC: %s" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:160 msgid "No authorization response was returned" msgstr "Ðе получен ответ о допуÑке" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:163 #, c-format msgid "There are %d requests, which satisfy at least one policy" msgstr "Обнаружены %d запроÑа, удовлетворÑющих Ñ…Ð¾Ñ‚Ñ Ð±Ñ‹ одной политике" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:182 msgid "Delegation authorization passed" msgstr "ÐÐ²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ðµ пройдена" #: src/hed/shc/delegationpdp/DelegationPDP.cpp:184 msgid "Delegation authorization failed" msgstr "ÐÐ²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð½Ð° делегирование не выдана" #: src/hed/shc/delegationsh/DelegationSH.cpp:63 msgid "" "Missing CertificatePath element or ProxyPath element, or " " is missing" msgstr "" "ОтÑутÑтвует Ñлемент CertificatePath или ProxyPath element, или " "" #: src/hed/shc/delegationsh/DelegationSH.cpp:68 msgid "" "Missing or empty KeyPath element, or is missing" msgstr "" 
"Элемент KeyPath отÑутÑтвует или пуÑÑ‚, либо отÑутÑтвует " "" #: src/hed/shc/delegationsh/DelegationSH.cpp:74 msgid "Missing or empty CertificatePath or CACertificatesDir element" msgstr "Элемент CertificatePath или CACertificatesDir отÑутÑтвует или пуÑÑ‚" #: src/hed/shc/delegationsh/DelegationSH.cpp:81 #, c-format msgid "Delegation role not supported: %s" msgstr "ÐÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ Ñ€Ð¾Ð»ÑŒ делегированиÑ: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:90 #, c-format msgid "Delegation type not supported: %s" msgstr "Ðеподдерживаемый тип делегированиÑ: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:115 msgid "Failed to acquire delegation context" msgstr "Ðе удалоÑÑŒ извлечь контекÑÑ‚ делегированиÑ" #: src/hed/shc/delegationsh/DelegationSH.cpp:143 #: src/hed/shc/delegationsh/DelegationSH.cpp:254 msgid "Can't create delegation context" msgstr "Ðе удалоÑÑŒ Ñоздать контекÑÑ‚ делегированиÑ" #: src/hed/shc/delegationsh/DelegationSH.cpp:149 msgid "Delegation handler with delegatee role starts to process" msgstr "Запущен обработчик Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ñ€Ð¾Ð»ÑŒÑŽ делегата" #: src/hed/shc/delegationsh/DelegationSH.cpp:152 #: src/services/a-rex/arex.cpp:478 src/services/candypond/CandyPond.cpp:526 #: src/services/data-staging/DataDeliveryService.cpp:648 msgid "process: POST" msgstr "процеÑÑ: POST" #: src/hed/shc/delegationsh/DelegationSH.cpp:159 #: src/services/a-rex/arex.cpp:485 src/services/candypond/CandyPond.cpp:535 #: src/services/data-staging/DataDeliveryService.cpp:657 #: src/services/wrappers/python/pythonwrapper.cpp:416 msgid "input is not SOAP" msgstr "ввод не в формате SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:166 #, c-format msgid "Delegation service: %s" msgstr "Служба делегированиÑ: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:181 #: src/hed/shc/delegationsh/DelegationSH.cpp:188 #: src/tests/client/test_ClientX509Delegation_ARC.cpp:55 #, c-format msgid "Can not get the delegation credential: %s from delegation service: %s" msgstr "" "Ðе удалоÑÑŒ получить делегированные параметры доÑтупа: %s от Ñлужбы " "делегированиÑ:%s" #: src/hed/shc/delegationsh/DelegationSH.cpp:204 #: src/hed/shc/delegationsh/DelegationSH.cpp:268 #, c-format msgid "Delegated credential identity: %s" msgstr "Отличительные признаки делегированных параметров доÑтупа: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:205 #, c-format msgid "" "The delegated credential got from delegation service is stored into path: %s" msgstr "" "Делегированные параметры доÑтупа полученные от Ñлужбы Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñаны " "в каталоге: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:218 msgid "The endpoint of delegation service should be configured" msgstr "ÐšÐ¾Ð½ÐµÑ‡Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° ÑервиÑа Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð´Ð¾Ð»Ð¶Ð½Ð° быть наÑтроена" #: src/hed/shc/delegationsh/DelegationSH.cpp:228 #: src/hed/shc/delegationsh/DelegationSH.cpp:340 msgid "Delegation handler with delegatee role ends" msgstr "Завершена обработка Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ñ€Ð¾Ð»ÑŒÑŽ делегата" #: src/hed/shc/delegationsh/DelegationSH.cpp:260 msgid "Delegation handler with delegator role starts to process" msgstr "Запущен обработчик Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ñ€Ð¾Ð»ÑŒÑŽ поручителÑ" #: src/hed/shc/delegationsh/DelegationSH.cpp:269 #, c-format msgid "The delegated credential got from path: %s" msgstr "Делегированные параметры доÑтупа извлечены из каталога: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:290 #, c-format msgid "Can not create delegation crendential to delegation service: %s" msgstr "Ðевозможно 
Ñоздать делегируемый документ Ð´Ð»Ñ Ñлужбы делегированию: %s" #: src/hed/shc/delegationsh/DelegationSH.cpp:328 msgid "output is not SOAP" msgstr "вывод не в формате SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:339 #, c-format msgid "" "Succeeded to send DelegationService: %s and DelegationID: %s info to peer " "service" msgstr "" "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ DelegationService: %s и DelegationID: %s уÑпешно отправлена " "партнёрÑкому ÑервиÑу" #: src/hed/shc/delegationsh/DelegationSH.cpp:345 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:230 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:101 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:94 msgid "Incoming Message is not SOAP" msgstr "ВходÑщее Ñообщение не в формате SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:352 #: src/hed/shc/samltokensh/SAMLTokenSH.cpp:353 #: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:123 #: src/hed/shc/x509tokensh/X509TokenSH.cpp:108 msgid "Outgoing Message is not SOAP" msgstr "ИÑходÑщее Ñообщение не ÑвлÑетÑÑ Ñообщением SOAP" #: src/hed/shc/delegationsh/DelegationSH.cpp:356 msgid "Delegation handler is not configured" msgstr "Обработчик Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð²Ð°Ð½Ð¸Ñ Ð½Ðµ наÑтроен" #: src/hed/shc/gaclpdp/GACLPDP.cpp:120 msgid "Evaluator for GACLPDP was not loaded" msgstr "Обработчик Ð´Ð»Ñ GACLPDP не был загружен" #: src/hed/shc/gaclpdp/GACLPDP.cpp:151 #, c-format msgid "GACL Auth. request: %s" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð°Ð²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ð¸ GACL: %s" #: src/hed/shc/gaclpdp/GACLPolicy.cpp:50 src/hed/shc/gaclpdp/GACLPolicy.cpp:63 msgid "Policy is not gacl" msgstr "Политика не в формате GACL" #: src/hed/shc/legacy/ConfigParser.cpp:13 msgid "Configuration file not specified" msgstr "Ðе указан файл наÑтроек" #: src/hed/shc/legacy/ConfigParser.cpp:18 #: src/hed/shc/legacy/ConfigParser.cpp:28 #: src/hed/shc/legacy/ConfigParser.cpp:33 msgid "Configuration file can not be read" msgstr "Ðевозможно прочеÑть файл наÑтроек" #: src/hed/shc/legacy/ConfigParser.cpp:43 #, c-format msgid "Configuration file is broken - block name is too short: %s" msgstr "Файл наÑтроек иÑпорчен - Ñлишком короткое название блока: %s" #: src/hed/shc/legacy/ConfigParser.cpp:47 #, c-format msgid "Configuration file is broken - block name does not end with ]: %s" msgstr "Файл наÑтроек иÑпорчен - название блока не заканчиваетÑÑ ]: %s" #: src/hed/shc/legacy/LegacyMap.cpp:39 src/hed/shc/legacy/LegacyPDP.cpp:119 msgid "Configuration file not specified in ConfigBlock" msgstr "Ðе указан файл наÑтроек в ConfigBlock" #: src/hed/shc/legacy/LegacyMap.cpp:48 src/hed/shc/legacy/LegacyPDP.cpp:128 msgid "BlockName is empty" msgstr "Ðе указан BlockName" #: src/hed/shc/legacy/LegacyMap.cpp:108 #, c-format msgid "Failed processing user mapping command: %s %s" msgstr "Сбой работы команды ÑоответÑÑ‚Ð²Ð¸Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ: %s %s" #: src/hed/shc/legacy/LegacyMap.cpp:114 #, c-format msgid "Failed to change mapping stack processing policy in: %s = %s" msgstr "Ðе удалоÑÑŒ изменить политики обработки Ñтека ÑоответÑтвий в: %s = %s" #: src/hed/shc/legacy/LegacyMap.cpp:179 msgid "LegacyMap: no configurations blocks defined" msgstr "LegacyMap: не заданы группы наÑтроек" #: src/hed/shc/legacy/LegacyMap.cpp:201 src/hed/shc/legacy/LegacyPDP.cpp:255 #, c-format msgid "" "LegacyPDP: there is no %s Sec Attribute defined. Probably ARC Legacy Sec " "Handler is not configured or failed." msgstr "" "LegacyPDP: атрибут безопаÑноÑти %s не задан. Возможно, обработчик " "безопаÑноÑти ARC Legacy не наÑтроен, или претерпел Ñбой." 
#: src/hed/shc/legacy/LegacyMap.cpp:206 src/hed/shc/legacy/LegacyPDP.cpp:260
msgid "LegacyPDP: ARC Legacy Sec Attribute not recognized."
msgstr "LegacyPDP: атрибут безопасности ARC Legacy не опознан."

#: src/hed/shc/legacy/LegacyPDP.cpp:138
#, c-format
msgid "Failed to parse configuration file %s"
msgstr "Сбой при разборе файла настроек %s"

#: src/hed/shc/legacy/LegacyPDP.cpp:144
#, c-format
msgid "Block %s not found in configuration file %s"
msgstr "Блок %s не обнаружен в файле настроек %s"

#: src/hed/shc/legacy/LegacySecHandler.cpp:40
#: src/hed/shc/legacy/LegacySecHandler.cpp:118
msgid "LegacySecHandler: configuration file not specified"
msgstr "LegacySecHandler: не указан файл настроек"

#: src/hed/shc/legacy/arc_lcas.cpp:149 src/hed/shc/legacy/arc_lcmaps.cpp:163
#, c-format
msgid "Failed to convert GSI credential to GSS credential (major: %d, minor: %d)"
msgstr "Не удалось преобразовать параметры доступа GSI в GSS (major: %d, minor: %d)"

#: src/hed/shc/legacy/arc_lcas.cpp:174 src/hed/shc/legacy/arc_lcmaps.cpp:188
msgid "Missing subject name"
msgstr "Отсутствует имя субъекта"

#: src/hed/shc/legacy/arc_lcas.cpp:179 src/hed/shc/legacy/arc_lcmaps.cpp:193
msgid "Missing path of credentials file"
msgstr "Отсутствует путь к файлу параметров доступа"

#: src/hed/shc/legacy/arc_lcas.cpp:185
msgid "Missing name of LCAS library"
msgstr "Отсутствует имя библиотеки LCAS"

#: src/hed/shc/legacy/arc_lcas.cpp:202
#, c-format
msgid "Can't load LCAS library %s: %s"
msgstr "Невозможно загрузить библиотеку LCAS %s: %s"

#: src/hed/shc/legacy/arc_lcas.cpp:212
#, c-format
msgid "Can't find LCAS functions in a library %s"
msgstr "Не удалось обнаружить функции LCAS в библиотеке %s"

#: src/hed/shc/legacy/arc_lcas.cpp:222
msgid "Failed to initialize LCAS"
msgstr "Сбой инициализации LCAS"

#: src/hed/shc/legacy/arc_lcas.cpp:237
msgid "Failed to terminate LCAS"
msgstr "Сбой остановки LCAS"

#: src/hed/shc/legacy/arc_lcmaps.cpp:199
msgid "Missing name of LCMAPS library"
msgstr "Отсутствует имя библиотеки LCMAPS"

#: src/hed/shc/legacy/arc_lcmaps.cpp:213
msgid "Can't read policy names"
msgstr "Невозможно прочесть названия политик"

#: src/hed/shc/legacy/arc_lcmaps.cpp:224
#, c-format
msgid "Can't load LCMAPS library %s: %s"
msgstr "Невозможно загрузить библиотеку LCMAPS %s: %s"

#: src/hed/shc/legacy/arc_lcmaps.cpp:236
#, c-format
msgid "Can't find LCMAPS functions in a library %s"
msgstr "Не удалось обнаружить функции LCMAPS в библиотеке %s"

#: src/hed/shc/legacy/arc_lcmaps.cpp:248
msgid "LCMAPS has lcmaps_run"
msgstr "LCMAPS содержит lcmaps_run"

#: src/hed/shc/legacy/arc_lcmaps.cpp:249
msgid "LCMAPS has getCredentialData"
msgstr "LCMAPS содержит getCredentialData"

#: src/hed/shc/legacy/arc_lcmaps.cpp:253
msgid "Failed to initialize LCMAPS"
msgstr "Сбой инициализации LCMAPS"

#: src/hed/shc/legacy/arc_lcmaps.cpp:293
#, c-format
msgid "LCMAPS returned invalid GID: %u"
msgstr "LCMAPS возвратил недопустимый GID: %u"

#: src/hed/shc/legacy/arc_lcmaps.cpp:296
msgid "LCMAPS did not return any GID"
msgstr "LCMAPS не возвратил никакого GID"

#: src/hed/shc/legacy/arc_lcmaps.cpp:299
#, c-format
msgid "LCMAPS returned UID which has no username: %u"
msgstr "LCMAPS возвратил UID, не соответствующий учётной записи: %u"

#: src/hed/shc/legacy/arc_lcmaps.cpp:302
#, c-format
msgid "LCMAPS returned invalid UID: %u"
msgstr "LCMAPS возвратил недопустимый UID: %u"

#: src/hed/shc/legacy/arc_lcmaps.cpp:305
msgid "LCMAPS did not return any UID"
msgstr "LCMAPS не возвратил никакого UID"

#: src/hed/shc/legacy/arc_lcmaps.cpp:314
msgid "Failed to terminate LCMAPS"
msgstr "Сбой остановки LCMAPS"

#: src/hed/shc/legacy/auth.cpp:35
#, c-format
msgid "Unexpected argument for 'all' rule - %s"
msgstr "Непредусмотренный аргумент для правила 'all' - %s"

#: src/hed/shc/legacy/auth.cpp:340
#, c-format
msgid "Credentials stored in temporary file %s"
msgstr "Параметры доступа сохранены во временном файле %s"

#: src/hed/shc/legacy/auth.cpp:349
#, c-format
msgid "Assigned to authorization group %s"
msgstr "Приписан к группе допуска %s"

#: src/hed/shc/legacy/auth.cpp:354
#, c-format
msgid "Assigned to userlist %s"
msgstr "Приписан к списку пользователей %s"

#: src/hed/shc/legacy/auth_file.cpp:22
#, c-format
msgid "Failed to read file %s"
msgstr "Сбой при чтении файла %s"

#: src/hed/shc/legacy/auth_otokens.cpp:33
msgid "Missing subject in configuration"
msgstr "В настройках отсутствует субъект"

#: src/hed/shc/legacy/auth_otokens.cpp:38
msgid "Missing issuer in configuration"
msgstr "В настройках отсутствует издатель"

#: src/hed/shc/legacy/auth_otokens.cpp:43
msgid "Missing audience in configuration"
msgstr "В настройках отсутствуют получатели"

#: src/hed/shc/legacy/auth_otokens.cpp:48
msgid "Missing scope in configuration"
msgstr "В настройках отсутствует контекст"

#: src/hed/shc/legacy/auth_otokens.cpp:53 src/hed/shc/legacy/auth_voms.cpp:47
msgid "Missing group in configuration"
msgstr "В настройках отсутствует группа"

#: src/hed/shc/legacy/auth_otokens.cpp:56
#, c-format
msgid "Rule: subject: %s"
msgstr "Правило: субъект: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:57
#, c-format
msgid "Rule: issuer: %s"
msgstr "Правило: издатель: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:58
#, c-format
msgid "Rule: audience: %s"
msgstr "Правило: получатели: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:59
#, c-format
msgid "Rule: scope: %s"
msgstr "Правило: контекст: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:60 src/hed/shc/legacy/auth_voms.cpp:66
#, c-format
msgid "Rule: group: %s"
msgstr "Правило: группа: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:63
#, c-format
msgid "Match issuer: %s"
msgstr "Соответствующий издатель: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:69
#, c-format
msgid "Matched: %s %s %s"
msgstr "Соответствие: %s %s %s"

#: src/hed/shc/legacy/auth_otokens.cpp:83 src/hed/shc/legacy/auth_voms.cpp:93
msgid "Matched nothing"
msgstr "Совпадений нет"

#: src/hed/shc/legacy/auth_otokens.cpp:176
#, fuzzy, c-format
msgid "Evaluate operator =: left: %s"
msgstr "Обработчик: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:177
#, fuzzy, c-format
msgid "Evaluate operator =: right: %s"
msgstr "Обработчик: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:182
#, c-format
msgid "Evaluate operator =: left from context: %s"
msgstr ""

#: src/hed/shc/legacy/auth_otokens.cpp:239
#, fuzzy, c-format
msgid "Operator token: %c"
msgstr "OTokens: Attr: токен: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:268
#, fuzzy, c-format
msgid "String token: %s"
msgstr "Состояние обслуживания: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:296
#, fuzzy, c-format
msgid "Quoted string token: %s"
msgstr "Не удалось создать контекст GSI: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:304
#, fuzzy, c-format
msgid "Sequence token parsing: %s"
msgstr "Длина отправленного токена: %i"

#: src/hed/shc/legacy/auth_otokens.cpp:420
#, fuzzy, c-format
msgid "Matching tokens expression: %s"
msgstr "Ошибка запуска сессии: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:424
#, fuzzy
msgid "Failed to parse expression"
msgstr "Не удалось скопировать расширение"

#: src/hed/shc/legacy/auth_otokens.cpp:435
#, c-format
msgid "%s: "
msgstr ""

#: src/hed/shc/legacy/auth_otokens.cpp:441
#, fuzzy, c-format
msgid " %s"
msgstr " %s"

#: src/hed/shc/legacy/auth_otokens.cpp:446
msgid "Expression matched"
msgstr ""

#: src/hed/shc/legacy/auth_otokens.cpp:451
#, fuzzy, c-format
msgid "Failed to evaluate expression: %s"
msgstr "Сбой добавления расширения: %s"

#: src/hed/shc/legacy/auth_otokens.cpp:454
msgid "Expression failed to matched"
msgstr ""

#: src/hed/shc/legacy/auth_plugin.cpp:79 src/hed/shc/legacy/unixmap.cpp:216
#, c-format
msgid "Plugin %s returned: %u"
msgstr "Подключаемый модуль %s ответил: %u"

#: src/hed/shc/legacy/auth_plugin.cpp:83 src/hed/shc/legacy/unixmap.cpp:220
#, c-format
msgid "Plugin %s timeout after %u seconds"
msgstr "Время ожидания подключаемого модуля %s истекло после %u секунд"

#: src/hed/shc/legacy/auth_plugin.cpp:86 src/hed/shc/legacy/unixmap.cpp:223
#, c-format
msgid "Plugin %s failed to start"
msgstr "Подключаемый модуль %s не смог запуститься"

#: src/hed/shc/legacy/auth_plugin.cpp:88 src/hed/shc/legacy/unixmap.cpp:225
#, c-format
msgid "Plugin %s printed: %s"
msgstr "Подключаемый модуль %s вывел на печать: %s"

#: src/hed/shc/legacy/auth_plugin.cpp:89 src/hed/shc/legacy/unixmap.cpp:213
#: src/hed/shc/legacy/unixmap.cpp:226
#, c-format
msgid "Plugin %s error: %s"
msgstr "Ошибка подключаемого модуля %s: %s"

#: src/hed/shc/legacy/auth_voms.cpp:42
msgid "Missing VO in configuration"
msgstr "В настройках отсутствует ВО"

#: src/hed/shc/legacy/auth_voms.cpp:52
msgid "Missing role in configuration"
msgstr "В настройках отсутствует роль"

#: src/hed/shc/legacy/auth_voms.cpp:57
msgid "Missing capabilities in configuration"
msgstr "В настройках отсутствуют возможности"

#: src/hed/shc/legacy/auth_voms.cpp:62
msgid "Too many arguments in configuration"
msgstr "Слишком много аргументов в настройках"

#: src/hed/shc/legacy/auth_voms.cpp:65
#, c-format
msgid "Rule: vo: %s"
msgstr "Правило: ВО: %s"

#: src/hed/shc/legacy/auth_voms.cpp:67
#, c-format
msgid "Rule: role: %s"
msgstr "Правило: роль: %s"

#: src/hed/shc/legacy/auth_voms.cpp:68
#, c-format
msgid "Rule: capabilities: %s"
msgstr "Правило: возможности: %s"

#: src/hed/shc/legacy/auth_voms.cpp:71
#, c-format
msgid "Match vo: %s"
msgstr "Совпадение ВО: %s"

#: src/hed/shc/legacy/auth_voms.cpp:78
#, c-format
msgid "Matched: %s %s %s %s"
msgstr "Соответствие: %s %s %s %s"

#: src/hed/shc/legacy/simplemap.cpp:70
#, c-format
msgid "SimpleMap: acquired new unmap time of %u seconds"
msgstr "SimpleMap: получено новое время рассогласования на %u секунд"

#: src/hed/shc/legacy/simplemap.cpp:72
msgid "SimpleMap: wrong number in unmaptime command"
msgstr "SimpleMap: недопустимое значение в команде unmaptime"

#: src/hed/shc/legacy/simplemap.cpp:85 src/hed/shc/legacy/simplemap.cpp:90
#, c-format
msgid "SimpleMap: %s"
msgstr "SimpleMap: %s"

#: src/hed/shc/legacy/unixmap.cpp:65 src/hed/shc/legacy/unixmap.cpp:70
msgid "Mapping policy option has empty value"
msgstr "Значение параметра политики присвоения пусто"

#: src/hed/shc/legacy/unixmap.cpp:80
#, c-format
msgid "Unsupported mapping policy action: %s"
msgstr "Неподдерживаемое действие политики соответствия: %s"

#: src/hed/shc/legacy/unixmap.cpp:91
#, c-format
msgid "Unsupported mapping policy option: %s"
msgstr "Неподдерживаемая опция политики соответствия: %s"

#: src/hed/shc/legacy/unixmap.cpp:103 src/hed/shc/legacy/unixmap.cpp:108
msgid "User name mapping command is empty"
msgstr "ПуÑÑ‚Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° в приÑвоении имени пользователÑ" #: src/hed/shc/legacy/unixmap.cpp:116 #, c-format msgid "User name mapping has empty authgroup: %s" msgstr "ПуÑÑ‚Ð°Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ð° authgroup в приÑвоении имени пользователÑ: %s" #: src/hed/shc/legacy/unixmap.cpp:147 #, c-format msgid "Unknown user name mapping rule %s" msgstr "ÐеизвеÑтное правило приÑÐ²Ð¾ÐµÐ½Ð¸Ñ Ð¸Ð¼ÐµÐ½Ð¸ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s" #: src/hed/shc/legacy/unixmap.cpp:156 src/hed/shc/legacy/unixmap.cpp:161 #: src/hed/shc/legacy/unixmap.cpp:177 src/hed/shc/legacy/unixmap.cpp:183 msgid "Plugin (user mapping) command is empty" msgstr "ПуÑÑ‚Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° в подключаемом модуле (приÑвоение имени пользователÑ)" #: src/hed/shc/legacy/unixmap.cpp:167 #, c-format msgid "Plugin (user mapping) timeout is not a number: %s" msgstr "" "Ðецифровое значение времени Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð² подключаемом модуле (приÑвоение имени " "пользователÑ): %s" #: src/hed/shc/legacy/unixmap.cpp:171 #, c-format msgid "Plugin (user mapping) timeout is wrong number: %s" msgstr "" "Ðеприемлемое значение времени Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð² подключаемом модуле (приÑвоение " "имени пользователÑ): %s" #: src/hed/shc/legacy/unixmap.cpp:204 #, c-format msgid "Plugin %s returned no username" msgstr "Подключаемый модуль %s не выдал имени пользователÑ" #: src/hed/shc/legacy/unixmap.cpp:209 #, c-format msgid "Plugin %s returned too much: %s" msgstr "Подключаемый модуль %s ответил Ñлишком длинно: %s" #: src/hed/shc/legacy/unixmap.cpp:212 #, c-format msgid "Plugin %s returned no mapping" msgstr "Подключаемый модуль %s не выдал привÑзки" #: src/hed/shc/legacy/unixmap.cpp:235 msgid "User subject match is missing user subject." msgstr "ОтÑутÑтвует Ñубъект Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð´Ð»Ñ ÑоответÑÑ‚Ð²Ð¸Ñ Ñубъекта." #: src/hed/shc/legacy/unixmap.cpp:239 #, c-format msgid "Mapfile at %s can't be opened." msgstr "Ðевозможно открыть пул пользователей в %s." #: src/hed/shc/legacy/unixmap.cpp:263 msgid "User pool mapping is missing user subject." msgstr "ОтÑутÑтвует Ñубъект Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² приÑвоении пула пользователей." #: src/hed/shc/legacy/unixmap.cpp:268 #, c-format msgid "User pool at %s can't be opened." msgstr "Ðевозможно открыть пул пользователей в %s." #: src/hed/shc/legacy/unixmap.cpp:273 #, c-format msgid "User pool at %s failed to perform user mapping." msgstr "" "Пул пользователей в %s не Ñмог уÑтановить ÑоответÑтвие Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ." #: src/hed/shc/legacy/unixmap.cpp:291 #, c-format msgid "User name direct mapping is missing user name: %s." msgstr "ОтÑутÑтвует Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² прÑмом приÑвоении имени: %s." 
#: src/hed/shc/otokens/OTokensSH.cpp:65
msgid "OTokens: Attr: message"
msgstr "OTokens: Attr: сообщение"

#: src/hed/shc/otokens/OTokensSH.cpp:70
#, c-format
msgid "OTokens: Attr: %s = %s"
msgstr "OTokens: Attr: %s = %s"

#: src/hed/shc/otokens/OTokensSH.cpp:75
#, c-format
msgid "OTokens: Attr: token: %s"
msgstr "OTokens: Attr: токен: %s"

#: src/hed/shc/otokens/OTokensSH.cpp:78
#, c-format
msgid "OTokens: Attr: token: bearer: %s"
msgstr "OTokens: Attr: токен: носитель: %s"

#: src/hed/shc/otokens/OTokensSH.cpp:193
msgid "OTokens: Handle"
msgstr "OTokens: Handle"

#: src/hed/shc/otokens/OTokensSH.cpp:195
msgid "OTokens: Handle: message"
msgstr "OTokens: Handle: сообщение"

#: src/hed/shc/otokens/OTokensSH.cpp:198
msgid "Failed to create OTokens security attributes"
msgstr "Не удалось создать атрибуты безопасности OTokens"

#: src/hed/shc/otokens/OTokensSH.cpp:202
#, fuzzy
msgid "OTokens: Handle: token was not present"
msgstr "OTokens: Handle: сообщение"

#: src/hed/shc/otokens/OTokensSH.cpp:206
#, c-format
msgid "OTokens: Handle: attributes created: subject = %s"
msgstr "OTokens: Handle: созданы атрибуты: субъект = %s"

#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:47
msgid "Creating a pdpservice client"
msgstr "Создаётся клиент pdpservice"

#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:81
msgid "Arc policy can not been carried by SAML2.0 profile of XACML"
msgstr "Политика ARC не может быть задана в профиле SAML2.0 XACML"

#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:153
#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:186
msgid "Policy Decision Service invocation failed"
msgstr "Не удалось запустить службу принятия решений по политикам"

#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:156
#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:189
#: src/tests/client/test_ClientInterface.cpp:88
#: src/tests/client/test_ClientSAML2SSO.cpp:81
#: src/tests/delegation/test_client_with_delegation_sechandler.cpp:100
#: src/tests/echo/test_clientinterface.cpp:82
#: src/tests/echo/test_clientinterface.cpp:149
#: src/tests/echo/test_clientinterface.py:32
msgid "There was no SOAP response"
msgstr "Нет ответа SOAP"

#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:171
#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:205
msgid "Authorized from remote pdp service"
msgstr "Допущен удалённой службой PDP"

#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:172
#: src/hed/shc/pdpserviceinvoker/PDPServiceInvoker.cpp:206
msgid "Unauthorized from remote pdp service"
msgstr "Не допущен удалённой службой PDP"

#: src/hed/shc/saml2sso_assertionconsumersh/SAML2SSO_AssertionConsumerSH.cpp:69
msgid "Can not get SAMLAssertion SecAttr from message context"
msgstr "Невозможно извлечь SAMLAssertion SecAttr из контекста сообщения"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:158
#: src/hed/shc/x509tokensh/X509TokenSH.cpp:44
msgid "Missing or empty CertificatePath element"
msgstr "Элемент CertificatePath отсутствует или пуст"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:163
#: src/hed/shc/x509tokensh/X509TokenSH.cpp:49
msgid "Missing or empty KeyPath element"
msgstr "Элемент KeyPath отсутствует или пуст"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:171
msgid "Both of CACertificatePath and CACertificatesDir elements missing or empty"
msgstr "Оба элемента CACertificatePath и CACertificatesDir отсутствуют или пусты"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:185
#: src/hed/shc/x509tokensh/X509TokenSH.cpp:61
msgid "Missing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authentication"
msgstr "Элемент CertificatePath или CACertificatesDir отсутствует или пуст; будет выполнена лишь проверка подписи, а не удостоверение подлинности сообщения"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:189
#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:65
#: src/hed/shc/x509tokensh/X509TokenSH.cpp:65
#, c-format
msgid "Processing type not supported: %s"
msgstr "Неподдерживаемый тип обработки: %s"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:209
msgid "Failed to parse SAML Token from incoming SOAP"
msgstr "Не удалось разобрать токен SAML из входящего документа SOAP"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:219
msgid "Failed to authenticate SAML Token inside the incoming SOAP"
msgstr "Не удалось установить подлинность токена SAML во входящем документе SOAP"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:222
msgid "Succeeded to authenticate SAMLToken"
msgstr "Успешная проверка подлинности токена SAML"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:287
#, c-format
msgid "No response from AA service %s"
msgstr "Нет ответа от сервера AA %s"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:291
#, c-format
msgid "SOAP Request to AA service %s failed"
msgstr "Ошибка запроса SOAP к серверу AA %s"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:299
msgid "Cannot find content under response soap message"
msgstr "Не удалось найти содержание ответного сообщения SOAP"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:303
msgid "Cannot find under response soap message:"
msgstr "Не удалось найти элемент в ответном сообщении SOAP:"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:320
msgid "The Response is not going to this end"
msgstr "Отклик адресован не этой стороне"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:327
msgid "The StatusCode is Success"
msgstr "StatusCode: Success"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:333
msgid "Succeeded to verify the signature under "
msgstr "Подпись успешно подтверждена"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:336
msgid "Failed to verify the signature under "
msgstr "Подпись не подтверждена"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:347
msgid "Failed to generate SAML Token for outgoing SOAP"
msgstr "Не удалось создать токен SAML для исходящего сообщения SOAP"

#: src/hed/shc/samltokensh/SAMLTokenSH.cpp:357
msgid "SAML Token handler is not configured"
msgstr "Обработчик токена SAML не настроен"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:28
#, c-format
msgid "Access list location: %s"
msgstr "Местонахождение списка доступа: %s"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:38
msgid "No policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration."
msgstr "Для simplelist.pdp не задан файл политик или DN; пожалуйста, задайте в настройках атрибут location или хотя бы один элемент DN для узла PDP simplelist."

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:41
#, c-format
msgid "Subject to match: %s"
msgstr "Субъект для сверки: %s"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:44
#, c-format
msgid "Policy subject: %s"
msgstr "Субъект политики: %s"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:46
#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:72
#, c-format
msgid "Authorized from simplelist.pdp: %s"
msgstr "Допущен через simplelist.pdp: %s"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:53
msgid "The policy file setup for simplelist.pdp does not exist, please check location attribute for simplelist PDP node in service configuration"
msgstr "Файл политик, заданный для simplelist.pdp, не существует; пожалуйста, проверьте атрибут location для узла PDP simplelist в настройках службы"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:60
#, c-format
msgid "Policy line: %s"
msgstr "Строка политики: %s"

#: src/hed/shc/simplelistpdp/SimpleListPDP.cpp:78
#, c-format
msgid "Not authorized from simplelist.pdp: %s"
msgstr "Не допущен через simplelist.pdp: %s"

#: src/hed/shc/test.cpp:27 src/hed/shc/testinterface_arc.cpp:26
#: src/hed/shc/testinterface_xacml.cpp:26
msgid "Start test"
msgstr "Начать тест"

#: src/hed/shc/test.cpp:101
msgid "Input request from a file: Request.xml"
msgstr "Запрос ввода из файла: Request.xml"

#: src/hed/shc/test.cpp:107 src/hed/shc/test.cpp:197
#: src/hed/shc/testinterface_arc.cpp:124
#, c-format
msgid "There is %d subjects, which satisfy at least one policy"
msgstr "Обнаружено %d субъектов, удовлетворяющих хотя бы одной политике"

#: src/hed/shc/test.cpp:121
#, c-format
msgid "Attribute Value (1): %s"
msgstr "Значение атрибута (1): %s"

#: src/hed/shc/test.cpp:132
msgid "Input request from code"
msgstr "Запрос ввода из программы"

#: src/hed/shc/test.cpp:211
#, c-format
msgid "Attribute Value (2): %s"
msgstr "Значение атрибута (2): %s"

#: src/hed/shc/testinterface_arc.cpp:75 src/hed/shc/testinterface_xacml.cpp:46
msgid "Can not dynamically produce Policy"
msgstr "Не удалось динамически создать Policy"

#: src/hed/shc/testinterface_arc.cpp:138
#, c-format
msgid "Attribute Value inside Subject: %s"
msgstr "Значение атрибута в субъекте: %s"

#: src/hed/shc/testinterface_arc.cpp:148
msgid "The request has passed the policy evaluation"
msgstr "Запрос прошёл сверку с политикой"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:43
msgid "Missing or empty PasswordSource element"
msgstr "Элемент PasswordSource отсутствует или пуст"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:54
#, c-format
msgid "Password encoding type not supported: %s"
msgstr "Тип шифрования пароля не поддерживается: %s"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:59
msgid "Missing or empty Username element"
msgstr "Элемент Username отсутствует или пуст"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:79
msgid "The payload of incoming message is empty"
msgstr "Во входящем сообщении отсутствует полезная нагрузка"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:84
msgid "Failed to cast PayloadSOAP from incoming payload"
msgstr "Не удалось создать PayloadSOAP из входящей нагрузки"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:89
msgid "Failed to parse Username Token from incoming SOAP"
msgstr "Не удалось разобрать токен Username из входящего документа SOAP"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:95
msgid "Failed to authenticate Username Token inside the incoming SOAP"
msgstr "Не удалось установить подлинность токена Username во входящем документе SOAP"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:98
msgid "Succeeded to authenticate UsernameToken"
msgstr "Успешная проверка подлинности UsernameToken"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:108
msgid "The payload of outgoing message is empty"
msgstr "В исходящем сообщении отсутствует полезная нагрузка"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:113
msgid "Failed to cast PayloadSOAP from outgoing payload"
msgstr "Не удалось создать PayloadSOAP из исходящей нагрузки"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:119
msgid "Failed to generate Username Token for outgoing SOAP"
msgstr "Не удалось создать токен имени пользователя для исходящего сообщения SOAP"

#: src/hed/shc/usernametokensh/UsernameTokenSH.cpp:127
msgid "Username Token handler is not configured"
msgstr "Обработчик токена Username не настроен"

#: src/hed/shc/x509tokensh/X509TokenSH.cpp:81
msgid "Failed to parse X509 Token from incoming SOAP"
msgstr "Не удалось разобрать токен X509 из входящего документа SOAP"

#: src/hed/shc/x509tokensh/X509TokenSH.cpp:85
msgid "Failed to verify X509 Token inside the incoming SOAP"
msgstr "Не удалось подтвердить токен X509 во входящем документе SOAP"

#: src/hed/shc/x509tokensh/X509TokenSH.cpp:89
msgid "Failed to authenticate X509 Token inside the incoming SOAP"
msgstr "Не удалось установить подлинность токена X509 во входящем документе SOAP"

#: src/hed/shc/x509tokensh/X509TokenSH.cpp:92
msgid "Succeeded to authenticate X509Token"
msgstr "Успешное подтверждение подлинности токена X509"

#: src/hed/shc/x509tokensh/X509TokenSH.cpp:102
msgid "Failed to generate X509 Token for outgoing SOAP"
msgstr "Не удалось создать токен X509 для исходящего сообщения SOAP"

#: src/hed/shc/x509tokensh/X509TokenSH.cpp:112
msgid "X509 Token handler is not configured"
msgstr "Обработчик токена X509 не настроен"

#: src/hed/shc/xacmlpdp/XACMLApply.cpp:29
msgid "Can not create function: FunctionId does not exist"
msgstr "Невозможно создать функцию: FunctionId не существует"

#: src/hed/shc/xacmlpdp/XACMLApply.cpp:33
#: src/hed/shc/xacmlpdp/XACMLTarget.cpp:40
#, c-format
msgid "Can not create function %s"
msgstr "Невозможно создать функцию %s"

#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:87
msgid "Can not find XACMLPDPContext"
msgstr "Невозможно найти XACMLPDPContext"

#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:135
msgid "Evaluator for XACMLPDP was not loaded"
msgstr "Обработчик для XACMLPDP не был загружен"

#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:150 src/hed/shc/xacmlpdp/XACMLPDP.cpp:158
msgid "Failed to convert security information to XACML request"
msgstr "Не удалось преобразовать информацию о защите в запрос XACML"

#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:166
#, c-format
msgid "XACML request: %s"
msgstr "Запрос XACML: %s"

#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:178
msgid "Authorized from xacml.pdp"
msgstr "Допущен через xacml.pdp"

#: src/hed/shc/xacmlpdp/XACMLPDP.cpp:179
msgid "UnAuthorized from xacml.pdp"
msgstr "Не допущен через xacml.pdp"

#: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:55
msgid "Can not find element with proper namespace"
msgstr "Невозможно найти элемент с нужным пространством имён"

#: src/hed/shc/xacmlpdp/XACMLPolicy.cpp:132
msgid "No target available inside the policy"
msgstr "Политика не содержит назначений"

#: src/hed/shc/xacmlpdp/XACMLRequest.cpp:34
msgid "Request is empty"
msgstr "Пустой запрос"

#: src/hed/shc/xacmlpdp/XACMLRequest.cpp:39
msgid "Can not find element with proper namespace"
msgstr "Невозможно найти элемент с нужным пространством имён"

#: src/hed/shc/xacmlpdp/XACMLRule.cpp:35
msgid "Invalid Effect"
msgstr "Недопустимый эффект"

#: src/hed/shc/xacmlpdp/XACMLRule.cpp:48
msgid "No target available inside the rule"
msgstr "Правило не содержит назначений"

#: src/libs/data-staging/DTR.cpp:81 src/libs/data-staging/DTR.cpp:85
#, c-format
msgid "Could not handle endpoint %s"
msgstr "Невозможно обработать точку входа %s"

#: src/libs/data-staging/DTR.cpp:95
msgid "Source is the same as destination"
msgstr "Источник идентичен назначению"

#: src/libs/data-staging/DTR.cpp:175
#, c-format
msgid "Invalid ID: %s"
msgstr "Неверный ID: %s"

#: src/libs/data-staging/DTR.cpp:212
#, c-format
msgid "%s->%s"
msgstr "%s->%s"

#: src/libs/data-staging/DTR.cpp:320
#, c-format
msgid "No callback for %s defined"
msgstr "Не определена функция обратного вызова для %s"

#: src/libs/data-staging/DTR.cpp:335
#, c-format
msgid "NULL callback for %s"
msgstr "Нулевой обратный вызов для %s"

#: src/libs/data-staging/DTR.cpp:338
#, c-format
msgid "Request to push to unknown owner - %u"
msgstr "Попытка передачи неизвестному владельцу - %u"

#: src/libs/data-staging/DTRList.cpp:216
#, c-format
msgid "Boosting priority from %i to %i due to incoming higher priority DTR"
msgstr "Увеличение приоритета с %i до %i в связи с входящим DTR более высокого приоритета"

#: src/libs/data-staging/DataDelivery.cpp:48
#: src/libs/data-staging/DataDelivery.cpp:72
msgid "Received invalid DTR"
msgstr "Принят неверный запрос DTR"

#: src/libs/data-staging/DataDelivery.cpp:54
#, c-format
msgid "Delivery received new DTR %s with source: %s, destination: %s"
msgstr "Служба доставки получила новый запрос DTR %s с источником %s и назначением %s"

#: src/libs/data-staging/DataDelivery.cpp:68
msgid "Received no DTR"
msgstr "Не получено запросов DTR"

#: src/libs/data-staging/DataDelivery.cpp:80
#, c-format
msgid "Cancelling DTR %s with source: %s, destination: %s"
msgstr "Отменяется DTR %s с источником: %s, назначением: %s"

#: src/libs/data-staging/DataDelivery.cpp:91
#, c-format
msgid "DTR %s requested cancel but no active transfer"
msgstr "DTR %s запросил прерывание, но активные передачи отсутствуют"

#: src/libs/data-staging/DataDelivery.cpp:147
#, c-format
msgid "Cleaning up after failure: deleting %s"
msgstr "Очистка после сбоя: уничтожается %s"

#: src/libs/data-staging/DataDelivery.cpp:188
#: src/libs/data-staging/DataDelivery.cpp:263
#: src/libs/data-staging/DataDelivery.cpp:303
#: src/libs/data-staging/DataDelivery.cpp:323
msgid "Failed to delete delivery object or deletion timed out"
msgstr "Сбой удаления объекта доставки, или истекло время ожидания удаления"

#: src/libs/data-staging/DataDelivery.cpp:254
#, c-format
msgid "Transfer finished: %llu bytes transferred %s"
msgstr "Передача завершена: %llu байтов передано %s"

#: src/libs/data-staging/DataDelivery.cpp:329
msgid "Data delivery loop exited"
msgstr "Прерван цикл размещения данных"

#: src/libs/data-staging/DataDeliveryLocalComm.cpp:45
msgid "No source defined"
msgstr "Источник не задан"

#: src/libs/data-staging/DataDeliveryLocalComm.cpp:49
msgid "No destination defined"
msgstr "Назначение не задано"

#: src/libs/data-staging/DataDeliveryLocalComm.cpp:157
#, c-format
msgid "Bad checksum format %s"
msgstr "Неверный формат контрольной суммы %s"

#: src/libs/data-staging/DataDeliveryLocalComm.cpp:196
#, c-format
msgid "Failed to run command: %s"
msgstr "Сбой иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:235 #, c-format msgid "DataDelivery: %s" msgstr "DataDelivery: %s" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:247 #, c-format msgid "DataStagingDelivery exited with code %i" msgstr "ПроцеÑÑ DataStagingDelivery завершилÑÑ Ñ ÐºÐ¾Ð´Ð¾Ð¼ %i" #: src/libs/data-staging/DataDeliveryLocalComm.cpp:266 #, c-format msgid "Transfer killed after %i seconds without communication" msgstr "ПереÑылка оборвана поÑле %i Ñекунд бездейÑтвиÑ" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:72 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:326 #, c-format msgid "Connecting to Delivery service at %s" msgstr "СоединÑемÑÑ Ñо Ñлужбой доÑтавки на %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:101 #, c-format msgid "Failed to set up credential delegation with %s" msgstr "Сбой уÑтановки Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð°Ð² доÑтупа Ñ %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:107 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:185 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:251 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:337 #, c-format msgid "" "Request:\n" "%s" msgstr "" "ЗапроÑ:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:113 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:343 #, c-format msgid "Could not connect to service %s: %s" msgstr "Ðе удалоÑÑŒ ÑоединитьÑÑ Ñо Ñлужбой %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:121 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:351 #, c-format msgid "No SOAP response from Delivery service %s" msgstr "Ðет ответа SOAP от Ñлужбы доÑтавки %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:126 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:204 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:278 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:357 #, c-format msgid "" "Response:\n" "%s" msgstr "" "Отзыв:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:135 #, c-format msgid "Failed to start transfer request: %s" msgstr "Сбой запуÑка запроÑа на передачу: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:142 #, c-format msgid "Bad format in XML response from service at %s: %s" msgstr "ÐедопуÑтимый формат отзыва XML от ÑервиÑа в %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:150 #, c-format msgid "Could not make new transfer request: %s: %s" msgstr "Ðевозможно Ñоздать новый Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¿ÐµÑ€ÐµÑылки: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:155 #, c-format msgid "Started remote Delivery at %s" msgstr "Запущена ÑƒÐ´Ð°Ð»Ñ‘Ð½Ð½Ð°Ñ Ñлужба доÑтавки на %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:192 #, c-format msgid "Failed to send cancel request: %s" msgstr "Сбой отправки запроÑа на прерывание: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:199 msgid "Failed to cancel: No SOAP response" msgstr "Сбой прерываниÑ: нет ответа SOAP" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:213 #, c-format msgid "Failed to cancel transfer request: %s" msgstr "Сбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа на передачу: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:220 #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:301 #, c-format msgid "Bad format in XML response: %s" msgstr "Ðеверный формат отклика XML: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:227 #, c-format msgid "Failed to cancel: %s" msgstr "Ошибка отмены: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:271 msgid "No SOAP response from 
delivery service" msgstr "Ðет ответа SOAP от Ñлужбы доÑтавки" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:292 #, c-format msgid "Failed to query state: %s" msgstr "Сбой опроÑа ÑоÑтоÑниÑ: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:366 #, c-format msgid "SOAP fault from delivery service at %s: %s" msgstr "Ошибка SOAP Ñлужбы доÑтавки на %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:374 #, c-format msgid "Bad format in XML response from delivery service at %s: %s" msgstr "Ðеверный формат отклика XML Ñлужбы доÑтавки на %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:382 #, c-format msgid "Error pinging delivery service at %s: %s: %s" msgstr "Ошибка ÑвÑзи Ñо Ñлужбой доÑтавки на %s: %s: %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:390 #, c-format msgid "Dir %s allowed at service %s" msgstr "Каталог %s допуÑкаетÑÑ Ð´Ð»Ñ Ñлужбы %s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:484 #, c-format msgid "" "DataDelivery log tail:\n" "%s" msgstr "" "ПоÑледние запиÑи журнала DataDelivery:\n" "%s" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:498 msgid "Failed locating credentials" msgstr "Сбой Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:503 msgid "Failed to initiate client connection" msgstr "Сбой запуÑка ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð¾Ð¼" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:509 msgid "Client connection has no entry point" msgstr "ОтÑутÑтвует точка входа в клиентÑкую цепь" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:518 msgid "Initiating delegation procedure" msgstr "Ð˜Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð¿Ñ€Ð¾Ñ†ÐµÐ´ÑƒÑ€Ñ‹ делегированиÑ" #: src/libs/data-staging/DataDeliveryRemoteComm.cpp:520 msgid "Failed to initiate delegation credentials" msgstr "Сбой инициализации параметров доÑтупа Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ" #: src/libs/data-staging/DataStagingDelivery.cpp:97 #, c-format msgid "%5u s: %10.1f kB %8.1f kB/s" msgstr "%5u Ñ: %10.1f кБ %8.1f кБ/Ñ" #: src/libs/data-staging/DataStagingDelivery.cpp:156 msgid "Unexpected arguments" msgstr "ÐепредуÑмотренные аргументы" #: src/libs/data-staging/DataStagingDelivery.cpp:159 msgid "Source URL missing" msgstr "ОтÑутÑтвует URL иÑточника" #: src/libs/data-staging/DataStagingDelivery.cpp:162 msgid "Destination URL missing" msgstr "ОтÑутÑтвует URL назначениÑ" #: src/libs/data-staging/DataStagingDelivery.cpp:166 #, c-format msgid "Source URL not valid: %s" msgstr "ÐедейÑтвительный URL иÑточника: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:170 #, c-format msgid "Destination URL not valid: %s" msgstr "ÐедейÑтвительный URL назначениÑ: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:235 #, c-format msgid "Unknown transfer option: %s" msgstr "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ файлов: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:272 #, c-format msgid "Source URL not supported: %s" msgstr "Ðеподдерживаемый URL иÑточника: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:277 #: src/libs/data-staging/DataStagingDelivery.cpp:299 msgid "No credentials supplied" msgstr "Ðе указаны параметры доÑтупа" #: src/libs/data-staging/DataStagingDelivery.cpp:294 #, c-format msgid "Destination URL not supported: %s" msgstr "Ðеподдерживаемый URL назначениÑ: %s" #: src/libs/data-staging/DataStagingDelivery.cpp:348 #, c-format msgid "Will calculate %s checksum" msgstr "Будет вычиÑлена ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма Ð´Ð»Ñ %s" #: src/libs/data-staging/DataStagingDelivery.cpp:359 msgid "Cannot use supplied --size option" 
msgstr "Ðевозможно иÑпользовать заÑвленную опцию --size" #: src/libs/data-staging/DataStagingDelivery.cpp:572 #, c-format msgid "Checksum mismatch between calculated checksum %s and source checksum %s" msgstr "" "ÐеÑовпадение вычиÑленной контрольной Ñуммы %s и контрольной Ñуммы иÑточника " "%s" #: src/libs/data-staging/DataStagingDelivery.cpp:582 #, c-format msgid "Failed cleaning up destination %s" msgstr "Ошибка очиÑтки цели %s" #: src/libs/data-staging/Processor.cpp:49 #: src/services/candypond/CandyPond.cpp:117 msgid "Error creating cache" msgstr "Ошибка при Ñоздании кÑша" #: src/libs/data-staging/Processor.cpp:73 #, c-format msgid "Forcing re-download of file %s" msgstr "ÐŸÑ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° файла %s" #: src/libs/data-staging/Processor.cpp:90 #, c-format msgid "Will wait around %is" msgstr "Ожидание порÑдка %i Ñек" #: src/libs/data-staging/Processor.cpp:109 #, c-format msgid "Force-checking source of cache file %s" msgstr "ÐŸÑ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° иÑточника кÑшированного файла %s" #: src/libs/data-staging/Processor.cpp:112 #, c-format msgid "Source check requested but failed: %s" msgstr "Проверка иÑточника запрошена, но не прошла: %s" #: src/libs/data-staging/Processor.cpp:132 msgid "Permission checking failed, will try downloading without using cache" msgstr "Сбой проверки прав доÑтупа, попытка загрузки без иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ ÐºÑша" #: src/libs/data-staging/Processor.cpp:162 #, c-format msgid "Will download to cache file %s" msgstr "Будет произведена загрузка в файл кÑша %s" #: src/libs/data-staging/Processor.cpp:183 msgid "Looking up source replicas" msgstr "ПоиÑк копий файла-иÑточника" #: src/libs/data-staging/Processor.cpp:205 #: src/libs/data-staging/Processor.cpp:432 msgid "Resolving destination replicas" msgstr "Обнаружение копий назначениÑ" #: src/libs/data-staging/Processor.cpp:222 msgid "No locations for destination different from source found" msgstr "Ðе найдено раÑположений Ð´Ð»Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ, отличающихÑÑ Ð¾Ñ‚ иÑточника" #: src/libs/data-staging/Processor.cpp:233 msgid "Pre-registering destination in index service" msgstr "ÐŸÑ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñ€ÐµÐ³Ð¸ÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð² каталоге" #: src/libs/data-staging/Processor.cpp:259 msgid "Resolving source replicas in bulk" msgstr "МаÑÑовое обнаружение копий иÑточника" #: src/libs/data-staging/Processor.cpp:273 #, c-format msgid "No replicas found for %s" msgstr "Ðе найдено копий Ð´Ð»Ñ %s" #: src/libs/data-staging/Processor.cpp:293 #, c-format msgid "Checking %s" msgstr "Проверка %s" #: src/libs/data-staging/Processor.cpp:302 #: src/libs/data-staging/Processor.cpp:360 msgid "Metadata of replica and index service differ" msgstr "Метаданные копии отличаютÑÑ Ð¾Ñ‚ тех, что в каталоге" #: src/libs/data-staging/Processor.cpp:310 #, c-format msgid "Failed checking source replica %s: %s" msgstr "Сбой проверки копии иÑточника %s: %s" #: src/libs/data-staging/Processor.cpp:336 msgid "Querying source replicas in bulk" msgstr "МаÑÑовый Ð¾Ð¿Ñ€Ð¾Ñ ÐºÐ¾Ð¿Ð¸Ð¹ иÑточника" #: src/libs/data-staging/Processor.cpp:348 #, c-format msgid "Failed checking source replica: %s" msgstr "Сбой проверки копии иÑточника: %s" #: src/libs/data-staging/Processor.cpp:354 msgid "Failed checking source replica" msgstr "Сбой проверки копии иÑточника" #: src/libs/data-staging/Processor.cpp:391 msgid "Overwrite requested - will pre-clean destination" msgstr "Запрошена перезапиÑÑŒ - назначение будет предварительно очищено" #: src/libs/data-staging/Processor.cpp:400 msgid "Finding 
existing destination replicas" msgstr "Обнаружение ÑущеÑтвующих копий назначениÑ" #: src/libs/data-staging/Processor.cpp:412 #, c-format msgid "Failed to delete replica %s: %s" msgstr "Сбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¸ %s: %s" #: src/libs/data-staging/Processor.cpp:426 #, c-format msgid "Unregistering %s" msgstr "УдалÑетÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ о %s" #: src/libs/data-staging/Processor.cpp:437 msgid "Pre-registering destination" msgstr "ÐŸÑ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñ€ÐµÐ³Ð¸ÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ" #: src/libs/data-staging/Processor.cpp:443 #, c-format msgid "Failed to pre-clean destination: %s" msgstr "Сбой предварительной очиÑтки назначениÑ: %s" #: src/libs/data-staging/Processor.cpp:452 #, fuzzy msgid "Destination already exists" msgstr "Такой файл уже ÑущеÑтвует" #: src/libs/data-staging/Processor.cpp:476 msgid "Preparing to stage source" msgstr "Подготовка к размещению файла-иÑточника" #: src/libs/data-staging/Processor.cpp:489 #, c-format msgid "Source is not ready, will wait %u seconds" msgstr "ИÑточник неготов, ÑÐ»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° через %u Ñек" #: src/libs/data-staging/Processor.cpp:495 msgid "No physical files found for source" msgstr "Ðе найдено реальных файлов иÑточника" #: src/libs/data-staging/Processor.cpp:513 msgid "Preparing to stage destination" msgstr "Подготовка к размещению назначениÑ" #: src/libs/data-staging/Processor.cpp:526 #, c-format msgid "Destination is not ready, will wait %u seconds" msgstr "Ðазначение неготово, ÑÐ»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° через %u Ñек" #: src/libs/data-staging/Processor.cpp:532 msgid "No physical files found for destination" msgstr "Ðе найдено реальных файлов Ð´Ð»Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ" #: src/libs/data-staging/Processor.cpp:558 msgid "Releasing source" msgstr "Ð¡Ð±Ñ€Ð¾Ñ Ð¸Ñточника" #: src/libs/data-staging/Processor.cpp:562 #, c-format msgid "There was a problem during post-transfer source handling: %s" msgstr "Обнаружена проблема при обÑлуживании иÑточника поÑле переÑылки: %s" #: src/libs/data-staging/Processor.cpp:567 msgid "Releasing destination" msgstr "Ð¡Ð±Ñ€Ð¾Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ" #: src/libs/data-staging/Processor.cpp:571 #, c-format msgid "" "There was a problem during post-transfer destination handling after error: %s" msgstr "" "Обнаружена проблема при обÑлуживании Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле переÑылки поÑле ÑбоÑ: " "%s" #: src/libs/data-staging/Processor.cpp:575 #, c-format msgid "Error with post-transfer destination handling: %s" msgstr "Ошибка обÑÐ»ÑƒÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле переÑылки: %s" #: src/libs/data-staging/Processor.cpp:597 #, fuzzy, c-format msgid "Finalising current replica %s" msgstr "Сбой проверки копии иÑточника: %s" #: src/libs/data-staging/Processor.cpp:617 msgid "Removing pre-registered destination in index service" msgstr "Отмена предварительной региÑтрации Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð² каталоге" #: src/libs/data-staging/Processor.cpp:620 #, c-format msgid "" "Failed to unregister pre-registered destination %s: %s. You may need to " "unregister it manually" msgstr "" "Ðе удалоÑÑŒ отменить предварительную региÑтрацию Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ %s: %s. Возможно, " "Вам придётÑÑ Ñделать Ñто вручную" #: src/libs/data-staging/Processor.cpp:626 msgid "Registering destination replica" msgstr "РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¸ назначениÑ" #: src/libs/data-staging/Processor.cpp:629 #, c-format msgid "Failed to register destination replica: %s" msgstr "Сбой региÑтрации копии назначениÑ: %s" #: src/libs/data-staging/Processor.cpp:632 #, c-format msgid "" "Failed to unregister pre-registered destination %s. 
#: src/libs/data-staging/Processor.cpp:662
msgid "Error creating cache. Stale locks may remain."
msgstr "Ошибка при создании кэша. Возможно, остались старые блокировки."

#: src/libs/data-staging/Processor.cpp:695
#, c-format
msgid "Linking/copying cached file to %s"
msgstr "Создание ссылки/копирование файла из кэша в %s"

#: src/libs/data-staging/Processor.cpp:716
#, c-format
msgid "Failed linking cache file to %s"
msgstr "Сбой создания ссылки на файл из кэша в %s"

#: src/libs/data-staging/Processor.cpp:720
#, c-format
msgid "Error linking cache file to %s."
msgstr "Ошибка создания ссылки на файл из кэша в %s."

#: src/libs/data-staging/Processor.cpp:741
#: src/libs/data-staging/Processor.cpp:748
msgid "Adding to bulk request"
msgstr "Добавление к массовому запросу"

#: src/libs/data-staging/Scheduler.cpp:174
#: src/libs/data-staging/Scheduler.cpp:181
msgid "source"
msgstr "источник"

#: src/libs/data-staging/Scheduler.cpp:174
#: src/libs/data-staging/Scheduler.cpp:181
msgid "destination"
msgstr "назначение"

#: src/libs/data-staging/Scheduler.cpp:174
#, c-format
msgid "Using next %s replica"
msgstr "Используется следующая копия (%s)"

#: src/libs/data-staging/Scheduler.cpp:181
#, c-format
msgid "No more %s replicas"
msgstr "Больше копий нет (%s)"

#: src/libs/data-staging/Scheduler.cpp:183
msgid "Will clean up pre-registered destination"
msgstr "Предварительно зарегистрированное назначение будет сброшено"

#: src/libs/data-staging/Scheduler.cpp:187
msgid "Will release cache locks"
msgstr "Будут отменены блокировки в кэше"

#: src/libs/data-staging/Scheduler.cpp:190
msgid "Moving to end of data staging"
msgstr "Заканчивается размещение данных"

#: src/libs/data-staging/Scheduler.cpp:199
#, c-format
msgid "Source is mapped to %s"
msgstr "Источник поставлен в соответствие %s"

#: src/libs/data-staging/Scheduler.cpp:203
msgid "Cannot link to source which can be modified, will copy instead"
msgstr "Невозможно создать ссылку на источник, который может измениться; будет сделана копия"

#: src/libs/data-staging/Scheduler.cpp:212
msgid "Cannot link to a remote destination. Will not use mapped URL"
msgstr "Невозможно создать ссылку на удалённое назначение. Приписанный URL не будет использован"

#: src/libs/data-staging/Scheduler.cpp:215
msgid "Linking mapped file"
msgstr "Создаётся символическая ссылка на соответствующий файл"

#: src/libs/data-staging/Scheduler.cpp:222
#, c-format
msgid "Failed to create link: %s. Will not use mapped URL"
msgstr "Сбой создания ссылки: %s. Приписанный URL не будет использован"
#: src/libs/data-staging/Scheduler.cpp:247
#, c-format
msgid "Scheduler received new DTR %s with source: %s, destination: %s, assigned to transfer share %s with priority %d"
msgstr "Планировщик получил новый запрос DTR %s с источником: %s, назначением: %s, приписан к доле %s с приоритетом %d"

#: src/libs/data-staging/Scheduler.cpp:255
msgid "File is not cacheable, was requested not to be cached or no cache available, skipping cache check"
msgstr "Файл либо не может быть кэширован, либо кэширование не было запрошено, либо кэша нет; пропускается проверка кэша"

#: src/libs/data-staging/Scheduler.cpp:261
msgid "File is cacheable, will check cache"
msgstr "Файл может быть кэширован, проверяется кэш"

#: src/libs/data-staging/Scheduler.cpp:264
#: src/libs/data-staging/Scheduler.cpp:289
#, c-format
msgid "File is currently being cached, will wait %is"
msgstr "Файл ещё кэшируется, ожидание %i сек"

#: src/libs/data-staging/Scheduler.cpp:283
msgid "Timed out while waiting for cache lock"
msgstr "Истекло время ожидания блокировки кэша"

#: src/libs/data-staging/Scheduler.cpp:293
msgid "Checking cache again"
msgstr "Кэш проверяется снова"

#: src/libs/data-staging/Scheduler.cpp:313
msgid "Destination file is in cache"
msgstr "Файл назначения записан в кэш"

#: src/libs/data-staging/Scheduler.cpp:317
msgid "Source and/or destination is index service, will resolve replicas"
msgstr "Источник и/или назначение является каталогом, будет произведён поиск копий"

#: src/libs/data-staging/Scheduler.cpp:320
msgid "Neither source nor destination are index services, will skip resolving replicas"
msgstr "Ни источник, ни назначение не являются каталогом, поиск копий не будет произведён"

#: src/libs/data-staging/Scheduler.cpp:331
msgid "Problem with index service, will release cache lock"
msgstr "Проблема с каталогом, кэш будет разблокирован"

#: src/libs/data-staging/Scheduler.cpp:335
msgid "Problem with index service, will proceed to end of data staging"
msgstr "Проблема с каталогом, переходим к завершению размещения данных"

#: src/libs/data-staging/Scheduler.cpp:345
msgid "Checking source file is present"
msgstr "Проверка наличия файла-источника"

#: src/libs/data-staging/Scheduler.cpp:353
msgid "Error with source file, moving to next replica"
msgstr "Ошибка в файле источника, пробуем другую копию"

#: src/libs/data-staging/Scheduler.cpp:375
#, c-format
msgid "Replica %s has long latency, trying next replica"
msgstr "У копии %s долгая задержка, пробуем следующую копию"

#: src/libs/data-staging/Scheduler.cpp:377
#, c-format
msgid "No more replicas, will use %s"
msgstr "Больше копий нет, будет использован файл %s"

#: src/libs/data-staging/Scheduler.cpp:380
#, c-format
msgid "Checking replica %s"
msgstr "Проверяется копия %s"

#: src/libs/data-staging/Scheduler.cpp:392
#, fuzzy
msgid "Pre-clean failed"
msgstr "Сбой очистки"

#: src/libs/data-staging/Scheduler.cpp:397
msgid "Pre-clean failed, will still try to copy"
msgstr "Сбой предварительной очистки, всё же попытаемся скопировать"

#: src/libs/data-staging/Scheduler.cpp:405
msgid "Source or destination requires staging"
msgstr "Источник или назначение требуют размещения с ленточного накопителя"

#: src/libs/data-staging/Scheduler.cpp:409
msgid "No need to stage source or destination, skipping staging"
msgstr "Не требуется размещение с ленточного накопителя ни источника, ни назначения; размещение пропускается"
#: src/libs/data-staging/Scheduler.cpp:439
msgid "Staging request timed out, will release request"
msgstr "Истекло время ожидания запроса на размещение, запрос будет отозван"

#: src/libs/data-staging/Scheduler.cpp:443
msgid "Querying status of staging request"
msgstr "Опрос состояния запроса на размещение"

#: src/libs/data-staging/Scheduler.cpp:452
msgid "Releasing requests"
msgstr "Сброс запросов"

#: src/libs/data-staging/Scheduler.cpp:477
msgid "DTR is ready for transfer, moving to delivery queue"
msgstr "DTR готов к пересылке, переводится в очередь на доставку"

#: src/libs/data-staging/Scheduler.cpp:492
#, c-format
msgid "Transfer failed: %s"
msgstr "Сбой передачи: %s"

#: src/libs/data-staging/Scheduler.cpp:502
msgid "Releasing request(s) made during staging"
msgstr "Отзыв запросов, сделанных при размещении"

#: src/libs/data-staging/Scheduler.cpp:505
msgid "Neither source nor destination were staged, skipping releasing requests"
msgstr "Ни источник, ни назначение не были размещены с ленточного накопителя, пропускается отмена запросов"

#: src/libs/data-staging/Scheduler.cpp:526
msgid "Trying next replica"
msgstr "Пробуем следующую копию"

#: src/libs/data-staging/Scheduler.cpp:531
msgid "unregister"
msgstr "дерегистрация"

#: src/libs/data-staging/Scheduler.cpp:531
msgid "register"
msgstr "регистрация"

#: src/libs/data-staging/Scheduler.cpp:530
#, c-format
msgid "Will %s in destination index service"
msgstr "Будет выполнена операция %s в каталоге назначения"

#: src/libs/data-staging/Scheduler.cpp:534
msgid "Destination is not index service, skipping replica registration"
msgstr "Назначение не является каталогом, пропускается регистрация копии"

#: src/libs/data-staging/Scheduler.cpp:547
msgid "Error registering replica, moving to end of data staging"
msgstr "Ошибка регистрации копии, переход к завершению размещения"

#: src/libs/data-staging/Scheduler.cpp:556
msgid "Will process cache"
msgstr "Будет обработан кэш"

#: src/libs/data-staging/Scheduler.cpp:560
msgid "File is not cacheable, skipping cache processing"
msgstr "Файл не может быть кэширован, пропускается обработка кэша"

#: src/libs/data-staging/Scheduler.cpp:574
msgid "Cancellation complete"
msgstr "Отмена завершена"

#: src/libs/data-staging/Scheduler.cpp:588
msgid "Will wait 10s"
msgstr "Ожидание 10 секунд"

#: src/libs/data-staging/Scheduler.cpp:594
msgid "Error in cache processing, will retry without caching"
msgstr "Ошибка при обработке кэша, попытаемся без кэширования"

#: src/libs/data-staging/Scheduler.cpp:603
msgid "Will retry without caching"
msgstr "Будет произведена повторная попытка без кэширования"

#: src/libs/data-staging/Scheduler.cpp:621
msgid "Proxy has expired"
msgstr "Срок действия доверенности истёк"

#: src/libs/data-staging/Scheduler.cpp:632
#, c-format
msgid "%i retries left, will wait until %s before next attempt"
msgstr "Осталось %i попыток, повторная попытка в %s"

#: src/libs/data-staging/Scheduler.cpp:648
msgid "Out of retries"
msgstr "Достигнут предел количества попыток"

#: src/libs/data-staging/Scheduler.cpp:650
msgid "Permanent failure"
msgstr "Устойчивый сбой"

#: src/libs/data-staging/Scheduler.cpp:656
msgid "Finished successfully"
msgstr "Успешное завершение"

#: src/libs/data-staging/Scheduler.cpp:666
msgid "Returning to generator"
msgstr "Возврат в генератор"
msgstr "Возврат в генератор" #: src/libs/data-staging/Scheduler.cpp:840 #, c-format msgid "File is smaller than %llu bytes, will use local delivery" msgstr "Файл меньше %llu байт, будет иÑпользована Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð´Ð¾Ñтавка" #: src/libs/data-staging/Scheduler.cpp:894 #, c-format msgid "Delivery service at %s can copy to %s" msgstr "Служба доÑтавки в %s может копировать в %s" #: src/libs/data-staging/Scheduler.cpp:902 #, c-format msgid "Delivery service at %s can copy from %s" msgstr "Служба доÑтавки в %s может копировать из %s" #: src/libs/data-staging/Scheduler.cpp:915 msgid "Could not find any useable delivery service, forcing local transfer" msgstr "" "Ðе удалоÑÑŒ обнаружить подходÑщую Ñлужбу доÑтавки, вынужденно иÑпользуетÑÑ " "Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð¿ÐµÑ€ÐµÑылка" #: src/libs/data-staging/Scheduler.cpp:931 #, c-format msgid "Not using delivery service at %s because it is full" msgstr "Служба доÑтавки на %s не иÑпользуетÑÑ Ð² ÑвÑзи Ñ Ð¿ÐµÑ€ÐµÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸ÐµÐ¼" #: src/libs/data-staging/Scheduler.cpp:958 #, c-format msgid "Not using delivery service %s due to previous failure" msgstr "Служба доÑтавки %s не иÑпользуетÑÑ Ð² ÑвÑзи Ñ Ð¿Ñ€ÐµÐ´Ñ‹Ð´ÑƒÑ‰Ð¸Ð¼ Ñбоем" #: src/libs/data-staging/Scheduler.cpp:968 msgid "No remote delivery services are useable, forcing local delivery" msgstr "" "Ðи одна из удалённых Ñлужб доÑтавки не подходит, вынужденно иÑпользуетÑÑ " "Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð´Ð¾Ñтавка" #: src/libs/data-staging/Scheduler.cpp:1172 msgid "Cancelling active transfer" msgstr "Отмена активных передач" #: src/libs/data-staging/Scheduler.cpp:1182 msgid "Processing thread timed out. Restarting DTR" msgstr "Вышло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿Ð¾Ñ‚Ð¾ÐºÐ° обработки. DTR перезапуÑкаетÑÑ" #: src/libs/data-staging/Scheduler.cpp:1250 msgid "Will use bulk request" msgstr "Будет иÑпользован маÑÑовый запроÑ" #: src/libs/data-staging/Scheduler.cpp:1272 msgid "No delivery endpoints available, will try later" msgstr "Ðет доÑтупных назначений Ð´Ð»Ñ Ð¾Ñ‚Ð³Ñ€ÑƒÐ·ÐºÐ¸, попытаемÑÑ Ð¿Ð¾Ð·Ð¶Ðµ" #: src/libs/data-staging/Scheduler.cpp:1291 msgid "Scheduler received NULL DTR" msgstr "Планировщик получил пуÑтой Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR" #: src/libs/data-staging/Scheduler.cpp:1301 msgid "Scheduler received invalid DTR" msgstr "Планировщик получил недопуÑтимый Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR" #: src/libs/data-staging/Scheduler.cpp:1390 msgid "Scheduler starting up" msgstr "ЗапуÑк планировщика" #: src/libs/data-staging/Scheduler.cpp:1391 msgid "Scheduler configuration:" msgstr "ÐšÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ð¿Ð»Ð°Ð½Ð¸Ñ€Ð¾Ð²Ñ‰Ð¸ÐºÐ°:" #: src/libs/data-staging/Scheduler.cpp:1392 #, c-format msgid " Pre-processor slots: %u" msgstr " МеÑÑ‚ Ð´Ð»Ñ Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ обработки: %u" #: src/libs/data-staging/Scheduler.cpp:1393 #, c-format msgid " Delivery slots: %u" msgstr " МеÑÑ‚ Ð´Ð»Ñ Ð¾Ñ‚Ð³Ñ€ÑƒÐ·ÐºÐ¸: %u" #: src/libs/data-staging/Scheduler.cpp:1394 #, c-format msgid " Post-processor slots: %u" msgstr " МеÑÑ‚ Ð´Ð»Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ обработки: %u" #: src/libs/data-staging/Scheduler.cpp:1395 #, c-format msgid " Emergency slots: %u" msgstr " МеÑÑ‚ Ð´Ð»Ñ Ñрочной обработки: %u" #: src/libs/data-staging/Scheduler.cpp:1396 #, c-format msgid " Prepared slots: %u" msgstr " Подготовленных меÑÑ‚: %u" #: src/libs/data-staging/Scheduler.cpp:1397 #, c-format msgid "" " Shares configuration:\n" "%s" msgstr "" " ÐšÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ ÐºÐ²Ð¾Ñ‚:\n" "%s" #: src/libs/data-staging/Scheduler.cpp:1400 msgid " Delivery service: LOCAL" msgstr " Служба доÑтавки: LOCAL" #: src/libs/data-staging/Scheduler.cpp:1401 #, c-format msgid " Delivery service: %s" msgstr " 
Служба доÑтавки: %s" #: src/libs/data-staging/Scheduler.cpp:1406 msgid "Failed to create DTR dump thread" msgstr "Ðе удалоÑÑŒ Ñоздать поток ÑброÑа DTR" #: src/libs/data-staging/Scheduler.cpp:1423 #: src/services/data-staging/DataDeliveryService.cpp:531 #, c-format msgid "DTR %s cancelled" msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ DTR %s отменён" #: src/libs/data-staging/examples/Generator.cpp:15 msgid "Shutting down scheduler" msgstr "Планировщик оÑтанавливаетÑÑ" #: src/libs/data-staging/examples/Generator.cpp:17 msgid "Scheduler stopped, exiting" msgstr "Планировщик оÑтановлен, выход" #: src/libs/data-staging/examples/Generator.cpp:23 #, c-format msgid "Received DTR %s back from scheduler in state %s" msgstr "Планировщик вернул Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR %s в ÑоÑтоÑнии %s" #: src/libs/data-staging/examples/Generator.cpp:30 msgid "Generator started" msgstr "Генератор запущен" #: src/libs/data-staging/examples/Generator.cpp:31 msgid "Starting DTR threads" msgstr "ЗапуÑкаютÑÑ Ð¿Ð¾Ñ‚Ð¾ÐºÐ¸ DTR" #: src/libs/data-staging/examples/Generator.cpp:44 msgid "No valid credentials found, exiting" msgstr "Ðе найдены дейÑтвительные параметры доÑтупа, выход" #: src/libs/data-staging/examples/Generator.cpp:55 #, c-format msgid "Problem creating dtr (source %s, destination %s)" msgstr "Проблема при Ñоздании DTR (иÑточник %s, назначение %s)" #: src/services/a-rex/arex.cpp:340 src/services/candypond/CandyPond.cpp:569 #: src/services/data-staging/DataDeliveryService.cpp:705 #, c-format msgid "SOAP operation is not supported: %s" msgstr "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ñ SOAP не поддерживаетÑÑ: %s" #: src/services/a-rex/arex.cpp:358 src/services/a-rex/arex.cpp:403 #, c-format msgid "Security Handlers processing failed: %s" msgstr "Сбой в процеÑÑе обработки прав доÑтупа: %s" #: src/services/a-rex/arex.cpp:381 msgid "" "Can't obtain configuration. Public information is disallowed for this user." msgstr "" "Ðе удалоÑÑŒ получить конфигурацию. ÐžÑ‚ÐºÑ€Ñ‹Ñ‚Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð½ÐµÐ´Ð¾Ñтупна Ð´Ð»Ñ Ñтого " "пользователÑ." #: src/services/a-rex/arex.cpp:388 msgid "Can't obtain configuration. Only public information is provided." msgstr "Ðе удалоÑÑŒ получить конфигурацию. Только Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð´Ð¾Ñтупна." 
#: src/services/a-rex/arex.cpp:416 src/services/a-rex/rest/rest.cpp:740
#, c-format
msgid "Connection from %s: %s"
msgstr "Соединение с %s: %s"

#: src/services/a-rex/arex.cpp:419 src/services/a-rex/rest/rest.cpp:744
#, c-format
msgid "process: method: %s"
msgstr "процесс: метод: %s"

#: src/services/a-rex/arex.cpp:420 src/services/a-rex/rest/rest.cpp:745
#, c-format
msgid "process: endpoint: %s"
msgstr "процесс: конечная точка: %s"

#: src/services/a-rex/arex.cpp:445
#, c-format
msgid "process: id: %s"
msgstr "процесс: идентификатор: %s"

#: src/services/a-rex/arex.cpp:446
#, c-format
msgid "process: subop: %s"
msgstr "процесс: подопция: %s"

#: src/services/a-rex/arex.cpp:453
#, c-format
msgid "process: subpath: %s"
msgstr "процесс: подкаталог: %s"

#: src/services/a-rex/arex.cpp:491 src/services/candypond/CandyPond.cpp:543
#: src/services/data-staging/DataDeliveryService.cpp:665
#: src/tests/echo/echo.cpp:98
#, c-format
msgid "process: request=%s"
msgstr "процесс: запрос=%s"

#: src/services/a-rex/arex.cpp:496 src/services/candypond/CandyPond.cpp:548
#: src/services/data-staging/DataDeliveryService.cpp:670
#: src/tests/count/count.cpp:69
msgid "input does not define operation"
msgstr "не задана операция на вводе"

#: src/services/a-rex/arex.cpp:499 src/services/candypond/CandyPond.cpp:551
#: src/services/data-staging/DataDeliveryService.cpp:673
#: src/tests/count/count.cpp:72
#, c-format
msgid "process: operation: %s"
msgstr "процесс: операция: %s"

#: src/services/a-rex/arex.cpp:526
msgid "POST request on special path is not supported"
msgstr "Запрос POST на специфический путь не поддерживается"

#: src/services/a-rex/arex.cpp:531
msgid "process: factory endpoint"
msgstr "процесс: конечная точка фабрики"

#: src/services/a-rex/arex.cpp:575 src/services/candypond/CandyPond.cpp:580
#: src/services/data-staging/DataDeliveryService.cpp:716
#: src/tests/echo/echo.cpp:158
#, c-format
msgid "process: response=%s"
msgstr "процесс: отзыв=%s"

#: src/services/a-rex/arex.cpp:580
msgid "Per-job POST/SOAP requests are not supported"
msgstr "Запросы POST/SOAP для отдельных задач не поддерживаются"

#: src/services/a-rex/arex.cpp:589
msgid "process: GET"
msgstr "процесс: GET"

#: src/services/a-rex/arex.cpp:590
#, c-format
msgid "GET: id %s path %s"
msgstr "GET: идентификатор %s путь %s"

#: src/services/a-rex/arex.cpp:623
msgid "process: HEAD"
msgstr "процесс: HEAD"

#: src/services/a-rex/arex.cpp:624
#, c-format
msgid "HEAD: id %s path %s"
msgstr "HEAD: идентификатор %s путь %s"

#: src/services/a-rex/arex.cpp:657
msgid "process: PUT"
msgstr "процесс: PUT"

#: src/services/a-rex/arex.cpp:690
msgid "process: DELETE"
msgstr "процесс: DELETE"

#: src/services/a-rex/arex.cpp:723
#, c-format
msgid "process: method %s is not supported"
msgstr "процесс: метод %s не поддерживается"

#: src/services/a-rex/arex.cpp:726
msgid "process: method is not defined"
msgstr "процесс: неопределённый метод"

#: src/services/a-rex/arex.cpp:836
msgid "Failed to run Grid Manager thread"
msgstr "Сбой запуска потока Grid Manager"

#: src/services/a-rex/arex.cpp:889
#, c-format
msgid "Failed to process configuration in %s"
msgstr "Не удалось обработать настройки в %s"

#: src/services/a-rex/arex.cpp:894
msgid "No control directory set in configuration"
msgstr "Не найден контрольный каталог в файле настроек"

#: src/services/a-rex/arex.cpp:898
msgid "No session directory set in configuration"
msgstr "Не найден каталог сессии в файле настроек"

#: src/services/a-rex/arex.cpp:902
msgid "No LRMS set in configuration"
msgstr "Не найдена СУПО в файле настроек"
configuration" msgstr "Ðе найдена СУПО в файле наÑтроек" #: src/services/a-rex/arex.cpp:961 #, c-format msgid "Failed to create control directory %s" msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð¾Ð³Ð¾ каталога %s" #: src/services/a-rex/arex.cpp:965 #, fuzzy, c-format msgid "Failed to update control directory %s" msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð¾Ð³Ð¾ каталога %s" #: src/services/a-rex/arex.cpp:972 #, fuzzy msgid "Failed to start GM threads" msgstr "Ðе удалоÑÑŒ запуÑтить поток архивированиÑ" #: src/services/a-rex/arex.cpp:1008 #, fuzzy, c-format msgid "Created entry for JWT issuer %s" msgstr "Создан пуÑтой файл Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи задач ARC: %s" #: src/services/a-rex/arex.cpp:1010 #, fuzzy, c-format msgid "Failed to create entry for JWT issuer %s" msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð¾Ð³Ð¾ каталога %s" #: src/services/a-rex/arex.cpp:1013 #, c-format msgid "Empty data for JWT issuer %s" msgstr "" #: src/services/a-rex/arex.cpp:1016 #, fuzzy, c-format msgid "Failed to read data for JWT issuer %s" msgstr "Ðевозможно прочитать данные из входного файла" #: src/services/a-rex/authop.cpp:26 #, fuzzy msgid "CheckOperationAllowed: missing configuration" msgstr "Сбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° наÑтроек" #: src/services/a-rex/authop.cpp:80 msgid "CheckOperationAllowed: allowed due to missing configuration scopes" msgstr "" #: src/services/a-rex/authop.cpp:83 #, c-format msgid "CheckOperationAllowed: token scopes: %s" msgstr "" #: src/services/a-rex/authop.cpp:84 #, fuzzy, c-format msgid "CheckOperationAllowed: configuration scopes: %s" msgstr "Чтение файла наÑтроек: %s" #: src/services/a-rex/authop.cpp:87 msgid "CheckOperationAllowed: allowed due to matching scopes" msgstr "" #: src/services/a-rex/authop.cpp:91 msgid "CheckOperationAllowed: token scopes do not match required scopes" msgstr "" #: src/services/a-rex/authop.cpp:97 msgid "CheckOperationAllowed: allowed for TLS connection" msgstr "" #: src/services/a-rex/authop.cpp:101 msgid "CheckOperationAllowed: no supported identity found" msgstr "" #: src/services/a-rex/cachecheck.cpp:37 #: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:710 #, c-format msgid "Error with cache configuration: %s" msgstr "Ошибка при наÑтройке кÑша: %s" #: src/services/a-rex/cachecheck.cpp:53 #: src/services/candypond/CandyPond.cpp:318 msgid "Error with cache configuration" msgstr "Ошибка при наÑтройке кÑша" #: src/services/a-rex/cachecheck.cpp:78 #: src/services/candypond/CandyPond.cpp:146 #: src/services/candypond/CandyPond.cpp:343 #, c-format msgid "Looking up URL %s" msgstr "ПоиÑк URL %s" #: src/services/a-rex/cachecheck.cpp:80 #: src/services/candypond/CandyPond.cpp:155 #, c-format msgid "Cache file is %s" msgstr "Файл кÑша: %s" #: src/services/a-rex/change_activity_status.cpp:22 #: src/services/a-rex/put.cpp:163 src/services/a-rex/put.cpp:204 #, c-format msgid "%s: there is no such job: %s" msgstr "%s: задача отÑутÑтвует: %s" #: src/services/a-rex/change_activity_status.cpp:30 #, c-format msgid "%s: put log %s: there is no payload" msgstr "%s: запиÑÑŒ журнала %s: отÑутÑтвуют полезные файлы" #: src/services/a-rex/change_activity_status.cpp:36 #, c-format msgid "%s: put log %s: unrecognized payload" msgstr "%s: запиÑÑŒ журнала %s: неопознанные полезные файлы" #: src/services/a-rex/change_activity_status.cpp:75 msgid "A-REX REST: Failed to resume job" msgstr "A-REX REST: Сбой Ð²Ð¾Ð·Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #: src/services/a-rex/change_activity_status.cpp:79 #, c-format msgid "A-REX REST: State change not allowed: from %s to %s" 
msgstr "A-REX REST: ÐедопуÑтимое изменение ÑоÑтоÑниÑ: Ñ %s на %s" #: src/services/a-rex/create_activity.cpp:24 msgid "NEW: put new job: there is no payload" msgstr "NEW: запиÑÑŒ новой задачи: отÑутÑтвуют полезные файлы" #: src/services/a-rex/create_activity.cpp:28 msgid "NEW: put new job: max jobs total limit reached" msgstr "" "NEW: запиÑÑŒ новой задачи: доÑтигнут макÑимальный предел общего количеÑтва " "задач" #: src/services/a-rex/delegation/DelegationStore.cpp:47 msgid "Wiping and re-creating whole storage" msgstr "Уничтожение и воÑÑоздание вÑего хранилища" #: src/services/a-rex/delegation/DelegationStore.cpp:207 #: src/services/a-rex/delegation/DelegationStore.cpp:309 #, c-format msgid "DelegationStore: TouchConsumer failed to create file %s" msgstr "DelegationStore: TouchConsumer не Ñмог Ñоздать файл %s" #: src/services/a-rex/delegation/DelegationStore.cpp:269 msgid "DelegationStore: PeriodicCheckConsumers failed to resume iterator" msgstr "" "DelegationStore: Ñбой Ð²Ð¾Ð·Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¸Ñ‚ÐµÑ€Ð°Ñ‚Ð¾Ñ€Ð° процеÑÑом " "PeriodicCheckConsumers" #: src/services/a-rex/delegation/DelegationStore.cpp:289 #, c-format msgid "" "DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - " "%s" msgstr "" "DelegationStore: Ñбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð¾Ñ†ÐµÑÑом PeriodicCheckConsumers уÑтаревшего " "Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %s - %s" #: src/services/a-rex/get.cpp:172 src/services/a-rex/get.cpp:227 #: src/services/a-rex/get.cpp:313 #, c-format msgid "Get: there is no job %s - %s" msgstr "Get: отÑутÑвует задача %s - %s" #: src/services/a-rex/get.cpp:380 #, c-format msgid "Head: there is no job %s - %s" msgstr "Head: отÑутÑвует задача %s - %s" #: src/services/a-rex/get.cpp:436 msgid "Failed to extract credential information" msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о параметрах доÑтупа" #: src/services/a-rex/get.cpp:439 #, c-format msgid "Checking cache permissions: DN: %s" msgstr "Проверка прав доÑтупа к кÑшу: DN: %s" #: src/services/a-rex/get.cpp:440 #, c-format msgid "Checking cache permissions: VO: %s" msgstr "Проверка прав доÑтупа к кÑшу: ВО: %s" #: src/services/a-rex/get.cpp:442 #, c-format msgid "Checking cache permissions: VOMS attr: %s" msgstr "Checking cache permissions: атрибуты VOMS: %s" #: src/services/a-rex/get.cpp:452 #, c-format msgid "Cache access allowed to %s by DN %s" msgstr "ДоÑтуп к кÑшу разрешён Ð´Ð»Ñ %s пользователю Ñ DN %s" #: src/services/a-rex/get.cpp:455 #, c-format msgid "DN %s doesn't match %s" msgstr "DN %s не Ñовпадает Ñ %s" #: src/services/a-rex/get.cpp:458 #, c-format msgid "Cache access allowed to %s by VO %s" msgstr "ДоÑтуп к кÑшу разрешён Ð´Ð»Ñ %s Ð´Ð»Ñ Ð’Ðž %s" #: src/services/a-rex/get.cpp:461 #, c-format msgid "VO %s doesn't match %s" msgstr "ВО %s не Ñовпадает Ñ %s" #: src/services/a-rex/get.cpp:467 src/services/a-rex/get.cpp:486 #, c-format msgid "Bad credential value %s in cache access rules" msgstr "ÐедопуÑтимое значение параметра доÑтупа %s в правилах доÑтупа к кÑшу" #: src/services/a-rex/get.cpp:475 src/services/a-rex/get.cpp:494 #, c-format msgid "VOMS attr %s matches %s" msgstr "Ðтрибут VOMS %s Ñовпадает Ñ %s" #: src/services/a-rex/get.cpp:476 #, c-format msgid "Cache access allowed to %s by VO %s and role %s" msgstr "ДоÑтуп к кÑшу разрешён Ð´Ð»Ñ %s Ð´Ð»Ñ Ð’Ðž %s и роли %s" #: src/services/a-rex/get.cpp:479 src/services/a-rex/get.cpp:498 #, c-format msgid "VOMS attr %s doesn't match %s" msgstr "Ðтрибут VOMS %s не Ñовпадает Ñ %s" #: src/services/a-rex/get.cpp:495 #, c-format msgid "Cache access allowed to %s by VO %s and 
group %s" msgstr "ДоÑтуп к кÑшу разрешён Ð´Ð»Ñ %s Ð´Ð»Ñ Ð’Ðž %s и группы %s" #: src/services/a-rex/get.cpp:501 #, c-format msgid "Unknown credential type %s for URL pattern %s" msgstr "ÐеизвеÑтный тип параметра доÑтупа %s Ð´Ð»Ñ ÑˆÐ°Ð±Ð»Ð¾Ð½Ð° URL %s" #: src/services/a-rex/get.cpp:507 #, c-format msgid "No match found in cache access rules for %s" msgstr "Ðе найдено ÑоответÑÑ‚Ð²Ð¸Ñ Ð´Ð»Ñ %s в правилах доÑтупа к кÑшу" #: src/services/a-rex/get.cpp:517 #, c-format msgid "Get from cache: Looking in cache for %s" msgstr "Получение из кÑша: ПоиÑк %s в кÑше" #: src/services/a-rex/get.cpp:520 #, c-format msgid "Get from cache: Invalid URL %s" msgstr "Получение из кÑша: ÐедопуÑтимый URL %s" #: src/services/a-rex/get.cpp:537 msgid "Get from cache: Error in cache configuration" msgstr "Получение из кÑша: Ошибка наÑтроек кÑша" #: src/services/a-rex/get.cpp:546 msgid "Get from cache: File not in cache" msgstr "Получение из кÑша: Файла в кÑше нет" #: src/services/a-rex/get.cpp:549 #, c-format msgid "Get from cache: could not access cached file: %s" msgstr "" "Получение из кÑша: не удалоÑÑŒ получить доÑтуп к кÑшированному файлу: %s" #: src/services/a-rex/get.cpp:559 msgid "Get from cache: Cached file is locked" msgstr "Получение из кÑша: КÑшированный файл забклокирован" #: src/services/a-rex/grid-manager/GridManager.cpp:98 #, c-format msgid "" "Cannot create directories for log file %s. Messages will be logged to this " "log" msgstr "" "Ðе удалоÑÑŒ Ñоздать каталоги Ð´Ð»Ñ Ð¶ÑƒÑ€Ð½Ð°Ð»ÑŒÐ½Ð¾Ð³Ð¾ файла %s. Ð¡Ð¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð±ÑƒÐ´ÑƒÑ‚ " "запиÑыватьÑÑ Ð² Ñтот журнал" #: src/services/a-rex/grid-manager/GridManager.cpp:104 #, c-format msgid "" "Cannot open cache log file %s: %s. Cache cleaning messages will be logged to " "this log" msgstr "" "Ðе удалоÑÑŒ открыть журнальный файл кÑша %s: %s. Ð¡Ð¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð¾Ð± очиÑтке кÑша " "будут запиÑыватьÑÑ Ð² Ñтот журнал" #: src/services/a-rex/grid-manager/GridManager.cpp:114 msgid "Failed to start cache clean script" msgstr "Ðе удалоÑÑŒ запуÑтить Ñкрипт очиÑтки кÑша" #: src/services/a-rex/grid-manager/GridManager.cpp:115 msgid "Cache cleaning script failed" msgstr "Сбой в работе Ñкрипта очиÑтки кÑша" #: src/services/a-rex/grid-manager/GridManager.cpp:183 #, c-format msgid "External request for attention %s" msgstr "Внешний Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° обÑлуживание %s" #: src/services/a-rex/grid-manager/GridManager.cpp:201 #, c-format msgid "Failed to open heartbeat file %s" msgstr "Ðе удалоÑÑŒ открыть мониторинговый файл %s" #: src/services/a-rex/grid-manager/GridManager.cpp:223 msgid "Starting jobs processing thread" msgstr "ЗапуÑкаетÑÑ Ð¿Ð¾Ñ‚Ð¾Ðº обработки задачи" #: src/services/a-rex/grid-manager/GridManager.cpp:224 #, c-format msgid "Used configuration file %s" msgstr "ИÑпользуетÑÑ Ñ„Ð°Ð¹Ð» наÑтроек %s" #: src/services/a-rex/grid-manager/GridManager.cpp:232 #, c-format msgid "" "Error initiating delegation database in %s. Maybe permissions are not " "suitable. Returned error is: %s." msgstr "" "Сбой при Ñоздании базы данных Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð² %s. Возможно, отÑутÑтвует " "доÑтуп к директории. Возвращена ошибка %s." 
#: src/services/a-rex/grid-manager/GridManager.cpp:244
msgid "Failed to start new thread: cache won't be cleaned"
msgstr "Не удалось запустить новый поток: кэш не будет очищен"

#: src/services/a-rex/grid-manager/GridManager.cpp:251
msgid "Failed to activate Jobs Processing object, exiting Grid Manager thread"
msgstr "Не удалось активировать объект обработки задач, закрывается поток Grid Manager"

#: src/services/a-rex/grid-manager/GridManager.cpp:260
#, c-format
msgid "Error adding communication interface in %s. Maybe another instance of A-REX is already running."
msgstr "Сбой при добавлении интерфейса связи в %s. Возможно, уже запущен другой процесс A-REX."

#: src/services/a-rex/grid-manager/GridManager.cpp:263
#, c-format
msgid "Error adding communication interface in %s. Maybe permissions are not suitable."
msgstr "Сбой при добавлении интерфейса связи в %s. Возможно, отсутствует доступ к директории."

#: src/services/a-rex/grid-manager/GridManager.cpp:270
msgid "Failed to start new thread for monitoring job requests"
msgstr "Не удалось запустить поток для отслеживания запросов на задачи"

#: src/services/a-rex/grid-manager/GridManager.cpp:276
msgid "Picking up left jobs"
msgstr "Обработка оставшихся задач"

#: src/services/a-rex/grid-manager/GridManager.cpp:279
msgid "Starting data staging threads"
msgstr "Запускаются потоки размещения данных"

#: src/services/a-rex/grid-manager/GridManager.cpp:283
msgid "Starting jobs' monitoring"
msgstr "Запуск мониторинга задач"

#: src/services/a-rex/grid-manager/GridManager.cpp:291
#, c-format
msgid "SSHFS mount point of session directory (%s) is broken - waiting for reconnect ..."
msgstr "Точка монтирования SSHFS каталога сессии (%s) недоступна - ожидается повторное соединение ..."

#: src/services/a-rex/grid-manager/GridManager.cpp:295
#, c-format
msgid "SSHFS mount point of runtime directory (%s) is broken - waiting for reconnect ..."
msgstr "Точка монтирования SSHFS каталога исполнения (%s) недоступна - ожидается повторное соединение ..."

#: src/services/a-rex/grid-manager/GridManager.cpp:300
#, c-format
msgid "SSHFS mount point of cache directory (%s) is broken - waiting for reconnect ..."
msgstr "Точка монтирования SSHFS каталога кэша (%s) недоступна - ожидается повторное соединение ..."
#: src/services/a-rex/grid-manager/GridManager.cpp:349
#, c-format
msgid "Orphan delegation lock detected (%s) - cleaning"
msgstr "Обнаружен неиспользуемый блок делегирования (%s) - очистка"

#: src/services/a-rex/grid-manager/GridManager.cpp:354
msgid "Failed to obtain delegation locks for cleaning orphaned locks"
msgstr "Не удалось получить блоки делегирования для очистки неиспользуемых блоков"

#: src/services/a-rex/grid-manager/GridManager.cpp:368
msgid "Waking up"
msgstr "Активизация"

#: src/services/a-rex/grid-manager/GridManager.cpp:371
msgid "Stopping jobs processing thread"
msgstr "Останавливается поток обработки задач"

#: src/services/a-rex/grid-manager/GridManager.cpp:373
msgid "Exiting jobs processing thread"
msgstr "Завершается поток обработки задач"

#: src/services/a-rex/grid-manager/GridManager.cpp:391
msgid "Requesting to stop job processing"
msgstr "Запрашивается прекращение обработки задач"

#: src/services/a-rex/grid-manager/GridManager.cpp:399
msgid "Waiting for main job processing thread to exit"
msgstr "Ожидание завершения основного потока обработки задач"

#: src/services/a-rex/grid-manager/GridManager.cpp:401
msgid "Stopped job processing"
msgstr "Обработка задач завершена"

#: src/services/a-rex/grid-manager/accounting/AAR.cpp:73
msgid "Cannot find information abouto job submission endpoint"
msgstr "Невозможно обнаружить информацию о месте назначения задачи"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:58
#, c-format
msgid "Failed to read database schema file at %s"
msgstr "Сбой чтения файла схемы базы данных в %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:68
msgid "Accounting database initialized successfully"
msgstr "Успешно инициализирована база данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:70
msgid "Accounting database connection has been established"
msgstr "Установлено соединение с учётной базой данных"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:80
#, c-format
msgid "%s. SQLite database error: %s"
msgstr "%s. Ошибка базы данных SQLite: %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:82
#, c-format
msgid "SQLite database error: %s"
msgstr "Ошибка базы данных SQLite: %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:110
#, c-format
msgid "Directory %s to store accounting database has been created."
msgstr "Создан каталог %s для хранения учётной базы данных."

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:112
#, c-format
msgid "Accounting database cannot be created. Faile to create parent directory %s."
msgstr "База данных учёта задач не может быть создана. Сбой создания родительского каталога %s."
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:116
#, c-format
msgid "Accounting database cannot be created: %s is not a directory"
msgstr "База данных учёта задач не может быть создана: %s не является каталогом"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:123
msgid "Failed to initialize accounting database"
msgstr "Сбой инициализации базы данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:130
#, c-format
msgid "Accounting database file (%s) is not a regular file"
msgstr "Файл базы данных учёта задач (%s) не является стандартным файлом"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:136
msgid "Error opening accounting database"
msgstr "Ошибка открытия базы данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:154
msgid "Closing connection to SQLite accounting database"
msgstr "Закрывается соединение с учётной базой данных SQLite"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:243
#, c-format
msgid "Failed to fetch data from %s accounting database table"
msgstr "Сбой чтения данных из таблицы %s базы данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:260
#, c-format
msgid "Failed to add '%s' into the accounting database %s table"
msgstr "Сбой добавления '%s' в таблицу %s базы данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:327
msgid "Failed to fetch data from accounting database Endpoints table"
msgstr "Сбой получения данных из таблицы Endpoints базы данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:344
#, c-format
msgid "Failed to add '%s' URL (interface type %s) into the accounting database Endpoints table"
msgstr "Сбой добавления '%s' URL (интерфейс типа %s) в таблицу Endpoints базы данных учёта задач"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:370
#, c-format
msgid "Failed to query AAR database ID for job %s"
msgstr "Не удалось опросить базу данных о AAR для задачи %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:431
#, c-format
msgid "Failed to insert AAR into the database for job %s"
msgstr "Не удалось добавить AAR в базу данных для задачи %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:432
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:481
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:512
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:528
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:544
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:565
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:581
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:596
#, c-format
msgid "SQL statement used: %s"
msgstr "Использованная инструкция SQL: %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:437
#, c-format
msgid "Failed to write authtoken attributes for job %s"
msgstr "Сбой записи атрибутов authtoken для задачи %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:441
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:498
#, c-format
msgid "Failed to write event records for job %s"
msgstr "Сбой записи информации о событиях для задачи %s"
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:452
#, c-format
msgid "Cannot to update AAR. Cannot find registered AAR for job %s in accounting database."
msgstr "Невозможно обновить AAR. Не удалось обнаружить зарегистрированную запись AAR для задачи %s в учётной базе данных."

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:480
#, c-format
msgid "Failed to update AAR in the database for job %s"
msgstr "Не удалось обновить AAR в базе данных для задачи %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:486
#, c-format
msgid "Failed to write RTEs information for the job %s"
msgstr "Сбой записи информации о RTE для задачи %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:490
#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:494
#, c-format
msgid "Failed to write data transfers information for the job %s"
msgstr "Сбой записи информации о передаче данных для задачи %s"

#: src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp:590
#, c-format
msgid "Unable to add event: cannot find AAR for job %s in accounting database."
msgstr "Сбой добавления события: не обнаружена запись AAR для задачи %s в базе данных учёта задач."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:73
#, c-format
msgid "Unknown option %s"
msgstr "Неизвестный параметр %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:80
msgid "Job ID argument is required."
msgstr "Требуется аргумент - идентификатор задачи."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:86
msgid "Path to user's proxy file should be specified."
msgstr "Должен быть указан путь к сертификату доверенности пользователя."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:92
msgid "User name should be specified."
msgstr "Должно быть указано имя пользователя."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:98
msgid "Path to .local job status file is required."
msgstr "Требуется путь к файлу состояния задач .local."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:106
msgid "Generating ceID prefix from hostname automatically"
msgstr "Автоматическое создание префикса ceID из имени узла"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:109
msgid "Cannot determine hostname from gethostname() to generate ceID automatically."
msgstr "Невозможно определить hostname из gethostname() для автоматического создания ceID."
#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:118
#, c-format
msgid "ceID prefix is set to %s"
msgstr "Префикс ceID задан как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:126
#, c-format
msgid "Getting currect timestamp for BLAH parser log: %s"
msgstr "Создание текущей метки времени для журнала программы разбора BLAH: %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:135
msgid "Parsing .local file to obtain job-specific identifiers and info"
msgstr "Разбирается файл .local с целью извлечения специфических для задачи идентификаторов и информации"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:145
#, c-format
msgid "globalid is set to %s"
msgstr "globalid задан как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:148
#, c-format
msgid "headnode is set to %s"
msgstr "Головной узел задан как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:151
#, c-format
msgid "interface is set to %s"
msgstr "Интерфейс задан как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:155
msgid "There is no local LRMS ID. Message will not be written to BLAH log."
msgstr "Отсутствует идентификатор СУПО. Сообщение не будет записано в журнал BLAH."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:158
#, c-format
msgid "localid is set to %s"
msgstr "localid задан как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:161
#, c-format
msgid "queue name is set to %s"
msgstr "Имя очереди задано как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:164
#, c-format
msgid "owner subject is set to %s"
msgstr "Имя субъекта владельца задано как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:166
msgid "Job did not finished successfully. Message will not be written to BLAH log."
msgstr "Задача не завершилась успехом. Сообщение не будет записано в журнал BLAH."

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:174
#, c-format
msgid "Job timestamp successfully parsed as %s"
msgstr "Временная метка задачи успешно разобрана как %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:178
msgid "Can not read information from the local job status file"
msgstr "Невозможно прочесть информацию из файла состояния задачи"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:194
#, c-format
msgid "Unsupported submission interface %s. Seems arc-blahp-logger need to be updated accordingly. Please submit the bug to bugzilla."
msgstr "Интерфейс засылки %s не поддерживается. Похоже, arc-blahp-logger пора обновить. Пожалуйста, опишите проблему в Bugzilla."
#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:204
msgid "Parsing VOMS AC to get FQANs information"
msgstr "Разбор VOMS AC с целью получения информации о FQAN"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:217
#, c-format
msgid "Found VOMS AC attribute: %s"
msgstr "Обнаружен атрибут VOMS AC: %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:230
msgid "VOMS AC attribute is a tag"
msgstr "Атрибут VOMS AC является тегом"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:237
msgid "Skipping policyAuthority VOMS AC attribute"
msgstr "Пропускается атрибут VOMS AC policyAuthority"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:241
msgid "VOMS AC attribute is the FQAN"
msgstr "Атрибут VOMS AC является FQAN"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:249
msgid "No FQAN found. Using None as userFQAN value"
msgstr "FQAN не обнаружен. В качестве значения userFQAN будет использоваться None"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:263
#, c-format
msgid "Assembling BLAH parser log entry: %s"
msgstr "Формирование записи журнала программы разбора BLAH: %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:268
#, c-format
msgid "Writing the info to the BLAH parser log: %s"
msgstr "Запись информации в журнал программы разбора BLAH: %s"

#: src/services/a-rex/grid-manager/arc_blahp_logger.cpp:276
#, c-format
msgid "Cannot open BLAH log file '%s'"
msgstr "Не удалось открыть журнальный файл BLAH '%s'"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:36
#, c-format
msgid "Missing cancel-%s-job - job cancellation may not work"
msgstr "Не найден скрипт cancel-%s-job - прерывание задачи может не работать"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:40
#, c-format
msgid "Missing submit-%s-job - job submission to LRMS may not work"
msgstr "Не найден скрипт submit-%s-job - засылка задачи в СУПО может не работать"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:44
#, c-format
msgid "Missing scan-%s-job - may miss when job finished executing"
msgstr "Не найден скрипт scan-%s-job - окончание задачи может быть незамеченным"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:58
#, c-format
msgid "Wrong option in %s"
msgstr "Неверная опция в %s"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:69
#, c-format
msgid "Can't read configuration file at %s"
msgstr "Невозможно прочесть файл настроек в %s"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:79
#, c-format
msgid "Can't recognize type of configuration file at %s"
msgstr "Невозможно определить тип файла настроек в %s"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:82
msgid "Could not determine configuration type or configuration is empty"
msgstr "Невозможно определить тип файла настроек, или же он пуст"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:163
msgid "lrms is empty"
msgstr "пустое значение lrms"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:196
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:205
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:214
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:223
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:232
msgid "Missing number in maxjobs"
msgstr "Недостающее число в maxjobs"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:199
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:208
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:217
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:226
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:235
#, c-format
msgid "Wrong number in maxjobs: %s"
msgstr "Недопустимое число в maxjobs: %s"
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:245
#, c-format
msgid "Wrong number in wakeupperiod: %s"
msgstr "Недопустимое число в wakeupperiod: %s"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:251
msgid "mail parameter is empty"
msgstr "Параметр mail пуст"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:257
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:261
msgid "Wrong number in defaultttl command"
msgstr "Недопустимое число в команде defaultttl"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:267
msgid "Wrong number in maxrerun command"
msgstr "Недопустимое число в команде maxrerun"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:274
msgid "State name for plugin is missing"
msgstr "Отсутствует наименование состояния подключаемого модуля"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:278
msgid "Options for plugin are missing"
msgstr "Отсутствуют параметры подключаемого модуля"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:281
#, c-format
msgid "Failed to register plugin for state %s"
msgstr "Сбой регистрации подключаемого модуля для состояния %s"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:287
msgid "Session root directory is missing"
msgstr "Отсутствует корневая директория сессии"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:290
msgid "Junk in sessiondir command"
msgstr "Бессмыслица в команде sessiondir"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:302
msgid "Missing directory in controldir command"
msgstr "В команде controldir пропущен каталог"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:307
msgid "'control' configuration option is no longer supported, please use 'controldir' instead"
msgstr "Опция настроек 'control' теперь называется 'controldir'; пожалуйста, используйте новое название"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:312
msgid "User for helper program is missing"
msgstr "Отсутствует пользователь для вспомогательной программы"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:315
msgid "Only user '.' for helper program is supported"
msgstr "Для вспомогательной программы поддерживается только пользователь '.'"
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:318
msgid "Helper program is missing"
msgstr "Отсутствует вспомогательная программа"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:339
msgid "Wrong option in fixdirectories"
msgstr "Неверная опция в fixdirectories"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:366
msgid "Wrong option in delegationdb"
msgstr "Неверная опция для базы данных delegationdb"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:375
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:608
msgid "forcedefaultvoms parameter is empty"
msgstr "Параметр forcedefaultvoms пуст"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:486
msgid "Wrong number in maxjobdesc command"
msgstr "Недопустимое число в команде maxjobdesc"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:535
msgid "Missing file name in [arex/jura] logfile"
msgstr "Отсутствует имя файла в опции logfile блока [arex/jura]"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:546
#, c-format
msgid "Wrong number in urdelivery_frequency: %s"
msgstr "Недопустимое значение в urdelivery_frequency: %s"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:601
msgid "No queue name given in queue block name"
msgstr "Не указано название очереди в названии блока queue"

#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:617
#: src/services/a-rex/grid-manager/conf/CoreConfig.cpp:652
msgid "advertisedvo parameter is empty"
msgstr "Параметр advertisedvo пуст"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:117
#, c-format
msgid "\tSession root dir : %s"
msgstr "\tКорневой каталог сессии: %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:118
#, c-format
msgid "\tControl dir : %s"
msgstr "\tКонтрольный каталог: %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:119
#, c-format
msgid "\tdefault LRMS : %s"
msgstr "\tСУПО по умолчанию : %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:120
#, c-format
msgid "\tdefault queue : %s"
msgstr "\tочередь по умолчанию : %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:121
#, c-format
msgid "\tdefault ttl : %u"
msgstr "\tВремя жизни по умолчанию : %u"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:126
msgid "No valid caches found in configuration, caching is disabled"
msgstr "В настройках не обнаружено ни одного приемлемого кэша, кэширование отключено"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:131
#, c-format
msgid "\tCache : %s"
msgstr "\tКэш : %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:133
#, c-format
msgid "\tCache link dir : %s"
msgstr "\tКаталог ссылок кэша : %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:136
#, c-format
msgid "\tCache (read-only): %s"
msgstr "\tКэш (только чтение): %s"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:138
msgid "\tCache cleaning enabled"
msgstr "\tОчистка кэша включена"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:139
msgid "\tCache cleaning disabled"
msgstr "\tОчистка кэша отключена"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:256
#, fuzzy
msgid "Starting controldir update tool."
msgstr "Запускается контролируемый процесс"

#: src/services/a-rex/grid-manager/conf/GMConfig.cpp:258
#, fuzzy
msgid "Failed to start controldir update tool."
msgstr "Ошибка создания контрольного каталога %s"
msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð¾Ð³Ð¾ каталога %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:261 #, fuzzy, c-format msgid "Failed to run controldir update tool. Exit code: %i" msgstr "Сбой запуÑка процеÑÑа загрузчика Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s" #: src/services/a-rex/grid-manager/conf/GMConfig.cpp:381 msgid "" "Globus location variable substitution is not supported anymore. Please " "specify path directly." msgstr "" "ПеременнаÑ, ÑƒÐºÐ°Ð·Ñ‹Ð²Ð°ÑŽÑ‰Ð°Ñ Ð½Ð° раÑположение Globus, больше не поддерживаетÑÑ. " "ПожалуйÑта, укажите полный путь." #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:35 msgid "Can't read configuration file" msgstr "Ðе удалоÑÑŒ прочеÑть файл наÑтроек" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:41 #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:29 msgid "Can't recognize type of configuration file" msgstr "Ðевозможно определить тип файла наÑтроек" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:47 msgid "Configuration error" msgstr "Ошибка наÑтройки" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:77 msgid "Bad number in maxdelivery" msgstr "ÐедопуÑтимое значение maxdelivery" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:83 msgid "Bad number in maxemergency" msgstr "ÐедопуÑтимое значение maxemergency" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:89 msgid "Bad number in maxprocessor" msgstr "ÐедопуÑтимое значение maxprocessor" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:95 msgid "Bad number in maxprepared" msgstr "ÐедопуÑтимое значение maxprepared" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:101 msgid "Bad number in maxtransfertries" msgstr "недопуÑтимое чиÑло в maxtransfertries" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:112 msgid "Bad number in speedcontrol" msgstr "ÐедопуÑтимое значение speedcontrol" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:123 #, c-format msgid "Bad number in definedshare %s" msgstr "ÐедопуÑтимое значение definedshare %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:132 #, c-format msgid "Bad URL in deliveryservice: %s" msgstr "ÐедопуÑтимый URL в deliveryservice: %s" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:143 msgid "Bad number in remotesizelimit" msgstr "ÐедопуÑтимое значение remotesizelimit" #: src/services/a-rex/grid-manager/conf/StagingConfig.cpp:168 msgid "Bad value for loglevel" msgstr "ÐедопуÑтимое значение Ð´Ð»Ñ loglevel" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:24 msgid "Can't open configuration file" msgstr "Ðе удалоÑÑŒ открыть файл наÑтроек" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:45 msgid "Not enough parameters in copyurl" msgstr "ÐедоÑтаточное количеÑтво параметров в copyurl" #: src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp:54 msgid "Not enough parameters in linkurl" msgstr "ÐедоÑтаточное количеÑтво параметров в linkurl" #: src/services/a-rex/grid-manager/files/ControlFileContent.cpp:185 #, c-format msgid "Wrong directory in %s" msgstr "Ðеверный каталог в %s" #: src/services/a-rex/grid-manager/files/ControlFileHandling.cpp:104 #, c-format msgid "Failed setting file owner: %s" msgstr "Ðе удалоÑÑŒ задать владельца файла: %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:36 #, c-format msgid "Could not read data staging configuration from %s" msgstr "Ðе удалоÑÑŒ прочитать наÑтройки Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… в %s" #: src/services/a-rex/grid-manager/gm_jobs.cpp:44 #, c-format msgid "Can't read transfer states 
#: src/services/a-rex/grid-manager/gm_jobs.cpp:100
msgid "gm-jobs displays information on current jobs in the system."
msgstr "gm-jobs выводит информацию о текущих заданиях в системе."

#: src/services/a-rex/grid-manager/gm_jobs.cpp:105
msgid "display more information on each job"
msgstr "вывести больше информации о каждом задании"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:110
#: src/services/a-rex/grid-manager/gm_kick.cpp:24
msgid "use specified configuration file"
msgstr "использовать указанный файл настроек"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:111
#: src/services/a-rex/grid-manager/gm_kick.cpp:25
msgid "file"
msgstr "файл"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:115
msgid "read information from specified control directory"
msgstr "читать информацию из указанного контрольного каталога"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:116
msgid "dir"
msgstr "каталог"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:120
msgid "print summary of jobs in each transfer share"
msgstr "вывести сводку о задачах в каждой из трансферных квот"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:125
msgid "do not print list of jobs"
msgstr "не выводить список задач"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:130
msgid "do not print number of jobs in each state"
msgstr "не выводить количество задач в каждом состоянии"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:135
msgid "print state of the service"
msgstr "вывести состояние сервиса"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:140
msgid "show only jobs of user(s) with specified subject name(s)"
msgstr "показать задачи, принадлежащие пользователям с указанными именами субъекта"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:141
#: src/services/a-rex/grid-manager/gm_jobs.cpp:151
#: src/services/a-rex/grid-manager/gm_jobs.cpp:161
msgid "dn"
msgstr "DN"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:145
msgid "request to cancel job(s) with specified ID(s)"
msgstr "запросить обрыв задач с указанными ярлыками"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:146
#: src/services/a-rex/grid-manager/gm_jobs.cpp:156
#: src/services/a-rex/grid-manager/gm_jobs.cpp:166
#: src/services/a-rex/grid-manager/gm_jobs.cpp:176
#: src/services/a-rex/grid-manager/gm_kick.cpp:30
msgid "id"
msgstr "ID"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:150
msgid "request to cancel jobs belonging to user(s) with specified subject name(s)"
msgstr "запросить обрыв задач, принадлежащих пользователям с указанными именами субъекта"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:155
msgid "request to clean job(s) with specified ID(s)"
msgstr "запросить удаление задач с указанными ярлыками"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:160
msgid "request to clean jobs belonging to user(s) with specified subject name(s)"
msgstr "запросить удаление задач, принадлежащих пользователям с указанными именами субъекта"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:165
msgid "show only jobs with specified ID(s)"
msgstr "показать задачи с указанными ярлыками"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:170
msgid "print list of available delegation IDs"
msgstr "вывести список доступных идентификаторов делегирования"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:175
msgid "print delegation token of specified ID(s)"
msgstr "вывести токен делегирования указанного идентификатора"
#: src/services/a-rex/grid-manager/gm_jobs.cpp:180
msgid "print main delegation token of specified Job ID(s)"
msgstr "вывести основной токен делегирования задач(и) с указанными ID"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:181
msgid "job id"
msgstr "ID задания"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:185
msgid "output requested elements (jobs list, delegation ids and tokens) to file"
msgstr "записать указанные элементы (список задач, идентификаторы и токены делегирования) в файл"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:186
msgid "file name"
msgstr "название файла"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:209
#, c-format
msgid "Using configuration at %s"
msgstr "Используются настройки в %s"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:232
#, c-format
msgid "Failed to open output file '%s'"
msgstr "Не удалось открыть выходной файл '%s'"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:241
msgid "Looking for current jobs"
msgstr "Поиск текущих задач"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:278
#, c-format
msgid "Job: %s : ERROR : Unrecognizable state"
msgstr "Задача: %s : ERROR : Неопознанное состояние"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:287
#, c-format
msgid "Job: %s : ERROR : No local information."
msgstr "Задача: %s : ERROR : Отсутствует локальная информация."

#: src/services/a-rex/grid-manager/gm_jobs.cpp:461
#, c-format
msgid "Job: %s : ERROR : Failed to put cancel mark"
msgstr "Задача: %s : ERROR : Сбой записи метки прерывания"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:465
#, c-format
msgid "Job: %s : Cancel request put but failed to communicate to service"
msgstr "Задача: %s : Запрос на отмену отправлен, но связь со службой отсутствует"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:467
#, c-format
msgid "Job: %s : Cancel request put and communicated to service"
msgstr "Задача: %s : Запрос на отмену отправлен и сообщён службе"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:478
#, c-format
msgid "Job: %s : ERROR : Failed to put clean mark"
msgstr "Задача: %s : ERROR : Сбой записи отметки об очистке"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:482
#, c-format
msgid "Job: %s : Clean request put but failed to communicate to service"
msgstr "Задача: %s : Запрос на очистку отправлен, но связь со службой отсутствует"

#: src/services/a-rex/grid-manager/gm_jobs.cpp:484
#, c-format
msgid "Job: %s : Clean request put and communicated to service"
msgstr "Задача: %s : Запрос на очистку отправлен и сообщён службе"

#: src/services/a-rex/grid-manager/gm_kick.cpp:18
msgid "gm-kick wakes up the A-REX corresponding to the given control directory. If no directory is given it uses the control directory found in the configuration file."
msgstr "gm-kick пробуждает A-REX, соответствующий указанному управляющему каталогу. Если каталог не указан, используется управляющий каталог из файла настроек."
#: src/services/a-rex/grid-manager/gm_kick.cpp:29
msgid "inform about changes in particular job (can be used multiple times)"
msgstr "информировать об изменениях в заданной задаче (допускается многократное использование)"

#: src/services/a-rex/grid-manager/inputcheck.cpp:39
#, c-format
msgid "Failed to acquire source: %s"
msgstr "Не удалось получить источник: %s"

#: src/services/a-rex/grid-manager/inputcheck.cpp:44
#, c-format
msgid "Failed to resolve %s"
msgstr "Не удалось разрешить %s"

#: src/services/a-rex/grid-manager/inputcheck.cpp:61
#, c-format
msgid "Failed to check %s"
msgstr "Не удалось проверить %s"

#: src/services/a-rex/grid-manager/inputcheck.cpp:75
msgid "job_description_file [proxy_file]"
msgstr "job_description_file [proxy_file]"

#: src/services/a-rex/grid-manager/inputcheck.cpp:76
msgid "inputcheck checks that input files specified in the job description are available and accessible using the credentials in the given proxy file."
msgstr "inputcheck проверяет, доступны ли входные файлы, указанные в описании задачи, используя параметры доступа в указанном файле доверенности."

#: src/services/a-rex/grid-manager/inputcheck.cpp:88
msgid "Wrong number of arguments given"
msgstr "Указано неверное количество аргументов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:75
#, c-format
msgid "DTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobs"
msgstr "DTRGenerator ожидает обработки: %d задач на отмену, %d DTR, %d новых задач"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:89
#, c-format
msgid "%s: Job cancel request from DTR generator to scheduler"
msgstr "%s: Запрос о прерывании задачи от генератора DTR к планировщику"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:94
#, c-format
msgid "%s: Returning canceled job from DTR generator"
msgstr "%s: Возврат прерванной задачи из генератора DTR"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:135
#, c-format
msgid "%s: Re-requesting attention from DTR generator"
msgstr "%s: Повторный запрос об обслуживании к генератору DTR"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:145
#, c-format
msgid "DTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobs"
msgstr "DTRGenerator обработал: %d задач на отмену, %d DTR, %d новых задач"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:164
msgid "Exiting Generator thread"
msgstr "Останавливается поток Generator"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:236
msgid "Shutting down data staging threads"
msgstr "Закрываются потоки размещения данных"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:246
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:259
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:287
msgid "DTRGenerator is not running!"
msgstr "DTRGenerator не запущен!"
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:249
#, c-format
msgid "Received DTR %s during Generator shutdown - may not be processed"
msgstr "Запрос DTR %s получен в процессе закрытия генератора - возможно, он не будет обработан"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:263
msgid "DTRGenerator was sent null job"
msgstr "DTRGenerator получил нулевую задачу"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:272
#, c-format
msgid "%s: Received job in DTR generator"
msgstr "%s: Получена задача в DTRGenerator"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:275
#, c-format
msgid "%s: Failed to receive job in DTR generator"
msgstr "%s: Сбой получения задачи в DTRGenerator"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:282
msgid "DTRGenerator got request to cancel null job"
msgstr "DTRGenerator получил запрос отменить нулевую задачу"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:297
msgid "DTRGenerator is queried about null job"
msgstr "DTRGenerator опрошен о нулевой задаче"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:327
msgid "DTRGenerator is asked about null job"
msgstr "DTRGenerator запрошен о нулевой задаче"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:355
msgid "DTRGenerator is requested to remove null job"
msgstr "DTRGenerator получил запрос удалить нулевую задачу"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:362
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:370
#, c-format
msgid "%s: Trying to remove job from data staging which is still active"
msgstr "%s: Попытка удалить задание из активного процесса размещения данных"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:378
#, c-format
msgid "%s: Trying remove job from data staging which does not exist"
msgstr "%s: Попытка удалить задание из несуществующего процесса размещения данных"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:389
#, c-format
msgid "%s: Invalid DTR"
msgstr "%s: Недействительный запрос DTR"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:406
#, c-format
msgid "%s: Received DTR %s to copy file %s in state %s"
msgstr "%s: Получен запрос DTR %s на копирование файла %s в состоянии %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:410
#, c-format
msgid "%s: Received DTR belongs to inactive job"
msgstr "%s: Полученный DTR принадлежит неактивной задаче"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:427
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1065
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:474
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:532
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:646
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:856
#, c-format
msgid "%s: Failed reading local information"
msgstr "%s: Не удалось прочесть локальную информацию"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:436
#, c-format
msgid "%s: DTR %s to copy file %s failed"
msgstr "%s: Сбой запроса DTR %s на копирование файла %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:442
#, c-format
msgid "%s: Cancelling other DTRs"
msgstr "%s: Прерывание остальных запросов DTR"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:452
#, c-format
msgid "%s: DTR %s to copy to %s failed but is not mandatory"
msgstr "%s: Копирование DTR %s в %s не удалось, но оно не было обязательным"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:462
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:727
#, c-format
msgid "%s: Failed to read list of output files"
msgstr "%s: Не удалось прочесть список выходных файлов"
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:476
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:617
#, c-format
msgid "%s: Failed to read dynamic output files in %s"
msgstr "%s: Не удалось прочесть динамический список выходных файлов в %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:478
#, c-format
msgid "%s: Going through files in list %s"
msgstr "%s: Обрабатываются файлы в списке %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:482
#, c-format
msgid "%s: Removing %s from dynamic output file %s"
msgstr "%s: Удаляется %s из динамического списка выходных файлов %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:486
#, c-format
msgid "%s: Failed to write back dynamic output files in %s"
msgstr "%s: Не удалось записать динамические выходные файлы обратно в %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:502
#, c-format
msgid "%s: Failed to write list of output files"
msgstr "%s: Не удалось записать список выходных файлов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:506
#, c-format
msgid "%s: Failed to write list of output status files"
msgstr "%s: Не удалось записать список состояний выходных файлов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:518
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:739
#, c-format
msgid "%s: Failed to read list of input files"
msgstr "%s: Не удалось прочесть список входных файлов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:537
#, c-format
msgid "%s: Failed to write list of input files"
msgstr "%s: Не удалось записать список входных файлов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:549
#, c-format
msgid "%s: Received DTR with two remote endpoints!"
msgstr "%s: Получен запрос DTR с двумя удалёнными адресами!"
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:561
#: src/services/candypond/CandyPondGenerator.cpp:105
#, c-format
msgid "No active job id %s"
msgstr "Нет активной задачи с ярлыком %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:605
#, c-format
msgid "%s: Failed to read list of output files, can't clean up session dir"
msgstr "%s: Не удалось прочесть список выходных файлов, невозможно очистить каталог сессии"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:631
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:650
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:777
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:909
#, c-format
msgid "%s: Failed to clean up session dir"
msgstr "%s: Не удалось очистить каталог сессии"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:641
#, c-format
msgid "%s: Failed to read list of input files, can't clean up session dir"
msgstr "%s: Не удалось прочесть список входных файлов, невозможно очистить каталог сессии"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667
msgid "uploads"
msgstr "отгрузок"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:663
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:667
msgid "downloads"
msgstr "загрузок"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664
msgid "cancelled"
msgstr "отменено"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:664
msgid "finished"
msgstr "готово"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:662
#, c-format
msgid "%s: All %s %s successfully"
msgstr "%s: Все процессы %s успешно завершились (%s)"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:666
#, c-format
msgid "%s: Some %s failed"
msgstr "%s: Некоторые процессы %s дали сбой"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:670
#, c-format
msgid "%s: Requesting attention from DTR generator"
msgstr "%s: Запрос об обслуживании к генератору DTR"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:681
msgid "DTRGenerator is requested to process null job"
msgstr "DTRGenerator получил запрос на обработку нулевой задачи"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687
msgid "download"
msgstr "загрузка"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:687
msgid "upload"
msgstr "отгрузка"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:686
#, c-format
msgid "%s: Received data staging request to %s files"
msgstr "%s: Получен запрос на размещение файлов (%s)"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:748
#, c-format
msgid "%s: Duplicate file in list of input files: %s"
msgstr "%s: Повторяющееся имя файла в списке входных файлов: %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:801
#, c-format
msgid "%s: Reading output files from user generated list in %s"
msgstr "%s: Чтение выходных файлов из созданного пользователем списка в %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:803
#, c-format
msgid "%s: Error reading user generated output file list in %s"
msgstr "%s: Ошибка чтения созданного пользователем списка выходных файлов в %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:834
#, c-format
msgid "%s: Failed to list output directory %s: %s"
msgstr "%s: Сбой вывода содержимого каталога назначения %s: %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:852
#, c-format
msgid "%s: Adding new output file %s: %s"
msgstr "%s: Добавление нового выходного файла %s: %s"
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:875
#, c-format
msgid "%s: Two identical output destinations: %s"
msgstr "%s: Два одинаковых назначения для выдачи: %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:888
#, c-format
msgid "%s: Cannot upload two different files %s and %s to same LFN: %s"
msgstr "%s: Невозможно записать два разных файла %s и %s с одним LFN: %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:920
#, c-format
msgid "%s: Received job in a bad state: %s"
msgstr "%s: Задача получена в плохом состоянии: %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:928
#, c-format
msgid "%s: Session directory processing takes too long - %u.%06u seconds"
msgstr "%s: Обработка каталога сессии продолжается слишком долго - %u.%06u секунд"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:976
#, c-format
msgid "%s: Destination file %s was possibly left unfinished from previous A-REX run, will overwrite"
msgstr "%s: Файл назначения %s, вероятно, остался недописанным после предыдущего запуска A-REX; будет перезаписан"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1071
#, c-format
msgid "%s: Failed writing local information"
msgstr "%s: Не удалось записать локальную информацию"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1089
#, c-format
msgid "%s: Cancelling active DTRs"
msgstr "%s: Прерывание активных запросов DTR"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1096
msgid "DTRGenerator is asked to check files for null job"
msgstr "DTRGenerator получил запрос проверить файлы нулевой задачи"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1116
#, c-format
msgid "%s: Can't read list of input files"
msgstr "%s: Невозможно прочесть список входных файлов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1131
#, c-format
msgid "%s: Checking user uploadable file: %s"
msgstr "%s: Проверка отгружаемого файла пользователя: %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1136
#, c-format
msgid "%s: User has uploaded file %s"
msgstr "%s: Пользователь отгрузил файл %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1143
#, c-format
msgid "%s: Failed writing changed input file."
msgstr "%s: Не удалось записать изменившийся входной файл."
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1147
#, c-format
msgid "%s: Critical error for uploadable file %s"
msgstr "%s: Критическая ошибка для отгружаемого файла %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1153
#, c-format
msgid "%s: User has NOT uploaded file %s"
msgstr "%s: Пользователь НЕ отгрузил файл %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1165
#, c-format
msgid "%s: Uploadable files timed out"
msgstr "%s: Истекло время ожидания отгружаемых файлов"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1221
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1247
#, c-format
msgid "%s: Can't convert checksum %s to int for %s"
msgstr "%s: Невозможно преобразовать контрольную сумму %s в целое число для %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1228
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1242
#, c-format
msgid "%s: Can't convert filesize %s to int for %s"
msgstr "%s: Невозможно преобразовать размер файла %s в целое число для %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1237
#, c-format
msgid "%s: Invalid size/checksum information (%s) for %s"
msgstr "%s: Неверная информация о размере/контрольной сумме (%s) для %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1259
#, c-format
msgid "%s: Invalid file: %s is too big."
msgstr "%s: Неверный файл: %s слишком велик."

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1275
#, c-format
msgid "%s: Failed to switch user ID to %d/%d to read file %s"
msgstr "%s: Не удалось изменить идентификатор пользователя на %d/%d для чтения файла %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1281
#, c-format
msgid "%s: Failed to open file %s for reading"
msgstr "%s: Не удалось открыть файл %s на чтение"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1289
#, c-format
msgid "%s: Error accessing file %s"
msgstr "%s: Ошибка доступа к файлу %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1301
#, c-format
msgid "%s: Error reading file %s"
msgstr "%s: Ошибка при чтении файла %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1316
#, c-format
msgid "%s: File %s has wrong checksum: %llu. Expected %lli"
msgstr "%s: У файла %s неверная контрольная сумма: %llu. Ожидалась %lli"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1322
#, c-format
msgid "%s: Checksum %llu verified for %s"
msgstr "%s: Контрольная сумма %llu подтверждена для %s"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1334
msgid "Found unfinished DTR transfers. It is possible the previous A-REX process did not shut down normally"
msgstr "Найдены незаконченные передачи DTR. Возможно, предыдущий процесс A-REX завершился некорректно"
#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1341
#, c-format
msgid "Found DTR %s for file %s left in transferring state from previous run"
msgstr "Найден запрос DTR %s для файла %s, оставшийся в состоянии передачи после предыдущего запуска"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1350
msgid "DTRGenerator is requested to clean links for null job"
msgstr "DTRGenerator получил запрос очистить ссылки нулевой задачи"

#: src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp:1366
#, c-format
msgid "%s: Cache cleaning takes too long - %u.%06u seconds"
msgstr "%s: Очистка кэша продолжается слишком долго - %u.%06u секунд"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:108
#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:190
#, c-format
msgid "%s: Job monitoring counter is broken"
msgstr "%s: Счётчик слежения за задачей сбит"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:115
#, c-format
msgid "%s: Job monitoring is unintentionally lost"
msgstr "%s: Слежение за задачей непреднамеренно прервано"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:124
#, c-format
msgid "%s: Job monitoring stop success"
msgstr "%s: Слежение за задачей успешно прекращено"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:129
#, c-format
msgid "%s: Job monitoring stop requested with %u active references and %s queue associated"
msgstr "%s: Запрошено прекращение слежения за задачей с %u активными ссылками и ассоциированной очередью %s"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:131
#, c-format
msgid "%s: Job monitoring stop requested with %u active references"
msgstr "%s: Запрошено прекращение слежения за задачей с %u активными ссылками"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:195
#, c-format
msgid "%s: Job monitoring is lost due to removal from queue"
msgstr "%s: Слежение за задачей прервано в связи с удалением из очереди"

#: src/services/a-rex/grid-manager/jobs/GMJob.cpp:278
#, c-format
msgid "%s: PushSorted failed to find job where expected"
msgstr "%s: PushSorted не смог обнаружить задачу в ожидаемом месте"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:161
#, c-format
msgid "Replacing queue '%s' with '%s'"
msgstr "Очередь '%s' заменяется на '%s'"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:255
#, c-format
msgid "Bad name for stdout: %s"
msgstr "Недопустимое имя для stdout: %s"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:263
#, c-format
msgid "Bad name for stderr: %s"
msgstr "Недопустимое имя для stderr: %s"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:326
#, c-format
msgid "Bad name for runtime environment: %s"
msgstr "Недопустимое название среды выполнения: %s"

#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:371
msgid "Job description file could not be read."
msgstr "Невозможно прочесть файл с описанием задачи."
#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:422
#: src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp:436
#, c-format
msgid "Bad name for executable: %s"
msgstr "Недопустимое имя для исполняемого файла: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:89
msgid "Failed to start data staging threads"
msgstr "Не удалось запустить потоки размещения данных"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:190
#, c-format
msgid "%s: Failed reading .local and changing state, job and A-REX may be left in an inconsistent state"
msgstr "%s: Ошибка при чтении .local и изменении состояния, задача и A-REX могут оказаться в противоречивом состоянии"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:195
#, c-format
msgid "%s: unexpected failed job add request: %s"
msgstr "%s: непредусмотренный запрос добавления неуспешной задачи: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:206
#, c-format
msgid "%s: unexpected job add request: %s"
msgstr "%s: непредусмотренный запрос добавления задачи: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:259
#, c-format
msgid "%s: job for attention"
msgstr "%s: задача для обслуживания"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:269
msgid "all for attention"
msgstr "все для обслуживания"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:286
#, c-format
msgid "%s: job found while scanning"
msgstr "%s: задача обнаружена при сканировании"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:314
#, c-format
msgid "%s: job will wait for external process"
msgstr "%s: задача будет ожидать внешнего процесса"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:331
#, c-format
msgid "%s: job assigned for slow polling"
msgstr "%s: задача назначена для медленного опроса"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:349
#, c-format
msgid "%s: job being processed"
msgstr "%s: задача обрабатывается"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:384
#, c-format
msgid "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)"
msgstr "Текущие задачи в системе (от PREPARING до FINISHING) по DN (%i записей)"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:386
#, c-format
msgid "%s: %i"
msgstr "%s: %i"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:398
#, c-format
msgid "%s: Failed storing failure reason: %s"
msgstr "%s: Сбой записи причины сбоя: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:404
#, c-format
msgid "%s: Failed reading job description: %s"
msgstr "%s: Сбой чтения описания задачи: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:416
#, c-format
msgid "%s: Failed parsing job request."
msgstr "%s: Сбой разбора запроса задачи."
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:466
#, c-format
msgid "%s: Failed writing list of output files: %s"
msgstr "%s: Не удалось записать список выходных файлов: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:492
#, c-format
msgid "%s: Failed obtaining lrms id"
msgstr "%s: Не удалось получить идентификатор СУПО"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:506
#, c-format
msgid "%s: Failed writing local information: %s"
msgstr "%s: Не удалось записать локальную информацию: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:538
#, c-format
msgid "%s: Failed creating grami file"
msgstr "%s: Не удалось создать файл grami"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:542
#, c-format
msgid "%s: Failed setting executable permissions"
msgstr "%s: Не удалось установить права на исполнение"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:550
#, c-format
msgid "%s: state SUBMIT: starting child: %s"
msgstr "%s: состояние SUBMIT: запуск дочернего процесса: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:557
#, c-format
msgid "%s: Failed running submission process"
msgstr "%s: Не удалось выполнить процедуру запуска"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:562
#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:669
#, c-format
msgid "%s: LRMS scripts limit of %u is reached - suspending submit/cancel"
msgstr "%s: достигнут предел скриптов СУПО %u - приостанавливается запуск/снятие"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:578
#, c-format
msgid "%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done."
msgstr "%s: Засылка задачи в СУПО происходит слишком долго, но идентификатор уже доступен. Будем считать, что засылка произведена."

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:585
#, c-format
msgid "%s: Job submission to LRMS takes too long. Failing."
msgstr "%s: Засылка задачи в СУПО происходит слишком долго. Сбой."

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:594
#, c-format
msgid "%s: state SUBMIT: child exited with code %i"
msgstr "%s: состояние SUBMIT: дочерний процесс завершился с кодом выхода %i"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:599
#, c-format
msgid "%s: Job submission to LRMS failed"
msgstr "%s: Не удалось направить задачу в СУПО"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:620
#, c-format
msgid "%s: state CANCELING: timeout waiting for cancellation"
msgstr "%s: состояние CANCELING: срок ожидания прерывания истёк"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:626
#, c-format
msgid "%s: state CANCELING: job diagnostics collected"
msgstr "%s: состояние CANCELING: диагностика задачи собрана"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:654
#, c-format
msgid "%s: state CANCELING: starting child: %s"
msgstr "%s: состояние CANCELING: запуск дочернего процесса: %s"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:656
#, c-format
msgid "%s: Job has completed already. No action taken to cancel"
msgstr "%s: Задача уже завершилась. Действия по прерыванию не применяются"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:664
#, c-format
msgid "%s: Failed running cancellation process"
msgstr "%s: Не удалось выполнить процедуру прерывания"

#: src/services/a-rex/grid-manager/jobs/JobsList.cpp:683
#, c-format
msgid "%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded."
msgstr "%s: Прерывание задачи происходит слишком долго, но диагностика уже доступна. Будем считать, что прерывание произошло."
msgstr "" "%s: Прерывание задачи проиÑходит Ñлишком медленно, но диагноÑтика уже " "доÑтупна. Будем Ñчитать, что прерывание произошло." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:689 #, c-format msgid "%s: Job cancellation takes too long. Failing." msgstr "%s: Прерывание задачи проиÑходит Ñлишком долго. Сбой." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:699 #, c-format msgid "%s: state CANCELING: child exited with code %i" msgstr "%s: ÑоÑтоÑние CANCELING: дочерний процеÑÑ Ð·Ð°Ð²ÐµÑ€ÑˆÐ¸Ð»ÑÑ Ñ ÐºÐ¾Ð´Ð¾Ð¼ выхода %i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:705 #, c-format msgid "%s: Failed to cancel running job" msgstr "%s: Ðе удалоÑÑŒ оборвать иÑполнÑющуюÑÑ Ð·Ð°Ð´Ð°Ñ‡Ñƒ" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:724 #, c-format msgid "%s: State: %s: data staging finished" msgstr "%s: СоÑтоÑние: %s: размещение данных завершено" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:759 #, c-format msgid "%s: State: %s: still in data staging" msgstr "%s: СоÑтоÑние: %s: вÑÑ‘ ещё в процеÑÑе переноÑа данных" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:772 #, c-format msgid "%s: Job is not allowed to be rerun anymore" msgstr "%s: Задачу Ð½ÐµÐ»ÑŒÐ·Ñ Ð±Ð¾Ð»ÑŒÑˆÐµ перезапуÑкать" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:782 #, c-format msgid "%s: Job failed in unknown state. Won't rerun." msgstr "" "%s: Сбой иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ в неизвеÑтном ÑоÑтоÑнии. ПерезапуÑка не будет." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:803 #, c-format msgid "%s: Reprocessing job description failed" msgstr "%s: Сбой повторной обработки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:810 #, c-format msgid "%s: Failed to read reprocessed list of output files" msgstr "%s: Ðе удалоÑÑŒ прочеÑть переработанный ÑпиÑок выходных файлов" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:814 #, c-format msgid "%s: Failed to read reprocessed list of input files" msgstr "%s: Ðе удалоÑÑŒ прочеÑть переработанный ÑпиÑок входных файлов" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:898 #, c-format msgid "%s: Reading status of new job failed" msgstr "%s: Ðе удалоÑÑŒ прочеÑть ÑоÑтоÑние новой задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:911 #, c-format msgid "%s: State: ACCEPTED: parsing job description" msgstr "%s: СоÑтоÑние: ACCEPTED: обрабатываетÑÑ Ð¾Ð¿Ð¸Ñание задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:913 #, c-format msgid "%s: Processing job description failed" msgstr "%s: Ðе удалоÑÑŒ обработать опиÑание задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:952 #, c-format msgid "%s: new job is accepted" msgstr "%s: Ð½Ð¾Ð²Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° принÑта" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:964 #, c-format msgid "%s: %s: New job belongs to %i/%i" msgstr "%s: %s: ÐÐ¾Ð²Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° принадлежит %i/%i" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:969 #, c-format msgid "%s: old job is accepted" msgstr "%s: ÑÑ‚Ð°Ñ€Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° принÑта" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:980 #, c-format msgid "%s: State: ACCEPTED" msgstr "%s: СоÑтоÑние: ACCEPTED" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:986 #, c-format msgid "%s: State: ACCEPTED: dryrun" msgstr "%s: СоÑтоÑние: ACCEPTED: dryrun" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1009 #, c-format msgid "%s: State: ACCEPTED: has process time %s" msgstr "%s: СоÑтоÑние: ACCEPTED: Ð²Ñ€ÐµÐ¼Ñ Ð½Ð° иÑполнение %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1015 #, c-format 
msgid "%s: State: ACCEPTED: moving to PREPARING" msgstr "%s: ÑоÑтоÑние ACCEPTED: переход в PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1031 #, c-format msgid "%s: State: PREPARING" msgstr "%s: СоÑтоÑние: PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1038 #, c-format msgid "%s: Failed obtaining local job information." msgstr "%s: Ðе удалоÑÑŒ извлечь информацию о локальном ÑоÑтоÑнии задачи." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1091 #, c-format msgid "%s: State: SUBMIT" msgstr "%s: СоÑтоÑние: SUBMIT" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1111 #, c-format msgid "%s: State: CANCELING" msgstr "%s: СоÑтоÑние: CANCELING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1131 #, c-format msgid "%s: State: INLRMS" msgstr "%s: СоÑтоÑние: INLRMS" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1136 #, c-format msgid "%s: State: INLRMS - checking for pending(%u) and mark" msgstr "%s: СоÑтоÑние: INLRMS - проверка приоÑтановки(%u) и метка" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1138 #, c-format msgid "%s: State: INLRMS - checking for not pending" msgstr "%s: СоÑтоÑние: INLRMS - проверка отÑутÑÑ‚Ð²Ð¸Ñ Ð¿Ñ€Ð¸Ð¾Ñтановки" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1140 #, c-format msgid "%s: Job finished" msgstr "%s: Задача завершена" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1144 #, c-format msgid "%s: State: INLRMS: exit message is %i %s" msgstr "%s: ÑоÑтоÑние INLRMS: Ñообщение на выходе %i %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1157 #, c-format msgid "%s: State: INLRMS - no mark found" msgstr "%s: СоÑтоÑние: INLRMS - метки не найдены" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1169 #, c-format msgid "%s: State: FINISHING" msgstr "%s: СоÑтоÑние: FINISHING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1190 #, c-format msgid "%s: Job is requested to clean - deleting" msgstr "%s: ПоÑтупил Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° удаление задачи - удалÑетÑÑ" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1207 #, c-format msgid "%s: restarted PREPARING job" msgstr "%s: перезапущена задача из PREPARING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1223 #, c-format msgid "%s: restarted INLRMS job" msgstr "%s: перезапущена задача из INLRMS" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1232 #, c-format msgid "%s: restarted FINISHING job" msgstr "%s: перезапущена задача из FINISHING" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1237 #, c-format msgid "%s: Can't rerun on request" msgstr "%s: ПерезапуÑк по требованию невозможен" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1239 #, c-format msgid "%s: Can't rerun on request - not a suitable state" msgstr "%s: ПерезапуÑк по запроÑу невозможен - неподходÑщее ÑоÑтоÑние" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1250 #, c-format msgid "%s: Job is too old - deleting" msgstr "%s: Задача Ñлишком ÑÑ‚Ð°Ñ€Ð°Ñ - удалÑетÑÑ" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1295 #, c-format msgid "%s: Job is ancient - delete rest of information" msgstr "%s: Задача уÑтарела - удалÑетÑÑ Ð¾ÑтавшаÑÑÑ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1313 #, c-format msgid "%s: Canceling job because of user request" msgstr "%s: Прерывание задачи по запроÑу пользователÑ" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1327 #, c-format msgid "%s: Failed to turn job into failed during cancel processing." 
msgstr "%s: Сбой приÑÐ²Ð¾ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ðµ ÑоÑтоÑÐ½Ð¸Ñ ÑÐ±Ð¾Ñ Ð¿Ñ€Ð¸ обрыве иÑполнениÑ." #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1359 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1367 #, c-format msgid "%s: Plugin at state %s : %s" msgstr "%s: Подключаемый модуль в ÑоÑтоÑнии %s : %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1373 #, c-format msgid "%s: Plugin execution failed" msgstr "%s: Сбой при иÑполнении подключаемого модулÑ" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1480 #, c-format msgid "%s: State: %s from %s" msgstr "%s: СоÑтоÑние: %s поÑле %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1529 #, c-format msgid "Failed to get DN information from .local file for job %s" msgstr "Ðе удалоÑÑŒ извлечь информацию о DN из файла .local задачи %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1556 #, c-format msgid "%s: Delete request due to internal problems" msgstr "%s: Удаление запроÑа в ÑвÑзи Ñ Ð²Ð½ÑƒÑ‚Ñ€ÐµÐ½Ð½Ð¸Ð¼Ð¸ неполадками" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1591 #, c-format msgid "%s: Job failure detected" msgstr "%s: Обнаружен Ñбой задачи" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1651 #, c-format msgid "Failed to move file %s to %s" msgstr "Ðе удалоÑÑŒ перемеÑтить файл %s в %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1659 #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1769 #, c-format msgid "Failed reading control directory: %s" msgstr "Сбой при чтении управлÑющего каталога: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:1729 #, c-format msgid "Failed reading control directory: %s: %s" msgstr "Сбой при чтении управлÑющего каталога: %s: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2043 #, c-format msgid "Helper process start failed: %s" msgstr "Сбой при запуÑке вÑпомогательного процеÑÑа: %s" #: src/services/a-rex/grid-manager/jobs/JobsList.cpp:2050 #, c-format msgid "Stopping helper process %s" msgstr "ОÑтанавливаетÑÑ Ð²Ñпомогательный процеÑÑ %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:61 #, c-format msgid "Error with hearbeatfile: %s" msgstr "Ошибка в файле такта: %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:73 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:139 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:136 #, c-format msgid ": Metrics tool returned error code %i: %s" msgstr ": СредÑтво Ð¸Ð·Ð¼ÐµÑ€ÐµÐ½Ð¸Ñ Ñ…Ð°Ñ€Ð°ÐºÑ‚ÐµÑ€Ð¸Ñтик выдало ошибку %i: %s" #: src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp:107 #: src/services/a-rex/grid-manager/log/JobsMetrics.cpp:186 #: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:178 msgid "" "gmetric_bin_path empty in arc.conf (should never happen the default value " "should be used)" msgstr "" "Значение gmetric_bin_path пуÑто в arc.conf (никогда не должно ÑлучатьÑÑ, " "должно иÑпользоватьÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ по умолчанию)" #: src/services/a-rex/grid-manager/log/JobLog.cpp:114 msgid ": Accounting records reporter tool is not specified" msgstr ": Ðе указано ÑредÑтво ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ñ‡Ñ‘Ñ‚Ð¾Ð² об учётных запиÑÑÑ…" #: src/services/a-rex/grid-manager/log/JobLog.cpp:130 msgid ": Failure creating slot for accounting reporter child process" msgstr ": Сбой подготовки дочернего процеÑÑа ÑредÑтва ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ñ‡Ñ‘Ñ‚Ð¾Ð²" #: src/services/a-rex/grid-manager/log/JobLog.cpp:143 msgid ": Failure starting accounting reporter child process" msgstr ": Сбой запуÑка дочернего процеÑÑа ÑредÑтва ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ñ‡Ñ‘Ñ‚Ð¾Ð²" #: 
#: src/services/a-rex/grid-manager/log/JobLog.cpp:176
msgid ": Failure creating accounting database connection"
msgstr ": Сбой создания соединения с базой данных учёта задач"

#: src/services/a-rex/grid-manager/log/JobLog.cpp:202
#, c-format
msgid ": writing accounting record took %llu ms"
msgstr ": запись учётной записи заняла %llu мс"

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:74
#, c-format
msgid "Session dir '%s' contains user specific substitutions - skipping it"
msgstr "Каталог сессии '%s' содержит пользовательские замены - пропускается"

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:86
#, c-format
msgid "Sessiondir %s: Free space %f GB"
msgstr "Каталог сессии %s: свободное пространство %f ГБ"

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:94
msgid "No session directories found in configuration."
msgstr "Не найдены каталоги сессий в файле настроек."

#: src/services/a-rex/grid-manager/log/SpaceMetrics.cpp:125
msgid "No cachedirs found/configured for calculation of free space."
msgstr "Каталоги кэша не найдены или не настроены для вычисления свободного пространства."

#: src/services/a-rex/grid-manager/mail/send_mail.cpp:29
msgid "Failed reading local information"
msgstr "Не удалось прочесть локальную информацию"

#: src/services/a-rex/grid-manager/mail/send_mail.cpp:79
#, c-format
msgid "Running mailer command (%s)"
msgstr "Выполнение команды рассылки (%s)"

#: src/services/a-rex/grid-manager/mail/send_mail.cpp:81
msgid "Failed running mailer"
msgstr "Не удалось запустить службу рассылки"

#: src/services/a-rex/grid-manager/run/RunParallel.cpp:34
#, c-format
msgid "%s: Job's helper exited"
msgstr "%s: Вспомогательный процесс задачи завершился"

#: src/services/a-rex/grid-manager/run/RunParallel.cpp:71
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:24
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:56
#, c-format
msgid "%s: Failure creating slot for child process"
msgstr "%s: Сбой создания области памяти для дочернего процесса"

#: src/services/a-rex/grid-manager/run/RunParallel.cpp:120
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:41
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:73
#, c-format
msgid "%s: Failure starting child process"
msgstr "%s: Сбой при запуске дочернего процесса"

#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:30
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:62
#, c-format
msgid "%s: Failure creating data storage for child process"
msgstr "%s: Сбой создания хранилища данных для дочернего процесса"

#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:46
#: src/services/a-rex/grid-manager/run/RunRedirected.cpp:78
#, c-format
msgid "%s: Failure waiting for child process to finish"
msgstr "%s: Сбой ожидания окончания дочернего процесса"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:47
msgid "[job description input]"
msgstr "[ввод описания задачи]"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:48
msgid "Tool for writing the grami file representation of a job description file."
msgstr "Утилита для представления файла описания задачи в виде файла grami."
#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:52
msgid "Name of grami file"
msgstr "Имя файла grami"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:57
msgid "Configuration file to load"
msgstr "Используемый файл конфигурации"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:58
msgid "arc.conf"
msgstr "arc.conf"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:62
msgid "Session directory to use"
msgstr "Используемый каталог сессии"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:63
msgid "directory"
msgstr "каталог"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:79
msgid "No job description file name provided."
msgstr "Не указан файл с описанием задачи."

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:85
#, c-format
msgid "Unable to parse job description input: %s"
msgstr "Невозможно разобрать введённое описание задачи: %s"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:91
msgid "Unable to load ARC configuration file."
msgstr "Не удалось загрузить файл конфигурации ARC."

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:111
#, c-format
msgid "Unable to write grami file: %s"
msgstr "Не удалось записать файл grami: %s"

#: src/services/a-rex/grid-manager/test_write_grami_file.cpp:117
#, c-format
msgid "Unable to write 'output' file: %s"
msgstr "Не удалось записать файл 'output': %s"

#: src/services/a-rex/information_collector.cpp:53
#, c-format
msgid "Resource information provider: %s"
msgstr "Сборщик информации о ресурсе: %s"

#: src/services/a-rex/information_collector.cpp:56
msgid "Resource information provider failed to start"
msgstr "Сбой запуска сборщика информации о ресурсе"

#: src/services/a-rex/information_collector.cpp:59
msgid "Resource information provider failed to run"
msgstr "Сбой работы сборщика информации о ресурсе"

#: src/services/a-rex/information_collector.cpp:63
#, c-format
msgid ""
"Resource information provider failed with exit status: %i\n"
"%s"
msgstr ""
"Сбой сборщика информации о ресурсе с выходным статусом: %i\n"
"%s"

#: src/services/a-rex/information_collector.cpp:65
#, c-format
msgid ""
"Resource information provider log:\n"
"%s"
msgstr ""
"Журнал сборщика информации о ресурсе:\n"
"%s"

#: src/services/a-rex/information_collector.cpp:71
msgid "No new informational document assigned"
msgstr "Не присвоено новых информационных документов"

#: src/services/a-rex/information_collector.cpp:73
#, c-format
msgid "Obtained XML: %s"
msgstr "Полученный XML: %s"

#: src/services/a-rex/information_collector.cpp:87
msgid "Informational document is empty"
msgstr "Пустой информационный документ"

#: src/services/a-rex/information_collector.cpp:212
msgid "OptimizedInformationContainer failed to create temporary file"
msgstr "OptimizedInformationContainer не смог создать временный файл"

#: src/services/a-rex/information_collector.cpp:215
#, c-format
msgid "OptimizedInformationContainer created temporary file: %s"
msgstr "OptimizedInformationContainer создал временный файл: %s"

#: src/services/a-rex/information_collector.cpp:221
msgid "OptimizedInformationContainer failed to store XML document to temporary file"
msgstr "OptimizedInformationContainer не смог записать документ XML во временный файл"

#: src/services/a-rex/information_collector.cpp:230
msgid "OptimizedInformationContainer failed to parse XML"
msgstr "OptimizedInformationContainer не смог разобрать XML"

#: src/services/a-rex/information_collector.cpp:242
msgid "OptimizedInformationContainer failed to rename temporary file"
msgstr "OptimizedInformationContainer не смог переименовать временный файл"
"OptimizedInformationContainer failed to rename temporary file" msgstr "OptimizedInformationContainer не Ñмог переименовать временный файл" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:38 msgid "Default INTERNAL client constructor" msgstr "КонÑтруктор по умолчанию клиента INTERNAL" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:41 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:61 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:83 msgid "Failed to load grid-manager configfile" msgstr "Ðе удалоÑÑŒ подгрузить файл наÑтроек grid-manager" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:46 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:66 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:88 msgid "Failed to set INTERNAL endpoint" msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ входа INTERNAL" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:131 msgid "Failed to identify grid-manager config file" msgstr "Ðе удалоÑÑŒ обнаружить файл наÑтроек grid-manager" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:150 #, c-format msgid "Failed to run configuration parser at %s." msgstr "Сбой запуÑка разборщика файла наÑтроек %s." #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:154 #, c-format msgid "Parser failed with error code %i." msgstr "Сбой разборщика Ñ ÐºÐ¾Ð´Ð¾Ð¼ ошибки %i." #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:160 #, c-format msgid "No pid file is found at '%s'. Probably A-REX is not running." msgstr "Ðе обнаружен файл pid в '%s'. Возможно, A-REX не запущен." #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:175 #, c-format msgid "Failed to load grid-manager config file from %s" msgstr "Ðе удалоÑÑŒ подгрузить файл наÑтроек grid-manager из %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:266 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:372 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:405 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:451 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:505 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:557 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:575 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:625 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:655 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:673 #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:691 msgid "INTERNALClient is not initialized" msgstr "Клиент INTERNALClient не запущен" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:456 msgid "Submitting job " msgstr "ЗапуÑк задачи " #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:517 #, c-format msgid "Failed to copy input file: %s to path: %s" msgstr "Сбой ÐºÐ¾Ð¿Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð²Ñ…Ð¾Ð´Ð½Ð¾Ð³Ð¾ файла: %s в размещение: %s" #: src/services/a-rex/internaljobplugin/INTERNALClient.cpp:523 #, c-format msgid "Failed to set permissions on: %s" msgstr "Сбой Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð°Ð² доÑтупа к %s" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:51 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:92 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:119 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:145 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:184 #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:246 msgid "Failed to load grid-manager config 
file" msgstr "Ðе удалоÑÑŒ подгрузить файл наÑтроек grid-manager" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:191 #, c-format msgid "Job %s does not report a resumable state" msgstr "Задача %s не находитÑÑ Ð² возобновлÑемом ÑоÑтоÑнии" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:196 #, c-format msgid "Resuming job: %s at state: %s (%s)" msgstr "Возобновление задачи %s в ÑоÑтоÑнии %s (%s)" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:205 msgid "Job resuming successful" msgstr "Задача уÑпешно возобновлена" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:251 #, c-format msgid "Failed retrieving information for job: %s" msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о задаче: %s" #: src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp:324 msgid "Retrieving job description of INTERNAL jobs is not supported" msgstr "Получение опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡ INTERNAL не поддерживаетÑÑ" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:67 #, c-format msgid "Listing localjobs succeeded, %d localjobs found" msgstr "Локальные задачи уÑпешно перечиÑлены, обнаружено %d задач(и)" #: src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp:83 #, c-format msgid "" "Skipping retrieved job (%s) because it was submitted via another interface " "(%s)." msgstr "" "ПропуÑкаетÑÑ ÑÐºÐ°Ñ‡Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), так как она была запущена через другой " "Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ (%s)." #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:38 msgid "" "Failed to delegate credentials to server - no delegation interface found" msgstr "" "Сбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа на Ñервер - не обнаружен Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ " "делегированиÑ" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:45 #, c-format msgid "Failed to delegate credentials to server - %s" msgstr "Сбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа на Ñервер - %s" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:84 msgid "Failed preparing job description" msgstr "Ðе удалоÑÑŒ подготовить опиÑание задачи" #: src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp:127 msgid "Failed submitting job description" msgstr "Сбой заÑылки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #: src/services/a-rex/job.cpp:78 #, c-format msgid "Using cached local account '%s'" msgstr "ИÑпользуетÑÑ ÐºÑÑˆÐ¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð¼ÐµÑÑ‚Ð½Ð°Ñ ÑƒÑ‡Ñ‘Ñ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ '%s'" #: src/services/a-rex/job.cpp:89 msgid "Will not map to 'root' account by default" msgstr "По умолчанию привÑзки к учётной запиÑи 'root' не будет" #: src/services/a-rex/job.cpp:102 msgid "No local account name specified" msgstr "Ðе указано Ð¸Ð¼Ñ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð¾Ð¹ учётной запиÑи" #: src/services/a-rex/job.cpp:105 #, c-format msgid "Using local account '%s'" msgstr "ИÑпользуетÑÑ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ ÑƒÑ‡Ñ‘Ñ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ '%s'" #: src/services/a-rex/job.cpp:109 msgid "TLS provides no identity, going for OTokens" msgstr "TLS не передал идентификацию, переход к OTokens" #: src/services/a-rex/job.cpp:168 msgid "Failed to acquire A-REX's configuration" msgstr "Ðе удалоÑÑŒ получить наÑтройки A-REX" #: src/services/a-rex/job.cpp:240 #, c-format msgid "Cannot handle local user %s" msgstr "Ðевозможно обÑлужить локального Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s" #: src/services/a-rex/job.cpp:288 #, c-format msgid "%s: Failed to parse user policy" msgstr "%s: Сбой при разборе правил допуÑка пользователÑ" #: 
#: src/services/a-rex/job.cpp:293
#, c-format
msgid "%s: Failed to load evaluator for user policy "
msgstr "%s: Не удалось подгрузить анализатор для правил допуска пользователей "

#: src/services/a-rex/job.cpp:398
#, c-format
msgid "%s: Unknown user policy '%s'"
msgstr "%s: Неизвестное правило допуска пользователя '%s'"

#: src/services/a-rex/job.cpp:738 src/services/a-rex/job.cpp:756
#, c-format
msgid "Credential expires at %s"
msgstr "Срок действия параметров доступа истекает в %s"

#: src/services/a-rex/job.cpp:740 src/services/a-rex/job.cpp:758
#, c-format
msgid "Credential handling exception: %s"
msgstr "Исключение при обработке параметров доступа: %s"

#: src/services/a-rex/job.cpp:924
#, c-format
msgid "Failed to run external plugin: %s"
msgstr "Не удалось запустить внешний подключаемый модуль: %s"

#: src/services/a-rex/job.cpp:928
#, c-format
msgid "Plugin response: %s"
msgstr "Ответ подключаемого модуля: %s"

#: src/services/a-rex/job.cpp:1138
#, c-format
msgid "Failed to create job in %s"
msgstr "Не удалось создать задачу в %s"

#: src/services/a-rex/job.cpp:1147
#, c-format
msgid "Out of tries while allocating new job ID in %s"
msgstr "Закончились попытки присвоения нового ярлыка задачи в %s"

#: src/services/a-rex/job.cpp:1397
msgid "No non-draining session dirs available"
msgstr "Нет каталогов сессий не в состоянии разгрузки"

#: src/services/a-rex/put.cpp:150
#, c-format
msgid "%s: put file %s: there is no payload"
msgstr "%s: запись файла %s: отсутствует полезная нагрузка"

#: src/services/a-rex/put.cpp:156
#, c-format
msgid "%s: put file %s: unrecognized payload"
msgstr "%s: запись файла %s: неопознанная полезная нагрузка"

#: src/services/a-rex/put.cpp:172 src/services/a-rex/rest/rest.cpp:2050
#, c-format
msgid "%s: put file %s: failed to create file: %s"
msgstr "%s: запись файла %s: сбой при создании файла: %s"

#: src/services/a-rex/put.cpp:188
#, c-format
msgid "%s: put file %s: %s"
msgstr "%s: запись файла %s: %s"

#: src/services/a-rex/put.cpp:210
#, c-format
msgid "%s: delete file %s: failed to obtain file path: %s"
msgstr "%s: удаление файла %s: сбой получения пути к файлу: %s"

#: src/services/a-rex/put.cpp:221
#, c-format
msgid "%s: delete file %s: failed to open file/dir: %s"
msgstr "%s: удаление файла %s: сбой открытия файла/каталога: %s"

#: src/services/a-rex/rest/rest.cpp:749
#, c-format
msgid "REST: process %s at %s"
msgstr "REST: обработка %s в %s"

#: src/services/a-rex/rest/rest.cpp:797 src/services/a-rex/rest/rest.cpp:813
#: src/services/a-rex/rest/rest.cpp:1094 src/services/a-rex/rest/rest.cpp:1185
#: src/services/a-rex/rest/rest.cpp:1549 src/services/a-rex/rest/rest.cpp:2161
#, c-format
msgid "process: method %s is not supported for subpath %s"
msgstr "обработка: метод %s не поддерживается для части пути %s"

#: src/services/a-rex/rest/rest.cpp:819
#, c-format
msgid "process: schema %s is not supported for subpath %s"
msgstr "обработка: схема %s не поддерживается для части пути %s"

#: src/services/a-rex/rest/rest.cpp:1182 src/services/a-rex/rest/rest.cpp:1546
#, c-format
msgid "process: action %s is not supported for subpath %s"
msgstr "обработка: действие %s не поддерживается для части пути %s"

#: src/services/a-rex/rest/rest.cpp:1558 src/services/a-rex/rest/rest.cpp:1627
#: src/services/a-rex/rest/rest.cpp:1987 src/services/a-rex/rest/rest.cpp:2150
#, c-format
msgid "REST:GET job %s - %s"
msgstr "REST:GET задачи %s - %s"
#: src/services/a-rex/rest/rest.cpp:1674 src/services/a-rex/rest/rest.cpp:1682
#, c-format
msgid "REST:KILL job %s - %s"
msgstr "REST:KILL задачи %s - %s"

#: src/services/a-rex/rest/rest.cpp:1699 src/services/a-rex/rest/rest.cpp:1707
#, c-format
msgid "REST:CLEAN job %s - %s"
msgstr "REST:CLEAN задачи %s - %s"

#: src/services/a-rex/rest/rest.cpp:1724 src/services/a-rex/rest/rest.cpp:1732
#: src/services/a-rex/rest/rest.cpp:1749
#, c-format
msgid "REST:RESTART job %s - %s"
msgstr "REST:RESTART задачи %s - %s"

#: src/services/a-rex/rest/rest.cpp:2040
#, c-format
msgid "REST:PUT job %s: file %s: there is no payload"
msgstr "REST:PUT задачи %s: файл %s: отсутствует нагрузка"

#: src/services/a-rex/rest/rest.cpp:2063
#, c-format
msgid "HTTP:PUT %s: put file %s: %s"
msgstr "HTTP:PUT %s: запись файла %s: %s"

#: src/services/a-rex/test_cache_check.cpp:24
#: src/tests/count/test_client.cpp:20
#: src/tests/echo/echo_test4axis2c/test_client.cpp:20
#: src/tests/echo/test_client.cpp:21
msgid "Creating client side chain"
msgstr "Создание цепи на стороне клиента"

#: src/services/a-rex/update_credentials.cpp:29
#, c-format
msgid ""
"UpdateCredentials: request = \n"
"%s"
msgstr ""
"UpdateCredentials: запрос = \n"
"%s"

#: src/services/a-rex/update_credentials.cpp:35
msgid "UpdateCredentials: missing Reference"
msgstr "UpdateCredentials: отсутствует ссылка"

#: src/services/a-rex/update_credentials.cpp:43
msgid "UpdateCredentials: wrong number of Reference"
msgstr "UpdateCredentials: недопустимое количество ссылок"

#: src/services/a-rex/update_credentials.cpp:51
msgid "UpdateCredentials: wrong number of elements inside Reference"
msgstr "UpdateCredentials: недопустимое число элементов внутри Reference"

#: src/services/a-rex/update_credentials.cpp:60
msgid "UpdateCredentials: EPR contains no JobID"
msgstr "UpdateCredentials: EPR не содержит JobID"

#: src/services/a-rex/update_credentials.cpp:70
#, c-format
msgid "UpdateCredentials: no job found: %s"
msgstr "UpdateCredentials: задача не обнаружена: %s"

#: src/services/a-rex/update_credentials.cpp:77
msgid "UpdateCredentials: failed to update credentials"
msgstr "UpdateCredentials: невозможно обновить параметры доступа"

#: src/services/a-rex/update_credentials.cpp:85
#, c-format
msgid ""
"UpdateCredentials: response = \n"
"%s"
msgstr ""
"UpdateCredentials: отзыв = \n"
"%s"

#: src/services/candypond/CandyPond.cpp:52
msgid "No A-REX config file found in candypond configuration"
msgstr "Файл настроек A-REX в настройках candypond не обнаружен"

#: src/services/candypond/CandyPond.cpp:56
#, c-format
msgid "Using A-REX config file %s"
msgstr "Используется файл настроек A-REX %s"

#: src/services/candypond/CandyPond.cpp:60
#, c-format
msgid "Failed to process A-REX configuration in %s"
msgstr "Не удалось обработать настройки A-REX в %s"

#: src/services/candypond/CandyPond.cpp:65
msgid "No caches defined in configuration"
msgstr "Кэш не описан в файле настроек"

#: src/services/candypond/CandyPond.cpp:140
#: src/services/candypond/CandyPond.cpp:347
#, c-format
msgid "Can't handle URL %s"
msgstr "Невозможно обработать URL %s"

#: src/services/candypond/CandyPond.cpp:150
msgid "Empty filename returned from FileCache"
msgstr "FileCache возвратил пустое имя файла"

#: src/services/candypond/CandyPond.cpp:162
#, c-format
msgid "Problem accessing cache file %s: %s"
msgstr "Проблема при доступе к кэшированному файлу %s: %s"

#: src/services/candypond/CandyPond.cpp:210
#: src/services/candypond/CandyPond.cpp:474
msgid "No job ID supplied"
msgstr "Не указан ярлык задачи"
#: src/services/candypond/CandyPond.cpp:219
#, c-format
msgid "Bad number in priority element: %s"
msgstr "Недопустимый приоритет: %s"

#: src/services/candypond/CandyPond.cpp:228
msgid "No username supplied"
msgstr "Не указано имя пользователя"

#: src/services/candypond/CandyPond.cpp:235
#, c-format
msgid "Supplied username %s does not match mapped username %s"
msgstr "Указанное имя пользователя %s не совпадает с сопоставленным именем пользователя %s"

#: src/services/candypond/CandyPond.cpp:249
msgid "No session directory found"
msgstr "Не найден каталог сессии"

#: src/services/candypond/CandyPond.cpp:253
#, c-format
msgid "Using session dir %s"
msgstr "Используется каталог сессии %s"

#: src/services/candypond/CandyPond.cpp:257
#, c-format
msgid "Failed to stat session dir %s"
msgstr "Не удалось проверить состояние каталога сессии %s"

#: src/services/candypond/CandyPond.cpp:262
#, c-format
msgid "Session dir %s is owned by %i, but current mapped user is %i"
msgstr "Каталог сессии %s принадлежит %i, но текущий пользователь - %i"

#: src/services/candypond/CandyPond.cpp:289
#, c-format
msgid "Failed to access proxy of given job id %s at %s"
msgstr "Сбой доступа к доверенности указанной задачи %s в %s"

#: src/services/candypond/CandyPond.cpp:307
#, c-format
msgid "DN is %s"
msgstr "DN: %s"

#: src/services/candypond/CandyPond.cpp:385
#, c-format
msgid "Permission checking passed for url %s"
msgstr "Проверка прав доступа пройдена для URL %s"

#: src/services/candypond/CandyPond.cpp:410
#: src/services/candypond/CandyPondGenerator.cpp:135
#, c-format
msgid "Failed to move %s to %s: %s"
msgstr "Не удалось переместить %s в %s: %s"

#: src/services/candypond/CandyPond.cpp:441
#, c-format
msgid "Starting new DTR for %s"
msgstr "Запускается новый запрос DTR для %s"

#: src/services/candypond/CandyPond.cpp:443
#, c-format
msgid "Failed to start new DTR for %s"
msgstr "Не удалось запустить новый запрос DTR для %s"

#: src/services/candypond/CandyPond.cpp:487
#, c-format
msgid "Job %s: all files downloaded successfully"
msgstr "Задача %s: все файлы успешно загружены"

#: src/services/candypond/CandyPond.cpp:494
#, c-format
msgid "Job %s: Some downloads failed"
msgstr "Задача %s: Сбой некоторых загрузок"

#: src/services/candypond/CandyPond.cpp:499
#, c-format
msgid "Job %s: files still downloading"
msgstr "Задача %s: файлы всё ещё загружаются"

#: src/services/candypond/CandyPond.cpp:511
msgid "CandyPond: Unauthorized"
msgstr "CandyPond: Доступ закрыт"

#: src/services/candypond/CandyPond.cpp:520
msgid "No local user mapping found"
msgstr "Пользователь не приписан ни к одному локальному имени"

#: src/services/candypond/CandyPond.cpp:527
#: src/services/data-staging/DataDeliveryService.cpp:649
#, c-format
msgid "Identity is %s"
msgstr "Личные данные: %s"

#: src/services/candypond/CandyPond.cpp:585
#: src/services/data-staging/DataDeliveryService.cpp:721
msgid "Security Handlers processing failed"
msgstr "Сбой в процессе обработки прав доступа"

#: src/services/candypond/CandyPond.cpp:592
msgid "Only POST is supported in CandyPond"
msgstr "CandyPond поддерживает только POST"

#: src/services/candypond/CandyPondGenerator.cpp:88
#, c-format
msgid "DTR %s finished with state %s"
msgstr "DTR %s завершился в состоянии %s"

#: src/services/candypond/CandyPondGenerator.cpp:124
#, c-format
msgid "Could not determine session directory from filename %s"
msgstr "Не удалось определить каталог сессии из имени файла %s"
#: src/services/candypond/CandyPondGenerator.cpp:164
#, c-format
msgid "Invalid DTR for source %s, destination %s"
msgstr "Недопустимый DTR для источника %s, назначения %s"

#: src/services/candypond/CandyPondGenerator.cpp:206
#, c-format
msgid "DTRs still running for job %s"
msgstr "Запросы DTR для задачи %s всё ещё исполняются"

#: src/services/candypond/CandyPondGenerator.cpp:215
#, c-format
msgid "All DTRs finished for job %s"
msgstr "Все запросы DTR для задачи %s завершены"

#: src/services/candypond/CandyPondGenerator.cpp:222
#, c-format
msgid "Job %s not found"
msgstr "Задача %s не обнаружена"

#: src/services/data-staging/DataDeliveryService.cpp:66
#, c-format
msgid "Archiving DTR %s, state ERROR"
msgstr "Архивирование запроса DTR %s, состояние ERROR"

#: src/services/data-staging/DataDeliveryService.cpp:70
#, c-format
msgid "Archiving DTR %s, state %s"
msgstr "Архивирование запроса DTR %s, состояние %s"

#: src/services/data-staging/DataDeliveryService.cpp:174
msgid "No delegation token in request"
msgstr "В запросе отсутствует токен делегирования"

#: src/services/data-staging/DataDeliveryService.cpp:184
msgid "Failed to accept delegation"
msgstr "Не удалось принять делегирование"

#: src/services/data-staging/DataDeliveryService.cpp:214
#: src/services/data-staging/DataDeliveryService.cpp:221
msgid "ErrorDescription"
msgstr "Описание ошибки"

#: src/services/data-staging/DataDeliveryService.cpp:226
#, c-format
msgid "All %u process slots used"
msgstr "Квота на процессы (%u) использована"

#: src/services/data-staging/DataDeliveryService.cpp:241
#, c-format
msgid "Received retry for DTR %s still in transfer"
msgstr "Получена повторная попытка запроса DTR %s, всё ещё в состоянии передачи"

#: src/services/data-staging/DataDeliveryService.cpp:248
#, c-format
msgid "Replacing DTR %s in state %s with new request"
msgstr "Запрос DTR %s в состоянии %s заменяется новым запросом"

#: src/services/data-staging/DataDeliveryService.cpp:258
#, c-format
msgid "Storing temp proxy at %s"
msgstr "Сохранение временной доверенности в %s"

#: src/services/data-staging/DataDeliveryService.cpp:266
#, c-format
msgid "Failed to create temp proxy at %s: %s"
msgstr "Не удалось создать временную доверенность в %s: %s"

#: src/services/data-staging/DataDeliveryService.cpp:273
#, c-format
msgid "Failed to change owner of temp proxy at %s to %i:%i: %s"
msgstr "Не удалось поменять владельца временной доверенности в %s на %i:%i: %s"

#: src/services/data-staging/DataDeliveryService.cpp:302
msgid "Invalid DTR"
msgstr "Недействительный запрос DTR"

#: src/services/data-staging/DataDeliveryService.cpp:306
#, c-format
msgid "Failed to remove temporary proxy %s: %s"
msgstr "Не удалось удалить временную доверенность %s: %s"

#: src/services/data-staging/DataDeliveryService.cpp:407
#, c-format
msgid "No such DTR %s"
msgstr "Нет такого запроса DTR %s"

#: src/services/data-staging/DataDeliveryService.cpp:425
#, c-format
msgid "DTR %s failed: %s"
msgstr "Сбой запроса DTR %s: %s"

#: src/services/data-staging/DataDeliveryService.cpp:436
#, c-format
msgid "DTR %s finished successfully"
msgstr "Запрос DTR %s успешно завершён"

#: src/services/data-staging/DataDeliveryService.cpp:446
#, c-format
msgid "DTR %s still in progress (%lluB transferred)"
msgstr "Запрос DTR %s ещё в процессе (передано %lluB)"

#: src/services/data-staging/DataDeliveryService.cpp:506
#, c-format
msgid "No active DTR %s"
msgstr "Нет активных запросов DTR %s"
#: src/services/data-staging/DataDeliveryService.cpp:516
#, c-format
msgid "DTR %s was already cancelled"
msgstr "Запрос DTR %s уже был прерван"

#: src/services/data-staging/DataDeliveryService.cpp:525
#, c-format
msgid "DTR %s could not be cancelled"
msgstr "Запрос DTR %s не может быть прерван"

#: src/services/data-staging/DataDeliveryService.cpp:569
#, c-format
msgid "Failed to get load average: %s"
msgstr "Сбой вычисления усреднённой загруженности: %s"

#: src/services/data-staging/DataDeliveryService.cpp:593
msgid "Invalid configuration - no allowed IP address specified"
msgstr "Неверная настройка - не указано ни одного допустимого IP-адреса"

#: src/services/data-staging/DataDeliveryService.cpp:597
msgid "Invalid configuration - no transfer dirs specified"
msgstr "Недопустимая настройка - не указано ни одного каталога для передач"

#: src/services/data-staging/DataDeliveryService.cpp:608
msgid "Failed to start archival thread"
msgstr "Не удалось запустить поток архивирования"

#: src/services/data-staging/DataDeliveryService.cpp:633
msgid "Shutting down data delivery service"
msgstr "Закрывается служба размещения данных"

#: src/services/data-staging/DataDeliveryService.cpp:642
msgid "Unauthorized"
msgstr "Доступ закрыт"

#: src/services/data-staging/DataDeliveryService.cpp:728
msgid "Only POST is supported in DataDeliveryService"
msgstr "DataDeliveryService поддерживает только POST"

#: src/services/examples/echo_python/EchoService.py:12
msgid "EchoService (python) constructor called"
msgstr "Вызван Python-конструктор EchoService"

#: src/services/examples/echo_python/EchoService.py:17
#, python-format
msgid "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s"
msgstr "EchoService (Python) содержит приставку %(prefix)s и суффикс %(suffix)s"

#: src/services/examples/echo_python/EchoService.py:24
msgid "EchoService (python) destructor called"
msgstr "Вызван Python-деструктор EchoService"

#: src/services/examples/echo_python/EchoService.py:54
msgid "EchoService (python) thread test starting"
msgstr "Запуск теста потоков службы EchoService (python)"

#: src/services/examples/echo_python/EchoService.py:65
#, python-format
msgid "EchoService (python) thread test, iteration %(iteration)s %(status)s"
msgstr "Запуск теста потоков службы EchoService (python), итерация %(iteration)s %(status)s"

#: src/services/examples/echo_python/EchoService.py:82
msgid "EchoService (python) 'Process' called"
msgstr "Вызван 'Process' EchoService (Python)"

#: src/services/examples/echo_python/EchoService.py:86
#, python-format
msgid "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s"
msgstr "inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s"

#: src/services/examples/echo_python/EchoService.py:87
#, python-format
msgid "inmsg.Attributes().getAll() = %s "
msgstr "inmsg.Attributes().getAll() = %s "

#: src/services/examples/echo_python/EchoService.py:88
#, python-format
msgid "EchoService (python) got: %s "
msgstr "EchoService (python) получил: %s "

#: src/services/examples/echo_python/EchoService.py:93
#, python-format
msgid "EchoService (python) request_namespace: %s"
msgstr "EchoService (python) request_namespace: %s"

#: src/services/examples/echo_python/EchoService.py:99
#: src/services/examples/echo_python/EchoService.py:171
#, python-format
msgid "outpayload %s"
msgstr "outpayload %s"
#: src/services/examples/echo_python/EchoService.py:128
msgid "Calling https://localhost:60000/Echo using ClientSOAP"
msgstr "Вызывается https://localhost:60000/Echo используя ClientSOAP"

#: src/services/examples/echo_python/EchoService.py:131
msgid "Calling http://localhost:60000/Echo using ClientSOAP"
msgstr "Вызывается http://localhost:60000/Echo используя ClientSOAP"

#: src/services/examples/echo_python/EchoService.py:137
#: src/services/examples/echo_python/EchoService.py:155
#, python-format
msgid "new_payload %s"
msgstr "new_payload %s"

#: src/services/examples/echo_python/EchoService.py:149
msgid "Calling http://localhost:60000/Echo using httplib"
msgstr "Вызывается http://localhost:60000/Echo используя httplib"

#: src/services/examples/echo_python/EchoService.py:165
msgid "Start waiting 10 sec..."
msgstr "Ждём 10 секунд..."

#: src/services/examples/echo_python/EchoService.py:167
msgid "Waiting ends."
msgstr "Ожидание завершено."

#: src/services/wrappers/python/pythonwrapper.cpp:103
#, c-format
msgid "Loading %u-th Python service"
msgstr "Загружается %u-я служба Python"

#: src/services/wrappers/python/pythonwrapper.cpp:107
#, c-format
msgid "Initialized %u-th Python service"
msgstr "Запущена %u-я служба Python"

#: src/services/wrappers/python/pythonwrapper.cpp:142
msgid "Invalid class name"
msgstr "Неверное название класса"

#: src/services/wrappers/python/pythonwrapper.cpp:147
#, c-format
msgid "class name: %s"
msgstr "название класса: %s"

#: src/services/wrappers/python/pythonwrapper.cpp:148
#, c-format
msgid "module name: %s"
msgstr "название модуля: %s"

#: src/services/wrappers/python/pythonwrapper.cpp:205
msgid "Cannot find ARC Config class"
msgstr "Не удалось обнаружить класс ARC Config"

#: src/services/wrappers/python/pythonwrapper.cpp:212
msgid "Config class is not an object"
msgstr "Класс Config не является объектом"

#: src/services/wrappers/python/pythonwrapper.cpp:220
msgid "Cannot get dictionary of module"
msgstr "Ошибка доступа к словарю модуля"

#: src/services/wrappers/python/pythonwrapper.cpp:229
msgid "Cannot find service class"
msgstr "Не удалось найти класс сервиса"

#: src/services/wrappers/python/pythonwrapper.cpp:238
msgid "Cannot create config argument"
msgstr "Не удалось создать аргумент настроек"

#: src/services/wrappers/python/pythonwrapper.cpp:245
msgid "Cannot convert config to Python object"
msgstr "Не удалось преобразовать настройки в объект Python"

#: src/services/wrappers/python/pythonwrapper.cpp:268
#, c-format
msgid "%s is not an object"
msgstr "%s не является объектом"

#: src/services/wrappers/python/pythonwrapper.cpp:274
msgid "Message class is not an object"
msgstr "Класс Message не является объектом"

#: src/services/wrappers/python/pythonwrapper.cpp:280
msgid "Python Wrapper constructor succeeded"
msgstr "Конструктор надстройки Python отработал успешно"

#: src/services/wrappers/python/pythonwrapper.cpp:295
#, c-format
msgid "Python Wrapper destructor (%d)"
msgstr "Деструктор оболочки Python (%d)"

#: src/services/wrappers/python/pythonwrapper.cpp:328
msgid "Python interpreter locked"
msgstr "Интерпретатор Python заблокирован"

#: src/services/wrappers/python/pythonwrapper.cpp:332
msgid "Python interpreter released"
msgstr "Интерпретатор Python разблокирован"

#: src/services/wrappers/python/pythonwrapper.cpp:403
msgid "Python wrapper process called"
msgstr "Вызван процесс Python wrapper"

#: src/services/wrappers/python/pythonwrapper.cpp:412
msgid "Failed to create input SOAP container"
msgstr "Не удалось создать входной контейнер SOAP"
удалоÑÑŒ Ñоздать аргумент inmsg" #: src/services/wrappers/python/pythonwrapper.cpp:436 msgid "Cannot find ARC Message class" msgstr "Ðе удалоÑÑŒ обнаружить клаÑÑ ARC Message" #: src/services/wrappers/python/pythonwrapper.cpp:442 msgid "Cannot convert inmsg to Python object" msgstr "Ðе удалоÑÑŒ преобразовать inmsg в объект Python" #: src/services/wrappers/python/pythonwrapper.cpp:451 msgid "Failed to create SOAP containers" msgstr "Ðе удалоÑÑŒ Ñоздать контейеры SOAP" #: src/services/wrappers/python/pythonwrapper.cpp:457 msgid "Cannot create outmsg argument" msgstr "Ðе удалоÑÑŒ Ñоздать аргумент outmsg" #: src/services/wrappers/python/pythonwrapper.cpp:463 msgid "Cannot convert outmsg to Python object" msgstr "Ðе удалоÑÑŒ преобразовать outmsg в объект Python" #: src/tests/client/test_ClientInterface.cpp:36 #: src/tests/client/test_ClientSAML2SSO.cpp:68 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:78 #: src/tests/echo/test_clientinterface.cpp:41 #: src/tests/echo/test_clientinterface.cpp:132 #: src/tests/echo/test_clientinterface.py:12 msgid "Creating a soap client" msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ SOAP" #: src/tests/client/test_ClientInterface.cpp:73 #: src/tests/client/test_ClientSAML2SSO.cpp:47 #: src/tests/client/test_ClientSAML2SSO.cpp:71 #: src/tests/count/test_client.cpp:61 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:85 #: src/tests/echo/echo_test4axis2c/test_client.cpp:56 #: src/tests/echo/test.cpp:62 src/tests/echo/test_client.cpp:72 #: src/tests/echo/test_clientinterface.cpp:67 #: src/tests/echo/test_clientinterface.cpp:107 #: src/tests/echo/test_clientinterface.cpp:136 #: src/tests/echo/test_clientinterface.py:22 msgid "Creating and sending request" msgstr "Создание и заÑылка запроÑа" #: src/tests/client/test_ClientInterface.cpp:84 #: src/tests/delegation/test_client_with_delegation_sechandler.cpp:97 #: src/tests/echo/test_clientinterface.cpp:78 #: src/tests/echo/test_clientinterface.py:30 msgid "SOAP invocation failed" msgstr "Ðе удалаÑÑŒ Ð°ÐºÑ‚Ð¸Ð²Ð¸Ð·Ð°Ñ†Ð¸Ñ SOAP" #: src/tests/client/test_ClientSAML2SSO.cpp:44 #: src/tests/echo/test_clientinterface.cpp:100 msgid "Creating a http client" msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ HTTP" #: src/tests/client/test_ClientSAML2SSO.cpp:55 #: src/tests/echo/test_clientinterface.cpp:117 msgid "HTTP with SAML2SSO invocation failed" msgstr "ÐÐºÑ‚Ð¸Ð²Ð¸Ð·Ð°Ñ†Ð¸Ñ HTTP Ñ SAML2SSO не выполнена" #: src/tests/client/test_ClientSAML2SSO.cpp:59 #: src/tests/echo/test_clientinterface.cpp:121 msgid "There was no HTTP response" msgstr "Ðет ответа HTTP" #: src/tests/client/test_ClientSAML2SSO.cpp:77 #: src/tests/echo/test_clientinterface.cpp:145 msgid "SOAP with SAML2SSO invocation failed" msgstr "ÐÐºÑ‚Ð¸Ð²Ð¸Ð·Ð°Ñ†Ð¸Ñ SOAP Ñ SAML2SSO не выполнена" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:37 #: src/tests/client/test_ClientX509Delegation_GridSite.cpp:38 #: src/tests/delegation/test_delegation_client.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:77 #: src/tests/echo/test_clientinterface.cpp:172 #: src/tests/echo/test_clientinterface.cpp:194 msgid "Creating a delegation soap client" msgstr "Создание клиента SOAP Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð°Ñ†Ð¸Ð¸" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:46 #: src/tests/delegation/test_delegation_client.cpp:52 #: src/tests/echo/test_clientinterface.cpp:178 msgid "Delegation to ARC delegation service failed" msgstr "Сбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñлужбе Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ARC" #: src/tests/client/test_ClientX509Delegation_ARC.cpp:50 #: 
#: src/tests/client/test_ClientX509Delegation_ARC.cpp:50
#: src/tests/client/test_ClientX509Delegation_GridSite.cpp:49
#: src/tests/delegation/test_delegation_client.cpp:57
#: src/tests/delegation/test_delegation_client.cpp:89
#: src/tests/echo/test_clientinterface.cpp:182
#: src/tests/echo/test_clientinterface.cpp:205
#, c-format
msgid "Delegation ID: %s"
msgstr "ID делегирования: %s"

#: src/tests/client/test_ClientX509Delegation_ARC.cpp:58
#, c-format
msgid "Delegated credential from delegation service: %s"
msgstr "Делегированные параметры доступа от службы делегации: %s"

#: src/tests/client/test_ClientX509Delegation_GridSite.cpp:45
#: src/tests/delegation/test_delegation_client.cpp:84
#: src/tests/echo/test_clientinterface.cpp:201
msgid "Delegation to gridsite delegation service failed"
msgstr "Сбой делегирования службе делегирования Gridsite"

#: src/tests/count/count.cpp:58
msgid "Input is not SOAP"
msgstr "Ввод не в формате SOAP"

#: src/tests/count/count.cpp:89 src/tests/echo/echo.cpp:83
msgid "echo: Unauthorized"
msgstr "echo: Доступ закрыт"

#: src/tests/count/count.cpp:98 src/tests/count/count.cpp:104
#, c-format
msgid "Request is not supported - %s"
msgstr "Запрос не поддерживается - %s"

#: src/tests/count/test_client.cpp:50
#: src/tests/echo/echo_test4axis2c/test_client.cpp:43
#: src/tests/echo/test_client.cpp:59
msgid "Failed to load client configuration"
msgstr "Не удалось загрузить настройки клиента"

#: src/tests/count/test_client.cpp:54
#: src/tests/echo/echo_test4axis2c/test_client.cpp:47
#: src/tests/echo/test.cpp:58 src/tests/echo/test_client.cpp:63
msgid "Client side MCCs are loaded"
msgstr "Подгружены клиентские компоненты цепи сообщений"

#: src/tests/count/test_client.cpp:57
#: src/tests/echo/echo_test4axis2c/test_client.cpp:50
#: src/tests/echo/test_client.cpp:66
msgid "Client chain does not have entry point"
msgstr "Отсутствует точка входа в клиентскую цепь"

#: src/tests/count/test_client.cpp:84
#: src/tests/echo/echo_test4axis2c/test_client.cpp:74
#: src/tests/echo/test.cpp:74 src/tests/echo/test_client.cpp:90
msgid "Request failed"
msgstr "Ошибка при выполнении запроса"

#: src/tests/count/test_client.cpp:90
#: src/tests/echo/echo_test4axis2c/test_client.cpp:80
#: src/tests/echo/test.cpp:79 src/tests/echo/test_client.cpp:96
msgid "There is no response"
msgstr "Нет ответа"

#: src/tests/count/test_client.cpp:97
#: src/tests/echo/echo_test4axis2c/test_client.cpp:87
#: src/tests/echo/test_client.cpp:103
msgid "Response is not SOAP"
msgstr "Ответ не в формате SOAP"

#: src/tests/count/test_service.cpp:22 src/tests/echo/test.cpp:23
#: src/tests/echo/test_service.cpp:22
msgid "Creating service side chain"
msgstr "Создание цепи на стороне сервиса"

#: src/tests/count/test_service.cpp:25 src/tests/echo/test.cpp:26
#: src/tests/echo/test_service.cpp:25
msgid "Failed to load service configuration"
msgstr "Не удалось загрузить настройки сервиса"

#: src/tests/count/test_service.cpp:30 src/tests/echo/test_service.cpp:30
msgid "Service is waiting for requests"
msgstr "Сервис в ожидании запросов"

#: src/tests/echo/test.cpp:32
msgid "Creating client interface"
msgstr "Создаётся интерфейс клиента"

#: src/tests/echo/test.cpp:82
msgid "Request succeed!!!"
msgstr "Запрос удался!!!"
#~ msgid "" #~ "It is not possible to resubmit jobs without new target information " #~ "discovery" #~ msgstr "" #~ "Ðевозможно перезапуÑтить задачу, не выполнив Ñнова поиÑк информации о " #~ "реÑурÑах" #~ msgid "No jobs to resubmit with the specified status" #~ msgstr "Ðет задач Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ð¿ÑƒÑка в указанном ÑоÑтоÑнии" #~ msgid " To recover missing jobs, run arcsync" #~ msgstr " Ð”Ð»Ñ Ð²Ð¾ÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½ÐµÐ´Ð¾Ñтающих задач, запуÑтите arcsync" #, c-format #~ msgid "Cannot write jobids to file (%s)" #~ msgstr "Ðевозможно запиÑать Ñрлыки задач в файл (%s)" #, c-format #~ msgid "" #~ "Resubmission of job (%s) succeeded, but killing the job failed - it will " #~ "still appear in the job list" #~ msgstr "" #~ "УÑпешно завершена перезаÑылка задачи (%s), но прервать задачу не удалоÑÑŒ " #~ "- она будет приÑутÑтвовать в ÑпиÑке задач" #, c-format #~ msgid "" #~ "Resubmission of job (%s) succeeded, but cleaning the job failed - it will " #~ "still appear in the job list" #~ msgstr "" #~ "УÑпешно завершена перезаÑылка задачи (%s), но очиÑтить задачу не удалоÑÑŒ " #~ "- она будет приÑутÑтвовать в ÑпиÑке задач" #~ msgid " Use arcclean to remove non-existing jobs" #~ msgstr " ИÑпользуйте arcclean Ð´Ð»Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð½ÐµÑущеÑтвующих задач" #~ msgid "Job resubmission summary:" #~ msgstr "Сводка перезапуÑка задач:" #, c-format #~ msgid "%d of %d jobs were resubmitted" #~ msgstr "%d из %d задач были перезапущены" #, c-format #~ msgid "The following %d were not resubmitted" #~ msgstr "Следующие %d не были перезапущены" #, c-format #~ msgid "Unable to load broker %s" #~ msgstr "Ðевозможно подгрузить брокер %s" #~ msgid "Test aborted because no resource returned any information" #~ msgstr "" #~ "Обрыв заÑылки теÑта, Ñ‚.к. ни один из реÑурÑов не предоÑтавил информацию" #~ msgid "" #~ "ERROR: Test aborted because no suitable resources were found for the test-" #~ "job" #~ msgstr "ОШИБКÐ: Обрыв заÑылки теÑта, так как подходÑщих реÑурÑов не найдено" #~ msgid "" #~ "ERROR: Dumping job description aborted because no suitable resources were " #~ "found for the test-job" #~ msgstr "" #~ "ОШИБКÐ: Обрыв раÑпечатки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸, так как подходÑщих реÑурÑов не " #~ "найдено" #, c-format #~ msgid "Submitting test-job %d:" #~ msgstr "ЗапуÑкаетÑÑ Ñ‚ÐµÑÑ‚Ð¾Ð²Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° %d:" #, c-format #~ msgid "Client version: nordugrid-arc-%s" #~ msgstr "ВерÑÐ¸Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð°: nordugrid-arc-%s" #, c-format #~ msgid "Removing endpoint %s: It has an unrequested interface (%s)." #~ msgstr "УдалÑетÑÑ Ñ‚Ð¾Ñ‡ÐºÐ° входа %s: она Ñодержит ненужный Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ (%s)." #~ msgid "" #~ "It is impossible to mix ARC6 target selection options with legacy " #~ "options. All legacy options will be ignored!" #~ msgstr "" #~ "Смешивать опции выбора реÑурÑа ARC6 Ñ ÑƒÑтаревшими опциÑми нельзÑ. Ð’Ñе " #~ "уÑтаревшие опции будут проигнорированы!" 
#~ msgid "Legacy options set for defining targets" #~ msgstr "УÑтаревшие варианты Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ð¹" #~ msgid "" #~ "select one or more computing elements: name can be an alias for a single " #~ "CE, a group of CEs or a URL" #~ msgstr "" #~ "указать один или более вычиÑлительных реÑурÑов: Ð¸Ð¼Ñ Ð¼Ð¾Ð¶ÐµÑ‚ быть " #~ "Ñокращением Ð´Ð»Ñ Ð¾Ð´Ð½Ð¾Ð³Ð¾ реÑурÑа, группы реÑурÑов, или URL" #~ msgid "name" #~ msgstr "имÑ" #~ msgid "" #~ "the computing element specified by URL at the command line should be " #~ "queried using this information interface.\n" #~ "\tAllowed values are: org.nordugrid.ldapng, org.nordugrid.ldapglue2 and " #~ "org.ogf.glue.emies.resourceinfo" #~ msgstr "" #~ "вычиÑлительный реÑурÑ, заданный URL в командной Ñтроке, должен быть " #~ "опрошен иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¹ информационный интерфейÑ.\n" #~ "\tДопуÑтимые значениÑ: org.nordugrid.ldapng, org.nordugrid.ldapglue2 и " #~ "org.ogf.glue.emies.resourceinfo" #~ msgid "interfacename" #~ msgstr "interfacename" #~ msgid "" #~ "selecting a computing element for the new jobs with a URL or an alias, or " #~ "selecting a group of computing elements with the name of the group" #~ msgstr "" #~ "выбор вычиÑлительного реÑурÑа Ð´Ð»Ñ Ð½Ð¾Ð²Ñ‹Ñ… задач Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ URL или " #~ "ÑокращениÑ, или выбор группы Ñлементов Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ñ‹" #~ msgid "force migration, ignore kill failure" #~ msgstr "Ð¿Ñ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¼Ð¸Ð³Ñ€Ð°Ñ†Ð¸Ñ, игнорируетÑÑ Ñбой прерываниÑ" #~ msgid "print a list of services configured in the client.conf" #~ msgstr "вывеÑти ÑпиÑок Ñлужб, наÑтроенных в client.conf" #~ msgid "resubmit to the same resource" #~ msgstr "заÑлать заново на тот же реÑурÑ" #~ msgid "do not resubmit to the same resource" #~ msgstr "не перезаÑылать на тот же реÑурÑ" #~ msgid "" #~ "select one or more registries: name can be an alias for a single " #~ "registry, a group of registries or a URL" #~ msgstr "" #~ "выбрать один или неÑколько рееÑтров: Ð¸Ð¼Ñ Ð¼Ð¾Ð¶ÐµÑ‚ быть Ñокращением Ð´Ð»Ñ " #~ "одного рееÑтра, группы рееÑтров, или URL" #~ msgid "" #~ "only use this interface for submitting.\n" #~ "\tAllowed values are: org.nordugrid.gridftpjob or org.nordugrid.gridftp, " #~ "org.ogf.glue.emies.activitycreation and org.nordugrid.internal" #~ msgstr "" #~ "иÑпользовать только указанный Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ Ð´Ð»Ñ Ð·Ð°Ñылки.\n" #~ "\tДопуÑтимые значениÑ: org.nordugrid.gridftpjob или org.nordugrid." #~ "gridftp, org.ogf.glue.emies.activitycreation и org.nordugrid.internal" #~ msgid "InterfaceName" #~ msgstr "InterfaceName" #~ msgid "submit directly - no resource discovery or matchmaking" #~ msgstr "" #~ "запуÑтить напрÑмую, без Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¸ проверки ÑоответÑÑ‚Ð²Ð¸Ñ Ñ€ÐµÑурÑов" #, c-format #~ msgid "Unable to copy %s" #~ msgstr "Ðе удалоÑÑŒ Ñкопировать %s" #~ msgid "" #~ "if the destination is an indexing service and not the same as the source " #~ "and the destination is already registered, then the copy is normally not " #~ "done. However, if this option is specified the source is assumed to be a " #~ "replica of the destination created in an uncontrolled way and the copy is " #~ "done like in case of replication. Using this option also skips validation " #~ "of completed transfers." #~ msgstr "" #~ "еÑли назначением задан индекÑирующий ÑервиÑ, отличный от иÑточника, и Ñто " #~ "назначение уже зарегиÑтрировано, копирование обычно не допуÑкаетÑÑ. 
Ð’ " #~ "Ñлучае же, когда указана Ñта опциÑ, иÑточник раÑÑматриваетÑÑ ÐºÐ°Ðº " #~ "Ð½ÐµÐ¾Ñ„Ð¸Ñ†Ð¸Ð°Ð»ÑŒÐ½Ð°Ñ ÐºÐ¾Ð¿Ð¸Ñ Ð·Ð°Ñ€ÐµÐ³Ð¸Ñтрированного файла, и копирование производитÑÑ " #~ "как в Ñлучае тиражированиÑ. При иÑпользовании Ñтой опции пропуÑкаетÑÑ " #~ "Ñверка завершённых передач." #, c-format #~ msgid "Unable to list content of %s" #~ msgstr "Ðе удалоÑÑŒ проÑмотреть Ñодержимое %s" #, c-format #~ msgid "Unable to create directory %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать каталог %s" #, c-format #~ msgid "Unable to remove file %s" #~ msgstr "Ðе удалоÑÑŒ удалить файл %s" #~ msgid "Creating an EMI ES client" #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ EMI ES" #~ msgid "Unable to create SOAP client used by EMIESClient." #~ msgstr "Ðе удалоÑÑŒ Ñоздать клиент SOAP иÑпользующийÑÑ EMIESClient." #~ msgid "Re-creating an EMI ES client" #~ msgstr "ВоÑÑоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ EMI ES" #, c-format #~ msgid "Processing a %s request" #~ msgstr "Обработка запроÑа %s" #, c-format #~ msgid "%s request failed" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ %s не выполнен" #, c-format #~ msgid "No response from %s" #~ msgstr "Ðет ответа от %s" #, c-format #~ msgid "%s request to %s failed with response: %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ %s к %s не выполнен, получен ответ: %s" #, c-format #~ msgid "XML response: %s" #~ msgstr "Отзыв XML: %s" #, c-format #~ msgid "%s request to %s failed. Unexpected response: %s." #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ %s к %s не выполнен, неожиданный ответ: %s." #, c-format #~ msgid "Creating and sending job submit request to %s" #~ msgstr "Создание и отправка запроÑа об иÑполнении задачи на %s" #, c-format #~ msgid "Job description to be sent: %s" #~ msgstr "ОпиÑание заÑылаемой задачи: %s" #, c-format #~ msgid "New limit for vector queries returned by EMI ES service: %d" #~ msgstr "" #~ "Ð¡ÐµÑ€Ð²Ð¸Ñ EMI ES уÑтановил новые Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ñ€ÐµÐ´ÐµÐ»Ð¾Ð² Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð»Ð»ÐµÐ»ÑŒÐ½Ñ‹Ñ… " #~ "запроÑов: %d" #, c-format #~ msgid "" #~ "Error: Service returned a limit higher or equal to current limit " #~ "(current: %d; returned: %d)" #~ msgstr "" #~ "Ошибка: Ð¡ÐµÑ€Ð²Ð¸Ñ Ñ‚Ñ€ÐµÐ±ÑƒÐµÑ‚ предел, превышающий или равный текущему (текущий: " #~ "%d; требуемый: %d)" #, c-format #~ msgid "Creating and sending job information query request to %s" #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии задачи на %s" #, c-format #~ msgid "Creating and sending service information request to %s" #~ msgstr "Создание и отправка запроÑа информации о Ñлужбе на %s" #, c-format #~ msgid "Creating and sending service information query request to %s" #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии Ñлужбы на %s" #, c-format #~ msgid "Creating and sending job clean request to %s" #~ msgstr "" #~ "Создание и отправка запроÑа об удалении результатов работы задачи на %s" #, c-format #~ msgid "Creating and sending job suspend request to %s" #~ msgstr "Создание и отправка запроÑа о приоÑтановке задачи на %s" #, c-format #~ msgid "Creating and sending job resume request to %s" #~ msgstr "Создание и отправка запроÑа о возобновлении задачи на %s" #, c-format #~ msgid "Creating and sending job restart request to %s" #~ msgstr "Создание и отправка запроÑа о перезапуÑке задачи на %s" #, c-format #~ msgid "Creating and sending job notify request to %s" #~ msgstr "Создание и отправка запроÑа об уведомлении о задаче на %s" #, c-format #~ msgid "Creating and sending notify request to %s" #~ msgstr "Создание и отправка запроÑа об уведомлении на %s" #, c-format #~ msgid "Creating and sending job list request to %s" #~ msgstr "Создание и отправка запроÑа о проÑмотре задачи на %s" 
#, c-format #~ msgid "Job %s failed to renew delegation %s - %s." #~ msgstr "Задача %s не Ñмогла обновить делегирование %s - %s." #~ msgid "Retrieving job description of EMI ES jobs is not supported" #~ msgstr "Получение опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡ EMI ES не поддерживаетÑÑ" #, c-format #~ msgid "Listing jobs succeeded, %d jobs found" #~ msgstr "Задачи уÑпешно перечиÑлены, обнаружено %d задач(и)" #~ msgid "Unable to submit job. Job description is not valid XML" #~ msgstr "" #~ "Ðевозможно заÑлать задачу. ОпиÑание задачи не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым файлом " #~ "XML" #~ msgid "No valid job identifier returned by EMI ES" #~ msgstr "EMI ES не возвратил дейÑтвительных Ñрлыков задач" #~ msgid "Job failed on service side" #~ msgstr "Задача дала Ñбой на Ñервере" #~ msgid "Failed to obtain state of job" #~ msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ ÑоÑтоÑние задачи" #~ msgid "Failed to wait for job to allow stage in" #~ msgstr "Сбой Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°Ð·Ñ€ÐµÑˆÐµÐ½Ð¸Ñ Ð¾Ñ‚ задачи на размещение входных файлов" #~ msgid "Failed to obtain valid stagein URL for input files" #~ msgstr "Сбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ð¿ÑƒÑтимых URL Ð´Ð»Ñ Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð²Ñ…Ð¾Ð´Ð½Ñ‹Ñ… файлов" #, c-format #~ msgid "Failed uploading local input files to %s" #~ msgstr "Сбой выгрузки локальных входных файлов в %s" #, c-format #~ msgid "Failed to submit job description: EMIESFault(%s , %s)" #~ msgstr "Сбой заÑылки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸: EMIESFault(%s , %s)" #, c-format #~ msgid "Failed to submit job description: UnexpectedError(%s)" #~ msgstr "Сбой заÑылки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸: UnexpectedError(%s)" #~ msgid "Failed to notify service" #~ msgstr "Сбой ÑƒÐ²ÐµÐ´Ð¾Ð¼Ð»ÐµÐ½Ð¸Ñ Ñлужбы" #~ msgid "Failed preparing job description to target resources" #~ msgstr "Ðе удалоÑÑŒ адаптировать опиÑание задачи Ð´Ð»Ñ Ð·Ð°Ñылки по назначению" #, c-format #~ msgid "Failed to submit job description: %s" #~ msgstr "Сбой заÑылки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸: %s" #~ msgid "Collecting EMI-ES GLUE2 computing info endpoint information." #~ msgstr "СобираетÑÑ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ GLUE2 Ð´Ð»Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ входа EMI-ES." 
#~ msgid "Generating EMIES targets" #~ msgstr "СоздаютÑÑ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ EMIES" #, c-format #~ msgid "Generated EMIES target: %s" #~ msgstr "Созданы Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ EMIES: %s" #, c-format #~ msgid "Query returned unexpected element: %s:%s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð²Ð¾Ð·Ð²Ñ€Ð°Ñ‚Ð¸Ð» неожиданный Ñлемент: %s:%s" #, c-format #~ msgid "Element validation according to GLUE2 schema failed: %s" #~ msgstr "Проверка ÑоответÑÑ‚Ð²Ð¸Ñ Ñлемента Ñхеме GLUE2 не прошла: %s" #~ msgid "Resource query failed" #~ msgstr "Сбой опроÑа реÑурÑа" #~ msgid "Submission failed" #~ msgstr "Сбой заÑылки задачи" #~ msgid "Obtaining status failed" #~ msgstr "Сбой Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ ÑоÑтоÑниÑ" #~ msgid "Obtaining information failed" #~ msgstr "Сбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸" #~ msgid "Notify failed" #~ msgstr "Сбой уведомлениÑ" #~ msgid "Kill failed" #~ msgstr "Сбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "List failed" #~ msgstr "Сбой перечиÑÐ»ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡" #, c-format #~ msgid "Fetching resource description from %s" #~ msgstr "Получение опиÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа Ñ %s" #, c-format #~ msgid "Failed to obtain resource description: %s" #~ msgstr "Сбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð¾Ð¿Ð¸ÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа: %s" #, c-format #~ msgid "Resource description contains unexpected element: %s:%s" #~ msgstr "Схема опиÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа Ñодержит недейÑтвительный Ñлемент: %s:%s" #~ msgid "Resource description validation according to GLUE2 schema failed: " #~ msgstr "Проверка ÑоответÑÑ‚Ð²Ð¸Ñ Ð¾Ð¿Ð¸ÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа Ñхеме GLUE2 не прошла: " #~ msgid "Resource description is empty" #~ msgstr "ОпиÑание реÑурÑа пуÑто" #, c-format #~ msgid "Resource description provides URL for interface %s: %s" #~ msgstr "ОпиÑание реÑурÑа Ñодержит URL интерфейÑа %s: %s" #~ msgid "Resource description provides no URLs for interfaces" #~ msgstr "ОпиÑание реÑурÑа не Ñодержит URL интерфейÑов" #~ msgid "Resource description validation passed" #~ msgstr "Прошла проверка опиÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа" #, c-format #~ msgid "Requesting ComputingService elements of resource description at %s" #~ msgstr "ЗапрашиваютÑÑ Ñлементы опиÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа ComputingService Ñ %s" #~ msgid "Performing /Services/ComputingService query" #~ msgstr "ВыполнÑетÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ /Services/ComputingService" #~ msgid "Query returned no elements." #~ msgstr "Результат запроÑа не Ñодержит Ñлементов." 
#~ msgid "Performing /ComputingService query" #~ msgstr "ВыполнÑетÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ /ComputingService" #~ msgid "Performing /* query" #~ msgstr "ВыполнÑетÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ /*" #~ msgid "All queries failed" #~ msgstr "Сбой вÑех запроÑов" #, c-format #~ msgid "" #~ "Number of ComputingService elements obtained from full document and XPath " #~ "query do not match: %d != %d" #~ msgstr "" #~ "КоличеÑтво Ñлементов ComputingService полученных из полного документа и " #~ "из запроÑа XPath не Ñовпадают: %d != %d" #~ msgid "Resource description query validation passed" #~ msgstr "Проверка ÑоответÑÑ‚Ð²Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа опиÑÐ°Ð½Ð¸Ñ Ñ€ÐµÑурÑа прошла" #, c-format #~ msgid "Unsupported command: %s" #~ msgstr "ÐÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð°: %s" #, c-format #~ msgid "Connect: Failed to init handle: %s" #~ msgstr "Соединение: Ðе удалоÑÑŒ инициализировать ÑÑылку: %s" #, c-format #~ msgid "Failed to enable IPv6: %s" #~ msgstr "Сбой Ð²ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ IPv6: %s" #, c-format #~ msgid "Connect: Failed to connect: %s" #~ msgstr "Соединение: Сбой ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ: %s" #, c-format #~ msgid "Connect: Connecting timed out after %d ms" #~ msgstr "Соединение: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð¸Ñтекло поÑле %d мÑ" #, c-format #~ msgid "Connect: Failed to init auth info handle: %s" #~ msgstr "" #~ "Соединение: Сбой инициализации идентификатора информации проверки " #~ "подлинноÑти: %s" #, c-format #~ msgid "Connect: Failed authentication: %s" #~ msgstr "Соединение: Ошибка проверки подлинноÑти: %s" #, c-format #~ msgid "Connect: Authentication timed out after %d ms" #~ msgstr "Соединение: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ подлинноÑти иÑтекло поÑле %d мÑ" #, c-format #~ msgid "SendCommand: Command: %s" #~ msgstr "SendCommand: Команда: %s" #, c-format #~ msgid "SendCommand: Failed: %s" #~ msgstr "Отправка команды: Сбой: %s" #, c-format #~ msgid "SendCommand: Timed out after %d ms" #~ msgstr "Отправка команды: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¸Ñтекло поÑле %d мÑ" #, c-format #~ msgid "SendCommand: Response: %s" #~ msgstr "SendCommand: Отзыв: %s" #~ msgid "FTP Job Control: Failed sending EPSV and PASV commands" #~ msgstr "FTP Job Control: Сбой отÑылки команд EPSV и PASV" #, c-format #~ msgid "FTP Job Control: Server PASV response parsing failed: %s" #~ msgstr "FTP Job Control: Сбой разбора отзыва Ñервера PASV: %s" #, c-format #~ msgid "FTP Job Control: Server EPSV response parsing failed: %s" #~ msgstr "FTP Job Control: Сбой разбора отзыва Ñервера EPSV: %s" #, c-format #~ msgid "FTP Job Control: Server EPSV response port parsing failed: %s" #~ msgstr "FTP Job Control: Сбой разбора порта отзыва Ñервера EPSV: %s" #, c-format #~ msgid "" #~ "FTP Job Control: Failed to apply local address to data connection: %s" #~ msgstr "" #~ "FTP Job Control: Ðе удалоÑÑŒ применить локальный Ð°Ð´Ñ€ÐµÑ Ðº каналу передачи " #~ "данных: %s" #, c-format #~ msgid "" #~ "FTP Job Control: Can't parse host and/or port in response to EPSV/PASV: %s" #~ msgstr "" #~ "FTP Job Control: Ðе удалоÑÑŒ извлечь Ð°Ð´Ñ€ÐµÑ ÑƒÐ·Ð»Ð° и/или номер порта из " #~ "ответа на Ð·Ð°Ð¿Ñ€Ð¾Ñ EPSV/PASV: %s" #, c-format #~ msgid "FTP Job Control: Data channel: %d.%d.%d.%d:%d" #~ msgstr "FTP Job Control: Канал передачи данных: %d.%d.%d.%d:%d" #, c-format #~ msgid "FTP Job Control: Data channel: [%s]:%d" #~ msgstr "FTP Job Control: Канал передачи данных: [%s]:%d" #, c-format #~ msgid "FTP Job Control: Local port failed: %s" #~ msgstr "FTP Job Control: Сбой локального порта: %s" #~ msgid "FTP Job Control: Failed sending DCAU command" #~ msgstr "FTP Job 
Control: Сбой отправки команды DCAU" #~ msgid "FTP Job Control: Failed sending TYPE command" #~ msgstr "FTP Job Control: Сбой отправки команды TYPE" #, c-format #~ msgid "FTP Job Control: Local type failed: %s" #~ msgstr "FTP Job Control: Сбой локального типа: %s" #, c-format #~ msgid "FTP Job Control: Failed sending STOR command: %s" #~ msgstr "FTP Job Control: Сбой отправки команды STOR: %s" #, c-format #~ msgid "FTP Job Control: Data connect write failed: %s" #~ msgstr "FTP Job Control: Сбой ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð¸ запиÑи данных: %s" #, c-format #~ msgid "FTP Job Control: Data connect write timed out after %d ms" #~ msgstr "" #~ "FTP Job Control: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð½Ñ‚Ð°ÐºÑ‚Ð° и запиÑи данных иÑтекло поÑле %d " #~ "мÑ" #, c-format #~ msgid "FTP Job Control: Data write failed: %s" #~ msgstr "FTP Job Control: Сбой запиÑи данных: %s" #, c-format #~ msgid "FTP Job Control: Data write timed out after %d ms" #~ msgstr "FTP Job Control: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñи данных иÑтекло поÑле %d мÑ" #, c-format #~ msgid "Disconnect: Failed aborting - ignoring: %s" #~ msgstr "Отключение: Сбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ - игнорируетÑÑ: %s" #, c-format #~ msgid "Disconnect: Data close timed out after %d ms" #~ msgstr "Отключение: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… иÑтекло поÑле %d мÑ" #, c-format #~ msgid "Disconnect: Abort timed out after %d ms" #~ msgstr "Отключение: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð¸Ñтекло поÑле %d мÑ" #, c-format #~ msgid "Disconnect: Failed quitting - ignoring: %s" #~ msgstr "Отключение: Сбой выхода - игнорируетÑÑ: %s" #, c-format #~ msgid "Disconnect: Quitting timed out after %d ms" #~ msgstr "Отключение: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð²Ñ‹Ñ…Ð¾Ð´Ð° иÑтекло поÑле %d мÑ" #, c-format #~ msgid "Disconnect: Failed closing - ignoring: %s" #~ msgstr "Отключение: Сбой Ð¾Ñ‚ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ - игнорируетÑÑ: %s" #, c-format #~ msgid "Disconnect: Closing timed out after %d ms" #~ msgstr "Отключение: Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ Ð¸Ñтекло поÑле %d мÑ" #~ msgid "Disconnect: waiting for globus handle to settle" #~ msgstr "Отключение: ждём пока ÑÑылка globus уÑтаканитÑÑ" #~ msgid "Disconnect: globus handle is stuck." #~ msgstr "Отключение: ÑÑылка globus заÑтрÑла." #, c-format #~ msgid "" #~ "Disconnect: Failed destroying handle: %s. Can't handle such situation." #~ msgstr "" #~ "Отключение: Сбой ÑƒÐ½Ð¸Ñ‡Ñ‚Ð¾Ð¶ÐµÐ½Ð¸Ñ ÑÑылки: %s. Ðевозможно ÑправитьÑÑ Ñ Ñ‚Ð°ÐºÐ¸Ð¼ " #~ "положением." #~ msgid "Disconnect: handle destroyed." #~ msgstr "Отключение: ÑÑылка уничтожена." #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - SubmitterPlugin for GRIDFTPJOB is disabled. Report " #~ "to developers." #~ msgstr "" #~ "ОтÑутÑтвует указание на фабрику и/или модуль. ИÑпользование Globus во " #~ "временном режиме небезопаÑно - SubmitterPlugin Ð´Ð»Ñ GRIDFTPJOB отключён. " #~ "Сообщите разработчикам." #, c-format #~ msgid "Unable to query job information (%s), invalid URL provided (%s)" #~ msgstr "" #~ "Ðевозможно опроÑить информацию о задаче (%s), задан недопуÑтимый URL (%s)" #, c-format #~ msgid "Jobs left to query: %d" #~ msgstr "Ðеопрошенных задач: %d" #, c-format #~ msgid "Querying batch with %d jobs" #~ msgstr "ОпрашиваетÑÑ ÑпиÑок из %d задач(и)" #~ msgid "" #~ "Can't create information handle - is the ARC LDAP DMC plugin available?" #~ msgstr "" #~ "Ðе удалоÑÑŒ Ñоздать ÑÑылку Ð´Ð»Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ - проверьте, доÑтупен ли " #~ "подгружаемый модуль ARC LDAP DMC." 
#, c-format #~ msgid "Job information not found in the information system: %s" #~ msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче в информационной ÑиÑтеме не обнаружена: %s" #~ msgid "" #~ "This job was very recently submitted and might not yet have reached the " #~ "information system" #~ msgstr "" #~ "Эта задача была запущена лишь недавно, и может быть ещё не " #~ "зарегиÑтрирована в ÑиÑтеме" #, c-format #~ msgid "Cleaning job: %s" #~ msgstr "УдалÑетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð°: %s" #~ msgid "Failed to connect for job cleaning" #~ msgstr "Ðе удалоÑÑŒ ÑоединитьÑÑ Ð´Ð»Ñ Ð¾Ñ‡Ð¸Ñтки задачи" #~ msgid "Failed sending CWD command for job cleaning" #~ msgstr "Ðе удалоÑÑŒ отправить инÑтрукцию CWD Ð´Ð»Ñ Ð¾Ñ‡Ð¸Ñтки задачи" #~ msgid "Failed sending RMD command for job cleaning" #~ msgstr "Ðе удалоÑÑŒ отправить инÑтрукцию RMD Ð´Ð»Ñ Ð¾Ñ‡Ð¸Ñтки задачи" #~ msgid "Failed to disconnect after job cleaning" #~ msgstr "Ðе удалоÑÑŒ отÑоединитьÑÑ Ð¿Ð¾Ñле очиÑтки задачи" #~ msgid "Job cleaning successful" #~ msgstr "Задача уÑпешно удалена" #, c-format #~ msgid "Cancelling job: %s" #~ msgstr "Прерывание задачи: %s" #~ msgid "Failed to connect for job cancelling" #~ msgstr "Сбой ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð´Ð»Ñ Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Failed sending CWD command for job cancelling" #~ msgstr "Сбой отправки инÑтрукции CWD Ð´Ð»Ñ Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Failed sending DELE command for job cancelling" #~ msgstr "Сбой отправки инÑтрукции DELE Ð´Ð»Ñ Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Failed to disconnect after job cancelling" #~ msgstr "Сбой отÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Job cancelling successful" #~ msgstr "Задача уÑпешно оборвана" #, c-format #~ msgid "Renewing credentials for job: %s" #~ msgstr "Обновление параметров доÑтупа Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸: %s" #~ msgid "Failed to connect for credential renewal" #~ msgstr "Сбой уÑÑ‚Ð°Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ ÑвÑзи Ð´Ð»Ñ Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа" #~ msgid "Failed sending CWD command for credentials renewal" #~ msgstr "Сбой отправки инÑтрукции CWD Ð´Ð»Ñ Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа" #~ msgid "Failed to disconnect after credentials renewal" #~ msgstr "Сбой отÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа" #~ msgid "Renewal of credentials was successful" #~ msgstr "Параметры доÑтупа уÑпешно обновлены" #, c-format #~ msgid "Illegal jobID specified (%s)" #~ msgstr "Задан недопуÑтимый Ñрлык задачи (%s)" #, c-format #~ msgid "HER: %s" #~ msgstr "HER: %s" #, c-format #~ msgid "Could not create temporary file: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать временный файл: %s" #, c-format #~ msgid "Trying to retrieve job description of %s from computing resource" #~ msgstr "Попытка Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð¾Ð¿Ð¸ÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s Ñ Ð²Ñ‹Ñ‡Ð¸Ñлительного реÑурÑа" #, c-format #~ msgid "invalid jobID: %s" #~ msgstr "ÐедейÑтвительный Ñрлык задачи: %s" #~ msgid "clientxrsl found" #~ msgstr "найден оригинал опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ в формате XRSL" #~ msgid "could not find end of clientxrsl" #~ msgstr "невозможно найти конец опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ в формате XRSL" #, c-format #~ msgid "Job description: %s" #~ msgstr "ОпиÑание задачи: %s" #~ msgid "clientxrsl not found" #~ msgstr "оригинал опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ в формате XRSL не найден" #, c-format #~ msgid "Invalid JobDescription: %s" #~ msgstr "Ðеверный Ñлемент JobDescription: %s" #~ msgid "Valid JobDescription found" #~ msgstr "Обнаружено дейÑтвительное опиÑание JobDescription" #~ msgid "Submit: Failed to connect" #~ msgstr 
"ЗаÑылка: Сбой ÑвÑзи" #~ msgid "Submit: Failed sending CWD command" #~ msgstr "ЗаÑылка: Сбой отправки команды CWD" #~ msgid "Submit: Failed sending CWD new command" #~ msgstr "ЗаÑылка: Сбой отправки команды CWD new" #~ msgid "Failed to prepare job description." #~ msgstr "Сбой подготовки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸." #~ msgid "Submit: Failed sending job description" #~ msgstr "ЗаÑылка: Сбой отправки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Submit: Failed uploading local input files" #~ msgstr "ЗаÑылка: Сбой выгрузки локальных входных файлов" #~ msgid "" #~ "Submit: service has no suitable information interface - need org." #~ "nordugrid.ldapng" #~ msgstr "" #~ "ЗаÑылка: ÑÐµÑ€Ð²Ð¸Ñ Ð½Ðµ предоÑтавлÑет подходÑщего информационного интерфейÑа - " #~ "нужен org.nordugrid.ldapng" #~ msgid "Failed to prepare job description to target resources." #~ msgstr "Сбой подготовки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ Ð´Ð»Ñ Ð·Ð°Ñылки по назначению." #, c-format #~ msgid "Extractor[%s] (%s): %s = %s" #~ msgstr "Extractor[%s] (%s): %s = %s" #, c-format #~ msgid "Extractor[%s] (%s): %s contains %s" #~ msgstr "Extractor[%s] (%s): %s Ñодержит %s" #, c-format #~ msgid "Adding endpoint '%s' with interface name %s" #~ msgstr "ДобавлÑетÑÑ Ñ‚Ð¾Ñ‡ÐºÐ° доÑтупа '%s' Ñ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸ÐµÐ¼ интерфейÑа %s" #~ msgid "" #~ "Can't create information handle - is the ARC ldap DMC plugin available?" #~ msgstr "" #~ "Ðе удалоÑÑŒ Ñоздать ÑÑылку Ð´Ð»Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ - проверьте, доÑтупен ли " #~ "подгружаемый модуль ARC LDAP DMC" #, c-format #~ msgid "Unknown entry in EGIIS (%s)" #~ msgstr "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ в EGIIS (%s)" #~ msgid "" #~ "Entry in EGIIS is missing one or more of the attributes 'Mds-Service-" #~ "type', 'Mds-Service-hn', 'Mds-Service-port' and/or 'Mds-Service-Ldap-" #~ "suffix'" #~ msgstr "" #~ "ЗапиÑÑŒ в EGIIS не Ñодержит одного или неÑкольких атрибутов 'Mds-Service-" #~ "type', 'Mds-Service-hn', 'Mds-Service-port' и/или 'Mds-Service-Ldap-" #~ "suffix'" #~ msgid "" #~ "The \"FreeSlotsWithDuration\" attribute is wrongly formatted. Ignoring it." #~ msgstr "" #~ "Ðтрибут \"FreeSlotsWithDuration\" неверно Ñформатирован; игнорируетÑÑ." #, c-format #~ msgid "Unable to parse the %s.%s value from execution service (%s)." #~ msgstr "" #~ "Ðевозможно разобрать %s.Получено значение %s от Ñлужбы иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ (%s)." 
#, c-format #~ msgid "Value of %s.%s is \"%s\"" #~ msgstr "Значение %s.%s: \"%s\"" #, c-format #~ msgid "Found none or multiple URLs (%s) in ACIX URL: %s" #~ msgstr "Ð’ ACIX URL обнаружено ни одного или неÑколько URL (%s): %s" #, c-format #~ msgid "Cannot handle URL %s" #~ msgstr "Ðевозможно обработать URL %s" #, c-format #~ msgid "Could not resolve original source of %s: out of time" #~ msgstr "Ðе удалоÑÑŒ определить иÑходный иÑточник %s: Ð²Ñ€ÐµÐ¼Ñ Ð¸Ñтекло" #, c-format #~ msgid "Could not resolve original source of %s: %s" #~ msgstr "Ðе удалоÑÑŒ определить иÑходный иÑточник %s: %s" #, c-format #~ msgid "Querying ACIX server at %s" #~ msgstr "ОпрашиваетÑÑ Ñервер ACIX на %s" #, c-format #~ msgid "Calling acix with query %s" #~ msgstr "Вызов ACIX Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñом %s" #, c-format #~ msgid "Failed to query ACIX: %s" #~ msgstr "Сбой запроÑа к ACIX: %s" #, c-format #~ msgid "Failed to parse ACIX response: %s" #~ msgstr "Сбой разборки отзыва ACIX: %s" #, c-format #~ msgid "ACIX returned %s" #~ msgstr "ACIX ответил %s" #, c-format #~ msgid "No locations for %s" #~ msgstr "Ðе найдено ни одного меÑÑ‚Ð¾Ð½Ð°Ñ…Ð¾Ð¶Ð´ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° %s" #, c-format #~ msgid "%s: ACIX Location: %s" #~ msgstr "%s: МеÑтонахождение в ACIX: %s" #, c-format #~ msgid "%s: Location %s not accessible remotely, skipping" #~ msgstr "%s: К раÑположению %s нет удалённого доÑтупа, пропуÑкаетÑÑ" #~ msgid "" #~ "Missing reference to factory and/or module. Currently safe unloading of " #~ "LDAP DMC is not supported. Report to developers." #~ msgstr "" #~ "ОтÑутÑтвует ÑÑылка на фабрику и/или модуль. Ð’ наÑтоÑщее Ð²Ñ€ÐµÐ¼Ñ Ð±ÐµÐ·Ð¾Ð¿Ð°ÑÐ½Ð°Ñ " #~ "выгрузка LDAP DMC не поддерживаетÑÑ. ПожалуйтеÑÑŒ разработчикам." #~ msgid "SASL Interaction" #~ msgstr "Обмен данными SASL" #, c-format #~ msgid "Challenge: %s" #~ msgstr "ЗапроÑ: %s" #, c-format #~ msgid "Default: %s" #~ msgstr "По умолчанию: %s" #, c-format #~ msgid "LDAP connection already open to %s" #~ msgstr "Соединение LDAP Ñ %s уже уÑтановлено" #, c-format #~ msgid "Could not open LDAP connection to %s" #~ msgstr "Ðевозможно уÑтановить Ñоединие LDAP Ñ %s" #, c-format #~ msgid "Failed to create ldap bind thread (%s)" #~ msgstr "Ðе удалоÑÑŒ Ñоздать поток Ð´Ð»Ñ Ð¿Ñ€Ð¸Ð²Ñзки к LDAP (%s)" #, c-format #~ msgid "Ldap bind timeout (%s)" #~ msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ LDAP (%s)" #, c-format #~ msgid "Failed to bind to ldap server (%s)" #~ msgstr "Сбой привÑзки к Ñерверу LDAP: %s" #, c-format #~ msgid "Could not set LDAP network timeout (%s)" #~ msgstr "Ðе удалоÑÑŒ задать Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ LDAP (%s)" #, c-format #~ msgid "Could not set LDAP timelimit (%s)" #~ msgstr "Ðе удалоÑÑŒ задать Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° Ñервера LDAP (%s)" #, c-format #~ msgid "Could not set LDAP protocol version (%s)" #~ msgstr "Ðе удалоÑÑŒ задать верÑию протокола LDAP (%s)" #, c-format #~ msgid "LDAPQuery: Querying %s" #~ msgstr "LdapQuery: ЗапрашиваетÑÑ %s" #, c-format #~ msgid " base dn: %s" #~ msgstr " базовое ОИ (DN): %s" #, c-format #~ msgid " filter: %s" #~ msgstr " фильтр: %s" #~ msgid " attributes:" #~ msgstr " атрибуты:" #, c-format #~ msgid "%s (%s)" #~ msgstr "%s (%s)" #, c-format #~ msgid "LDAPQuery: Getting results from %s" #~ msgstr "LDAPQuery: Получение результатов Ñ %s" #, c-format #~ msgid "Error: no LDAP query started to %s" #~ msgstr "Ошибка: не поÑлан Ð·Ð°Ð¿Ñ€Ð¾Ñ LDAP к %s" #, c-format #~ msgid "LDAP query timed out: %s" #~ msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° на Ð·Ð°Ð¿Ñ€Ð¾Ñ LDAP: %s" #, c-format #~ msgid "" #~ "Bad path 
for %s: Rucio supports read/write at /objectstores and read-only " #~ "at /replicas" #~ msgstr "" #~ "Ðеверный путь к %s: Rucio поддерживает запиÑÑŒ/чтение в /objectstores и " #~ "лишь чтение в /replicas" #~ msgid "PDPD location is missing" #~ msgstr "отÑутÑтвует раÑположение PDPD" #, c-format #~ msgid "PDPD location: %s" #~ msgstr "раÑположение PDPD: %s" #~ msgid "Conversion mode is set to SUBJECT" #~ msgstr "Задан ÑпоÑоб Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ SUBJECT" #~ msgid "Conversion mode is set to CREAM" #~ msgstr "Задан ÑпоÑоб Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ CREAM" #~ msgid "Conversion mode is set to EMI" #~ msgstr "Задан ÑпоÑоб Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ EMI" #, c-format #~ msgid "Unknown conversion mode %s, using default" #~ msgstr "" #~ "ÐеизвеÑтный ÑпоÑоб Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ %s, иÑпользуетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ по умолчанию" #, c-format #~ msgid "Failed to contact PDP server: %s" #~ msgstr "Сбой ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ Ñервером PDP: %s" #, c-format #~ msgid "There was no SOAP response return from PDP server: %s" #~ msgstr "Сервер PDP не возвратил ответ SOAP: %s" #, c-format #~ msgid "Have %i requests to process" #~ msgstr "%i запроÑов Ð´Ð»Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚ÐºÐ¸" #~ msgid "Creating a client to Argus PDP service" #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ Ð´Ð»Ñ Ñлужбы Argus PDP" #, c-format #~ msgid "XACML authorisation request: %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð°Ð²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ð¸ GACL: %s" #, c-format #~ msgid "XACML authorisation response: %s" #~ msgstr "Отклик допуÑка XACML: %s" #, c-format #~ msgid "%s is not authorized to do action %s in resource %s " #~ msgstr "%s не допущен к иÑполнению дейÑÑ‚Ð²Ð¸Ñ %s на реÑурÑе %s " #~ msgid "Not authorized" #~ msgstr "Ðет допуÑка" #~ msgid "Doing CREAM request" #~ msgstr "ПроизводитÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ CREAM" #, c-format #~ msgid "Adding profile-id value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ profile-id: %s" #, c-format #~ msgid "Adding subject-id value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ subject-id: %s" #, c-format #~ msgid "Adding subject-issuer value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ subject-issuer: %s" #, c-format #~ msgid "Adding virtual-organization value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ virtual-organization: %s" #, c-format #~ msgid "Adding FQAN value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ FQAN: %s" #, c-format #~ msgid "Adding FQAN/primary value: %s" #~ msgstr "ДобавлÑетÑÑ FQAN/первичное значение: %s" #, c-format #~ msgid "Adding cert chain value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ цепочки Ñертификатов: %s" #, c-format #~ msgid "Adding resource-id value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ идентификатора реÑурÑа: %s" #, c-format #~ msgid "Adding action-id value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ action-id: %s" #, c-format #~ msgid "CREAM request generation failed: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать Ð·Ð°Ð¿Ñ€Ð¾Ñ CREAM: %s" #~ msgid "Doing EMI request" #~ msgstr "ПроизводитÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ EMI" #, c-format #~ msgid "Adding Virtual Organization value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ виртуальной организации: %s" #, c-format #~ msgid "Adding VOMS group value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ группы VOMS: %s" #, c-format #~ msgid "Adding VOMS primary group value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ первичной группы VOMS: %s" #, c-format #~ msgid "Adding VOMS role value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ роли VOMS: %s" #, c-format #~ msgid "Adding VOMS primary role value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ первичной роли VOMS: %s" #, c-format #~ msgid 
"Adding resource-owner value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ владельца реÑурÑа: %s" #, c-format #~ msgid "EMI request generation failed: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать Ð·Ð°Ð¿Ñ€Ð¾Ñ EMI: %s" #~ msgid "PEPD location is missing" #~ msgstr "отÑутÑтвует раÑположение PEPD" #, c-format #~ msgid "PEPD location: %s" #~ msgstr "раÑположение PEPD: %s" #~ msgid "Conversion mode is set to DIRECT" #~ msgstr "Задан ÑпоÑоб Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ DIRECT" #, c-format #~ msgid "" #~ "Not authorized according to request:\n" #~ "%s" #~ msgstr "" #~ "Ðет допуÑка ÑоглаÑно запроÑу:\n" #~ "%s" #, c-format #~ msgid "%s is not authorized to do action %s in resource %s" #~ msgstr "%s не допущен к иÑполнению дейÑÑ‚Ð²Ð¸Ñ %s на реÑурÑе %s" #~ msgid "Subject of request is null" #~ msgstr "ОтÑутÑтвует Ñубъект запроÑа" #, c-format #~ msgid "Can not create XACML SubjectAttribute: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать атрибут XACML SubjectAttribute: %s" #~ msgid "Can not create XACML Resource" #~ msgstr "Ðе удалоÑÑŒ Ñоздать XACML Resource" #, c-format #~ msgid "Can not create XACML ResourceAttribute: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать атрибут XACML ResourceAttribute: %s" #~ msgid "Can not create XACML Action" #~ msgstr "Ðе удалоÑÑŒ Ñоздать XACML Action" #, c-format #~ msgid "Can not create XACML ActionAttribute: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать атрибут XACML ActionAttribute: %s" #~ msgid "Can not create XACML request" #~ msgstr "Ðе удалоÑÑŒ Ñоздать Ð·Ð°Ð¿Ñ€Ð¾Ñ XACML" #, c-format #~ msgid "Converting to CREAM action - namespace: %s, operation: %s" #~ msgstr "" #~ "Преобразование в дейÑтвие CREAM - проÑтранÑтво имён: %s, операциÑ: %s" #~ msgid "Failed to parse command line options" #~ msgstr "Ðе удалоÑÑŒ разобрать параметры командной Ñтроки" #, c-format #~ msgid "Thread exited with Glib exception: %s" #~ msgstr "Поток завершилÑÑ Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸ÐµÐ¼ в Glib: %s" #~ msgid "Unable to create temporary directory" #~ msgstr "Ðе удалоÑÑŒ Ñоздать временный каталог" #, c-format #~ msgid "Unable to create data base environment (%s)" #~ msgstr "Ðе удалоÑÑŒ Ñоздать окружение Ð´Ð»Ñ Ð±Ð°Ð·Ñ‹ данных (%s)" #, c-format #~ msgid "Unable to set duplicate flags for secondary key DB (%s)" #~ msgstr "" #~ "Ðевозможно уÑтановить повторÑющиеÑÑ Ð¼ÐµÑ‚ÐºÐ¸ Ð´Ð»Ñ Ð²Ñ‚Ð¾Ñ€Ð¸Ñ‡Ð½Ð¾Ð¹ базы данных " #~ "ключей (%s)" #, c-format #~ msgid "Unable to create job database (%s)" #~ msgstr "Ðе удалоÑÑŒ Ñоздать базу данных задач (%s)" #, c-format #~ msgid "Unable to create DB for secondary name keys (%s)" #~ msgstr "Ðе удалоÑÑŒ Ñоздать базу данных Ð´Ð»Ñ Ð²Ñ‚Ð¾Ñ€Ð¸Ñ‡Ð½Ñ‹Ñ… ключей имён (%s)" #, c-format #~ msgid "Unable to create DB for secondary endpoint keys (%s)" #~ msgstr "" #~ "Ðе удалоÑÑŒ Ñоздать базу данных Ð´Ð»Ñ Ð²Ñ‚Ð¾Ñ€Ð¸Ñ‡Ð½Ñ‹Ñ… ключей точек входа (%s)" #, c-format #~ msgid "Unable to create DB for secondary service info keys (%s)" #~ msgstr "" #~ "Ðе удалоÑÑŒ Ñоздать базу данных Ð´Ð»Ñ Ð²Ñ‚Ð¾Ñ€Ð¸Ñ‡Ð½Ñ‹Ñ… ключей информации о Ñлужбах " #~ "(%s)" #, c-format #~ msgid "Unable to associate secondary DB with primary DB (%s)" #~ msgstr "" #~ "Ðевозможно поÑтавить в ÑоответÑтвие вторичную базу данных первичной (%s)" #, c-format #~ msgid "Job database created successfully (%s)" #~ msgstr "УÑпешно Ñоздана база данных задач (%s)" #, c-format #~ msgid "Error from BDB: %s: %s" #~ msgstr "Ошибка BDB: %s: %s" #, c-format #~ msgid "Error from BDB: %s" #~ msgstr "Ошибка BDB: %s" #, c-format #~ msgid "Unable to write key/value pair to job database (%s): Key \"%s\"" #~ msgstr "" #~ "Ðевозможно запиÑать пару ключ/значение в базу данных задач (%s): Ключ 
\"%s" #~ "\"" #~ msgid "" #~ "ENOENT: The file or directory does not exist, Or a nonexistent re_source " #~ "file was specified." #~ msgstr "" #~ "ENOENT: Файл или каталог не ÑущеÑтвуют, либо указан неÑущеÑтвующий файл " #~ "re_source." #~ msgid "" #~ "DB_OLD_VERSION: The database cannot be opened without being first " #~ "upgraded." #~ msgstr "" #~ "DB_OLD_VERSION: База данных не может быть открыта без предварительного " #~ "Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð²ÐµÑ€Ñии." #~ msgid "" #~ "EEXIST: DB_CREATE and DB_EXCL were specified and the database exists." #~ msgstr "EEXIST: были заданы DB_CREATE и DB_EXCL, и база данных ÑущеÑтвует ." #~ msgid "EINVAL" #~ msgstr "EINVAL" #, c-format #~ msgid "Job resubmission failed: Unable to load broker (%s)" #~ msgstr "ПерезаÑылка задачи оборвана: Ðевозможно подгрузить планировщик (%s)" #~ msgid "" #~ "Job resubmission aborted because no resource returned any information" #~ msgstr "" #~ "ПерезаÑылка задачи оборвана, Ñ‚.к. ни один из реÑурÑов не предоÑтавил " #~ "информацию" #, c-format #~ msgid "" #~ "Unable to resubmit job (%s), unable to parse obtained job description" #~ msgstr "" #~ "Ðе удалоÑÑŒ перезаÑлать задачу (%s), Ñ‚.к. невозможно разобрать полученное " #~ "опиÑание задачи" #, c-format #~ msgid "" #~ "Unable to resubmit job (%s), target information retrieval failed for " #~ "target: %s" #~ msgstr "" #~ "Ðевозможно перезапуÑтить задачу (%s), Ñбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о цели %s" #, c-format #~ msgid "Unable to resubmit job (%s), no targets applicable for submission" #~ msgstr "Ðевозможно перезапуÑтить задачу (%s), нет подходÑщих целей" #, c-format #~ msgid "" #~ "Unable to migrate job (%s), job description could not be retrieved " #~ "remotely" #~ msgstr "" #~ "Ðевозможно мигрировать задачу (%s), опиÑание задачи не может быть " #~ "извлечено Ñ ÑƒÐ´Ð°Ð»Ñ‘Ð½Ð½Ð¾Ð³Ð¾ иÑточника" #~ msgid "Job migration aborted, no resource returned any information" #~ msgstr "" #~ "Перенаправление задачи оборвано, Ñ‚.к. ни один из реÑурÑов не предоÑтавил " #~ "информацию" #, c-format #~ msgid "Job migration aborted, unable to load broker (%s)" #~ msgstr "" #~ "Перенаправление задачи оборвано, невозможно подгрузить планировщик (%s)" #, c-format #~ msgid "Unable to migrate job (%s), unable to parse obtained job description" #~ msgstr "" #~ "Ðе удалоÑÑŒ перенаправить задачу (%s), Ñ‚.к. невозможно разобрать " #~ "полученное опиÑание задачи" #, c-format #~ msgid "Unable to load submission plugin for %s interface" #~ msgstr "" #~ "Ðевозможно погрузить подключаемый модуль Ð´Ð»Ñ Ð·Ð°Ð¿ÑƒÑка задач через " #~ "Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ %s" #, c-format #~ msgid "Job migration failed for job (%s), no applicable targets" #~ msgstr "" #~ "Ðе удалоÑÑŒ перенаправить задачу (%s), возможные Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¾Ñ‚ÑутÑтвуют" #, c-format #~ msgid "" #~ "Trying to migrate to %s: Migration to a %s interface is not supported." #~ msgstr "Попытка миграции на %s: ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð° Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ %s не поддерживаетÑÑ." #~ msgid "VOMS command is empty" #~ msgstr "ПуÑÑ‚Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° VOMS" #, c-format #~ msgid "OpenSSL error -- %s" #~ msgstr "Ошибка OpenSSL -- %s" #, c-format #~ msgid "Library : %s" #~ msgstr "Библиотека: %s" #, c-format #~ msgid "Function : %s" #~ msgstr "ФункциÑ: %s" #, c-format #~ msgid "Reason : %s" #~ msgstr "Причина: %s" #~ msgid "User interface error" #~ msgstr "Ошибка интерфейÑа пользователÑ" #~ msgid "Aborted!" #~ msgstr "ИÑполнение прервано!" 
#~ msgid "Failed to sign proxy" #~ msgstr "Сбой подпиÑи доверенноÑти" #, c-format #~ msgid "VOMS line contains wrong number of tokens (%u expected): \"%s\"" #~ msgstr "" #~ "Строка VOMS Ñодержит неверное количеÑтво токенов (ожидаетÑÑ %u): \"%s\"" #, c-format #~ msgid "Cannot get VOMS server %s information from the vomses files" #~ msgstr "Ðевозможно найти информацию о Ñервере VOMS %s из файлов vomses" #, c-format #~ msgid "There are %d commands to the same VOMS server %s" #~ msgstr "%d инÑтрукций направлено на один и тот же Ñервер VOMS, %s" #, c-format #~ msgid "Try to get attribute from VOMS server with order: %s" #~ msgstr "Попытка получить атрибут Ñ Ñервера VOMS Ñ Ð¿Ð¾Ñ€Ñдком: %s" #, c-format #~ msgid "Message sent to VOMS server %s is: %s" #~ msgstr "Сообщение, отправленное на Ñервер VOMS %s: %s" #, c-format #~ msgid "" #~ "The VOMS server with the information:\n" #~ "\t%s\n" #~ "can not be reached, please make sure it is available" #~ msgstr "" #~ "Ðевозможно ÑвÑзатьÑÑ Ñ Ñервером VOMS Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸ÐµÐ¹:\n" #~ "\t%s\n" #~ "пожалуйÑта, проверьте, доÑтупен ли Ñтот Ñервер" #~ msgid "No HTTP response from VOMS server" #~ msgstr "Сервер VOMS не отзываетÑÑ Ð¿Ð¾ HTTP" #, c-format #~ msgid "Returned message from VOMS server: %s" #~ msgstr "Сообщение Ñ Ñервера VOMS: %s" #~ msgid "No stream response from VOMS server" #~ msgstr "Сервер VOMS не отзываетÑÑ" #, c-format #~ msgid "" #~ "The validity duration of VOMS AC is shortened from %s to %s, due to the " #~ "validity constraint on voms server side.\n" #~ msgstr "" #~ "Срок дейÑÑ‚Ð²Ð¸Ñ Ñертификата атрибута VOMS (AC) Ñокращён Ñ %s до %s, в ÑвÑзи " #~ "Ñ Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸ÐµÐ¼ Ñо Ñтороны Ñервера VOMS.\n" #, c-format #~ msgid "" #~ "Cannot get any AC or attributes info from VOMS server: %s;\n" #~ " Returned message from VOMS server: %s\n" #~ msgstr "" #~ "Ðевозможно получить Ñертификат атрибута (AC) или информацию об атрибутах " #~ "Ñ Ñервера VOMS: %s;\n" #~ " Сообщение, возвращённое Ñервером VOMS: %s\n" #, c-format #~ msgid "Returned message from VOMS server %s is: %s\n" #~ msgstr "Сообщение, полученное Ñ Ñервера VOMS %s: %s\n" #, c-format #~ msgid "The attribute information from VOMS server: %s is list as following:" #~ msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾Ð± атрибутах Ñ Ñервера VOMS: %s Ñодержит:" #, c-format #~ msgid "" #~ "There are %d servers with the same name: %s in your vomses file, but none " #~ "of them can be reached, or can return valid message. But proxy without " #~ "VOMS AC extension will still be generated." #~ msgstr "" #~ "Ð’ Вашем файле vomses указаны %d Ñерверов Ñ Ð¾Ð´Ð¸Ð½Ð°ÐºÐ¾Ð²Ñ‹Ð¼ именем %s, но не " #~ "вÑе доÑтупны или правильно отзываютÑÑ. ДоверенноÑть без раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ VOMS " #~ "AC будет вÑÑ‘ равно Ñоздана." 
#~ msgid "Failed to generate X509 request with NSS" #~ msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа X509 Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ NSS" #~ msgid "Failed to create X509 certificate with NSS" #~ msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñертификата X509 Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ NSS" #~ msgid "Failed to export X509 certificate from NSS DB" #~ msgstr "Сбой ÑÐ¾Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ñертификата X509 из базы данных NSS" #~ msgid "Failed to import X509 certificate into NSS DB" #~ msgstr "Сбой Ð¸Ð¼Ð¿Ð¾Ñ€Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñертификата X509 в базу данных NSS" #~ msgid "Failed to initialize the credential configuration" #~ msgstr "Сбой инициализации наÑтроек параметров доÑтупа" #~ msgid "SSL locks not initialized" #~ msgstr "Блокировка SSL не инициализирована" #, c-format #~ msgid "wrong SSL lock requested: %i of %i: %i - %s" #~ msgstr "Запрошена Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²ÐºÐ° SSL: %i из %i: %i - %s" #~ msgid "Number of OpenSSL locks changed - reinitializing" #~ msgstr "ИзменилоÑÑŒ чиÑло блокировок OpenSSL - Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¸Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ" #, c-format #~ msgid "Skipping replica on local host %s" #~ msgstr "ПропуÑкаетÑÑ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ ÐºÐ¾Ð¿Ð¸Ñ %s" #, c-format #~ msgid "No locations left for %s" #~ msgstr "Ðе оÑталоÑÑŒ раÑположений Ð´Ð»Ñ %s" #~ msgid "No overwrite requested or allowed, skipping pre-cleaning" #~ msgstr "" #~ "ПерезапиÑÑŒ не запрошена или не разрешена, Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¾Ñ‡Ð¸Ñтка " #~ "пропуÑкаетÑÑ" #~ msgid "Can't obtain configuration. Public information is disabled." #~ msgstr "Ðе удалоÑÑŒ получить конфигурацию. ÐžÑ‚ÐºÑ€Ñ‹Ñ‚Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð½ÐµÐ´Ð¾Ñтупна." #, c-format #~ msgid "EMIES:PauseActivity: job %s - %s" #~ msgstr "EMIES:PauseActivity: задача %s - %s" #, c-format #~ msgid "EMIES:ResumeActivity: job %s - %s" #~ msgstr "EMIES:ResumeActivity: задача %s - %s" #, c-format #~ msgid "EMIES:CancelActivity: job %s - %s" #~ msgstr "EMIES:CancelActivity: задача %s - %s" #, c-format #~ msgid "job %s cancelled successfully" #~ msgstr "задача %s уÑпешно прервана" #, c-format #~ msgid "EMIES:WipeActivity: job %s - %s" #~ msgstr "EMIES:WipeActivity: задача %s - %s" #, c-format #~ msgid "job %s (will be) cleaned successfully" #~ msgstr "задача %s (будет) уÑпешно очищена" #, c-format #~ msgid "EMIES:RestartActivity: job %s - %s" #~ msgstr "EMIES:RestartActivity: задача %s - %s" #, c-format #~ msgid "job %s restarted successfully" #~ msgstr "задача %s уÑпешно перезапущена" #, c-format #~ msgid "" #~ "EMIES:CreateActivity: request = \n" #~ "%s" #~ msgstr "" #~ "EMIES:CreateActivity: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "EMIES:CreateActivity: too many activity descriptions" #~ msgstr "EMIES:CreateActivity: обнаружено Ñлишком много опиÑаний задач" #~ msgid "EMIES:CreateActivity: no job description found" #~ msgstr "EMIES:CreateActivity: опиÑание задачи не обнаружено" #~ msgid "EMIES:CreateActivity: max jobs total limit reached" #~ msgstr "EMIES:CreateActivity: доÑтигнут предел общего чиÑла задач" #, c-format #~ msgid "ES:CreateActivity: Failed to create new job: %s" #~ msgstr "ES:CreateActivity: Ðе удалоÑÑŒ Ñоздать новую задачу: %s" #~ msgid "EMIES:CreateActivity finished successfully" #~ msgstr "EMIES:CreateActivity уÑпешно завершено" #, c-format #~ msgid "New job accepted with id %s" #~ msgstr "ÐÐ¾Ð²Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° принÑта Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€Ð¾Ð¼ %s" #, c-format #~ msgid "" #~ "EMIES:CreateActivity: response = \n" #~ "%s" #~ msgstr "" #~ "EMIES:CreateActivity: ответ = \n" #~ "%s" #, c-format #~ msgid "EMIES:GetActivityStatus: job %s - %s" #~ msgstr "EMIES:GetActivityStatus: задача %s - %s" 
#, c-format #~ msgid "EMIES:GetActivityInfo: job %s - failed to retrieve GLUE2 information" #~ msgstr "" #~ "EMIES:GetActivityInfo: задача %s - не удалоÑÑŒ получить информацию по " #~ "формату GLUE2" #, c-format #~ msgid "EMIES:NotifyService: job %s - %s" #~ msgstr "EMIES:NotifyService: задача %s - %s" #~ msgid "Bad URL in acix_endpoint" #~ msgstr "ÐедопуÑтимый URL в acix_endpoint" #~ msgid "gm-delegations-converter changes format of delegation database." #~ msgstr "" #~ "gm-delegations-converter преобразовывает формат базы данных делегированиÑ." #~ msgid "convert from specified input database format [bdb|sqlite]" #~ msgstr "" #~ "преобразовать из указанного иÑходного формата базы данных [bdb|sqlite]" #~ msgid "database format" #~ msgstr "формат базы данных" #~ msgid "convert into specified output database format [bdb|sqlite]" #~ msgstr "преобразовать в указанный выходной формат базы данных [bdb|sqlite]" #, c-format #~ msgid "Unsupported value for allownew: %s" #~ msgstr "Ðеподдерживаемое значение Ð´Ð»Ñ allownew: %s" #~ msgid "Wrong number in maxjobdesc" #~ msgstr "ÐедопуÑтимое чиÑло в maxjobdesc" #, c-format #~ msgid "Unsupported configuration command: %s" #~ msgstr "ÐÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ Ð¸Ð½ÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ Ð½Ð°Ñтроек: %s" #, c-format #~ msgid "Mapped user:group (%s:%s) not found" #~ msgstr "СоответÑтвующие user:group (%s:%s) не обнаружены" #~ msgid "Job submission user can't be root" #~ msgstr "Пользователь, заÑылающий задачи, не может быть Ñуперпользователем" #~ msgid "Failed processing A-REX configuration" #~ msgstr "Ðе удалоÑÑŒ обработать наÑтройки A-REX" #~ msgid "This user is denied to submit new jobs." #~ msgstr "Этому пользователю отказано в праве запуÑка новых задач." #~ msgid "No control or session directories defined in configuration" #~ msgstr "Ð’ наÑтройках не заданы контрольные директории или каталоги ÑеÑÑий" #, c-format #~ msgid "Job submission user: %s (%i:%i)" #~ msgstr "Пользователь, отправивший задачу: %s (%i:%i)" #~ msgid "Job plugin was not initialised" #~ msgstr "Модуль обработки задач не был запущен" #~ msgid "No delegated credentials were passed" #~ msgstr "Делегированные параметры доÑтупа не переданы" #, c-format #~ msgid "Cancelling job %s" #~ msgstr "Прерывание задачи %s" #, c-format #~ msgid "Cleaning job %s" #~ msgstr "УдалÑетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° %s" #~ msgid "Request to open file with storing in progress" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° в процеÑÑе запиÑи" #, c-format #~ msgid "Retrieving file %s" #~ msgstr "Получение файла %s" #, c-format #~ msgid "Accepting submission of new job or modification request: %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° заÑылку новой задачи или изменение Ñтарой принÑÑ‚: %s" #, c-format #~ msgid "Storing file %s" #~ msgstr "ЗапиÑываетÑÑ Ñ„Ð°Ð¹Ð» %s" #, c-format #~ msgid "Unknown open mode %i" #~ msgstr "ÐеизвеÑтный режим Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ %i" #, c-format #~ msgid "action(%s) != request" #~ msgstr "action(%s) != request" #~ msgid "Failed writing job description" #~ msgstr "Ðе удалоÑÑŒ запиÑать опиÑание задачи" #~ msgid "Failed writing local description" #~ msgstr "Сбой запиÑи локального опиÑаниÑ" #~ msgid "Failed writing ACL" #~ msgstr "Ðе удалоÑÑŒ запиÑать ACL" #~ msgid "Failed to run external plugin" #~ msgstr "Ðе удалоÑÑŒ запуÑтить внешний подключаемый модуль" #, c-format #~ msgid "Failed to create session directory %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать каталог ÑеÑÑии %s" #~ msgid "Failed writing status" #~ msgstr "Ðе удалоÑÑŒ запиÑать ÑоÑтоÑние" #, c-format #~ msgid "Failed to lock delegated credentials: %s" #~ msgstr 
"Ðевозможно заблокировать делегированные параметры доÑтупа: %s" #, c-format #~ msgid "Renewing proxy for job %s" #~ msgstr "ОбновлÑетÑÑ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñть Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s" #, c-format #~ msgid "New proxy expires at %s" #~ msgstr "Срок дейÑÑ‚Ð²Ð¸Ñ Ð½Ð¾Ð²Ð¾Ð¹ доверенноÑти иÑтекает в %s" #~ msgid "Failed to write 'local' information" #~ msgstr "Ðе удалоÑÑŒ запиÑать 'локальную' информацию" #~ msgid "Failed to renew proxy" #~ msgstr "Ðе удалоÑÑŒ обновить доверенноÑть" #~ msgid "" #~ "New proxy expiry time is not later than old proxy, not renewing proxy" #~ msgstr "" #~ "Срок дейÑÑ‚Ð²Ð¸Ñ Ð½Ð¾Ð²Ð¾Ð¹ доверенноÑти не дольше Ñтарой, доверенноÑть не " #~ "обновлÑетÑÑ" #, c-format #~ msgid "Checking file %s" #~ msgstr "Проверка файла %s" #~ msgid "ID contains forbidden characters" #~ msgstr "ID Ñодержит недопуÑтимые Ñимволы" #~ msgid "Out of tries while allocating new job ID" #~ msgstr "ЗакончилиÑÑŒ попытки приÑÐ²Ð¾ÐµÐ½Ð¸Ñ Ð½Ð¾Ð²Ð¾Ð³Ð¾ Ñрлыка задачи" #, c-format #~ msgid "Failed to read job's local description for job %s from %s" #~ msgstr "Ðе удалоÑÑŒ прочеÑть локальное опиÑание Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s из %s" #~ msgid "No non-draining session directories available" #~ msgstr "Ðет каталогов ÑеÑÑий не в ÑоÑтоÑнии разгрузки" #, c-format #~ msgid "Using control directory %s" #~ msgstr "ИÑпользуетÑÑ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ñ‹Ð¹ каталог %s" #, c-format #~ msgid "Using session directory %s" #~ msgstr "ИÑпользуетÑÑ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³ ÑеÑÑии %s" #, c-format #~ msgid "Failed to read job's ACL for job %s from %s" #~ msgstr "Ðе удалоÑÑŒ прочеÑть правила доÑтупа Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s из %s" #, c-format #~ msgid "Failed to parse user policy for job %s" #~ msgstr "Сбой разбора правил допуÑка Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s" #, c-format #~ msgid "Failed to load policy evaluator for policy of job %s" #~ msgstr "Ðе удалоÑÑŒ подгрузить анализатор Ð´Ð»Ñ Ð¿Ñ€Ð°Ð²Ð¸Ð» допуÑка задачи %s" #, c-format #~ msgid "Unknown ACL policy %s for job %s" #~ msgstr "ÐеизвеÑтное правило доÑтупа %s Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s" #, c-format #~ msgid "Unknown authorization command %s" #~ msgstr "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° команды допуÑка %s" #, c-format #~ msgid "" #~ "The [vo] section labeled '%s' has no file associated and can't be used " #~ "for matching" #~ msgstr "" #~ "Разделу [vo] Ñ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸ÐµÐ¼ '%s' не поÑтавлен в ÑоответÑтвие файл, и он не " #~ "может быть иÑпользован Ð´Ð»Ñ Ð°Ð²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ð¸" #, c-format #~ msgid "Plugin %s failed to run" #~ msgstr "Подключаемый модуль %s не Ñмог запуÑтитьÑÑ" #, c-format #~ msgid "Plugin %s printed: %u" #~ msgstr "Подключаемый модуль %s вывел на печать: %u" #, c-format #~ msgid "Plugin %s error: %u" #~ msgstr "Ошибка подключаемого Ð¼Ð¾Ð´ÑƒÐ»Ñ %s: %u" #, c-format #~ msgid "VOMS proxy processing returns: %i - %s" #~ msgstr "Обработка доверенноÑти VOMS выдаёт: %i - %s" #, c-format #~ msgid "VOMS trust chains: %s" #~ msgstr "Цепочка Ñертификатов VOMS: %s" #~ msgid "User name mapping has empty command" #~ msgstr "ПуÑÑ‚Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° в приÑвоении имени пользователÑ" #, c-format #~ msgid "User name mapping has empty name: %s" #~ msgstr "ПуÑтое Ð¸Ð¼Ñ Ð² приÑвоении имени пользователÑ: %s" #, c-format #~ msgid "response: %s" #~ msgstr "ответ: %s" #, c-format #~ msgid "Send response failed: %s" #~ msgstr "Сбой отÑылки отклика: %s" #~ msgid "Response sending error" #~ msgstr "Ошибка отÑылки отклика" #~ msgid "Closed connection" #~ msgstr "Соединение закрыто" #, c-format #~ msgid "Socket conversion failed: %s" #~ msgstr "Ошибка Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ Ñокета: %s" #, c-format #~ msgid 
"Failed to obtain own address: %s" #~ msgstr "Сбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ ÑобÑтвенного адреÑа: %s" #, c-format #~ msgid "Failed to recognize own address type (IPv4 or IPv6) - %u" #~ msgstr "Сбой раÑÐ¿Ð¾Ð·Ð½Ð°Ð²Ð°Ð½Ð¸Ñ Ñ‚Ð¸Ð¿Ð° ÑобÑтвенного адреÑа (IPv4 или IPv6) - %u" #, c-format #~ msgid "Accepted connection on [%s]:%u" #~ msgstr "ПринÑто Ñоединение к [%s]:%u" #, c-format #~ msgid "Accepted connection on %u.%u.%u.%u:%u" #~ msgstr "ПринÑто Ñоединение к %u.%u.%u.%u:%u" #~ msgid "Accept failed" #~ msgstr "Сбой принÑтиÑ" #, c-format #~ msgid "Accept failed: %s" #~ msgstr "Сбой принÑтиÑ: %s" #, c-format #~ msgid "Accepted connection from [%s]:%u" #~ msgstr "ПринÑто Ñоединение Ñ [%s]:%u" #, c-format #~ msgid "Accepted connection from %u.%u.%u.%u:%u" #~ msgstr "ПринÑто Ñоединение Ñ %u.%u.%u.%u:%u" #~ msgid "Authenticate in commands failed" #~ msgstr "Сбой проверки подлинноÑти при иÑполнении инÑтрукций" #~ msgid "Authentication failure" #~ msgstr "Сбой при проверке подлинноÑти" #, c-format #~ msgid "User subject: %s" #~ msgstr "Субъект Ñертификата: %s" #, c-format #~ msgid "Encrypted: %s" #~ msgstr "Зашифрован: %s" #~ msgid "User has no proper configuration associated" #~ msgstr "Пользователь не аÑÑоциирован Ñ Ð¿Ð¾Ð´Ñ…Ð¾Ð´Ñщей наÑтройкой" #~ msgid "" #~ "User has empty virtual directory tree.\n" #~ "Either user has no authorised plugins or there are no plugins configured " #~ "at all." #~ msgstr "" #~ "Дерево виртуального каталога Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð¿ÑƒÑто.\n" #~ "Либо у Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½ÐµÑ‚ допущенных раÑширений, либо раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð²Ð¾Ð¾Ð±Ñ‰Ðµ не " #~ "наÑтроены." #~ msgid "Read commands in authenticate failed" #~ msgstr "Сбой команд Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð² проверке подлинноÑти" #~ msgid "Control connection (probably) closed" #~ msgstr "Контрольное Ñоединение (наверное) закрыто" #~ msgid "Command EPRT" #~ msgstr "Команда EPRT" #, c-format #~ msgid "Failed to parse remote address %s" #~ msgstr "Сбой разборки удалённого адреÑа %s" #, c-format #~ msgid "Command USER %s" #~ msgstr "Команда USER %s" #~ msgid "Command CDUP" #~ msgstr "Команда CDUP" #, c-format #~ msgid "Command CWD %s" #~ msgstr "Команда CWD %s" #, c-format #~ msgid "Command MKD %s" #~ msgstr "Команда MKD %s" #, c-format #~ msgid "Command SIZE %s" #~ msgstr "Команда SIZE %s" #, c-format #~ msgid "Command SBUF: %i" #~ msgstr "Команда SBUF: %i" #, c-format #~ msgid "Command MLST %s" #~ msgstr "Команда MLST %s" #, c-format #~ msgid "Command DELE %s" #~ msgstr "Команда DELE %s" #, c-format #~ msgid "Command RMD %s" #~ msgstr "Команда RMD %s" #, c-format #~ msgid "Command TYPE %c" #~ msgstr "Команда TYPE %c" #, c-format #~ msgid "Command MODE %c" #~ msgstr "Команда MODE %c" #~ msgid "Command ABOR" #~ msgstr "Команда ABOR" #, c-format #~ msgid "Command REST %s" #~ msgstr "Команда REST %s" #, c-format #~ msgid "Command EPSV %s" #~ msgstr "Команда EPSV %s" #~ msgid "Command SPAS" #~ msgstr "Команда SPAS" #~ msgid "Command PASV" #~ msgstr "Команда PASV" #~ msgid "local_pasv failed" #~ msgstr "Сбой local_pasv" #~ msgid "local_spas failed" #~ msgstr "Сбой local_spas" #~ msgid "Command PORT" #~ msgstr "Команда PORT" #~ msgid "active_data is disabled" #~ msgstr "active_data отключено" #~ msgid "local_port failed" #~ msgstr "Сбой local_port" #, c-format #~ msgid "Command MLSD %s" #~ msgstr "Команда MLSD %s" #, c-format #~ msgid "Command NLST %s" #~ msgstr "Команда NLST %s" #, c-format #~ msgid "Command LIST %s" #~ msgstr "Команда LIST %s" #, c-format #~ msgid "Command ERET %s" #~ msgstr "Команда ERET %s" #, c-format #~ msgid "Command RETR %s" 
#~ msgstr "Команда RETR %s" #, c-format #~ msgid "Command STOR %s" #~ msgstr "Команда STOR %s" #, c-format #~ msgid "Command ALLO %i" #~ msgstr "Команда ALLO %i" #~ msgid "Command OPTS" #~ msgstr "Команда OPTS" #~ msgid "Command OPTS RETR" #~ msgstr "Команда OPTS RETR" #, c-format #~ msgid "Option: %s" #~ msgstr "ОпциÑ: %s" #~ msgid "Command NOOP" #~ msgstr "Команда NOOP" #~ msgid "Command QUIT" #~ msgstr "Команда QUIT" #~ msgid "Failed to close, deleting client" #~ msgstr "Ðе удалоÑÑŒ закрыть, уничтожаетÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚" #, c-format #~ msgid "Command DCAU: %i '%s'" #~ msgstr "Команда DCAU: %i '%s'" #, c-format #~ msgid "Command PBZS: %s" #~ msgstr "Команда PBZS: %s" #, c-format #~ msgid "Setting pbsz to %lu" #~ msgstr "ПоÑылаетÑÑ pbsz на %lu" #, c-format #~ msgid "Command PROT: %s" #~ msgstr "Команда PROT: %s" #, c-format #~ msgid "Command MDTM %s" #~ msgstr "Команда MDTM %s" #, c-format #~ msgid "Raw command: %s" #~ msgstr "ÐÐµÐ¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚Ð°Ð½Ð½Ð°Ñ Ð¸Ð½ÑтрукциÑ: %s" #~ msgid "Failed to allocate memory for buffer" #~ msgstr "Ðе удалоÑÑŒ зарезервировать памÑть под буфер" #, c-format #~ msgid "Allocated %u buffers %llu bytes each." #~ msgstr "Выделено %u буферов по %llu байт каждый." #~ msgid "abort_callback: start" #~ msgstr "abort_callback: запуÑк" #, c-format #~ msgid "abort_callback: Globus error: %s" #~ msgstr "abort_callback: ошибка Globus: %s" #~ msgid "make_abort: start" #~ msgstr "make_abort: запуÑк" #~ msgid "Failed to abort data connection - ignoring and recovering" #~ msgstr "" #~ "Ðе удалоÑÑŒ оборвать Ñоединение Ð´Ð»Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… - игнорируем и воÑÑтанавливаемÑÑ" #~ msgid "make_abort: wait for abort flag to be reset" #~ msgstr "make_abort: ожидание ÑброÑа Ñемафора прерываниÑ" #~ msgid "make_abort: leaving" #~ msgstr "make_abort: выход" #~ msgid "check_abort: have Globus error" #~ msgstr "check_abort: получена ошибка Globus" #~ msgid "Abort request caused by transfer error" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° прерывание по причине ошибки передачи" #~ msgid "check_abort: sending 426" #~ msgstr "check_abort: поÑылаетÑÑ 426" #~ msgid "Abort request caused by error in transfer function" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° прерывание по причине ошибки в функции передачи" #~ msgid "Failed to start timer thread - timeout won't work" #~ msgstr "" #~ "Ðе удалоÑÑŒ запуÑтить поток таймера - прерывание по времени не будет " #~ "работать" #~ msgid "Killing connection due to timeout" #~ msgstr "Прерывание ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð² ÑвÑзи Ñ Ð¸Ñтёкшим лимитом времени" #~ msgid "Configuration section [userlist] is missing name." #~ msgstr "Раздел наÑтроек [userlist] не Ñодержит названиÑ." 
#, c-format #~ msgid "No such user: %s" #~ msgstr "Ðет такого пользователÑ: %s" #, c-format #~ msgid "No such group: %s" #~ msgstr "Ðет такой группы: %s" #, c-format #~ msgid "Improper debug level '%s'" #~ msgstr "ÐедопуÑтимый уровень отладки '%s'" #~ msgid "Missing option for command logreopen" #~ msgstr "ОтÑутÑÑ‚Ð²ÑƒÑŽÑ‰Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð´Ð»Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ logreopen" #~ msgid "Wrong option in logreopen" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð´Ð»Ñ logreopen" #, c-format #~ msgid "Failed to open log file %s" #~ msgstr "Ðе удалоÑÑŒ открыть журнальный файл %s" #~ msgid "Closing channel (list)" #~ msgstr "ЗакрываетÑÑ ÐºÐ°Ð½Ð°Ð» (перечиÑление)" #~ msgid "Data channel connected (list)" #~ msgstr "Канал передачи данных подÑоединён (перечиÑление)" #~ msgid "data_connect_retrieve_callback" #~ msgstr "data_connect_retrieve_callback" #~ msgid "Data channel connected (retrieve)" #~ msgstr "Канал передачи данных подÑоединён (получение)" #~ msgid "data_connect_retrieve_callback: allocate_data_buffer" #~ msgstr "data_connect_retrieve_callback: allocate_data_buffer" #~ msgid "data_connect_retrieve_callback: allocate_data_buffer failed" #~ msgstr "data_connect_retrieve_callback: Ñбой в allocate_data_buffer" #, c-format #~ msgid "data_connect_retrieve_callback: check for buffer %u" #~ msgstr "data_connect_retrieve_callback: проверка буфера %u" #, c-format #~ msgid "Closing channel (retrieve) due to local read error: %s" #~ msgstr "ЗакрываетÑÑ ÐºÐ°Ð½Ð°Ð» (загрузки) в ÑвÑзи Ñ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð¾Ð¹ ошибкой: %s" #~ msgid "Buffer registration failed" #~ msgstr "Сбой региÑтрации буфера" #~ msgid "data_retrieve_callback" #~ msgstr "data_retrieve_callback" #, c-format #~ msgid "Data channel (retrieve) %i %i %i" #~ msgstr "Канал передачи данных (получение) %i %i %i" #~ msgid "Closing channel (retrieve)" #~ msgstr "Канал закрываетÑÑ (получение)" #, c-format #~ msgid "Time spent waiting for network: %.3f ms" #~ msgstr "ВремÑ, проведённое в ожидании ÑвÑзи: %.3f мÑ" #, c-format #~ msgid "Time spent waiting for disc: %.3f ms" #~ msgstr "ВремÑ, проведённое в ожидании диÑка: %.3f мÑ" #~ msgid "data_retrieve_callback: lost buffer" #~ msgstr "data_retrieve_callback: буфер потерÑн" #~ msgid "data_connect_store_callback" #~ msgstr "data_connect_store_callback" #~ msgid "Data channel connected (store)" #~ msgstr "Канал передачи данных подÑоединён (запиÑÑŒ)" #~ msgid "Failed to register any buffer" #~ msgstr "Ðе удалоÑÑŒ зарегиÑтрировать ни одного буфера" #, c-format #~ msgid "Data channel (store) %i %i %i" #~ msgstr "Канал передачи данных (запиÑÑŒ) %i %i %i" #~ msgid "data_store_callback: lost buffer" #~ msgstr "data_store_callback: буфер потерÑн" #, c-format #~ msgid "Closing channel (store) due to error: %s" #~ msgstr "Прерывание канала (запиÑÑŒ) в ÑвÑзи Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ¾Ð¹: %s" #~ msgid "Closing channel (store)" #~ msgstr "ЗакрываетÑÑ ÐºÐ°Ð½Ð°Ð» (запиÑÑŒ)" #~ msgid "Can't parse access rights in configuration line" #~ msgstr "Ðе удалоÑÑŒ разобрать права доÑтупа в Ñтроке наÑтроек" #~ msgid "Can't parse user:group in configuration line" #~ msgstr "Ðе удалоÑÑŒ разобрать user:group в Ñтроке наÑтроек" #~ msgid "Can't recognize user in configuration line" #~ msgstr "Ðе удалоÑÑŒ определить Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² Ñтроке наÑтроек" #~ msgid "Can't recognize group in configuration line" #~ msgstr "Ðе удалоÑÑŒ определить группу в Ñтроке наÑтроек" #~ msgid "Can't parse or:and in configuration line" #~ msgstr "Ðе удалоÑÑŒ разобрать or:and в Ñтроке наÑтроек" #~ msgid "Can't parse configuration line" #~ msgstr "Ðе удалоÑÑŒ разобрать Ñтроку 
наÑтроек" #, c-format #~ msgid "Bad directory name: %s" #~ msgstr "Ðеверное Ð¸Ð¼Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð°: %s" #~ msgid "Can't parse create arguments in configuration line" #~ msgstr "Ðе удалоÑÑŒ обработать аргументы create в файле конфигурации" #~ msgid "Can't parse mkdir arguments in configuration line" #~ msgstr "Ðе удалоÑÑŒ обработать аргументы mkdir в файле конфигурации" #, c-format #~ msgid "Bad subcommand in configuration line: %s" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¸Ð½ÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ Ð² Ñтроке наÑтроек: %s" #~ msgid "Bad mount directory specified" #~ msgstr "Указан неподходÑщий каталог Ð´Ð»Ñ Ð¼Ð¾Ð½Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ" #, c-format #~ msgid "Mount point %s" #~ msgstr "Точка Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ %s" #, c-format #~ msgid "mkdir failed: %s" #~ msgstr "Ñбой mkdir: %s" #, c-format #~ msgid "Warning: mount point %s creation failed." #~ msgstr "Предупреждение: не удалоÑÑŒ Ñоздать точку Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ %s." #, c-format #~ msgid "plugin: open: %s" #~ msgstr "подключаемый модуль: открытие: %s" #~ msgid "Not enough space to store file" #~ msgstr "ÐедоÑтаточно Ñвободного меÑта Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи файла" #, c-format #~ msgid "open: changing owner for %s, %i, %i" #~ msgstr "открытие: Ñмена владельца Ð´Ð»Ñ %s, %i, %i" #, c-format #~ msgid "open: owner: %i %i" #~ msgstr "открытие: владелец: %i %i" #, c-format #~ msgid "Unknown open mode %s" #~ msgstr "ÐеизвеÑтный режим Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ %s" #~ msgid "plugin: close" #~ msgstr "подключаемый модуль: закрытие" #~ msgid "plugin: read" #~ msgstr "подключаемый модуль: чтение" #~ msgid "Error while reading file" #~ msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°%1" #~ msgid "plugin: write" #~ msgstr "подключаемый модуль: запиÑÑŒ" #~ msgid "Zero bytes written to file" #~ msgstr "Ð’ файл запиÑано ноль байтов" #, c-format #~ msgid "plugin: checkdir: %s" #~ msgstr "подключаемый модуль: проверка каталога: %s" #, c-format #~ msgid "plugin: checkdir: access: %s" #~ msgstr "подключаемый модуль: проверка каталога: доÑтуп: %s" #, c-format #~ msgid "plugin: checkdir: access: allowed: %s" #~ msgstr "подключаемый модуль: проверка каталога: доÑтуп: открыт: %s" #, c-format #~ msgid "No plugin is configured or authorised for requested path %s" #~ msgstr "" #~ "ОтÑутÑтвуют наÑтроенные или допущенные раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð¿Ð¾ заданному адреÑу %s" #~ msgid "FilePlugin: more unload than load" #~ msgstr "FilePlugin: разгрузок больше, чем загрузок" #, c-format #~ msgid "Can't load plugin %s for access point %s" #~ msgstr "Ðевозможно загрузить подключаемый модуль %s Ð´Ð»Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ доÑтупа %s" #, c-format #~ msgid "Plugin %s for access point %s is broken." #~ msgstr "РаÑширение %s Ð´Ð»Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ доÑтупа %s неиÑправно." #, c-format #~ msgid "Plugin %s for access point %s acquire failed (should never happen)." #~ msgstr "" #~ "РаÑширение %s Ð´Ð»Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ доÑтупа %s недоÑтупно (никогда не должно " #~ "ÑлучатьÑÑ)." 
#, c-format #~ msgid "Destructor with dlclose (%s)" #~ msgstr "ДеÑтруктор Ñ dlclose (%s)" #, c-format #~ msgid "FileNode: operator= (%s <- %s) %lu <- %lu" #~ msgstr "FileNode: operator= (%s <- %s) %lu <- %lu" #~ msgid "Copying with dlclose" #~ msgstr "Копирование Ñ dlclose" #~ msgid "configuration file not found" #~ msgstr "файл наÑтроек не найден" #~ msgid "Wrong port number in configuration" #~ msgstr "Ðеприемлемый номер порта в наÑтройках" #~ msgid "Wrong maxconnections number in configuration" #~ msgstr "Ðеприемлемое значение maxconnections в наÑтройках" #~ msgid "Wrong defaultbuffer number in configuration" #~ msgstr "Ðеприемлемое значение defaultbuffer в наÑтройках" #~ msgid "Wrong maxbuffer number in configuration" #~ msgstr "Ðеприемлемое значение maxbuffer в наÑтройках" #, c-format #~ msgid "Can't resolve host %s" #~ msgstr "Ðе удалоÑÑŒ найти Ñервер %s" #~ msgid "Could not determine hostname from gethostname()" #~ msgstr "Ðевозможно определить Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð° иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ gethostname()" #~ msgid "unnamed group" #~ msgstr "группа без имени" #~ msgid "undefined plugin name" #~ msgstr "неизвеÑтное Ð¸Ð¼Ñ Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡Ð°ÐµÐ¼Ð¾Ð³Ð¾ модулÑ" #~ msgid "undefined virtual plugin path" #~ msgstr "не задан путь к виртуальному раÑширению" #, c-format #~ msgid "bad directory for plugin: %s" #~ msgstr "неверный каталог Ð´Ð»Ñ Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡Ð°ÐµÐ¼Ð¾Ð³Ð¾ модулÑ: %s" #, c-format #~ msgid "Already have directory: %s" #~ msgstr "Каталог %s уже ÑущеÑтвует" #, c-format #~ msgid "Registering directory: %s with plugin: %s" #~ msgstr "РегиÑтрируетÑÑ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³: %s Ñ Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡Ð°ÐµÐ¼Ñ‹Ð¼ модулем: %s" #, c-format #~ msgid "file node creation failed: %s" #~ msgstr "Ñбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÑƒÐ·Ð»Ð° файла: %s" #, c-format #~ msgid "improper attribute for allowencryption command: %s" #~ msgstr "недопуÑтимый атрибут команды allowencryption: %s" #, c-format #~ msgid "improper attribute for allowactvedata command: %s" #~ msgstr "недопуÑтимый атрибут команды allowactvedata: %s" #, c-format #~ msgid "failed while processing configuration command: %s %s" #~ msgstr "Ñбой при обработке команды наÑтройки: %s %s" #, c-format #~ msgid "Failed processing authorization group %s" #~ msgstr "Ðе удалоÑÑŒ обработать группу допуÑка %s" #~ msgid "Missing authgroup name in allowaccess" #~ msgstr "ОтÑутÑтвует название authgroup в allowaccess" #~ msgid "Missing authgroup name in denyaccess" #~ msgstr "ОтÑутÑтвует название authgroup в denyaccess" #~ msgid "failed to process client identification" #~ msgstr "Ðе удалоÑÑŒ обработать личные данные клиента" #~ msgid "failed to identify plugins path" #~ msgstr "Ñбой Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¿ÑƒÑ‚Ð¸ к модулÑм" #, c-format #~ msgid "Registering dummy directory: %s" #~ msgstr "РегиÑтрируетÑÑ Ð²Ñпомогательный каталог: %s" #~ msgid "Activation failed" #~ msgstr "Ошибка активации" #~ msgid "Child exited" #~ msgstr "Потомок завершил работу" #~ msgid "Globus connection error" #~ msgstr "Ошибка ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Globus" #~ msgid "New connection" #~ msgstr "Ðовое Ñоединение" #~ msgid "Server stopped" #~ msgstr "Сервер оÑтановлен" #~ msgid "Error: failed to set handler for SIGTERM" #~ msgstr "Ошибка: не удалоÑÑŒ уÑтановить обработчик SIGTERM" #~ msgid "fork failed" #~ msgstr "ошибка при выполнении ÑиÑтемного вызова fork" #~ msgid "wait failed - killing child" #~ msgstr "ошибка Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ - прерывание процеÑÑа-потомка" #~ msgid "Killed with signal: " #~ msgstr "Прерван Ñигналом: " #~ msgid "Restarting after segmentation violation." #~ msgstr "ПерезапуÑк поÑле Ð½Ð°Ñ€ÑƒÑˆÐµÐ½Ð¸Ñ Ñегментации." 
#~ msgid "Waiting 1 minute" #~ msgstr "Ожидание: 1 минута" #~ msgid "Error: failed to set handler for SIGCHLD" #~ msgstr "Ошибка: не удалоÑÑŒ уÑтановить обработчик SIGCHLD" #~ msgid "Missing argument" #~ msgstr "ОтÑутÑтвует аргумент" #~ msgid "Unknown option" #~ msgstr "ÐеизвеÑтный параметр" #~ msgid "Wrong port number" #~ msgstr "ÐедопуÑтимый номер порта" #~ msgid "Wrong number of connections" #~ msgstr "ÐедопуÑтимое количеÑтво подключений" #~ msgid "Wrong buffer size" #~ msgstr "ÐедопуÑтимый размер буфера" #~ msgid "Wrong maximal buffer size" #~ msgstr "ÐедопуÑтимый макÑимальный размер буфера" #, c-format #~ msgid "Failed to obtain local address: %s" #~ msgstr "Ðе удалоÑÑŒ получить локальный адреÑ: %s" #, c-format #~ msgid "Failed to create socket(%s): %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать Ñокет (%s): %s" #, c-format #~ msgid "Failed to limit socket to IPv6: %s" #~ msgstr "Ðе удалоÑÑŒ ограничить Ñокет до IPv6: %s" #, c-format #~ msgid "Failed to bind socket(%s): %s" #~ msgstr "Ðе удалоÑÑŒ ÑвÑзать Ñокет (%s): %s" #, c-format #~ msgid "Failed to listen on socket(%s): %s" #~ msgstr "Ðе удалоÑÑŒ проÑлушать Ñокет (%s): %s" #~ msgid "Not listening to anything" #~ msgstr "Ðичего не проÑлушиваетÑÑ" #, c-format #~ msgid "Some addresses failed. Listening on %u of %u." #~ msgstr "Ðекоторые адреÑа недоÑтупны. ПроÑлушиваетÑÑ %u из %u." #~ msgid "Listen started" #~ msgstr "ПроÑлушивание началоÑÑŒ" #~ msgid "No valid handles left for listening" #~ msgstr "Ðе оÑталоÑÑŒ допуÑтимых деÑкрипторов Ð´Ð»Ñ Ð¿Ñ€Ð¾ÑлушиваниÑ" #, c-format #~ msgid "Select failed: %s" #~ msgstr "Выбор не удалÑÑ: %s" #, c-format #~ msgid "Have connections: %i, max: %i" #~ msgstr "СущеÑтвующих Ñоединений: %i, макÑимально: %i" #, c-format #~ msgid "Fork failed: %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð´Ð¾Ñ‡ÐµÑ€Ð½ÐµÐ³Ð¾ процеÑа: %s" #~ msgid "Refusing connection: Connection limit exceeded" #~ msgstr "Отказано в Ñоединении: Превышен предел Ñоединений" #~ msgid "Init failed" #~ msgstr "Сбой инициализации" #~ msgid "Listen failed" #~ msgstr "Сбой проÑлушиваниÑ" #~ msgid "Listen finished" #~ msgstr "ПроÑлушивание завершено" #~ msgid "Stopping server" #~ msgstr "ОÑтанавливаетÑÑ Ñервер" #~ msgid "Destroying handle" #~ msgstr "ОпиÑатель уничтожаетÑÑ" #~ msgid "Deactivating modules" #~ msgstr "Выгрузка модулей" #~ msgid "Exiting" #~ msgstr "ЗавершаетÑÑ" #, c-format #~ msgid "%s: %s:%i" #~ msgstr "%s: %s:%i" #, c-format #~ msgid "%s %s" #~ msgstr "%s %s" #, c-format #~ msgid " %s: %s" #~ msgstr " %s: %s" #, c-format #~ msgid " %s:" #~ msgstr " %s:" #~ msgid "No proxy provided" #~ msgstr "ОтÑутÑтвует доверенноÑть" #, c-format #~ msgid "Proxy/credentials stored at %s" #~ msgstr "ДоверенноÑть/параметры доÑтупа Ñохранены в %s" #~ msgid "Running user has no name" #~ msgstr "Текущий пользователь не имеет имени" #, c-format #~ msgid "Mapped to running user: %s" #~ msgstr "ПривÑзка к текущему пользователю: %s" #, c-format #~ msgid "Mapped to local id: %i" #~ msgstr "ПривÑзка к локальному идентификатору: %i" #, c-format #~ msgid "No group %i for mapped user" #~ msgstr "Группа %i Ð´Ð»Ñ Ð¿Ñ€Ð¸Ð²Ñзанного Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð¾Ñ‚ÑутÑтвует" #, c-format #~ msgid "Mapped to local group id: %i" #~ msgstr "ПривÑзка к локальной группе Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€Ð¾Ð¼: %i" #, c-format #~ msgid "Mapped to local group name: %s" #~ msgstr "ПривÑзка к локальной группе Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼: %s" #, c-format #~ msgid "Mapped user's home: %s" #~ msgstr "Домашний каталог привÑзанного пользователÑ: %s" #, c-format #~ msgid "Proxy stored at %s" #~ msgstr "ДоверенноÑть запиÑана в %s" 
#, c-format #~ msgid "Undefined control sequence: %%%s" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ ÑƒÐ¿Ñ€Ð°Ð²Ð»ÑÑŽÑ‰Ð°Ñ Ð¿Ð¾ÑледовательноÑть: %%%s" #, c-format #~ msgid "Local user %s does not exist" #~ msgstr "Локальный пользователь %s не ÑущеÑтвует" #, c-format #~ msgid "Local group %s does not exist" #~ msgstr "Ð›Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ð° %s не ÑущеÑтвует" #, c-format #~ msgid "Remapped to local user: %s" #~ msgstr "ПерепривÑзка к локальному пользователю: %s" #, c-format #~ msgid "Remapped to local id: %i" #~ msgstr "ПерепривÑзка к локальному идентификатору: %i" #, c-format #~ msgid "Remapped to local group id: %i" #~ msgstr "ПерепривÑзка к локальной группе Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€Ð¾Ð¼: %i" #, c-format #~ msgid "Remapped to local group name: %s" #~ msgstr "ПерепривÑзка к локальной группе Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼: %s" #, c-format #~ msgid "Remapped user's home: %s" #~ msgstr "Домашний каталог перепривÑзанного пользователÑ: %s" #~ msgid "Multi-request job description not allowed in GRIDMANAGER dialect" #~ msgstr "" #~ "МножеÑтвенное опиÑание заданий не допуÑкаетÑÑ Ð² диалекте GRIDMANAGER" #~ msgid "%s: Failed to run plugin" #~ msgstr "%s: Сбой при запуÑке подключаемого модулÑ" #~ msgid "%s: Plugin failed" #~ msgstr "%s: Сбой подключаемого модулÑ" #~ msgid "Empty registration collector" #~ msgstr "ПуÑтой Ñборщик региÑтраций" #~ msgid "Passing service's information from collector to registrator" #~ msgstr "Идёт передача информации о Ñлужбе от Ñборщика к региÑтратору" #~ msgid "" #~ "Registered static information: \n" #~ " doc: %s" #~ msgstr "" #~ "ЗарегиÑтрирована ÑтатичеÑÐºÐ°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ: \n" #~ " документ: %s" #~ msgid "" #~ "Information registered without static attributes: \n" #~ " doc: %s" #~ msgstr "" #~ "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð·Ð°Ñ€ÐµÐ³Ð¸Ñтрирована без ÑтатичеÑких аттрибутов: \n" #~ " документ: %s" #~ msgid "Failed to create XMLNode container" #~ msgstr "Ðе удалоÑÑŒ Ñоздать контейнер XMLNode" #~ msgid "Cannot find ARC XMLNode class" #~ msgstr "КлаÑÑ ARC XMLNode не найден" #~ msgid "Cannot create doc argument" #~ msgstr "Ðе удалоÑÑŒ Ñоздать аргумент документации" #~ msgid "Cannot convert doc to Python object" #~ msgstr "Ðе удалоÑÑŒ преобразовать doc в объект Python" #~ msgid "Can't obtain configuration" #~ msgstr "Ðе удалоÑÑŒ получить наÑтройки" #~ msgid "require the specified endpoint type for job submission" #~ msgstr "потребовать указанный тип точки входа Ð´Ð»Ñ Ð·Ð°Ñылки задачи" #~ msgid "Failed to cancel job: %s" #~ msgstr "Ошибка отмены задачи: %s" #~ msgid "Failed retrieving job IDs: Unsupported url (%s) given" #~ msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ñрлыков задач: задан неподдерживаемый URL (%s)" #~ msgid "Failed retrieving job IDs" #~ msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ñрлыков задач" #~ msgid "" #~ "Error encoutered during job ID retrieval. All job IDs might not have been " #~ "retrieved" #~ msgstr "" #~ "Сбой в процеÑÑе Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ñрлыков задач: возможно, не вÑе Ñрлыки " #~ "извлечены" #~ msgid "Service access is not allowed for this user" #~ msgstr "ДоÑтуп к Ñлужбе Ð´Ð»Ñ Ñтого Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚" #~ msgid "ServiceURL missing" #~ msgstr "ОтÑутÑтвует ServiceURL" #~ msgid "" #~ "Protocol is %s. It is recommended to use secure connection with https." #~ msgstr "Протокол %s. РекомендуетÑÑ Ð±ÐµÐ·Ð¾Ð¿Ð°Ñное Ñоединение по https." #~ msgid "Ignoring incomplete log file \"%s\"" #~ msgstr "ИгнорируетÑÑ Ð½ÐµÐ¿Ð¾Ð»Ð½Ñ‹Ð¹ журнальный файл \"%s\"" #~ msgid "Logging UR set of %d URs." #~ msgstr "ЗапиÑываетÑÑ Ð½Ð°Ð±Ð¾Ñ€ UR из %d запиÑей UR." 
#~ msgid "UR set dump: %s" #~ msgstr "Выведен набор запиÑей UR: %s" #~ msgid "Backup file (%s) created." #~ msgstr "Создан резервный файл (%s)." #~ msgid "APEL message file (%s) created." #~ msgstr "Создан файл Ñообщений APEL (%s)." #~ msgid "Running SSM client using: %s" #~ msgstr "Запущен клиент SSM иÑпользующий: %s" #~ msgid "SSM client exit code: %d" #~ msgstr "Код выхода клиента SSM: %d" #~ msgid "Aggregation record (%s) not exist, initialize it..." #~ msgstr "" #~ "ÐÐ³Ñ€ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ (%s) не ÑущеÑтвует, производитÑÑ Ð¸Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ..." #~ msgid "Aggregation record (%s) initialization successful." #~ msgstr "ÐÐ³Ñ€ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ (%s) уÑпешно инициализирована." #~ msgid "" #~ "Some error happens during the aggregation record (%s) initialization." #~ msgstr "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° при инициализации агрегированной запиÑи (%s)." #~ msgid "Aggregation record (%s) read from file successful." #~ msgstr "ÐÐ³Ñ€ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ (%s) уÑпешно прочитана из файла." #~ msgid "Aggregation record (%s) stored successful." #~ msgstr "ÐÐ³Ñ€ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ (%s) уÑпешно Ñохранена." #~ msgid "Some error happens during the aggregation record (%s) storing." #~ msgstr "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° при Ñохранении агрегированной запиÑи (%s)." #~ msgid "APEL aggregation message file (%s) created." #~ msgstr "Создан файл агрегированного ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ APEL (%s)." #~ msgid "SSM client return value: %d" #~ msgstr "Код возврата клиента SSM: %d" #~ msgid "year: %s" #~ msgstr "год: %s" #~ msgid "month: %s" #~ msgstr "меÑÑц: %s" #~ msgid "queue: %s" #~ msgstr "очередь: %s" #~ msgid "query: %s" #~ msgstr "запроÑ: %s" #~ msgid "list size: %d" #~ msgstr "длина ÑпиÑка: %d" #~ msgid "XML: %s" #~ msgstr "XML: %s" #~ msgid "UPDATE Aggregation Record called." #~ msgstr "Вызов метода UPDATE агрегированной запиÑи." #~ msgid "Does not sending empty aggregation/synch message." #~ msgstr "" #~ "ОтÑылка пуÑтого агрегированного/Ñинхронизационного ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð½Ðµ " #~ "производитÑÑ." #~ msgid "synch message: %s" #~ msgstr "Ñинхронизационное Ñообщение: %s" #~ msgid "Protocol is %s, should be https" #~ msgstr "Указан протокол %s, а должен быть https" #~ msgid "Wrong loglevel (%s) config value given!" #~ msgstr "Задано недопуÑтимое значение наÑтройки loglevel (%s)!" #~ msgid "Wrong urdelivery_keepfailed (%s) config value given!" #~ msgstr "Задано недопуÑтимое значение наÑтройки urdelivery_keepfailed (%s)!" #~ msgid "Wrong urdelivery_frequency (%s) config value given!" #~ msgstr "" #~ "Задано недопуÑтимое значение urdelivery_frequency (%s) в наÑтройках!" #~ msgid "Name part is missing by SGAS section!" #~ msgstr "ОтÑутÑтвует подраздел Ñ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸ÐµÐ¼ в разделе SGAS!" #~ msgid "Targeturl config value is missing by SGAS!" #~ msgstr "Значение наÑтройки Targeturl отÑутÑтвует Ð´Ð»Ñ SGAS!" #~ msgid "Block %s is not marked for legacy fallback processing. Skipping." #~ msgstr "" #~ "Блок %s не помечен Ð´Ð»Ñ Ð°Ð»ÑŒÑ‚ÐµÑ€Ð½Ð°Ñ‚Ð¸Ð²Ð½Ð¾Ð¹ обработки уÑтаревших запиÑей. " #~ "ПропуÑкаетÑÑ." #~ msgid "Wrong urbatchsize (%s) config value given by SGAS!" #~ msgstr "Задано недопуÑтимое значение наÑтройки urbatchsize (%s) в SGAS!" #~ msgid "Name part is missing by APEL section!" #~ msgstr "ОтÑутÑтвует подраздел Ñ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸ÐµÐ¼ в разделе APEL!" #~ msgid "Targeturl config value is missing by APEL!" #~ msgstr "Значение наÑтройки Targeturl отÑутÑтвует Ð´Ð»Ñ APEL!" #~ msgid "Wrong benchmark_value (%s) config value given by APEL!" 
#~ msgstr "Задано недопуÑтимое значение наÑтройки benchmark_value (%s) в APEL!" #~ msgid "Wrong urbatchsize (%s) config value given by APEL!" #~ msgstr "Задано недопуÑтимое значение наÑтройки urbatchsize (%s) в APEL!" #~ msgid "Sent jobIDs: (nr. of job(s) %d)" #~ msgstr "Отправленные jobID: (вÑего %d задач(и))" #~ msgid "Unable to create adapter for the specific reporting destination type" #~ msgstr "Ðевозможно Ñоздать адаптер Ð´Ð»Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ð¾Ð³Ð¾ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¾Ñ‚Ñ‡Ñ‘Ñ‚Ð½Ð¾Ñти" #~ msgid "Insert filter element: <%s,%s>" #~ msgstr "Ð’Ñтавка Ñлемента фильтра: <%s,%s>" #~ msgid "Not set filter for this URL (%s)." #~ msgstr "Ð”Ð»Ñ Ñтого URL (%s) фильтр не назначен." #~ msgid "Current job's VO name: %s" #~ msgstr "Ð˜Ð¼Ñ Ð’Ðž текущей задачи: %s" #~ msgid "VO filter for host: %s" #~ msgstr "Фильтр ВО Ð´Ð»Ñ ÑƒÐ·Ð»Ð°: %s" #~ msgid "Read archive file %s" #~ msgstr "ЧитаетÑÑ Ð°Ñ€Ñ…Ð¸Ð²Ð½Ñ‹Ð¹ файл %s" #~ msgid "" #~ "Could not read archive file %s for job log file %s (%s), generating new " #~ "Usage Record" #~ msgstr "" #~ "Ðевозможно прочеÑть архивный файл %s Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° журнала задач %s (%s), " #~ "ÑоздаётÑÑ Ð½Ð¾Ð²Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ Usage Record" #~ msgid "" #~ "Missing required Usage Record element \"RecordIdentity\", in job log file " #~ "%s" #~ msgstr "" #~ "ОтÑутÑтвует обÑзательный Ñлемент Usage Record \"RecordIdentity\", в файле " #~ "журнала задач %s" #~ msgid "VO (%s) not set for this (%s) SGAS server by VO filter." #~ msgstr "Фильтр ВО (%s) не наÑтроен Ð´Ð»Ñ Ñтого Ñервера SGAS (%s)." #~ msgid "[VO filter] Job log will be not send. %s." #~ msgstr "[VO filter] запиÑÑŒ о задаче не будет отправлена. %s." #~ msgid "Missing required element \"Status\" in job log file %s" #~ msgstr "" #~ "ОтÑутÑтвует обÑзательный Ñлемент \"Status\" в файле журнала задач %s" #~ msgid "Failed to create archive directory %s: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать архивный каталог %s: %s" #~ msgid "Archiving Usage Record to file %s" #~ msgstr "Ðрхивирование запиÑи Usage Record в файл %s" #~ msgid "Failed to write file %s: %s" #~ msgstr "Сбой при запиÑи файла %s: %s" #~ msgid "Missing required element \"CpuDuration\" in job log file %s" #~ msgstr "" #~ "ОтÑутÑтвует обÑзательный Ñлемент \"CpuDuration\" в файле журнала задач %s" #~ msgid "Set non standard benchmark type: %s" #~ msgstr "Задан неÑтандартный тип Ñталонного теÑта: %s" #~ msgid "Ignored incoming benchmark value: %s, Use float value!" #~ msgstr "" #~ "ИгнорируетÑÑ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð½Ð¾Ðµ значение Ñталонного теÑта: %s, иÑпользуйте " #~ "значение Ñ Ð¿Ð»Ð°Ð²Ð°ÑŽÑ‰ÐµÐ¹ запÑтой!" #~ msgid "Failed to delete file %s:%s" #~ msgstr "Ðе удалоÑÑŒ удалить файл %s: %s" #~ msgid "UsageRecords registration response: %s" #~ msgstr "Отклик региÑтрации запиÑи UsageRecords: %s" #~ msgid "Initialised, archived job log dir: %s" #~ msgstr "" #~ "Инициализирован каталог архивного Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ ÑƒÑ‡Ñ‘Ñ‚Ð½Ñ‹Ñ… запиÑей о задачах: %s" #~ msgid "Incoming time range: %s" #~ msgstr "Промежуток времени заÑылки: %s" #~ msgid "Requested time range: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " #~ msgstr "Запрошенный промежуток времени: %d.%d.%d. 0:00 - %d.%d.%d. %d:%d " #~ msgid "Interactive mode." #~ msgstr "Интерактивный режим." 
#~ msgid "Could not open log directory \"%s\": %s" #~ msgstr "Ðевозможно открыть каталог Ñ Ð¶ÑƒÑ€Ð½Ð°Ð»Ð°Ð¼Ð¸ \"%s\": %s" #~ msgid "Error reading log directory \"%s\": %s" #~ msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° журналов \"%s\": %s" #~ msgid "Finished, job log dir: %s" #~ msgstr "Завершено, каталог журнала задач: %s" #~ msgid "Initialised, job log dir: %s" #~ msgstr "Запущено, каталог журнала задач: %s" #~ msgid "Expiration time: %d seconds" #~ msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¸ÑÑ‚ÐµÑ‡ÐµÐ½Ð¸Ñ Ð´ÐµÐ¹ÑтвительноÑти: %d Ñекунд" #~ msgid "Could not open output directory \"%s\": %s" #~ msgstr "Ðевозможно открыть выходной каталог \"%s\": %s" #~ msgid "Creating the output directory \"%s\"" #~ msgstr "СоздаетÑÑ Ð²Ñ‹ÑŒÐ¾Ð´Ð½Ð¾Ð¹ каталог %s" #~ msgid "Failed to create output directory \"%s\": %s" #~ msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð²Ñ‹Ñ…Ð¾Ð´Ð½Ð¾Ð³Ð¾ каталога %s: %s" #~ msgid "Removing outdated job log file %s" #~ msgstr "УдалÑетÑÑ ÑƒÑтаревший файл журнала задач %s" #~ msgid "Missing option argument" #~ msgstr "ОтÑутÑтвует аргумент опции" #~ msgid "Unrecognized option" #~ msgstr "ÐÐµÐ¾Ð¿Ð¾Ð·Ð½Ð°Ð½Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ" #~ msgid "Add URL value before a topic. (for example: -u [...] -t [...])" #~ msgstr "Добавьте значение URL перед темой. (например: -u [...] -t [...])" #~ msgid "Force resend all aggregation records." #~ msgstr "Принудительно отправить заново вÑе агрегированные запиÑи." #~ msgid "Sync message(s) will be send..." #~ msgstr "Ð¡Ð¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ñинхронизации будут отправлены..." #~ msgid "Options processing error" #~ msgstr "Ошибка при обработке опций" #~ msgid "Failed processing configuration file %s" #~ msgstr "Ðе удалоÑÑŒ обработать файл наÑтроек %s" #~ msgid "Topic missing for a (%s) host." #~ msgstr "ОтÑутÑтвует тема Ð´Ð»Ñ Ñервера (%s)." #~ msgid "Aggregation record(s) sending to %s" #~ msgstr "ÐÐ³Ñ€ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ отправлÑетÑÑ Ð½Ð° %s" #~ msgid "resend opt: %s" #~ msgstr "Ð¾Ð¿Ñ†Ð¸Ñ Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð¾Ð¹ отправки: %s" #~ msgid " Use arclean to remove retrieved jobs from job list" #~ msgstr "" #~ " ИÑпользуйте arclean Ð´Ð»Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð½Ñ‹Ñ… задач из ÑпиÑка" #~ msgid "No execuable path specified in GRIDMANAGER dialect" #~ msgstr "Ðе задан путь к иÑполнÑемому файлу в диалекте GRIDMANAGER" #~ msgid "Executable path not specified ('executable' attribute)" #~ msgstr "Ðе задан путь к иÑполнÑемому файлу (атрибут 'executable')" #~ msgid "Missing executable" #~ msgstr "ОтÑутÑтвует Ñлемент Executable" #~ msgid "Error evaulating profile" #~ msgstr "Ошибка проверки профилÑ" #~ msgid "Adding resoure-id value: %s" #~ msgstr "ДобавлÑетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ resoure-id: %s" #~ msgid "" #~ "Can not access CA certificates directory: %s. The certificates will not " #~ "be verified." #~ msgstr "" #~ "Ðе удалоÑÑŒ открыть каталог Ñертификатов CA: %s. Сертификаты не будут " #~ "проверены." #~ msgid "" #~ "Unable to locate the \"%s\" plugin. Please refer to installation " #~ "instructions and check if package providing support for %s plugin is " #~ "installed" #~ msgstr "" #~ "Ðе удалоÑÑŒ обнаружить подключаемый модуль \"%s\". ПожалуйÑта, " #~ "проконÑультируйтеÑÑŒ Ñ Ð¸Ð½Ñтрукцией по уÑтановке и проверьте, уÑтановлен ли " #~ "пакет, Ñодержащий модуль \"%s\"." 
#~ msgid "" #~ "The VOMS server with the information:\n" #~ "\t%s\"\n" #~ "can not be reached, please make sure it is available" #~ msgstr "" #~ "Ðевозможно ÑвÑзатьÑÑ Ñ Ñервером VOMS Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸ÐµÐ¹:\n" #~ "\t%s\"\n" #~ "пожалуйÑта, проверьте, доÑтупен ли Ñтот Ñервер" #~ msgid "Please choose the NSS database you would use (1-%d): " #~ msgstr "ПожалуйÑта, выберите базу данных NSS Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ (1-%d): " #~ msgid "Failed to authenticate to token %s." #~ msgstr "Ðе удалоÑÑŒ аутентифицироватьÑÑ Ðº маркёру %s." #~ msgid "Starting hepler process: %s" #~ msgstr "ЗапуÑкаетÑÑ Ð¿Ñ€Ð¾Ñ†ÐµÑÑ Ð°ÑÑиÑтента: %s" #~ msgid "Running command %s" #~ msgstr "ВыполнÑетÑÑ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° %s" #~ msgid "Bad name for executable: " #~ msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ Ð´Ð»Ñ Ð¸ÑполнÑемого файла: " #~ msgid "Error getting info from statvfs for the path %s:" #~ msgstr "Ошибка Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ от statvfs Ð´Ð»Ñ Ð¿ÑƒÑ‚Ð¸ %s:" #~ msgid "Closing channel (retrieve) due to local read error :%s" #~ msgstr "" #~ "Прерывание канала (получение) в ÑвÑзи Ñ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð¾Ð¹ ошибкой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ :%s" #~ msgid "SOAP invokation failed" #~ msgstr "Ðе удалоÑÑŒ инициализировать SOAP" #~ msgid "Can not get the delegation credential: %s from delegation service:%s" #~ msgstr "" #~ "Ðе удалоÑÑŒ получить делегированные параметры доÑтупа %s от Ñлужбы " #~ "делегированиÑ:%s" #~ msgid "Application Options:" #~ msgstr "Параметры приложениÑ:" #~ msgid "Failed to delete private key that attaches to certificate: %s" #~ msgstr "Сбой ÑƒÐ½Ð¸Ñ‡Ñ‚Ð¾Ð¶ÐµÐ½Ð¸Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¾Ð³Ð¾ ключа, прикреплÑемого к Ñертификату: %s" #~ msgid "Missing file name in [arex/jura/archiving] logfile" #~ msgstr "ОтÑутÑтвует Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° в журнальном файле [arex/jura/archiving] " #~ msgid "Wrong number in manage_frequency: %s" #~ msgstr "ÐедопуÑтимое значение в smanage_frequency: %s" #~ msgid ": Accounting archive management tool is not specified" #~ msgstr ": Ðе указано ÑредÑтво ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð°Ñ€Ñ…Ð¸Ð²Ð¾Ð¼ учётных запиÑей" #~ msgid ": Failure creating slot for accounting archive manager child process" #~ msgstr "" #~ ": Сбой подготовки дочернего процеÑÑа ÑредÑтва ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð°Ñ€Ñ…Ð¸Ð²Ð¾Ð¼ учётных " #~ "запиÑей" #~ msgid ": Failure starting accounting archive manager child process" #~ msgstr "" #~ ": Сбой запуÑка дочернего процеÑÑа ÑредÑтва ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð°Ñ€Ñ…Ð¸Ð²Ð¾Ð¼ учётных " #~ "запиÑей" #~ msgid " [ egee:jdl ] " #~ msgstr " [ egee:jdl ] " #~ msgid " [ nordugrid:jsdl ] " #~ msgstr " [ nordugrid:jsdl ] " #~ msgid "" #~ "Cannot use multiple session directories and remotegmdirs at the same time" #~ msgstr "" #~ "ÐедопуÑтимо одновременное иÑпользование неÑкольких каталогов ÑеанÑов и " #~ "remotegmdirs" #~ msgid "No non-draining control or session directories available" #~ msgstr "" #~ "Ðет контрольных каталогов или каталогов ÑеÑÑий не в ÑоÑтоÑнии разгрузки" #~ msgid "" #~ "Supported constraints are:\n" #~ " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start " #~ "from now)\n" #~ " validityEnd=time\n" #~ " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod " #~ "and validityEnd\n" #~ " not specified, the default is 12 hours for local proxy, and 168 hours " #~ "for delegated\n" #~ " proxy on myproxy server)\n" #~ " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, " #~ "the default\n" #~ " is the minimum value of 12 hours and validityPeriod)\n" #~ " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy " #~ "server,\n" #~ " e.g. 
43200 or 12h or 12H; if not specified, the default is the minimum " #~ "value of\n" #~ " 12 hours and validityPeriod (which is lifetime of the delegated proxy " #~ "on myproxy server))\n" #~ " proxyPolicy=policy content\n" #~ " proxyPolicyFile=policy file" #~ msgstr "" #~ "Поддерживаемые ограничениÑ:\n" #~ " validityStart=Ð²Ñ€ÐµÐ¼Ñ (например, 2008-05-29T10:20:30Z; еÑли не указано, " #~ "то начинаетÑÑ Ð½ÐµÐ¼ÐµÐ´Ð»ÐµÐ½Ð½Ð¾)\n" #~ " validityEnd=времÑ\n" #~ " validityPeriod=Ð²Ñ€ÐµÐ¼Ñ (например, 43200, или 12h, или 12H; еÑли не " #~ "указаны ни validityPeriod,\n" #~ " ни validityEnd, то Ñрок дейÑÑ‚Ð²Ð¸Ñ Ð¿Ð¾ умолчанию ÑоÑтавлÑет 12 чаÑов Ð´Ð»Ñ " #~ "локальной доверенноÑти,\n" #~ " и 168 чаÑов Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð¾Ð¹ доверенноÑти на Ñервере MyProxy)\n" #~ " vomsACvalidityPeriod=Ð²Ñ€ÐµÐ¼Ñ (например, 43200, или 12h, или 12H; еÑли не " #~ "указано, то иÑпользуетÑÑ\n" #~ " наименьшее между 12 чаÑами и значением validityPeriod)\n" #~ " myproxyvalidityPeriod=Ð²Ñ€ÐµÐ¼Ñ (Ñрок годноÑти доверенноÑти, делегированной " #~ "через Ñервер MyProxy\n" #~ " например, 43200, или 12h, или 12H; еÑли не указано, то иÑпользуетÑÑ " #~ "наименьшее между 12 чаÑами\n" #~ " и значением validityPeriod - Ñроком годноÑти доверенноÑти, " #~ "делегированной через Ñервер MyProxy)\n" #~ " proxyPolicy=Ñодержимое политики\n" #~ " proxyPolicyFile=файл политики" #~ msgid "" #~ "print all information about this proxy. \n" #~ " In order to show the Identity (DN without CN as suffix for " #~ "proxy) \n" #~ " of the certificate, the 'trusted certdir' is needed." #~ msgstr "" #~ "вывеÑти вÑÑŽ информацию о данной доверенноÑти. \n" #~ " Ð”Ð»Ñ Ð²Ñ‹Ð²Ð¾Ð´Ð° перÑональной информации (DN без CN как ÑÑƒÑ„Ñ„Ð¸ÐºÑ " #~ "доверенноÑти) \n" #~ " из Ñертификата, необходим 'trusted certdir'." #~ msgid "username to MyProxy server" #~ msgstr "Ð˜Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ñервера MyProxy" #~ msgid "" #~ "command to MyProxy server. The command can be PUT or GET.\n" #~ " PUT/put/Put -- put a delegated credential to the MyProxy " #~ "server; \n" #~ " GET/get/Get -- get a delegated credential from the MyProxy " #~ "server, \n" #~ " credential (certificate and key) is not needed in this " #~ "case. \n" #~ " MyProxy functionality can be used together with VOMS\n" #~ " functionality.\n" #~ msgstr "" #~ "инÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ Ñерверу MyProxy. 
Возможны две инÑтрукции: PUT и GET:\n" #~ " PUT/put/Put -- Ñохранить делегированный Ñертификат на " #~ "Ñервере MyProxy;\n" #~ " GET/get/Get -- получить делегированный Ñертификат Ñ Ñервера " #~ "MyProxy,\n" #~ " в Ñтом Ñлучае не требуютÑÑ Ð»Ð¸Ñ‡Ð½Ñ‹Ðµ " #~ "Ñертификаты и ключи.\n" #~ " ИнÑтрукции MyProxy и VOMS могут иÑпользоватьÑÑ " #~ "одновременно.\n" #~ msgid "use NSS credential database in the Firefox profile" #~ msgstr "иÑпользовать базу данных параметров доÑтупа NSS из Ð¿Ñ€Ð¾Ñ„Ð¸Ð»Ñ Firefox" #~ msgid "" #~ "$X509_VOMS_FILE, and $X509_VOMSES are not set;\n" #~ "User has not specify the location for vomses information;\n" #~ "There is also not vomses location information in user's configuration " #~ "file;\n" #~ "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, " #~ "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/" #~ "vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the " #~ "corresponding sub-directory" #~ msgstr "" #~ "$X509_VOMS_FILE и $X509_VOMSES не наÑтроены;\n" #~ "Пользователь не указал раÑположение файла vomses;\n" #~ "РаÑположение файла vomses не найдено в файле наÑтроек пользователÑ;\n" #~ "Файл vomses не обнаружен в ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/" #~ "etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/" #~ "vomses, /etc/grid-security/vomses, а также в ÑоответÑтвующих подкаталогах" #~ msgid "No stream response" #~ msgstr "Ðе получен ответ Ñ Ñервера" #~ msgid "Returned msg from myproxy server: %s %d" #~ msgstr "Сервер myproxy возвратил Ñледующее Ñообщение: %s %d" #~ msgid "There are %d certificates in the returned msg" #~ msgstr "Ответное Ñообщение Ñодержит %d Ñертификатов" #~ msgid "Delegate proxy failed" #~ msgstr "Ðе удалоÑÑŒ делегирование доверенноÑти" #~ msgid "Returned msg from voms server: %s " #~ msgstr "Сообщение Ñ Ñервера VOMS: %s " #~ msgid "service message" #~ msgstr "Ñообщение Ñлужбы" #~ msgid "The arcecho command is a client for the ARC echo service." #~ msgstr "Команда arcecho ÑвлÑетÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ñким приложением Ñлужбы ARC echo." #~ msgid "" #~ "The service argument is a URL to an ARC echo service.\n" #~ "The message argument is the message the service should return." #~ msgstr "" #~ "Ðргументом Ñлужбы должен быть URL Ñхо-Ñервера ARC.\n" #~ "Ðргументом ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð´Ð¾Ð»Ð¶Ð½Ð¾ быть Ñообщение, которое Ñтот Ñервер должен " #~ "возвратить." #~ msgid "service_url" #~ msgstr "service_url" #~ msgid "path to config file" #~ msgstr "путь к файлу наÑтроек" #~ msgid "SOAP Request failed: No response" #~ msgstr "Сбой запроÑа SOAP: Ðет ответа" #~ msgid "SOAP Request failed: Error" #~ msgstr "Сбой запроÑа SOAP: Ошибка" #~ msgid "No in SOAP response" #~ msgstr "Отзыв SOAP не Ñодержит " #~ msgid "No in SAML response" #~ msgstr "Ð’ отклике SAML отÑутÑтвует " #~ msgid "URL [query]" #~ msgstr "URL [запроÑ]" #~ msgid "" #~ "The arcwsrf command is used for obtaining the WS-ResourceProperties of\n" #~ "services." #~ msgstr "" #~ "Команда arcwsrf иÑпользуетÑÑ Ð´Ð»Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ð¹ WS-" #~ "ResourceProperties\n" #~ "различных Ñлужб." 
#~ msgid "Request for specific Resource Property" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð·Ð°Ð´Ð°Ð½Ð½Ð¾Ð³Ð¾ ÑвойÑтва реÑурÑа" #~ msgid "[-]name" #~ msgstr "[-]адреÑ" #~ msgid "Missing URL" #~ msgstr "ОтÑутÑтвует URL" #~ msgid "Too many parameters" #~ msgstr "Слишком много параметров" #~ msgid "Query is not a valid XML" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð½Ðµ ÑвлÑетÑÑ ÐºÐ¾Ñ€Ñ€ÐµÐºÑ‚Ð½Ñ‹Ð¼ XML" #~ msgid "Failed to create WSRP request" #~ msgstr "Ðе удалоÑÑŒ Ñоздать корректный Ð·Ð°Ð¿Ñ€Ð¾Ñ WSRP" #~ msgid "Specified URL is not valid" #~ msgstr "Указанный Ð°Ð´Ñ€ÐµÑ Ð½ÐµÐ´ÐµÐ¹Ñтвителен" #~ msgid "Failed to send request" #~ msgstr "Ðе удалоÑÑŒ отправить запроÑ" #~ msgid "Failed to obtain SOAP response" #~ msgstr "Ðе удалоÑÑŒ получить отзыв SOAP" #~ msgid "SOAP fault received" #~ msgstr "Получена ошибка SOAP" #~ msgid "Creating an A-REX client" #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ A-REX" #~ msgid "Unable to create SOAP client used by AREXClient." #~ msgstr "Ðе удалоÑÑŒ Ñоздать клиент SOAP иÑпользующийÑÑ AREXClient." #~ msgid "Failed locating credentials." #~ msgstr "Сбой Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа." #~ msgid "Failed initiate client connection." #~ msgstr "Сбой инициализации ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð¾Ð¼." #~ msgid "Client connection has no entry point." #~ msgstr "ОтÑутÑтвует точка входа в клиентÑкую цепь." #~ msgid "Re-creating an A-REX client" #~ msgstr "ВоÑÑоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ A-REX" #~ msgid "AREXClient was not created properly." #~ msgstr "AREXClient не был Ñоздан надлежащим образом." #~ msgid "%s request to %s failed. No expected response." #~ msgstr "Сбой запроÑа %s к %s. ОтÑутÑтвует ожидаемый отклик." #~ msgid "Creating and sending submit request to %s" #~ msgstr "СоздаётÑÑ Ð¸ отправлÑетÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° заÑылку к %s" #~ msgid "Unable to retrieve status of job (%s)" #~ msgstr "Ошибка при получении информации о ÑоÑтоÑнии задачи (%s)" #~ msgid "Creating and sending ISIS information query request to %s" #~ msgstr "Создание и отправка запроÑа об информации ISIS на %s" #~ msgid "Service %s of type %s ignored" #~ msgstr "ИгнорируетÑÑ ÑÐµÑ€Ð²Ð¸Ñ %s типа %s" #~ msgid "No execution services registered in the index service" #~ msgstr "Ðи одна Ñлужба иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ зарегиÑтрирована в Ñлужбе региÑтрации" #~ msgid "Creating and sending terminate request to %s" #~ msgstr "Создание и отправка запроÑа о прерывании задачи на %s" #~ msgid "Job termination failed" #~ msgstr "Ошибка Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Creating and sending clean request to %s" #~ msgstr "" #~ "Создание и отправка запроÑа об удалении результатов работы задачи на %s" #~ msgid "Creating and sending job description retrieval request to %s" #~ msgstr "Создание и отправка запроÑа на получение опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ на %s" #~ msgid "Creating and sending job migrate request to %s" #~ msgstr "Создание и отправка запроÑа о миграции задачи на %s" #~ msgid "Renewal of ARC1 jobs is not supported" #~ msgstr "Возобновление задач ARC1 не поддерживаетÑÑ" #~ msgid "Failed retrieving job status information" #~ msgstr "Ðе удалоÑÑŒ извлечь информацию о ÑоÑтоÑнии задачи" #~ msgid "Cleaning of BES jobs is not supported" #~ msgstr "ОчиÑтка результатов задач BES не поддерживаетÑÑ" #~ msgid "Renewal of BES jobs is not supported" #~ msgstr "Возобновление задач BES не поддерживаетÑÑ" #~ msgid "Resuming BES jobs is not supported" #~ msgstr "ПерезапуÑк задач BES не поддерживаетÑÑ" #~ msgid "Collecting Job (A-REX jobs) information." 
#~ msgstr "СобираетÑÑ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задачах (задачи на A-REX)" #~ msgid "No job identifier returned by BES service" #~ msgstr "Служба BES не возвратила ни одного Ñрлыка задачи" #~ msgid "Failed adapting job description to target resources" #~ msgstr "Сбой Ð°Ð´Ð°Ð¿Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¾Ð¿Ð¸ÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ Ð´Ð»Ñ Ð·Ð°Ñылки по назначению" #~ msgid "" #~ "Unable to migrate job. Job description is not valid in the %s format: %s" #~ msgstr "" #~ "Ðевозможно мигрировать задачу. ОпиÑание задачи в формате %s " #~ "недейÑтвительно: %s" #~ msgid "The Service doesn't advertise its Quality Level." #~ msgstr "Служба не Ñообщает о Ñвоём уровне качеÑтва." #~ msgid "Generating A-REX target: %s" #~ msgstr "СоздаётÑÑ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ A-REX: %s" #~ msgid "The Service doesn't advertise its Interface." #~ msgstr "Служба не Ñообщает о Ñвоём интерфейÑе." #~ msgid "The Service doesn't advertise its Serving State." #~ msgstr "Служба не Ñообщает о Ñвоём ÑоÑтоÑнии обÑлуживаниÑ." #~ msgid "Creating a CREAM client" #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ CREAM" #~ msgid "Unable to create SOAP client used by CREAMClient." #~ msgstr "Ðе удалоÑÑŒ Ñоздать клиент SOAP иÑпользующийÑÑ CREAMClient." #~ msgid "CREAMClient not created properly" #~ msgstr "CREAMClient не был Ñоздан надлежащим образом" #~ msgid "Empty response" #~ msgstr "ПуÑтой ответ" #~ msgid "Request failed: %s" #~ msgstr "Сбой запроÑа: %s" #~ msgid "Creating and sending a status request" #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии" #~ msgid "Unable to retrieve job status." #~ msgstr "Ðевозможно извлечь информацию о ÑоÑтоÑнии задачи." #~ msgid "Creating and sending request to terminate a job" #~ msgstr "Создание и отправка запроÑа о прерывании задачи" #~ msgid "Creating and sending request to clean a job" #~ msgstr "Создание и отправка запроÑа об удалении результатов работы задачи" #~ msgid "Creating and sending request to resume a job" #~ msgstr "Создание и отправка запроÑа о возобновлении задачи" #~ msgid "Creating and sending request to list jobs" #~ msgstr "Создание и отправка запроÑа о проÑмотре ÑпиÑка задач" #~ msgid "Creating and sending job register request" #~ msgstr "Создание и отправка запроÑа о региÑтрации задачи" #~ msgid "No job ID in response" #~ msgstr "Отзыв не Ñодержит Ñрлыка задачи" #~ msgid "Creating and sending job start request" #~ msgstr "Создание и отправка запроÑа о начале задачи" #~ msgid "Creating delegation" #~ msgstr "Создание делегированиÑ" #~ msgid "Malformed response: missing getProxyReqReturn" #~ msgstr "ИÑкажённый отзыв: отÑутÑтвует getProxyReqReturn" #~ msgid "Delegatable credentials expired: %s" #~ msgstr "Срок дейÑÑ‚Ð²Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€ÑƒÐµÐ¼Ñ‹Ñ… параметров доÑтупа иÑтек: %s" #~ msgid "Failed signing certificate request" #~ msgstr "Сбой подпиÑи запроÑа Ñертификата" #~ msgid "Failed putting signed delegation certificate to service" #~ msgstr "Сбой при передаче подпиÑанного Ñертификата Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ð° ÑервиÑ" #~ msgid "Failed cleaning job: %s" #~ msgstr "Сбой очиÑтки задачи: %s" #~ msgid "Failed canceling job: %s" #~ msgstr "Сбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸: %s" #~ msgid "Renewal of CREAM jobs is not supported" #~ msgstr "Возобновление задач CREAM не поддерживаетÑÑ" #~ msgid "Failed resuming job: %s" #~ msgstr "Сбой Ð²Ð¾Ð·Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s" #~ msgid "Failed creating signed delegation certificate" #~ msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñанного Ñертификата делегированиÑ" #~ msgid "Unable to submit job. 
Job description is not valid in the %s format" #~ msgstr "" #~ "Ðевозможно заÑлать задачу. ОпиÑание задачи в формате %s недейÑтвительно" #~ msgid "Failed registering job" #~ msgstr "Сбой региÑтрации задачи" #~ msgid "Failed starting job" #~ msgstr "Сбой запуÑка задачи" #~ msgid "Failed creating singed delegation certificate" #~ msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñанного Ñертификата делегированиÑ" #~ msgid "Unknown operator '%s' in attribute require in Version element" #~ msgstr "ÐеизвеÑтный оператор '%s' в атрибуте require Ñлемента Version" #~ msgid "Multiple '%s' elements are not supported." #~ msgstr "МножеÑтвенные Ñлементы '%s' не поддерживаютÑÑ." #~ msgid "The 'exclusiveBound' attribute to the '%s' element is not supported." #~ msgstr "Ðтрибут 'exclusiveBound' Ñлемента '%s' не поддерживаетÑÑ." #~ msgid "The 'epsilon' attribute to the 'Exact' element is not supported." #~ msgstr "Ðтрибут 'epsilon' Ñлемента 'Exact' не поддерживаетÑÑ." #~ msgid "Parsing error: Value of %s element can't be parsed as number" #~ msgstr "" #~ "Ошибка разбора: Значение Ñлемента %s не может быть разобрано как чиÑло" #~ msgid "" #~ "Parsing error: Elements (%s) representing upper range have different " #~ "values" #~ msgstr "Ошибка разбора: Элементы (%s) задающие верхнюю границу различаютÑÑ" #~ msgid "" #~ "Parsing error: Elements (%s) representing lower range have different " #~ "values" #~ msgstr "Ошибка разбора: Элементы (%s) задающие нижнюю границу различаютÑÑ" #~ msgid "" #~ "Parsing error: Value of lower range (%s) is greater than value of upper " #~ "range (%s)" #~ msgstr "" #~ "Ошибка разбора: Значение нижней границы (%s) превышает значение верхней " #~ "(%s)" #~ msgid "[ARCJSDLParser] Not a JSDL - missing JobDescription element" #~ msgstr "[ARCJSDLParser] Это не JSDL - отÑутÑтвует Ñлемент JobDescription" #~ msgid "" #~ "[ARCJSDLParser] Error during the parsing: missed the name attributes of " #~ "the \"%s\" Environment" #~ msgstr "" #~ "[ARCJSDLParser] Ошибка при разборе: отÑутÑтвует атрибут name в Ñлементе " #~ "Environment \"%s\"" #~ msgid "[ARCJSDLParser] RemoteLogging URL is wrongly formatted." #~ msgstr "[ARCJSDLParser] Ðеверный формат RemoteLogging URL." #~ msgid "[ARCJSDLParser] priority is too large - using max value 100" #~ msgstr "" #~ "[ARCJSDLParser] Ñлишком выÑокий приоритет - иÑпользуетÑÑ Ð¼Ð°ÐºÑимальное " #~ "значение 100" #~ msgid "" #~ "Lower bounded range is not supported for the 'TotalCPUCount' element." #~ msgstr "" #~ "Интервал Ñ Ð½Ð¸Ð¶Ð½ÐµÐ¹ границей не поддерживаетÑÑ Ð´Ð»Ñ Ñлемента 'TotalCPUCount'." #~ msgid "" #~ "Parsing the \"require\" attribute of the \"QueueName\" nordugrid-JSDL " #~ "element failed. An invalid comparison operator was used, only \"ne\" or " #~ "\"eq\" are allowed." #~ msgstr "" #~ "Сбой разбора атрибута \"require\" Ñлемента \"QueueName\" из nordugrid-" #~ "JSDL. ИÑпользуетÑÑ Ð½ÐµÐ´Ð¾Ð¿ÑƒÑтимый оператор ÑравнениÑ, допуÑкаютÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ " #~ "\"ne\" или \"eq\"." #~ msgid "No URI element found in Location for file %s" #~ msgstr "Ðе обнаружено Ñлементов URI в Location Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s" #~ msgid "[JDLParser] Semicolon (;) is not allowed inside brackets, at '%s;'." #~ msgstr "" #~ "[JDLParser] Точка Ñ Ð·Ð°Ð¿Ñтой (;) не допуÑкаетÑÑ Ð²Ð½ÑƒÑ‚Ñ€Ð¸ Ñкобок, Ñтрока " #~ "'%s;'." 
#~ msgid "[JDLParser] This kind of JDL descriptor is not supported yet: %s" #~ msgstr "[JDLParser] Этот тип деÑкриптора JDL пока не поддерживаетÑÑ: %s" #~ msgid "[JDLParser] Attribute named %s has unknown value: %s" #~ msgstr "[JDLParser] У атрибута %s недейÑтвительное значение: %s" #~ msgid "Not enough outputsandboxdesturi elements!" #~ msgstr "ÐедоÑтаточно Ñлементов outputsandboxdesturi!" #~ msgid "" #~ "[JDLParser] Environment variable has been defined without any equals sign." #~ msgstr "" #~ "[JDLParser] ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ Ñреды задана без иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ Ð·Ð½Ð°ÐºÐ¾Ð² равенÑтва." #~ msgid "[JDLParser]: Unknown attribute name: '%s', with value: %s" #~ msgstr "[JDLParser]: ÐеизвеÑтное название атрибута: '%s', значение: %s" #~ msgid "The inputsandboxbaseuri JDL attribute specifies an invalid URL." #~ msgstr "Ðтрибут JDL inputsandboxbaseuri задаёт недопуÑтимый URL." #~ msgid "[JDLParser] Syntax error found during the split function." #~ msgstr "" #~ "[JDLParser] Обнаружена ÑинтакÑичеÑÐºÐ°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° при выполнении разбиениÑ." #~ msgid "[JDLParser] Lines count is zero or other funny error has occurred." #~ msgstr "[JDLParser] Ðулевое количеÑтво Ñтрок, или Ð´Ñ€ÑƒÐ³Ð°Ñ Ð½ÐµÐ¿Ð¾Ð½ÑÑ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°." #~ msgid "" #~ "[JDLParser] JDL syntax error. There is at least one equals sign missing " #~ "where it would be expected." #~ msgstr "" #~ "[JDLParser] СинтакÑичеÑÐºÐ°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° JDL. По крайней мере один из ожидаемых " #~ "знаков равенÑтва отÑутÑтвует." #~ msgid "Found %u service endpoints from the index service at %s" #~ msgstr "Обнаружено %u точек входа Ñлужб в каталоге на %s" #~ msgid "Cleaning of UNICORE jobs is not supported" #~ msgstr "Удаление задач UNICORE не поддерживаетÑÑ" #~ msgid "Canceling of UNICORE jobs is not supported" #~ msgstr "Прерывание задач UNICORE не поддерживаетÑÑ" #~ msgid "Renewal of UNICORE jobs is not supported" #~ msgstr "Возобновление задач UNICORE не поддерживаетÑÑ" #~ msgid "Resumation of UNICORE jobs is not supported" #~ msgstr "ПерезапуÑк задач UNICORE не поддерживаетÑÑ" #~ msgid "Creating a UNICORE client" #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ UNICORE" #~ msgid "Failed to find delegation credentials in client configuration" #~ msgstr "" #~ "Сбой Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð² наÑтройках клиента" #~ msgid "Failed to initiate delegation" #~ msgstr "Сбой инициализации делегирование" #~ msgid "Submission request failed" #~ msgstr "Сбой запроÑа отправки задачи" #~ msgid "Submission request succeed" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¾ заÑылке Ð·Ð°Ð´Ð°Ð½Ð¸Ñ ÑƒÐ´Ð°Ð»ÑÑ" #~ msgid "There was no response to a submission request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± отправке задачи" #~ msgid "A response to a submission request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ заÑылке не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "There is no connection chain configured" #~ msgstr "Ðе наÑтроена цепочка ÑвÑзи" #~ msgid "Submission returned failure: %s" #~ msgstr "Сбой при запуÑке: %s" #~ msgid "Submission failed, service returned: %s" #~ msgstr "Сбой при запуÑке, ÑÐµÑ€Ð²Ð¸Ñ Ð²Ð¾Ð·Ð²Ñ€Ð°Ñ‚Ð¸Ð» ошибку: %s" #~ msgid "Creating and sending a start job request" #~ msgstr "Создание и отправка запроÑа о запуÑке задачи" #~ msgid "A start job request failed" #~ msgstr "Ошибка запроÑа о запуÑке задачи" #~ msgid "A start job request succeeded" #~ msgstr "УÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ запуÑке задачи" #~ msgid "There was no response to a start job request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ запуÑке задачи" #~ 
msgid "The response of a start job request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ Ñоздании задачи не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "A status request failed" #~ msgstr "Сбой запроÑа о ÑоÑтоÑнии" #~ msgid "A status request succeed" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии удалÑÑ" #~ msgid "There was no response to a status request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии" #~ msgid "The response of a status request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "The job status could not be retrieved" #~ msgstr "Ðе удалоÑÑŒ определить ÑоÑтоÑние задачи" #~ msgid "Creating and sending an index service query" #~ msgstr "Создание и отправка запроÑа в каталог реÑурÑов" #~ msgid "Creating and sending a service status request" #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии Ñлужбы" #~ msgid "A service status request failed" #~ msgstr "Ошибка запроÑа о ÑоÑтоÑнии Ñлужбы" #~ msgid "A service status request succeeded" #~ msgstr "УÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñлужбы" #~ msgid "There was no response to a service status request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñлужбы" #~ msgid "The response of a service status request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñервера не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "The service status could not be retrieved" #~ msgstr "Ðе удалоÑÑŒ определить ÑоÑтоÑние Ñлужбы" #~ msgid "A job termination request failed" #~ msgstr "Ошибка запроÑа об обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "A job termination request succeed" #~ msgstr "УÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "There was no response to a job termination request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "The response of a job termination request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ прерывании задачи не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "A job cleaning request failed" #~ msgstr "Ошибка запроÑа об удалении результатов работы задачи" #~ msgid "A job cleaning request succeed" #~ msgstr "УÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± удалении результатов работы задачи" #~ msgid "There was no response to a job cleaning request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± удалении результатов работы задачи" #~ msgid "The response of a job cleaning request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± удалении задачи не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "Adding CREAM computing service" #~ msgstr "ДобавлÑетÑÑ Ð²Ñ‹Ñ‡Ð¸ÑÐ»Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñлужба типа CREAM" #~ msgid "" #~ "checingBartenderURL: Response:\n" #~ "%s" #~ msgstr "" #~ "checingBartenderURL: Ответ:\n" #~ "%s" #~ msgid "Hostname is not implemented for arc protocol" #~ msgstr "Hostname не поддерживаетÑÑ Ð¿Ñ€Ð¾Ñ‚Ð¾ÐºÐ¾Ð»Ð¾Ð¼ arc" #~ msgid "" #~ "nd:\n" #~ "%s" #~ msgstr "" #~ "nd:\n" #~ "%s" #~ msgid "Not a collection" #~ msgstr "Это не коллекциÑ" #~ msgid "Recieved transfer URL: %s" #~ msgstr "Получен транÑпортный URL: %s" #~ msgid "Calculated checksum: %s" #~ msgstr "ВычиÑÐ»ÐµÐ½Ð½Ð°Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñума: %s" #~ msgid "Check" #~ msgstr "Проверка" #~ msgid "Deleted %s" #~ msgstr "Удалён %s" #~ msgid "Found file %s in remote cache at %s" #~ msgstr "Файл %s обнаружен в удалённом кÑше %s" #~ msgid "Failed to delete stale remote cache file %s: %s" #~ msgstr "Ðе удалоÑÑŒ удалить уÑтаревший удалённо кÑшированный файл %s: %s" #~ msgid "Failed to release lock on remote cache file %s" #~ msgstr "Ðевозможно 
разблокировать удалённо кÑшированный файл %s" #~ msgid "Replicating file %s to local cache file %s" #~ msgstr "Копирование файла %s в локальный кÑш %s" #~ msgid "" #~ "Replicating file %s from remote cache failed due to source being deleted " #~ "or modified" #~ msgstr "" #~ "Копирование файла %s из удалённого кÑша не удалоÑÑŒ, Ñ‚.к. иÑточник был " #~ "удалён или изменён" #~ msgid "Failed to delete bad copy of remote cache file %s at %s: %s" #~ msgstr "" #~ "Ðе удалоÑÑŒ удалить иÑпорченную копию удалённо кÑшированного файла %s в " #~ "%s: %s" #~ msgid "Cache file for %s not found in any local or remote cache" #~ msgstr "" #~ "КÑшированный файл Ð´Ð»Ñ %s не был обнаружен ни в локальном, ни в удалённом " #~ "кÑшах" #~ msgid "Using remote cache file %s for url %s" #~ msgstr "ИÑпользуетÑÑ ÑƒÐ´Ð°Ð»Ñ‘Ð½Ð½Ð¾ кÑшированный файл %s Ð´Ð»Ñ URL %s" #~ msgid "Initialize ISIS handler" #~ msgstr "Ð˜Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚Ñ‡Ð¸ÐºÐ° ISIS" #~ msgid "Can't recognize URL: %s" #~ msgstr "Ðеприемлемый URL: %s" #~ msgid "Initialize ISIS handler succeeded" #~ msgstr "УÑÐ¿ÐµÑˆÐ½Ð°Ñ Ð¸Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚Ñ‡Ð¸ÐºÐ° ISIS" #~ msgid "Remove ISIS (%s) from list" #~ msgstr "Удаление ISIS (%s) из ÑпиÑка" #~ msgid "getISISList from %s" #~ msgstr "getISISList из %s" #~ msgid "Key %s, Cert: %s, CA: %s" #~ msgstr "Ключ %s, Ñертификат: %s, CA: %s" #~ msgid "ISIS (%s) is not available or not valid response. (%d. reconnection)" #~ msgstr "" #~ "ISIS (%s) недоÑтупен, или получен недопуÑтимый отклик. (%d. Повторное " #~ "Ñоединение)" #~ msgid "Connection to the ISIS (%s) is success and get the list of ISIS." #~ msgstr "УÑпешное Ñоединение Ñ ISIS (%s), получение ÑпиÑка ÑервиÑов ISIS." #~ msgid "GetISISList add this (%s) ISIS into the list." #~ msgstr "GetISISList добавлÑет Ñтот ÑÐµÑ€Ð²Ð¸Ñ (%s) ISIS в ÑпиÑок." #~ msgid "Chosen ISIS for communication: %s" #~ msgstr "Выбранный Ð´Ð»Ñ ÑвÑзи ISIS: %s" #~ msgid "Get ISIS from list of ISIS handler" #~ msgstr "Извлечение ÑервиÑа ISIS из ÑпиÑка обработчиков ISIS" #~ msgid "Here is the end of the infinite calling loop." #~ msgstr "ЗдеÑÑŒ и заканчиваетÑÑ Ð±ÐµÑконечный цикл запроÑов." #~ msgid "" #~ "There is no more ISIS available. The list of ISIS's is already empty." #~ msgstr "ДоÑтупных ÑевриÑов ISIS больше нет. СпиÑок ISIS-ов уже опуÑтел." #~ msgid "cannot create directory: %s" #~ msgstr "не удалоÑÑŒ Ñоздать каталог: %s" #~ msgid "Cache configuration: %s" #~ msgstr "ÐаÑтройки кÑша: %s" #~ msgid "Missing cache root in configuration" #~ msgstr "Ð’ наÑтройках кÑша отÑутÑтвует корневой каталог" #~ msgid "Missing service ID" #~ msgstr "ОтÑутÑтвует Ñрлык ÑервиÑа" #~ msgid "Cache root: %s" #~ msgstr "ÐšÐ¾Ñ€Ð½ÐµÐ²Ð°Ñ Ð¿Ð°Ð¿ÐºÐ° кÑша: %s" #~ msgid "InfoCache object is not set up" #~ msgstr "Объект InfoCache не Ñоздан" #~ msgid "Invalid path in Set(): %s" #~ msgstr "ÐедопуÑтимый путь в Set(): %s" #~ msgid "Invalid path in Get(): %s" #~ msgstr "ÐедопуÑтимый путь в Get(): %s" #~ msgid "" #~ "InfoRegistrar thread waiting %d seconds for the all Registers elements " #~ "creation." #~ msgstr "" #~ "Поток InfoRegistrar ожидает %d Ñекунд, пока ÑоздадутÑÑ Ð²Ñе Ñлементы " #~ "Registers." #~ msgid "" #~ "InfoRegister created with config:\n" #~ "%s" #~ msgstr "" #~ "InfoRegister Ñоздан Ñ Ð½Ð°Ñтройками:\n" #~ "%s" #~ msgid "InfoRegister to be registered in Registrar %s" #~ msgstr "InfoRegister будет занеÑён в Registrar %s" #~ msgid "" #~ "Discarding Registrar because the \"URL\" element is missing or empty." 
#~ msgstr "" #~ "Registrar игнорируетÑÑ, так как Ñлемент \"URL\" отÑутÑтвует, либо пуÑÑ‚." #~ msgid "InfoRegistrar id \"%s\" has been found." #~ msgstr "Обнаружен InfoRegistrar id \"%s\"." #~ msgid "InfoRegistrar id \"%s\" was not found. New registrar created" #~ msgstr "InfoRegistrar id \"%s\" не был обнаружен. Создан новый рееÑтр" #~ msgid "" #~ "Configuration error. Retry: \"%s\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. Retry: \"%s\" не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым значением. Будет " #~ "иÑпользовано значение по умолчанию." #~ msgid "Retry: %d" #~ msgstr "ÐŸÐ¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°: %d" #~ msgid "Key: %s, cert: %s" #~ msgstr "Ключ: %s, Ñертификат: %s" #~ msgid "The service won't be registered." #~ msgstr "Ð¡ÐµÑ€Ð²Ð¸Ñ Ð½Ðµ будет зарегиÑтрирован." #~ msgid "Configuration error. Missing mandatory \"Period\" element." #~ msgstr "Ошибка конфигурации. ОтÑутÑтвует обÑзательный Ñлемент \"Period\"." #~ msgid "Configuration error. Missing mandatory \"Endpoint\" element." #~ msgstr "Ошибка конфигурации. ОтÑутÑтвует обÑзательный Ñлемент \"Endpoint\"." #~ msgid "Configuration error. Missing mandatory \"Expiration\" element." #~ msgstr "" #~ "Ошибка конфигурации. ОтÑутÑтвует обÑзательный Ñлемент \"Expiration\"." #~ msgid "" #~ "Service was already registered to the InfoRegistrar connecting to infosys " #~ "%s." #~ msgstr "" #~ "Ð¡ÐµÑ€Ð²Ð¸Ñ Ð±Ñ‹Ð» уже занеÑён в InfoRegistrar, подключённый к информационной " #~ "ÑиÑтеме %s." #~ msgid "" #~ "Service is successfully added to the InfoRegistrar connecting to infosys " #~ "%s." #~ msgstr "" #~ "Ð¡ÐµÑ€Ð²Ð¸Ñ ÑƒÑпешно добавлен в InfoRegistrar, подключённый к информационной " #~ "ÑиÑтеме %s." #~ msgid "Unregistred Service can not be removed." #~ msgstr "ÐезарегиÑÑ‚Ñ€Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ñлужба не может быть удалена." #~ msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s CAPath" #~ msgstr "Ключ: %s, Сертификат: %s, ДоверенноÑть: %s, Каталог CA: %s, путь CA" #~ msgid "Response from the ISIS: %s" #~ msgstr "Отклик из ISIS: %s" #~ msgid "Failed to remove registration from %s ISIS" #~ msgstr "Ðе удалоÑÑŒ удалить учётную запиÑÑŒ Ñ Ñервера ISIS %s" #~ msgid "Successfuly removed registration from ISIS (%s)" #~ msgstr "УÑпешное удаление учётной запиÑи Ñ Ñервера ISIS (%s)" #~ msgid "Failed to remove registration from ISIS (%s) - %s" #~ msgstr "Ðе удалоÑÑŒ удалить учётную запиÑÑŒ Ñ Ñервера ISIS (%s) - %s" #~ msgid "Retry connecting to the ISIS (%s) %d time(s)." #~ msgstr "Повторные попытки ÑвÑзи Ñ Ñервером ISIS (%s) %d раз." #~ msgid "Service removed from InfoRegistrar connecting to infosys %s." #~ msgstr "" #~ "Ð¡ÐµÑ€Ð²Ð¸Ñ ÑƒÐ´Ð°Ð»Ñ‘Ð½ из InfoRegistrar, подключённый к информационной ÑиÑтеме %s." #~ msgid "Failed to remove registration from %s EMIRegistry" #~ msgstr "Ðе удалоÑÑŒ удалить учётную запиÑÑŒ Ñ Ñервера EMIRegistry %s" #~ msgid "Successfuly removed registration from EMIRegistry (%s)" #~ msgstr "УÑпешное удаление учётной запиÑи Ñ Ñервера EMIRegistry (%s)" #~ msgid "Retry connecting to the EMIRegistry (%s) %d time(s)." #~ msgstr "Попытка повторного ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ EMIRegistry (%s) %d раз(а)." #~ msgid "EMIRegistry (%s) is not available." #~ msgstr "ÐедоÑтупен ÑÐµÑ€Ð²Ð¸Ñ EMIRegistry (%s)." #~ msgid "Registration starts: %s" #~ msgstr "РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð°Ñ‡Ð¸Ð½Ð°ÐµÑ‚ÑÑ: %s" #~ msgid "reg_.size(): %d" #~ msgstr "reg_.size(): %d" #~ msgid "Registrant has no proper URL specified. Registration end." #~ msgstr "Registrant не Ñодержит дейÑтвительного URL. РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð¾ÐºÐ¾Ð½Ñ‡ÐµÐ½Ð°." 
#~ msgid "Create RegEntry XML element" #~ msgstr "Создание Ñлемента XML RegEntry" #~ msgid "ServiceID attribute calculated from Endpoint Reference" #~ msgstr "Ðтрибут ServiceID получен из опиÑÐ°Ð½Ð¸Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ входа" #~ msgid "Generation Time attribute calculated from current time" #~ msgstr "Ðтрибут Generation Time получен из текущего времени" #~ msgid "ServiceID stored: %s" #~ msgstr "Сохранён ServiceID: %s" #~ msgid "Missing service document provided by the service %s" #~ msgstr "ОтÑутÑтвует документ Ñлужбы, публикуемый ÑервиÑом %s" #~ msgid "" #~ "Missing MetaServiceAdvertisment or Expiration values provided by the " #~ "service %s" #~ msgstr "" #~ "ОтÑутÑтвуют Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð¾Ð² MetaServiceAdvertisment или Expiration, " #~ "публикуемые ÑервиÑом %s" #~ msgid "Missing Type value provided by the service %s" #~ msgstr "ОтÑутÑтвует значение атрибута Type, публикуемое ÑервиÑом %s" #~ msgid "Missing Endpoint Reference value provided by the service %s" #~ msgstr "" #~ "ОтÑутÑтвует значение атрибута Endpoint Reference, публикуемое ÑервиÑом %s" #~ msgid "Registering to %s ISIS" #~ msgstr "РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð° Ñервере ISIS %s" #~ msgid "Key: %s, Cert: %s, Proxy: %s, CADir: %s, CAFile" #~ msgstr "Ключ: %s, Сертификат: %s, ДоверенноÑть: %s, Каталог CA: %s, файл CA" #~ msgid "Sent RegEntries: %s" #~ msgstr "ПоÑланы RegEntries: %s" #~ msgid "Error during registration to %s ISIS" #~ msgstr "Ошибка при региÑтрации в ÑÐµÑ€Ð²Ð¸Ñ ISIS %s" #~ msgid "Successful registration to ISIS (%s)" #~ msgstr "УÑÐ¿ÐµÑˆÐ½Ð°Ñ Ñ€ÐµÐ³Ð¸ÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð² ÑÐµÑ€Ð²Ð¸Ñ ISIS (%s)" #~ msgid "Failed to register to ISIS (%s) - %s" #~ msgstr "Сбой региÑтрации в ÑÐµÑ€Ð²Ð¸Ñ ISIS (%s) - %s" #~ msgid "Registration ends: %s" #~ msgstr "Конец региÑтрации: %s" #~ msgid "Waiting period is %d second(s)." #~ msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ - %d Ñекунд(Ñ‹)." 
#~ msgid "Registration exit: %s" #~ msgstr "Выход из региÑтрации: %s" #~ msgid "Registering to %s EMIRegistry" #~ msgstr "РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð° Ñервере EMIRegistry %s" #~ msgid "Sent entry: %s" #~ msgstr "Отправлена запиÑÑŒ: %s" #~ msgid "Error during %s to %s EMIRegistry" #~ msgstr "Сбой операции %s по отношению к ÑервиÑу EMIRegistry %s" #~ msgid "Successful %s to EMIRegistry (%s)" #~ msgstr "" #~ "УÑпешное завершение операции %s по отношению к ÑервиÑу EMIRegistry (%s)" #~ msgid "Failed to %s to EMIRegistry (%s) - %d" #~ msgstr "" #~ "Ðе удалоÑÑŒ выполнить операцию %s по отношению к ÑервиÑу EMIRegistry (%s) " #~ "- %d" #~ msgid "Cannot initialize winsock library" #~ msgstr "Ðе удалоÑÑŒ инициализировать библиотеку winsock" #~ msgid "Failed processing user mapping command: unixmap %s" #~ msgstr "Сбой работы команды ÑоответÑÑ‚Ð²Ð¸Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ: unixmap %s" #~ msgid "Failed processing user mapping command: unixgroup %s" #~ msgstr "Сбой работы команды припиÑки пользователей: unixgroup %s" #~ msgid "LDAP authorization is not supported anymore" #~ msgstr "ÐÐ²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ LDAP больше не поддерживаетÑÑ" #~ msgid "User name mapping has empty VO: %s" #~ msgstr "ПуÑÑ‚Ð°Ñ VO в приÑвоении имени пользователÑ: %s" #~ msgid "Linking mapped file - can't link on Windows" #~ msgstr "" #~ "СоздаётÑÑ ÑимволичеÑÐºÐ°Ñ ÑÑылка на ÑоответÑтвующий файл - невыполнимо на " #~ "Windows" #~ msgid "process: response is not SOAP" #~ msgstr "процеÑÑ: ответ не ÑвлÑетÑÑ Ð´Ð¾ÐºÑƒÐ¼ÐµÐ½Ñ‚Ð¾Ð¼ SOAP" #~ msgid "Storing configuration in temporary file %s" #~ msgstr "ЗапиÑÑŒ наÑтроек во временный файл %s" #~ msgid "Failed to process service configuration" #~ msgstr "Ðе удалоÑÑŒ обработать наÑтройки ÑервиÑа" #~ msgid "Provided LRMSName is not a valid URL: %s" #~ msgstr "Указанное значение LRMSName не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым URL: %s" #~ msgid "" #~ "No LRMSName is provided. This is needed if you wish to completely comply " #~ "with the BES specifications." #~ msgstr "" #~ "Ðе задан атрибут LRMSName. Он необходим Ð´Ð»Ñ Ð¿Ð¾Ð»Ð½Ð¾Ð³Ð¾ ÑоответÑÑ‚Ð²Ð¸Ñ " #~ "Ñпецификации интерфейÑа BES." 
#~ msgid "" #~ "ChangeActivityStatus: request = \n" #~ "%s" #~ msgstr "" #~ "ChangeActivityStatus: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "ChangeActivityStatus: no ActivityIdentifier found" #~ msgstr "ChangeActivityStatus: не найден ActivityIdentifier" #~ msgid "ChangeActivityStatus: EPR contains no JobID" #~ msgstr "ChangeActivityStatus: EPR не Ñодержит JobID" #~ msgid "ChangeActivityStatus: no job found: %s" #~ msgstr "ChangeActivityStatus: задача не найдена: %s" #~ msgid "ChangeActivityStatus: missing NewStatus element" #~ msgstr "ChangeActivityStatus: отÑутÑтвует Ñлемент NewStatus" #~ msgid "ChangeActivityStatus: Failed to accept delegation" #~ msgstr "ChangeActivityStatus: невозможно принÑть делегирование" #~ msgid "ChangeActivityStatus: old BES state does not match" #~ msgstr "ChangeActivityStatus: не найдено ÑоответÑÑ‚Ð²Ð¸Ñ Ñтарому ÑоÑтоÑнию BES" #~ msgid "ChangeActivityStatus: old A-REX state does not match" #~ msgstr "" #~ "ChangeActivityStatus: не найдено ÑоответÑÑ‚Ð²Ð¸Ñ Ñтарому ÑоÑтоÑнию A-REX" #~ msgid "ChangeActivityStatus: Failed to update credentials" #~ msgstr "ChangeActivityStatus: невозможно обновить параметры доÑтупа" #~ msgid "ChangeActivityStatus: Failed to resume job" #~ msgstr "ChangeActivityStatus: невозможно возобновить задачу" #~ msgid "ChangeActivityStatus: State change not allowed: from %s/%s to %s/%s" #~ msgstr "" #~ "ChangeActivityStatus: недопуÑтимое изменение ÑоÑтоÑниÑ: Ñ %s/%s на %s/%s" #~ msgid "" #~ "ChangeActivityStatus: response = \n" #~ "%s" #~ msgstr "" #~ "ChangeActivityStatus: ответ = \n" #~ "%s" #~ msgid "" #~ "CreateActivity: request = \n" #~ "%s" #~ msgstr "" #~ "CreateActivity: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "CreateActivity: no job description found" #~ msgstr "CreateActivity: ОпиÑание задачи не найдено" #~ msgid "CreateActivity: max jobs total limit reached" #~ msgstr "" #~ "CreateActivity: доÑтигнут макÑимальный предел общего количеÑтва задач" #~ msgid "CreateActivity: Failed to accept delegation" #~ msgstr "CreateActivity: Сбой при принÑтии делегированиÑ" #~ msgid "CreateActivity: Failed to create new job: %s" #~ msgstr "CreateActivity: Ðе удалоÑÑŒ Ñоздать новую задачу: %s" #~ msgid "CreateActivity: Failed to create new job" #~ msgstr "CreateActivity: Ðе удалоÑÑŒ Ñоздать новую задачу" #~ msgid "CreateActivity finished successfully" #~ msgstr "CreateActivity закончилоÑÑŒ уÑпешно" #~ msgid "" #~ "CreateActivity: response = \n" #~ "%s" #~ msgstr "" #~ "CreateActivity: ответ = \n" #~ "%s" #~ msgid "Get: can't process file %s" #~ msgstr "Get: невозможно обработать файл %s" #~ msgid "Head: can't process file %s" #~ msgstr "Head: невозможно обработать файл %s" #~ msgid "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" #~ msgstr "http_get: start=%llu, end=%llu, burl=%s, hpath=%s" #~ msgid "" #~ "GetActivityDocuments: request = \n" #~ "%s" #~ msgstr "" #~ "GetActivityDocuments: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "GetActivityDocuments: non-AREX job requested" #~ msgstr "GetActivityDocuments: Ð·Ð°Ð¿Ñ€Ð¾ÑˆÐµÐ½Ð½Ð°Ñ Ð·Ð°Ð´Ð°Ñ‡Ð° не контролируетÑÑ AREX" #~ msgid "GetActivityDocuments: job %s - %s" #~ msgstr "GetActivityDocuments: задача %s - %s" #~ msgid "" #~ "GetActivityDocuments: response = \n" #~ "%s" #~ msgstr "" #~ "GetActivityDocuments: ответ = \n" #~ "%s" #~ msgid "" #~ "GetActivityStatuses: request = \n" #~ "%s" #~ msgstr "" #~ "GetActivityStatuses: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "GetActivityStatuses: unknown verbosity level requested: %s" #~ msgstr "GetActivityStatuses: запрошен неизвеÑтный уровень отладки: %s" #~ msgid 
"GetActivityStatuses: job %s - can't understand EPR" #~ msgstr "GetActivityStatuses: задание %s - невозможно интерпретировать EPR" #~ msgid "GetActivityStatuses: job %s - %s" #~ msgstr "GetActivityStatuses: задача %s - %s" #~ msgid "" #~ "GetActivityStatuses: response = \n" #~ "%s" #~ msgstr "" #~ "GetActivityStatuses: ответ = \n" #~ "%s" #~ msgid "" #~ "GetFactoryAttributesDocument: request = \n" #~ "%s" #~ msgstr "" #~ "GetFactoryAttributesDocument: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "" #~ "GetFactoryAttributesDocument: response = \n" #~ "%s" #~ msgstr "" #~ "GetFactoryAttributesDocument: ответ = \n" #~ "%s" #~ msgid "" #~ "Usage: %s -I -U -P -L [-c " #~ "] [-p ] [-d ]" #~ msgstr "" #~ "ИÑпользование: %s -I <задача> -U <пользователь> -P <доверенноÑть> -L " #~ "<файл ÑоÑтоÑÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸> [-c <Ð¿Ñ€ÐµÑ„Ð¸ÐºÑ ceID>] [-p <Ð¿Ñ€ÐµÑ„Ð¸ÐºÑ Ð¶ÑƒÑ€Ð½Ð°Ð»Ð°> ] [-d " #~ "<отладка>]" #~ msgid "" #~ "Usage: %s [-N] -P -L [-c ] [-d " #~ "]" #~ msgstr "" #~ "ИÑпользование: %s [-N] -P <доверенноÑть> -L <файл ÑоÑтоÑÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸> [-c " #~ "<файл наÑтроек>] [-d <отладка>]" #~ msgid "User proxy file is required but is not specified" #~ msgstr "Файл доверенноÑти Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½ÐµÐ¾Ð±Ñ…Ð¾Ð´Ð¸Ð¼, но не указан" #~ msgid "Local job status file is required" #~ msgstr "Ðеобходимо указать файл ÑоÑтоÑÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Making the decision for the queue %s" #~ msgstr "ПринимаетÑÑ Ñ€ÐµÑˆÐµÐ½Ð¸Ðµ Ð´Ð»Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´Ð¸ %s" #~ msgid "Can not parse the configuration file %s" #~ msgstr "Ðевозможно обработать файл наÑтроек %s" #~ msgid "Can not find queue '%s' in the configuration file" #~ msgstr "Ðе удалоÑÑŒ обнаружить очередь '%s' в файле наÑтроек" #~ msgid "No access policy to check, returning success" #~ msgstr "Ðет политик доÑтупа, нуждающихÑÑ Ð² Ñверке, уÑпешное завершение" #~ msgid "CA certificates directory %s does not exist" #~ msgstr "Каталог Ñертификатов агентÑтв CA %s не ÑущеÑтвует" #~ msgid "User proxy certificate is not valid" #~ msgstr "ДоверенноÑть Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½ÐµÐ´ÐµÐ¹Ñтвительна" #~ msgid "Getting VOMS AC for: %s" #~ msgstr "Извлечение Ñертификата атрибутов VOMS AC длÑ: %s" #~ msgid "Checking a match for '%s'" #~ msgstr "Проверка ÑÐ¾Ð²Ð¿Ð°Ð´ÐµÐ½Ð¸Ñ Ð´Ð»Ñ '%s'" #~ msgid "FQAN '%s' IS a match to '%s'" #~ msgstr "Полный атрибут '%s' СОВПÐДÐЕТ Ñ '%s'" #~ msgid "" #~ "Queue '%s' usage is prohibited to FQAN '%s' by the site access policy" #~ msgstr "" #~ "ИÑпользование очереди '%s' запрещено Ð´Ð»Ñ Ð¿Ð¾Ð»Ð½Ð¾Ð³Ð¾ атрибута '%s' в " #~ "ÑоответÑтвии Ñ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð¾Ð¹ политикой доÑтупа" #~ msgid "FQAN '%s' IS NOT a match to '%s'" #~ msgstr "Полный атрибут '%s' ÐЕ СОВПÐДÐЕТ Ñ '%s'" #~ msgid "" #~ "Queue '%s' usage with provided FQANs is prohibited by the site access " #~ "policy" #~ msgstr "" #~ "ИÑпользование очереди '%s' запрещено Ð´Ð»Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ñ… полных атрибутов в " #~ "ÑоответÑтвии Ñ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð¾Ð¹ политикой доÑтупа" #~ msgid "Can't interpret configuration file %s as XML" #~ msgstr "Ðе удалоÑÑŒ разобрать файл наÑтроек %s как XML" #~ msgid "Wrong number in jobreport_period: %s" #~ msgstr "ÐедопуÑтимое чиÑло в jobreport_period: %s" #~ msgid "Wrong number in jobreport_period: %d, minimal value: %s" #~ msgstr "ÐедопуÑтимое чиÑло в jobreport_period: %d, наименьшее значение: %s" #~ msgid "defaultlrms is empty" #~ msgstr "пуÑтое значение defaultlrms" #~ msgid "Wrong number for timeout in plugin command" #~ msgstr "" #~ "ÐедопуÑтимое значение времени Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð² инÑтрукции подключаемого модулÑ" #~ msgid "Value for maxJobsTracked is incorrect number" #~ 
msgstr "Значение maxJobsTracked не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "Value for maxJobsRun is incorrect number" #~ msgstr "Значение maxJobsRun не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "Value for maxJobsTotal is incorrect number" #~ msgstr "Значение maxJobsTotal не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "Value for maxJobsPerDN is incorrect number" #~ msgstr "Значение maxJobsPerDN не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "Value for wakeupPeriod is incorrect number" #~ msgstr "Значение wakeupPeriod не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "Value for maxScripts is incorrect number" #~ msgstr "Значение maxScripts не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "serviceMail is empty" #~ msgstr "пуÑтой serviceMail" #~ msgid "Type in LRMS is missing" #~ msgstr "ОтÑутÑтвует тип в СУПО" #~ msgid "LRMS is missing" #~ msgstr "ОтÑутÑтвует СУПО" #~ msgid "State name for authPlugin is missing" #~ msgstr "ОтÑутÑтвует наименование ÑоÑтоÑÐ½Ð¸Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ authPlugin" #~ msgid "Command for authPlugin is missing" #~ msgstr "ОтÑутÑтвует команда Ð´Ð»Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ authPlugin" #~ msgid "Registering plugin for state %s; options: %s; command: %s" #~ msgstr "" #~ "РегиÑтрируетÑÑ Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡Ð°ÐµÐ¼Ñ‹Ð¹ модуль Ð´Ð»Ñ ÑоÑтоÑÐ½Ð¸Ñ %s; опции: %s; команда: " #~ "%s" #~ msgid "Command for localCred is missing" #~ msgstr "ОтÑутÑтвует команда Ð´Ð»Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ localCred" #~ msgid "Timeout for localCred is missing" #~ msgstr "ОтÑутÑтвует тайм-аут Ð´Ð»Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ localCred" #~ msgid "Timeout for localCred is incorrect number" #~ msgstr "ÐедопуÑтимое значение тайм-аута Ð´Ð»Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ localCred" #~ msgid "Control element must be present" #~ msgstr "Элемент Control должен приÑутÑтвовать" #~ msgid "controlDir is missing" #~ msgstr "ОтÑутÑтвует controlDir" #~ msgid "sessionRootDir is missing" #~ msgstr "ОтÑутÑтвует sessionRootDir" #~ msgid "Attribute drain for sessionRootDir is incorrect boolean" #~ msgstr "" #~ "Значение атрибута drain Ð´Ð»Ñ sessionRootDir не ÑвлÑетÑÑ Ð²ÐµÑ€Ð½Ñ‹Ð¼ булевÑким" #~ msgid "The fixDirectories element is incorrect value" #~ msgstr "Значение Ñлемента fixDirectories неверно" #~ msgid "The delegationDB element is incorrect value" #~ msgstr "Значение Ñлемента delegationDB неверно" #~ msgid "The maxReruns element is incorrect number" #~ msgstr "Значение Ñлемента maxReruns не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "The noRootPower element is incorrect number" #~ msgstr "Значение Ñлемента noRootPower не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "The defaultTTL element is incorrect number" #~ msgstr "Значение Ñлемента defaultTTL не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "The defaultTTR element is incorrect number" #~ msgstr "Значение Ñлемента defaultTTR не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом" #~ msgid "Command in helperUtility is missing" #~ msgstr "ОтÑутÑтвует команда в модуле helperUtility" #~ msgid "Username in helperUtility is empty" #~ msgstr "Ðе указано Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² модуле helperUtility" #~ msgid "\tRemote cache : %s" #~ msgstr "\tУдалённый кÑш : %s" #~ msgid "\tRemote cache link: %s" #~ msgstr "\tСÑылка на удалённый кÑш: %s" #~ msgid "wrong boolean in %s" #~ msgstr "Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð±ÑƒÐ»ÐµÐ²Ð° Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ Ð² %s" #~ msgid "wrong number in %s" #~ msgstr "неверное чиÑло в %s" #~ msgid "Can't interpret configuration file as XML" #~ msgstr "Ðе удалоÑÑŒ разобрать файл наÑтроек как XML" #~ msgid "Bad value for debug" #~ msgstr "ÐедопуÑтимое значение debug" #~ msgid "Bad URL in deliveryService: %s" #~ msgstr "ÐедопуÑтимый URL в deliveryService: %s" #~ msgid "Value 
for 'link' element in mapURL is incorrect" #~ msgstr "Значение Ñлемента 'link' в mapURL неверно" #~ msgid "Missing 'from' element in mapURL" #~ msgstr "ОтÑутÑтвующий Ñлемент 'from' в mapURL" #~ msgid "Missing 'to' element in mapURL" #~ msgstr "ОтÑутÑтвующий Ñлемент 'to' в mapURL" #~ msgid "Failed to run plugin" #~ msgstr "Ошибка иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ" #~ msgid "Plugin failed: %s" #~ msgstr "Сбой модулÑ: %s" #~ msgid "empty argument to remotegmdirs" #~ msgstr "не задан аргумент remotegmdirs" #~ msgid "bad arguments to remotegmdirs" #~ msgstr "неверные аргументы remotegmdirs" #~ msgid "Failed processing grid-manager configuration" #~ msgstr "Ðе удалоÑÑŒ обработать наÑтройки grid-manager" #~ msgid "%s: Destroying" #~ msgstr "%s: УничтожаетÑÑ" #~ msgid "%s: Can't read state - no comments, just cleaning" #~ msgstr "" #~ "%s: Ðевозможно прочеÑть ÑоÑтоÑние - никаких комментариев, проÑто чиÑтка" #~ msgid "%s: Cleaning control and session directories" #~ msgstr "%s: ОчиÑтка управлÑющей" #~ msgid "%s: This job may be still running - canceling" #~ msgstr "%s: Эта задача, возможно, ещё иÑполнÑетÑÑ - прерывание" #~ msgid "%s: Cancellation failed (probably job finished) - cleaning anyway" #~ msgstr "" #~ "%s: Прерывание не удалоÑÑŒ (вероÑтно, задача закончилаÑÑŒ) - вÑÑ‘ равно " #~ "удалÑем" #~ msgid "%s: Cancellation probably succeeded - cleaning" #~ msgstr "%s: Прерывание, вероÑтно, удалоÑÑŒ - удаление" #~ msgid "Invalid checksum in %s for %s" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма в %s Ð´Ð»Ñ %s" #~ msgid "Invalid file size in %s for %s " #~ msgstr "ÐедопуÑтимый размер файла в %s Ð´Ð»Ñ %s " #~ msgid "Invalid file: %s is too big." #~ msgstr "Ðеверный файл: %s Ñлишком велик." #~ msgid "Error accessing file %s" #~ msgstr "Ошибка доÑтупа к файлу %s" #~ msgid "Error reading file %s" #~ msgstr "Ошибка при чтении файла %s" #~ msgid "File %s has wrong CRC." #~ msgstr "У файла %s Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма CRC." 
#~ msgid "Failed downloading file %s - %s" #~ msgstr "Сбой загрузки файла %s - %s" #~ msgid "Retrying" #~ msgstr "Повтор" #~ msgid "Downloaded file %s" #~ msgstr "Загружен файл %s" #~ msgid "Wrong number of threads: %s" #~ msgstr "Ðеверное чиÑло потоков: %s" #~ msgid "Wrong number of files: %s" #~ msgstr "Ðеверное количеÑтво файлов: %s" #~ msgid "Bad number: %s" #~ msgstr "Ðеверное чиÑло: %s" #~ msgid "Specified user can't be handled" #~ msgstr "Указанный пользователь не может быть обработан" #~ msgid "Missing parameter for option %c" #~ msgstr "ОтÑутÑтвует параметр Ð´Ð»Ñ Ð¾Ð¿Ñ†Ð¸Ð¸ %c" #~ msgid "Undefined processing error" #~ msgstr "ÐÐµÐ¾Ð¿Ñ€ÐµÐ´ÐµÐ»Ñ‘Ð½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° при обработке" #~ msgid "Missing job id" #~ msgstr "ОтÑутÑтвует Ñрлык задачи" #~ msgid "Missing control directory" #~ msgstr "ОтÑутÑтвует каталог контролÑ" #~ msgid "Missing session directory" #~ msgstr "ОтÑутÑтвует каталог ÑеанÑов" #~ msgid "Minimal speed: %llu B/s during %i s" #~ msgstr "ÐœÐ¸Ð½Ð¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ ÑкороÑть: %llu Б/Ñ Ð² течение %i Ñ" #~ msgid "Minimal average speed: %llu B/s" #~ msgstr "ÐœÐ¸Ð½Ð¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ ÑреднÑÑ ÑкороÑть: %llu B/s" #~ msgid "Maximal inactivity time: %i s" #~ msgstr "МакÑимальное Ð²Ñ€ÐµÐ¼Ñ Ð±ÐµÐ·Ð´ÐµÐ¹ÑтвиÑ: %i s" #~ msgid "Won't use more than 10 threads" #~ msgstr "Будет иÑпользовано не более 10-и потоков" #~ msgid "Downloader started" #~ msgstr "Загрузчик запущен" #~ msgid "Can't read list of input files" #~ msgstr "Ðевозможно прочеÑть ÑпиÑок входных файлов" #~ msgid "Error: duplicate file in list of input files: %s" #~ msgstr "Ошибка: дублированное Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° в ÑпиÑке входных файлов: %s" #~ msgid "Can't read list of output files" #~ msgstr "Ðевозможно прочеÑть ÑпиÑок выходных файлов" #~ msgid "Can't remove junk files" #~ msgstr "Ðевозможно удалить ненужные файлы" #~ msgid "Can't read job local description" #~ msgstr "Ðевозможно прочеÑть локальное опиÑание задачи" #~ msgid "Local source for download: %s" #~ msgstr "Локальный иÑточник загрузки: %s" #~ msgid "Can't accept URL: %s" #~ msgstr "Ðеприемлемый URL: %s" #~ msgid "Failed to initiate file transfer: %s - %s" #~ msgstr "Ðевозможно запуÑтить передачу файлов: %s - %s" #~ msgid "Downloaded %s" #~ msgstr "Загружен %s" #~ msgid "Failed to download (but may be retried) %s" #~ msgstr "Ðе удалоÑÑŒ загрузить (возможна Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°) %s" #~ msgid "Failed to download %s" #~ msgstr "Ðе удалоÑÑŒ загрузить %s" #~ msgid "Some downloads failed" #~ msgstr "Ðекоторые загрузки не удалиÑÑŒ" #~ msgid "Some downloads failed, but may be retried" #~ msgstr "Ðекоторые загрузки не удалиÑÑŒ (возможна Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°)" #~ msgid "Failed writing changed input file" #~ msgstr "Ðе удалоÑÑŒ запиÑать изменившийÑÑ Ð²Ñ…Ð¾Ð´Ð½Ð¾Ð¹ файл" #~ msgid "Checking user uploadable file: %s" #~ msgstr "Проверка отгружаемого файла пользователÑ: %s" #~ msgid "User has uploaded file %s" #~ msgstr "Пользователь отгрузил файл %s" #~ msgid "Failed writing changed input file." #~ msgstr "Ðе удалоÑÑŒ запиÑать изменившийÑÑ Ð²Ñ…Ð¾Ð´Ð½Ð¾Ð¹ файл." 
#~ msgid "Critical error for uploadable file %s" #~ msgstr "КритичеÑÐºÐ°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° Ð´Ð»Ñ Ð¾Ñ‚Ð³Ñ€ÑƒÐ¶Ð°ÐµÐ¼Ð¾Ð³Ð¾ файла %s" #~ msgid "No changes in uploadable files for %u seconds" #~ msgstr "Ðикаких изменений в отгружаемых файлах в течение %u Ñек" #~ msgid "Uploadable files timed out" #~ msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð³Ñ€ÑƒÐ¶Ð°ÐµÐ¼Ñ‹Ñ… файлов" #~ msgid "Leaving downloader (%i)" #~ msgstr "Выход из загрузчика (%i)" #~ msgid "Failed uploading file %s - %s" #~ msgstr "Ðе удалоÑÑŒ отгрузить файл %s - %s" #~ msgid "Uploaded file %s" #~ msgstr "Закачан файл %s" #~ msgid "Uploader started" #~ msgstr "Отгрузчик запущен" #~ msgid "Reading output files from user generated list in %s" #~ msgstr "Чтение выходных файлов в ÑпиÑке Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s" #~ msgid "Error reading user generated output file list in %s" #~ msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ ÑпиÑка выходных файлов Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² %s" #~ msgid "Two identical output destinations: %s" #~ msgstr "Два одинаковых Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð´Ð»Ñ Ð²Ñ‹Ð´Ð°Ñ‡Ð¸: %s" #~ msgid "Cannot upload two different files %s and %s to same LFN: %s" #~ msgstr "Ðевозможно запиÑать два разных файла %s и %s Ñ Ð¾Ð´Ð½Ð¸Ð¼ LFN: %s" #~ msgid "Local destination for uploader %s" #~ msgstr "Локальный файл-приёмник Ð´Ð»Ñ Ð¾Ñ‚Ð³Ñ€ÑƒÐ·Ñ‡Ð¸ÐºÐ° %s" #~ msgid "Uploaded %s" #~ msgstr "Отгружен %s" #~ msgid "Failed writing output status file" #~ msgstr "Ðе удалоÑÑŒ запиÑать выходной файл ÑоÑтоÑниÑ" #~ msgid "Failed to upload (but may be retried) %s" #~ msgstr "Ðе удалоÑÑŒ выгрузить (возможна Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°) %s" #~ msgid "Failed to upload %s" #~ msgstr "Ðе удалоÑÑŒ отгрузить %s" #~ msgid "Some uploads failed" #~ msgstr "Ðекоторые отгрузки не удалиÑÑŒ" #~ msgid "Writing back dynamic output file %s" #~ msgstr "ЗапиÑÑŒ динамичеÑкого ÑпиÑка выходных файлов %s" #~ msgid "Failed to rewrite output file list %s. Job resuming may not work" #~ msgstr "" #~ "Ðе удалоÑÑŒ перезапиÑать ÑпиÑок выходных файлов %s. 
ПерезапуÑк задач может " #~ "не работать" #~ msgid "Some uploads failed, but (some) may be retried" #~ msgstr "" #~ "Ðекоторые выгрузки не удалиÑÑŒ (Ð´Ð»Ñ Ð½ÐµÐºÐ¾Ñ‚Ð¾Ñ€Ñ‹Ñ… возможна Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°)" #~ msgid "Failed writing changed output file" #~ msgstr "Ðе удалоÑÑŒ запиÑать изменившийÑÑ Ð²Ñ‹Ñ…Ð¾Ð´Ð½Ð¾Ð¹ файл" #~ msgid "Leaving uploader (%i)" #~ msgstr "Отгрузчик покидаетÑÑ (%i)" #~ msgid "system retval: %d" #~ msgstr "СиÑтемное значение retval: %d" #~ msgid "" #~ "MigrateActivity: request = \n" #~ "%s" #~ msgstr "" #~ "MigrateActivity: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "MigrateActivitys: no ActivityIdentifier found" #~ msgstr "MigrateActivitys: не обнаружен ActivityIdentifier" #~ msgid "MigrateActivity: EPR contains no JobID" #~ msgstr "MigrateActivity: EPR не Ñодержит JobID" #~ msgid "MigrateActivity: Failed to accept delegation" #~ msgstr "MigrateActivity: невозможно принÑть делегирование" #~ msgid "MigrateActivity: no job description found" #~ msgstr "MigrateActivity: не обнаружено опиÑание задачи" #~ msgid "Migration XML sent to AREXJob: %s" #~ msgstr "Миграционный документ XML поÑлан к AREXJob: %s" #~ msgid "MigrateActivity: Failed to migrate new job: %s" #~ msgstr "MigrateActivity: невозможно мигрировать новую задачу: %s" #~ msgid "MigrateActivity: Failed to migrate new job" #~ msgstr "MigrateActivity: невозможно мигрировать новую задачу" #~ msgid "MigrateActivity finished successfully" #~ msgstr "MigrateActivity уÑпешно завершён" #~ msgid "" #~ "MigrateActivity: response = \n" #~ "%s" #~ msgstr "" #~ "MigrateActivity: отзыв = \n" #~ "%s" #~ msgid "Put: there is no job: %s - %s" #~ msgstr "Put: задача отÑутÑтвует: %s - %s" #~ msgid "Put: there is no payload for file %s in job: %s" #~ msgstr "Put: отÑутÑтвует Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ файле %s в задании: %s" #~ msgid "Put: unrecognized payload for file %s in job: %s" #~ msgstr "Put: Ð½ÐµÐ¿Ñ€Ð¸ÐµÐ¼Ð»ÐµÐ¼Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ файле %s в задании: %s" #~ msgid "Put: failed to create file %s for job %s - %s" #~ msgstr "Put: не удалоÑÑŒ Ñоздать файл %s Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ð½Ð¸Ñ %s - %s" #~ msgid "Put: failed to set position of file %s for job %s to %Lu - %s" #~ msgstr "" #~ "Put: не удалоÑÑŒ уÑтановить позицию файла %s Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ð½Ð¸Ñ %s на %Lu - %s" #~ msgid "Put: failed to allocate memory for file %s in job %s" #~ msgstr "Put: не удалоÑÑŒ зарезервировать памÑть Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s в задании %s" #~ msgid "" #~ "TerminateActivities: request = \n" #~ "%s" #~ msgstr "" #~ "TerminateActivities: Ð·Ð°Ð¿Ñ€Ð¾Ñ = \n" #~ "%s" #~ msgid "TerminateActivities: non-AREX job requested" #~ msgstr "TerminateActivities: запрошена задача, неÑовмеÑÑ‚Ð¸Ð¼Ð°Ñ Ñ AREX" #~ msgid "TerminateActivities: job %s - %s" #~ msgstr "TerminateActivities: задача %s - %s" #~ msgid "" #~ "TerminateActivities: response = \n" #~ "%s" #~ msgstr "" #~ "TerminateActivities: ответ = \n" #~ "%s" #~ msgid "Response is not expected WS-RP" #~ msgstr "Отзыв не ÑвлÑетÑÑ Ð¾Ð¶Ð¸Ð´Ð°ÐµÐ¼Ñ‹Ð¼ WS-RP" #~ msgid "CacheService: Unauthorized" #~ msgstr "CacheService: Ðет допуÑка" #~ msgid "Only POST is supported in CacheService" #~ msgstr "CacheService поддерживает только POST" #~ msgid "Connecting to %s:%i" #~ msgstr "Соединение Ñ %s:%i" #~ msgid "Querying at %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ðº %s" #~ msgid "Failed to get results from LDAP server %s" #~ msgstr "Ðе удалоÑÑŒ получить информацию Ñ Ñервера LDAP %s" #~ msgid "LDAP authorization is not supported" #~ msgstr "ÐÐ²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ LDAP не поддерживаетÑÑ" #~ msgid "" #~ "Configuration section [vo] is missing name. 
Check for presence of name= " #~ "or vo= option." #~ msgstr "" #~ "Раздел наÑтроек [vo] не Ñодержит имени. УбедитеÑÑŒ в наличии опций name= " #~ "или vo= ." #~ msgid "Missing option for command daemon" #~ msgstr "Пропущены наÑтраиваемые параметры Ð´Ð»Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð½Ð¾Ð³Ð¾ демона" #~ msgid "Wrong option in daemon" #~ msgstr "Ðеверные опции в демоне" #~ msgid "Improper size of log '%s'" #~ msgstr "ÐедопуÑтимый размер журнала '%s'" #~ msgid "Improper number of logs '%s'" #~ msgstr "ÐедопуÑтимое количеÑтво журналов '%s'" #~ msgid "Improper argument for logsize '%s'" #~ msgstr "ÐедопуÑтимый аргумент Ð´Ð»Ñ Ñ€Ð°Ð·Ð¼ÐµÑ€Ð° журнала '%s'" #~ msgid "" #~ "Central configuration file is missing at guessed location:\n" #~ " /etc/arc.conf\n" #~ "Use ARC_CONFIG variable for non-standard location" #~ msgstr "" #~ "Общий файл наÑтроек отÑутÑтвует в обычном меÑте:\n" #~ " /etc/arc.conf\n" #~ "ИÑпользуйте переменную Ñреды ARC_CONFIG Ð´Ð»Ñ Ð½ÐµÐ¾Ð±Ñ‹Ñ‡Ð½Ñ‹Ñ… меÑÑ‚" #~ msgid "couldn't open file %s" #~ msgstr "не удалоÑÑŒ открыть файл %s" #~ msgid "unknown (non-gridmap) user is not allowed" #~ msgstr "неизвеÑтный (не занеÑённый в gridmap) пользователь не допуÑкаетÑÑ" #~ msgid "couldn't process VO configuration" #~ msgstr "не удалоÑÑŒ обработать наÑтройки ВО" #~ msgid "can't parse configuration line: %s %s %s %s" #~ msgstr "невозможно разобрать Ñтроку наÑтроек: %s %s %s %s" #~ msgid "bad directory in plugin command: %s" #~ msgstr "неверный каталог в команде подключаемого модулÑ: %s" #~ msgid "unsupported configuration command: %s" #~ msgstr "Ð½ÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ Ð¸Ð½ÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ Ð½Ð°Ñтроек: %s" #~ msgid "improper attribute for allowunknown command: %s" #~ msgstr "недопуÑтимый атрибут команды allowunknown: %s" #~ msgid "Mapfile is missing at %s" #~ msgstr "Файл припиÑки пользователей отÑутÑтвует в %s" #~ msgid "There is no local mapping for user" #~ msgstr "Пользователь не припиÑан ни к одному локальному имени" #~ msgid "There is no local name for user" #~ msgstr "Локальное Ð¸Ð¼Ñ Ð¿Ñ€Ð¸Ð¿Ð¸Ñки Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½Ðµ указано" #~ msgid "Initially mapped to local user: %s" #~ msgstr "ÐÐ°Ñ‡Ð°Ð»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¸Ð¿Ð¸Ñка к локальному имени пользователÑ: %s" #~ msgid "Initially mapped to local group: %s" #~ msgstr "ÐŸÑ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¸Ð²Ñзка к локальной группе: %s" #~ msgid "Local user does not exist" #~ msgstr "Локальное Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½Ðµ ÑущеÑтвует" #~ msgid "config: %s, class name: %s" #~ msgstr "наÑтройки: %s, клаÑÑ: %s" #~ msgid "libjvm.so not loadable - check your LD_LIBRARY_PATH" #~ msgstr "libjvm.so не можетр быть подгружена - проверьте LD_LIBRARY_PATH" #~ msgid "libjvm.so does not contain the expected symbols" #~ msgstr "libjvm.so не Ñодержит ожидаемых Ñимволов" #~ msgid "JVM started" #~ msgstr "Запущена JVM" #~ msgid "There is no service: %s in your Java class search path" #~ msgstr "ОтÑутÑтвие уÑлуги %s в пути поиÑка клаÑÑов Java" #~ msgid "There is no constructor function" #~ msgstr "ОтÑутÑтвует конÑтруктор" #~ msgid "%s constructed" #~ msgstr "%s Ñоздан" #~ msgid "Destroy JVM" #~ msgstr "Уничтожение JVM" #~ msgid "Cannot find MCC_Status object" #~ msgstr "Ðе удалоÑÑŒ обнаружить объект MCC_Status" #~ msgid "Java object returned NULL status" #~ msgstr "Объект Java возвратил ÑÑ‚Ð°Ñ‚ÑƒÑ NULL" #~ msgid "" #~ "The 'remote_host' attribute value is empty - a host name was expected" #~ msgstr "Значение Ñлемента 'remote_host' пуÑто - ожидалоÑÑŒ название реÑурÑа" #~ msgid "The 'remoteHost' element value is empty - a host name was expected" #~ msgstr "Значение Ñлемента 
#~ msgid "The 'remoteHost' element value is empty - a host name was expected"
#~ msgstr "Значение элемента 'remoteHost' пусто - ожидалось название ресурса"

#, fuzzy
#~ msgid "Failed processing user mapping command: unixlistmap %s"
#~ msgstr "Сбой работы команды соответствия пользователя: unixmap %s"

#, fuzzy
#~ msgid "failed to initialize environment variables"
#~ msgstr "Сбой инициализации параметров доступа для делегирования"

#~ msgid "Using non-RFC proxy so only local delivery can be used"
#~ msgstr "Используется несовместимая с RFC доверенность, поэтому возможна только локальная доставка"

#~ msgid "Using non-RFC proxy so forcing local delivery"
#~ msgstr "Используется несовместимая с RFC доверенность, вынужденно используется локальная доставка"

#~ msgid "use GSI proxy (RFC 3820 compliant proxy is default)"
#~ msgstr ""
#~ "использовать доверенность GSI (по умолчанию используется\n"
#~ " RFC 3820-совместимая доверенность)"

#~ msgid "Can not set the STORE_CTX for chain verification"
#~ msgstr "Не удалось задать STORE_CTX для подтверждения цепи"

#~ msgid "X509_V_ERR_PATH_LENGTH_EXCEEDED"
#~ msgstr "X509_V_ERR_PATH_LENGTH_EXCEEDED"

#~ msgid "X509_V_ERR_PATH_LENGTH_EXCEEDED --- with proxy"
#~ msgstr "X509_V_ERR_PATH_LENGTH_EXCEEDED --- с доверенностью"

#~ msgid "X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION"
#~ msgstr "X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION"

#~ msgid ""
#~ "The proxy to be signed should be compatible with the signing certificate: "
#~ "(%s) -> (%s)"
#~ msgstr "Подписываемая доверенность должна быть совместима с подписывающим сертификатом: (%s) -> (%s)"

#~ msgid "The proxy depth %i is out of maximum limit %i"
#~ msgstr "Глубина доверенности %i превышает предел %i"

#~ msgid "proxy_depth: %i, path_length: %i"
#~ msgstr "proxy_depth: %i, path_length: %i"

#~ msgid "Can not convert DER encoded PROXYCERTINFO extension to internal format"
#~ msgstr "Невозможно преобразовать расширение PROXYCERTINFO в кодировке DER во внутренний формат"

#~ msgid "Found more than one PCI extension"
#~ msgstr "Обнаружено более одного расширения PCI"

#~ msgid "Globus legacy proxies can not carry policy data or path length constraints"
#~ msgstr "Устаревшие доверенности Globus не могут содержать данные о политиках или ограничения по длине пути"

#~ msgid "RSA_generate_key failed"
#~ msgstr "Сбой метода RSA_generate_key"

#~ msgid "Can not get X509V3_EXT_METHOD for %s"
#~ msgstr "Невозможно извлечь X509V3_EXT_METHOD для %s"

#, fuzzy
#~ msgid "Can not get policy from PROXYCERTINFO extension"
#~ msgstr "Невозможно извлечь политику из расширения PROXYCERTINFO"

#, fuzzy
#~ msgid "Can not get policy language from PROXYCERTINFO extension"
#~ msgstr "Невозможно извлечь язык политики из расширения PROXYCERTINFO"

#~ msgid "Can't get X509V3_EXT_METHOD for %s"
#~ msgstr "Невозможно извлечь X509V3_EXT_METHOD для %s"

#~ msgid "Can not get extension from issuer certificate"
#~ msgstr "Невозможно извлечь расширение из сертификата агентства"

#~ msgid "Failed to add extension into proxy"
#~ msgstr "Сбой добавления расширения в доверенность"

#~ msgid ""
#~ "The signing algorithm %s is not allowed,it should be SHA1/SHA2 to sign "
#~ "certificate requests"
#~ msgstr "Недопустимый алгоритм подписи %s: запросы сертификата должны подписываться SHA1 или SHA2"
EEC" #~ msgid "" #~ "Resource information provider timed out: %u seconds. Using heartbeat file " #~ "from now on... Consider increasing infoproviders_timeout in arc.conf" #~ msgstr "" #~ "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð½Ð° Ñбор информации о реÑурÑе: %u Ñекунд. ПроверÑетÑÑ " #~ "контрольный файл... Попробуйте увеличить значение infoproviders_timeout в " #~ "arc.conf" #~ msgid "" #~ "Resource information provider timed out: %u seconds. Checking heartbeat " #~ "file..." #~ msgstr "" #~ "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð½Ð° Ñбор информации о реÑурÑе: %u Ñекунд. ПроверÑетÑÑ " #~ "контрольный файл..." #~ msgid "" #~ "Cannot stat %s. Are infoproviders running? This message will not be " #~ "repeated." #~ msgstr "" #~ "Ðевозможно проверить %s. Запущены ли Ñборщики информации? Это Ñообщение " #~ "не будет больше повторÑтьÑÑ." #~ msgid "" #~ "Cannot stat %s. Are infoproviders running? It happened already %d times." #~ msgstr "" #~ "Ðевозможно проверить %s. Запущены ли Ñборщики информации? Это уже %d-й " #~ "раз." #~ msgid "" #~ "Checked time: %d | Heartbeat file stat: %d | %s has not beed touched " #~ "before timeout (%d). \n" #~ " The performance is too low, infoproviders will be killed. A-REX " #~ "functionality is not ensured." #~ msgstr "" #~ "Ð’Ñ€ÐµÐ¼Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸: %d | ПоÑледнее контрольное обновление: %d | %s не " #~ "обновилÑÑ Ð² Ñрок (%d). \n" #~ " ÐÐ¸Ð·ÐºÐ°Ñ Ð¿Ñ€Ð¾Ð¸Ð·Ð²Ð¾Ð´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ñть, Ñборщики информации будут оÑтановлены. " #~ "РаботоÑпоÑобноÑть A-REX под угрозой." #~ msgid "Found recent heartbeat file %s , waiting other %d seconds" #~ msgstr "" #~ "Обнаружен недавно обновлённый контрольный файл %s , ожидание ещё %d Ñекунд" #~ msgid "Submit: Failed to disconnect after submission" #~ msgstr "ЗаÑылка: Сбой отÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле заÑылки" #~ msgid "EMIES:WipeActivity: job %s - state is %s, not terminal" #~ msgstr "EMIES:WipeActivity: задача %s - ÑоÑтоÑние %s, не конечное" #~ msgid "Unable to copy %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно Ñкопировать %s: Ðе обнаружено дейÑтвительных параметров доÑтупа" #~ msgid "Unable to list content of %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно проÑмотреть Ñодержимое %s: Ðе обнаружено дейÑтвительных " #~ "параметров доÑтупа" #~ msgid "Unable to create directory %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно Ñоздать директорию %s: Ðе обнаружено дейÑтвительных параметров " #~ "доÑтупа" #~ msgid "Unable to rename %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно переименовать %s: Ðе обнаружено дейÑтвительных параметров " #~ "доÑтупа" #~ msgid "Unable to remove file %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно Ñтереть %s: Ðе обнаружено дейÑтвительных параметров доÑтупа" #~ msgid "year" #~ msgid_plural "years" #~ msgstr[0] "год" #~ msgstr[1] "года" #~ msgstr[2] "лет" #~ msgid "month" #~ msgid_plural "months" #~ msgstr[0] "меÑÑц" #~ msgstr[1] "меÑÑца" #~ msgstr[2] "меÑÑцев" #~ msgid "day" #~ msgid_plural "days" #~ msgstr[0] "день" #~ msgstr[1] "днÑ" #~ msgstr[2] "дней" #~ msgid "arc_to_voms - %u attributes" #~ msgstr "arc_to_voms - %u атрибут(а)" #~ msgid "arc_to_voms: attribute: %s" #~ msgstr "arc_to_voms: атрибут: %s" #~ msgid "%s: Failed switching user" #~ msgstr "%s: Сбой при Ñмене пользователÑ" #~ msgid "Job could have died due to expired proxy: restarting" #~ msgstr "" #~ "Возможно, иÑполнение задачи прервалоÑÑŒ из-за проÑроченной доверенноÑти: " #~ "перезапуÑк" #~ msgid "Failed to report renewed proxy to job" #~ msgstr "Ðе удалоÑÑŒ Ñообщить задаче о новой доверенноÑти" #~ msgid "" #~ "Proxy 
#~ msgid ""
#~ "Proxy certificate path was not explicitly set or does not exist or has\n"
#~ "improper permissions/ownership and not found at default location.\n"
#~ "Key/certificate paths were not explicitly set or do not exist or have\n"
#~ "improper permissions/ownership and usercert.pem/userkey.pem not found\n"
#~ "at default locations:\n"
#~ "~/.arc/, ~/.globus/, %s/etc/arc, and ./.\n"
#~ "If the proxy or certificate/key does exist, please manually specify the "
#~ "locations via env\n"
#~ "X509_USER_CERT/X509_USER_KEY or X509_USER_PROXY, or the certificatepath/"
#~ "keypath or proxypath\n"
#~ "item in client.conf\n"
#~ "If the certificate/key does exist, and proxy is needed to be generated, "
#~ "please\n"
#~ "use arcproxy utility to create a proxy certificate."
#~ msgstr ""
#~ "Местонахождение доверенности не задано явно, либо не существует,\n"
#~ "либо у Вас недостаточные привилегии, а в стандартном месте её нет.\n"
#~ "Местонахождения закрытого/открытого ключей не заданы явно, либо их нет,\n"
#~ "либо у Вас недостаточные привилегии, а файлов usercert.pem/userkey.pem нет\n"
#~ "в стандартных местах:\n"
#~ "~/.arc/, ~/.globus/, %s/etc/arc, и ./.\n"
#~ "Если у Вас есть эти файлы, пожалуйста, укажите вручную их расположения с помощью\n"
#~ "переменных X509_USER_CERT/X509_USER_KEY и/или X509_USER_PROXY, либо задав значения\n"
#~ "certificatepath/keypath или proxypath в файле настроек клиента client.conf\n"
#~ "Если у вас есть ключи, но нет доверенности, используйте средство arcproxy для её создания."

#~ msgid "LDAP authorization is not implemented yet"
#~ msgstr "Авторизация для LDAP ещё не реализована"

#~ msgid "Match group: %s"
#~ msgstr "Совпадение группы: %s"

#~ msgid "Match capabilities: %s"
#~ msgstr "Совпадение возможности: %s"

#~ msgid "Failed writing RSL"
#~ msgstr "Сбой записи RSL"

#~ msgid ""
#~ "Parsing error:\n"
#~ "%s"
#~ msgstr ""
#~ "Ошибка разбора:\n"
#~ "%s"

#~ msgid "Parsing string using ADLParser"
#~ msgstr "Разбор строки с использованием ADLParser"

#~ msgid "[ADLParser] Parsing error: %s\n"
#~ msgstr "[ADLParser] Ошибка разбора: %s\n"

#~ msgid "[ADLParser] Wrong XML structure! "
#~ msgstr "[ADLParser] Неверная структура XML! "

#~ msgid "Parsing string using ARCJSDLParser"
#~ msgstr "Обработка строки с использованием ARCJSDLParser"

#~ msgid "[ARCJSDLParser] XML parsing error: %s\n"
#~ msgstr "[ARCJSDLParser] Ошибка разбора XML: %s\n"

#~ msgid "[ARCJSDLParser] Wrong XML structure! "
#~ msgstr "[ARCJSDLParser] Неверная структура XML! "

#~ msgid "Parsing string using JDLParser"
#~ msgstr "Разбор строки с использованием JDLParser"

#~ msgid ""
#~ "[JDLParser] There is at least one necessary square bracket missing or "
#~ "their order is incorrect. ('[' or ']')"
#~ msgstr "[JDLParser] По крайней мере одна квадратная скобка отсутствует, или их порядок неверен ('[' или ']')."
#~ msgid "Can't evaluate left operand for RSL concatenation: %s" #~ msgstr "" #~ "Ðевозможно определить значение левого операнда Ð´Ð»Ñ Ð¿Ð¾Ð´Ñ†ÐµÐ¿Ð»ÐµÐ½Ð¸Ñ RSL: %s" #~ msgid "Can't evaluate right operand for RSL concatenation: %s" #~ msgstr "" #~ "Ðевозможно определить значение правого операнда Ð´Ð»Ñ Ð¿Ð¾Ð´Ñ†ÐµÐ¿Ð»ÐµÐ½Ð¸Ñ RSL: %s" #~ msgid "Can't evaluate RSL list member: %s" #~ msgstr "Ðевозможно определить значение Ñлемента ÑпиÑка RSL: %s" #~ msgid "Can't evaluate RSL sequence member: %s" #~ msgstr "Ðевозможно определить значение члена поÑледовательноÑти RSL: %s" #~ msgid "Unknown RSL value type - should not happen" #~ msgstr "ÐеизвеÑтный тип Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ RSL - не должно ÑлучатьÑÑ" #~ msgid "RSL (inside multi) could not be evaluated: %s" #~ msgstr "RSL (внутри множеÑтвенного опиÑаниÑ) не может быть обработан: %s" #~ msgid "RSL could not be evaluated: %s" #~ msgstr "RSL не может быть обработан: %s" #~ msgid "Can't evaluate RSL fragment: %s" #~ msgstr "Ðевозможно обработать фрагмент RSL: %s" #~ msgid "Can't evaluate RSL substitution variable name: %s" #~ msgstr "Ðевозможно определить Ð¸Ð¼Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð¹ Ð´Ð»Ñ Ð·Ð°Ð¼ÐµÐ½Ñ‹ RSL: %s" #~ msgid "Can't evaluate RSL substitution variable value: %s" #~ msgstr "Ðевозможно определить значение переменной Ð´Ð»Ñ Ð·Ð°Ð¼ÐµÐ½Ñ‹ RSL: %s" #~ msgid "Can't evaluate RSL condition value: %s" #~ msgstr "Ðевозможно определить значение уÑÐ»Ð¾Ð²Ð¸Ñ RSL: %s" #~ msgid "Unknown RSL type - should not happen" #~ msgstr "ÐеизвеÑтный тип RSL - Ñто не должно ÑлучатьÑÑ" #~ msgid "RSL parsing failed at position %ld" #~ msgstr "Сбой обработки RSL на позиции %ld" #~ msgid "Expected ) at position %ld" #~ msgstr "ОжидаетÑÑ ) на позиции %ld" #~ msgid "Expected ( at position %ld" #~ msgstr "ОжидаетÑÑ ( на позиции %ld" #~ msgid "Expected variable name at position %ld" #~ msgstr "ОжидаетÑÑ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸Ðµ переменной на позиции %ld" #~ msgid "Broken string at position %ld" #~ msgstr "ÐŸÐ¾Ð²Ñ€ÐµÐ¶Ð´Ñ‘Ð½Ð½Ð°Ñ Ñтрока на позиции %ld" #~ msgid "RSL parsing error at position %ld" #~ msgstr "Ошибка обработки RSL на позиции %ld" #~ msgid "Expected attribute name at position %ld" #~ msgstr "ОжидаетÑÑ Ð½Ð°Ð·Ð²Ð°Ð½Ð¸Ðµ атрибута на позиции %ld" #~ msgid "Expected relation operator at position %ld" #~ msgstr "ОжидаетÑÑ Ð¾Ð¿ÐµÑ€Ð°Ñ‚Ð¾Ñ€ ÑÑ€Ð°Ð²Ð½ÐµÐ½Ð¸Ñ Ð½Ð° позиции %ld" #~ msgid "Xrsl attribute join is set but attribute stdout is not set" #~ msgstr "Задан атрибут xRSL join, но атрибут stdout пропущен" #~ msgid "Xrsl attribute join is set but attribute stderr is also set" #~ msgstr "Задан атрибут xRSL join, но также задан атрибут stderr" #~ msgid "Parsing string using XRSLParser" #~ msgstr "Обработка Ñтроки Ñ Ð¸Ñпользованием XRSLParser" #~ msgid "XRSL parsing error" #~ msgstr "Ошибка обработки XRSL" #~ msgid "filename cannot be empty." #~ msgstr "Ð˜Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° не может быть пуÑтым." #~ msgid "" #~ "Parsing the queue xrsl attribute failed. An invalid comparison operator " #~ "was used, only \"=\" is allowed." #~ msgstr "" #~ "Ошибка разбора атрибута XRSL queue. ИÑпользуетÑÑ Ð½ÐµÐ´Ð¾Ð¿ÑƒÑтимый оператор " #~ "ÑравнениÑ, допуÑкаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ \"=\"." #~ msgid "%d Queues" #~ msgstr "%d Очереди" #~ msgid "Queue Information:" #~ msgstr "Ð¡Ð²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾Ð± очереди:" #~ msgid "" #~ "Localtransfer is deprecated, but turned on in arc.conf. Job will be " #~ "submitted with localtransfer=no." #~ msgstr "" #~ "ÐžÐ¿Ñ†Ð¸Ñ localtransfer более не поддерживаетÑÑ, но указана в arc.conf. " #~ "Задача будет запущена Ñ Ð¾Ð¿Ñ†Ð¸ÐµÐ¹ localtransfer=no." #~ msgid "Localtransfer deprecated. 
#~ msgid "Localtransfer deprecated. Localtransfer has been turned off."
#~ msgstr "Опция localtransfer более не поддерживается и отключена."

#~ msgid "Permission checking failed"
#~ msgstr "Проверка прав доступа не удалась"

#~ msgid "Cache file valid until: %s"
#~ msgstr "Файл в кэше действителен до: %s"

#~ msgid "Changing old validity time format to new in %s"
#~ msgstr "Заменяется старый формат срока годности на новый в %s"

#~ msgid "%s: adding to transfer share %s"
#~ msgstr "%s: добавляется к трансферной доле %s"

#~ msgid "%s: state: %s: starting new child"
#~ msgstr "%s: Состояние: %s: запускается новый дочерний процесс"

#~ msgid "%s: State %s: starting child: %s"
#~ msgstr "%s: Состояние %s: запускается дочерний процесс: %s"

#~ msgid "%s: Failed to run uploader process"
#~ msgstr "%s: Не удалось запустить процесс отгрузчика"

#~ msgid "%s: Failed to run downloader process"
#~ msgstr "%s: Не удалось запустить процесс загрузчика"

#~ msgid "%s: State: PREPARING/FINISHING: child is running"
#~ msgstr "%s: состояние PREPARING/FINISHING: дочерний процесс исполняется"

#~ msgid "%s: State: PREPARING: child exited with code: %i"
#~ msgstr "%s: состояние PREPARING: дочерний процесс завершился с кодом выхода: %i"

#~ msgid "%s: State: FINISHING: child exited with code: %i"
#~ msgstr "%s: состояние: FINISHING: дочерний процесс завершился с кодом выхода: %i"

#~ msgid "%s: State: FINISHING: unrecoverable error detected (exit code 1)"
#~ msgstr "%s: состояние FINISHING: обнаружена неисправимая ошибка (код выхода 1)"

#~ msgid "%s: State: PREPARING: unrecoverable error detected (exit code 1)"
#~ msgstr "%s: состояние PREPARING: обнаружена неисправимая ошибка (код выхода 1)"

#~ msgid "%s: State: PREPARING/FINISHING: retryable error"
#~ msgstr "%s: состояние PREPARING/FINISHING: исправимая ошибка"

#~ msgid "%s: State: %s: credentials probably expired (exit code %i)"
#~ msgstr "%s: состояние: %s: вероятно, истёк срок действия параметров доступа (код выхода %i)"

#~ msgid "%s: State: %s: trying to renew credentials"
#~ msgstr "%s: Состояние: %s: попытка обновить параметры доступа"

#~ msgid "%s: State: %s: failed to renew credentials"
#~ msgstr "%s: Состояние: %s: невозможно обновить параметры доступа"

#~ msgid "%s: State: %s: failed to create temporary proxy for renew: %s"
#~ msgstr "%s: Состояние: %s: не удалось создать временную доверенность для обновления: %s"

#~ msgid ""
#~ "%s: State: %s: some error detected (exit code %i). Recover from such type "
#~ "of errors is not supported yet."
#~ msgstr "%s: Состояние: %s: обнаружена ошибка (код выхода %i). Восстановление после такой ошибки пока что не поддерживается."

#~ msgid "%s: Data staging failed. No retries left."
#~ msgstr "%s: Сбой размещения данных. Все попытки вышли."

#~ msgid "%s: Download failed. %d retries left. Will wait for %ds before retrying"
#~ msgstr "%s: Сбой загрузки. Осталось %d попыток. Повторная попытка через %dс"

#~ msgid "%s: Upload failed. No retries left."
#~ msgstr "%s: Сбой отгрузки. Все попытки вышли."
#~ msgid "%s: Upload failed. %d retries left. Will wait for %ds before retrying."
#~ msgstr "%s: Сбой отгрузки. Осталось %d попыток. Повторная попытка через %dс"

#~ msgid "Wrong number in speedcontrol: %s"
#~ msgstr "Недопустимое число в speedcontrol: %s"

#~ msgid "Wrong number in maxtransfertries"
#~ msgstr "Недопустимое число в maxtransfertries"

#~ msgid "Empty root directory for GACL plugin"
#~ msgstr "Корневой каталог расширения GACL пуст"

#~ msgid "Failed to parse default GACL document"
#~ msgstr "Не удалось разобрать документ GACL по умолчанию"

#~ msgid "Mount point %s creation failed."
#~ msgstr "Не удалось создать точку подключения %s."

#~ msgid "Creation of top level ACL %s failed."
#~ msgstr "Не удалось создать правила доступа высшего уровня %s."

#~ msgid "plugin(gacl): open: %s"
#~ msgstr "Подключаемый модуль(gacl): открытие: %s"

#~ msgid "Failed to parse GACL"
#~ msgstr "Невозможно обработать GACL"

#~ msgid "GACL without is not allowed"
#~ msgstr "Инструкция GACL без недопустима"

#~ msgid "Failed to save GACL"
#~ msgstr "Невозможно сохранить GACL"

#~ msgid "GACL file %s is not an ordinary file"
#~ msgstr "Файл GACL %s не является обычным файлом"

#~ msgid "GACL description for file %s could not be loaded"
#~ msgstr "Правила GACL для файла %s не могут быть загружены"

#~ msgid "Request failed: No response"
#~ msgstr "Запрос не удался: нет ответа"

#~ msgid "Request failed: Error"
#~ msgstr "Запрос не удался: ошибка."

#~ msgid "Request succeeded!!!"
#~ msgstr "Запрос удался!!!"

#~ msgid "SP Service name is %s"
#~ msgstr "Имя службы провайдера услуг: %s"

#~ msgid "SAML Metadata is from %s"
#~ msgstr "Метаданные SAML из %s"

#~ msgid "saml2SP: Unauthorized"
#~ msgstr "SAML2SP: Доступ закрыт"

#~ msgid "no input payload"
#~ msgstr "пустая нагрузка на входе"

#~ msgid "Using private key file to sign: %s"
#~ msgstr "Используется файл личного ключа для подписи: %s"

#~ msgid "After signature: %s"
#~ msgstr "После подписи: %s"

#~ msgid "Encrypted SAML assertion: %s"
#~ msgstr "Зашифрованное утверждение SAML: %s"

#~ msgid "Can not decrypt the EncryptedAssertion from SAML response"
#~ msgstr "Не удалось расшифровать EncryptedAssertion из отзыва SAML"

#~ msgid "Decrypted SAML Assertion: %s"
#~ msgstr "Расшифрованное утверждение SAML: %s"

#~ msgid "Encrypted name ID: %s"
#~ msgstr "Зашифрованный идентификатор имени: %s"

#~ msgid "Can not decrypt the EncryptedID from SAML assertion"
#~ msgstr "Не удалось расшифровать EncryptedID из утверждения SAML"

#~ msgid "Decrypted SAML name ID: %s"
#~ msgstr "Расшифрованный идентификатор имени SAML: %s"

#~ msgid "saml:Conditions, current time: %s is before the start time: %s"
#~ msgstr "saml:Conditions, текущее время: %s раньше времени начала: %s"

#~ msgid "saml:Conditions, current time: %s is after the end time: %s"
#~ msgstr "saml:Conditions, текущее время: %s позже времени окончания: %s"

#~ msgid "saml:Subject, current time is before the start time"
#~ msgstr "SAML:Subject, текущее время раньше времени начала"

#~ msgid "saml:Subject, current time is after the end time"
#~ msgstr "SAML:Subject, текущее время позже времени окончания"

#~ msgid "Can not get saml:Assertion or saml:EncryptedAssertion from IdP"
#~ msgstr "Невозможно получить SAML:Assertion или SAML:EncryptedAssertion от IdP"

#~ msgid "Succeeded to verify the signature under "
#~ msgstr "Подпись успешно подтверждена"

#~ msgid "Failed to verify the signature under "
#~ msgstr "Подпись не подтверждена"
"authentication: %s" #~ msgstr "" #~ "Параметр NameID в запроÑе идентичен NameID при проверке подлинноÑти TLS: " #~ "%s" #~ msgid "" #~ "Access database %s from server %s port %s, with user %s and password %s" #~ msgstr "" #~ "ДоÑтуп к базе данных %s на Ñервере %s по порту %s, как пользователь %s Ñ " #~ "паролем %s" #~ msgid "Can't establish connection to mysql database" #~ msgstr "Ðе удалоÑÑŒ уÑтановить ÑвÑзь Ñ Ð±Ð°Ð·Ð¾Ð¹ данных mysql" #~ msgid "Is connected to database? %s" #~ msgstr "ЕÑть ли ÑвÑзь Ñ Ð±Ð°Ð·Ð¾Ð¹ данных? %s" #~ msgid "Query: %s" #~ msgstr "ЗапроÑ: %s" #~ msgid "Get result array with %d rows" #~ msgstr "Получен маÑÑив результатов из %d Ñтрок" #~ msgid "Can not find StatusCode" #~ msgstr "Ðе обнаружен StatusCode" #~ msgid "" #~ "SAML Assertion parsed from SP Service:\n" #~ "%s" #~ msgstr "" #~ "Утверждение SAML выделенное из ÑервиÑа SP:\n" #~ "%s" #~ msgid "Can not get SAMLAssertion SecAttr from outgoing message AuthContext" #~ msgstr "" #~ "Ðевозможно получить SAMLAssertion SecAttr из иÑходÑщего ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ " #~ "AuthContext" #~ msgid "MessageAuthContext can not be parsed from outgoing message" #~ msgstr "Ðевозможно выделить MessageAuthContext из иÑходÑщего ÑообщениÑ" #~ msgid "Process: POST" #~ msgstr "ПроцеÑÑ: POST" #~ msgid "SOAP body does not include any request node" #~ msgstr "Тело SOAP не Ñодержит запроÑов" #~ msgid "Request: %s" #~ msgstr "ЗапроÑ: %s" #~ msgid "There is no X509Request node in the request message" #~ msgstr "Ð’ запроÑе отÑутÑтвует Ñлемент X509Request" #~ msgid "Composed DN: %s" #~ msgstr "Создан DN: %s" #~ msgid "get|put [object ...]" #~ msgstr "get|put [объект ...]" #~ msgid "" #~ "The arcacl command retrieves/sets permissions (ACL) of data or computing " #~ "objects." #~ msgstr "" #~ "Команда arcacl иÑпользуетÑÑ Ð´Ð»Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ и приÑÐ²Ð¾ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð°Ð² доÑтупа (ACL) " #~ "данным или вычиÑлительному заданию." #~ msgid "Unsupported command %s." #~ msgstr "ÐÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° %s" #~ msgid "Cannot read specified jobID file: %s" #~ msgstr "Ðе удаётÑÑ Ð¿Ñ€Ð¾Ñ‡ÐµÑть указанный файл, Ñодержащий Ñрлыки задач: %s" #~ msgid "No objects given" #~ msgstr "Объекты не указаны" #~ msgid "Data object %s is not valid URL." #~ msgstr "Файловый объект %s не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым URL." #~ msgid "" #~ "Data object %s is not supported. Only GACL-enabled GridFTP servers are " #~ "supported yet." #~ msgstr "" #~ "Тип %s не поддерживаетÑÑ. Пока что поддерживаютÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ GridFTP Ñерверы " #~ "Ñ GACL." #~ msgid "URL %s is not supported." #~ msgstr "URL %s не поддерживаетÑÑ." #~ msgid "Object for stdout handling failed." #~ msgstr "Сбой обработки объекта stdout." #~ msgid "Object for stdin handling failed." #~ msgstr "Сбой обработки объекта stdin." #~ msgid "ACL transfer FAILED: %s" #~ msgstr "Сбой переÑылки ACL: %s" #~ msgid "" #~ "The arcmigrate command is used for migrating queued jobs to another " #~ "resource.\n" #~ "Note that migration is only supported between A-REX powered resources." #~ msgstr "" #~ "Команда arcmigrate иÑпользуетÑÑ Ð´Ð»Ñ Ð¼Ð¸Ð³Ñ€Ð°Ñ†Ð¸Ð¸ ожидающих задач на другой " #~ "реÑурÑ.\n" #~ "ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ между Ñлужбами A-REX." 
#~ msgid "Cannot write job IDs of submitted jobs to file (%s)" #~ msgstr "Ðевозможно запиÑать Ñрлыки запущенных задач в файл (%s) " #~ msgid "" #~ "Migration of job (%s) succeeded, but killing the job failed - it will " #~ "still appear in the job list" #~ msgstr "" #~ "УÑпешно завершена Ð¼Ð¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ (%s), но прервать задачу не удалоÑÑŒ - " #~ "она будет приÑутÑтвовать в ÑпиÑке задач" #~ msgid "" #~ "Migration of job (%s) succeeded, but cleaning the job failed - it will " #~ "still appear in the job list" #~ msgstr "" #~ "УÑпешно завершена Ð¼Ð¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ (%s), но очиÑтить задачу не удалоÑÑŒ - " #~ "она будет приÑутÑтвовать в ÑпиÑке задач" #~ msgid "Job migration summary:" #~ msgstr "Сводка перезаÑылки задач:" #~ msgid "%d of %d jobs were migrated" #~ msgstr "%d из %d задач были перезаÑланы" #~ msgid "The following %d were not migrated" #~ msgstr "Следующие %d не были перезаÑланы" #~ msgid "OpenSSL Error -- %s" #~ msgstr "Ошибка OpenSSL -- %s" #~ msgid "Creating and sending soap request" #~ msgstr "Создание и отправка запроÑа SOAP" #~ msgid "URL of SLCS service" #~ msgstr "URL Ñлужбы SLCS" #~ msgid "Identity provider name" #~ msgstr "Ð˜Ð¼Ñ Ð¿Ñ€Ð¾Ð²Ð°Ð¹Ð´ÐµÑ€Ð° идентификационной информации" #~ msgid "User account to identity provider" #~ msgstr "" #~ "Ð£Ñ‡Ñ‘Ñ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ñƒ провайдера идентификационной информации" #~ msgid "Password for user account to identity provider" #~ msgstr "Пароль учётной запиÑи у провайдера идентификационной информации" #~ msgid "Key size of the private key (512, 1024, 2048)" #~ msgstr "Длина Ñекретного ключа (512, 1024, 2048)" #~ msgid "Private key passphrase" #~ msgstr "Пароль Ñекретного ключа:" #~ msgid "passphrase" #~ msgstr "пароль" #~ msgid "Lifetime of the certificate, start with current time, hour as unit" #~ msgstr "Период дейÑÑ‚Ð²Ð¸Ñ Ñертификата, Ð½Ð°Ñ‡Ð¸Ð½Ð°Ñ Ñ Ñ‚ÐµÐºÑƒÑ‰ÐµÐ³Ð¾ момента, в чаÑах" #~ msgid "period" #~ msgstr "период" #~ msgid "Store directory for key and signed certificate" #~ msgstr "МеÑто Ð´Ð»Ñ Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¾Ð³Ð¾ ключа и подпиÑанного Ñертификата" #~ msgid "" #~ "The VOMS server with the information:\n" #~ "\t%s\"\n" #~ "can not be reached, please make sure it is available." #~ msgstr "" #~ "Ðевозможно ÑвÑзатьÑÑ Ñ Ñервером VOMS Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸ÐµÐ¹:\n" #~ "\t%s\"\n" #~ "ПожалуйÑта, проверьте, доÑтупен ли Ñтот Ñервер." #~ msgid "Error: can't read policy file: %s" #~ msgstr "Ошибка: невозможно прочеÑть файл политик: %s" #~ msgid "" #~ "One of the elements 'Exact', 'UpperBoundedRange', 'LowerBoundedRange', " #~ "'Range', 'Min' or 'Max' was expected." #~ msgstr "" #~ "ОжидалÑÑ Ð¾Ð´Ð¸Ð½ из Ñлементов 'Exact', 'UpperBoundedRange', " #~ "'LowerBoundedRange', 'Range', 'Min' или 'Max'." #~ msgid "" #~ "Combinations of 'Exact', 'Range', 'UpperBoundedRange'/'LowerBoundedRange' " #~ "and 'Max'/'Min' are not supported." #~ msgstr "" #~ "Комбинации 'Exact', 'Range', 'UpperBoundedRange'/'LowerBoundedRange' и " #~ "'Max'/'Min' не поддерживаютÑÑ." #~ msgid "Called SAML2SSOHTTPClient constructor" #~ msgstr "Вызван конÑтруктор SAML2SSOHTTPClient" #~ msgid "Relaystate %s" #~ msgstr "Значение RelayState: %s" #~ msgid "Performing SSO with %s " #~ msgstr "ВыполнÑетÑÑ SSO Ñ %s " #~ msgid "The IdP login is %s" #~ msgstr "Ð˜Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ IdP: %s" #~ msgid "Retrieving the remote SimpleSAMLphp installation failed!" #~ msgstr "Сбой доÑтупа к удалённой Ñлужбе SimpleSAMLphp!" #~ msgid "Getting from Confusa to the IdP page failed!" 
#~ msgstr "Сбой перехода Ñ Confusa на Ñтраницу IdP!" #~ msgid "Successfully redirected from Confusa to the IdP login!" #~ msgstr "УÑпешное перенаправление Ñ Confusa на вход в IdP!" #~ msgid "Getting the user consent for SSO failed!" #~ msgstr "Сбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ ÑоглаÑÐ¸Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½Ð° SSO!" #~ msgid "Successfully logged in to the IdP!" #~ msgstr "УÑпешный вход в IdP!" #~ msgid "Directing back from the IdP to Confusa failed!" #~ msgstr "Сбой обратного Ð¿ÐµÑ€ÐµÐ½Ð°Ð¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ IdP на Confusa!" #~ msgid "Successfully redirected back from the IdP to Confusa!" #~ msgstr "УÑпешное обратное перенаправление Ñ IdP на Confusa!" #~ msgid "The used session cookies for the about page is %s" #~ msgstr "ИÑпользуемые куки Ð´Ð»Ñ Ñтраницы Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸ÐµÐ¹: %s" #~ msgid "The retrieved DN is %s" #~ msgstr "Полученное выделенное Ð¸Ð¼Ñ (DN): %s" #~ msgid "The location to which the GET is performed is %s" #~ msgstr "Ðазначение операции GET: %s" #~ msgid "Approving CSR on Confusa's approve page %s" #~ msgstr "Одобрение запроÑа CSR на Ñтранице Ð¾Ð´Ð¾Ð±Ñ€ÐµÐ½Ð¸Ñ Confusa %s" #~ msgid "The cookie sent with approve is %s" #~ msgstr "Куки-файл, поÑланный Ñ Ð¾Ð´Ð¾Ð±Ñ€ÐµÐ½Ð¸ÐµÐ¼: %s" #~ msgid "The server location is %s " #~ msgstr "Сервер раÑположен на %s " #~ msgid "The request URL is %s" #~ msgstr "URL запроÑа: %s" #~ msgid "Sending OAuth request to signed URL %s" #~ msgstr "Отправка запроÑа OAuth на подпиÑанный URL %s" #~ msgid "Please login at the following URL " #~ msgstr "ПожалуйÑта, войдите в ÑиÑтему по данному URL " #~ msgid "Press enter to continue\n" #~ msgstr "Ðажмите enter, чтобы продолжить\n" #~ msgid "The about-you request URL is %s" #~ msgstr "URL запроÑа о данных пользователÑ: %s" #~ msgid "Approving the certificate signing request at %s" #~ msgstr "ОдобрÑетÑÑ Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¿Ð¾Ð´Ð¿Ð¸Ñи Ñертификата на %s" #~ msgid "The OAuth request URL is %s" #~ msgstr "URL запроÑа OAuth: %s" #~ msgid "The request is NULL!" #~ msgstr "ОтÑутÑтвует Ñубъект запроÑа!" #~ msgid "No characters were read from the BIO in public key extraction" #~ msgstr "" #~ "Ðи одного Ñимвола не было Ñчитано Ñ BIO при извлечении открытого ключа" #~ msgid "Could not find any digest for the given name" #~ msgstr "Ðевозможно найти Ñводку Ð´Ð»Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ð¾Ð³Ð¾ имени" #~ msgid "SHA1Sum appears to be empty!" #~ msgstr "Похоже, отÑутÑтвует SHA1Sum!" #~ msgid "Could not create a certificate request for subject %s" #~ msgstr "Ðевозможно Ñоздать Ð·Ð°Ð¿Ñ€Ð¾Ñ Ñертификата Ð´Ð»Ñ Ñубъекта %s" #~ msgid "Trying to get content %s from XML element, size %d" #~ msgstr "Попытка извлечь Ñодержимое %s из Ñлемента XML, размер %d" #~ msgid "Failed to parse XML file!" #~ msgstr "Сбой при разборе файла формата XML!" 
#~ msgid "extract_body_information(): Body elements not found in passed string" #~ msgstr "" #~ "extract_body_information(): Элемент Body не обнаружен в переданной Ñтроке" #~ msgid "post_2_ssoservice_redirect URL is %s" #~ msgstr "URL post_2_ssoservice_redirect: %s" #~ msgid "The consent_page is %s" #~ msgstr "consent_page: %s" #~ msgid "SAML2SSOHTTPClient::processConsent()" #~ msgstr "SAML2SSOHTTPClient::processConsent()" #~ msgid "Trying to open confirm site %s" #~ msgstr "Попытка открыть подтверждённый Ñайт %s" #~ msgid "Found action is %s" #~ msgstr "Обнаруженное дейÑтвие: %s" #~ msgid "Post-IdP-authentication action is %s" #~ msgstr "ДейÑтвие проверки подлинноÑти поÑле IdP: %s" #~ msgid "Used session cookies for the assertion consumer are %s" #~ msgstr "ИÑпользованные маркёры Ð´Ð»Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ утверждений: %s" #~ msgid "Got over the actual IP login 2 to %s, cookie %s " #~ msgstr "Подключение ÑобÑтвенно через вход IP к %s, куки-файл %s " #~ msgid "Posting username/password with the following session cookie %s to %s" #~ msgstr "Передача имени/Ð¿Ð°Ñ€Ð¾Ð»Ñ Ñ‡ÐµÑ€ÐµÐ· Ñледующий куки-файл ÑеÑÑии: %s на %s" #~ msgid "The idp_login_post_info cookie is %s, while the sent cookie was %s" #~ msgstr "" #~ "Куки-файл idp_login_post_info cookie ÑвлÑетÑÑ %s, тогда как отправленный " #~ "куки-файл был %s" #~ msgid "Getting SAML response" #~ msgstr "Ожидание отклика SAML" #~ msgid "Calling post-IdP site %s with relay state %s" #~ msgstr "ВызываетÑÑ Ñервер post-IdP %s Ñо ÑтатуÑом передачи %s" #~ msgid "Cookies %s" #~ msgstr "Куки %s" #~ msgid "Called HakaClient::processConsent()" #~ msgstr "Вызван HakaClient::processConsent()" #~ msgid "Checking if consent is necessary" #~ msgstr "ПроверÑем, необходимо ли ÑоглаÑие пользователÑ" #~ msgid "User consent to attribute transfer is necessary" #~ msgstr "Ðеобходимо ÑоглаÑие Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½Ð° передачу атрибутов" #~ msgid "" #~ "Your identity provider will send the following information to the SLCS " #~ "service:" #~ msgstr "" #~ "Ð¡Ð»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ поÑлана Вашим провайдером идентификации на " #~ "Ñервер SLCS:" #~ msgid "==============================================================================" #~ msgstr "==============================================================================" #~ msgid "Do you consent to the release of that information? (y/n) " #~ msgstr "СоглаÑны ли Ð’Ñ‹ на передачу Ñтой информации? 
(y/n)" #~ msgid "Consent confirm redirection URL is %s, cookies %s" #~ msgstr "URL Ð¿ÐµÑ€ÐµÐ½Ð°Ð¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð¸ подтверждённом ÑоглаÑии - %s, куки-файлы %s" #~ msgid "LFC resolve timed out" #~ msgstr "ИÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°Ð·Ð±Ð¾Ñ€Ð° LFC" #~ msgid "Error finding replicas: %s" #~ msgstr "Ошибка Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¹: %s" #~ msgid "LFC resolve returned no entries" #~ msgstr "Разбор LFC не выдал запиÑей" #~ msgid "File does not exist in LFC" #~ msgstr "Этот файл не занеÑён в LFC" #~ msgid "Skipping invalid location: %s - %s" #~ msgstr "ПропуÑкаетÑÑ Ð½ÐµÐ²ÐµÑ€Ð½Ñ‹Ð¹ адреÑ: %s - %s" #~ msgid "Replica %s already exists for LFN %s" #~ msgstr "Реплика %s уже ÑущеÑтвует Ð´Ð»Ñ LFN %s" #~ msgid "Duplicate replica location: %s" #~ msgstr "Идентичное меÑтонахождение реплики: %s" #~ msgid "Resolve: checksum: %s" #~ msgstr "Разбор: ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма: %s" #~ msgid "Resolve: size: %llu" #~ msgstr "Разбор: размер: %llu" #~ msgid "Resolve: modified: %s" #~ msgstr "Разбор: Ð²Ñ€ÐµÐ¼Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ: %s" #~ msgid "LFN is missing in LFC (needed for replication)" #~ msgstr "Ð’ LFC отÑутÑтвует LFN (необходимо Ð´Ð»Ñ Ñ‚Ð¸Ñ€Ð°Ð¶Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ)" #~ msgid "LFN already exists in LFC" #~ msgstr "LFN уже зарегиÑтрирован в LFC" #~ msgid "Using supplied guid %s" #~ msgstr "ИÑпользуетÑÑ Ð¿Ñ€ÐµÐ´Ð¾Ñтавленный guid %s" #~ msgid "Error creating LFC entry: %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñи каталога LFC: %s" #~ msgid "Error finding info on LFC entry %s which should exist: %s" #~ msgstr "" #~ "Ошибка Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о запиÑи LFC %s, ÐºÐ¾Ñ‚Ð¾Ñ€Ð°Ñ Ð´Ð¾Ð»Ð¶Ð½Ð° " #~ "ÑущеÑтвовать: %s" #~ msgid "Error creating LFC entry %s, guid %s: %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñи каталога LFC %s, guid %s: %s" #~ msgid "Error entering metadata: %s" #~ msgstr "Ошибка при вводе метаданных: %s" #~ msgid "Warning: only md5 and adler32 checksums are supported by LFC" #~ msgstr "" #~ "Предупреждение: LFC поддерживает только проверочные Ñуммы типа md5 и " #~ "adler32" #~ msgid "No GUID defined for LFN - probably not preregistered" #~ msgstr "" #~ "Ð”Ð»Ñ LFN не задан GUID - возможно, не пройдена Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñ€ÐµÐ³Ð¸ÑтрациÑ" #~ msgid "Error adding replica: %s" #~ msgstr "Ошибка Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€ÐµÐ¿Ð»Ð¸ÐºÐ¸: %s" #~ msgid "Entering checksum type %s, value %s, file size %llu" #~ msgstr "" #~ "ЗаноÑитÑÑ Ð¿Ñ€Ð¾Ð²ÐµÑ€Ð¾Ñ‡Ð½Ð°Ñ Ñумма типа %s, Ñо значением %s, размер файла %llu" #~ msgid "Failed to remove LFN in LFC - You may need to do it by hand" #~ msgstr "" #~ "Ðе удалоÑÑŒ Ñтереть LFN в LFC - возможно, Вам придётÑÑ Ð´ÐµÐ»Ð°Ñ‚ÑŒ Ñто вручную" #~ msgid "Location is missing" #~ msgstr "ОтÑутÑтвует раÑположение" #~ msgid "Error getting replicas: %s" #~ msgstr "Ошибка Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ñ€ÐµÐ¿Ð»Ð¸Ðº: %s" #~ msgid "Failed to remove location from LFC: %s" #~ msgstr "Ошибка при удалении меÑÑ‚Ð¾Ð½Ð°Ñ…Ð¾Ð¶Ð´ÐµÐ½Ð¸Ñ Ð¸Ð· LFC: %s" #~ msgid "Failed to remove LFC directory: directory is not empty" #~ msgstr "Ðе удалоÑÑŒ Ñтереть директорию LFC: Ð´Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ Ð½Ðµ пуÑта" #~ msgid "Failed to remove LFC directory: %s" #~ msgstr "Ошибка при удалении каталога LFC: %s" #~ msgid "Failed to remove LFN in LFC: %s" #~ msgstr "Сбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ LFN из LFC: %s" #~ msgid "Error listing file or directory: %s" #~ msgstr "Ошибка вывода файла или каталога: %s" #~ msgid "Not a directory" #~ msgstr "Ðе ÑвлÑетÑÑ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð¾Ð¼" #~ msgid "Error opening directory: %s" #~ msgstr "Ошибка при открытии каталога: %s" #~ msgid "Error listing directory: %s" #~ msgstr 
"Ошибка вывода каталога: %s" #~ msgid "Error listing replicas: %s" #~ msgstr "Ошибка перечиÑÐ»ÐµÐ½Ð¸Ñ Ñ€ÐµÐ¿Ð»Ð¸Ðº: %s" #~ msgid "Creating LFC directory %s" #~ msgstr "Создание каталога LFC %s" #~ msgid "Error creating required LFC dirs: %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ‚Ñ€ÐµÐ±ÑƒÐµÐ¼Ñ‹Ñ… директорий LFC: %s" #~ msgid "Cannot rename to root directory" #~ msgstr "Ðевозможно переименовать в корневой каталог" #~ msgid "Error renaming %s to %s: %s" #~ msgstr "Ошибка Ð¿ÐµÑ€ÐµÐ¸Ð¼ÐµÐ½Ð¾Ð²Ñ‹Ð²Ð°Ð½Ð¸Ñ %s в %s: %s" #~ msgid "Error finding LFN from GUID %s: %s" #~ msgstr "Ошибка Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ LFN Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ GUID %s: %s" #~ msgid "GUID %s resolved to LFN %s" #~ msgstr "GUID %s принадлежит LFN %s" #~ msgid "Mismatching protocol/host in bulk resolve!" #~ msgstr "ÐеÑовпадающий протокол/Ñервер в маÑÑовом разборе!" #~ msgid "Cannot use a mixture of GUIDs and LFNs in bulk resolve" #~ msgstr "Ð’ маÑÑовом разборе Ð½ÐµÐ»ÑŒÐ·Ñ Ð¸Ñпользовать ÑмеÑÑŒ GUID-ов и LFN-ов" #~ msgid "Bulk resolve returned no entries" #~ msgstr "МаÑÑовый разбор не обнаружил запиÑей" #~ msgid "GUID %s, SFN %s" #~ msgstr "GUID %s, SFN %s" #~ msgid "LFC returned more results than we asked for!" #~ msgstr "LFC выдаёт больше результатов, чем надо!" #~ msgid "Invalid dataset name: %s" #~ msgstr "Ðеверное название набора данных: %s" #~ msgid "Invalid DQ2 URL %s" #~ msgstr "ÐедопуÑтимый URL DQ2: %s" #~ msgid "Could not obtain information from AGIS" #~ msgstr "Ðе удалоÑÑŒ получить информацию из AGIS" #~ msgid "No suitable endpoints found in AGIS" #~ msgstr "Ðе обнаружено подходÑщих точек входа в AGIS" #~ msgid "Proxy certificate does not have ATLAS VO extension" #~ msgstr "У Ñертификата доверенноÑти нет раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð’Ðž ATLAS" #~ msgid "Locations of dataset %s are cached" #~ msgstr "МеÑÑ‚Ð¾Ð¿Ð¾Ð»Ð¾Ð¶ÐµÐ½Ð¸Ñ Ð½Ð°Ð±Ð¾Ñ€Ð° данных %s в кÑше" #~ msgid "No such dataset: %s" #~ msgstr "Ðет такого набора данных: %s" #~ msgid "Malformed DQ2 response: %s" #~ msgstr "ИÑкажённый отзыв DQ2: %s" #~ msgid "Dataset %s: DUID %s" #~ msgstr "Ðабор данных %s: DUID %s" #~ msgid "Location: %s" #~ msgstr "РаÑположение: %s" #~ msgid "DQ2 returned %s" #~ msgstr "DQ2 ответил %s" #~ msgid "Duplicate location of file %s" #~ msgstr "ДублирующееÑÑ Ñ€Ð°Ñположение файла %s" #~ msgid "Site %s is not deterministic and cannot be used" #~ msgstr "Узел %s не определён однозначно и не может быть иÑпользован" #~ msgid "Site %s not found in AGIS info" #~ msgstr "Узел %s не обнаружен в информации AGIS" #~ msgid "Reading cached AGIS data from %s" #~ msgstr "Чтение кÑшрованных данных AGIS Ñ %s" #~ msgid "Cannot read cached AGIS info from %s, will re-download: %s" #~ msgstr "" #~ "Ðе удалоÑÑŒ прочеÑть информацию AGIS Ñ %s, будет произведена перезагрузка: " #~ "%s" #~ msgid "Cached AGIS info is out of date, will re-download" #~ msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ AGIS в кÑше уÑтарела, будет загружена заново" #~ msgid "Could not refresh AGIS info, cached version will be used: %s" #~ msgstr "" #~ "Ðе удалоÑÑŒ обновить информацию AGIS, будет иÑпользована кÑÑˆÐ¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ " #~ "верÑиÑ: %s" #~ msgid "Could not download AGIS info: %s" #~ msgstr "Ðе удалоÑÑŒ загрузить информацию AGIS: %s" #~ msgid "AGIS returned %s" #~ msgstr "AGIS ответил %s " #~ msgid "Could not create file %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать файл %s" #~ msgid "Badly formatted output from AGIS" #~ msgstr "Ðеверно Ñформированный отзыв AGIS" #~ msgid "%s -> %s" #~ msgstr "%s -> %s" #~ msgid "Recieved token length: %i" #~ msgstr "Длина полученного токена: %i" #~ msgid "GSS accept security context failed: %i/%i%s" 
#~ msgstr "Сбой принÑÑ‚Ð¸Ñ ÐºÐ¾Ð½Ñ‚ÐµÐºÑта безопаÑноÑти GSS: %i/%i%s" #~ msgid "GSS accept security context: %i/%i" #~ msgstr "ПринÑтие контекÑта безопаÑноÑти GSS: %i/%i" #~ msgid "Returned token length: %i" #~ msgstr "Длина выданного токена: %i" #~ msgid "GSS unwrap failed: %i/%i%s" #~ msgstr "Сбой Ñ€Ð°Ð·Ð²Ñ‘Ñ€Ñ‚Ñ‹Ð²Ð°Ð½Ð¸Ñ GSS: %i/%i%s" #~ msgid "GSS unwrap: %i/%i" #~ msgstr "Развёртывание GSS: %i/%i" #~ msgid "Security check failed in GSI MCC for incoming message" #~ msgstr "Ðе прошла проверка безопаÑноÑти в GSI MCC Ð´Ð»Ñ Ð²Ñ…Ð¾Ð´Ñщего ÑообщениÑ" #~ msgid "Security check failed in GSI MCC for outgoing message" #~ msgstr "Ðе прошла проверка безопаÑноÑти в GSI MCC Ð´Ð»Ñ Ð¸ÑходÑщего ÑообщениÑ" #~ msgid "GSS wrap failed: %i/%i%s" #~ msgstr "Сбой ÑÐ²Ñ‘Ñ€Ñ‚Ñ‹Ð²Ð°Ð½Ð¸Ñ GSS: %i/%i%s" #~ msgid "GSS wrap: %i/%i" #~ msgstr "Свёртывание GSS: %i/%i" #~ msgid "Could not resolve peer side's hostname" #~ msgstr "Ðевозможно разобрать доменное Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð° партнёра" #~ msgid "Peer host name to which this client will access: %s" #~ msgstr "Доменное Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð°, к которому будет Ñовершён доÑтуп: %s" #~ msgid "GSS import name failed: %i/%i%s" #~ msgstr "Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð¼ÐµÐ½Ð¸ GSS: %i/%i%s" #~ msgid "GSS init security context failed: %i/%i%s" #~ msgstr "Сбой инициализации контекÑта безопаÑноÑти GSS: %i/%i%s" #~ msgid "GSS init security context: %i/%i" #~ msgstr "Ð˜Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ ÐºÐ¾Ð½Ñ‚ÐµÐºÑта безопаÑноÑти GSS: %i/%i" #~ msgid "No payload during GSI context initialisation" #~ msgstr "ОтÑутÑтвует Ð¿Ð¾Ð»ÐµÐ·Ð½Ð°Ñ Ð½Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° при инициализации контекÑта GSI" #~ msgid "Transfer protocol is TLS or SSL3" #~ msgstr "Протокол передачи TLS или SSL3" #~ msgid "Transfer protocol is GLOBUS SSL" #~ msgstr "Протокол передачи GLOBUS SSL" #~ msgid "Transfer protocol is SSL2" #~ msgstr "Протокол передачи SSL2" #~ msgid "Transfer protocol is GSI" #~ msgstr "Протокол передачи GSI" #~ msgid "input token length: %i" #~ msgstr "Длина входного токена: %i" #~ msgid "GSS wrap/unwrap failed: %i/%i%s" #~ msgstr "Сбой ÑвёртываниÑ/Ñ€Ð°Ð·Ð²Ñ‘Ñ€Ñ‚Ñ‹Ð²Ð°Ð½Ð¸Ñ GSS: %i/%i%s" #~ msgid "Output token length: %i" #~ msgstr "Длина выходного токена: %i" #~ msgid "password sources" #~ msgstr "иÑточники паролÑ" #~ msgid "" #~ "There are %d NSS base directories where the certificate, key, and module " #~ "datbases live" #~ msgstr "" #~ "Обнаружено %d оÑновных директорий NSS, Ñодержащих базы данных " #~ "Ñертификатов, ключей и модулей" #~ msgid "Writing to xrootd is not (yet) supported" #~ msgstr "ЗапиÑÑŒ по протоколу xrootd (пока) не поддерживаетÑÑ" #~ msgid "Cannot (yet) remove files through xrootd" #~ msgstr "Ðевозможно (пока) удалить файл через xrootd" #~ msgid "Cannot (yet) create directories through xrootd" #~ msgstr "Ðевозможно (пока) Ñоздать каталог через xrootd" #~ msgid "Cannot (yet) rename files through xrootd" #~ msgstr "Ðевозможно (пока) переименовать файл через xrootd" #~ msgid "Rucio returned malormed xml: %s" #~ msgstr "Rucio возвратил некорректный XML: %s" #~ msgid "" #~ "Matchmaking, MaxDiskSpace*1024 >= DiskSpace - CacheDiskSpace problem, " #~ "ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace), " #~ "%d MB (CacheDiskSpace)" #~ msgstr "" #~ "Сравнение; MaxDiskSpace*1024 >= DiskSpace - неÑовпадение CacheDiskSpace, " #~ "у Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð´Ð»Ñ Ð¸ÑполнениÑ: %d MB (MaxDiskSpace); в опиÑании задачи: %d " #~ "MB (DiskSpace), %d MB (CacheDiskSpace)" #~ msgid "" #~ "Matchmaking, WorkingAreaFree*1024 >= DiskSpace - CacheDiskSpace problem, " #~ "ExecutionTarget: %d MB (MaxDiskSpace); 
#~ msgid ""
#~ "Matchmaking, WorkingAreaFree*1024 >= DiskSpace - CacheDiskSpace problem, "
#~ "ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace), "
#~ "%d MB (CacheDiskSpace)"
#~ msgstr "Сравнение; WorkingAreaFree*1024 >= DiskSpace - несовпадение CacheDiskSpace, у назначения для исполнения: %d MB (MaxDiskSpace); в описании задачи: %d MB (DiskSpace), %d MB (CacheDiskSpace)"

#~ msgid " State: %s (%s)"
#~ msgstr " Состояние: %s (%s)"

#~ msgid "Renewal of EMI ES jobs is not supported"
#~ msgstr "Возобновление задач EMI ES не поддерживается"

#~ msgid ""
#~ "Could not convert the bartender attribute value (%s) to an URL instance "
#~ "in configuration file (%s)"
#~ msgstr "Не удалось преобразовать значение атрибута bartender (%s) в файле настроек в URL (%s)"

#~ msgid "Command PASV/SPAS"
#~ msgstr "Команда PASV/SPAS"

#~ msgid "Wrong number in maxload: %s"
#~ msgstr "Недопустимое число в maxload: %s"

#~ msgid "Wrong number in maxloadshare: %s"
#~ msgstr "Недопустимое число в maxloadshare: %s"

#~ msgid "The type of share is not set in maxloadshare"
#~ msgstr "Тип квоты не указан в maxloadshare"

#~ msgid "share_limit should be located after maxloadshare"
#~ msgstr "share_limit должен располагаться после maxloadshare"

#~ msgid "The name of share is not set in share_limit"
#~ msgstr "Название квоты не указано в share_limit"

#~ msgid ""
#~ "'newdatastaging' configuration option is deprecated, 'enable_dtr' should "
#~ "be used instead"
#~ msgstr "Опция настроек 'newdatastaging' теперь называется 'enable_dtr'; пожалуйста, используйте новое название"

#~ msgid "Resume of EMI ES jobs is not supported"
#~ msgstr "Продолжение задач EMI ES не поддерживается"

#~ msgid "Failed to read input passphrase"
#~ msgstr "Не удалось прочесть пароль"

#~ msgid "Input phrase is too short (at least %d char)"
#~ msgstr "Пароль слишком короткий (используйте по крайней мере %d символов)"

#~ msgid "Password is too short, need at least %u charcters"
#~ msgstr "Пароль слишком короткий, используйте хотя бы %u символов."

#~ msgid "Password is too long, need at most %u characters"
#~ msgstr "Слишком длинный пароль, требуется не более %u символов"

#~ msgid "ERROR: A computing resource using the GridFTP interface was requested, but"
#~ msgstr "ОШИБКА: Был запрошен вычислительный ресурс с интерфейсом GridFTP, но"

#~ msgid " the corresponding plugin could not be loaded. Is the plugin installed?"
#~ msgstr " соответствующий модуль не может быть подгружен. Вы установили этот модуль?"

#~ msgid " If not, please install the package 'nordugrid-arc-plugins-globus'."
#~ msgstr " Если нет, пожалуйста, установите пакет 'nordugrid-arc-plugins-globus'."

#~ msgid " Depending on your type of installation the package name might differ. "
#~ msgstr " Имя пакета может отличаться, в зависимости от типа вашей системы. "

#~ msgid ""
#~ "Error: Unable to parse limit in VectorLimitExceededFault response from "
#~ "service to an 'int': %s"
#~ msgstr "Ошибка: Невозможно разобрать предел в отзыве сервиса VectorLimitExceededFault как 'int': %s"
#~ msgid "%s is not a directory, it is needed for the client to function correctly"
#~ msgstr "%s не является каталогом. Он необходим для нормальной работы клиента"

#~ msgid "ERROR: Failed to lock job list file %s"
#~ msgstr "Ошибка: Не удалось заблокировать файл списка задач %s"

#~ msgid "Please try again later, or manually clean up lock file"
#~ msgstr "Пожалуйста, попытайтесь заново попозже, или удалите файл блокировки вручную"

#~ msgid "Could not write meta file %s"
#~ msgstr "Не удалось записать мета-файл %s"

#~ msgid "DTR %s: Transfer failed: %s"
#~ msgstr "DTR %s: Сбой передачи: %s"

#~ msgid "DTR %s: No locations defined for %s"
#~ msgstr "DTR %s: Не определены расположения %s"

#~ msgid ""
#~ "DTR %s: Request:\n"
#~ "%s"
#~ msgstr ""
#~ "DTR %s: Запрос:\n"
#~ "%s"

#~ msgid ""
#~ "DTR %s: Response:\n"
#~ "%s"
#~ msgstr ""
#~ "DTR %s: Отклик:\n"
#~ "%s"

#~ msgid "DTR %s: %s"
#~ msgstr "DTR %s: %s"

#~ msgid "DTR %s: Failed locating credentials"
#~ msgstr "DTR %s: Сбой обнаружения параметров доступа"

#~ msgid "DTR %s: Failed to initiate client connection"
#~ msgstr "DTR %s: Сбой запуска соединения с клиентом"

#~ msgid "DTR %s: Client connection has no entry point"
#~ msgstr "DTR %s: Отсутствует точка входа для соединения с клиентом"

#~ msgid "DTR %s: Initiating delegation procedure"
#~ msgstr "DTR %s: Инициализация процедуры делегирования"

#~ msgid "DTR %s: Failed to initiate delegation credentials"
#~ msgstr "DTR %s: Сбой инициализации делегируемых прав доступа"

#~ msgid "DTR %s: Running command: %s"
#~ msgstr "DTR %s: Выполняется команда %s"

#~ msgid "DTR %s: Error creating cache"
#~ msgstr "DTR %s: Ошибка создания кэша"

#~ msgid "DTR %s: Forcing re-download of file %s"
#~ msgstr "DTR %s: Принудительная перезагрузка файла %s"

#~ msgid "DTR %s: Cached file is locked - should retry"
#~ msgstr "DTR %s: Кэшированный файл заблокирован - должна быть новая попытка"

#~ msgid "DTR %s: Failed to initiate cache"
#~ msgstr "DTR %s: Сбой инициализации кэша"

#~ msgid "DTR %s: File %s is cached (%s) - checking permissions"
#~ msgstr "DTR %s: Файл %s занесён в кэш (%s) - проверка прав доступа"

#~ msgid "DTR %s: Permission checking failed"
#~ msgstr "DTR %s: Проверка доступа не пройдена"

#~ msgid "DTR %s: Permission checking passed"
#~ msgstr "DTR %s: Проверка доступа пройдена успешно"

#~ msgid "DTR %s: Source modification date: %s"
#~ msgstr "DTR %s: Время изменения источника: %s"

#~ msgid "DTR %s: Cache creation date: %s"
#~ msgstr "DTR %s: Время создания кэша: %s"

#~ msgid "DTR %s: Cache file valid until: %s"
#~ msgstr "DTR %s: Файл в кэше действителен до: %s"

#~ msgid "DTR %s: Cached file is outdated, will re-download"
#~ msgstr "DTR %s: Кэшированный файл устарел, будет перезагружен"

#~ msgid "DTR %s: Cached copy is still valid"
#~ msgstr "DTR %s: Кэшированная копия всё ещё действительна"

#~ msgid "DTR %s: No locations for destination different from source found"
#~ msgstr "DTR %s: Не обнаружено адресов цели, отличных от источника"

#~ msgid "DTR %s: Checking %s"
#~ msgstr "DTR %s: Проверяется %s"

#~ msgid "DTR %s: Removing %s"
#~ msgstr "DTR %s: Удаляется %s"

#~ msgid "DTR %s: Linking/copying cached file to %s"
#~ msgstr "DTR %s: Создание ссылки/копирование файла из кэша в %s"

#~ msgid "Bad number in logsize: %s"
#~ msgstr "Недопустимое значение logsize: %s"

#~ msgid "Starting grid-manager thread"
#~ msgstr "Запускается поток Грид-менеджера"
#~ msgstr "Уничтожение задач и ожидание Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ ÑоответÑтвующих им процеÑÑов" #~ msgid "Cannot open database" #~ msgstr "Ðе удалоÑÑŒ открыть базу данных" #~ msgid "Cannot abort transaction %s" #~ msgstr "Ðевозможно прервать транзакцию %s" #~ msgid "put: deadlock handling: try again" #~ msgstr "put: обработка взаимоблокировки, Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #~ msgid "put: cannot abort transaction: %s" #~ msgstr "put: невозможно оборвать транзакцию: %s" #~ msgid "put: %s" #~ msgstr "put: %s" #~ msgid "get: deadlock handling, try again" #~ msgstr "get: обработка взаимоблокировки, Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #~ msgid "get: cannot abort transaction: %s" #~ msgstr "get: невозможно оборвать транзакцию: %s" #~ msgid "get: %s" #~ msgstr "get: %s" #~ msgid "del: deadlock handling, try again" #~ msgstr "del: обработка взаимоблокировки, Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #~ msgid "del: cannot abort transaction: %s" #~ msgstr "del: невозможно оборвать транзакцию: %s" #~ msgid "del: %s" #~ msgstr "del: %s" #~ msgid "get_doc_name: deadlock handling, try again" #~ msgstr "get_doc_name: обработка взаимоблокировки, Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #~ msgid "get_doc_names: cannot abort transaction: %s" #~ msgstr "get_doc_names: невозможно прервать транзакцию: %s" #~ msgid "Error during the transaction: %s" #~ msgstr "Ошибка при транзакции: %s" #~ msgid "checkpoint: %s" #~ msgstr "ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ°: %s" #~ msgid "Failed to create dir %s for temp proxies: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать каталог %s Ð´Ð»Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ñ‹Ñ… доверенноÑтей: %s" #~ msgid "Could not write temporary file: %s" #~ msgstr "Ðе удалоÑÑŒ запиÑать временный файл: %s" #~ msgid "Error creating file %s with mkstemp(): %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° %s Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ mkstemp(): %s" #~ msgid "Error writing to tmp lock file %s: %s" #~ msgstr "Ошибка запиÑи во временный файл блокировки %s: %s" #~ msgid "Warning: closing tmp lock file %s failed" #~ msgstr "Предупреждение: Ñбой Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð³Ð¾ файла блокировки %s" #~ msgid "Source probably does not exist" #~ msgstr "ИÑточник Ñкорее вÑего не ÑущеÑтвует" #~ msgid "Problems resolving destination" #~ msgstr "Проблемы Ñ Ñ€Ð°Ð·Ð±Ð¾Ñ€Ð¾Ð¼ направлениÑ" #~ msgid "%s: Reprocessing RSL failed" #~ msgstr "%s: Сбой переобработки RSL" #~ msgid "" #~ "Dumping job description aborted because no resource returned any " #~ "information" #~ msgstr "" #~ "Обрыв раÑпечатки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸, Ñ‚.к. ни один из реÑурÑов не предоÑтавил " #~ "информацию" #~ msgid "Creating a PDP client" #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ PDP" #~ msgid "job.Resources.QueueName = %s" #~ msgstr "job.Resources.QueueName = %s" #, fuzzy #~ msgid "PrepareToGet request timed out after %i seconds" #~ msgstr "Ðе удаётÑÑ ÑоединитьÑÑ Ñ %s:%s (%s), connection timed out" #, fuzzy #~ msgid "Bring online request timed out after %i seconds" #~ msgstr "Ðе удаётÑÑ ÑоединитьÑÑ Ñ %s:%s (%s), connection timed out" #, fuzzy #~ msgid "PrepareToPut request timed out after %i seconds" #~ msgstr "Ðе удаётÑÑ ÑоединитьÑÑ Ñ %s:%s (%s), connection timed out" #, fuzzy #~ msgid "Ls request timed out after %i seconds" #~ msgstr "Ðе удаётÑÑ ÑоединитьÑÑ Ñ %s:%s (%s), connection timed out" #, fuzzy #~ msgid "copy request timed out after %i seconds" #~ msgstr "Ðе удаётÑÑ ÑоединитьÑÑ Ñ %s:%s (%s), connection timed out" #~ msgid "Could not load GFAL DMC. Please check that this plugin is installed" #~ msgstr "" #~ "Ðе удалоÑÑŒ подгрузить GFAL DMC. ПожалуйÑта, убедитеÑÑŒ, что Ñтот " #~ "подключаемый модуль уÑтановлен." 
#~ msgid "Unable to remove file %s: No valid proxy found" #~ msgstr "Ðевозможно удалить файл %s: Ðе обнаружено приемлемой доверенноÑти" #~ msgid "Unable to transfer file %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно переÑлать файл %s: Ðе обнаружено дейÑтвительных параметров " #~ "доÑтупа" #~ msgid "Unable to register file %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно зарегиÑтрировать файл %s: Ðе обнаружено дейÑтвительных " #~ "параметров доÑтупа" #~ msgid "Unable to copy from %s: No valid credentials found" #~ msgstr "" #~ "Ðевозможно Ñкопировать из %s: Ðе обнаружено дейÑтвительных параметров " #~ "доÑтупа" #, fuzzy #~ msgid "arrayOfFileStatuses" #~ msgstr "arrayOfFileStatuses" #, fuzzy #~ msgid "Failed to create reading thread" #~ msgstr "Ðе удалоÑÑŒ Ñоздать поток Ð´Ð»Ñ Ð¿Ñ€Ð¸Ð²Ñзки к LDAP (%s)" #, fuzzy #~ msgid "Failed to create writing thread" #~ msgstr "Ðе удалоÑÑŒ Ñоздать поток Ð´Ð»Ñ Ð¿Ñ€Ð¸Ð²Ñзки к LDAP (%s)" #, fuzzy #~ msgid "DTR %s: Failed to resolve any source replicas" #~ msgstr "Ðе удалоÑÑŒ загрузить иÑточник \"%s\": %s" #, fuzzy #~ msgid "DTR %s: Failed to resolve destination replicas" #~ msgstr "Ðе удалоÑÑŒ зарегиÑтрировать назначение: %s" #, fuzzy #~ msgid "DTR %s: Failed to pre-register destination" #~ msgstr "Сбой при региÑтрации нового файла/направлениÑ" #, fuzzy #~ msgid "DTR %s: Failed checking source replica" #~ msgstr "Ошибка проверки иÑходного раздела %1." #, fuzzy #~ msgid "DTR %s: Error resolving destination replicas" #~ msgstr "DTR %s: Обнаружение ÑущеÑтвующих копий назначениÑ" #, fuzzy #~ msgid "DTR %s: Failed to prepare source" #~ msgstr "Сбой при доÑтупе к иÑточнику(-ам) конфигурации: %s\n" #, fuzzy #~ msgid "DTR %s: Failed to prepare destination" #~ msgstr "Ошибка при запиÑи в цель" #, fuzzy #~ msgid "The retrieved dn is %s" #~ msgstr "принимаетÑÑ Ð·Ð° 1 Гц " #~ msgid "xacml authz request: %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° допуÑк XACML: %s" #~ msgid "xacml authz response: %s" #~ msgstr "Отклик допуÑка XACML: %s" #, fuzzy #~ msgid "Failed initing handle" #~ msgstr "Ðевозможно инициализировать мутекÑ" #~ msgid "Bad authentication information" #~ msgstr "ÐÐµÐ¿Ñ€Ð¸ÐµÐ¼Ð»ÐµÐ¼Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ подлинноÑти" #~ msgid "nss db to be accesses: %s\n" #~ msgstr "будет иÑпользована база данных NSS %s\n" #~ msgid "Removing temp proxy %s" #~ msgstr "Удаление временной доверенноÑти %s" #~ msgid "Failed to create temporary file in %s - %s" #~ msgstr "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð³Ð¾ файла в %s - %s" #~ msgid "Failed to create control (%s) or session (%s) directories" #~ msgstr "Ðе удалоÑÑŒ Ñоздать контрольный каталог (%s) или каталог ÑеÑÑии (%s)" #~ msgid "Failed to store configuration into temporary file: %s" #~ msgstr "Сбой запиÑи наÑтроек во временный файл: %s" #, fuzzy #~ msgid "Failed to create/detect control (%s) or session (%s) directories" #~ msgstr "Ðе удалоÑÑŒ Ñоздать контрольный каталог (%s) или каталог ÑеÑÑии (%s)" #~ msgid "pretend utility is run by user with given name" #~ msgstr "Ñделать вид, что утилита запущена пользователем Ñ Ð´Ñ€ÑƒÐ³Ð¸Ð¼ именем" #~ msgid "pretend utility is run by user with given UID" #~ msgstr "Ñделать вид, что утилита запущена пользователем Ñ Ð´Ñ€ÑƒÐ³Ð¸Ð¼ UID" #~ msgid "Error processing configuration - EXITING" #~ msgstr "Сбой при обработке наÑтроек - ВЫХОД" #~ msgid "No suitable users found in configuration - EXITING" #~ msgstr "Ð’ наÑтройках не указано, от чьего имени производить запуÑк - ВЫХОД" #~ msgid "Can't recognize own username - EXITING" #~ msgstr "Попытка запуÑка от 
имени неизвеÑтного Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ - ВЫХОД" #, fuzzy #~ msgid "Processing grid-manager configuration" #~ msgstr "Ðе удалоÑÑŒ обработать наÑтройки grid-manager" #~ msgid "Usage: inputcheck [-h] [-d debug_level] RSL_file [proxy_file]" #~ msgstr "" #~ "ИÑпользование: inputcheck [-h] [-d уровень_отладки] RSL_file " #~ "[файл_доверенноÑти]" #~ msgid "Environment could not be set up" #~ msgstr "Ðе удалоÑÑŒ наÑтроить Ñреду" #~ msgid "User %s is not valid" #~ msgstr "ÐедейÑтвительный пользователь %s" #~ msgid "No configuration file found" #~ msgstr "Ðе найден файл наÑтроек" #~ msgid "" #~ "Gridmap user list feature is not supported anymore. Plase use @filename " #~ "to specify user list." #~ msgstr "" #~ "ПерехиÑление пользователей в gridmap больше не поддерживаетÑÑ. " #~ "ПожалуйÑта, укажите @файл Ñо ÑпиÑком пользователей." #~ msgid "Can't read users in specified file %s" #~ msgstr "Ðевозможно прочеÑть пользователей в указанном файле %s" #~ msgid "Wrong number in speedcontrol: " #~ msgstr "ÐедопуÑтимое чиÑло в speedcontrol: " #~ msgid "Wrong option in securetransfer" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² securetransfer" #~ msgid "Wrong option in passivetransfer" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² passivetransfer" #~ msgid "Wrong option in norootpower" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² norootpower" #~ msgid "Wrong option in localtransfer" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² localtransfer" #~ msgid "Junk in defaultttl command" #~ msgstr "БеÑÑмыÑлица в команде defaultttl" #~ msgid "Junk in maxrerun command" #~ msgstr "БеÑÑмыÑлица в команде maxrerun" #~ msgid "diskspace is empty" #~ msgstr "пуÑтое значение diskspace" #~ msgid "junk in diskspace command" #~ msgstr "беÑÑмыÑлица в команде diskspace" #~ msgid "Wrong number in diskspace command" #~ msgstr "ÐедопуÑтимое чиÑло в команде diskspace" #~ msgid "Junk in defaultlrms command" #~ msgstr "БеÑÑмыÑлица в команде defaultlrms" #~ msgid "Timeout for plugin is missing" #~ msgstr "ОтÑутÑтвует тайм-аут Ð´Ð»Ñ Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡Ð°ÐµÐ¼Ð¾Ð³Ð¾ модулÑ" #~ msgid "preferredpattern value is missing" #~ msgstr "ОтÑутÑтвует значение preferredpattern" #~ msgid "Wrong option in newdatastaging" #~ msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² newdatastaging" #~ msgid "Bad URL in delivery_service: %s" #~ msgstr "ÐедопуÑтимый URL в delivery_service: %s" #~ msgid "Could not add file:/local to delivery services" #~ msgstr "Ðевозможно добавить file:/local к Ñлужбам доÑтавки" #~ msgid "Can't read user list in specified file %s" #~ msgstr "Ðевозможно прочеÑть ÑпиÑок пользователей в указанном файле %s" #~ msgid "Warning: creation of user \"%s\" failed" #~ msgstr "Предупреждение: не удалоÑÑŒ Ñоздать Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ \"%s\"" #~ msgid "" #~ "Gridmap user list feature is not supported anymore. Please use @filename " #~ "to specify user list." #~ msgstr "" #~ "СпиÑок пользователей в Gridmap больше не поддерживаетÑÑ. ПожалуйÑта, " #~ "иÑпользуйте @filename Ð´Ð»Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð¸Ñ ÑпиÑка пользователей." #~ msgid "No username entries in control directory" #~ msgstr "ОтÑутÑтвуют Ñлементы имени Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² контрольном каталоге" #~ msgid "User %s for helperUtility is not configured" #~ msgstr "Ðе Ñконфигурирован пользователь %s Ð´Ð»Ñ Ð¼Ð¾Ð´ÑƒÐ»Ñ helperUtility" #~ msgid "Added user : %s" #~ msgstr "Добавлен пользователь : %s" #~ msgid "%s: No configured user found for uid %i" #~ msgstr "%s: Ðе обнаружено Ñконфигурированных пользователей Ð´Ð»Ñ uid %i" #~ msgid "%s: Added" #~ msgstr "%s: Добавлено" #~ msgid "Error with cache configuration: %s. 
Cannot clean up files for job %s" #~ msgstr "" #~ "Ошибка в наÑтройке кÑша: %s. Ðевозможно очиÑтить файлы Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s" #~ msgid "Wrong user name" #~ msgstr "Ðеверное Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ" #~ msgid "No configuration found for user %s in A-REX configuration" #~ msgstr "Ðе найдено наÑтроек Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s в наÑтройках A-REX" #~ msgid "Peer certificate cannot be extracted" #~ msgstr "Ðевозможно извлечь Ñертификат контакта" #~ msgid "Peer cert verification fail" #~ msgstr "Ðе удалоÑÑŒ подтвердить дейÑтвительноÑть Ñертификата узла партнёра" #~ msgid "" #~ "Certificate cannot be extracted, make sure it is the case where client " #~ "side authentication is turned off" #~ msgstr "" #~ "Ðевозможно извлечь Ñертификат - убедитеÑÑŒ, что проверка подлинноÑти на " #~ "Ñтороне клиента отключена " #~ msgid "Peer certificate chain cannot be extracted" #~ msgstr "Ðевозможно извлечь цепочку Ñертификатов узла партнёра" #~ msgid "Can not read file %s with list of trusted VOMS DNs" #~ msgstr "" #~ "Ðе удалоÑÑŒ прочеÑть файл %s Ñо ÑпиÑком уникальных имён доверÑемых " #~ "Ñерверов VOMS" #~ msgid "Can not assign CA location - %s" #~ msgstr "Ðе удалоÑÑŒ припиÑать меÑтонахождение агентÑтва - %s" #~ msgid "Can not load certificate file - %s" #~ msgstr "Ðевозможно подгрузить файл Ñертификата - %s" #~ msgid "Can not load key file - %s" #~ msgstr "Ðе удалоÑÑŒ подгрузить файл Ñекретного ключа - %s" #~ msgid "Private key %s does not match certificate %s" #~ msgstr "Секретный ключ %s не Ñовпадает Ñ Ñертификатом %s" #~ msgid "Certificate %s failed Globus signing policy" #~ msgstr "Сертификат %s не ÑоответÑтвует политике подпиÑи Globus" #~ msgid "Resumation of CREAM jobs is not supported" #~ msgstr "ПерезапуÑк задач CREAM не поддерживаетÑÑ" #~ msgid "EMIESClient was not created properly." #~ msgstr "EMIESClient не был Ñоздан надлежащим образом." #~ msgid "Missing ActivityManager in response from %s" #~ msgstr "ОтÑутÑтвует Ñлемент ActivityManager в отзыве Ñ %s" #~ msgid "Current transfer FAILED: %s - %s" #~ msgstr "Ð¢ÐµÐºÑƒÑ‰Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° ÐЕ СОСТОЯЛÐСЬ: %s - %s" #~ msgid "" #~ "The inputsandbox JDL attribute is referencing a non-regular file (%s)." #~ msgstr "атрибут JDL inputsandbox JDL ÑÑылаетÑÑ Ð½Ð° необычный файл (%s)." 
#~ msgid "NSS set domestic policy failed (%s) on certificate database %s" #~ msgstr "" #~ "Сбой уÑтановки локальной политики NSS (%s) Ð´Ð»Ñ Ð±Ð°Ð·Ñ‹ данных Ñертификатов %s" #~ msgid "Failed while transferring data (mostly timeout)" #~ msgstr "Сбой при передаче данных (обычно иÑтечение Ñрока ожиданиÑ)" #~ msgid "Cannot create directory %s/%s for cache: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать каталог %s/%s Ð´Ð»Ñ ÐºÑша: %s" #~ msgid "Failed uploading file: %s - %s" #~ msgstr "Ðе удалоÑÑŒ отгрузить файл %s - %s" #~ msgid "Failed uploading file: %s" #~ msgstr "Ðе удалоÑÑŒ отгрузить файл: %s" #, fuzzy #~ msgid " Cluster: %s" #~ msgstr "КлаÑтер" #, fuzzy #~ msgid " Management Interface: %s" #~ msgstr "Ð˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð³Ð¾Ñ€Ñчими клавишами" #~ msgid "File download failed: %s - %s" #~ msgstr "Ошибка загрузи файла: %s - %s" #, fuzzy #~ msgid "" #~ "Ignoring job (%s), the Job::InterfaceName attribute must be specified" #~ msgstr "Задача (%s) игнорируетÑÑ, необходимо указывать атрибут Job::Flavour" #~ msgid "Broker %s could not be created" #~ msgstr "Брокер %s не может быть Ñоздан" #~ msgid "Loaded Broker %s" #~ msgstr "Подгружен брокер %s" #~ msgid "" #~ "Will not query endpoint (%s) because another thread is already querying it" #~ msgstr "" #~ "Точка доÑтупа (%s) не будет опрошена, так как её уже опрашивает другой " #~ "поток" #, fuzzy #~ msgid " Local information system URL: %s" #~ msgstr "Канал информации о ÑиÑтеме" #, fuzzy #~ msgid " Submission interface name: %s" #~ msgstr "Ðе найден Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %s" #~ msgid "Location information:" #~ msgstr "Ð¡Ð²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾ раÑположении:" #~ msgid "Domain information:" #~ msgstr "Ð¡Ð²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾ домене:" #~ msgid " Service name: %s" #~ msgstr " Ð˜Ð¼Ñ Ñлужбы: %s" #~ msgid "Manager information:" #~ msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ ÑиÑтеме ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€ÐµÑурÑом:" #~ msgid " Resource manager version: %s" #~ msgstr " ВерÑÐ¸Ñ ÑиÑтемы управлениÑ: %s" #~ msgid "Execution environment information:" #~ msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ рабочих Ñредах:" #~ msgid "Check: obtained creation date: %s" #~ msgstr "Проверка: получена дата ÑозданиÑ: %s" #~ msgid "meta_get_data: checksum: %s" #~ msgstr "meta_get_data: ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма: %s" #~ msgid "meta_get_data: size: %llu" #~ msgstr "meta_get_data: размер: %llu" #~ msgid "meta_get_data: created: %s" #~ msgstr "meta_get_data: Ñоздан: %s" #~ msgid "Failed to remove location from LFC" #~ msgstr "Ошибка при удалении меÑÑ‚Ð¾Ð½Ð°Ñ…Ð¾Ð¶Ð´ÐµÐ½Ð¸Ñ Ð¸Ð· LFC" #~ msgid "Contacting %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ %s" #, fuzzy #~ msgid "Warning: can't connect to RLS server %s: %s" #~ msgstr "" #~ "Ðевозможно подключитÑÑ Ðº Ñерверу. «%s» не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым адреÑом." #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - RLS code is disabled. Report to developers." #~ msgstr "" #~ "ОтÑутÑтвует указание на фабрику и/или модуль. ИÑпользование Globus в " #~ "неопределённом режиме небезопаÑно - Обращение к RLS заблокировано. " #~ "СвÑжитеÑÑŒ Ñ Ñ€Ð°Ð·Ñ€Ð°Ð±Ð¾Ñ‚Ñ‡Ð¸ÐºÐ°Ð¼Ð¸." 
#, fuzzy #~ msgid "Warning: Failed to obtain attributes from %s: %s" #~ msgstr "VOMS: Ñбой при разборе атрибутов в Ñертификате атрибута (AC)" #~ msgid "Attribute: %s - %s" #~ msgstr "Ðтрибут: %s - %s" #~ msgid "RLS URL must contain host" #~ msgstr "RLS URL должен Ñодержать Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð°" #, fuzzy #~ msgid "Source must contain LFN" #~ msgstr "RLS URL должен Ñодержать Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð°" #, fuzzy #~ msgid "Destination must contain LFN" #~ msgstr "" #~ " Ðазначение \"%s\" должно быть каталогом \n" #~ " %s " #, fuzzy #~ msgid "No locations found for destination" #~ msgstr "Ðе найдено физичеÑких адреÑов Ð´Ð»Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ: %s" #~ msgid "LFN already exists in replica" #~ msgstr "LFN уже зарегиÑтрирован Ð´Ð»Ñ Ñ€ÐµÐ¿Ð»Ð¸ÐºÐ¸" #~ msgid "Failed to create GUID in RLS: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать GUID в RLS: %s" #, fuzzy #~ msgid "There is same LFN in %s" #~ msgstr "Ðет подпроекта %1 в SUBDIRS" #~ msgid "Failed to add LFN-GUID to RLS: %s" #~ msgstr "Ðе удалоÑÑŒ добавить LFN-GUID в RLS: %s" #, fuzzy #~ msgid "Failed to retrieve LFN/LRC: %s" #~ msgstr "Ðе могу получить ÑпиÑок каталогов!" #~ msgid "No LFNs found in %s" #~ msgstr "Ð’ %s не обнаружено логичеÑких имён файлов" #~ msgid "lfn: %s(%s) - %s" #~ msgstr "LFN: %s(%s) - %s" #~ msgid "lfn: %s - pfn: %s" #~ msgstr "LFN: %s - PFN: %s" #, fuzzy #~ msgid "Rename: failed to rename file" #~ msgstr "" #~ "Ðе удаетÑÑ Ð¿ÐµÑ€ÐµÐ¸Ð¼ÐµÐ½Ð¾Ð²Ð°Ñ‚ÑŒ файл '%s' в '%s': Ñбой функции g_rename(): %s" #, fuzzy #~ msgid "DTR %s: No SOAP response" #~ msgstr "Ðет ответа SOAP" #, fuzzy #~ msgid "DTR %s: Starting bulk request" #~ msgstr "Принимать _непрÑмые запроÑÑ‹" #~ msgid "Cancelling all DTRs" #~ msgstr "Прерывание вÑех запроÑов DTR" #, fuzzy #~ msgid "Received back DTR %s" #~ msgstr "Получен Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR %s, ÑоÑтоÑние %s" #~ msgid "Job submission failed, no more possible targets" #~ msgstr "Ðе удалоÑÑŒ заÑлать задачу, возможные Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¾Ñ‚ÑутÑтвуют" #~ msgid "Unable to print job description: No matching target found." #~ msgstr "" #~ "Ðевозможно вывеÑти опиÑание задачи: Ðе найдено ни одного подходÑщего " #~ "назначениÑ." #~ msgid "Fileset copy for this kind of source is not supported" #~ msgstr "" #~ "Копирование набора файлов из иÑточника данного типа не поддерживаетÑÑ" #~ msgid "Failed listing metafiles" #~ msgstr "ПеречиÑление метафайлов не удалоÑÑŒ" #~ msgid "Failed listing files" #~ msgstr "ПеречиÑление файлов не удалоÑÑŒ" #~ msgid "%s%s" #~ msgstr "%s%s" #~ msgid "Delete failed: %s" #~ msgstr "Сбой при удалении: %s" #, fuzzy #~ msgid "Rename failed: %s" #~ msgstr "переименовать не удалоÑÑŒ, %s (%s -> %s)." #, fuzzy #~ msgid "Rename failed: %s (%s)" #~ msgstr "переименовать не удалоÑÑŒ, %s (%s -> %s)." #~ msgid "service" #~ msgstr "Ñлужба" #~ msgid "The arcsrmping command is a ping client for the SRM service." #~ msgstr "Команда arcsrmping ÑвлÑетÑÑ Ð°Ð½Ð°Ð»Ð¾Ð³Ð¾Ð¼ утилиты ping Ð´Ð»Ñ Ñлужб SRM." #~ msgid "The service argument is a URL to an SRM service." #~ msgstr "Ðргументом Ñлужбы должен быть URL Ñервера SRM" #, fuzzy #~ msgid "ExecutionTarget %s added to ExecutionTargetSet" #~ msgstr "Файл добавлен в проект" #~ msgid "AuthRequest(inmsg) = " #~ msgstr "AuthRequest(inmsg) = " #~ msgid "Starting:" #~ msgstr "ЗапуÑк:" #~ msgid "Stopping:" #~ msgstr "ОÑтановка:" #~ msgid "%(sn)s.%(rn)s called" #~ msgstr "вызов %(sn)s.%(rn)s" #, fuzzy #~ msgid "No URLs to connect to (in %s)" #~ msgstr "Ð’ выделении нет клонов." 
#~ msgid "ERROR connecting to" #~ msgstr "Ошибка ÑвÑзи Ñ" #~ msgid "ERROR connecting to all of these:" #~ msgstr "Ошибка ÑвÑзи Ñ ÐºÐ°Ð¶Ð´Ñ‹Ð¼ из:" #~ msgid "ID" #~ msgstr "Идентификатор объекта" #~ msgid "ZODBStore constructor called" #~ msgstr "Вызван конÑтруктор ZODBStore" #~ msgid "datadir:" #~ msgstr "datadir:" #~ msgid "TransDBStore constructor called" #~ msgstr "Вызван конÑтруктор TransDBStore" #~ msgid "db environment opened" #~ msgstr "Окружение базы данных открыто" #~ msgid "couldn't find DeadlockRetries, using 5 as default" #~ msgstr "не удалоÑÑŒ найти DeadlockRetries, по умолчанию иÑпользуетÑÑ 5" #~ msgid "couldn't find SleepTime, using %d as default" #~ msgstr "не удалоÑÑŒ найти SleepTime, по умолчанию иÑпользуетÑÑ %d" #~ msgid "got deadlock - retrying" #~ msgstr "взаимоблокировка - Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #~ msgid "Got deadlock error" #~ msgstr "Ошибка взаимоблокировки" #~ msgid "Got rep_dead_handle error" #~ msgstr "Получена ошибка rep_dead_handle" #~ msgid "got DBLockDeadlockError" #~ msgstr "получена ошибка DBLockDeadlockError" #~ msgid "retrying transaction" #~ msgstr "Ð½Ð¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° транзакции" #~ msgid "Deadlock exception, giving up..." #~ msgstr "Ошибка взаимоблокировки - ÑдаюÑÑŒ..." #, fuzzy #~ msgid "Read-only db. I'm not a master." #~ msgstr "umount: невозможно перемонтировать %s только Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ\n" #~ msgid "cannot delete non-existing entries" #~ msgstr "невозможно удалить неÑущеÑтвующие запиÑи" #~ msgid "Error setting %s" #~ msgstr "Ошибка приÑÐ²Ð¾ÐµÐ½Ð¸Ñ %s" #~ msgid "db environment closed" #~ msgstr "Окружение базы данных закрыто" #~ msgid "error closing environment" #~ msgstr "ошибка при закрытии Ñреды" #~ msgid "PickleStore constructor called" #~ msgstr "Вызван конÑтруктор PickleStore" #~ msgid "filename:" #~ msgstr "файл:" #~ msgid "StringStore constructor called" #~ msgstr "Вызван конÑтруктор StringStore" #~ msgid "CachedStringStore constructor called" #~ msgstr "Вызван конÑтруктор CachedStringStore" #, fuzzy #~ msgid "Failed to create parent directory, continuing anyway: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать архивный каталог %s: %s" #, fuzzy #~ msgid "Failed to output the cert req as ascii format" #~ msgstr "Ðе удалоÑÑŒ запуÑтить Ñкрипт очиÑтки кÑша" #~ msgid "Not invoking janitor because it's not enabled in the config file" #~ msgstr "Janitor не будет запущен, Ñ‚.к. 
он не активирован в наÑтройках" #~ msgid "Janitor not enabled and job contains non-deployed RTEs" #~ msgstr "Janitor не запущен, а задача требует отÑутÑтвующую Ñреду иÑполнениÑ" #~ msgid "Janitor not installed and job contains non-deployed RTEs" #~ msgstr "" #~ "Janitor не уÑтановлен, а задача требует отÑутÑтвующую Ñреду иÑполнениÑ" #~ msgid "Janitor timeout while deploying Dynamic RTE(s)" #~ msgstr "" #~ "Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Janitor вышло при уÑтановке динамичеÑкой Ñреды иÑполнениÑ" #~ msgid "Janitor not enabled and there are missing RTE(s)" #~ msgstr "Janitor не запущен, а Ñреда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¾Ñ‚ÑутÑтвует" #~ msgid "Janitor failed to deploy Dynamic RTE(s)" #~ msgstr "Janitor не Ñмог уÑтановить динамичеÑкую Ñреду иÑполнениÑ" #~ msgid "" #~ "Janitor timeout while removing Dynamic RTE(s) associations (ignoring)" #~ msgstr "" #~ "Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Janitor вышло при удалении ÑвÑзей динамичеÑкой Ñреды " #~ "иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ (игнорируетÑÑ)" #~ msgid "Janitor failed to remove Dynamic RTE(s) associations (ignoring)" #~ msgstr "" #~ "Janitor не Ñмог удалить ÑвÑзи динамичеÑкой Ñреды иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ (игнорируетÑÑ)" #~ msgid "Janitor executable not found at %s" #~ msgstr "ИÑполнÑемый файл Janitor не найден в %s" #~ msgid "Can't run %s" #~ msgstr "Ðевозможно выполнить %s" #~ msgid "Can't start %s" #~ msgstr "Ðевозможно запуÑтить %s" #~ msgid "Stopping Master Thread." #~ msgstr "ОÑтанавливаетÑÑ Ð¾Ñновной поток" #~ msgid "Master Thread is deleting threads." #~ msgstr "Головной поток уничтожает потоки." #~ msgid "Master Thread stopped." #~ msgstr "ОÑновной поток оÑтановлен" #~ msgid "Thread %d, Pipes failed" #~ msgstr "Поток %d, Ñбой Ð¿ÐµÑ€ÐµÐ½Ð°Ð¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñ‚Ð¾ÐºÐ¾Ð²" #~ msgid "Thread %d, Fork failed" #~ msgstr "Поток %d, Ñбой почкованиÑ" #~ msgid "Thread %d, child is terminating." #~ msgstr "Поток %d, обрываетÑÑ Ð´Ð¾Ñ‡ÐµÑ€Ð½Ð¸Ð¹ поток" #~ msgid "Thread %d is ready." #~ msgstr "Поток %d готов" #~ msgid "Thread %d got Task %d." #~ msgstr "Поток %d получил задание %d." #~ msgid "Thread %d, Input is not SOAP" #~ msgstr "Поток %d, вход не в формате SOAP" #~ msgid "" #~ "Thread %d: Task %d Result:\n" #~ "%s\n" #~ msgstr "" #~ "Поток %d: Задание %d Результат:\n" #~ "%s\n" #~ msgid "Thread %d, TaskQueue returned empty Task." #~ msgstr "Поток %d, TaskQueue вернул пуÑтое задание." #~ msgid " Deconstructing Web Service" #~ msgstr "ЛиквидируетÑÑ Ð²ÐµÐ±-Ñлужба" #~ msgid " Flushing set and queue" #~ msgstr "СбраÑывютÑÑ Ð·Ð°Ð´Ð°Ð½Ð¸Ñ Ð¸ очередь" #~ msgid " Deconstructing is waiting for PerlProcessor" #~ msgstr "Ð›Ð¸ÐºÐ²Ð¸Ð´Ð°Ñ†Ð¸Ñ Ð¾Ð¶Ð¸Ð´Ð°ÐµÑ‚ PerlProcessor" #~ msgid " Deconstructing is waiting for TaskQueue" #~ msgstr "Ð›Ð¸ÐºÐ²Ð¸Ð´Ð°Ñ†Ð¸Ñ Ð¾Ð¶Ð¸Ð´Ð°ÐµÑ‚ TaskQueue" #~ msgid " Deconstructing is waiting for TaskSet" #~ msgstr "Ð›Ð¸ÐºÐ²Ð¸Ð´Ð°Ñ†Ð¸Ñ Ð¾Ð¶Ð¸Ð´Ð°ÐµÑ‚ TaskSet" #~ msgid " Deconstructing Web Service ... done" #~ msgstr "ЛиквидируетÑÑ Ð²ÐµÐ±-Ñлужба ... готово" #~ msgid "Creating fault! Reason: \"%s\"" #~ msgstr "СоздаётÑÑ Ð¾Ñ‚Ñ‡Ñ‘Ñ‚ о Ñбое! Причина: \"%s\"" #~ msgid "DREWEBSERVICE 1 %d" #~ msgstr "DREWEBSERVICE 1 %d" #~ msgid "DREWEBSERVICE 2 %d" #~ msgstr "DREWEBSERVICE 2 %d" #~ msgid "TaskSet is waiting for objects (%d) still using the set." #~ msgstr "TaskSet ожидает объекты (%d) вÑÑ‘ ещё иÑпользующие поÑтановку." #~ msgid "Added Task %d to the set. " #~ msgstr "Задание %d добавлено в группу. " #~ msgid "Removed Task %d out of to the set. " #~ msgstr "Задание %d удалено из поÑтановки. " #~ msgid "TaskSet is waiting for objects still using the set." 
#~ msgstr "TaskSet ожидает объекты вÑÑ‘ ещё иÑпользующие поÑтановку." #~ msgid "Pushed Task %d into the queue. " #~ msgstr "Задание %d переведено в очередь. " #~ msgid "Shifted Task %d out of to the queue. " #~ msgstr "Задание %d передвинуто в очередь. " #~ msgid "Chunk %u: %u - %u" #~ msgstr "Фрагмент %u: %u - %u" #~ msgid "Hopi SlaveMode is active, PUT is only allowed to existing files" #~ msgstr "" #~ "Ðктивирован подчинённый режим Хопи, Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ PUT разрешена только Ð´Ð»Ñ " #~ "ÑущеÑтвующих файлов" #~ msgid "Removing complete file in slave mode" #~ msgstr "УдалÑетÑÑ Ð²ÐµÑÑŒ файл в подчинённом режиме" #~ msgid "Hopi Initialized" #~ msgstr "Hopi запущен" #~ msgid "Hopi DocumentRoot is " #~ msgstr "Hopi DocumentRoot:" #~ msgid "Hopi SlaveMode is on!" #~ msgstr "Включён режим Hopi SlaveMode!" #~ msgid "Hopi shutdown" #~ msgstr "Hopi оÑтанавливаетÑÑ" #~ msgid "PUT called" #~ msgstr "Вызван метод PUT" #~ msgid "File size is %u" #~ msgstr "Размер файла: %u" #~ msgid "error reading from HTTP stream" #~ msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñ‚Ð¾ÐºÐ° HTTP" #~ msgid "error on write" #~ msgstr "ошибка запиÑи" #~ msgid "Input for PUT operation is neither stream nor buffer" #~ msgstr "Вход операции PUT не ÑвлÑетÑÑ Ð½Ð¸ потоком, ни буфером" #~ msgid "method=%s, path=%s, url=%s, base=%s" #~ msgstr "метод=%s, путь=%s, URL-адреÑ=%s, база=%s" #~ msgid "No content provided for PUT operation" #~ msgstr "Ðе указано Ñодержимое Ð´Ð»Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸ PUT" #~ msgid "Not supported operation" #~ msgstr "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ð½Ðµ поддерживаетÑÑ" #~ msgid "request node is empty" #~ msgstr "ПуÑтой узел запроÑа" #~ msgid "Evaluator is not initialized" #~ msgstr "Обработчик не запущен" #~ msgid "Policy(ies) modified - reloading evaluator" #~ msgstr "Ðорматив(Ñ‹) изменен(Ñ‹) - перезагрузка анализатора" #~ msgid "NULL response" #~ msgstr "Ответ NULL" #~ msgid "Authorized from Charon service" #~ msgstr "Допущен Ñлужбой Charon" #~ msgid "" #~ "Not authorized from Charon service; Some of the RequestItem does not " #~ "satisfy Policy" #~ msgstr "" #~ "Ðе допущен Ñлужбой Charon; некоторые пункты RequestItem не удовлетворÑÑŽÑ‚ " #~ "нормативам" #~ msgid "process: %s: not supported" #~ msgstr "процеÑÑ: %s: не поддерживаетÑÑ" #~ msgid "Policy location: %s" #~ msgstr "РаÑположение правил доÑтупа: %s" #~ msgid "Loading policy from %s" #~ msgstr "Загрузка правил из %s" #~ msgid "Failed loading policy from %s" #~ msgstr "Сбой загрузки правил из %s" #~ msgid "Checking policy modification: %s" #~ msgstr "Проверка изменений в правилах: %s" #~ msgid "Policy removed: %s" #~ msgstr "Правила удалены: %s" #~ msgid "Old policy times: %u/%u" #~ msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ/ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñтарых правил: %u/%u" #~ msgid "New policy times: %u/%u" #~ msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ/ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð½Ð¾Ð²Ñ‹Ñ… правил: %u/%u" #~ msgid "Policy Decision Request failed" #~ msgstr "Ошибка запроÑа Ñ€ÐµÑˆÐµÐ½Ð¸Ñ Ð¾ доÑтупе" #~ msgid "Policy Decision Request succeeded!!!" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¾ принÑтии Ñ€ÐµÑˆÐµÐ½Ð¸Ñ Ñоздан!!!" #~ msgid "ES:CreateActivities: Failed to create new job: %s" #~ msgstr "ES: CreateActivity: Ðе удалоÑÑŒ Ñоздать новую задачу: %s" #~ msgid "Not all jobs are cleaned yet" #~ msgstr "Ещё не вÑе задачи вычищены" #~ msgid "Trying again" #~ msgstr "ÐÐ¾Ð²Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°" #~ msgid "Jobs cleaned" #~ msgstr "задач очищено" #~ msgid "Preparing directories" #~ msgstr "Подготовка каталогов" #~ msgid "Empty URL list add to the thread." #~ msgstr "ПуÑтой ÑпиÑок URL добавлен к потоку." #~ msgid "Empty message add to the thread." 
#~ msgstr "Ð’ поток добавлено пуÑтое Ñообщение." #~ msgid "Status (%s): Failed" #~ msgstr "СоÑтоÑние (%s): Сбой" #~ msgid "Status (%s): OK" #~ msgstr "СоÑтоÑние (%s): УÑпех" #~ msgid "Empty message won't be send to the neighbors." #~ msgstr "ПуÑтое Ñообщение не будет разоÑлано ÑоÑедÑм." #~ msgid "%s: %d seconds to the next database cleaning." #~ msgstr "%s: %d Ñекунд(Ñ‹) до Ñледующей очиÑтки базы данных." #~ msgid "Parsing configuration parameters" #~ msgstr "Обработка параметров наÑтройки" #~ msgid "" #~ "The Endpoint element is defined multiple time in ISIS configuration. The " #~ "'%s' value will be used." #~ msgstr "" #~ "Элемент Endpoint задан неÑколько раз в наÑтройках ISIS. Будет " #~ "иÑпользовано значение '%s'." #~ msgid "Empty endpoint element in the configuration!" #~ msgstr "ПуÑтой Ñлемент endpoint в наÑтройках!" #~ msgid "KeyPath: %s" #~ msgstr "KeyPath: %s" #~ msgid "CertificatePath: %s" #~ msgstr "CertificatePath: %s" #~ msgid "CACertificatesDir: %s" #~ msgstr "CACertificatesDir: %s" #~ msgid "CACertficatePath: %s" #~ msgstr "CACertficatePath: %s" #~ msgid "Missing or empty KeyPath element in the configuration!" #~ msgstr "ПуÑтой или отÑутÑтвующий Ñлемент KeyPath в наÑтройках!" #~ msgid "Misisng or empty CertificatePath element in the configuration!" #~ msgstr "ПуÑтой или отÑутÑтвующий Ñлемент CertificatePath в наÑтройках!" #~ msgid "Missing or empty ProxyPath element in the configuration!" #~ msgstr "ПуÑтой или отÑутÑтвующий Ñлемент ProxyPath в наÑтройках!" #~ msgid "Missing or empty CACertificatesDir element in the configuration!" #~ msgstr "ПуÑтой или отÑутÑтвующий Ñлемент CACertificatesDir в наÑтройках!" #~ msgid "Missing or empty CACertificatePath element in the configuration!" #~ msgstr "ПуÑтой или отÑутÑтвующий Ñлемент CACertificatePath в наÑтройках!" #~ msgid "" #~ "Configuration error. Retry: \"%d\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. Retry: \"%d\" не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым значением. Будет " #~ "иÑпользовано значение по умолчанию." #~ msgid "" #~ "The Retry element is defined multiple time in ISIS configuration. The " #~ "'%d' value will be used." #~ msgstr "" #~ "Элемент Retry задан неÑколько раз в наÑтройках ISIS. Будет иÑпользовано " #~ "значение '%d'." #~ msgid "" #~ "Configuration error. Sparsity: \"%d\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. Sparsity: \"%d\" не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым значением. " #~ "Будет иÑпользовано значение по умолчанию." #~ msgid "" #~ "The Sparsity element is defined multiple time in ISIS configuration. The " #~ "'%d' value will be used." #~ msgstr "" #~ "Элемент Sparsity задан неÑколько раз в наÑтройках ISIS. Будет " #~ "иÑпользовано значение '%d'." #~ msgid "Sparsity: %d" #~ msgstr "Sparsity: %d" #~ msgid "" #~ "Configuration error. ETValid: \"%s\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. ETValid: \"%s\" не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым значением. Будет " #~ "иÑпользовано значение по умолчанию." #~ msgid "Configuration error. ETValid is empty. Default value will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. Значение ETValid не задано. Будет иÑпользовано значение " #~ "по умолчанию." #~ msgid "ETValid: %d seconds" #~ msgstr "ETValid: %d Ñекунд" #~ msgid "" #~ "Configuration error. ETRemove: \"%s\" is not a valid value. Default value " #~ "will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. ETRemove: \"%s\" не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым значением. 
" #~ "Будет иÑпользовано значение по умолчанию." #~ msgid "Configuration error. ETRemove is empty. Default value will be used." #~ msgstr "" #~ "Ошибйка наÑтроек. Значение ETRemove не задано. Будет иÑпользовано " #~ "значение по умолчанию." #~ msgid "ETRemove: %d seconds" #~ msgstr "ETRemove: %d Ñекунд" #~ msgid "Invalid database path definition" #~ msgstr "Ðеверное определение пути к базе данных" #~ msgid "The InfoProvider element in ISIS configuration is empty." #~ msgstr "Элемент InfoProvider в наÑтройках ISIS пуÑÑ‚." #~ msgid "RemoveRegistrations message sent to neighbors." #~ msgstr "Сообщение RemoveRegistrations разоÑлано ÑоÑедÑм." #~ msgid "ISIS (%s) has %d more thread%s" #~ msgstr "Ð’ ISIS (%s) ещё %d поток%s" #~ msgid "ISIS (%s) destroyed." #~ msgstr "ISIS (%s) ликвидирован." #~ msgid "Query received: %s" #~ msgstr "Получен запроÑ: %s" #~ msgid "Register received: ID=%s; EPR=%s; MsgGenTime=%s" #~ msgstr "Получена региÑтрациÑ: ID=%s; EPR=%s; MsgGenTime=%s" #~ msgid "RemoveRegistrations received: ID=%s" #~ msgstr "Получен Ð·Ð°Ð¿Ñ€Ð¾Ñ RemoveRegistrations: ID=%s" #~ msgid "GetISISList received" #~ msgstr "Получен Ð·Ð°Ð¿Ñ€Ð¾Ñ GetISISList" #~ msgid "Connect received" #~ msgstr "Получен Ð·Ð°Ð¿Ñ€Ð¾Ñ Connect" #~ msgid "Communication error: input is not SOAP" #~ msgstr "Сбой передачи данных: ввод не в формате SOAP" #~ msgid "Neighbors count recalculate from %d to %d (at ISIS %s)" #~ msgstr "КоличеÑтво ÑоÑедей переÑчитано Ñ %d на %d (Ð´Ð»Ñ ISIS %s)" #~ msgid "Query failed at %s, choosing new InfoProvider." #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ðº %s не удалÑÑ, выбираем новый InfoProvider." #~ msgid "Remove ISIS (%s) from the list of InfoProviders." #~ msgstr "Удаление ISIS (%s) из ÑпиÑка InfoProviders." #~ msgid "No InfoProvider is available." #~ msgstr "Ðет доÑтупных InfoProvider." #~ msgid "Neighbors count: %d" #~ msgstr "КоличеÑтво ÑоÑедей: %d" #~ msgid "Connect status (%s): Failed" #~ msgstr "СоÑтоÑние ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ (%s): Сбой" #~ msgid "Connect status (%s): OK" #~ msgstr "СоÑтоÑние ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ (%s): УÑпех" #~ msgid "Database mass updated." #~ msgstr "База данных маÑÑово обновлена." #~ msgid "Error converting maxload parameter %s to integer" #~ msgstr "Ошибка Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð° maxload %s в целое" #~ msgid "Setting max downloads to %u" #~ msgstr "МакÑимальное чиÑло загрузок уÑтанавливаетÑÑ Ð½Ð° %u" #~ msgid "Failed writing file with inputs" #~ msgstr "Ðе удалоÑÑŒ запиÑать файл Ñ Ð²Ñ…Ð¾Ð´Ð½Ñ‹Ð¼Ð¸ ÑÑылками" #~ msgid "Starting child downloader process" #~ msgstr "ЗапуÑк дочернего процеÑÑа загрузчика" #~ msgid "%s: child is running" #~ msgstr "%s: дочерний процеÑÑ Ð·Ð°Ð¿ÑƒÑ‰ÐµÐ½" #~ msgid "Download process for job %s timed out" #~ msgstr "ПроцеÑÑ Ð·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ¸ Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s превыÑил Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ" #~ msgid "Downloader exited with code: %i" #~ msgstr "Загрузчик завершил работу Ñ ÐºÐ¾Ð´Ð¾Ð¼ выхода: %i" #~ msgid "TargetRetriver%s initialized with %s service url: %s" #~ msgstr "TargetRetriver%s запущен Ñ URL Ñлужбы %s: %s" #~ msgid "" #~ "Trying to migrate to %s: Migration to a CREAM resource is not supported." #~ msgstr "Попытка миграции на %s: ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð° реÑÑƒÑ€Ñ CREAM не поддерживаетÑÑ." #~ msgid "Failed dowloading %s to %s" #~ msgstr "Ðе удалоÑÑŒ загрузить %s в %s" #~ msgid "Migration for EMI ES is not implemented" #~ msgstr "ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ EMI ES не реализована" #~ msgid "Collecting Job (%s jobs) information." #~ msgstr "СобираетÑÑ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задачах (%s задач)" #~ msgid "%s directory exist! 
This job downloaded previously." #~ msgstr "Каталог %s уже ÑущеÑтвует! Эта задача была загружена ранее." #~ msgid "Cancel of EMI ES jobs is not supported" #~ msgstr "Прерывание задач EMI ES не поддерживаетÑÑ" #~ msgid "" #~ "Trying to migrate to %s: Migration to a legacy ARC resource is not " #~ "supported." #~ msgstr "" #~ "Попытка миграции на %s: ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð° Ñтарый реÑÑƒÑ€Ñ ARC не поддерживаетÑÑ." #~ msgid "" #~ "Missing reference to factory and/or module. It is unsafe to use Globus in " #~ "non-persistent mode - TargetRetriver for ARC0 is disabled. Report to " #~ "developers." #~ msgstr "" #~ "ОтÑутÑтвует указание на фабрику и/или модуль. ИÑпользование Globus в " #~ "неопределённом режиме небезопаÑно - TargetRetriever Ð´Ð»Ñ ARC0 " #~ "заблокирован. СвÑжитеÑÑŒ Ñ Ñ€Ð°Ð·Ñ€Ð°Ð±Ð¾Ñ‚Ñ‡Ð¸ÐºÐ°Ð¼Ð¸." #~ msgid "" #~ "Trying to migrate to %s: Migration to a UNICORE resource is not supported." #~ msgstr "" #~ "Попытка миграции на %s: ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð° реÑÑƒÑ€Ñ UNICORE не поддерживаетÑÑ." #~ msgid "Collecting ExecutionTarget (A-REX/BES) information." #~ msgstr "Сбор информации об ExecutionTarget (A-REX/BES)." #~ msgid "Generating BES target: %s" #~ msgstr "СоздаётÑÑ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ BES: %s" #~ msgid "" #~ "Multiple execution environments per queue specified for target: \"%s\". " #~ "Execution environment information will be ignored." #~ msgstr "" #~ "Ð”Ð»Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ \"%s\" указаны множеÑтвенные рабочие Ñреды очередей. " #~ "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ рабочих Ñредах игнорируетÑÑ." #~ msgid "ComputingShare is associated with the ExecutionEnvironment \"%s\"" #~ msgstr "ComputingShare аÑÑоциирована Ñ ExecutionEnvironment \"%s\"" #~ msgid "ExecutionEnvironment \"%s\" located" #~ msgstr "Обнаружена ExecutionEnvironment \"%s\"" #~ msgid "Getting BES jobs is not supported" #~ msgstr "Извлечение задач BES не поддерживаетÑÑ" #~ msgid "targets.size() = %d" #~ msgstr "targets.size() = %d" #~ msgid "Wrong middleware type: %s" #~ msgstr "ÐедопуÑтимый тип подпрограммного обеÑпечению: %s" #~ msgid "Found %u %s execution services from the index service at %s" #~ msgstr "" #~ "Обнаружено %u вычиÑлительных ÑервиÑов %s через ÑÐµÑ€Ð²Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° на %s" #~ msgid "" #~ "Matching against job description,following targets possible for " #~ "BenchmarkBroker: %d" #~ msgstr "" #~ "Сравнение Ñ Ð¾Ð¿Ð¸Ñанием задачи; Ñледующие Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑматриваютÑÑ Ð² " #~ "алгоритме BenchmarkBroker: %d" #~ msgid "%d. Resource: %s; Queue: %s" #~ msgstr "%d. 
РеÑурÑ: %s; Очередь: %s" #~ msgid "Resource will be ranked according to the %s benchmark scenario" #~ msgstr "Ðазначение будет упорÑдочено в ÑоответÑтвии Ñ Ñталонным теÑтом %s" #~ msgid "Best targets are: %d" #~ msgstr "Ðаилучшие цели: %d" #~ msgid "FastestQueueBroker is filtering %d targets" #~ msgstr "FastestQueueBroker перебирает %d назначений" #~ msgid "FastestQueueBroker will rank the following %d targets" #~ msgstr "FastestQueueBroker упорÑдочивает Ñледующие %d назначений" #~ msgid "" #~ "Matching against job description, following targets possible for " #~ "DataBroker: %d" #~ msgstr "" #~ "Сравнение Ñ Ð¾Ð¿Ð¸Ñанием задачи; Ñледующие Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑматриваютÑÑ Ð² " #~ "алгоритме DataBroker: %d" #~ msgid "" #~ "Matching against job description, following targets possible for " #~ "RandomBroker: %d" #~ msgstr "" #~ "Сравнение Ñ Ð¾Ð¿Ð¸Ñанием задачи; Ñледующие Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑматриваютÑÑ Ð² " #~ "алгоритме RandomBroker: %d" #~ msgid "Cannot create Python list" #~ msgstr "Ðевозможно Ñоздать ÑпиÑок Python" #~ msgid "Private key of the credential object is NULL" #~ msgstr "Закрытый ключ объекта параметров доÑтупа имеет значение NULL" #~ msgid "Unable to get job (%s), job is deleted" #~ msgstr "Ðевозможно извлечь задачу (%s), задача удалена" #~ msgid "Unable to get job (%s), it has not finished yet" #~ msgstr "Ðевозможно извлечь задачу (%s), она ещё не завершилаÑÑŒ" #~ msgid "Unable to renew job (%s), job already finished" #~ msgstr "Ðевозможно возобновить задачу (%s), она уже завершилаÑÑŒ" #~ msgid "Unable to resume job (%s), job is %s and cannot be resumed" #~ msgstr "" #~ "Ðевозможно продолжить задачу (%s), задача в ÑоÑтоÑнии %s не может быть " #~ "продолжена" #~ msgid "" #~ "Unable to resubmit job (%s), job description could not be retrieved " #~ "remotely" #~ msgstr "" #~ "Ðевозможно перезапуÑтить задачу (%s), опиÑание задачи не может быть " #~ "извлечено Ñ ÑƒÐ´Ð°Ð»Ñ‘Ð½Ð½Ð¾Ð³Ð¾ иÑточника" #~ msgid "Unable to resubmit job (%s), local input file (%s) has changed" #~ msgstr "" #~ "Ðевозможно перезапуÑтить задачу (%s), локальный входной файл (%s) " #~ "изменилÑÑ" #~ msgid "Unable to kill job (%s), job is deleted" #~ msgstr "Ðевозможно прервать задачу (%s), задача удалена" #~ msgid "Unable to kill job (%s), job has already finished" #~ msgstr "Ðевозможно прервать задачу (%s), она уже завершилаÑÑŒ" #~ msgid "Unable to clean job (%s), job has not finished yet" #~ msgstr "Ðевозможно вычиÑтить задачу (%s), она ещё не завершилаÑÑŒ" #~ msgid "Target (%s) was explicitly rejected." #~ msgstr "Цель (%s) Ñвно отклонена." #~ msgid "Possible targets after prefiltering: %d" #~ msgstr "Возможные Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле предварительного отбора: %d" #~ msgid "Health State: %s" #~ msgstr "СоÑтоÑние здоровьÑ: %s" #~ msgid "Target sorting not done, sorting them now" #~ msgstr "ÐÐ°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð½Ðµ упорÑдочены, ведётÑÑ Ñортировка" #~ msgid "For this middleware there are no testjobs defined." #~ msgstr "Ð”Ð»Ñ Ñтого Грид-ПО пробных задач пока что нет" #~ msgid "For this middleware only %s testjobs are defined." #~ msgstr "Ð”Ð»Ñ Ñтого Грид-ПО ÑущеÑтвуют только Ñледующие теÑтовые задачи: %s" #~ msgid "FreeSlots = %d; UsedSlots = %d; WaitingJobs = %d" #~ msgstr "Свободных меÑÑ‚ = %d; занÑтых меÑÑ‚ = %d; задач в очереди = %d" #~ msgid "Generating computing target: %s" #~ msgstr "СоздаётÑÑ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ Ð´Ð»Ñ Ð²Ñ‹Ñ‡Ð¸ÑлениÑ: %s" #~ msgid "" #~ "Multiple execution environments per queue specified for target. Execution " #~ "environment information will be ignored." 
#~ msgstr "" #~ "Ð”Ð»Ñ Ñ†ÐµÐ»Ð¸ указаны множеÑтвенные рабочие Ñреды очередей. Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ " #~ "рабочих Ñредах игнорируетÑÑ." #~ msgid "Found %ld targets" #~ msgstr "Обнаружено %ld назначений" #~ msgid "Resource: %s" #~ msgstr "РеÑурÑ: %s" #~ msgid "Found %ld jobs" #~ msgstr "Обнаружено %ld задач" #~ msgid " URL: %s:%s" #~ msgstr " URL: %s:%s" #~ msgid "TargetRetriever plugin \"%s\" not found." #~ msgstr "Подключаемый модуль TargetRetriever \"%s\" не обнаружен" #~ msgid "TargetRetriever %s could not be created." #~ msgstr "TargetRetriever %s не может быть Ñоздан" #~ msgid "Loaded TargetRetriever %s" #~ msgstr "Подгружен TargetRetriever %s" #~ msgid "Overwriting already defined alias \"%s\"" #~ msgstr "ПереопределÑетÑÑ ÑƒÐ¶Ðµ заданный пÑевдоним \"%s\"" #~ msgid "Could not resolve alias \"%s\" it is not defined." #~ msgstr "Ðе удалоÑÑŒ разобрать Ñокращённое название \"%s\" Ñ‚.к. оно не задано" #~ msgid "" #~ "The defaultservices attribute value contains a wrongly formated element " #~ "(%s) in configuration file (%s)" #~ msgstr "" #~ "Ðтрибут defaultservices Ñодержит неверно Ñформулированный Ñлемент (%s) в " #~ "файле наÑтроек (%s)" #~ msgid "" #~ "The defaultservices attribute value contains an unknown servicetype %s at " #~ "%s in configuration file (%s)" #~ msgstr "" #~ "Значение атрибута defaultservices Ñодержит неизвеÑтный тип ÑервиÑа %s в " #~ "%s в файле наÑтроек (%s)" #~ msgid "Adding selected service %s:%s" #~ msgstr "ДобавлÑетÑÑ Ð²Ñ‹Ð±Ñ€Ð°Ð½Ð½Ñ‹Ð¹ ÑÐµÑ€Ð²Ð¸Ñ %s:%s" #~ msgid "" #~ "The rejectservices attribute value contains a wrongly formated element " #~ "(%s) in configuration file (%s)" #~ msgstr "" #~ "Ðтрибут rejectservices Ñодержит неверно Ñформулированный Ñлемент (%s) в " #~ "файле наÑтроек (%s)" #~ msgid "" #~ "The rejectservices attribute value contains an unknown servicetype %s at " #~ "%s in configuration file (%s)" #~ msgstr "" #~ "Значение атрибута rejectservices Ñодержит неизвеÑтный тип ÑервиÑа %s в %s " #~ "в файле наÑтроек (%s)" #~ msgid "Adding rejected service %s:%s" #~ msgstr "БлокируетÑÑ ÑÐµÑ€Ð²Ð¸Ñ %s:%s" #~ msgid "rejected" #~ msgstr "отклонён" #~ msgid "Cannot resolve alias \"%s\". Loop detected: %s" #~ msgstr "" #~ "Ðевозможно разобратьÑокращённое названив \"%s\". Обнаружена цикличеÑÐºÐ°Ñ " #~ "завиÑимоÑть: %s" #, fuzzy #~ msgid "Cannot resolve alias %s, it is not defined" #~ msgstr "Ðе удалоÑÑŒ разобрать Ñокращённое название \"%s\" Ñ‚.к. оно не задано" #~ msgid "Alias name (%s) contains a unknown servicetype %s at %s" #~ msgstr "ПÑевдоним (%s) Ñодержит неизвеÑтный тип ÑервиÑа %s в %s" #, fuzzy #~ msgid "Adding service %s:%s from resolved alias %s" #~ msgstr "Ошибка при добавлении Ñлужбы. %s" #~ msgid "Alias (%s) contains a wrongly formatted element (%s)" #~ msgstr "ПÑевдоним (%s) Ñодержит неверно оформленый Ñлемент (%s)" #, fuzzy #~ msgid "DTR %s: Re-resolving destination replicas" #~ msgstr "Локальный &каталог назначениÑ:" #~ msgid "The testjob ID should be 1, 2 or 3.\n" #~ msgstr "Ðомер теÑтовой задачи может быть 1, 2 или 3.\n" #~ msgid "Unable to print job description: No target found." #~ msgstr "Ðевозможно вывеÑти опиÑание задачи: Ðе найдено ни одного назначениÑ" #~ msgid "" #~ "Cannot find any proxy. arcresub currently cannot run without a proxy.\n" #~ " If you have the proxy file in a non-default location,\n" #~ " please make sure the path is specified in the client configuration " #~ "file.\n" #~ " If you don't have a proxy yet, please run 'arcproxy'!" #~ msgstr "" #~ "Ðе удалоÑÑŒ обнаружить доверенноÑть. 
Ð’ Ñтой верÑии arcresub не работает " #~ "без доверенноÑти.\n" #~ " ЕÑли Ваша доверенноÑть хранитÑÑ Ð² неÑтандартном меÑте, пожалуйÑта,\n" #~ " убедитеÑÑŒ, что в наÑтройках клиента указан правильный путь.\n" #~ " ЕÑли же Ð’Ñ‹ пока не Ñоздали доверенноÑть, запуÑтите 'arcproxy'!" #~ msgid "explicitly select or reject a specific resource" #~ msgstr "Ñвным образом выбрать или отÑеÑть указанный реÑурÑ" #~ msgid "explicitly select or reject a specific resource for new jobs" #~ msgstr "Ñвным образом выбрать или отÑеÑть указанный реÑÑƒÑ€Ñ Ð´Ð»Ñ Ð½Ð¾Ð²Ñ‹Ñ… задач" #~ msgid "explicitly select or reject an index server" #~ msgstr "Ñвным образом выбрать или отÑеÑть каталог реÑурÑов" #~ msgid "Unable to find JobController for job %s (plugin type: %s)" #~ msgstr "" #~ "Ðевозможно обнаружить модуль JobController Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s (тип " #~ "подключаемого модулÑ: %s)" #~ msgid "No jobs selected for cleaning" #~ msgstr "Ðе выбраны задачи Ð´Ð»Ñ Ð²Ñ‹Ñ‡Ð¸Ñ‰ÐµÐ½Ð¸Ñ" #~ msgid "No jobs selected for migration" #~ msgstr "Ðе выбраны задачи Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ñылки" #~ msgid "No queuing jobs to migrate" #~ msgstr "Ðет задач в очереди Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ñылки" #~ msgid "No jobs selected for resubmission" #~ msgstr "Ðе выбраны задачи Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ð¿ÑƒÑка" #~ msgid "service_url request_file" #~ msgstr "service_url request_file" #~ msgid "url of the policy decision service" #~ msgstr "URL Ñлужбы принÑÑ‚Ð¸Ñ Ñ€ÐµÑˆÐµÐ½Ð¸Ð¹" #~ msgid "path to request file" #~ msgstr "путь к файлу Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñом" #~ msgid "use SAML 2.0 profile of XACML v2.0 to contact the service" #~ msgstr "Ð´Ð»Ñ ÑвÑзи Ñо Ñлужбой иÑпользуетÑÑ Ð¿Ñ€Ð¾Ñ„Ð¸Ð»ÑŒ SAML 2.0 XACML v2.0" #~ msgid "path to the certificate file" #~ msgstr "путь к файлу Ñертификата" #~ msgid "path to the private key file" #~ msgstr "путь к файлу Ñекретного ключа" #~ msgid "" #~ "Cannot find the path of the certificate/key file, and proxy file, please " #~ "setup environment X509_USER_CERT/X509_USER_KEY, or X509_USER_PROXY,or " #~ "setup certificatepath/keypath, or proxypath in a configuration file" #~ msgstr "" #~ "Ðе удалоÑÑŒ найти путь к открытому/закрытому ключу и доверенноÑти. " #~ "ПожалуйÑта, задайте переменную Ñреды X509_USER_CERT/X509_USER_KEY, или " #~ "X509_USER_PROXY, или значение certificatepath/keypath, или proxypath в " #~ "файле наÑтроек" #~ msgid "" #~ "CA certificate directory: %s is given by X509_CERT_DIR, but it can't been " #~ "accessed." #~ msgstr "" #~ "Каталог Ñертификатов агентÑтв CA %s задан X509_CERT_DIR, но не может быть " #~ "прочитан." #~ msgid "" #~ "The start time that you set plus validityPeriod: %s is before current " #~ "time: %s.\n" #~ "Please set the time constraints once again." #~ msgstr "" #~ "Указанное Ð²Ñ€ÐµÐ¼Ñ Ð½Ð°Ñ‡Ð°Ð»Ð° Ñ Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸ÐµÐ¼ Ñрока годноÑти %s предшеÑтвует " #~ "текущему времени: %s.\n" #~ "ПожалуйÑта, задайте Ñроки Ñнова." #~ msgid "" #~ "The start time that you set plus validityPeriod: %s is after current " #~ "time: %s.\n" #~ "The validityPeriod will be shorten to %s." #~ msgstr "" #~ "Указанное Ð²Ñ€ÐµÐ¼Ñ Ð½Ð°Ñ‡Ð°Ð»Ð° Ñ Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸ÐµÐ¼ Ñрока годноÑти %s позже текущего " #~ "времени: %s.\n" #~ "Срок годноÑти будет Ñокращён до %s." #~ msgid "" #~ "The start time that you set: %s is before current time: %s.\n" #~ "The current time will be used as start time." #~ msgstr "" #~ "Указанное Ð²Ñ€ÐµÐ¼Ñ Ð½Ð°Ñ‡Ð°Ð»Ð° %s предшеÑтвует текущему времени: %s.\n" #~ "Текущее Ð²Ñ€ÐµÐ¼Ñ Ð±ÑƒÐ´ÐµÑ‚ иÑпользовано в качеÑтве начального." 
#~ msgid "" #~ "The end time that you set: %s is after the start time plus " #~ "validityPeriod: %s.\n" #~ " The validityPeriod will not be changed.\n" #~ msgstr "" #~ "Указанное Ð²Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ %s позже времени начала Ñ Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸ÐµÐ¼ Ñрока " #~ "годноÑти: %s.\n" #~ "Срок годноÑти не будет изменён.\n" #~ msgid "" #~ "The end time that you set: %s is before the start time plus " #~ "validityPeriod: %s.\n" #~ "The validityPeriod will be shorten to: %s." #~ msgstr "" #~ "Указанное Ð²Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ %s предшеÑтвует времени начала Ñ Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸ÐµÐ¼ " #~ "Ñрока годноÑти: %s.\n" #~ "Срок годноÑти будет Ñокращён до: %s." #~ msgid "" #~ "The end time that you set: %s is before start time: %s.\n" #~ "Please set the time constraints once again.\n" #~ msgstr "" #~ "Указанное Ð²Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ %s предшеÑтвует времени начала: %s.\n" #~ "ПожалуйÑта, задайте Ñроки Ñнова.\n" #~ msgid " Service_ID's number is not equivalent with the EPR's number!" #~ msgstr "Ðомер Service_ID отличен от номера в EPR!!" #~ msgid "[ISIS testing ...]" #~ msgstr "[теÑтирование ISIS ...]" #~ msgid "This tiny tool can be used for testing the ISIS's abilities." #~ msgstr "" #~ "Эта ÑÐºÑ€Ð¾Ð¼Ð½Ð°Ñ ÑƒÑ‚Ð¸Ð»Ð¸Ñ‚Ð° может быть иÑпользована Ð´Ð»Ñ Ñ‚ÐµÑÑ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ " #~ "возможноÑтей ISIS" #~ msgid "The method are the folows: Query, Register, RemoveRegistration" #~ msgstr "Следующие методы доÑтупны: Query, Register, RemoveRegistration" #~ msgid "define the URL of the Bootstrap ISIS" #~ msgstr "задать URL начального ISIS" #~ msgid "isis" #~ msgstr "ISIS" #~ msgid "define the URL of the ISIS to connect directly" #~ msgstr "задать URL Ñервера ISIS Ð´Ð»Ñ Ð¿Ñ€Ñмого доÑтупа" #~ msgid "define which method are use (Query, Register, RemoveRegistration)" #~ msgstr "задать иÑпользуемый метод (Query, Register, RemoveRegistration)" #~ msgid "method" #~ msgstr "метод" #~ msgid "get neighbors list from the BootstrapISIS" #~ msgstr "получить ÑпиÑок ÑоÑедей Ñ Ð½Ð°Ñ‡Ð°Ð»ÑŒÐ½Ð¾Ð³Ð¾ ISIS" #~ msgid " ISIS tester start!" #~ msgstr " ЗапуÑк теÑтера ISIS!" #~ msgid " Not enough or too much parameters! %s" #~ msgstr "ÐедоÑтаток или избыток параметров! %s" #~ msgid "ByteIOBackend datadir:" #~ msgstr "Ð”Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… ByteIOBackend:" #~ msgid "ByteIOBackend transferdir:" #~ msgstr "Каталог передачи ByteIOBackend:" #~ msgid "ByteIOService transfer dir:" #~ msgstr "Ð”Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡ ByteIOService:" #~ msgid "Subject:" #~ msgstr "Тема:" #~ msgid "checking" #~ msgstr "проверÑетÑÑ" #~ msgid "HopiBackend datadir:" #~ msgstr "Ð”Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… HopiBackend:" #~ msgid "HopiBackend transferdir:" #~ msgstr "Каталог передачи HopiBackend:" #~ msgid "ApacheBackend datadir:" #~ msgstr "Ð”Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… ApacheBackend:" #~ msgid "ApacheBackend transferdir:" #~ msgstr "Каталог передачи ApacheBackend:" #~ msgid "Cannot import backend class %(c)s (reason: %(r)s)" #~ msgstr "" #~ "Ðевозможно импортировать клаÑÑ Ð²Ð½ÑƒÑ‚Ñ€ÐµÐ½Ð½ÐµÐ³Ð¾ интерфейÑа %(c)s (причина: " #~ "%(r)s)" #~ msgid "Cannot import store class" #~ msgstr "Ðевозможно импортировать клаÑÑ Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ" #~ msgid "Cannot set CheckPeriod, MinCheckInterval" #~ msgstr "Ðевозможно выÑтавить CheckPeriod, MinCheckInterval" #~ msgid "Got Librarian URLs from the config:" #~ msgstr "Ð’ наÑтройках обнаружен Ð°Ð´Ñ€ÐµÑ Ð‘Ð¸Ð±Ð»Ð¸Ð¾Ñ‚ÐµÐºÐ°Ñ€Ñ:" #~ msgid "" #~ "No Librarian URLs and no ISIS URLs found in the configuration: no self-" #~ "healing!" 
#~ msgstr "" #~ "Ð’ наÑтройках не найдены адреÑа ни БиблиотекарÑ, ни ISIS: " #~ "ÑамовоÑÑтановление невозможно!" #~ msgid "Got Bartender URLs from the config:" #~ msgstr "Ð’ наÑтройках обнаружен Ð°Ð´Ñ€ÐµÑ Ð‘Ð°Ñ€Ð¼ÐµÐ½Ð°:" #~ msgid "" #~ "No Bartender URLs and no ISIS URLs found in the configuration: no self-" #~ "healing!" #~ msgstr "" #~ "Ð’ наÑтройках не найдены адреÑа ни Бармена, ни ISIS: ÑамовоÑÑтановление " #~ "невозможно!" #~ msgid "Getting Librarians from ISISes" #~ msgstr "Получение ÑпиÑка Библиотекарей из ISIS-ов" #~ msgid "Trying to get Librarian from" #~ msgstr "Попытка получить Ð°Ð´Ñ€ÐµÑ Ð‘Ð¸Ð±Ð»Ð¸Ð¾Ñ‚ÐµÐºÐ°Ñ€Ñ Ð¸Ð·" #~ msgid "Got Librarian from ISIS:" #~ msgstr "Получен Ð°Ð´Ñ€ÐµÑ Ð‘Ð¾Ð±Ð»Ð¸Ð¾Ñ‚ÐµÐºÐ°Ñ€Ñ Ð¸Ð· ISIS:" #~ msgid "Error in isisLibrarianThread: %s" #~ msgstr "Ошибка в isisLibrarianThread: %s" #~ msgid "Getting Bartenders from ISISes" #~ msgstr "Получение ÑпиÑка Барменов из ISIS-ов" #~ msgid "Trying to get Bartender from" #~ msgstr "Попытка получить Ð°Ð´Ñ€ÐµÑ Ð‘Ð°Ñ€Ð¼ÐµÐ½Ð° из" #~ msgid "Got Bartender from ISIS:" #~ msgstr "Получен Ð°Ð´Ñ€ÐµÑ Ð‘Ð°Ñ€Ð¼ÐµÐ½Ð° из ISIS:" #~ msgid "Error in isisBartenderThread: %s" #~ msgstr "Ошибка в isisBartenderThread: %s" #~ msgid "Shepherd" #~ msgstr "Чабан" #~ msgid "" #~ "\n" #~ "CHECKSUM OK" #~ msgstr "" #~ "\n" #~ "CHECKSUM в порÑдке" #~ msgid "" #~ "\n" #~ "CHECKSUM MISMATCH" #~ msgstr "" #~ "\n" #~ "CHECKSUM не Ñовпадает" #~ msgid "checksum refreshed" #~ msgstr "ÐšÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма обновлена" #~ msgid "\n" #~ msgstr "\n" #~ msgid "" #~ "\n" #~ "\n" #~ "File" #~ msgstr "" #~ "\n" #~ "\n" #~ "Файл" #~ msgid "" #~ "\n" #~ "\n" #~ "I have an invalid replica of file" #~ msgstr "" #~ "\n" #~ "\n" #~ "Обнаружена Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ ÐºÐ¾Ð¿Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°" #, fuzzy #~ msgid "ERROR checking checksum of %(rID)s, reason: %(r)s" #~ msgstr "неверный заголовок: ошибка контрольной Ñуммы" #~ msgid "changeState" #~ msgstr "changeState" #~ msgid "" #~ "\n" #~ "\n" #~ msgstr "" #~ "\n" #~ "\n" #~ msgid "Getting AHash URL from the config" #~ msgstr "Ð’ наÑтройках обнаружен Ð°Ð´Ñ€ÐµÑ Ð-Ð¥Ñш" #~ msgid "Got AHash URLs:" #~ msgstr "Получены адреÑа Ð-Ð¥Ñш:" #~ msgid "AHash URL found in the configuration." #~ msgstr "Ð’ наÑтройках обнаружен Ð°Ð´Ñ€ÐµÑ Ð-Ð¥Ñш" #, fuzzy #~ msgid "Setting running state to True" #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии" #~ msgid "No AHash from the config" #~ msgstr "Ð’ наÑтройках нет Ð-Ð¥Ñш" #, fuzzy #~ msgid "AHash URL and ISIS URL not found in the configuration." #~ msgstr "Ð’ наÑтройках не найдены адреÑа ни БиблиотекарÑ, ни ISIS." 
#, fuzzy #~ msgid "Trying to get A-Hash from ISISes" #~ msgstr "Попытка извлечь Ñодержимое %s из Ñлемента XML, размер %d" #~ msgid "Trying to get A-Hash from" #~ msgstr "Попытка получить Ð°Ð´Ñ€ÐµÑ Ð-Ð¥Ñш из" #~ msgid "Got A-Hash from ISIS:" #~ msgstr "Получен Ð°Ð´Ñ€ÐµÑ Ð-Ð¥Ñш из ISIS:" #~ msgid "Error in initThread: %s" #~ msgstr "Ошибка в initThread: %s" #, fuzzy #~ msgid "Error in Librarian's checking thread: %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð½Ð¾Ð²Ð¾Ð¹ запиÑи в Librarian: %s" #~ msgid "Error processing report message" #~ msgstr "Ошибка обработки отчёта" #~ msgid "Error traversing: %s" #~ msgstr "Ошибка при проходе: %s" #~ msgid "Error in traverseLN method: %s" #~ msgstr "Ошибка метода traverseLN: %s" #~ msgid "CentralAHash constructor called" #~ msgstr "Вызван конÑтруктор CentralAHash" #~ msgid "Error importing" #~ msgstr "Сбой импортированиÑ" #~ msgid "Error importing class" #~ msgstr "Ошибка Ð¸Ð¼Ð¿Ð¾Ñ€Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÐºÐ»Ð°ÑÑа" #~ msgid "ReplicatedAHash constructor called" #~ msgstr "Вызван конÑтруктор ReplicatedAHash" #~ msgid "sending message of length" #~ msgstr "отправка ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð´Ð»Ð¸Ð½Ð¾Ð¹" #~ msgid "sendt message, success=%s" #~ msgstr "Ñообщение отправлено, success=%s" #~ msgid "processing message..." #~ msgstr "обработка ÑообщениÑ..." #~ msgid "processing message... Finished" #~ msgstr "обработка ÑообщениÑ... Закончена" #~ msgid "Initialized replication environment" #~ msgstr "Инициализирована Ñреда репликации" #~ msgid "Couldn't start replication manager." #~ msgstr "Ðе удалоÑÑŒ запуÑтить менеджер репликации." #~ msgid "master locking" #~ msgstr "блокирование головного узла" #~ msgid "unlocking" #~ msgstr "разблокируетÑÑ" #~ msgid "unlocked" #~ msgstr "разблокирован" #~ msgid "couldn't unlock" #~ msgstr "не удалоÑÑŒ разблокировать" #~ msgid "checkingThread slept %d s" #~ msgstr "checkingThread ожидал %d Ñ" #, fuzzy #~ msgid "Resolved %d deadlocks" #~ msgstr "РаÑпознан пÑевдоним «%s» -> %s\n" #, fuzzy #~ msgid "wrote ahash list %s" #~ msgstr "конец ÑпиÑка поиÑка\n" #, fuzzy #~ msgid "but dbenv wasn't ready." #~ msgstr "ОжидалоÑÑŒ завершение процеÑÑа %s, но он не был запущен" #, fuzzy #~ msgid "entering start" #~ msgstr "ÐÐ°Ñ‡Ð°Ð»ÑŒÐ½Ð°Ñ Ñтрелка" #~ msgid "Couldn't start replication framework" #~ msgstr "Ðе удалоÑÑŒ запуÑтить инфраÑтруктуру репликации" #, fuzzy #~ msgid "entered election thread" #~ msgstr "%<__thread%> перед %" #~ msgid "%s: my role is" #~ msgstr "%s: Ð¼Ð¾Ñ Ñ€Ð¾Ð»ÑŒ" #~ msgid "%s: my role is now" #~ msgstr "%s: Ð¼Ð¾Ñ Ñ€Ð¾Ð»ÑŒ теперь" #~ msgid "Couldn't run election" #~ msgstr "Ðевозможно провеÑти выборы" #~ msgid "entering startElection" #~ msgstr "вход в startElection" #~ msgid "new role" #~ msgstr "Ð½Ð¾Ð²Ð°Ñ Ñ€Ð¾Ð»ÑŒ" #~ msgid "Couldn't begin role" #~ msgstr "Ðевозможно вÑтупить в роль" #~ msgid "entering send" #~ msgstr "переход в send" #~ msgid "failed to send to" #~ msgstr "Ñбой отправки на" #~ msgid "entering repSend" #~ msgstr "переход в repSend" #~ msgid "entering sendNewSiteMsg" #~ msgstr "переход в sendNewSiteMsg" #~ msgid "entering sendHeartbeatMsg" #~ msgstr "переход в sendHeartbeatMsg" #~ msgid "entering sendNewMasterMsg" #~ msgstr "переход в sendNewMasterMsg" #~ msgid "entering processMessage from " #~ msgstr "переход в processMessage из " #~ msgid "received message from myself!" #~ msgstr "получено ÑобÑтвенное Ñообщение!" 
#~ msgid "received master id" #~ msgstr "получен идентификатор головного узла" #~ msgid "received HEARTBEAT_MESSAGE" #~ msgstr "получено Ñообщение HEARTBEAT_MESSAGE" #~ msgid "received ELECTION_MESSAGE" #~ msgstr "получено Ñообщение ELECTION_MESSAGE" #~ msgid "received NEWSITE_MESSAGE" #~ msgstr "получено Ñообщение NEWSITE_MESSAGE" #~ msgid "processing message from %d" #~ msgstr "обработка ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð¾Ñ‚ %d" #~ msgid "Got dbnotfound" #~ msgstr "Получено dbnotfound" #~ msgid "couldn't process message" #~ msgstr "не удалоÑÑŒ обработать Ñообщение" #~ msgid "received DB_REP_NEWSITE from %s" #~ msgstr "получено DB_REP_NEWSITE от %s" #~ msgid "received DB_REP_HOLDELECTION" #~ msgstr "получено DB_REP_HOLDELECTION" #~ msgid "REP_NOTPERM returned for LSN %s" #~ msgstr "REP_NOTPERM получено Ð´Ð»Ñ LSN %s" #~ msgid "REP_IGNORE received" #~ msgstr "получен Ñигнал REP_IGNORE" #~ msgid "JOIN_FAILURE received" #~ msgstr "получен Ñигнал JOIN_FAILURE" #~ msgid "I am now a master" #~ msgstr "Ñ Ñ‚ÐµÐ¿ÐµÑ€ÑŒ главный" #~ msgid "received DB_EVENT_REP_MASTER" #~ msgstr "получено DB_EVENT_REP_MASTER" #~ msgid "I am now a client" #~ msgstr "Я теперь клиент" #~ msgid "Getting permission failed" #~ msgstr "Ðе удалоÑÑŒ получить разрешение" #~ msgid "Write failed" #~ msgstr "ЗапиÑÑŒ не удалаÑÑŒ" #~ msgid "New master elected" #~ msgstr "Выбран новый головной узел" #~ msgid "I won the election: I am the MASTER" #~ msgstr "Я победил на выборах: Ñ Ñ‚ÐµÐ¿ÐµÑ€ÑŒ MASTER" #~ msgid "Oops! Internal DB panic!" #~ msgstr "Ой! ВнутреннÑÑ Ð¿Ð°Ð½Ð¸ÐºÐ° БД!" #~ msgid "accessing gateway: %s" #~ msgstr "Ñоединение Ñ ÑˆÐ»ÑŽÐ·Ð¾Ð¼: %s" #, fuzzy #~ msgid "This bartender does not support gateway" #~ msgstr "Сервер не поддерживает TLS" #~ msgid "Librarian URL or ISIS URL not found in the configuration." #~ msgstr "Ð’ наÑтройках не найдены адреÑа ни БиблиотекарÑ, ни ISIS." 
#, fuzzy #~ msgid "Error connecting to ISIS %(iu)s, reason: %(r)s" #~ msgstr "Ошибка Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ Ðº беÑпроводной Ñети" #, fuzzy #~ msgid "Error in isisThread: %s" #~ msgstr "Ошибка в libkabc" #~ msgid "adding" #~ msgstr "добавлÑетÑÑ" #~ msgid "modifyMetadata response" #~ msgstr "возврат modifyMetadata" #~ msgid "modifyMetadata failed, removing the new librarian entry" #~ msgstr "" #~ "ошибка Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ modifyMetadata, удаление новой запиÑи библиотекарÑ" #~ msgid "Error creating new entry in Librarian: %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð½Ð¾Ð²Ð¾Ð¹ запиÑи в Librarian: %s" #~ msgid "//// response from the external store:" #~ msgstr "//// ответ внешнего запоминающего уÑтройÑтва:" #~ msgid "location chosen:" #~ msgstr "выбранное раÑположение:" #, fuzzy #~ msgid "ERROR from the chosen Shepherd" #~ msgstr "Ошибка %s при выборке из %s@%s\n" #~ msgid "addReplica" #~ msgstr "addReplica" #~ msgid "Registered Shepherds in Librarian" #~ msgstr "ЗарегиÑтрированные у Ð‘Ð¸Ð±Ð»Ð¸Ð¾Ñ‚ÐµÐºÐ°Ñ€Ñ Ð§Ð°Ð±Ð°Ð½Ñ‹" #~ msgid "Alive Shepherds:" #~ msgstr "Живые Чабаны:" #~ msgid "LN" #~ msgstr "LN" #~ msgid "metadata" #~ msgstr "метаданные" #~ msgid "Could not read entry" #~ msgstr "Ðе удалоÑÑŒ прочеÑть запиÑÑŒ" #~ msgid "\\/\\/" #~ msgstr "\\/\\/" #~ msgid "removing" #~ msgstr "удалÑетÑÑ" #~ msgid "Proxy store:" #~ msgstr "Хранилище доверенноÑтей:" #~ msgid "Delegation status: " #~ msgstr "Ð¡Ñ‚Ð°Ñ‚ÑƒÑ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ:" #~ msgid "creating proxy file : " #~ msgstr "ÑоздаётÑÑ Ñ„Ð°Ð¹Ð» доверенноÑти:" #~ msgid "Delegation failed: " #~ msgstr "Сбой делегированиÑ:" #~ msgid "ID: " #~ msgstr "ID: " #~ msgid "ProxyStore: %s" #~ msgstr "ProxyStore: %s" #~ msgid "removeCredentials: %s" #~ msgstr "removeCredentials: %s" #~ msgid "proxy store is not accessable." #~ msgstr "хранилище доверенноÑтей недоÑтупно." #~ msgid "Certificate directory is not accessable! Check configuration file." #~ msgstr "" #~ "Каталог Ñ Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ñ‹Ð¼Ð¸ ключами Ñертификационных агентÑтв недоÑтупен! " #~ "Проверьте файл наÑтроек." #, fuzzy #~ msgid "Proxy store is not accessable." #~ msgstr "хранилище доверенноÑтей недоÑтупно." #~ msgid "Failed retrieving job information for job: %s" #~ msgstr "Ðе удалоÑÑŒ извлечь информацию о задаче: %s" #~ msgid "Unable to select run time environment" #~ msgstr "Ðевозможно выбрать Ñреду выполнениÑ" #~ msgid "Submit: Failed to modify job description to be sent to target." #~ msgstr "" #~ "ЗаÑылка: Ðе удалоÑÑŒ адаптировать опиÑание задачи Ð´Ð»Ñ Ð·Ð°Ñылки по назначению" #~ msgid "[ADLParser] RemoteSessionAccess is not supported yet." #~ msgstr "[ADLParser] RemoteSessionAccess пока что не поддерживаетÑÑ." #~ msgid "Can't sign a non-limited, non-independent proxy with a limited proxy" #~ msgstr "" #~ "Ðевозможно подпиÑать неограниченную завиÑимую доверенноÑть ограниченной " #~ "доверенноÑтью" #~ msgid " Used Slots: %d" #~ msgstr "ИÑпользованные Ñдра: %d" #~ msgid "Job list file not specified." 
#~ msgstr "Ðе указан файл ÑпиÑка задач" #~ msgid "cFlavour = %s; service = %s" #~ msgstr "cFlavour = %s; service = %s" #~ msgid "" #~ "Unable to get job (%s), job information not found at execution service" #~ msgstr "" #~ "Ðевозможно извлечь задачу (%s), на ÑервиÑе иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ обнаружена " #~ "Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче" #~ msgid "" #~ "Unable to kill job (%s), job information not found at execution service" #~ msgstr "" #~ "Ðевозможно прервать задачу (%s), на ÑервиÑе иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ обнаружена " #~ "Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче" #~ msgid "Failed killing job (%s)" #~ msgstr "Сбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ (%s)" #~ msgid "" #~ "Unable to renew job (%s), job information not found at execution service" #~ msgstr "" #~ "Ðевозможно возобновить задачу (%s), на ÑервиÑе иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ обнаружена " #~ "Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче" #~ msgid "Failed renewing job (%s)" #~ msgstr "Сбой Ð²Ð¾Ð·Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ (%s)" #~ msgid "Unable to resume job (%s), job information not found" #~ msgstr "" #~ "Ðевозможно продолжить задачу (%s), не обнаружена Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче" #~ msgid "Generating EMIES target: %s" #~ msgstr "Создание цели EMIES: %s" #~ msgid "" #~ "The middleware flavour of the job (%s) does not match that of the job " #~ "controller (%s)" #~ msgstr "" #~ "Тип подпрограммного обеÑÐ¿ÐµÑ‡ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ (%s) не ÑоответÑтвует типу " #~ "контроллера (%s)" #~ msgid "Job has not finished yet: %s" #~ msgstr "Задача ещё не завершилаÑÑŒ: %s" #~ msgid "Failed downloading job %s" #~ msgstr "Ðе удалоÑÑŒ получить результаты задачи %s" #~ msgid "Failed cleaning job %s" #~ msgstr "Ðе удалоÑÑŒ удалить задачу %s" #~ msgid "Job has already finished: %s" #~ msgstr "Задача уже завершилаÑÑŒ: %s" #~ msgid "Failed cancelling job %s" #~ msgstr "Ðе удалоÑÑŒ прервать задачу %s" #~ msgid "" #~ "Job information not found, job %s will only be deleted from local joblist" #~ msgstr "" #~ "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче не обнаружена, задача %s будет удалена только из " #~ "локального ÑпиÑка" #~ msgid "Unknown output %s" #~ msgstr "ÐеизвеÑтный вывод %s" #~ msgid "Cannot create output of %s for job (%s): Invalid destination %s" #~ msgstr "" #~ "Ðевозможно Ñоздать вывод %s Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ (%s): ÐедопуÑтимое назначение %s" #~ msgid "%s from job %s" #~ msgstr "%s из задачи %s" #~ msgid "Cannot migrate job %s, it is not queuing." #~ msgstr "Ðевозможно мигрировать задачу %s, она не ожидает в очереди." #~ msgid "Job migration failed, for job %s, no more possible targets" #~ msgstr "ÐœÐ¸Ð³Ñ€Ð°Ñ†Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s не удалаÑÑŒ, отÑутÑтвуют возможные назначениÑ" #~ msgid "Failed to lock job list file %s. Job information will be out of sync" #~ msgstr "" #~ "Ðе удалоÑÑŒ заблокировать файл ÑпиÑка задач %s. 
#~ msgid "Failed renewing job %s"
#~ msgstr "Не удалось обновить доверенность задачи %s"
#~ msgid "Failed retrieving job description for job (%s)"
#~ msgstr "Не удалось получить описание задачи (%s)"
#~ msgid "No job controller plugins loaded"
#~ msgstr "Не подгружен ни один модуль управления задачами"
#~ msgid "Credentials renewed"
#~ msgstr "Параметры доступа обновлены"
#~ msgid "Failed to renew credentials for some or all jobs"
#~ msgstr "Не удалось обновить параметры доступа для некоторых или всех задач"
#~ msgid "add dryrun option if available"
#~ msgstr "добавить холостую прогонку, если возможно"
#~ msgid "select broker method (Random (default), FastestQueue, or custom)"
#~ msgstr "выбрать алгоритм планировщика (Random (по умолчанию), FastestQueue, или специальный)"
#~ msgid "Job description languages supported by ARC client tools:"
#~ msgstr "Следующие языки описания задач поддерживаются клиентскими средствами ARC:"
#~ msgid "explicitly select or reject a resource holding queued jobs"
#~ msgstr "явным образом выбрать или отсеять ресурс, держащий задачи в очереди"
#~ msgid "explicitly select or reject a resource to migrate to"
#~ msgstr "явным образом выбрать или отсеять назначение миграции"
#~ msgid "Brokers available to arcmigrate:"
#~ msgstr "Следующие планировщики доступны для arcmigrate:"
#~ msgid "Job migration aborted because no resource returned any information"
#~ msgstr "Обрыв засылки задачи, т.к. ни один из ресурсов не предоставил информацию"
#~ msgid "All jobs were resumed"
#~ msgstr "Все задачи были возобновлены"
#~ msgid "Brokers available to arcresub:"
#~ msgstr "Следующие планировщики доступны для arcresub:"
#~ msgid "Disregarding %s"
#~ msgstr "Игнорируется %s"
#~ msgid "Job resubmission failed, unable to parse obtained job description"
#~ msgstr "Не удалось перезаслать задачу, невозможно разобрать полученное описание задачи"
#~ msgid "Job resubmitted with new jobid: %s"
#~ msgstr "Задача запущена с новым ярлыком: %s"
#~ msgid "Job resubmission failed, no more possible targets"
#~ msgstr "Не удалось перезаслать задачу, возможные назначения отсутствуют"
#~ msgid "Job could not be killed or cleaned"
#~ msgstr "Задача не может быть прервана или стёрта"
#~ msgid "Cannot find any proxy. arcsub currently cannot run without a proxy.\n If you have the proxy file in a non-default location,\n please make sure the path is specified in the client configuration file.\n If you don't have a proxy yet, please run 'arcproxy'!"
#~ msgstr "Не удалось обнаружить доверенность. В этой версии arcsub не работает без доверенности.\n Если Ваша доверенность хранится в нестандартном месте, пожалуйста,\n убедитесь, что в настройках клиента указан правильный путь.\n Если же Вы пока не создали доверенность, запустите 'arcproxy'!"
#~ msgid "Unable to calculate checksum of local input file %s"
#~ msgstr "Не удалось вычислить контрольную сумму локального входного файла %s"
#~ msgid "[ADLParser] %s element with false value is not supported yet."
#~ msgstr "[ADLParser] Элемент %s с ложным значением пока не поддерживается."
#~ msgid "[ADLParser] %s element with true value is not supported yet." #~ msgstr "[ADLParser] Элемент %s Ñ Ð¸Ñтиным значением пока не поддерживаетÑÑ." #~ msgid "" #~ "[ADLParser] Option element inside RuntimeEnvironment is not supported yet." #~ msgstr "" #~ "[ADLParser] Элемент Option внутри RuntimeEnvironment пока что не " #~ "поддерживаетÑÑ." #~ msgid "[ADLParser] ParallelEnvironment is not supported yet." #~ msgstr "[ADLParser] ParallelEnvironment пока что не поддерживаетÑÑ." #~ msgid " Keep data: true" #~ msgstr " ОÑтавлÑть данные: верно" #~ msgid "" #~ "[ADLParser] For useNumberOfSlots of SlotsPerHost only false value is " #~ "supported yet." #~ msgstr "" #~ "[ADLParser] Ð”Ð»Ñ useNumberOfSlots атрибута SlotsPerHost пока что " #~ "поддерживаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ ложное значение." #~ msgid "[ADLParser] ExclusiveExecution is not supported yet." #~ msgstr "[ADLParser] ExclusiveExecution пока что не поддерживаетÑÑ." #, fuzzy #~ msgid "Invalid configuration - no allowed DNs specified" #~ msgstr "Ðе указан файл наÑтроек" #~ msgid "bind failed" #~ msgstr "Ñбой привÑзки" #~ msgid "%s: Failed reading list of output files" #~ msgstr "%s: Ðе удалоÑÑŒ прочеÑть ÑпиÑок выходных файлов" #~ msgid "ARC: acl element wrongly formated - missing Content element" #~ msgstr "" #~ "Ðевеврный формат Ñлемента ARC: acl element - отÑутÑтвует Ñлемент Content" #~ msgid "ARC: unsupported ACL type specified: %s" #~ msgstr "ARC: указан неподдерживаемый тип ACL: %s" #~ msgid "" #~ "[ADLParser] Missing FailIfExitCodeNotEqualTo in %s. Ignoring exit code is " #~ "not supported yet." #~ msgstr "" #~ "[ADLParser] Ð’ %s отÑутÑтвует FailIfExitCodeNotEqualTo. Игнорирование кода " #~ "выхода пока не поддерживаетÑÑ." #~ msgid "" #~ "[ADLParser] FailIfExitCodeNotEqualTo in %s contain non-zero code. This " #~ "feature is not supported yet." #~ msgstr "" #~ "[ADLParser] FailIfExitCodeNotEqualTo в %s Ñодержит ненулевой код. Ð¢Ð°ÐºÐ°Ñ " #~ "возможноÑть пока что не поддерживаетÑÑ." #~ msgid "[ADLParser] Multiple PreExecutable elements are not supported yet." #~ msgstr "" #~ "[ADLParser] МножеÑтвенные Ñлементы PreExecutable пока что не " #~ "поддерживаютÑÑ." #~ msgid "" #~ "[ADLParser] Only SGAS ServiceType for RemoteLogging is supported yet." #~ msgstr "" #~ "[ADLParser] Пока что поддерживаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ SGAS ServiceType Ð´Ð»Ñ " #~ "RemoteLogging." #~ msgid "[ADLParser] For ClientDataPush only false value is supported yet." #~ msgstr "" #~ "[ADLParser] Ð”Ð»Ñ ClientDataPush пока что поддерживаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ ложное " #~ "значение." #~ msgid "[ADLParser] DelegationID in Source is not supported yet." #~ msgstr "[ADLParser] DelegationID в Source пока что не поддерживаетÑÑ." #~ msgid "[ADLParser] Option in Source is not supported yet." #~ msgstr "[ADLParser] Option в Source пока что не поддерживаетÑÑ." #~ msgid "[ADLParser] DelegationID in Target is not supported yet." #~ msgstr "[ADLParser] DelegationID в Target пока что не поддерживаетÑÑ." #~ msgid "[ADLParser] Option in Target is not supported yet." #~ msgstr "[ADLParser] Option в Target пока что не поддерживаетÑÑ." #~ msgid "" #~ "The JobDescription::operator bool() method is DEPRECATED, use validity " #~ "checks when parsing sting or outputing contents of JobDescription object." #~ msgstr "" #~ "ИÑпользование метода JobDescription::operator bool() ÐЕ РЕКОМЕÐДУЕТСЯ, " #~ "проверÑйте дейÑтвительноÑть при разборке Ñтрок или выводе Ñодержимого " #~ "объекта JobDescription." 
#~ msgid "" #~ "The JobDescription::Print method is DEPRECATED, use the JobDescription::" #~ "SaveToStream method instead." #~ msgstr "" #~ "ИÑпользование метода JobDescription::Print ÐЕ РЕКОМЕÐДУЕТСЯ, иÑпользуйте " #~ "метод JobDescription::SaveToStream взамен." #~ msgid " User tag: %s" #~ msgstr " Метка пользователÑ: %s" #~ msgid " Prologue arguments: %s" #~ msgstr " Ðргументы пролога: %s" #~ msgid " Epilogue arguments: %s" #~ msgstr " Ðргументы Ñпилога: %s" #~ msgid "" #~ "This method is DEPRECATED, please use the JobDescription::Parse(const " #~ "std::string&, std::list&, const std::string&, const std::" #~ "string&) method instead." #~ msgstr "" #~ "ИÑпользование Ñтого метода ÐЕ РЕКОМЕÐДУЕТСЯ, пожалуйÑта, иÑпользуйте " #~ "метод JobDescription::Parse(const std::string&, std::" #~ "list&, const std::string&, const std::string&) взамен." #~ msgid "" #~ "This method is DEPRECATED, please use the JobDescription::UnParse(std::" #~ "string&, std::string, const std::string&) method instead." #~ msgstr "" #~ "ИÑпользование Ñтого метода ÐЕ РЕКОМЕÐДУЕТСЯ, пожалуйÑта, иÑпользуйте " #~ "метод JobDescription::UnParse(std::string&, std::string, const std::" #~ "string&) взамен." #~ msgid "" #~ "The Job::Print method is DEPRECATED, use the Job::SaveToStream method " #~ "instead." #~ msgstr "" #~ "ИÑпользование метода Job::Print ÐЕ РЕКОМЕÐДУЕТСЯ, иÑпользуйте метод Job::" #~ "SaveToStream взамен." #~ msgid "" #~ "The TargetGenerator::GetTargets method is DEPRECATED, use the " #~ "GetExecutionTargets or GetJobs method instead." #~ msgstr "" #~ "ИÑпользование метода TargetGenerator::GetTargets ÐЕ РЕКОМЕÐДУЕТСЯ, " #~ "иÑпользуйте метод GetExecutionTargets or GetJobs взамен." #~ msgid "Running resource (target) discovery" #~ msgstr "ВыполнÑетÑÑ Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ðµ реÑурÑов (назначений)" #~ msgid "" #~ "The TargetGenerator::ModifyFoundTargets method is DEPRECATED, use the " #~ "FoundTargets method instead." #~ msgstr "" #~ "ИÑпользование метода TargetGenerator::ModifyFoundTargets ÐЕ " #~ "РЕКОМЕÐДУЕТСЯ, иÑпользуйте метод FoundTargets взамен." #~ msgid "" #~ "The TargetGenerator::FoundJobs method is DEPRECATED, use the GetFoundJobs " #~ "method instead." #~ msgstr "" #~ "ИÑпользование метода TargetGenerator::FoundJobs ÐЕ РЕКОМЕÐДУЕТСЯ, " #~ "иÑпользуйте метод GetFoundJobs взамен." #~ msgid "" #~ "The TargetGenerator::AddJob(const XMLNode&) method is DEPRECATED, use the " #~ "AddJob(const Job&) method instead." #~ msgstr "" #~ "ИÑпользование метода TargetGenerator::AddJob(const XMLNode&) ÐЕ " #~ "РЕКОМЕÐДУЕТСЯ, иÑпользуйте метод AddJob(const Job&) взамен." #~ msgid "" #~ "The TargetGenerator::PrintTargetInfo method is DEPRECATED, use the " #~ "TargetGenerator::SaveTargetInfoToStream method instead." #~ msgstr "" #~ "ИÑпользование метода TargetGenerator::PrintTargetInfo ÐЕ РЕКОМЕÐДУЕТСЯ, " #~ "иÑпользуйте метод TargetGenerator::SaveTargetInfoToStream взамен." #~ msgid "" #~ "The JobController::Cat(const std::list&, const std::string&) " #~ "method is DEPRECATED, use the JobController::Cat(std::ostream&, const " #~ "std::list&, const std::string&) method instead." #~ msgstr "" #~ "ИÑпользование метода JobController::Cat(const std::list&, " #~ "const std::string&) ÐЕ РЕКОМЕÐДУЕТСЯ, иÑпользуйте метод JobController::" #~ "Cat(std::ostream&, const std::list&, const std::string&) " #~ "взамен." #~ msgid "" #~ "Specifying the \"gmlog\" value for the whichfile parameter in the Job::" #~ "Cat method is DEPRECATED, use the \"joblog\" value instead." 
#~ msgstr "" #~ "ИÑпользование Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ \"gmlog\" Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð° whichfile в методе Job::" #~ "Cat ÐЕ РЕКОМЕÐДУЕТСЯ, иÑпользуйте значение \"joblog\" взамен." #~ msgid "" #~ "The JobController::PrintJobStatus method is DEPRECATED, use the Job::" #~ "SaveJobStatusToStream method instead." #~ msgstr "" #~ "ИÑпользование метода JobController::PrintJobStatus ÐЕ РЕКОМЕÐДУЕТСЯ, " #~ "иÑпользуйте метод Job::SaveJobStatusToStream взамен." #~ msgid "Failed to lock job list file %s. Job list will be out of sync" #~ msgstr "" #~ "Сбой блокировки файла ÑпиÑка задач %s. Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задачах будет " #~ "раÑÑинхронизована" #~ msgid "" #~ "The ExecutionTarget::Print method is DEPRECATED, use the ExecutionTarget::" #~ "SaveToStream method instead." #~ msgstr "" #~ "ИÑпользование метода ExecutionTarget::Print ÐЕ РЕКОМЕÐДУЕТСЯ, иÑпользуйте " #~ "метод ExecutionTarget::SaveToStream взамен." #, fuzzy #~ msgid "CreateActivity: has delegation: %s" #~ msgstr "CreateActivity: Сбой при принÑтии делегированиÑ" #, fuzzy #~ msgid "Error parsing VOMS AC" #~ msgstr "Обнаружена ошибка при разборе Ñертификата атрибута" #, fuzzy #~ msgid "Error opening lock file %s: %s" #~ msgstr "Ошибка Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° вывода" #, fuzzy #~ msgid "Found empty lock file %s" #~ msgstr "Ожидание блокировки файла" #, fuzzy #~ msgid "DTR %s: Failed linking cache file to %s due to existing write lock" #~ msgstr "Копирование файла '%s' из '%s'..." #~ msgid "Cannot determine hostname from uname()" #~ msgstr "Ðевозможно извлечь Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð° иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ uname()" #~ msgid "Error reading meta file %s" #~ msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¼ÐµÑ‚Ð°-файла %s" #~ msgid "" #~ "File exists in remote cache at %s but is locked. Will download from source" #~ msgstr "" #~ "Файл приÑутÑтвует в удалённом кÑше на %s, но заблокирован. Будет " #~ "проведена загрузка из иÑточника" #~ msgid "Creating temporary link from %s to remote cache file %s" #~ msgstr "СоздаётÑÑ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ ÑÑылка Ñ %s на удалённо кÑшированный файл %s" #~ msgid "" #~ "Failed to create soft link to remote cache: %s. Will download %s from " #~ "source" #~ msgstr "" #~ "Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð³Ð¸Ð±ÐºÐ¾Ð¹ ÑÑылки на удалённый кÑш: %s. Будет произведена " #~ "загрузка %s из иÑточника" #~ msgid "" #~ "Could not read target of link %s. Manual intervention may be required to " #~ "remove lock in remote cache" #~ msgstr "" #~ "Ðевозможно прочеÑть цель ÑÑылки %s. Возможно, необходимо ручное " #~ "вмешательÑтво Ð´Ð»Ñ ÑнÑÑ‚Ð¸Ñ Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²ÐºÐ¸ в удалённом кÑше" #~ msgid "" #~ "Failed to unlock remote cache file %s. Manual intervention may be required" #~ msgstr "" #~ "Сбой разблокировки удалённого кÑшированного файла %s. Возможно, " #~ "необходимо ручное вмешательÑтво" #~ msgid "Error removing file %s: %s. Manual intervention may be required" #~ msgstr "" #~ "Ошибка при удалении файла %s: %s. Возможно, необходимо ручное " #~ "вмешательÑтво" #~ msgid "Error: Cache file %s does not exist" #~ msgstr "Ошибка: КÑшированный файл %s не ÑущеÑтвует" #~ msgid "Could not read target of link %s" #~ msgstr "Ðевозможно прочеÑть цель ÑÑылки %s" #~ msgid "Couldn't match link target %s to any remote cache" #~ msgstr "Цель ÑÑылки %s не найдена ни в одном удалённом кÑше" #~ msgid "Error removing symlink %s: %s. Manual intervention may be required" #~ msgstr "" #~ "Ошибка при удалении Ñимвольной ÑÑылки %s: %s. Возможно, необходимо ручное " #~ "вмешательÑтво" #~ msgid "'../' is not allowed in filename" #~ msgstr "'../' не допуÑкаетÑÑ Ð² имени файла" #~ msgid "Your issuer CA's DN: %s." 
#~ msgstr "Выделенное Ð¸Ð¼Ñ Ð°Ð³ÐµÐ½Ñ‚Ñтва, выдавшего Ваш Ñертификат: %s." #~ msgid "Source is bad URL or can't be used due to some reason" #~ msgstr "" #~ "URL иÑточника недопуÑтим, или не может быть иÑпользован по какой-либо " #~ "причине" #~ msgid "Destination is bad URL or can't be used due to some reason" #~ msgstr "" #~ "URL Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð½ÐµÐ´Ð¾Ð¿ÑƒÑтим, или не может быть иÑпользован по какой-либо " #~ "причине" #~ msgid "Error deleting location or URL" #~ msgstr "Ошибка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÐ¿Ð¾Ð»Ð¾Ð¶ÐµÐ½Ð¸Ñ Ð¸Ð»Ð¸ URL" #~ msgid "DataPoint is already reading" #~ msgstr "DataPoint уже читает" #~ msgid "DataPoint is already writing" #~ msgstr "DataPoint уже пишет" #~ msgid "File stating failed" #~ msgstr "Ðе удалоÑÑŒ получить информацию о ÑоÑтоÑнии файла" #~ msgid "Failed to finish destination" #~ msgstr "Ðе удалоÑÑŒ завершить назначение" #~ msgid "" #~ "Cannot find file at %s for getting the certificate. Please make sure this " #~ "file exists." #~ msgstr "" #~ "Ðе удалоÑÑŒ найти файл по адреÑу %s, Ñодержащий Ñертификат. ПожалуйÑта, " #~ "убедитеÑÑŒ, что файл ÑущеÑтвует." #~ msgid "Timeleft for AC: %s" #~ msgstr "ОÑтавшееÑÑ Ð²Ñ€ÐµÐ¼Ñ Ð´Ð»Ñ AC: %s" #~ msgid "AC has been expired for: %s" #~ msgstr "Срок дейÑÑ‚Ð²Ð¸Ñ Ñертификат атрибута Ð´Ð»Ñ %s закончилÑÑ" #, fuzzy #~ msgid "Can get X509V3_EXT_METHOD for %s" #~ msgstr "Ðевозможно извлечь X509V3_EXT_METHOD Ð´Ð»Ñ %s" #, fuzzy #~ msgid "Service_doc: %s" #~ msgstr "Файл &DOC:" #, fuzzy #~ msgid "SOAP Fault: %s" #~ msgstr "Получена ошибка SOAP" #~ msgid "Proxy successfully verified." #~ msgstr "ДоверенноÑть подтверждена." #~ msgid "Proxy not valid. Job submission aborted. Please run 'arcproxy'!" #~ msgstr "" #~ "ДоверенноÑть недейÑтвительна. ЗаÑылка задачи оборвана. ПожалуйÑта, " #~ "запуÑтите 'arcproxy'!" #~ msgid "" #~ "Cannot find CA certificates directory. Please specify the location to the " #~ "directory in the client configuration file." #~ msgstr "" #~ "Ðе удалоÑÑŒ найти каталог Ñ Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ñ‹Ð¼Ð¸ ключами Ñертификационных агентÑтв. " #~ "ПожалуйÑта, введите раÑположение Ñтого каталога в файл наÑтроек клиента." #~ msgid "Local user does not match user of DTR %s" #~ msgstr "Локальный пользователь не ÑоответÑтвует пользователю DTR %s" #~ msgid "" #~ "No services specified. Please specify a cluster or index (-c or -g " #~ "options, see arcsync -h) or set the \"defaultservices\" attribute in the " #~ "client configuration." #~ msgstr "" #~ "Ðе задано ни одного назначениÑ. ПожалуйÑта, задайте значение аттрибута " #~ "\"defaultservices\" в файле наÑтроек клиента, либо укажите Ñвным образом " #~ "реÑÑƒÑ€Ñ Ð¸Ð»Ð¸ каталог реÑурÑов (опции -c или -g, Ñм. arcsync -h)" #~ msgid "Failed to read PEM from file %s" #~ msgstr "Ðе удалоÑÑŒ прочеÑть PEM из файла %s" #~ msgid "" #~ "Failed to read private key from file %s - probably no delegation was done" #~ msgstr "" #~ "Ðе удалоÑÑŒ прочитать файл личного ключа из файла %s - вероÑтно, не было " #~ "делегированиÑ" #~ msgid "Failed in SSL (sk_X509_new_null)" #~ msgstr "Сбой в SSL (sk_X509_new_null)" #~ msgid "Failed in SSL (sk_X509_insert)" #~ msgstr "Сбой в SSL (sk_X509_insert)" #~ msgid "Error: no VOMS extension found" #~ msgstr "Ошибка: не найдено раÑширений VOMS" #~ msgid "Shutting down grid-manager thread" #~ msgstr "ПрерываетÑÑ Ð¿Ð¾Ñ‚Ð¾Ðº Грид-менеджера" #~ msgid "Requirement satisfied. %s %s %s." #~ msgstr "Требование удовлетворено. %s %s %s." #~ msgid "Requirement NOT satisfied. %s %s %s." #~ msgstr "Требование ÐЕ удовлетворено. %s %s %s." 
#~ msgid "End of list reached requirement not met." #~ msgstr "ДоÑтигнут конец ÑпиÑка, Ñ‚Ñ€ÐµÐ±Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ðµ удовлетворены" #~ msgid "Can't stat file: %s" #~ msgstr "Ðевозможно получить ÑÑ‚Ð°Ñ‚ÑƒÑ Ñ„Ð°Ð¹Ð»Ð°: %s" #~ msgid "File is not accessible: %s - %s" #~ msgstr "Файл недоÑтупен: %s - %s" #, fuzzy #~ msgid "delete_ftp: globus_ftp_client_delete timeout" #~ msgstr "check_ftp: Ñбой в globus_ftp_client_size" #~ msgid "Transfer FAILED: %s - %s" #~ msgstr "Передача ÐЕ УДÐЛÐСЬ: %s - %s" #, fuzzy #~ msgid "" #~ "Cannot find the path of the key file, please setup environment " #~ "X509_USER_KEY, or keypath in a configuration file" #~ msgstr "" #~ "Ðе удалоÑÑŒ найти закрытый ключ пользователÑ. ПожалуйÑта, задайте " #~ "переменную Ñреды X509_USER_KEY, или значение keypath в файле конфигурации" #, fuzzy #~ msgid "" #~ "Cannot find file at %s for getting the key. Please make sure this file " #~ "exists." #~ msgstr "" #~ "Ðе удалоÑÑŒ найти файл по адреÑу %s, Ñодержащий доверенноÑть. ПожалуйÑта, " #~ "убедитеÑÑŒ, что файл ÑущеÑтвует." #, fuzzy #~ msgid "[ARCJSDLParser] Validating error" #~ msgstr "%s: ошибка запиÑи файла '%s': %s\n" #~ msgid "Requirements not satisfied." #~ msgstr "Ð¢Ñ€ÐµÐ±Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ðµ удовлетворены." #, fuzzy #~ msgid "Mismatching url in file %s: %s Expected %s" #~ msgstr "пропущено Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° в URL" #, fuzzy #~ msgid "Bad separator in file %s: %s" #~ msgstr "Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ Ñтрока в файле-каркаÑе" #, fuzzy #~ msgid "Bad value of expiry time in %s: %s" #~ msgstr "Единица времени Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ Ð¿ÐµÑ€Ð¸Ð¾Ð´Ð° уÑтареваниÑ." #, fuzzy #~ msgid "Illegal testjob-id given" #~ msgstr "Задан недопуÑтимый номер теÑтовой задачи" #~ msgid "Failed to terminate LCMAPS - has to keep library loaded" #~ msgstr "" #~ "Ðе удалоÑÑŒ прервать LCMAPS - придётÑÑ Ð¾Ñтавить библиотеку подгруженой" #~ msgid "VOMS config: vo: %s" #~ msgstr "ÐаÑтройки VOMS: ВО: %s" #~ msgid "VOMS config: group: %s" #~ msgstr "ÐаÑтройки VOMS: группа: %s" #~ msgid "VOMS config: role: %s" #~ msgstr "ÐаÑтройки VOMS: роль: %s" #~ msgid "VOMS config: capabilities: %s" #~ msgstr "ÐаÑтройки VOMS: возможноÑти: %s" #, fuzzy #~ msgid "VOMS matched" #~ msgstr "ПодходÑщие подгруппы:" #~ msgid "Failed to terminate LCAS - has to keep library loaded" #~ msgstr "Ðе удалоÑÑŒ прервать LCAS - придётÑÑ Ð¾Ñтавить библиотеку подгруженой" #~ msgid "Disconnect: Failed quitting: %s" #~ msgstr "Отключение: Ðе удалоÑÑŒ выйти: %s" #~ msgid "Failed to close connection 1" #~ msgstr "Ðе удалоÑÑŒ закрыть Ñоединение 1" #~ msgid "Failed to close connection 2" #~ msgstr "Ðе удалоÑÑŒ закрыть Ñоединение 2" #~ msgid "Failed to close connection 3" #~ msgstr "Ðе удалоÑÑŒ закрыть Ñоединение 3" #, fuzzy #~ msgid "subject: %s" #~ msgstr "Тема:" #~ msgid "Out of memory" #~ msgstr "Мало памÑти" #~ msgid "out of memory" #~ msgstr "мало памÑти" #, fuzzy #~ msgid "Error reading valid and existing meta file %s: %s" #~ msgstr "" #~ "\n" #~ "%s: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð²Ñ…Ð¾Ð´Ð½Ð¾Ð³Ð¾ файла '%s': %s\n" #, fuzzy #~ msgid "Error listing dir %s: %s" #~ msgstr "Ошибка перечиÑÐ»ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð¾Ð²: %s\n" #, fuzzy #~ msgid "Error reading srm info file %s:%s" #~ msgstr "%s: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð²Ñпомогательного файла '%s': %s\n" #, fuzzy #~ msgid "Error creating srm info file %s" #~ msgstr "%s: ошибка Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð²Ñпомогательного файла '%s': %s\n" #, fuzzy #~ msgid "DTR %s: Cache processing successful" #~ msgstr "Задача уÑпешно возобновлена" #, fuzzy #~ msgid "job_id url destination" #~ msgstr "Ðеверный URL цели." 
#, fuzzy #~ msgid "link the cache file" #~ msgstr "Обновление кÑш-файла" #, fuzzy #~ msgid "copy the cache file" #~ msgstr "Обновление кÑш-файла" #~ msgid "file is executable" #~ msgstr "файл ÑвлÑетÑÑ Ð¸ÑполнÑемым файлом" #, fuzzy #~ msgid "gid of destination owner" #~ msgstr "Указать владельца Ñхемы" #, fuzzy #~ msgid "One of -l and -c must be specified" #~ msgstr "должно быть одним из: C, S, E, P, или пуÑтым" #~ msgid "No configuration specified" #~ msgstr "Файл наÑтроек не указан" #, fuzzy #~ msgid "Error linking/copying cache file" #~ msgstr "Ошибка ÐºÐ¾Ð¿Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð³Ð¾ почтового файла: %s" #, fuzzy #~ msgid "Adding %s service %s " #~ msgstr "Ошибка при добавлении Ñлужбы. %s" #, fuzzy #~ msgid "" #~ "Can not access CA certificate directory: %s. The certificates will not be " #~ "verified" #~ msgstr "Ðевозможно открыть файл Ñертификата: %s (%s)" #, fuzzy #~ msgid "" #~ "Trying to migrate to %s: Migration to a ARC GM-powered resource is not " #~ "supported." #~ msgstr "-mhard-float не поддерживаетÑÑ" #~ msgid "Using job list file %s" #~ msgstr "ИÑпользуетÑÑ ÑпиÑок задач из файла %s" #, fuzzy #~ msgid "Job not found in the job list: %s" #~ msgstr "Задача %s не обнаружена в ÑпиÑке задач." #~ msgid "Failed to use channel stdout" #~ msgstr "Сбой иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ ÐºÐ°Ð½Ð°Ð»Ð° stdout" #~ msgid "" #~ "Cannot find any proxy. Please specify the path to the proxy file in the " #~ "client configuration file." #~ msgstr "" #~ "Ðе удалоÑÑŒ найти доверенноÑть пользователÑ. ПожалуйÑта, введите " #~ "раÑположение доверенноÑти в файл конфигурации клиента." #, fuzzy #~ msgid "Error allocating memory for info file %s:%s" #~ msgstr "%s: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð²Ñпомогательного файла '%s': %s\n" #, fuzzy #~ msgid "Error opening srm info file for writing %s:%s" #~ msgstr "Ошибка: Ðевозможно открыть файл %s Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи.\n" #, fuzzy #~ msgid "Error allocating memory for srm info file %s:%s" #~ msgstr "%s: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð²Ñпомогательного файла '%s': %s\n" #, fuzzy #~ msgid "Error opening srm info file %s:%s" #~ msgstr "%s: ошибка Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð²Ñпомогательного файла '%s': %s\n" #~ msgid "" #~ "Argument to -g has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Ðргумент опции -g задаётÑÑ Ð¿Ð¾ форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Ðргумент опции -c задаётÑÑ Ð¿Ð¾ форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Ðргумент опции -c задаётÑÑ Ð¿Ð¾ форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #, fuzzy #~ msgid "Getting job descriptions from local job file" #~ msgstr "Удалить передачу из ÑпиÑка." #, fuzzy #~ msgid "Disregarding job descriptions from local job file" #~ msgstr "Удалить передачу из ÑпиÑка." 
#, fuzzy #~ msgid "Valid job description found for: %s" #~ msgstr "CreateActivity: ОпиÑание задачи не найдено" #, fuzzy #~ msgid "Invalid job description found for: %s" #~ msgstr "CreateActivity: ОпиÑание задачи не найдено" #, fuzzy #~ msgid "Job description for %s retrieved locally" #~ msgstr "опиÑание заÑылаемой задачи: %s" #, fuzzy #~ msgid "Job %s can not be resubmitted" #~ msgstr "Задача не может быть перезапущена" #~ msgid "Job description for %s could not be retrieved locally" #~ msgstr "ОпиÑание задачи %s не может быть воÑÑтановлено локально" #~ msgid "file where the jobs will be stored" #~ msgstr "Файл Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи Ñрлыков запущенных задач" #, fuzzy #~ msgid "Incompatible RSL attributes" #~ msgstr "Правка параметров ÑлоÑ" #, fuzzy #~ msgid "job.Resources.CandidateTarget.size() = %d" #~ msgstr "Размер ÑпиÑка недавно иÑпользовавшихÑÑ Ñ€ÐµÑурÑов" #, fuzzy #~ msgid "Error creating tmp file %s for remote lock with mkstemp(): %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð³Ð¾ файла Ñкрипта" #, fuzzy #~ msgid "Error writing to tmp lock file for remote lock %s: %s" #~ msgstr "Ожидание блокировки файла" #, fuzzy #~ msgid "Failed to change owner of destination dir to %i: %s" #~ msgstr "" #~ "Ðе удалоÑÑŒ Ñменить текущий каталог на админиÑтративный каталог %sinfo" #~ msgid " EndPointURL: %s" #~ msgstr "URL конечной точки: %s" #~ msgid " QueueName: %s" #~ msgstr "Ð˜Ð¼Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´Ð¸: %s" #, fuzzy #~ msgid " QueueName (ignored): %s" #~ msgstr "ИгнорируетÑÑ (уÑтаревшаÑ)" #~ msgid " Target.Mandatory: true" #~ msgstr " Target.Mandatory: true" #~ msgid " DownloadToCache: true" #~ msgstr " DownloadToCache: true" #~ msgid " Directory element:" #~ msgstr " Элемент Directory:" #, fuzzy #~ msgid "URL of ExecutionTarget is not properly defined" #~ msgstr "" #~ "Сравнение, назначение Ð´Ð»Ñ Ð¸ÑполнениÑ: %s, ÑоÑтоÑние Ð·Ð´Ð¾Ñ€Ð¾Ð²ÑŒÑ Ð½Ðµ " #~ "определено" #, fuzzy #~ msgid "URL of ExecutionTarget is not properly defined: %s." 
#~ msgstr "" #~ "Сравнение, назначение Ð´Ð»Ñ Ð¸ÑполнениÑ: %s, ÑоÑтоÑние Ð·Ð´Ð¾Ñ€Ð¾Ð²ÑŒÑ Ð½Ðµ " #~ "определено" #~ msgid "Filetransfer created" #~ msgstr "Передача файла начата" #~ msgid "Cannot accept destination as URL" #~ msgstr "Ðазначение должно быть URL" #~ msgid "Stage in" #~ msgstr "Подгрузка файлов" #~ msgid "Stage out" #~ msgstr "Выгрузка файлов" #~ msgid "Cannot collect resource information" #~ msgstr "Ðе удалоÑÑŒ Ñобрать информацию о реÑурÑе" #~ msgid "No response" #~ msgstr "Ðет ответа" #~ msgid "Cannot find job id" #~ msgstr "Ðе удалоÑÑŒ найти идентификатор задачи" #~ msgid "Cannot find scheduler endpoint" #~ msgstr "Ðе удалоÑÑŒ найти конечную точку планировщика" #~ msgid "Status: %s %d" #~ msgstr "СоÑтоÑние: %s %d" #~ msgid "Process job: %s" #~ msgstr "Обработка задачи: %s" #~ msgid "No scheduler configured" #~ msgstr "Ðи одного планировщика не наÑтроено" #~ msgid "Do Request: %s" #~ msgstr "ИÑполнение запроÑа: %s" #~ msgid "No free CPU slot" #~ msgstr "ОтÑутÑтвуют доÑтупные Ñвободные процеÑÑоры" #~ msgid "Per: %d" #~ msgstr "Период: %d" #~ msgid "Report status" #~ msgstr "Отчёт о ÑоÑтоÑнии" #~ msgid "%s reported %s" #~ msgstr "%s Ñообщает %s" #~ msgid "%s reported" #~ msgstr "%s Ñообщает" #~ msgid "%s job reported finished" #~ msgstr "Задача %s закончена" #~ msgid "Get activity status changes" #~ msgstr "Получение информации об изменении ÑоÑтоÑниÑ" #~ msgid "%s new status: %s" #~ msgstr "Ðовое ÑоÑтоÑние %s: %s" #~ msgid "Killing %s" #~ msgstr "ПрерываетÑÑ %s" #~ msgid "pre cleanup %s %d" #~ msgstr "Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¾Ñ‡Ð¸Ñтка %s %d" #~ msgid "cleanup %s" #~ msgstr "очиÑтка %s" #~ msgid "cleanup 2 %s" #~ msgstr "очиÑтка 2 %s" #~ msgid "PaulService shutdown" #~ msgstr "Выключение PaulService" #~ msgid "Terminate job %s" #~ msgstr "Terminate job %s" #~ msgid "** %s" #~ msgstr "** %s" #~ msgid "Cannot allocate output raw buffer" #~ msgstr "Ðе удалоÑÑŒ зарезервировать буфер вывода" #~ msgid "Permission denied from %s host" #~ msgstr "Сервер %s host отказал в доÑтупе" #~ msgid "Start process" #~ msgstr "Ðачать процеÑÑ" #~ msgid "Invalid JSDL! Missing application section" #~ msgstr "ÐедопуÑтимый формат JSDL! ОтÑутÑтвует раздел \"application\"." 
#~ msgid "%s set exception" #~ msgstr "%s приÑвоена ошибка" #~ msgid "Empty executable" #~ msgstr "Ðе задан иÑполнÑемый файл" #~ msgid "Windows cmd path: %s" #~ msgstr "Путь поиÑка команд Windows: %s" #~ msgid "Cmd: %s" #~ msgstr "Команда: %s" #~ msgid "StdOut: %s" #~ msgstr "Стандартный выход: %s" #~ msgid "StdErr: %s" #~ msgstr "Ð¡Ñ‚Ð°Ð½Ð´Ð°Ñ€Ñ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°: %s" #~ msgid "return from run" #~ msgstr "возврат поÑле иÑполнениÑ" #~ msgid "Error during the application run" #~ msgstr "Ошибка при иÑполнении приложениÑ" #~ msgid "Exception: %s" #~ msgstr "Ошибка: %s" #~ msgid "SpawnError" #~ msgstr "SpawnError" #~ msgid "Status request failed" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии удалÑÑ" #~ msgid "Status request succeed" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии удалÑÑ" #~ msgid "The response to a status request was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "Service status request failed" #~ msgstr "Ошибка запроÑа о ÑоÑтоÑнии Ñлужбы" #~ msgid "Service status request succeed" #~ msgstr "УÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñлужбы" #~ msgid "Job termination request failed" #~ msgstr "ошибка запроÑа об обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "Job termination request succeed" #~ msgstr "уÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #~ msgid "file_name: " #~ msgstr "Ðазвание файла:" #~ msgid "Jsdl: " #~ msgstr "JSDL: " #~ msgid "The submited JSDL file's name: " #~ msgstr "Ð˜Ð¼Ñ Ð·Ð°Ð´Ð°Ð½Ð½Ð¾Ð³Ð¾ файла JSDL: " #~ msgid "Jod Id: " #~ msgstr "Идентификатор задачи:" #~ msgid "STATUS: " #~ msgstr "СОСТОЯÐИЕ:" #~ msgid "Info from the ISIS" #~ msgstr "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¸Ð· ISIS" #~ msgid "job(s) submit" #~ msgstr "запуÑк задач(и)" #~ msgid "Wrong Job submitting! URL: " #~ msgstr "Ðеверный запуÑк задачи! URL: " #~ msgid " Achitecture: " #~ msgstr " Ðрхитектура: " #~ msgid "Result(s) download" #~ msgstr "Загрузка результатов:" #~ msgid "Download Place: " #~ msgstr "РаÑположение загруженных файлов:" #~ msgid "Download cycle: start" #~ msgstr "Цикл загрузки: начало" #~ msgid "Current Arhitecture: " #~ msgstr "Ð¢ÐµÐºÑƒÑ‰Ð°Ñ Ð°Ñ€Ñ…Ð¸Ñ‚ÐµÐºÑ‚ÑƒÑ€Ð°:" #~ msgid "Empty Job ID. Go to the next Job ID." #~ msgstr "ПуÑтой Ñрлык задачи. Переход к Ñледующему Ñрлыку." 
#~ msgid "Download url: " #~ msgstr "URL Ð´Ð»Ñ Ð·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ¸:" #~ msgid "Download path: " #~ msgstr "Путь Ð´Ð»Ñ Ð·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ¸:" #~ msgid "Download cycle: end" #~ msgstr "Цикл загрузки: конец" #~ msgid "Finished the compile: " #~ msgstr "КомпилÑÑ†Ð¸Ñ Ð¾ÐºÐ¾Ð½Ñ‡ÐµÐ½Ð°:" #~ msgid " The SOAP message send and return" #~ msgstr " Отправленное и полученное Ñообщение SOAP" #~ msgid "Can not create output SOAP payload for delegation service" #~ msgstr "Ðе удалоÑÑŒ Ñоздать выходную нагрузку SOAP Ð´Ð»Ñ Ñлужбы делегированиÑ" #~ msgid "Can not store proxy certificate" #~ msgstr "Ðе удалоÑÑŒ Ñохранить доверенноÑть" #~ msgid "" #~ "Delegated credentials:\n" #~ " %s" #~ msgstr "" #~ "Делегированные параметры доÑтупа:\n" #~ " %s" #~ msgid "Can not find the corresponding credential from credential cache" #~ msgstr "Ðе удалоÑÑŒ найти ÑоответÑтвующие параметры доÑтупа в кÑше" #~ msgid "Signing proxy on delegation service failed" #~ msgstr "Ðе удалоÑÑŒ заверить доверенноÑть на Ñлужбе делегации" #~ msgid "Cannot create SOAP fault" #~ msgstr "Ðевозможно Ñформулировать ошибку SOAP" #~ msgid "GetActivityStatuses: job %s not found" #~ msgstr "GetActivityStatuses: задача %s не обнаружена" #~ msgid "ChangeActivityStatuses: job %s not found" #~ msgstr "ChangeActivityStatuses: задача %s не обнаружена" #~ msgid "GetActivityDocuments: job %s not found" #~ msgstr "GetActivityDocuments: задача %s не обнаружена" #~ msgid "GetActivityStatuses: job %s" #~ msgstr "GetActivityStatuses: задача %s" #~ msgid "doSched" #~ msgstr "Ð’ doSched..." #~ msgid "jobq checkpoint done" #~ msgstr "ÐšÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° jobq пройдена" #~ msgid "" #~ "Count of jobs: %i Count of resources: %i Scheduler period: %i Endpoint: " #~ "%s DBPath: %s" #~ msgstr "" #~ "КоличеÑтво задач: %i КоличеÑтво реÑурÑов: %i Период планировщика: %i " #~ "ÐšÐ¾Ð½ÐµÑ‡Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ°: %s DBPath: %s" #~ msgid "NEW job: %s" #~ msgstr "ÐОВÐЯ задача: %s" #~ msgid "A-REX ID: %s" #~ msgstr "Идентификатор A-REX: %s" #~ msgid "Sched job ID: %s NOT SUBMITTED" #~ msgstr "Sched задача: %s NOT SUBMITTED" #~ msgid "%s set killed" #~ msgstr "%s оборвано" #~ msgid "%s remove from queue" #~ msgstr "%s удалено из очереди" #~ msgid "Sched job ID: %s (A-REX job ID is empty)" #~ msgstr "Sched задача: %s (пуÑтой Ñрлык задачи A-REX)" #~ msgid "Job RESCHEDULE: %s" #~ msgstr "задача перепланирована: %s" #~ msgid "JobID: %s state: %s" #~ msgstr "JobID: %s ÑоÑтоÑние: %s" #~ msgid "doReschedule" #~ msgstr "Ð’ doReschedule..." 
#~ msgid "Rescheduled job: %s" #~ msgstr "Rescheduled job: %s" #~ msgid "Error during database open: %s" #~ msgstr "Ошибка при открытии базы данных: %s" #~ msgid "Assigned new informational document" #~ msgstr "Добавлен новый информационный документ" #~ msgid "Failed to create informational document" #~ msgstr "Сбой при Ñоздании информационного документа" #~ msgid "%d <> %d" #~ msgstr "%d <> %d" #~ msgid "Cannot get resource ID" #~ msgstr "Ðевозможно получить идентификатор реÑурÑа" #~ msgid "invalid job id" #~ msgstr "неверный Ñрлык задачи" #~ msgid "Invalid status report" #~ msgstr "ÐедопуÑтимые данные о ÑоÑтоÑнии" #~ msgid "%s reports job status of %s but it is running on %s" #~ msgstr "%s отчитываетÑÑ Ð¾ ÑоÑтоÑнии задачи %s, но она запущена на %s" #~ msgid "%s try to status change: %s->%s" #~ msgstr "%s пытаетÑÑ Ð¸Ð·Ð¼ÐµÐ½Ð¸Ñ‚ÑŒ ÑоÑтоÑние: %s->%s" #~ msgid "refresh: Cannot abort transaction: %s" #~ msgstr "Ð’ refresh: Ðевозможно прервать передачу: %s" #~ msgid "refresh: Error during transaction: %s" #~ msgstr "обновление: Ошибка при транзакции: %s" #~ msgid "operator[]: Cannot abort transaction: %s" #~ msgstr "operator[]: Ðе удалоÑÑŒ прервать передачу: %s" #~ msgid "remove: Cannot abort transaction: %s" #~ msgstr "удаление: Ðевозможно оборвать транзакцию: %s" #~ msgid "Job type: single" #~ msgstr "Тип задачи: одиночнаÑ" #~ msgid "Job type: collection" #~ msgstr "Тип задачи: набор" #~ msgid "Job type: parallel" #~ msgstr "Тип задачи: параллельнаÑ" #~ msgid "Job type: workflownode" #~ msgstr "Тип задачи: узел поточного заданиÑ" #, fuzzy #~ msgid "Failed setting signal handler for SIGHUP" #~ msgstr "Ðе удалоÑÑŒ задать владельца файла: %s" #, fuzzy #~ msgid "Failed setting signal handler for SIGCHLD" #~ msgstr "Ошибка: не удалоÑÑŒ уÑтановить обработчик SIGCHLD" #, fuzzy #~ msgid "Failed setting signal handler for SIGTERM" #~ msgstr "Ошибка: не удалоÑÑŒ уÑтановить обработчик SIGTERM" #, fuzzy #~ msgid "Failed setting signal handler for SIGINT" #~ msgstr "Ðе удалоÑÑŒ задать владельца файла: %s" #, fuzzy #~ msgid "Failed to create thread for handling signals" #~ msgstr "Ðе удалоÑÑŒ Ñоздать контекÑÑ‚ GSI: %s" #, fuzzy #~ msgid "Failure creating slot for child process." #~ msgstr "%s: Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¾Ð±Ð»Ð°Ñти памÑти Ð´Ð»Ñ Ð´Ð¾Ñ‡ÐµÑ€Ð½ÐµÐ³Ð¾ процеÑÑа" #, fuzzy #~ msgid "Failure forking child process." #~ msgstr "%s: Сбой при запуÑке дочернего процеÑÑа" #, fuzzy #~ msgid "Timeout waiting for child to finish" #~ msgstr "%s: Сбой Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ Ð´Ð¾Ñ‡ÐµÑ€Ð½ÐµÐ³Ð¾ процеÑÑа" #, fuzzy #~ msgid "Failure opening pipes." 
#~ msgstr "Поток %d, Ñбой Ð¿ÐµÑ€ÐµÐ½Ð°Ð¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñ‚Ð¾ÐºÐ¾Ð²" #~ msgid "TargetRetriverCREAM initialized with %s service url: %s" #~ msgstr "TargetRetriverCREAM запущен Ñ Ð°Ð´Ñ€ÐµÑом Ñлужбы %s: %s" #~ msgid "TargetRetriverARC0 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC0 запущен Ñ URL Ñлужбы %s:: %s" #~ msgid "TargetRetriverUNICORE initialized with %s service url: %s" #~ msgstr "TargetRetriverUNICORE запущен Ñ URL Ñлужбы %sl: %s" #~ msgid "TargetRetriverARC1 initialized with %s service url: %s" #~ msgstr "TargetRetriverARC1 запущен Ñ URL Ñлужбы %s:: %s" #, fuzzy #~ msgid "Failed locating delegation credentials in chain configuration" #~ msgstr "Ðе удалоÑÑŒ обнаружить доверенноÑти в конфигурации клиента" #, fuzzy #~ msgid "Found malformed job state string: %s" #~ msgstr "Ðе удалоÑÑŒ получить информацию о задаче: %s" #, fuzzy #~ msgid "Failed to set PEPd URL: '%s'" #~ msgstr "не удалоÑÑŒ уÑтановить Ñкан-код %x коду %d\n" #, fuzzy #~ msgid "Failed to create XACML request\n" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ %s к %s не выполнен, получен ответ: %s" #, fuzzy #~ msgid "Failed to authorize XACML request: %s\n" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ %s к %s не выполнен, получен ответ: %s" #, fuzzy #~ msgid "Response is null" #~ msgstr "Ключ имеет значение NULL" #, fuzzy #~ msgid "%s is not authorized" #~ msgstr "%s не ÑвлÑетÑÑ Ð¾Ð±ÑŠÐµÐºÑ‚Ð¾Ð¼" #, fuzzy #~ msgid "Failed to create soft link: %s" #~ msgstr "невозможно Ñоздать жеÑткую ÑÑылку %s на %s" #, fuzzy #~ msgid " XRSL_elements: [%s], %s" #~ msgstr "Дополнительные Ñлементы" #, fuzzy #~ msgid " JDL_elements: [%s], %s" #~ msgstr "Дополнительные Ñлементы" #~ msgid "Try to parse as XRSL" #~ msgstr "Попытка ÑинтакÑичеÑкого разбора как XRSL" #~ msgid "Try to parse as JDL" #~ msgstr "Попытка ÑинтакÑичеÑкого разбора как JDL" #~ msgid "Try to parse as ARCJSDL" #~ msgstr "Попытка ÑинтакÑичеÑкого разбора как ARC JSDL" #~ msgid "Generate JDL output" #~ msgstr "Создание JDL на выходе" #~ msgid "Generating %s output was unsuccessful" #~ msgstr "Создание %s на выходе не удалоÑÑŒ" #~ msgid "Generate XRSL output" #~ msgstr "Создание XRSL на выходе" #~ msgid "Generate ARCJSDL output" #~ msgstr "Созадние ARC JSDL на выходе" #~ msgid "Unknown output format: %s" #~ msgstr "ÐеизвеÑтный формат вывода: %s" #~ msgid " ExecutionCE: %s" #~ msgstr "ИÑполнÑющий вычиÑлительный Ñлемент: %s" #, fuzzy #~ msgid "Cannot parse the specified %s service (%s)" #~ msgstr "Ðе удалоÑÑŒ найти клаÑÑ ÑервиÑа" #, fuzzy #~ msgid "The specified %s service (%s) is not a valid URL" #~ msgstr "Ð—Ð°Ð´Ð°Ð½Ð½Ð°Ñ Ð¿Ð°Ð¿ÐºÐ° некорректна" #~ msgid "" #~ "cnd:\n" #~ "%s is a %s" #~ msgstr "" #~ "cnd:\n" #~ "%s is a %s" #, fuzzy #~ msgid "globus_io_cancel failed: %s" #~ msgstr "%s: Ðе удалоÑÑŒ выполнить процедуру прерываниÑ" #, fuzzy #~ msgid "Connect to %s failed: %s" #~ msgstr "Ðе удалоÑÑŒ уÑтановить Ñоединение Ñ %s" #, fuzzy #~ msgid "clear_input: %s" #~ msgstr "ОчиÑтить ввод" #~ msgid "Connection closed" #~ msgstr "Подключение закрыто" #, fuzzy #~ msgid "Globus error (read): %s" #~ msgstr "%s: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð½Ð° %s\n" #, fuzzy #~ msgid "*** Server response: %s" #~ msgstr "&Ответ Ñервера:" #, fuzzy #~ msgid "Globus error (write): %s" #~ msgstr "" #~ "Ошибка:\n" #~ "\n" #~ "Ðе удалоÑÑŒ запиÑать %s\n" #, fuzzy #~ msgid "*** Client request: %s" #~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ ÑƒÐ´Ð°Ð»ÑÑ!!!" 
#~ msgid "Authenticating: %s" #~ msgstr "Проверка подлинноÑти: %s" #, fuzzy #~ msgid "Connection to server failed: %s" #~ msgstr "Ðе удалоÑÑŒ прервать ÑвÑзь Ñ Ñервером" #~ msgid "Failed to read SSL token during authentication" #~ msgstr "Ðе удалоÑÑŒ прочеÑть токен SSL в процеÑÑе проверки подлинноÑти" #, fuzzy #~ msgid "Failed unwrapping GSI token: %s" #~ msgstr "Ðе удалоÑÑŒ Ñоздать контекÑÑ‚ GSI: %s" #, fuzzy #~ msgid "Urecognized SSL token received" #~ msgstr "получен неверный ответ на ÑоглаÑование по SSL: %c\n" #, fuzzy #~ msgid "Timeout while sending SOAP request" #~ msgstr "Создание и отправка запроÑа SOAP" #~ msgid "Error sending data to server" #~ msgstr "Ошибка передачи данных на Ñервер." #, fuzzy #~ msgid "read_response_header: line: %s" #~ msgstr "Слишком длинный заголовок" #, fuzzy #~ msgid "Timeout while reading response header" #~ msgstr "" #~ "Ошибка при чтении заголовка файла:\n" #~ " %1" #, fuzzy #~ msgid "Error while reading response header" #~ msgstr "" #~ "Ошибка при чтении заголовка файла:\n" #~ " %1" #, fuzzy #~ msgid "read_response_header: header finished" #~ msgstr "невозможно Ñчитать заголовок архива" #, fuzzy #~ msgid "skip_response_entity" #~ msgstr "Ð”Ñ€ÑƒÐ³Ð°Ñ Ð²Ð½ÐµÑˆÐ½ÑÑ ÑущноÑть" #, fuzzy #~ msgid "skip_response_entity: no entity" #~ msgstr "ÑущноÑть не имеет атрибута %s" #~ msgid "Not connected" #~ msgstr "Ðет подключениÑ" #, fuzzy #~ msgid "Timeout sending header" #~ msgstr "Шрифт колонтитулов:" #~ msgid "Early response from server" #~ msgstr "Преждевременный ответ Ñервера" #~ msgid "No response from server received" #~ msgstr "Ответ Ñервера не получен" #~ msgid "Failed to send body" #~ msgstr "Ðе удалоÑÑŒ отправить тело" #, fuzzy #~ msgid "Failure while receiving entity" #~ msgstr "ошибка при запиÑи данных Ð´Ð»Ñ ÐºÐ°Ñ‚ÐµÐ³Ð¾Ñ€Ð¸Ð¸`%s'" #, fuzzy #~ msgid "Timeout while sending header" #~ msgstr "ошибка при отправке %(message)s ( %(error)s )" #, fuzzy #~ msgid "GET: connection to be closed" #~ msgstr "Ðе удалоÑÑŒ принудительно прервать ÑвÑзь Ñ" #, fuzzy #~ msgid "GET callback returned error" #~ msgstr "Ошибка печати: команда «%s» возвратила %d\n" #, fuzzy #~ msgid "Failed while reading response content" #~ msgstr "ошибка при чтении данных ленты.\n" #, fuzzy #~ msgid "Timeout while reading response content" #~ msgstr "Создание и отправка запроÑа" #, fuzzy #~ msgid "Error while reading response content" #~ msgstr "Ошибка при чтении %d-ой из %d точек: %s\n" #, fuzzy #~ msgid "GET: calling callback: size: %u" #~ msgstr "Ðевозможно получить размер диÑка" #~ msgid "SOAP request failed (%s)" #~ msgstr "Ошибка запроÑа SOAP (%s)" #, fuzzy #~ msgid "SOAP request failed (srmMkdir)" #~ msgstr "Ошибка запроÑа SOAP (copy)" #~ msgid "SOAP request failed (get)" #~ msgstr "Ошибка запроÑа SOAP (get)" #~ msgid "SOAP request failed (getRequestStatus)" #~ msgstr "Ошибка запроÑа SOAP (getRequestStatus)" #~ msgid "SOAP request failed (put)" #~ msgstr "Ошибка запроÑа SOAP (put)" #~ msgid "SOAP request failed (copy)" #~ msgstr "Ошибка запроÑа SOAP (copy)" #~ msgid "SOAP request failed (setFileStatus)" #~ msgstr "Ошибка запроÑа SOAP (setFileStatus)" #~ msgid "SOAP request failed (getFileMetaData)" #~ msgstr "Ошибка запроÑа SOAP (getFileMetaData)" #, fuzzy #~ msgid "Response(%i): %s" #~ msgstr "Ответ" #~ msgid "Submission to %s failed, trying next target" #~ msgstr "Сбой заÑылки задачи на %s, проверка Ñледующего назначениÑ" #~ msgid "" #~ "path to local cache (use to put file into cache). The X509_USER_PROXY and " #~ "X509_CERT_DIR environment variables must be set correctly." 
#~ msgstr "" #~ "путь к локальному кÑшу (иÑпользуйте Ð´Ð»Ñ ÑÐ¾Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° в кÑше). " #~ "УбедитеÑÑŒ, что переменные Ñреды X509_USER_PROXY и X509_CERT_DIR заданы " #~ "правильно." #, fuzzy #~ msgid "" #~ "The config: \n" #~ "%s \n" #~ msgstr "ÐšÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ %1" #~ msgid "%s > %s => false: \\%s contains non numbers in the version part." #~ msgstr "" #~ "%s > %s => неверно: \\%s Ñодержит нецифровые Ñимволы в номере верÑии." #, fuzzy #~ msgid "Can not locate CA certificate directory." #~ msgstr "не удалоÑÑŒ прочитать файл корневых Ñертификатов \"%s\": %s\n" #~ msgid "Client chain configuration: %s" #~ msgstr "ÐšÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ñ†ÐµÐ¿Ð¾Ñ‡ÐºÐ¸Ð¿Ñ€Ð¸Ð¶Ð°Ñ‚Ñ‹Ðµ клиента: %s" #~ msgid "Cannot import arc module" #~ msgstr "Ðе удалоÑÑŒ импортировать модуль ARC" #, fuzzy #~ msgid "Cannot find arc XMLNode class" #~ msgstr "КлаÑÑ ARC XMLNode не найден" #, fuzzy #~ msgid "Cannot stat local executable input file %s" #~ msgstr "Ðевозможно прочеÑть локальный ÑпиÑок задач" #~ msgid "The parsing of the job description was unsuccessful" #~ msgstr "СинтакÑичеÑÐºÐ°Ñ Ñ€Ð°Ð·Ð±Ð¾Ñ€ÐºÐ° опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ не удалаÑÑŒ" #, fuzzy #~ msgid "XRSL parsing problem" #~ msgstr "&Выводить информацию о проблемах" #, fuzzy #~ msgid "Cannot find arc UserConfig class" #~ msgstr "нет файла Ð´Ð»Ñ ÐºÐ»Ð°ÑÑа %s" #, fuzzy #~ msgid "Encrypted saml assertion: %s" #~ msgstr "Разблокирование зашифрованных данных" #, fuzzy #~ msgid "Failed to create/find directory %s : %s" #~ msgstr "Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s/.gnome." #~ msgid "Failed to create/find directory %s, (%d)" #~ msgstr "Ошибка ÑозданиÑ/Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s, (%d)" #, fuzzy #~ msgid "start_reading_srm: looking for metadata: %s" #~ msgstr "ПоиÑк пакетов Gentoo: " #, fuzzy #~ msgid "start_reading_srm: obtained checksum: %s" #~ msgstr "ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %x, а должна быть %x" #~ msgid "explicity select or reject an index server" #~ msgstr "Ñвным образом выбрать или отÑеÑть указанный каталог реÑурÑов" #~ msgid "[job ...]\n" #~ msgstr "[задача ...]\n" #, fuzzy #~ msgid "Cannot find vomses at %s, %s, %s, %s and %s" #~ msgstr "Ðе удаётÑÑ Ð½Ð°Ð¹Ñ‚Ð¸ уÑтройÑтво диÑка %1 Ñ Ð¿Ð»Ð¾Ñ‚Ð½Ð¾Ñтью %2." #~ msgid "IdP name" #~ msgstr "Ð˜Ð¼Ñ IdP" #~ msgid "Configured username is invalid %s" #~ msgstr "ÐаÑтроенное Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½ÐµÐ´Ð¾Ð¿ÑƒÑтимо %s" #~ msgid "%s: State FINISHING: starting child: %s" #~ msgstr "%s: ÑоÑтоÑние FINISHING: запуÑк дочернего процеÑÑа: %s" #~ msgid "%s: State: PREPARING: credentials probably expired (exit code 3)" #~ msgstr "" #~ "%s: ÑоÑтоÑние PREPARING: вероÑтно, иÑтёк Ñрок дейÑÑ‚Ð²Ð¸Ñ Ð¼Ð°Ð½Ð´Ð°Ñ‚Ð° (код " #~ "выхода 3)" #~ msgid "" #~ "%s: State: PREPARING: some error detected (exit code %i). Recover from " #~ "such type of errors is not supported yet." #~ msgstr "" #~ "%s: ÑоÑтоÑние PREPARING:обнаружена ошибка (код выхода %i). ВоÑÑтановление " #~ "поÑле такой ошибки пока не поддерживаетÑÑ." 
#~ msgid "url of myproxy server" #~ msgstr "URL Ñервера MyProxy" #~ msgid "Returned msg from myproxy server: %s" #~ msgstr "Сообщение Ñервера MyProxy: %s" #~ msgid "Myproxy server return failure msg" #~ msgstr "Сервер MyProxy Ñообщил об ошибке" #~ msgid "ARC_PLUGIN_PATH=%s" #~ msgstr "ARC_PLUGIN_PATH=%s" #~ msgid "Can not read key file: %s" #~ msgstr "Ðе удалоÑÑŒ прочитать файл личного ключа: %s" #, fuzzy #~ msgid "StartReading: obtained size: %lli" #~ msgstr "ДиÑковый кÑш, вÑего" #, fuzzy #~ msgid "Retrying with gsi protocol...\n" #~ msgstr "Проблема Ñ Ð¿Ð¾Ð´Ñ‚Ð²ÐµÑ€Ð¶Ð´ÐµÐ½Ð¸ÐµÐ¼ мандата" #, fuzzy #~ msgid "start_reading_ftp: failure" #~ msgstr "ftpfs: чтение каталога FTP %s... %s%s" #~ msgid "failed to send to %d of %s" #~ msgstr "не удалоÑÑŒ отправить на %d %s" #~ msgid "%s: Plugin in state %s : %s" #~ msgstr "%s: Подключаемый модуль в ÑоÑтоÑнии %s : %s" #~ msgid "Will not use caching" #~ msgstr "КÑширование иÑпользоватьÑÑ Ð½Ðµ будет" #~ msgid "Cannot clean up any cache files" #~ msgstr "Ðе удалоÑÑŒ Ñтереть кÑшированые файлы" #, fuzzy #~ msgid "store job descriptions in local sandbox." #~ msgstr "Ðевозможно открыть файл Ñ Ð¾Ð¿Ð¸Ñанием задачи: %s" #, fuzzy #~ msgid "Failed to load service configuration form file %s" #~ msgstr "Ðе удалоÑÑŒ загрузить конфигурацию ÑервиÑа" #, fuzzy #~ msgid "Contacting VOMS server (named %s): %s on port: %i" #~ msgstr "" #~ "УÑтанавливаетÑÑ ÑвÑзь Ñ Ñервером VOMS (по имени %s): %s по порту: %s" #, fuzzy #~ msgid "Getting %s jobs" #~ msgstr "" #~ " -a, -all применить ко вÑем задачам пользователÑ" #, fuzzy #~ msgid "Killing %s jobs" #~ msgstr "" #~ " -a, -all применить ко вÑем задачам пользователÑ" #, fuzzy #~ msgid "Cleaning %s jobs" #~ msgstr "" #~ " -a, -all применить ко вÑем задачам пользователÑ" #, fuzzy #~ msgid "Cannot migrate to a %s cluster." #~ msgstr "ДиÑплей DMX на который проиÑходит перемещение" #~ msgid "No valid jobdescription found for: %s" #~ msgstr "Ðе обнаружено допуÑтимых опиÑаний задачи: %s" #, fuzzy #~ msgid "Creating delegation failed" #~ msgstr "%s: Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ð½Ð°Ð»Ð°" #~ msgid "Job registration failed" #~ msgstr "Ошибка региÑтрации задачи" #~ msgid "Job starting failed" #~ msgstr "Ошибка запуÑка задачи" #~ msgid "Could not retrieve job information" #~ msgstr "Ðе удалоÑÑŒ получить информацию о задаче" #, fuzzy #~ msgid "The node %s has no %s element." #~ msgstr "Документ `%s' не имеет узла верхнего ÑƒÑ€Ð¾Ð²Ð½Ñ <%s>\n" #, fuzzy #~ msgid "The response was not a SOAP message" #~ msgstr "" #~ "Содержимое пиÑьма не было принÑто.\n" #~ "%1" #~ msgid "Fetching job state" #~ msgstr "ИзвлекаетÑÑ ÑоÑтоÑние задачи" #, fuzzy #~ msgid "The status of the job (%s) could not be retrieved." #~ msgstr "Файл не может быть Ñоздан" #, fuzzy #~ msgid "The response to a service status request is Fault message: " #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñлужбы не ÑвлÑетÑÑ Ñообщением SOAP" #, fuzzy #~ msgid "There was an empty response to an index service query" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñлужбы" #, fuzzy #~ msgid "The response of a index service query was not a SOAP message" #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии не ÑвлÑетÑÑ Ñообщением SOAP" #, fuzzy #~ msgid "Request failed, service returned: %s" #~ msgstr "Ошибка публикации Ñлужбы" #, fuzzy #~ msgid "Migration failed, service returned: %s" #~ msgstr "Ошибка публикации Ñлужбы" #, fuzzy #~ msgid "Job resuming failed" #~ msgstr "Ðе удалоÑÑŒ поÑлать задачу" #, fuzzy #~ msgid "Job resumed at state: %s" #~ msgstr "ÃÂõòõрýþõ ÷ýðчõýøõ job-state!" 
#, fuzzy #~ msgid "Failed migrating job" #~ msgstr "Ðе удалоÑÑŒ поÑлать задачу" #~ msgid "Timer kicking" #~ msgstr "ЗапуÑкаетÑÑ Ñ‚Ð°Ð¹Ð¼ÐµÑ€" #~ msgid "Multiple " #~ msgstr "МножеÑтвенные" #, fuzzy #~ msgid "Multiple timeout attributes in configuration file (%s)" #~ msgstr "Ðе удалоÑÑŒ загрузить конфигурацию ÑервиÑа" #, fuzzy #~ msgid "Multiple brokername attributes in configuration file (%s)" #~ msgstr "Ðе удалоÑÑŒ загрузить конфигурацию ÑервиÑа" #, fuzzy #~ msgid "Multiple bartender attributes in configuration file (%s)" #~ msgstr "Чтение файла конфигурации: %s" #, fuzzy #~ msgid "Multiple keysize attributes in configuration file (%s)" #~ msgstr "Чтение файла конфигурации: %s" #~ msgid "lasso_assertion_query_new() failed" #~ msgstr "Ñбой в lasso_assertion_query_new()" #~ msgid "lasso_assertion_query_init_request failed" #~ msgstr "Ñбой в lasso_assertion_query_init_request" #~ msgid "lasso_assertion_query_build_request_msg failed" #~ msgstr "Ñбой в lasso_assertion_query_build_request_msg" #~ msgid "assertionRequestBody shouldn't be NULL" #~ msgstr "assertionRequestBody не может быть NULL" #~ msgid "lasso_assertion_query_process_response_msg failed" #~ msgstr "Ñбой lasso_assertion_query_process_response_msg" #~ msgid "Configuration: LRMS: %s" #~ msgstr "КонфигурациÑ: СУПО: %s" #~ msgid "Configuration: Queue: %s" #~ msgstr "КонфигурациÑ: Очередь: %s" #~ msgid "process: CreateActivity" #~ msgstr "процеÑÑ: CreateActivity" #, fuzzy #~ msgid "Couldn't parse value \"%s\" of benchmark %s. Parse error: \"%s\"." #~ msgstr "Ошибка: невозможно обработать %1 как значение параметра.\n" #, fuzzy #~ msgid "Couldn't parse benchmark string: \"%s\"." #~ msgstr "Ðевозможно открыть файл проекта" #, fuzzy #~ msgid "lhs > rhs TRUE" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid "lhs < rhs TRUE" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid "Failed resolving aliases" #~ msgstr "Ñоздание алиаÑов ÑловарÑ" #, fuzzy #~ msgid "Matchmaking, ExecutionTarget URL: %s " #~ msgstr "Ðеправильный URL: %1" #, fuzzy #~ msgid "Resolving alias: %s" #~ msgstr "&Изменить пÑевдоним..." #, fuzzy #~ msgid "Alias \"%s\" requested but not defined" #~ msgstr "метка %q+D определена, но не иÑпользуетÑÑ" #, fuzzy #~ msgid "Done resolving alias: %s" #~ msgstr "СинтакÑичеÑкий анализ Ñокращённого имени" #, fuzzy #~ msgid "Key is not a file: %s" #~ msgstr "%s: файл %s не ÑвлÑетÑÑ Ð°Ñ€Ñ…Ð¸Ð²Ð¾Ð¼\n" #, fuzzy #~ msgid "The specified configuration file (%s) is not a regular file" #~ msgstr "Файл \"%s\" не ÑвлÑетÑÑ Ð¾Ð±Ñ‹Ñ‡Ð½Ñ‹Ð¼ файлом или каталогом." #, fuzzy #~ msgid "XML user configuration (%s) loaded" #~ msgstr "Пакет конфигурации уÑпешно загружен." #, fuzzy #~ msgid "INI user configuration (%s) loaded" #~ msgstr "Пакет конфигурации уÑпешно загружен." 
#~ msgid "SSL_library_init failed" #~ msgstr "Сбой в SSL_library_init" #, fuzzy #~ msgid "timeout in seconds (default " #~ msgstr "Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð² Ñекундах (по умолчанию 20)" #~ msgid "select broker method (Random (default), QueueBalance, or custom)" #~ msgstr "" #~ "выбрать алгоритм планировщика (Random (по умолчанию), QueueBalance, или " #~ "Ñпециальный)" #~ msgid "ERROR" #~ msgstr "ОШИБКÐ" #~ msgid "DMCs are loaded" #~ msgstr "Подгружены компоненты цепи Ñообщений" #~ msgid " And now I am there" #~ msgstr "Вот мы и здеÑÑŒ" #~ msgid "wrong option in cacheregistration" #~ msgstr "Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² cacheregistration" #, fuzzy #~ msgid "Failed to allocate SSL locks" #~ msgstr "Ðевозможно выделить памÑть Ð´Ð»Ñ Ð¸Ð·Ð¾Ð±Ñ€Ð°Ð¶ÐµÐ½Ð¸Ñ:" #, fuzzy #~ msgid "Current transfer FAILED" #~ msgstr "Сбой переноÑа файла." #, fuzzy #~ msgid "Creating and sending a service an index service query" #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии Ñлужбы" #, fuzzy #~ msgid "Creating client chain for UNICORE BES service" #~ msgstr "СоздаётÑÑ Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð°" #, fuzzy #~ msgid "Request xml structure is: %s" #~ msgstr "Ð˜Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° \"%s\" формата XML на указывает на каталог" #, fuzzy #~ msgid "UnAuthorized from count.pdp!!!" #~ msgstr "Защитить компьютер от неÑанкционированного иÑпользованиÑ" #~ msgid "Plugins element has no Name defined" #~ msgstr "Ð’ Ñлементе Plugins не задано имÑ" #, fuzzy #~ msgid "DataManager has no name attribute defined" #~ msgstr "Ðе задан ни один параметр Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ «%s»" #, fuzzy #~ msgid "DataManager %s(%s) could not be created" #~ msgstr "Файл не может быть Ñоздан" #, fuzzy #~ msgid "Loaded DataManager %s(%s)" #~ msgstr "Страница загружена." #, fuzzy #~ msgid "ArcClientComponent has no name attribute defined" #~ msgstr "Ðе задан ни один параметр Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ «%s»" #, fuzzy #~ msgid "ArcClientComponent %s(%s) could not be created" #~ msgstr "Файл не может быть Ñоздан" #, fuzzy #~ msgid "Loaded ArcClientComponent %s(%s)" #~ msgstr "Страница загружена." #, fuzzy #~ msgid "Adding job info to sandbox" #~ msgstr "" #~ "INFO: Þöøôðю þúþýчðýøѠòыÿþûýõýøѠ÷ðôðчø…\n" #, fuzzy #~ msgid "Request failed: Error1" #~ msgstr "Ошибка запроÑа DDE poke" #, fuzzy #~ msgid "Request failed: Error2" #~ msgstr "Ошибка запроÑа DDE poke" #, fuzzy #~ msgid "Request failed: Error3" #~ msgstr "Ошибка запроÑа DDE poke" #, fuzzy #~ msgid "Request failed: Error4" #~ msgstr "Ошибка запроÑа DDE poke" #, fuzzy #~ msgid "Request failed: Error5" #~ msgstr "Ошибка запроÑа DDE poke" #, fuzzy #~ msgid "Requirements in sub-requirements satisfied." 
#~ msgstr "(ÐедоÑтупно: завиÑимоÑти не удовлетворены)" #, fuzzy #~ msgid "Extracting local file list from job description failed" #~ msgstr "Ðевозможно открыть файл Ñ Ð¾Ð¿Ð¸Ñанием задачи: %s" #~ msgid "Failed uploading file" #~ msgstr "Ошибка загрузки файла" #~ msgid "Can not access ARC job list file: %s (%s)" #~ msgstr "Ðевозможно открыть файл задач ARC: %s (%s)" #, fuzzy #~ msgid "Cannot access ARC user config file: %s (%s)" #~ msgstr "Ðевозможно открыть файл пользовательÑкой конфигурации ARC: %s (%s)" #~ msgid "ARC user config file is not a regular file: %s" #~ msgstr "" #~ "Файл пользовательÑкой конфигурации ARC не ÑвлÑетÑÑ Ñтандартным файлом: %s" #, fuzzy #~ msgid "Could not load system client configuration" #~ msgstr "Ðе удалоÑÑŒ обнаружить ÑиÑтемную конфигурацию клиента" #~ msgid "Path is %s" #~ msgstr "Путь: %s" #, fuzzy #~ msgid "File type is neither file or directory" #~ msgstr "" #~ "%1:\n" #~ "ÐеизвеÑтный тип файла: ни каталог ни файл." #, fuzzy #~ msgid "Cannot migrate from %s clusters." #~ msgstr "недопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð¸Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ %qT из %qT" #, fuzzy #~ msgid "Transfer FAILED" #~ msgstr "Сбой переноÑа файла." #, fuzzy #~ msgid "path for cache data (if different from -y)" #~ msgstr "" #~ " -Y, -cachedata путь путь к опиÑанию кÑша (еÑли отличен от -y)" #, fuzzy #~ msgid "Received AuthURL " #~ msgstr "Получен Ñигнал" #, fuzzy #~ msgid "Received status " #~ msgstr "Ðовое ÑоÑтоÑние %s: %s" #, fuzzy #~ msgid "use the Confusa SLCS service" #~ msgstr "ИÑпользовать Ñлужбу PC-To-Phone" #, fuzzy #~ msgid "Confusa Auth module" #~ msgstr "Модуль поддержки Ñодержимого" #, fuzzy #~ msgid "__del__" #~ msgstr " Удалить " #~ msgid "delete run" #~ msgstr "обрываетÑÑ Ð¸Ñполнение" #, fuzzy #~ msgid "passphrase to myproxy server" #~ msgstr "&Путь к Ñборнику переводов" #, fuzzy #~ msgid " Implementation Version: %s" #~ msgstr "collect2 верÑÐ¸Ñ %s" #, fuzzy #~ msgid " JobName: %s" #~ msgstr "Ðеверное Ð¸Ð¼Ñ Ð·Ð°Ð´Ð°Ð½Ð¸Ñ" #, fuzzy #~ msgid ", value: %s" #~ msgstr "Ошибка: %s" #, fuzzy #~ msgid " Author: %s" #~ msgstr "Ðвтор:" #, fuzzy #~ msgid " Input: %s" #~ msgstr "Вводить" #, fuzzy #~ msgid " Output: %s" #~ msgstr "ВЫВОД" #, fuzzy #~ msgid " Notification element: " #~ msgstr "Ðеожиданный Ñлемент" #, fuzzy #~ msgid " Address: %s" #~ msgstr "ÐÐ´Ñ€ÐµÑ - 1 1/8 x 3 1/2 дюйма" #, fuzzy #~ msgid " Total CPU Time: %s" #~ msgstr "%t - иÑпользование процеÑÑора (ÑиÑтема + пользователи)" #, fuzzy #~ msgid " Individual CPU Time: %s" #~ msgstr "ДлительноÑть по умолчанию (CPU)" #, fuzzy #~ msgid " Total Wall Time: %s" #~ msgstr "ДлительноÑть по умолчанию (по чаÑам)" #, fuzzy #~ msgid " Individual Wall Time: %s" #~ msgstr "ДлительноÑть по умолчанию (по чаÑам)" #, fuzzy #~ msgid " Benchmark: %s" #~ msgstr "Ðеприемлемый Ñталонный теÑÑ‚" #, fuzzy #~ msgid " value: %d" #~ msgstr "Ошибка: %s" #, fuzzy #~ msgid " time: %s" #~ msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ" #, fuzzy #~ msgid " OSName: %s" #~ msgstr "название клаÑÑа: %s" #, fuzzy #~ msgid " OSVersion: %s" #~ msgstr "%s, верÑÐ¸Ñ %s" #, fuzzy #~ msgid " DiskSpace: %d" #~ msgstr "ÐедоÑтаточно меÑта на диÑке" #, fuzzy #~ msgid " Alias: %s" #~ msgstr "МеÑто" #~ msgid " Latitude: %s" #~ msgstr "Широта: %s" #~ msgid " Longitude: %s" #~ msgstr "Долгота: %s" #, fuzzy #~ msgid " Slots: %d" #~ msgstr "Разъёмы карт..." 
#, fuzzy #~ msgid " RunTimeEnvironment.Version: %s" #~ msgstr "collect2 верÑÐ¸Ñ %s" #, fuzzy #~ msgid " Homogeneous: true" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid " InBound: true" #~ msgstr "Перечёркнутый" #, fuzzy #~ msgid " OutBound: true" #~ msgstr "ИÑходÑщие данные:\n" #, fuzzy #~ msgid " Source.Threads: %d" #~ msgstr "X_vid" #, fuzzy #~ msgid " Target.Threads: %d" #~ msgstr "X_vid" #, fuzzy #~ msgid " Target.NeededReplicas: %d" #~ msgstr "Выбор узла назначениÑ" #, fuzzy #~ msgid "Try to parse as POSIX JSDL" #~ msgstr "опиÑание заÑылаемой задачи: %s" #, fuzzy #~ msgid "[PosixJSDLParser] Failed to create parser context" #~ msgstr "Ðе удалоÑÑŒ Ñоздать контекÑÑ‚ GSI: %s" #, fuzzy #~ msgid "Invalid notify attribute: %c" #~ msgstr "ÐедопуÑтимый интервал времени" #, fuzzy #~ msgid "My hash is: %s" #~ msgstr "Контур закрыт." #, fuzzy #~ msgid "RegistrationCollector function is running." #~ msgstr "Код возврата" #, fuzzy #~ msgid "The ServiceID (%s) is found in the database." #~ msgstr "Файл не может быть Ñоздан" #, fuzzy #~ msgid "RemoveRegistrations: MGenTime=%s" #~ msgstr "СмыÑл-конец" #, fuzzy #~ msgid "Connect" #~ msgstr "Ðет подключениÑ" #, fuzzy #~ msgid "[PeerID] calculated hash: %s" #~ msgstr "ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %x, а должна быть %x" #, fuzzy #~ msgid "[Cert] calculated value: %s" #~ msgstr "ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %x, а должна быть %x" #, fuzzy #~ msgid "[Key] calculated value: %s" #~ msgstr "ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %x, а должна быть %x" #, fuzzy #~ msgid "[Proxy] calculated value: %s" #~ msgstr "ДоверенноÑть дейÑтвительна до: %s" #, fuzzy #~ msgid "[CaDir] calculated value: %s" #~ msgstr "ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %x, а должна быть %x" #, fuzzy #~ msgid "find ServiceID: %s , hash: %d" #~ msgstr "Ð˜Ð¼Ñ ÑервиÑа SP: %s" #, fuzzy #~ msgid "Connect request failed, try again." #~ msgstr "Ошибка при выполнении запроÑа" #, fuzzy #~ msgid "File size is %ul" #~ msgstr "Файл '%s' имеет размер в ноль байт - иÑпользуетÑÑ %s." 
#~ msgid "years" #~ msgstr "года(лет)" #~ msgid "months" #~ msgstr "меÑÑца(ев)" #~ msgid "days" #~ msgstr "днÑ(дней)" #~ msgid "hours" #~ msgstr "чаÑа(ов)" #~ msgid "ENV: " #~ msgstr "ENV: " #~ msgid "Broken RSL in NAME" #~ msgstr "Ðеверный код RSL в NAME" #~ msgid "Broken RSL in clientsoftware" #~ msgstr "Ðеверный код RSL в clientsoftware" #~ msgid "Broken RSL" #~ msgstr "Ðеверный код RSL" #, fuzzy #~ msgid "Failed reading RSL" #~ msgstr "Чтение %s раздела %s завершилоÑÑŒ неудачей: %s" #, fuzzy #~ msgid "Failed parsing RSL" #~ msgstr "Ðе удалоÑÑŒ проанализировать XML" #~ msgid "Broken RSL in jobid" #~ msgstr "Ðеверный код RSL в jobid" #, fuzzy #~ msgid "slashes are not allowed in jobid" #~ msgstr "%s: пробелы в имени закладки не разрешаютÑÑ\n" #~ msgid "Broken RSL in action" #~ msgstr "Ðеверный код RSL в action" #~ msgid "Broken RSL in queue" #~ msgstr "Ðеверный код RSL в queue" #~ msgid "Broken RSL in replicacollection" #~ msgstr "Ðеверный код RSL в replicacollection" #~ msgid "Broken RSL in lifetime" #~ msgstr "Ðеверный код RSL в lifetime" #~ msgid "Broken RSL in starttime" #~ msgstr "Ðеверный код RSL в starttime" #~ msgid "Broken RSL in jobname" #~ msgstr "Ðеверный код RSL в jobname" #~ msgid "Broken RSL in jobreport" #~ msgstr "Ðеверный код RSL в jobreport" #~ msgid "Broken RSL in rerun" #~ msgstr "Ðеверный код RSL в rerun" #, fuzzy #~ msgid "Bad integer in rerun" #~ msgstr "переполнение при вычиÑлении целочиÑленного выражениÑ" #~ msgid "Broken RSL in disk" #~ msgstr "Ðеверный код RSL в disk" #, fuzzy #~ msgid "disk value is bad" #~ msgstr "Предупреждение: ошибка в подпиÑи." #~ msgid "Broken RSL in notify" #~ msgstr "Ðеверный код RSL в notify" #~ msgid "Broken RSL in inputdata" #~ msgstr "Ðеверный код RSL в inputdata" #~ msgid "Broken RSL in outputdata" #~ msgstr "Ðеверный код RSL в outputdata" #~ msgid "Broken RSL in gmlog" #~ msgstr "Ðеверный код RSL в gmlog" #~ msgid "Broken RSL in stdout" #~ msgstr "Ðеверный код RSL в stdout" #~ msgid "Broken RSL in stderr" #~ msgstr "Ðеверный код RSL в stderr" #~ msgid "Broken RSL in ftpthreads" #~ msgstr "Ðеверный код RSL в ftpthreads" #~ msgid "Broken RSL in cache" #~ msgstr "Ðеверный код RSL в cache" #~ msgid "Broken RSL in hostname" #~ msgstr "Ðеверный код RSL в hostname" #~ msgid "Broken RSL in dryrun" #~ msgstr "Ðеверный код RSL в dryrun" #~ msgid "Broken RSL in credentialserver" #~ msgstr "Ðеверный код RSL в credentialserver" #~ msgid "Broken RSL in acl" #~ msgstr "Ðеверный код RSL в acl" #, fuzzy #~ msgid "Failed evaluating RSL" #~ msgstr "Проверка правил фильтра: " #, fuzzy #~ msgid "UNKNOWN RSL STRUCTURE" #~ msgstr "Ð´ÐµÐºÑ€ÐµÐ¼ÐµÐ½Ñ‚Ð°Ñ†Ð¸Ñ ÑƒÐºÐ°Ð·Ð°Ñ‚ÐµÐ»Ñ Ð½Ð° неизвеÑтную Ñтруктуру" #, fuzzy #~ msgid "UNKNOWN RLS ELEMENT" #~ msgstr "ÐеизвеÑтный атрибут \"%s\"=\"%s\" в Ñ‚Ñге <%s>" #, fuzzy #~ msgid "Could not write the private key!" #~ msgstr "локаль '%s' не может быть уÑтановлена." #, fuzzy #~ msgid "Host not found: %s" #~ msgstr "Сервер не найден" #, fuzzy #~ msgid "Migration request failed" #~ msgstr "ошибка запроÑа об обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #, fuzzy #~ msgid "Migration request succeed" #~ msgstr "уÑпешный Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± обрыве иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" #, fuzzy #~ msgid "There was no response to a migration request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± отправке задачи" #, fuzzy #~ msgid "A job resuming request failed" #~ msgstr "Ошибка запроÑа DDE poke" #, fuzzy #~ msgid "A job resuming request succeed" #~ msgstr "ÐеизвеÑтный тип заданиÑ." 
#, fuzzy #~ msgid "There was no response to a job resuming request" #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± удалении результатов работы задачи" #, fuzzy #~ msgid "RegistrationCollector create: %s" #~ msgstr "Код возврата" #, fuzzy #~ msgid "Job description successfully stored in sandbox" #~ msgstr "опиÑание заÑылаемой задачи: %s" #, fuzzy #~ msgid "Maximal UR Set size is: %d" #~ msgstr "Файл '%s' имеет размер в ноль байт - иÑпользуетÑÑ %s." #, fuzzy #~ msgid "Deleting %s" #~ msgstr "Идентификатор A-REX: %s" #, fuzzy #~ msgid "Reporting interval is: %d s" #~ msgstr "&Изменить пÑевдоним..." #~ msgid "show information about clusters and queues" #~ msgstr "вывеÑти информацию о вычиÑлительных реÑурÑах и очередÑÑ…" #, fuzzy #~ msgid " Rank: %s" #~ msgstr "Положение в очереди" #, fuzzy #~ msgid "Error during the XML generation!" #~ msgstr "Ошибка при нахождении различий" #~ msgid " element: " #~ msgstr " Ñлемент:" #, fuzzy #~ msgid "Can not access user's home directory: %s (%s)" #~ msgstr "" #~ "%s: каталог %s не удалён (ÑвлÑетÑÑ Ð´Ð¾Ð¼Ð°ÑˆÐ½Ð¸Ð¼ каталогом Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s)\n" #, fuzzy #~ msgid "User's home directory is not a directory: %s" #~ msgstr "" #~ "%s: каталог %s не удалён (ÑвлÑетÑÑ Ð´Ð¾Ð¼Ð°ÑˆÐ½Ð¸Ð¼ каталогом Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s)\n" #, fuzzy #~ msgid "Can not create ARC user config directory: %s (%s)" #~ msgstr "Ðевозможно Ñоздать пользовательÑкий каталог конфигурации Dia" #, fuzzy #~ msgid "ARC user config directory is not a directory: %s" #~ msgstr "Ðевозможно Ñоздать пользовательÑкий каталог конфигурации Dia" #~ msgid "Created empty ARC user config file: %s" #~ msgstr "Создан пуÑтой файл Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»ÑŒÑкой конфигурации ARC: %s" #, fuzzy #~ msgid "CertificatePath defined, but not KeyPath" #~ msgstr "метка %q+D определена, но не иÑпользуетÑÑ" #, fuzzy #~ msgid "Delegation handler with service role starts to process" #~ msgstr "Поддержка ETRN не наÑтроена.\n" #~ msgid "Shepherd chosen:" #~ msgstr "Выбран Чабан:" #~ msgid "Couldn't acquire transaction lock" #~ msgstr "Сбой Ð¿Ñ€ÐµÐ´Ð¾Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ð¸Ð¸" #~ msgid "Source Path size: %d" #~ msgstr "Длина пути в иÑточнике: %d" #~ msgid "Registration for Service: %s" #~ msgstr "РегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ ÑервиÑа: %s" #~ msgid "Outdated data: %s" #~ msgstr "УÑтаревшие данные: %s" #~ msgid "SOAP operation not supported: %s" #~ msgstr "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ñ SOAP не поддерживаетÑÑ: %s" #~ msgid "Route" #~ msgstr "Маршрут" #~ msgid "Routing to %s" #~ msgstr "ÐœÐ°Ñ€ÑˆÑ€ÑƒÑ‚Ð¸Ð·Ð°Ñ†Ð¸Ñ Ðº %s" #~ msgid "error on seek" #~ msgstr "ошибка поиÑка" #~ msgid "\tCache data dir : %s" #~ msgstr "\tКаталог Ñ ÐºÑшем данных: %s" #, fuzzy #~ msgid "Can not parse PKCS12 file" #~ msgstr "Файл \"%file:1\" не может быть открыт" #, fuzzy #~ msgid "No per-job directory specified" #~ msgstr "Ðе задано опиÑание задачи" #, fuzzy #~ msgid "Number of possible targets : %d" #~ msgstr "Сортировка назначений" #, fuzzy #~ msgid " ReferenceTime.value: %s" #~ msgstr "некорректное значение %%C" #~ msgid "WSRF request failed" #~ msgstr "Ошибка запроÑа WSRF" #, fuzzy #~ msgid "path to CA directory" #~ msgstr "Путь к каталогу Ð´Ð»Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ñ‹Ñ… файлов" #~ msgid "" #~ "\n" #~ "\n" #~ "!!!!check_again" #~ msgstr "" #~ "\n" #~ "\n" #~ "!!!!check_again" #~ msgid "" #~ "\n" #~ "\n" #~ "!!!!do_delete" #~ msgstr "" #~ "\n" #~ "\n" #~ "!!!!do_delete" #, fuzzy #~ msgid "Can't acquire delegation context" #~ msgstr "%s: Ðевозможно получить контекÑÑ‚ Ð´Ð»Ñ %s" #~ msgid "" #~ "Argumentd to -i has the format Flavour:URL e.g.\n" #~ 
"ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Argument to -c has the format Flavour:URL e.g.\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgstr "" #~ "Ðргумент опции -i задаётÑÑ Ð¿Ð¾ форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/mds-vo-name=sweden,O=grid\n" #~ "CREAM:ldap://cream.grid.upjs.sk:2170/o=grid\n" #~ "\n" #~ "Ðргумент опции -c задаётÑÑ Ð¿Ð¾ форме Flavour:URL, например:\n" #~ "ARC0:ldap://grid.tsl.uu.se:2135/nordugrid-cluster-name=grid.tsl.uu.se,Mds-" #~ "Vo-name=local,o=grid" #~ msgid "Creating an A-REX client." #~ msgstr "СоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ A-REX" #~ msgid "Client side MCCs are loaded." #~ msgstr "Подгружены клиентÑкие компоненты цепи Ñообщений" #~ msgid "Failed to find delegation credentials in client configuration." #~ msgstr "Ðе удалоÑÑŒ обнаружить доверенноÑти в конфигурации клиента" #~ msgid "There were no response to a submission request." #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± отправке задачи" #~ msgid "A response to a submission request was not a SOAP message." #~ msgstr "Ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ запуÑке задачи не ÑвлÑетÑÑ Ñообщением SOAP" #~ msgid "Creating and sending a status request." #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии" #~ msgid "There were no response to a status request." #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии" #~ msgid "Creating and sending a service status request." #~ msgstr "Создание и отправка запроÑа о ÑоÑтоÑнии Ñлужбы" #~ msgid "There were no response to a service status request." #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ ÑоÑтоÑнии Ñлужбы" #~ msgid "Creating and sending request to terminate a job." #~ msgstr "Создание и отправка запроÑа о прерывании задачи" #~ msgid "There was no response to a job termination request." #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ прерывании задачи" #~ msgid "There was no response to a job cleaning request." #~ msgstr "Ðе поÑтупил ответ на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ð± очиÑтке задачи" nordugrid-arc-7.1.1/po/PaxHeaders/de.gmo0000644000000000000000000000013215067751432015062 xustar0030 mtime=1759499034.520534928 30 atime=1759499034.519506462 30 ctime=1759499034.637585674 nordugrid-arc-7.1.1/po/de.gmo0000644000175000002070000007671015067751432016777 0ustar00mockbuildmock00000000000000Þ•:ì £¼HI L V9dž´ÎLç94"n‘I«õ2&9,`%%³4Ù5%D?jHª>ó526hŸ¿Ö"ñ+)@:j/¥PÕ& ; [ x “ ´ È Û %ç !.!E!b!y!–!¬!È! â!î!0þ!*/"Z"p"Š"œ"!¼"Þ"!õ"#H'# p#"~#/¡#8Ñ#= $#H$$l$%‘$-·$å$!û$%1%:N%>‰%:È%!&4%&5Z&/&3À&/ô&$$''I')q'›'-·'1å'-(E()b(Œ(¥(4½(6ò(6))"`)*ƒ) ®)#Ï) ó)/ÿ)/*2J*;}*<¹*ö*.+ 6+ C+d+„+“+«+»+Î+(Þ+,,7,T,,q,#ž,&Â,é,]-(_-=ˆ-€Æ- G.Ih.'².!Ú.‘ü."Ž/±/Â/×/à/&ñ/ 0–&0%½0#ã0 1(1 E1Q1a1({11¤1)Ö122+2:2 I2U26f262*Ô2 ÿ2 3 3%383"O3)r3(œ3Å3Þ3õ34/4 E4 P4 [4 h4 u4#4'¥4#Í49ñ49+5Le5M²5N6%O6'u6'6Å6iß6JI7A”71Ö7_8Lh8,µ88â879S9n99‰9Ã9×9é9$:*:J:i: „:!¥:(Ç:ð: ;(;=;[;Bp;$³;$Ø;ý;<!<?< N<X<a<1h<$š<'¿<5ç<*=(H=0q=#¢=Æ=/Õ= >>>6>$R>Hw>À>Ó> Ü>ý>?!7?/Y?)‰?;³? 
ï?-@*>@<i@ ¦@5±@=ç@8%A^A6pA8§A+àA B6B)IB:sB-®B3ÜB&C57C(mC–CµCÄCÔCÖC.ÝC* D)7DaDtDyDV“DêDùDEE:ERE$mE’E(¥E/ÎE.þE.-F7\F”F ²F%¼F0âFGG:G)>GhGHjG³I¶I ÈI;ÖIJ(JBJ``J8ÁJ%úJ KSX2pX9£X!ÝX9ÿXF9Y3€Y"´Y9×YZ1Z9OZA‰ZAËZ* [*8[c[[ œ[D§[ì[: \?F\?†\Æ\:Ø\]""](E]n]]œ]¯]Ç]2Ú] ^&^E^d^'~^+¦^,Ò^ÿ^^_-x_D¦_—ë_%ƒ`K©`,õ`&"a¯Ia&ùa b:bXb`b.sb¢b¨·b(`c$‰c#®c"Òcõc d!!d)Cd7md2¥dØdçdee e,eV=eM”e<âe f +f 7f"Cfff&f/¨f*Øfgg4gNgjg ‰g ”g  g ­g ºg*Æg/ñg%!h?Gh>‡hUÆhZiWwi)Ïi(ùi%"jHjþcjMbk1°k3âk\lXsl0Ìl@ýl9>mxm”mQ±mnn-n-Kn%yn*Ÿn'Ên%òn#o0{X{&n{ •{7¡{7Ù{6|/H|Fx|%¿| å|(ï|0} I}5V}Œ}5}Æ}z%8 °:9,â*¥NêV]ŸŒñ|uæ½w+ 71 ¸6ÁOùë…£¢ŽAÒ+ƒ•Pº)Y §üÙ(ïÂa!²œ~‘µ4¿ bc$äÔ/=Í[ò„Îè©çÜL»/CßÞF&2ž¡¤(2rxÛ*·& ”À8—³kÉ ˆ'@ Kõø)-ö´¬¶“ÿBReÖÏúþ:­¼–ðÌ0-gq¨#X ".fdáhl¹ìn’5yjsvDé˜à®Ø._S<!'÷ÓUš;% ýÃËZoÕ«›ôiGÑ}#‚ÝT\{±íÆ$"H 74p†Å‹MÊ0¾™ ?¦,mtQ`^û6Š€EתȉóW9åÄã>1 3I5îJ¯‡ÐÚÇ3%s%s failed%s version %s%s: File request %s in SRM queue. Sleeping for %i secondsARC Auth. request: %sARC delegation policy: %sAdding location: %s - %sArcAuthZ: failed to initiate all PDPs - this instance will be non-functionalAre you sure you want to synchronize your local job list?Authorized from remote pdp serviceAuthorized from xacml.pdpBoth of CACertificatePath and CACertificatesDir elements missing or emptyBroker %s loadedCan not create function: FunctionId does not existCan not dynamically produce AlgFacrotyCan not dynamically produce AttributeFactoryCan not dynamically produce EvaluatorCan not dynamically produce FnFactoryCan not find element with proper namespaceCan not find element with proper namespaceCan not open job description file: %sCan not parse classname for AttributeFactory from configurationCan not parse classname for CombiningAlgorithmFactory from configurationCan not parse classname for FunctionFactory from configurationCan not parse classname for Policy from configurationCan not parse classname for Request from configurationCan't create delegation contextCan't read from sourceCan't write to destinationCancelling synchronization requestCannot convert module name to Python stringCannot create argument of the constructorCannot find under response soap message:Cannot find content under response soap messageCannot find file at %s for getting the proxy. 
Please make sure this file exists.Cannot import moduleCheck: looking for metadata: %sCheck: obtained checksum: %sCheck: obtained size: %lliChecking URL returned by SRM: %sClosed successfullyClosing connectionCommand: %sCould not determine version of serverCouldn't parse benchmark XML: %sCreating a http clientCreating a pdpservice clientCreating a soap clientCreating and sending requestCreating directory %sCurrent transfer FAILED: %sCurrent transfer completeDCAU failedDCAU failed: %sDN %s is cached and is valid until %s for URL %sDN %s is cached but has expired for URL %sData transfer abortedData transfer aborted: %sDelegation ID: %sDelegation authorization failedDelegation role not supported: %sDelegation service: %sDelegation type not supported: %sDestination: %sDirectory size is larger than %i files, will have to call multiple timesDirectory: %sDuplicate replica found in LFC: %sError opening lock file %s in initial check: %sEvaluator does not support loadable Combining AlgorithmsEvaluator does not support specified Combining Algorithm - %sEvaluator for ArcPDP was not loadedEvaluator for GACLPDP was not loadedEvaluator for XACMLPDP was not loadedFATAL, ERROR, WARNING, INFO, VERBOSE or DEBUGFailed authenticatingFailed connecting to server %s:%dFailed reading dataFailed reading list of filesFailed to authenticate SAML Token inside the incoming SOAPFailed to authenticate Username Token inside the incoming SOAPFailed to authenticate X509 Token inside the incoming SOAPFailed to connect to server %s:%dFailed to convert security information to ARC policyFailed to convert security information to ARC requestFailed to generate SAML Token for outgoing SOAPFailed to generate Username Token for outgoing SOAPFailed to generate X509 Token for outgoing SOAPFailed to initialize OpenSSL libraryFailed to initialize main Python threadFailed to initiate delegation credentialsFailed to open data channelFailed to parse SAML Token from incoming SOAPFailed to parse Username Token from incoming SOAPFailed to parse X509 Token from incoming SOAPFailed to read object %s: %sFailed to remove cache per-job dir %s: %sFailed to store ftp fileFailed to transfer dataFailed to verify X509 Token inside the incoming SOAPFailed to verify the signature under Failed to verify the signature under Failed uploading local input filesFailed while finishing reading from sourceFailed while reading from sourceFailed while writing to destinationFailure: %sFile delete failed, attempting directory deleteFile is not accessible: %sFile type is not available, attempting file deleteFiles associated with request token %s aborted successfullyFiles associated with request token %s released successfullyGlobus error: %sGrid identity is mapped to local identity '%s'Identity: %sInitialized %u-th Python serviceInitiating delegation procedureInvalid EffectInvalid JobDescription:Invalid URL: %sInvalid class nameInvalid url: %sJob %s does not report a resumable stateJob resuming successfulJob submission summary:Job submitted with jobid: %sLoading %u-th Python serviceLocations are missing in destination LFC URLMLSD is not supported - trying NLSTMain Python thread was not initializedMemory allocation errorMissing CertificatePath element or ProxyPath element, or is missingMissing or empty CertificatePath elementMissing or empty CertificatePath or CACertificatesDir elementMissing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authenticationMissing or empty KeyPath elementMissing or empty KeyPath 
element, or is missingMissing or empty PasswordSource elementMissing or empty Username elementMissing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - (Grid)FTP code is disabled. Report to developers.Missing security object in messageNLST/MLSD failedNLST/MLSD failed: %sName: %sNo SOAP responseNo authorization response was returnedNo jobs givenNo policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration.No target available inside the policyNo target available inside the ruleOperation completed successfullyOutgoing Message is not SOAPPASV failedPASV failed: %sPDP: %s can not be loadedPassword encoding type not supported: %sPath %s is invalid, creating required directoriesPolicy Decision Service invocation failedPolicy is emptyProxy generation succeededProxy path: %sProxy type: %sRemoving %sRequest is emptyRequest is reported as ABORTED, but all files are doneRequest is reported as ABORTED, since it was cancelledRequest is reported as ABORTED. Reason: %sRequest: %sResponse: %sResponse: %sReusing connectionSOAP invocation failedSRM did not return any informationSRM did not return any useful informationSRM returned no useful Transfer URLs: %sSSL error: %d - %s:%s:%sServer SRM version: %sServer implementation: %sService is waiting for requestsSome transfers failedSource: %sStart testStartReadingStartWritingSubject: %sSucceeded to authenticate SAMLTokenSucceeded to authenticate UsernameTokenSucceeded to authenticate X509TokenSucceeded to verify the signature under Succeeded to verify the signature under Target %s removed by FastestQueueBroker, doesn't report number of free slotsTarget %s removed by FastestQueueBroker, doesn't report number of total slotsTarget %s removed by FastestQueueBroker, doesn't report number of waiting jobsThe Response is not going to this endThe Service advertises no Health State.The Service doesn't advertise its Type.The StatusCode is SuccessThe arccat command performs the cat command on the stdout, stderr or grid manager's error log of the job.The arccp command copies files to, from and between grid storage elements.The arcget command is used for retrieving the results from a job.The arckill command is used to kill running jobs.The arcls command is used for listing files in grid storage elements and file index catalogues.The delegated credential got from delegation service is stored into path: %sThe request has passed the policy evaluationThere are %d requests, which satisfy at least one policyThere is %d subjects, which satisfy at least one policyThere was no HTTP responseThere was no SOAP responseThis seems like a temporary error, please try again laterTransfer FAILED: %sTransfer completeType is file, calling srmRmUnauthorized from remote pdp serviceUnsupported destination url: %sUnsupported protocol in url %sUnsupported source url: %sUpdateCredentials: request = %sUpdateCredentials: response = %sUsername Token handler is not configuredUsing insecure data transferUsing secure data transferUsing space token %sVOMS attribute parsing failedWaiting for responseWarning: Using SRM protocol v1 which does not support space tokensWrong number of parameters specifiedX509 Token handler is not configuredXACML request: %sYour identity: %sYour proxy is valid until: %s[filename ...][job ...]all jobsbrokercheck_ftp: failed to get file's modification timecheck_ftp: failed to get file's sizecheck_ftp: globus_ftp_client_get failedcheck_ftp: 
globus_ftp_client_modification_time failedcheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size failedcheck_ftp: timeout waiting for modification_timecheck_ftp: timeout waiting for sizeclass name: %sconfiguration file (default ~/.arc/client.conf)directorydirnamedisplay all available metadatado not ask for verificationdo not try to force passive transferdownload directory (the job directory will be created in this directory)echo: Unauthorizedfilenameftp_complete_callback: error: %sftp_complete_callback: successftp_read_callback: successftp_read_thread: Globus error: %sftp_read_thread: for_read failed - aborting: %sftp_read_thread: get and register buffersftp_read_thread: too many registration failures - abort: %sftp_read_thread: waiting for eofftp_write_thread: for_write failed - abortingftp_write_thread: get and register buffersglobus_ftp_client_operationattr_set_authorization: error: %shourhoursinit_handle: globus_ftp_client_handleattr_init failedinit_handle: globus_ftp_client_handleattr_set_gridftp2 failedinit_handle: globus_ftp_client_operationattr_init failedinput is not SOAPjobdescription file describing the job to be submittedjobdescription string describing the job to be submittedkeep the files on the server (do not clean)levellist_files_ftp: failed to get file's modification timelist_files_ftp: failed to get file's sizelist_files_ftp: globus_ftp_client_modification_time failedlist_files_ftp: globus_ftp_client_size failedlist_files_ftp: looking for modification time of %slist_files_ftp: looking for size of %slist_files_ftp: timeout waiting for modification_timelist_files_ftp: timeout waiting for sizelong format (more information)minuteminutesmodule name: %snnumbernumber of retries before failing file transferonly select jobs whose status is statusstroperate recursively up to specified leveloutput is not SOAPpathprint version informationremove the job from the local list of jobs even if the job is not found in the infosyssecondsecondssecondssetting file %s to size %llushow URLs of file locationsshow progress indicatorshow the stderr of the jobshow the stdout of the job (default)source destinationstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get failedstart_reading_ftp: globus_thread_create failedstart_writing_ftp: globus_thread_create failedstart_writing_ftp: mkdir failed - still trying to writestart_writing_ftp: put failedstatusstrstop_reading_ftp: aborting connectionstop_reading_ftp: waiting for transfer to finishstringtimeout in seconds (default 20)urluse secure transfer (insecure by default)yProject-Id-Version: Arc Report-Msgid-Bugs-To: support@nordugrid.org PO-Revision-Date: 2010-02-25 19:18+0100 Last-Translator: Steffen Möller Language-Team: German Language: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-Generator: KBabel 1.11.4 Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2); X-Poedit-Language: Russian X-Poedit-KeywordsList: msg:2;IString:1;istring:1 X-Poedit-Basepath: /home/oxana/CVSROOT/ARC1 X-Poedit-SearchPath-0: src %s%s fehlgeschlagen%s version %s%s: Datei Anfrage %s in SRM queue. Schlage für %i SekundenARC Auth. 
Anfrage: %sARC delegation policy: %sFüge location hinzu: %s - %sArcAuthZ: Fehler bei Initiierung wenigstens einer PDP - diese Instanz wird nicht funktional seinSoll die lokale job list wirklich synchronisiert werden?Authorisiert durch remote pdp serviceAuthorisiert durch xaml.pdpSowohl CACertificatePath als auch CACertificatesDir Elemente sind fehlend oder leerBroker %s geladenKann Funktion nicht anlegen: FunctionId existiert nichtKann AlgFactory nicht dynamisch anlegenKann AttributeFactory nicht dynamisch anlegenKann Evaluator nicht dynamisch produzierenKann FnFactory nicht dynamisch anlegenKann element mit passendem namespace nicht findenKann element mit passendem namespace nicht findenKonnte Datei mit Job Beschreibung nicht öffnen: %sKonnte classname für AttributeFactory nicht von Konfiguration parsenKonnte classname für CombiningAlgorithmFactory nicht von Konfiguration parsenKonnte classname für FunctionFactory nicht von Konfiguration parsenKonnte classname für Policy nicht von Konfiguration parsenKonnte classname für Request nicht von Konfiguration parsenKann delegation context nicht anlegenKann nicht von Quelle lesenKann nicht zu Ziel schreibenAbbruch der SynchronisationsanfrageKann Modul name nicht zu Python Zeichenkette konvertierenKann Argument für den Konstruktor nicht anlegenKann in SOAP-Antwort nicht findenKann Inhalt in SOAP-Antwort nicht findenKann Datei nicht bei %s finden, um den Proxy zu erhalten. Bitte stellen Sie sicher, dass diese Datei existiert.Kann Modul nicht importierenCheck: looking für Metadata: %sCheck: erhielt checksum: %sCheck: erhielt Größe: %lliÜberprüfen der URL zurückgegeben von SRM: %sVerbindung erfolgreich geschlossenSchließe VerbindungKommando: %sKonnte Version des Server nicht bestimmenKonnte benchmark XML nicht parsen: %sLege HTTP Client anLege pdpservice client anLege SOAP Clietn anErstellen und senden von AnfrageLege Verzeichnis %s anAktueller Transfer SCHLUG FEHL: %sAktueller Transfer vollständigDCAU fehlgeschlagenDCAU fehlgeschlagen: %sDN %s wird gecacht und ist gültig bis %s für Datei %sDN %s wird gecacht aber ist abgelaufen für URL %sDatentransfer abgebrochenDatentransfer abgebrochen: %sDelegation ID: %sDelegation Authorisierung fehlgeschlagenDelegation role nicht unterstützt: %sDelegation service: %sDelegation Typ nicht unterstützt: %sZiel: %sVerzeichnis enthält mehr als %i Dateien, werde Aufruf mehrfach ausführenVerzeichnis: %sDoppelte replica gefunden in LFC: %sFehler bei Öffnen von Lock-Datei %s in initialer Überprüfung: %sEvaluator unterstützt ladare Combining Algorithms nichtEvaluator unterstützt die angegebenen Combining Algorithms nicht - %sEvaluator für ArcPDP wurde nicht geladenEvaluator für GACLPDP wurde nicht geladenEvaluator für XACMLPDP wurde nicht geladenFATAL, ERROR, WARNING, INFO, VERBOSE or DEBUGFehler bei AuthentisierenFehler bei Verbinden zu server %s:%dFehler bei Lesen von DatenFehler bei Lesen von DateilisteKonnte SAML Token aus eingehender SOAP nicht authentifizierenFehler bei der Authentifikation des Username Token in der einngehenden SOAPFehler bei Authentifizieren von X509 Token in eigehendem SOAPFehler bei Verbinden zu server %s:%dFehler bei Konvertieren von security information für ARC policyFehler bei Konvertierung von security information für ARC AnfrageKonnte SAML Token für ausgehendes SOAP nicht generierenFehler bei Erstellen von Nutzernamen Token für ausgehende SOAPFehler bei Generieren von X509 Token für ausgehende SOAPFehler bei Initialisierung von OpenSSL BibliothekFehler bei Initialisierung des main Python 
ThreadsFehler bei der Initialisierung der delegation credentialsFehler bei Öffnen von DatenkanalKonnte SAML Token nicht aus eingehender SOAP herausparsenKonnte Username Token nicht von eingehender SOAP Nachricht herauslesenFehler bei Parsen von X509 Token in eigehendem SOAPFehler bei Lesen von Objekt %s: %sFehler bei Entfernen von cache per-job Verzeichnis %s: %sFehler bei Ablage von FTP DateiFehler bei Transfer von DatenFehler bei Verifizieren von X509 Token in eigehendem SOAPFehler bei der Überprüfung der Signatur unter Fehler bei der Überprüfung der Signatur unter Konnte lokale Inputdateien nicht hochladenFehler bei Abschluß des Lesens von QuelleFehler bei Lesen von QuelleFehler bei Schreiben zu ZielFehler: %sLöschen von Datei schlug fehl, versuche als Verzeichnis zu löschenDatei ist nicht zugreifbar: %sDateitype ist nicht verfügbar, versuche Datei zu löschenDateien assoziiert mit Anfrage Token %s erfolgreich abgebrochenDateien assoziiert mit Anfrage Token %s erfolgreich freigegebenGlobus Fehler: %sGrid Identität wird zugewiesen zu lokaler Identität '%s'Identität: %sInitialisierte %u-th Python servceInitialisierung der Delegations-ProzedurUngültiger EffektUngültige JobDescription:Ungültige URL: %sUngültiger KlassennameUngültige url: %sJob %s berichtet nicht von einem resumable ZustandJob erfolgreich resumed.Job Hochladen Zusammenfassung:Job hochgeladen mit Job ID: %sLade %u-th Python ServiceLocations fehlen in destination LFC URLMLSD ist nicht unterstützt - versuche NLSTMain Python Thread wurde nicht initialisiertSpeicherallokationsfehlerFehlendes CertificatePath Element oder ProxyPath Element, oder fehltFehlendes oder leeres CertificatePath ElementFehlendes oder leeres CertificatePath oder CACertificatesDir ElementFehlendes oder leeres CertificatePath oder CACertificatesDir Element; werde nur die Signature überprüfen, die Nachricht jedoch nicht authentifizierenFehlendes oder leeres KeyPath ElementFehlendes oder leeres KeyPath Element, oder fehltFehlendes oder leeres PasswordSource ElementFehlendes oder leeres Username ElementFehlende Referenz zu factory und/doer Module. Es ist unsicher, Globus im nicht-persistenten Modus zu nutzen - (Grid)FTP code wurde disabled. Bitte die Entwickler informieren.Fehlendes security Objekt in NachrichtNLST/UMLSD fehlgeschlagenNLST/UMLSD fehlgeschlagen: %sName %sKeine SOAP AntwortEs wurde keine authorization response erwidertKeine Jobs angegebenKeine Policy Datei oder DNs angegeben für simplelist.pdp, bitte setzen Sie ein location Attribut oder zumindest ein DN Element für den PDP Knoten in der KonfigurationKein Ziel innerhalb der Policy vorhandenKein Ziel verfügbar in dieser RegelOperation erfolgreich abgeschlossenAusgehende Nachricht ist kein SOAPPASV fehlgeschlagenPASV fehlgeschlagen: %sPDP: %s kann nicht geladen werdenPasswort Kodierung nicht unterstützt: %sPfad %s ist ungültig, lege benötigte Verzeichnisse anAusführen des Policy Decision Service schlug fehlPolicy is leerProxy erfolgreich angelegtProxy Pfad: %sProxy Typ: %sEntferne %sAnfrage ist leerAnfrage wurde berichtet als ABORTED (abgebrochen), aber alle Dateien wurden bearbeitetAnfrage wurde berichtet als ABORTED (abgebrochen), denn sie wurde abgebrochenAnfrage wurde berichtet als ABORTED (abgebrochen). 
Grund: %sAnfrage: %sAntwort: %sAntwort: %sWiederholte Nutzung von VerbindungSOAP Aufruf fehlgeschlagenSRM lieferte keine Information zurückSRM lieferte keinerlei gebrauchbare InformationSRM gab keine nützliche Transfer URLs: %sSSL Fehler: %d - %s:%s:%sServer SRM version: %sServer Implementation: %sService wartet auf AnfragenEinige Transfers schlugen fehlQuelle: %sStarte TestStartReadingStartWritingSubjekt: %sErfolreiche Anthentifikation von SAMLTOkenErfolgreiche Authentifikation des UsernameTokenX509Token erfolgreich authentifiziertErfolgreiche Überprüfung der Signatur unter Erfolgreiche Verifikation der Signatur unter Ziel %s entfernt durch FastestQueueBroker, die Anzahl freier slots wird nicht genanntZiel %s entfernt durch FastestQueueBroker, die Anzahl vorhandener slots wird nicht genanntZiel %s entfernt durch FastestQueueBroker, die Anzahl wartender Jobs wird nicht genanntDie Antwort geht nicht bis zu diesem EndeDer Service gibt keinen Health State an.Der Service gibt seinen Typ nicht an.Der StatusCode ist SuccessЭта команда предназначена Ð´Ð»Ñ Ð²Ñ‹Ð²Ð¾Ð´Ð° на Ñкран Ñообщений Ñтандартного выхода, Ñтандартной ошибки или ошибок ÑиÑтемы при иÑполнении задачиMit arccp werden Dateien zu, von und zwischen grid storage Elementen kopiert.Mit arcget erhält man die Ergebnisse eines Jobs.Mit arckill lassen sich laufenden Prozesse beenden.Mit arcls werden Verzeichniss auf grid storage Elementen und Datei Index Katalogen angegebenDas delegierte credential wie erhalten von delegation service is abgelegt unter Pfad: %sDie Anfrage hat die Policy Evaluierung bestandenEs gibt %d Anfragen, die wenigstens einer Policy Anfrage genügtEs gibt %d Subjekte, die wenigstens eine Policy erfüllenKeine HTTP Antwort erhaltenKeine SOAP response erhaltenDies scheint ein vorübergehender Fehler zu sein, bitte später nochmal probierenTransfer FEHLER: %sTransfer vollständigTyp ist Datei, rufe srmRm aufNicht authorisiert von entferntem PDP serviceNicht unterstützte URL für Ziel: %sNicht-unterstzütztes Protrokoll in URL %sNicht unterstützte URL für Quelle: %sUpdateCredentials: Ð·Ð°Ð¿Ñ€Ð¾Ñ = %sUpdateCredentials: отзыв = %sNutzernamen Token handler ist nicht konfiguriertNutze unsicheren DatentransferNutze sicheren DatentransferNutze space token %sKonnte VOMS Attribut nicht herauslesenWarte vor AntwortWarnung: Nutze SRM Protokol v1 das keine space tokens unterstütztFalsche Anzahl an Parametern übertragenX509 Token handler ist nicht konfiguriertXACML Anfrage: %sIhre Identität: %sIhr Proxy ist gültig bis: %s[dateiname ...][Job ...]alle JobsBrokercheck_ftp: konnte Modification time von Datei nicht erhaltencheck_ftp: konnten Dateigröße nicht bestimmencheck_ftp: globus_ftp_client_get fehlgeschlagencheck_ftp: globus_ftp_client_modification_time fehlgeschlagencheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size fehlgeschlagencheck_ftp: Zeitüberschreitung bei Warten auf modification_timecheck_ftp: Zeitüberschreitung bei Warten für GrößeKlassenname: %sKonfigurationsdatei (Vorteinstellung ~/.arc/client.conf)VerzeichnisVerzeichnisnamezeige alle verfügbare Metadatenfrage nicht nach Verifikationversuche nicht, passiven Transfer zu erzwigenDownload-Verzeichnis (das Job-Verzeichnis wird in diesem Verzeichnis abgelegt)echo: UnauthorisiertDateinameftp_complete_callback: Fehler: %sftp_complete_callback: erfolgreichftp_read_callback: Erfolgftp_read_thread: Globus Fehler: %sftp_read_thread: for_read fehlgeschlagen - Abbruch: %sftp_read_thread: beziehe und registriere Pufferftp_read_thread: zu viele Registrierungsfehler - 
nordugrid-arc-7.1.1/po/PaxHeaders/en@quot.header0000644000000000000000000000013215067751332016552 xustar0030 mtime=1759498970.319382414 30 atime=1759498970.318530925 30 ctime=1759499034.619577951 nordugrid-arc-7.1.1/po/en@quot.header0000644000175000002070000000226315067751332020457 0ustar00mockbuildmock00000000000000
# All this catalog "translates" are quotation characters.
# The msgids must be ASCII and therefore cannot contain real quotation
# characters, only substitutes like grave accent (0x60), apostrophe (0x27)
# and double quote (0x22). These substitutes look strange; see
# http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html
#
# This catalog translates grave accent (0x60) and apostrophe (0x27) to
# left single quotation mark (U+2018) and right single quotation mark (U+2019).
# It also translates pairs of apostrophe (0x27) to
# left single quotation mark (U+2018) and right single quotation mark (U+2019)
# and pairs of quotation mark (0x22) to
# left double quotation mark (U+201C) and right double quotation mark (U+201D).
#
# When output to an UTF-8 terminal, the quotation characters appear perfectly.
# When output to an ISO-8859-1 terminal, the single quotation marks are
# transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to
# grave/acute accent (by libiconv), and the double quotation marks are
# transliterated to 0x22.
# When output to an ASCII terminal, the single quotation marks are
# transliterated to apostrophes, and the double quotation marks are
# transliterated to 0x22.
#
nordugrid-arc-7.1.1/po/PaxHeaders/insert-header.sin0000644000000000000000000000013215067751332017232 xustar0030 mtime=1759498970.331907148 30 atime=1759498970.330531107 30 ctime=1759499034.622331043 nordugrid-arc-7.1.1/po/insert-header.sin0000644000175000002070000000124015067751332021131 0ustar00mockbuildmock00000000000000
# Sed script that inserts the file called HEADER before the header entry.
#
# At each occurrence of a line starting with "msgid ", we execute the following
# commands. At the first occurrence, insert the file. At the following
# occurrences, do nothing. The distinction between the first and the following
# occurrences is achieved by looking at the hold space.
/^msgid /{
x
# Test if the hold space is empty.
s/m/m/
ta
# Yes it was empty. First occurrence. Read the file.
r HEADER
# Output the file's contents by reading the next line. But don't lose the
# current line while doing this.
g
N
bb
:a
# The hold space was nonempty. Following occurrences. Do nothing.
x
:b
}
nordugrid-arc-7.1.1/po/PaxHeaders/quot.sed0000644000000000000000000000013215067751332015452 xustar0030 mtime=1759498970.344631816 30 atime=1759498970.343531305 30 ctime=1759499034.617019328 nordugrid-arc-7.1.1/po/quot.sed0000644000175000002070000000023115067751332017350 0ustar00mockbuildmock00000000000000
s/"\([^"]*\)"/“\1”/g
s/`\([^`']*\)'/‘\1’/g
s/ '\([^`']*\)' / ‘\1’ /g
s/ '\([^`']*\)'$/ ‘\1’/g
s/^'\([^`']*\)' /‘\1’ /g
s/“”/""/g
nordugrid-arc-7.1.1/po/PaxHeaders/en@boldquot.header0000644000000000000000000000013215067751332017413 xustar0030 mtime=1759498970.306773239 30 atime=1759498970.305530727 30 ctime=1759499034.620939611 nordugrid-arc-7.1.1/po/en@boldquot.header0000644000175000002070000000247115067751332021321 0ustar00mockbuildmock00000000000000
# All this catalog "translates" are quotation characters.
# The msgids must be ASCII and therefore cannot contain real quotation
# characters, only substitutes like grave accent (0x60), apostrophe (0x27)
# and double quote (0x22). These substitutes look strange; see
# http://www.cl.cam.ac.uk/~mgk25/ucs/quotes.html
#
# This catalog translates grave accent (0x60) and apostrophe (0x27) to
# left single quotation mark (U+2018) and right single quotation mark (U+2019).
# It also translates pairs of apostrophe (0x27) to
# left single quotation mark (U+2018) and right single quotation mark (U+2019)
# and pairs of quotation mark (0x22) to
# left double quotation mark (U+201C) and right double quotation mark (U+201D).
#
# When output to an UTF-8 terminal, the quotation characters appear perfectly.
# When output to an ISO-8859-1 terminal, the single quotation marks are
# transliterated to apostrophes (by iconv in glibc 2.2 or newer) or to
# grave/acute accent (by libiconv), and the double quotation marks are
# transliterated to 0x22.
# When output to an ASCII terminal, the single quotation marks are
# transliterated to apostrophes, and the double quotation marks are
# transliterated to 0x22.
#
# This catalog furthermore displays the text between the quotation marks in
# bold face, assuming the VT100/XTerm escape sequences.
#
nordugrid-arc-7.1.1/po/PaxHeaders/ru.gmo0000644000000000000000000000013015067751432015116 xustar0030 mtime=1759499034.386504441 30 atime=1759499034.383504395 28 ctime=1759499034.6343481 nordugrid-arc-7.1.1/po/ru.gmo0000644000175000002070000172002715067751432017033 0ustar00mockbuildmock00000000000000
[binary payload omitted: po/ru.gmo is the compiled GNU gettext message catalog produced by msgfmt from po/ru.po; the readable msgid/msgstr strings it embeds duplicate the .po catalog text]
Node access: inbound Node access: inbound and outbound Node access: outbound Notify: Old activity ID: %s Old job IDs: Operating system requirements: Other Messages: %s Other attributes: [%s], %s Outputfile element: Owner: %s PostExecutable.Argument: %s PreExecutable.Argument: %s Processing start time: %s Proxy valid until: %s Queue: %s RemoteLogging (optional): %s (%s) RemoteLogging: %s (%s) Requested CPU Time: %s Requested Slots: %d Results must be retrieved before: %s Results were deleted: %s Run time environment requirements: Service information URL: %s (%s) Session directory URL: %s Specific state: %s Stagein directory URL: %s Stageout directory URL: %s State: %s Stderr: %s Stdin: %s Stdout: %s Submitted from: %s Submitted: %s Submitting client: %s Used CPU Time: %s Used CPU Time: %s (%s per slot) Used Memory: %d Used Wall Time: %s Used Wall Time: %s (%s per slot) Waiting Position: %d [ JobDescription tester ] [ Parsing the original text ] [ emies:adl ] [ nordugrid:xrsl ] $X509_VOMS_FILE, and $X509_VOMSES are not set; User has not specified the location for vomses information; There is also not vomses location information in user's configuration file; Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory%5u s: %10.1f kB %8.1f kB/s%d Batch Systems%d Endpoints%d Shares%d mapping policies%d of %d jobs were submitted%i retries left, will wait until %s before next attempt%li seconds since lock file %s was created%s%s %s could not be created.%s > %s => false%s > %s => false: %s contains non numbers in the version part.%s > %s => true%s class is not an object%s directory created%s directory exist! Skipping job.%s failed%s is an unsupported digest type%s is not a directory%s is not an object%s made persistent%s parsing error%s plugin "%s" not found.%s version %s%s->%s%s. Cannot copy fileset%s. SQLite database error: %s%s:%s: %i%s: %s%s: %s: New job belongs to %i/%i%s: Adding new output file %s: %s%s: All %s %s successfully%s: Bring online request %s in SRM queue. 
Sleeping for %i seconds%s: Cache cleaning takes too long - %u.%06u seconds%s: Can't convert checksum %s to int for %s%s: Can't convert filesize %s to int for %s%s: Can't read list of input files%s: Can't rerun on request%s: Can't rerun on request - not a suitable state%s: Canceling job because of user request%s: Cancelling active DTRs%s: Cancelling other DTRs%s: Cannot upload two different files %s and %s to same LFN: %s%s: Checking user uploadable file: %s%s: Checksum %llu verified for %s%s: Critical error for uploadable file %s%s: DTR %s to copy file %s failed%s: DTR %s to copy to %s failed but is not mandatory%s: Delete request due to internal problems%s: Destination file %s was possibly left unfinished from previous A-REX run, will overwrite%s: Duplicate file in list of input files: %s%s: Error accessing file %s%s: Error reading file %s%s: Error reading user generated output file list in %s%s: Failed creating grami file%s: Failed obtaining local job information.%s: Failed obtaining lrms id%s: Failed parsing job request.%s: Failed reading .local and changing state, job and A-REX may be left in an inconsistent state%s: Failed reading job description: %s%s: Failed reading local information%s: Failed running cancellation process%s: Failed running submission process%s: Failed setting executable permissions%s: Failed storing failure reason: %s%s: Failed to cancel running job%s: Failed to clean up session dir%s: Failed to list output directory %s: %s%s: Failed to load evaluator for user policy %s: Failed to open file %s for reading%s: Failed to parse user policy%s: Failed to read dynamic output files in %s%s: Failed to read list of input files%s: Failed to read list of input files, can't clean up session dir%s: Failed to read list of output files%s: Failed to read list of output files, can't clean up session dir%s: Failed to read reprocessed list of input files%s: Failed to read reprocessed list of output files%s: Failed to receive job in DTR generator%s: Failed to switch user ID to %d/%d to read file %s%s: Failed to turn job into failed during cancel processing.%s: Failed to write back dynamic output files in %s%s: Failed to write list of input files%s: Failed to write list of output files%s: Failed to write list of output status files%s: Failed writing changed input file.%s: Failed writing list of output files: %s%s: Failed writing local information%s: Failed writing local information: %s%s: Failure creating data storage for child process%s: Failure creating slot for child process%s: Failure starting child process%s: Failure waiting for child process to finish%s: File %s has wrong checksum: %llu. Expected %lli%s: File request %s in SRM queue. Sleeping for %i seconds%s: Going through files in list %s%s: Invalid DTR%s: Invalid file: %s is too big.%s: Invalid size/checksum information (%s) for %s%s: Job cancel request from DTR generator to scheduler%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded.%s: Job cancellation takes too long. Failing.%s: Job failed in unknown state. Won't rerun.%s: Job failure detected%s: Job finished%s: Job has completed already. 
No action taken to cancel%s: Job is ancient - delete rest of information%s: Job is not allowed to be rerun anymore%s: Job is requested to clean - deleting%s: Job is too old - deleting%s: Job monitoring counter is broken%s: Job monitoring is lost due to removal from queue%s: Job monitoring is unintentionally lost%s: Job monitoring stop requested with %u active references%s: Job monitoring stop requested with %u active references and %s queue associated%s: Job monitoring stop success%s: Job submission to LRMS failed%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done.%s: Job submission to LRMS takes too long. Failing.%s: Job's helper exited%s: LRMS scripts limit of %u is reached - suspending submit/cancel%s: Plugin at state %s : %s%s: Plugin execution failed%s: Processing job description failed%s: PushSorted failed to find job where expected%s: Re-requesting attention from DTR generator%s: Reading output files from user generated list in %s%s: Reading status of new job failed%s: Received DTR %s to copy file %s in state %s%s: Received DTR belongs to inactive job%s: Received DTR with two remote endpoints!%s: Received data staging request to %s files%s: Received job in DTR generator%s: Received job in a bad state: %s%s: Removing %s from dynamic output file %s%s: Reprocessing job description failed%s: Requesting attention from DTR generator%s: Returning canceled job from DTR generator%s: Session directory processing takes too long - %u.%06u seconds%s: Some %s failed%s: State: %s from %s%s: State: %s: data staging finished%s: State: %s: still in data staging%s: State: ACCEPTED%s: State: ACCEPTED: dryrun%s: State: ACCEPTED: has process time %s%s: State: ACCEPTED: moving to PREPARING%s: State: ACCEPTED: parsing job description%s: State: CANCELING%s: State: FINISHING%s: State: INLRMS%s: State: INLRMS - checking for not pending%s: State: INLRMS - checking for pending(%u) and mark%s: State: INLRMS - no mark found%s: State: INLRMS: exit message is %i %s%s: State: PREPARING%s: State: SUBMIT%s: Trying remove job from data staging which does not exist%s: Trying to remove job from data staging which is still active%s: Two identical output destinations: %s%s: Unknown user policy '%s'%s: Uploadable files timed out%s: User has NOT uploaded file %s%s: User has uploaded file %s%s: checksum %s%s: delete file %s: failed to obtain file path: %s%s: delete file %s: failed to open file/dir: %s%s: job assigned for slow polling%s: job being processed%s: job for attention%s: job found while scanning%s: job will wait for external process%s: new job is accepted%s: old job is accepted%s: put file %s: %s%s: put file %s: failed to create file: %s%s: put file %s: there is no payload%s: put file %s: unrecognized payload%s: put log %s: there is no payload%s: put log %s: unrecognized payload%s: replica type %s%s: restarted FINISHING job%s: restarted INLRMS job%s: restarted PREPARING job%s: size %llu%s: state CANCELING: child exited with code %i%s: state CANCELING: job diagnostics collected%s: state CANCELING: starting child: %s%s: state CANCELING: timeout waiting for cancellation%s: state SUBMIT: child exited with code %i%s: state SUBMIT: starting child: %s%s: there is no such job: %s%s: unexpected failed job add request: %s%s: unexpected job add request: %s'(' expected')' expected'action' attribute not allowed in user-side job description'control' configuration option is no longer supported, please use 'controldir' instead'stdout' attribute must be specified when 'join' attribute 
is specified(Re)Trying next destination(Re)Trying next source(empty)(null): %d: %s: Accounting records reporter tool is not specified: Failure creating accounting database connection: Failure creating slot for accounting reporter child process: Failure starting accounting reporter child process: Metrics tool returned error code %i: %s: writing accounting record took %llu ms< %s<< %s> %sA computing resource using the GridFTP interface was requested, but %sthe corresponding plugin could not be loaded. Is the plugin installed? %sIf not, please install the package 'nordugrid-arc-plugins-globus'. %sDepending on your type of installation the package name might differ.A-REX REST: Failed to resume jobA-REX REST: State change not allowed: from %s to %sAC extension information for VO AC is invalid: ARC Auth. request: %sARC delegation policy: %sAccess list location: %sAccounting database cannot be created. Faile to create parent directory %s.Accounting database cannot be created: %s is not a directoryAccounting database connection has been establishedAccounting database file (%s) is not a regular fileAccounting database initialized successfullyAcquired auth token for %s: %sAdd location: metadata: %sAdd location: url: %sAdding endpoint (%s) to ServiceEndpointRetrieverAdding endpoint (%s) to TargetInformationRetrieverAdding endpoint (%s) to both ServiceEndpointRetriever and TargetInformationRetrieverAdding location: %s - %sAdding request token %sAdding space token %sAdding to bulk requestAddress: %sAll %u process slots usedAll DTRs finished for job %sAll results obtained are invalidAlready reading from sourceAlready writing to destinationAn error occurred during the generation of job description to be sent to %sAnother process (%s) owns the lock on file %sArc policy can not been carried by SAML2.0 profile of XACMLArcAuthZ: failed to initiate all PDPs - this instance will be non-functionalArchiving DTR %s, state %sArchiving DTR %s, state ERRORAre you sure you want to clean jobs missing information?Are you sure you want to synchronize your local job list?Assembling BLAH parser log entry: %sAssigned to authorization group %sAssigned to userlist %sAssuming - file not foundAssuming transfer is already aborted or failed.At least two values are needed for the 'inputfiles' attributeAt least two values are needed for the 'outputfiles' attributeAttempt to assign relative path to URL - making it absoluteAttempting to contact %s on port %iAttribute '%s' multiply definedAttribute 'join' cannot be specified when both 'stdout' and 'stderr' attributes is specifiedAttribute Value (1): %sAttribute Value (2): %sAttribute Value inside Subject: %sAttribute name (%s) contains invalid character (%s)Attribute name expectedAttributes 'gridtime' and 'cputime' cannot be specified togetherAttributes 'gridtime' and 'walltime' cannot be specified togetherAuthentication Request URL: %sAuthorized by arc.pdpAuthorized from remote pdp serviceAuthorized from simplelist.pdp: %sAuthorized from xacml.pdpBN_new || RSA_new failedBN_set_word failedBad URL in deliveryservice: %sBad authentication information: %sBad checksum format %sBad credential value %s in cache access rulesBad format detected in file %s, in line %sBad format in XML response from delivery service at %s: %sBad format in XML response from service at %s: %sBad format in XML response: %sBad label: "%s"Bad logicBad logic for %s - bringOnline returned ok but SRM request is not finished successfully or on goingBad logic for %s - getTURLs returned ok but SRM request is not finished 
successfully or on goingBad logic for %s - putTURLs returned ok but SRM request is not finished successfully or on goingBad name for executable: %sBad name for runtime environment: %sBad name for stderr: %sBad name for stdout: %sBad number in definedshare %sBad number in maxdeliveryBad number in maxemergencyBad number in maxpreparedBad number in maxprocessorBad number in maxtransfertriesBad number in priority element: %sBad number in remotesizelimitBad number in speedcontrolBad or old format detected in file %s, in line %sBad value for loglevelBadly formatted pid %s in lock file %sBatch System Information:Batch system information:Bearer token is available. It is preferred for job submission.Behaviour tuningBlock %s not found in configuration file %sBlockName is emptyBoosting priority from %i to %i due to incoming higher priority DTRBoth URLs must have the same protocol, host and portBoth of CACertificatePath and CACertificatesDir elements missing or emptyBring online request %s finished successfully, file is now ONLINEBring online request %s is still in queue, should waitBroken stringBroker %s loadedBroker plugin "%s" not found.Brokering and filteringBrokers available to %s:Buffer creation failed !Busy plugins found while unloading Module Manager. Waiting for them to be released.CA certificate and CA private key do not matchCA name: %sCA-certificates installed:CONTENT %u: %sCPU clock speed: %iCPU model: %sCPU vendor: %sCPU version: %sCache %s: Free space %f GBCache access allowed to %s by DN %sCache access allowed to %s by VO %sCache access allowed to %s by VO %s and group %sCache access allowed to %s by VO %s and role %sCache area free size: %i GBCache area total size: %i GBCache cleaning script failedCache creation date: %sCache file %s does not existCache file %s not foundCache file %s was deleted during link/copy, must start againCache file %s was locked during link/copy, must start againCache file %s was modified in the last second, sleeping 1 second to avoid race conditionCache file %s was modified while linking, must start againCache file is %sCache meta file %s is empty, will recreateCache meta file %s possibly corrupted, will recreateCache not found for file %sCached copy is still validCached file is locked - should retryCached file is outdated, will re-downloadCalculated checksum %s matches checksum reported by serverCalculated transfer checksum %s matches source checksumCalculated/supplied transfer checksum %s matches checksum reported by SRM destination %sCallback got failureCalling PrepareReading when request was already prepared!Calling PrepareWriting when request was already prepared!Calling http://localhost:60000/Echo using ClientSOAPCalling http://localhost:60000/Echo using httplibCalling https://localhost:60000/Echo using ClientSOAPCalling plugin %s to query endpoint on %sCan not access CA certificate directory: %s. 
The certificates will not be verified.Can not access VOMS file/directory: %s.Can not access VOMSES file/directory: %s.Can not access certificate file: %sCan not access key file: %sCan not access proxy file: %sCan not add X509 extended KeyUsage extension to new proxy certificateCan not add X509 extension to proxy certCan not allocate memoryCan not allocate memory for extension for proxy certificateCan not compute digest of public keyCan not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal formatCan not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded formatCan not convert keyUsage struct from DER encoded formatCan not convert keyUsage struct from internal to DER formatCan not convert private key to DER formatCan not convert signed EEC cert into DER formatCan not convert signed proxy cert into DER formatCan not convert signed proxy cert into PEM formatCan not convert string into ASN1_OBJECTCan not copy extended KeyUsage extensionCan not copy the subject name from issuer for proxy certificateCan not create ASN1_OCTET_STRINGCan not create BIO for parsing requestCan not create BIO for requestCan not create BIO for signed EEC certificateCan not create BIO for signed proxy certificateCan not create PROXY_CERT_INFO_EXTENSION extensionCan not create PolicyStore objectCan not create a new X509_NAME_ENTRY for the proxy certificate requestCan not create delegation crendential to delegation service: %sCan not create extension for PROXY_CERT_INFOCan not create extension for keyUsageCan not create extension for proxy certificateCan not create function %sCan not create function: FunctionId does not existCan not create name entry CN for proxy certificateCan not create the SSL Context objectCan not create the SSL objectCan not determine the install location. Using %s. Please set ARC_LOCATION if this is not correct.Can not duplicate serial number for proxy certificateCan not duplicate the subject name for the self-signing proxy certificate requestCan not dynamically produce AlgFacrotyCan not dynamically produce AttributeFactoryCan not dynamically produce EvaluatorCan not dynamically produce FnFactoryCan not dynamically produce PolicyCan not dynamically produce RequestCan not find element with proper namespaceCan not find element with proper namespaceCan not find ArcPDPContextCan not find CA certificates directory in default locations: ~/.arc/certificates, ~/.globus/certificates, %s/etc/certificates, %s/etc/grid-security/certificates, %s/share/certificates, /etc/grid-security/certificates. The certificate will not be verified. 
If the CA certificates directory does exist, please manually specify the locations via env X509_CERT_DIR, or the cacertificatesdirectory item in client.conf Can not find XACMLPDPContextCan not find certificate file: %sCan not find certificate with name %sCan not find issuer certificate for the certificate with subject %s and hash: %luCan not find key file: %sCan not find key with name: %sCan not find voms service configuration file (vomses) in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomsesCan not generate X509 requestCan not generate policy objectCan not get SAMLAssertion SecAttr from message contextCan not get extended KeyUsage extension from issuer certificateCan not get policy from PROXY_CERT_INFO_EXTENSION extensionCan not get policy language from PROXY_CERT_INFO_EXTENSION extensionCan not get the certificate typeCan not get the delegation credential: %s from delegation service: %sCan not get the issuer's private keyCan not load ARC evaluator object: %sCan not load ARC request object: %sCan not load policy objectCan not load policy object: %sCan not load request objectCan not open job description file: %sCan not open key file %sCan not parse classname for AttributeFactory from configurationCan not parse classname for CombiningAlgorithmFactory from configurationCan not parse classname for FunctionFactory from configurationCan not parse classname for Policy from configurationCan not parse classname for Request from configurationCan not parse date: %sCan not parse month: %sCan not parse time zone offset: %sCan not parse time: %sCan not read PEM private keyCan not read PEM private key: failed to decryptCan not read PEM private key: failed to obtain passwordCan not read PEM private key: probably bad passwordCan not read certificate file: %sCan not read certificate stringCan not read certificate/key stringCan not read information from the local job status fileCan not read key stringCan not set CN in proxy certificateCan not set issuer's subject for proxy certificateCan not set private keyCan not set pubkey for proxy certificateCan not set readable file for request BIOCan not set serial number for proxy certificateCan not set the lifetime for proxy certificateCan not set version number for proxy certificateCan not set writable file for request BIOCan not set writable file for signed EEC certificate BIOCan not set writable file for signed proxy certificate BIOCan not sign a EECCan't allocate memory for CA policy pathCan't convert DER encoded PROXYCERTINFO extension to internal formatCan't convert X509 request from internal to DER encoded formatCan't create delegation contextCan't delete directory %s: %sCan't delete file %s: %sCan't extract object's name from source urlCan't find LCAS functions in a library %sCan't find LCMAPS functions in a library %sCan't get policy from PROXYCERTINFO extensionCan't get policy language from PROXYCERTINFO extensionCan't get the first byte of input BIO to get its formatCan't get the first byte of input to determine its formatCan't handle URL %sCan't handle location %sCan't load LCAS library %s: %sCan't load LCMAPS library %s: %sCan't obtain configuration. Only public information is provided.Can't obtain configuration. 
Public information is disallowed for this user.Can't open configuration fileCan't parse host and/or port in response to EPSV/PASVCan't read configuration fileCan't read configuration file at %sCan't read from sourceCan't read list of destinations from file %sCan't read list of locations from file %sCan't read list of sources from file %sCan't read policy namesCan't read transfer states from %s. Perhaps A-REX is not running?Can't recognize type of configuration fileCan't recognize type of configuration file at %sCan't rename file %s: %sCan't reset the inputCan't set OpenSSL verify flagsCan't stat file: %s: %sCan't stat stdio channel %sCan't use URL %sCan't write to destinationCancellation completeCancelling DTR %s with source: %s, destination: %sCancelling active transferCancelling synchronization requestCandyPond: UnauthorizedCannot adapt job description to the submission target when information discovery is turned offCannot change owner of %s: %s Cannot change permission of %s: %s Cannot compare empty checksumCannot convert ARC module name to Python stringCannot convert ExecutionTarget (%s) to python objectCannot convert JobDescription to python objectCannot convert UserConfig to Python objectCannot convert config to Python objectCannot convert inmsg to Python objectCannot convert module name to Python stringCannot convert outmsg to Python objectCannot convert string %s to int in line %sCannot copy example configuration (%s), it is not a regular fileCannot create ExecutionTarget argumentCannot create JobDescription argumentCannot create UserConfig argumentCannot create argument of the constructorCannot create config argumentCannot create directories for log file %s. Messages will be logged to this logCannot create directory %s for per-job hard linksCannot create http payloadCannot create inmsg argumentCannot create instance of Python classCannot create outmsg argumentCannot create output of %s for any jobsCannot create output of %s for job (%s): Invalid source %sCannot create resolver from /etc/resolv.confCannot determine hostname from gethostname()Cannot determine hostname from gethostname() to generate ceID automatically.Cannot determine replica type for %sCannot determine the %s location: %sCannot find under response soap message:Cannot find ARC Config classCannot find ARC ExecutionTarget classCannot find ARC JobDescription classCannot find ARC Message classCannot find ARC UserConfig classCannot find any proxy. This application currently cannot run without a proxy. If you have the proxy file in a non-default location, please make sure the path is specified in the client configuration file. If you don't have a proxy yet, please run 'arcproxy'!Cannot find content under response soap messageCannot find custom broker classCannot find file at %s for getting the proxy. 
Please make sure this file exists.Cannot find information abouto job submission endpointCannot find local input file '%s' (%s)Cannot find service classCannot find the CA certificates directory path, please set environment variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file.Cannot find the path of the proxy file, please setup environment X509_USER_PROXY, or proxypath in a configuration fileCannot find the user certificate path, please setup environment X509_USER_CERT, or certificatepath in a configuration fileCannot find the user private key path, please setup environment X509_USER_KEY, or keypath in a configuration fileCannot get VOMS server address information from vomses line: "%s"Cannot get dictionary of ARC moduleCannot get dictionary of custom broker moduleCannot get dictionary of moduleCannot handle local user %sCannot import ARC moduleCannot import moduleCannot initialize ARCHERY domain name for queryCannot link to a remote destination. Will not use mapped URLCannot link to source which can be modified, will copy insteadCannot open BLAH log file '%s'Cannot open cache log file %s: %s. Cache cleaning messages will be logged to this logCannot output XRSL representation: The Resources.SlotRequirement.NumberOfSlots attribute must be specified when the Resources.SlotRequirement.SlotsPerHost attribute is specified.Cannot parse integer value '%s' for -%cCannot parse password source expression %s it must be of type=source formatCannot parse password type %s. Currently supported values are 'key','myproxy','myproxynew' and 'all'.Cannot parse schema!Cannot parse service endpoint TXT records.Cannot process proxy file at %s.Cannot query service endpoint TXT records from DNSCannot read specified jobid file: %sCannot remove proxy file at %sCannot remove proxy file at %s, because it's not thereCannot rename to or from root directoryCannot rename to the same URLCannot stat local input file '%s'Cannot switch to group (%s)Cannot switch to primary group for user (%s)Cannot switch to user (%s)Cannot to update AAR. 
Cannot find registered AAR for job %s in accounting database.Cannot use supplied --size optionCannot write job IDs to file (%s)Cannot write jobid (%s) to file (%s)Capabilities:Catting %s for job %sCause of failure unclear - choosing randomlyCert Type: %dCertificate %s already expiredCertificate %s will expire in %sCertificate and key ('%s' and '%s') not found in any of the paths: %sCertificate does not have a slotCertificate format is DERCertificate format is PEMCertificate format is PKCSCertificate format is unknownCertificate has unknown extension with numeric ID %u and SN %sCertificate information collection failedCertificate information:Certificate issuer: %sCertificate request is invalidCertificate to use is: %sCertificate verification error: %sCertificate verification failedCertificate verification succeededCertificate with serial number %s and subject "%s" is revokedCertificate with subject %s has expiredCertificate/Proxy path is emptyCertificate: %sCertiticate chain number %dChain(s) configuration failedCheck: looking for metadata: %sCheck: obtained access latency: high (NEARLINE)Check: obtained access latency: low (ONLINE)Check: obtained checksum: %sCheck: obtained modification date: %sCheck: obtained modification time %sCheck: obtained size %lluCheck: obtained size: %lliChecking %sChecking URL returned by SRM: %sChecking cache againChecking cache permissions: DN: %sChecking cache permissions: VO: %sChecking cache permissions: VOMS attr: %sChecking for existence of %sChecking for suspended endpoints which should be started.Checking replica %sChecking source file is presentChecksum %sChecksum mismatchChecksum mismatch between calcuated checksum %s and source checksum %sChecksum mismatch between calculated checksum %s and checksum reported by server %sChecksum mismatch between calculated checksum %s and source checksum %sChecksum mismatch between calculated/supplied checksum (%s) and checksum reported by SRM destination (%s)Checksum mismatch between checksum given as meta option (%s:%s) and calculated checksum (%s)Checksum not computedChecksum type of SRM (%s) and calculated/supplied checksum (%s) differ, cannot compareChecksum type of source and calculated checksum differ, cannot compareChecksum type returned by server is different to requested type, cannot compareChild monitoring child %d exitedChild monitoring drops abandoned child %d (%d)Child monitoring error: %iChild monitoring internal communication errorChild monitoring kick detectedChild monitoring lost child %d (%d)Child monitoring signal detectedChild monitoring stderr is closedChild monitoring stdin is closedChild monitoring stdout is closedChild was already startedClass name: %sCleaning up after failure: deleting %sClient chain does not have entry pointClient connection has no entry pointClient side MCCs are loadedClosed successfullyClosing connectionClosing connection to SQLite accounting databaseClosing may have failedClosing read channelClosing write channelCollected error is: %sCollecting Job (A-REX REST jobs) information.Command is being sentCommand: %sComponent %s(%s) could not be createdComponent has no ID attribute definedComponent has no name attribute definedComponent's %s(%s) next has no ID attribute definedComputing endpoint %s (type %s) added to the list for submission brokeringComputing service:Computing service: %sComputingShare (%s) explicitly rejectedComputingShareName of ExecutionTarget (%s) is not definedConfig class is not an objectConfiguration (%s) loadedConfiguration errorConfiguration example 
file created (%s)Configuration file can not be readConfiguration file is broken - block name does not end with ]: %sConfiguration file is broken - block name is too short: %sConfiguration file not specifiedConfiguration file not specified in ConfigBlockConfiguration file to loadConfiguration root element is not Connecting to Delivery service at %sConnection from %s: %sContacting VOMS server (named %s): %s on port: %sContent: %sConversion failed: %sCopy failed: %sCould not acquire lock on meta file %sCould not connect to service %s: %sCould not convert incoming payload!Could not convert payload!Could not convert the slcs attribute value (%s) to an URL instance in configuration file (%s)Could not create PayloadSOAP!Could not create link to lock file %s as it already existsCould not create lock file %s as it already existsCould not create temporary file "%s"Could not determine configuration type or configuration is emptyCould not determine session directory from filename %sCould not determine version of serverCould not find any useable delivery service, forcing local transferCould not find loadable module by name %s (%s)Could not find loadable module by names %s and %s (%s)Could not find loadable module descriptor by name %sCould not find loadable module descriptor by name %s or kind %sCould not get checksum of %s: %sCould not handle checksum %s: skip checksum checkCould not handle endpoint %sCould not load configuration (%s)Could not locate module %s in following paths:Could not make new transfer request: %s: %sCould not obtain information about source: %sCould not open file %s for reading: %sCould not read data staging configuration from %sCould not stat file %s: %sCould not validate message!Couldn't handle certificate: %sCouldn't parse benchmark XML: %sCouldn't verify availability of CRLCountry: %sCreated RSA key, proceeding with requestCreating a delegation soap clientCreating a http clientCreating a pdpservice clientCreating a soap clientCreating and sending requestCreating buffer: %lli x %iCreating client interfaceCreating client side chainCreating delegation credential to ARC delegation serviceCreating delegation to CREAM delegation failedCreating delegation to CREAM delegation serviceCreating delegation to CREAM delegation service failedCreating directory %sCreating directory: %sCreating service side chainCredential expires at %sCredential handling exception: %sCredential is not initializedCredentials stored in temporary file %sCritical VOMS attribute processing failedCurrent jobs in system (PREPARING to FINISHING) per-DN (%i entries)Current transfer FAILED: %sCurrent transfer completeDCAU failedDCAU failed: %sDH parameters appliedDN %s doesn't match %sDN %s is cached and is valid until %s for URL %sDN %s is cached but has expired for URL %sDN is %sDTR %s cancelledDTR %s could not be cancelledDTR %s failed: %sDTR %s finished successfullyDTR %s finished with state %sDTR %s requested cancel but no active transferDTR %s still in progress (%lluB transferred)DTR %s was already cancelledDTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobsDTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobsDTR is ready for transfer, moving to delivery queueDTRGenerator got request to cancel null jobDTRGenerator is asked about null jobDTRGenerator is asked to check files for null jobDTRGenerator is not running!DTRGenerator is queried about null jobDTRGenerator is requested to clean links for null jobDTRGenerator is requested to process null jobDTRGenerator is requested to 
remove null jobDTRGenerator was sent null jobDTRs still running for job %sDaemonization fork failed: %sData channel: %d.%d.%d.%d:%dData channel: [%s]:%dData delivery loop exitedData transfer abortedData transfer aborted: %sData was already cachedDataDelivery log tail: %sDataDelivery: %sDataMove::Transfer: no checksum calculation for %sDataMove::Transfer: using supplied checksum %sDataMove::Transfer: using supplied checksum %s:%sDataMove::Transfer: will calculate %s checksumDataMover: cycleDataMover: destination out of tries - exitDataMover: no retries requested - exitDataMover: source out of tries - exitDataMover::Transfer : starting new threadDataMover::Transfer: trying to destroy/overwrite destination: %sDataPointGFAL::write_file got position %d and offset %d, has to seekDataPointXrootd::write_file got position %d and offset %d, has to seekDataStagingDelivery exited with code %iDefault CPU time: %sDefault INTERNAL client constructorDefault Storage Service: %sDefault broker (%s) is not available. When using %s a broker should be specified explicitly (-b option).Default wall-time: %sDelegateCredentialsInit failedDelegateProxy failedDelegated credential from delegation service: %sDelegated credential identity: %sDelegation ID: %sDelegation authorization failedDelegation authorization passedDelegation getProxyReq request failedDelegation handler is not configuredDelegation handler with delegatee role endsDelegation handler with delegatee role starts to processDelegation handler with delegator role starts to processDelegation putProxy request failedDelegation role not supported: %sDelegation service: %sDelegation to ARC delegation service failedDelegation to gridsite delegation service failedDelegation type not supported: %sDelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - %sDelegationStore: PeriodicCheckConsumers failed to resume iteratorDelegationStore: TouchConsumer failed to create file %sDelete errorDeleted but still have locations at %sDelivery received new DTR %s with source: %s, destination: %sDelivery service at %s can copy from %sDelivery service at %s can copy to %sDestination URL missingDestination URL not supported: %sDestination URL not valid: %sDestination file is in cacheDestination is invalid URLDestination is not index service, skipping replica registrationDestination is not ready, will wait %u secondsDestination: %sDir %s allowed at service %sDirectory %s removed successfullyDirectory %s to store accounting database has been created.Directory listing failedDirectory of trusted CAs is not specified/found; Using current path as the CA direcrotyDirectory size is larger than %i files, will have to call multiple timesDirectory size is too large to list in one call, will have to call multiple timesDirectory: %sDo sorting using user created python brokerDoesn't support advance reservationsDoesn't support bulk SubmissionDoesn't support preemptionDownloading job: %sDowntime ends: %sDowntime starts: %sDumping job description aborted: Unable to load broker %sDuplicate replica found in LFC: %sEACCES Error opening lock file %s: %sECDH parameters appliedEPSV failedEPSV failed: %sERROR: %sERROR: Failed to retrieve informationERROR: Failed to retrieve information from the following endpoints:ERROR: Failed to write job information to file (%s)ERROR: Job submission aborted because no resource returned any informationERROR: One or multiple job descriptions was not submitted.ERROR: Unable to load broker %sERROR: VOMS configuration file %s contains too long line(s). 
Max supported length is %i characters.ERROR: VOMS configuration file %s contains too many lines. Max supported number is %i.ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. Line was: %sERROR: failed to read file %s while scanning VOMS configuration.ERROR: file tree is too deep while scanning VOMS configuration. Max allowed nesting is %i.EchoService (python) 'Process' calledEchoService (python) constructor calledEchoService (python) destructor calledEchoService (python) got: %s EchoService (python) has prefix %(prefix)s and suffix %(suffix)sEchoService (python) request_namespace: %sEchoService (python) thread test startingEchoService (python) thread test, iteration %(iteration)s %(status)sElement "%s" in the profile ignored: the "inidefaultvalue" attribute cannot be specified when the "inisections" and "initag" attributes have not been specified.Element "%s" in the profile ignored: the value of the "inisections" attribute cannot be the empty string.Element "%s" in the profile ignored: the value of the "initag" attribute cannot be the empty string.Element "%s" in the profile ignored: the value of the "initype" attribute cannot be the empty string.Empty filename returned from FileCacheEmpty input payload!Empty job description source stringEmpty payload!Empty stringEnd of comment not foundEnd of double quoted string not foundEnd of single quoted string not foundEnd of user delimiter (%s) quoted string not foundEndpoint Information:Error accessing cache file %s: %sError adding communication interface in %s. Maybe another instance of A-REX is already running.Error adding communication interface in %s. Maybe permissions are not suitable.Error creating cacheError creating cache. Stale locks may remain.Error creating directory %s: %sError creating lock file %s: %sError creating required directories for %sError creating required dirs: %sError creating temporary file %s: %sError detected while parsing this ACError due to expiration of provided credentialsError during file validation. Can't stat file %s: %sError during file validation: Local file size %llu does not match source file size %llu for file %sError evaluating profileError from SQLite: %sError from SQLite: %s: %sError getting info from statvfs for the path %s: %sError getting list of files (in list)Error in cache processing, will retry without cachingError in caching procedureError in lock file %s, even though linking did not return an errorError initialising X509 storeError initiating delegation database in %s. Maybe permissions are not suitable. 
Returned error is: %s.Error linking cache file to %s.Error linking tmp file %s to lock file %s: %sError listing lock file %s: %sError loading generated configurationError looking up attributes of cache meta file %s: %sError looking up space tokens matching description %sError number in store context: %iError opening accounting databaseError opening lock file %s in initial check: %sError opening meta file %sError opening meta file for writing %sError parsing the internally set executables attribute.Error pinging delivery service at %s: %s: %sError reading info from file %s:%sError reading lock file %s: %sError reading meta file %s: %sError registering replica, moving to end of data stagingError removing cache file %s: %sError switching uidError to flush output payloadError when extracting public key from requestError when loading the extension config file: %sError when loading the extension config file: %s on line: %dError while reading dir %s: %sError with cache configurationError with cache configuration: %sError with formatting in lock file %sError with hearbeatfile: %sError with post-transfer destination handling: %sError with source file, moving to next replicaError writing raw certificateError writing srm info file %sError writing to lock file %s: %sError: can't open policy file: %sError: policy location: %s is not a regular fileErrorDescriptionEstimated average waiting time: %sEstimated worst waiting time: %sEvaluator does not support loadable Combining AlgorithmsEvaluator does not support specified Combining Algorithm - %sEvaluator for ArcPDP was not loadedEvaluator for GACLPDP was not loadedEvaluator for XACMLPDP was not loadedExample configuration (%s) not created.Excepton while trying to start external process: %sExcessive data received while checking file accessExcluding replica %s matching pattern !%sExecution Target on Computing Service: %sExecution environment does not support inbound connectionsExecution environment does not support outbound connectionsExecution environment is a physical machineExecution environment is a virtual machineExecution environment supports inbound connectionsExecution environment supports outbound connectionsExecutionTarget class is not an objectExiting Generator threadExiting jobs processing threadExpecting Command among argumentsExpecting Command and URL providedExpecting Command module name among argumentsExpecting Command module path among argumentsExpecting Module, Command and URL providedExpecting URL among argumentsExternal request for attention %sExtracted nickname %s from credentials to use for RUCIO_ACCOUNTFATAL, ERROR, WARNING, INFO, VERBOSE or DEBUGFaile to assign hostname extensionFailed allocating memory for handleFailed authenticatingFailed authenticating: %sFailed checking database (%s)Failed checking source replicaFailed checking source replica %s: %sFailed checking source replica: %sFailed cleaning up destination %sFailed configuration initializationFailed configuration initialization.Failed connecting to server %s:%dFailed destroying handle: %s. 
Can't handle such situation.Failed downloading %s to %sFailed downloading %s to %s, destination already existFailed downloading %s to %s, unable to remove existing destinationFailed in globus_cond_initFailed in globus_ftp_control_handle_initFailed in globus_mutex_initFailed linking cache file to %sFailed locating credentialsFailed looking up attributes of cached file: %sFailed preparing job descriptionFailed processing user mapping command: %s %sFailed reading control directory: %sFailed reading control directory: %s: %sFailed reading dataFailed reading list of filesFailed reading local informationFailed retrieving information for job: %sFailed retrieving job description for job: %sFailed running mailerFailed setting file owner: %sFailed submitting job descriptionFailed to abort transfer of ftp file: %sFailed to accept SSL connectionFailed to accept connection requestFailed to accept delegationFailed to accept new file/destinationFailed to access proxy of given job id %s at %sFailed to acquire A-REX's configurationFailed to acquire delegation contextFailed to acquire lock on cache meta file %sFailed to acquire lock on file %sFailed to acquire source: %sFailed to activate Jobs Processing object, exiting Grid Manager threadFailed to add '%s' URL (interface type %s) into the accounting database Endpoints tableFailed to add '%s' into the accounting database %s tableFailed to add Independent OIDFailed to add RFC proxy OIDFailed to add VOMS AC extension. Your proxy may be incomplete.Failed to add VOMS AC sequence OIDFailed to add anyLanguage OIDFailed to add certificate and keyFailed to add certificate to token or databaseFailed to add extension into credential extensionsFailed to add inheritAll OIDFailed to add issuer's extension into proxyFailed to add key usage extensionFailed to add proxy certificate information extensionFailed to add voms AC extensionFailed to allocate certificate trustFailed to allocate item for certificate dataFailed to allocate memory for certificate subject while matching policy.Failed to allocate p12 contextFailed to apply DH parametersFailed to apply ECDH parametersFailed to apply local address to data connectionFailed to authenticate SAML Token inside the incoming SOAPFailed to authenticate Username Token inside the incoming SOAPFailed to authenticate X509 Token inside the incoming SOAPFailed to authenticate to PKCS11 slot %sFailed to authenticate to key databaseFailed to authenticate to token %sFailed to bind socket for %s:%s(%s): %sFailed to bind socket for TCP port %s(%s): %sFailed to call PORT_NewArenaFailed to cancel transfer request: %sFailed to cancel: %sFailed to cancel: No SOAP responseFailed to cast PayloadSOAP from incoming payloadFailed to cast PayloadSOAP from outgoing payloadFailed to change mapping stack processing policy in: %s = %sFailed to change owner of symbolic link %s to %iFailed to change owner of temp proxy at %s to %i:%i: %sFailed to change permissions on %s: %sFailed to change permissions or set owner of hard link %s: %sFailed to check %sFailed to clean up file %s: %sFailed to communicate to delegation endpoint.Failed to complete writing to destinationFailed to connect to %s(%s):%iFailed to connect to %s(%s):%i - %sFailed to connect to server %s:%dFailed to convert ASCII to DERFailed to convert EVP_PKEY to PKCS8Failed to convert GSI credential to GSS credential (major: %d, minor: %d)Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:%sFailed to convert PrivateKeyInfo to EVP_PKEYFailed to convert security information 
to ARC policyFailed to convert security information to ARC requestFailed to convert security information to XACML requestFailed to copy %s: %sFailed to copy file %s to %s: %sFailed to copy input file: %s to path: %sFailed to create DTR dump threadFailed to create OTokens security attributesFailed to create OpenSSL object %s %s - %u %sFailed to create SOAP containersFailed to create any cache directories for %sFailed to create cache directory for file %s: %sFailed to create cache meta file %sFailed to create certificate requestFailed to create control directory %sFailed to create directoryFailed to create directory %sFailed to create directory %s! Skipping job.Failed to create directory %s: %sFailed to create export contextFailed to create file %s: %sFailed to create hard link from %s to %s: %sFailed to create input SOAP containerFailed to create key or certificate safeFailed to create link: %s. Will not use mapped URLFailed to create path lengthFailed to create policy languageFailed to create socket for connecting to %s(%s):%d - %sFailed to create socket for listening at %s:%s(%s): %sFailed to create socket for listening at TCP port %s(%s): %sFailed to create subject nameFailed to create symbolic link from %s to %s: %sFailed to create temp proxy at %s: %sFailed to create threadFailed to create xrootd copy job: %sFailed to create/open file %s: %sFailed to decode trust stringFailed to delegate credentials to server - %sFailed to delegate credentials to server - no delegation interface foundFailed to delete %sFailed to delete %s but will still try to copyFailed to delete certificateFailed to delete delivery object or deletion timed outFailed to delete destination, retry may failFailed to delete logical fileFailed to delete meta-informationFailed to delete physical fileFailed to delete private keyFailed to delete private key and certificateFailed to delete replica %s: %sFailed to delete stale cache file %s: %sFailed to duplicate X509 structureFailed to duplicate extensionFailed to enable IPv6Failed to encode PKCS12Failed to encode certificateFailed to encode the certificate request with DER formatFailed to establish SSL connectionFailed to establish connection: %sFailed to export private keyFailed to extract VOMS nickname from proxyFailed to extract credential informationFailed to fetch data from %s accounting database tableFailed to fetch data from accounting database Endpoints tableFailed to finalize reading from sourceFailed to finalize writing to destinationFailed to find CA certificatesFailed to find certificate and/or private key or files have improper permissions or ownership.Failed to find certificates by nickname: %sFailed to find extensionFailed to find issuer certificate for proxy certificateFailed to find metadata info on %s for determining file or directory deleteFailed to generate EC keyFailed to generate SAML Token for outgoing SOAPFailed to generate Username Token for outgoing SOAPFailed to generate X509 Token for outgoing SOAPFailed to generate public/private key pairFailed to get DN information from .local file for job %sFailed to get TCP socket options for connection to %s(%s):%d - timeout won't work - %sFailed to get certificate from certificate fileFailed to get credentialFailed to get ftp fileFailed to get initiate GFAL2 parameter handle: %sFailed to get initiate new GFAL2 context: %sFailed to get load average: %sFailed to get private keyFailed to get public keyFailed to get public key from RSA objectFailed to get public key from X509 objectFailed to identify grid-manager 
config fileFailed to import certificate from file: %sFailed to import private keyFailed to import private key from file: %sFailed to initialize LCASFailed to initialize LCMAPSFailed to initialize OpenSSL libraryFailed to initialize PKCS12 file: %sFailed to initialize X509 structureFailed to initialize accounting databaseFailed to initialize extensions member for CredentialFailed to initialize main Python threadFailed to initiate cacheFailed to initiate client connectionFailed to initiate delegation credentialsFailed to insert AAR into the database for job %sFailed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same portFailed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at same portFailed to listen at %s:%s(%s): %sFailed to listen at TCP port %s(%s): %sFailed to load client configurationFailed to load extension section: %sFailed to load grid-manager config fileFailed to load grid-manager config file from %sFailed to load grid-manager configfileFailed to load plugin for URL %sFailed to load private keyFailed to load service configurationFailed to load service configuration from any default config fileFailed to load service configuration from file %sFailed to load service side MCCsFailed to lock arccredential library in memoryFailed to lock arccrypto library in memoryFailed to make symbolic link %s to %s : %sFailed to move %s to %s: %sFailed to move file %s to %sFailed to new arenaFailed to obtain OpenSSL identifier for %sFailed to obtain bytes transferred: %sFailed to obtain delegation locks for cleaning orphaned locksFailed to obtain information about fileFailed to obtain listing from FTP: %sFailed to obtain local address for %s:%s - %sFailed to obtain local address for port %s - %sFailed to obtain lock on cache file %sFailed to obtain stat from FTP: %sFailed to open %s for reading: %sFailed to open %s, trying to create parent directoriesFailed to open data channelFailed to open directory %s: %sFailed to open file %sFailed to open file with DH parameters for readingFailed to open heartbeat file %sFailed to open input certificate file %sFailed to open log file: %sFailed to open output file '%s'Failed to open p12 fileFailed to open stdio channel %dFailed to open stdio channel %sFailed to output the certificate request as ASCII formatFailed to output the certificate request as DER formatFailed to parse HTTP headerFailed to parse Rucio response: %sFailed to parse SAML Token from incoming SOAPFailed to parse Username Token from incoming SOAPFailed to parse VOMS command: %sFailed to parse X509 Token from incoming SOAPFailed to parse certificate request from CSR file %sFailed to parse configuration file %sFailed to parse requested VOMS lifetime: %sFailed to parse requested VOMS server port number: %sFailed to postregister destination %sFailed to pre-clean destination: %sFailed to preallocate space for %sFailed to prepare destinationFailed to prepare destination: %sFailed to prepare job descriptionFailed to prepare sourceFailed to prepare source: %sFailed to preregister destination: %sFailed to process A-REX configuration in %sFailed to process VOMS configuration or no suitable configuration lines found.Failed to process configuration in %sFailed to process job: %sFailed to process job: %s - %s %sFailed to process jobs - failed to parse responseFailed to process jobs - wrong response: %uFailed to process security attributes in TLS MCC for incoming messageFailed to query AAR database ID for job %sFailed to query state: %sFailed to read attribute %x from 
private key.Failed to read cache meta file %sFailed to read certificate file: %sFailed to read data from input fileFailed to read database schema file at %sFailed to read file %sFailed to read file with DH parametersFailed to read input certificate fileFailed to read object %s: %sFailed to read private key file: %sFailed to read proxy file: %sFailed to read request from a fileFailed to read request from a stringFailed to register destination replica: %sFailed to register new file/destination: %sFailed to register plugin for state %sFailed to release GSS credential (major: %d, minor: %d):%s:%sFailed to release completed requestFailed to release lock on cache file %sFailed to release lock on file %sFailed to remove .meta file %s: %sFailed to remove all physical instancesFailed to remove cache per-job dir %s: %sFailed to remove existing hard link at %s: %sFailed to remove existing symbolic link at %s: %sFailed to remove file %s: %sFailed to remove instanceFailed to remove lock on %s. Some manual intervention may be requiredFailed to remove stale lock file %s: %sFailed to remove temporary proxy %s: %sFailed to rename URLFailed to resolve %sFailed to resolve %s (%s)Failed to resolve destination: %sFailed to resolve source: %sFailed to retrieve application data from OpenSSLFailed to retrieve link to TLS stream. Additional policy matching is skipped.Failed to retrieve private key for issuerFailed to run Grid Manager threadFailed to run command: %sFailed to run configuration parser at %s.Failed to run external plugin: %sFailed to send cancel request: %sFailed to send content of bufferFailed to set GFAL2 monitor callback: %sFailed to set GFAL2 transfer timeout, will use default: %sFailed to set INTERNAL endpointFailed to set LFC replicas: %sFailed to set credentials for GridFTP transferFailed to set executable bit on file %sFailed to set executable bit on file %s: %sFailed to set overwrite option in GFAL2: %sFailed to set permissions on: %sFailed to set signature algorithm IDFailed to set the pubkey for X509 object by using pubkey from X509_REQFailed to set up credential delegation with %sFailed to shut down SSL: %sFailed to sign encoded certificate dataFailed to sign the certificate requestFailed to sign the proxy certificateFailed to stage file(s)Failed to start archival threadFailed to start cache clean scriptFailed to start certificate extensionFailed to start data staging threadsFailed to start listening on any address for %s:%sFailed to start listening on any address for %s:%s(IPv%s)Failed to start new DTR for %sFailed to start new thread for monitoring job requestsFailed to start new thread: cache won't be cleanedFailed to start querying the endpoint on %sFailed to start querying the endpoint on %s (unable to create sub-thread)Failed to start reading from source: %sFailed to start thread for communicationFailed to start thread for listeningFailed to start transfer request: %sFailed to start writing to cacheFailed to start writing to destination: %sFailed to stat session dir %sFailed to stat source %sFailed to store application dataFailed to store ftp fileFailed to submit all jobs.Failed to submit all jobs: %sFailed to submit all jobs: %s %sFailed to submit all jobs: %u %sFailed to submit jobFailed to switch user id to %d/%dFailed to terminate LCASFailed to terminate LCMAPSFailed to transfer dataFailed to unlock file %s: %s. Manual intervention may be requiredFailed to unlock file with lock %s: %sFailed to unregister pre-registered destination %s. 
You may need to unregister it manuallyFailed to unregister pre-registered destination %s: %s. You may need to unregister it manuallyFailed to unregister preregistered lfn, You may need to unregister it manuallyFailed to unregister preregistered lfn. You may need to unregister it manuallyFailed to unregister preregistered lfn. You may need to unregister it manually: %sFailed to update AAR in the database for job %sFailed to verify X509 Token inside the incoming SOAPFailed to verify the requestFailed to verify the signature under Failed to verify the signature under Failed to verify the signed certificateFailed to write RTEs information for the job %sFailed to write authtoken attributes for job %sFailed to write body to output streamFailed to write data transfers information for the job %sFailed to write event records for job %sFailed to write header to output streamFailed to write job information to database (%s)Failed to write request into a fileFailed to write request into stringFailed to write signed EEC certificate into a fileFailed to write signed proxy certificate into a fileFailed to write to local job list %sFailed updating timestamp on cache lock file %s for file %s: %sFailed uploading file %s to %s: %sFailed uploading local input filesFailed while finishing reading from sourceFailed while finishing writing to destinationFailed while reading from sourceFailed while transferring dataFailed while waiting for connection requestFailed while waiting for connection to %s(%s):%i - %sFailed while writing to destinationFailure in parsing response from server - some information may be inaccurateFailure: %sFeature is not implementedFetch: response body: %sFetch: response code: %u %sFile %s is NEARLINE, will make request to bring onlineFile %s is already cached at %s under a different URL: %s - this file will not be cachedFile %s is already cached at %s under a different URL: %s - will not add DN to cached listFile %s is cached (%s) - checking permissionsFile %s removed successfullyFile '%s' in the 'executables' attribute is not present in the 'inputfiles' attributeFile already exists: %sFile could not be moved to Done stateFile could not be moved to Running state: %sFile delete failed, attempting directory deleteFile delete failed, attempting directory delete for %sFile download failed: %sFile is cacheable, will check cacheFile is currently being cached, will wait %isFile is not accessible %s: %sFile is not accessible: %sFile is not cacheable, skipping cache processingFile is not cacheable, was requested not to be cached or no cache available, skipping cache checkFile is ready! 
TURL is %sFile is smaller than %llu bytes, will use local deliveryFile type is not available, attempting file deleteFilename not returned in Rucio response: %sFiles associated with request token %s aborted successfullyFiles associated with request token %s put done successfullyFiles associated with request token %s released successfullyFileset copy to single object is not supported yetFileset registration is not supported yetFinding existing destination replicasFinishWriting: looking for metadata: %sFinishWriting: obtained checksum: %sFinished successfullyFirst stage of registration to index service failedFirst value of 'inputfiles' attribute (filename) cannot be emptyFirst value of 'outputfiles' attribute (filename) cannot be emptyFor registration source must be ordinary URL and destination must be indexing serviceFor the 1st test job you also have to specify a runtime value with -r (--runtime) option.Force-checking source of cache file %sForcing re-download of file %sFound %s %s (it was loaded already)Found %s in cacheFound DTR %s for file %s left in transferring state from previous runFound VOMS AC attribute: %sFound a registry, will query it recursively: %sFound existing token for %s in Rucio token cache with expiry time %sFound service endpoint %s (type %s)Found started or successful endpoint (%s)Found suspended endpoint (%s)Found the following jobs:Found the following new jobs:Found unexpected empty lock file %s. Must go back to acquire()Found unfinished DTR transfers. It is possible the previous A-REX process did not shut down normallyFree slots grouped according to time limits (limit: free slots):Free slots: %iFull string not used: %sGACL Auth. request: %sGET: id %s path %sGenerate new X509 request!Generating %s job description outputGenerating ceID prefix from hostname automaticallyGenerator startedGeneric errorGet delegated credential from delegation service: %sGet from cache: Cached file is lockedGet from cache: Error in cache configurationGet from cache: File not in cacheGet from cache: Invalid URL %sGet from cache: Looking in cache for %sGet from cache: could not access cached file: %sGet request %s is still in queue, should wait %i secondsGet: there is no job %s - %sGetting currect timestamp for BLAH parser log: %sGetting delegation credential from ARC delegation serviceGlobus error: %sGlobus handle is stuckGlobus location variable substitution is not supported anymore. Please specify path directly.Grid identity is mapped to local identity '%s'HEAD: id %s path %sHTTP Error: %d %sHTTP failure %u - %sHTTP with SAML2SSO invocation failedHTTP:PUT %s: put file %s: %sHandle is not in proper state %u/%uHead: there is no job %s - %sHealth state info: %sHealth state: %sHealthState of ExecutionTarget (%s) is not OK or WARNING (%s)Help Options:Helper process start failed: %sHelper program is missingHomogeneous resourceID: %sINI config file %s does not existINTERNALClient is not initializedId= %s,Type= %s,Issuer= %s,Value= %sIdP return some error message: %sIdentity is %sIdentity name: %sIdentity: %sIf the proxy or certificate/key does exist, you can manually specify the locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%s' attributes in the client configuration file (e.g. 
'%s')If you specify a policy you also need to specify a policy languageIgnoring endpoint (%s), it is already registered in retriever.Ignoring job (%s), already tried and were unable to load JobControllerPluginIgnoring job (%s), the job management URL is unknownIgnoring job (%s), the job status URL is unknownIgnoring job (%s), the management interface name is unknownIgnoring job (%s), the status interface name is unknownIgnoring job (%s), unable to load JobControllerPlugin for %sIgnoring job, the job ID is emptyIllegal URL - closing ] for IPv6 address is followed by illegal token: %sIllegal URL - no closing ] for IPv6 address found: %sIllegal URL - no hostname given: %sIllegal URL - path must be absolute or empty: %sIllegal URL - path must be absolute: %sIllegal time format: %sImmediate completion expectedImmediate completion expected: %sImmediate completion: %sImplementation name: %sImplementor: %sIn the available CRL the lastUpdate field is not validIn the available CRL, the nextUpdate field is not validIn the configuration profile the 'initype' attribute on the "%s" element has a invalid value "%s".Incoming Message is not SOAPIncompatible options --nolist and --forcelist requestedInconsistent metadataIndependent proxy - no rights grantedInformation endpointInformation item '%s' is not knownInformational document is emptyInitialized %u-th Python serviceInitiating delegation procedureInput is not SOAPInput is without trailer Input request from a file: Request.xmlInput request from codeInput: metadata: %sInstalled application environments:Interface (%s) specified, submitting only to that interfaceInterface extensions:Interface on endpoint (%s) %s.Interface versions:Interface: %sInternal transfer method is not supported for %sInvalid DTRInvalid DTR for source %s, destination %sInvalid EffectInvalid HTTP object can't produce resultInvalid ID: %sInvalid ISO duration format: %sInvalid JobDescription:Invalid URL '%s' for input file '%s'Invalid URL '%s' for output file '%s'Invalid URL option syntax in option '%s' for input file '%s'Invalid URL option syntax in option '%s' for output file '%s'Invalid URL option: %sInvalid URL: %sInvalid URL: '%s' in input file '%s'Invalid URL: '%s' in output file '%s'Invalid action value %sInvalid class nameInvalid class name. The broker argument for the PythonBroker should be Filename.Class.args (args is optional), for example SampleBroker.MyBrokerInvalid comparison operator '%s' used at 'delegationid' attribute, only "=" is allowed.Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' dialect, only "=" is allowedInvalid comparison operator '%s' used at 'queue' attribute, only "!=" or "=" are allowed.Invalid configuration - no allowed IP address specifiedInvalid configuration - no transfer dirs specifiedInvalid credentials, please check proxy and/or CA certificatesInvalid destination URL %sInvalid download destination path specified (%s)Invalid job descriptionInvalid lock on file %sInvalid log level. Using default %s.Invalid nodeaccess value: %sInvalid old log level. 
Using default %s.Invalid period string: %sInvalid port number in %sInvalid stage out path specified (%s)Invalid url: %sIssuer CA: %sIssuer: %sJWSE::ExtractPublicKey: external jwk keyJWSE::ExtractPublicKey: jwk keyJWSE::ExtractPublicKey: key parsing errorJWSE::ExtractPublicKey: no supported keyJWSE::ExtractPublicKey: x5c keyJWSE::Input: JWE: not supported yetJWSE::Input: JWS content: %sJWSE::Input: JWS: signature algorithm: %sJWSE::Input: JWS: signature verification failedJWSE::Input: JWS: token too oldJWSE::Input: JWS: token too youngJWSE::Input: header: %sJWSE::Input: token: %sJob %s does not report a resumable stateJob %s failed to renew delegation %s.Job %s has no delegation associated. Can't renew such job.Job %s not foundJob %s: Some downloads failedJob %s: all files downloaded successfullyJob %s: files still downloadingJob ID argument is required.Job database connection established successfully (%s)Job deleted: %sJob description file could not be read.Job description language is not specified, unable to output description.Job description languages supported by %s:Job description to be sent to %s:Job descriptions:Job did not finished successfully. Message will not be written to BLAH log.Job download directory from user configuration file: %sJob download directory will be created in present working directory.Job download directory: %sJob has not started yet: %sJob list file (%s) doesn't existJob list file (%s) is not a regular fileJob list file cannot be created: %s is not a directoryJob list file cannot be created: The parent directory (%s) doesn't exist.Job nr.Job resuming successfulJob submission summary:Job submitted with jobid: %sJob timestamp successfully parsed as %sJob: %sJob: %s : Cancel request put and communicated to serviceJob: %s : Cancel request put but failed to communicate to serviceJob: %s : Clean request put and communicated to serviceJob: %s : Clean request put but failed to communicate to serviceJob: %s : ERROR : Failed to put cancel markJob: %s : ERROR : Failed to put clean markJob: %s : ERROR : No local information.Job: %s : ERROR : Unrecognizable stateJobControllerPlugin %s could not be createdJobControllerPlugin plugin "%s" not found.JobDescription class is not an objectJobDescriptionParserPlugin %s could not be createdJobDescriptionParserPlugin plugin "%s" not found.Jobs missing information will not be cleaned!Jobs processed: %d, deleted: %dJobs processed: %d, renewed: %dJobs processed: %d, resumed: %dJobs processed: %d, successfully killed: %dJobs processed: %d, successfully killed: %d, successfully cleaned: %dJobs processed: %d, successfully retrieved: %dJobs processed: %d, successfully retrieved: %d, successfully cleaned: %dJunk at end of RSLJunk in sessiondir commandLCMAPS did not return any GIDLCMAPS did not return any UIDLCMAPS has getCredentialDataLCMAPS has lcmaps_runLCMAPS returned UID which has no username: %uLCMAPS returned invalid GID: %uLCMAPS returned invalid UID: %uLIST/MLST failedLIST/MLST failed: %sLanguage (%s) not recognized by any job description parsers.Last stage of registration to index service failedLatitude: %fLeft operand for RSL concatenation does not evaluate to a literalLegacyMap: no configurations blocks definedLegacyPDP: ARC Legacy Sec Attribute not recognized.LegacyPDP: there is no %s Sec Attribute defined. 
Probably ARC Legacy Sec Handler is not configured or failed.LegacySecHandler: configuration file not specifiedLine %d.%d of the attributes returned: %sLinking MCC %s(%s) to MCC (%s) under %sLinking MCC %s(%s) to Plexer (%s) under %sLinking MCC %s(%s) to Service (%s) under %sLinking Plexer %s to MCC (%s) under %sLinking Plexer %s to Plexer (%s) under %sLinking Plexer %s to Service (%s) under %sLinking local fileLinking mapped fileLinking/copying cached fileLinking/copying cached file to %sList functionality is not supported for RESTful VOMS interfaceList functionality is not supported for legacy VOMS interfaceList will stat the URL %sListFiles: looking for metadata: %sListening on %s:%s(%s)Listening on TCP port %s(%s)Listing localjobs succeeded, %d localjobs foundLoadable module %s contains no requested plugin %s of kind %sLoaded %sLoaded %s %sLoaded JobControllerPlugin %sLoaded JobDescriptionParserPlugin %sLoaded MCC %s(%s)Loaded Plexer %sLoaded Service %s(%s)Loaded SubmitterPlugin %sLoading %u-th Python serviceLoading OToken failed - ignoring its presenceLoading Python broker (%i)Loading configuration (%s)Local running jobs: %iLocal suspended jobs: %iLocal waiting jobs: %iLocation URI for file %s is invalidLocation already existsLocations are missing in destination LFC URLLock %s is owned by a different host (%s)Lock file %s doesn't existLongitude: %fLooking for current jobsLooking up URL %sLooking up source replicasMCC %s(%s) - next %s(%s) has no targetMIME is not suitable for SOAP: %sMLSD is not supported - trying NLSTMLST is not supported - trying LISTMain Python thread is not initializedMain Python thread was not initializedMain memory size: %iMalformed ARCHERY record found (endpoint type is not defined): %sMalformed ARCHERY record found (endpoint url is not defined): %sMalformed VOMS AC attribute %sMapfile at %s can't be opened.Mapping %s to %sMapping policy option has empty valueMapping policy:Mapping queue: %sMatch issuer: %sMatch vo: %sMatched nothingMatched: %s %s %sMatched: %s %s %s %sMatchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget.Matchmaking, Benchmark %s is not published by the ExecutionTarget.Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); JobDescription: %d MB (CacheDiskSpace)Matchmaking, Computing endpoint requirement not satisfied. 
ExecutionTarget: %sMatchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) JobDescription: %s (InBound)Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) JobDescription: %s (OutBound)Matchmaking, ExecutionTarget: %s, OperatingSystem is not definedMatchmaking, ExecutionTarget: %s, CacheTotal is not definedMatchmaking, ExecutionTarget: %s, HealthState is not definedMatchmaking, ExecutionTarget: %s, ImplementationName is not definedMatchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not definedMatchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not defined, assuming no CPU time limitMatchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU time limitMatchmaking, ExecutionTarget: %s, NetworkInfo is not definedMatchmaking, ExecutionTarget: %s, Platform is not definedMatchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not satisfiedMatchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not definedMatchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not definedMatchmaking, ExecutionTarget: %s matches job descriptionMatchmaking, ExecutionTarget: %s, ApplicationEnvironments not definedMatchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not definedMatchmaking, ExecutionTarget: %s, MaxVirtualMemory is not definedMatchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfiedMatchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), JobDescription: %d (IndividualPhysicalMemory)Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace)Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (SessionDiskSpace)Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), JobDescription: %d (IndividualPhysicalMemory)Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) JobDescription: %d (NumberOfProcesses)Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d (MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not support %s, specified in the JobDescription.Matchmaking, Platform problem, ExecutionTarget: %s (Platform) JobDescription: %s (Platform)Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the ExecutionTarget.Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) JobDescription: %d (NumberOfProcesses)Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (DiskSpace)Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s (WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)Max CPU time: %sMax disk space: %iMax memory: %iMax pre-LRMS waiting jobs: %iMax running jobs: %iMax slots per job: %iMax stage in streams: %iMax stage out streams: %iMax total jobs: %iMax total wall-time: %sMax user running jobs: %iMax virtual memory: %iMax waiting jobs: %iMax wall-time: %sMaximum number of threads running - 
putting new request into queueMemory allocation errorMessage class is not an objectMeta info of source and location do not match for %sMetadata of replica and index service differMetadata of source and destination are differentMetadata of source does not match existing destination. Use the --force option to override this.Min CPU time: %sMin wall-time: %sMissing CA subject in Globus signing policyMissing CertificatePath element or ProxyPath element, or is missingMissing Host in Connect elementMissing Port in Connect elementMissing Port in Listen elementMissing VO in configurationMissing audience in configurationMissing authentication informationMissing cancel-%s-job - job cancellation may not workMissing capabilities in configurationMissing condition subjects in Globus signing policyMissing data in DER encoded PROXY_CERT_INFO_EXTENSION extensionMissing directory in controldir commandMissing file name in [arex/jura] logfileMissing final reply: %sMissing group in configurationMissing information in reply: %sMissing issuer in configurationMissing name of LCAS libraryMissing name of LCMAPS libraryMissing number in maxjobsMissing or empty CertificatePath elementMissing or empty CertificatePath or CACertificatesDir elementMissing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authenticationMissing or empty KeyPath elementMissing or empty KeyPath element, or is missingMissing or empty PasswordSource elementMissing or empty Username elementMissing path of credentials fileMissing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - (Grid)FTP code is disabled. Report to developers.Missing reference to factory and/or module. It is unsafe to use Xrootd in non-persistent mode - Xrootd code is disabled. Report to developers.Missing response from delegation endpoint.Missing role in configurationMissing scan-%s-job - may miss when job finished executingMissing schema! Skipping validation...Missing scope in configurationMissing security object in messageMissing subject in configurationMissing subject nameMissing submit-%s-job - job submission to LRMS may not workModule %s contains no plugin %sModule %s contains no requested plugin %s of kind %sModule %s does not contain plugin(s) of specified kind(s)Module %s failed to reload (%s)Module %s is not an ARC plugin (%s)Module Manager InitModule Manager Init by ModuleManager::setCfgModule name: %sMoving to end of data stagingMulti-request operator only allowed at top levelMultiple %s attributes in configuration file (%s)MyProxy failure: %sMyproxy server did not return proxy with VOMS AC includedNEW: put new job: max jobs total limit reachedNEW: put new job: there is no payloadNLST/MLSD failedNLST/MLSD failed: %sNSS database to be accessed: %s NSS initialization failed on certificate database: %sNULL BIO passed to InquireRequestNULL callback for %sName of grami fileName: %sNegative rights are not supported in Globus signing policyNeither source nor destination are index services, will skip resolving replicasNeither source nor destination were staged, skipping releasing requestsNetwork information:New endpoint is created (%s) from the one with the unspecified interface (%s)No A-REX config file found in candypond configurationNo Attribute exists, which can deal with type: %sNo Connect element specifiedNo FQAN found. 
Using None as userFQAN valueNo LRMS set in configurationNo RSL content in job description foundNo SOAP responseNo SOAP response from Delivery service %sNo SOAP response from delivery serviceNo active DTR %sNo active job id %sNo arguments are assigned for external processNo authorization response was returnedNo cache directory specifiedNo cachedirs found/configured for calculation of free space.No caches defined in configurationNo callback for %s definedNo checksum information from serverNo checksum information possibleNo checksum information returned in Rucio response for %sNo checksum verification possibleNo configuration file could be loaded.No control directory set in configurationNo credentials suppliedNo delegation policies in this context and message - passing throughNo delegation token in requestNo delivery endpoints available, will try laterNo destination definedNo draining cache directory specifiedNo errorNo files to retrieve for job %sNo filesize information returned in Rucio response for %sNo job ID suppliedNo job description file name provided.No job description input specifiedNo job description parser was able to interpret job descriptionNo job description parsers availableNo job description parsers suitable for handling '%s' language are availableNo jobdescription resulted at %d testNo jobsNo jobs found, try laterNo jobs givenNo left operand for concatenation operatorNo listening ports initiatedNo local account name specifiedNo local user mapping foundNo locations defined for %sNo locations for destination different from source foundNo locations for destination different from source found: %sNo locations for destination found: %sNo locations for source found: %sNo locations found - probably no more physical instancesNo locations found for %sNo match found in cache access rules for %sNo more %s replicasNo more interfaces to try for endpoint %s.No more replicas, will use %sNo need to stage source or destination, skipping stagingNo new informational document assignedNo next MCC or Service at path "%s"No next element in the chainNo non-draining session dirs availableNo pfns returned in Rucio response: %sNo physical files found for destinationNo physical files found for sourceNo pid file is found at '%s'. 
Probably A-REX is not running.No policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration.No port succeeded for %sNo private key with nickname %s exist in NSS databaseNo proxy foundNo queue name given in queue block nameNo read-only cache directory specifiedNo remote delivery services are useable, forcing local deliveryNo replicas found for %sNo request token specified!No request tokens foundNo requested security information was collectedNo response from AA service %sNo response returned: %sNo results returned from statNo right operand for concatenation operatorNo security processing/check requested for '%s'No server config part of config fileNo session directories found in configuration.No session directory foundNo session directory set in configurationNo source definedNo space token specifiedNo space tokens found matching description %sNo stagein URL is providedNo such DTR %sNo such file or directoryNo target available inside the policyNo target available inside the ruleNo test-job with ID %d found.No test-job, with ID "%d"No usable cachesNo user certificate by nickname %s foundNo user-certificate foundNo username suppliedNo valid caches found in configuration, caching is disabledNo valid credentials found, exitingNo valid location availableNo valid response from VOMS server: %sNo value provided for Subject Attribute %s skippedNon-homogeneous resourceNone of the requested transfer protocols are supportedNot authorized by arc.pdp - failed to get response from EvaluatorNot authorized by arc.pdp - some of the RequestItem elements do not satisfy PolicyNot authorized from simplelist.pdp: %sNot enough parameters in copyurlNot enough parameters in linkurlNot found %s in cacheNot getting checksum of zip constituentNot using delivery service %s due to previous failureNot using delivery service at %s because it is fullNot valid destinationNot valid sourceNothing to do: you have to either specify a test job id with -J (--job) or query information about the certificates with -E (--certificate) Now copying (from -> to)Number %d is with nickname: %s%sNumber %d is: %sNumbers of sources and destinations do not matchOPTION...OS family: %sOS name: %sOS version: %sOTokens: Attr: %s = %sOTokens: Attr: messageOTokens: Attr: token: %sOTokens: Attr: token: bearer: %sOTokens: HandleOTokens: Handle: attributes created: subject = %sOTokens: Handle: messageObject is not suitable for listingObject not initialized (internal error)Obtained XML: %sObtained host and address are not acceptableOnly POST is supported in CandyPondOnly POST is supported in DataDeliveryServiceOnly Raw Buffer payload is supported for outputOnly globus rights are supported in Globus signing policy - %s is not supportedOnly signing rights are supported in Globus signing policy - %s is not supportedOnly standard input is currently supported for password source.Only user '.' 
for helper program is supportedOpenSSL error string: %sOperating System errorOperation cancelled successfullyOperation completed successfullyOperation not supported for this kind of URLOperation on path "%s"OptimizedInformationContainer created temporary file: %sOptimizedInformationContainer failed to create temporary fileOptimizedInformationContainer failed to parse XMLOptimizedInformationContainer failed to rename temporary fileOptimizedInformationContainer failed to store XML document to temporary fileOptions 'p' and 'n' can't be used simultaneouslyOptions Group %s:Options for plugin are missingOriginal job description is listed below:Orphan delegation lock detected (%s) - cleaningOther actionsOut of memory when generate random serialOut of retriesOut of tries while allocating new job ID in %sOutgoing Message is not SOAPOutput EEC certificateOutput format modifiersOutput the proxy certificateOverwrite requested - will pre-clean destinationOwner: %sPASV failedPASV failed: %sPDP: %s (%s)PDP: %s (%s) can not be loadedPDP: %s can not be loadedPDP: missing name attributePEM_read_bio_X509_REQ failedPEM_write_bio_X509_REQ failedPKCS12 add password integrity failedPKCS12 output password not providedPOST request on special path is not supportedParsed domains: %uParser Context creation failed!Parser failed with error code %i.Parsing .local file to obtain job-specific identifiers and infoParsing VOMS AC to get FQANs informationPassword encoding type not supported: %sPath %s is invalid, creating required directoriesPath to .local job status file is required.Path to user's proxy file should be specified.Peer name: %sPer-job POST/SOAP requests are not supportedPerforming matchmaking against target (%s).Performs neither sorting nor matchingPermanent failurePermanent service errorPermission checking failed, will try downloading without using cachePermission checking failed: %sPermission checking on original URL failed: %sPermission checking passedPermission checking passed for url %sPicking up left jobsPlace: %sPlatform: %sPlease choose the NSS database you would like to use (1-%d): Please choose the one you would use (1-%d): Plexer (%s) - next %s(%s) has no targetPlexer's (%s) next has no ID attribute definedPlugin %s error: %sPlugin %s failed to startPlugin %s printed: %sPlugin %s returned no mappingPlugin %s returned no usernamePlugin %s returned too much: %sPlugin %s returned: %uPlugin %s timeout after %u secondsPlugin (user mapping) command is emptyPlugin (user mapping) timeout is not a number: %sPlugin (user mapping) timeout is wrong number: %sPlugin response: %sPolicy Decision Service invocation failedPolicy is emptyPolicy is not gaclPolicy line: %sPolicy subject: %sPolicyId: %s Alg inside this policy is:-- %sPostal code: %sPre-LRMS waiting jobs: %iPre-clean failed, will still try to copyPre-registering destinationPre-registering destination in index servicePreparing to stage destinationPreparing to stage sourceProblem accessing cache file %s: %sProblem creating dtr (source %s, destination %s)Problem loading plugin %s, skipping it.Problem with index service, will proceed to end of data stagingProblem with index service, will release cache lockProcessing thread timed out. 
Restarting DTRProcessing type not supported: %sProcessingStartTime (%s) specified in job description is inside the targets downtime period [ %s - %s ].Protocol plugins available:Protocol(s) not supported - please check that the relevant gfal2 plugins are installed (gfal2-plugin-* packages)Proxy certificate information:Proxy expiredProxy expired. Job submission aborted. Please run 'arcproxy'!Proxy generation failed: Certificate has expired.Proxy generation failed: Certificate is not valid yet.Proxy generation failed: Failed to create temporary file.Proxy generation failed: Failed to retrieve VOMS information.Proxy generation failed: No valid certificate found.Proxy generation failed: No valid private key found.Proxy generation succeededProxy has expiredProxy key length: %iProxy path: %sProxy signature: %sProxy subject: %sProxy type: %sProxy with ARC PolicyProxy with all rights inheritedProxy with empty policy - fail on unrecognized policyProxy with specific policy: %sProxy with unknown policy - fail on unrecognized policyProxy-subject: %sProxy: %sPut request %s is still in queue, should wait %i secondsPython Wrapper constructor succeededPython Wrapper destructor (%d)Python broker constructor called (%d)Python broker destructor called (%d)Python interpreter lockedPython interpreter releasedPython wrapper process calledPythonBroker initQuality level: %sQuerying WSRF GLUE2 computing REST endpoint.Querying source replicas in bulkQuerying status of staging requestQueue information:REST: process %s at %sREST:CLEAN job %s - %sREST:GET job %s - %sREST:KILL job %s - %sREST:PUT job %s: file %s: there is no payloadREST:RESTART job %s - %sRESTful and old VOMS communication protocols can't be requested simultaneously.RSA_generate_key_ex failedRSL substitution is not a sequenceRSL substitution sequence is not of length 2RSL substitution variable name does not evaluate to a literalRSL substitution variable value does not evaluate to a literalRandom sortingRead %i bytesRead access check failedRead access not allowed for %s: %sRead request from a fileRead request from a stringReading %u bytes from byte %lluReal transfer from %s to %sReceived DTR %s back from scheduler in state %sReceived DTR %s during Generator shutdown - may not be processedReceived invalid DTRReceived message out-of-band (not critical, ERROR level is just for debugging purposes)Received no DTRReceived retry for DTR %s still in transferReconnectingRecord about new job successfully added to the database (%s)Redirecting to %sRedirecting to new URL: %sRegistering destination replicaRegistration of Globus FTP buffer failed - cancel checkRelation operator expectedReleasing destinationReleasing request(s) made during stagingReleasing requestsReleasing sourceRemove: deleting: %sRemoving %sRemoving logical file from metadata %sRemoving metadata in %sRemoving pre-registered destination in index serviceRename: globus_ftp_client_move failedRename: timeout waiting for operation to completeRenaming %s to %sReplacing DTR %s in state %s with new requestReplacing existing token for %s in Rucio token cacheReplacing old SRM info with new for URL %sReplacing queue '%s' with '%s'Replica %s doesn't match preferred pattern or URL mapReplica %s has high latency, but no more sources exist so will use this oneReplica %s has high latency, trying next sourceReplica %s has long latency, trying next replicaReplica %s is mappedReplica %s matches host pattern %sReplica %s matches pattern %sRequest failedRequest failed: No response from IdPRequest failed: No response 
from IdP when doing authenticationRequest failed: No response from IdP when doing redirectingRequest failed: No response from SP Service when sending SAML assertion to SPRequest failed: No response from SPServiceRequest failed: response from IdP is not as expected when doing authenticationRequest failed: response from IdP is not as expected when doing redirectingRequest failed: response from SP Service is not as expected when sending SAML assertion to SPRequest failed: response from SPService is not as expectedRequest is emptyRequest is not supported - %sRequest is reported as ABORTED, but all files are doneRequest is reported as ABORTED, since it was cancelledRequest is reported as ABORTED. Reason: %sRequest succeed!!!Request timed outRequest to push to unknown owner - %uRequest: %sRequested slots: %iRequesting recursion and --nolist has no senseRequesting to stop job processingRequirement "%s %s" NOT satisfied.Requirement "%s %s" satisfied by "%s".Requirement "%s %s" satisfied.Reservation policy: %sResolving destination replicasResolving of index service for destination failedResolving of index service for source failedResolving source replicas in bulkResource information provider failed to runResource information provider failed to startResource information provider failed with exit status: %i %sResource information provider log: %sResource information provider: %sResource manager: %sResponse is not SOAPResponse is not XMLResponse: %sResponse: %sResult value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %dResults stored at: %sResuming job: %s at state: %s (%s)Retrieving job description of INTERNAL jobs is not supportedReturning to generatorReusing connectionRight operand for RSL concatenation does not evaluate to a literalRucio returned %sRucio token for %s has expired or is about to expireRule: %sRule: audience: %sRule: capabilities: %sRule: group: %sRule: issuer: %sRule: role: %sRule: scope: %sRule: subject: %sRule: vo: %sRunning command: %sRunning jobs: %iRunning mailer command (%s)SAML Token handler is not configuredSAML2SSO process failedSOAP Request to AA service %s failedSOAP fault from delivery service at %s: %sSOAP fault: %sSOAP invocation failedSOAP operation is not supported: %sSOAP request: %sSOAP response: %sSOAP with SAML2SSO invocation failedSQL statement used: %sSQLite database error: %sSRM Client status: %sSRM did not return any informationSRM did not return any useful informationSRM returned no useful Transfer URLs: %sSSHFS mount point of cache directory (%s) is broken - waiting for reconnect ...SSHFS mount point of runtime directory (%s) is broken - waiting for reconnect ...SSHFS mount point of session directory (%s) is broken - waiting for reconnect ...SSL error: %d - %s:%s:%sSSL error: %s, libs: %s, func: %s, reason: %sScheduler configuration:Scheduler received NULL DTRScheduler received invalid DTRScheduler received new DTR %s with source: %s, destination: %s, assigned to transfer share %s with priority %dScheduler starting upScheduler stopped, exitingScheduling policy: %sSchema validation errorScheme: %sSecHandler configuration is not definedSecHandler has no configurationSecHandler has no name attribute definedSecHandler: %s(%s)Security Handler %s(%s) could not be createdSecurity Handlers processing failedSecurity Handlers processing failed: %sSecurity check failed for incoming TLS messageSecurity check failed for outgoing TLS messageSecurity check failed in SOAP MCC for incoming messageSecurity check failed in SOAP MCC for incoming message: %sSecurity 
check failed in SOAP MCC for outgoing messageSecurity check failed in SOAP MCC for outgoing message: %sSecurity check failed in TLS MCC for incoming messageSecurity processing/check failed: %sSecurity processing/check for '%s' failed: %sSecurity processing/check for '%s' passedSecurity processing/check passedSelf-signed certificateServer SRM version: %sServer implementation: %sService %s(%s) could not be createdService Loop: Endpoint %sService endpoint %s (type %s) added to the list for direct submissionService endpoint %s (type %s) added to the list for resource discoveryService has no ID attribute definedService has no Name attribute definedService information:Service is waiting for requestsService side MCCs are loadedServing state: %sSession dir %s is owned by %i, but current mapped user is %iSession dir '%s' contains user specific substitutions - skipping itSession directory to useSession root directory is missingSessiondir %s: Free space %f GBSetting connections limit to %i, connections over limit will be %sSetting status (%s) for endpoint: %sSetting status (STARTED) for endpoint: %sSetting subject name!Setting userRequestDescription to %sShare Information:Should wait for destination to be preparedShould wait for source to be preparedShow %s help optionsShow help optionsShutdown daemonShutting down data delivery serviceShutting down data staging threadsShutting down schedulerSimpleMap: %sSimpleMap: acquired new unmap time of %u secondsSimpleMap: wrong number in unmaptime commandSkipping %s replica %sSkipping ComputingEndpoint '%s', because it has '%s' interface instead of the requested '%s'.Skipping invalid URL option %sSkipping policyAuthority VOMS AC attributeSkipping retrieved job (%s) because it was submitted via another interface (%s).Skipping service: no SchemaPath found!Skipping service: no ServicePath found!Sockets do not match on exit %i != %iSome transfers failedSorting according to free slots in queueSorting according to input data availability at targetSorting according to specified benchmark (default "specint2000")Sorting replicas according to URL mapSorting replicas according to preferred pattern %sSource URL missingSource URL not supported: %sSource URL not valid: %sSource and/or destination is index service, will resolve replicasSource check requested but failed: %sSource is invalid URLSource is mapped to %sSource is not ready, will wait %u secondsSource is the same as destinationSource modification date: %sSource or destination requires stagingSource: %sSpecified module not found in cacheSpecified overlay file (%s) does not exist.Staging jobs: %iStaging request timed out, will release requestStaging: %sStart foregroundStart testStart waiting 10 sec...StartReadingStartReading: File was not prepared properlyStartWritingStartWriting: File was not prepared properlyStarted remote Delivery at %sStarting DTR threadsStarting data staging threadsStarting helper process: %sStarting jobs processing threadStarting jobs' monitoringStarting new DTR for %sStarting querying of suspended endpoint (%s) - no other endpoints for this service is being queried or has been queried successfully.Starting sub-thread to query the endpoint on %sStarting thread to query the endpoint on %sStat: obtained modification time %sStat: obtained size %lluState name for plugin is missingStatus for service endpoint "%s" is set to inactive in ARCHERY. 
Skipping.Status of %d jobs was queried, %d jobs returned informationStopReading finished waiting for transfer_condition.StopReading starts waiting for transfer_condition.StopReading: aborting connectionStopWriting finished waiting for transfer_condition.StopWriting starts waiting for transfer_condition.StopWriting: Calculated checksum %sStopWriting: aborting connectionStopWriting: looking for checksum of %sStopped job processingStopping helper process %sStopping jobs processing threadStoring port %i for %sStoring temp proxy at %sString successfully parsed as %s.Subject Attribute %s has no known NID, skippedSubject does not start with '/'Subject name: %sSubject to match: %sSubject: %sSubmission endpointSubmitterPlugin %s could not be createdSubmitterPlugin plugin "%s" not found.Submitting job Succeeded to add Independent OID, tag %d is returnedSucceeded to add RFC proxy OID, tag %d is returnedSucceeded to add VOMS AC sequence OID, tag %d is returnedSucceeded to add anyLanguage OID, tag %d is returnedSucceeded to add inheritAll OID, tag %d is returnedSucceeded to authenticate SAMLTokenSucceeded to authenticate UsernameTokenSucceeded to authenticate X509TokenSucceeded to change password on MyProxy serverSucceeded to change trusts to: %sSucceeded to convert PrivateKeyInfo to EVP_PKEYSucceeded to destroy credential on MyProxy serverSucceeded to export PKCS12Succeeded to generate public/private key pairSucceeded to get a proxy in %s from MyProxy server %sSucceeded to get credentialSucceeded to get info from MyProxy serverSucceeded to import certificateSucceeded to import private keySucceeded to initialize NSSSucceeded to load PrivateKeyInfoSucceeded to output certificate to %sSucceeded to output the certificate request into %sSucceeded to put a proxy onto MyProxy serverSucceeded to send DelegationService: %s and DelegationID: %s info to peer serviceSucceeded to sign the proxy certificateSucceeded to verify the signature under Succeeded to verify the signature under Succeeded to verify the signed certificateSupplied username %s does not match mapped username %sSupported Profiles:Supports advance reservationsSupports bulk submissionSupports preemptionSuspended jobs: %iSuspending querying of endpoint (%s) since the service at the endpoint is already being queried, or has been queried.Synchronizing the local list of active jobs with the information in the information system can result in some inconsistencies. 
Very recently submitted jobs might not yet be present, whereas jobs very recently scheduled for deletion can still be present.Syntax error in 'notify' attribute value ('%s'), it contains unknown state flagsSyntax error in 'notify' attribute value ('%s'), it must contain an email addressSyntax error in 'notify' attribute value ('%s'), it must only contain email addresses after state flag(s)System configuration file (%s or %s) does not exist.System configuration file (%s) contains errors.System configuration file (%s) does not exist.TCP client process calledTCP executor is removedTLS provides no identity, going for OTokensTURL %s cannot be handledTarget %s does not match requested interface(s).Target %s removed by FastestQueueBroker, doesn't report number of free slotsTarget %s removed by FastestQueueBroker, doesn't report number of total slotsTarget %s removed by FastestQueueBroker, doesn't report number of waiting jobsTechnology: %sTemporary service errorTest failed, no more possible targetsTest submitted with jobid: %sTest was defined with ID %d, but some error occurred during parsing it.The "FreeSlotsWithDuration" attribute published by "%s" is wrongly formatted. Ignoring it.The 'sort' and 'rsort' flags cannot be specified at the same time.The BIO for output is NULLThe CA certificates directory is required for contacting VOMS and MyProxy servers.The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s).The ComputingEndpoint doesn't advertise its Quality Level.The ComputingEndpoint doesn't advertise its Serving State.The ComputingEndpoint has no URL.The ComputingService doesn't advertise its Interface.The ComputingService doesn't advertise its Quality Level.The MyProxy period that you set: %s can't be recognized.The NSS database can not be detected in the Firefox profileThe Response is not going to this endThe Service advertises no Health State.The Service doesn't advertise its Type.The StatusCode is SuccessThe VOMS AC period that you set: %s can't be recognized.The VOMS server with the information: %s can not be reached, please make sure it is available.The arccat command performs the cat command on the stdout, stderr or grid manager's error log of the job.The arcclean command removes a job from the computing resource.The arccp command copies files to, from and between grid storage elements.The arcget command is used for retrieving the results from a job.The arcinfo command is used for obtaining the status of computing resources on the Grid.The arckill command is used to kill running jobs.The arcls command is used for listing files in grid storage elements and file index catalogues.The arcmkdir command creates directories on grid storage elements and catalogs.The arcproxy command creates a proxy from a key/certificate pair which can then be used to access grid resources.The arcrename command renames files on grid storage elements.The arcrm command deletes files on grid storage elements.The arcstat command is used for obtaining the status of jobs that have been submitted to Grid enabled resources.The arcsub command is used for submitting jobs to Grid enabled computing resources.The arctest command is used for testing clusters as resources.The available CRL has expiredThe available CRL is not yet validThe brokerarguments attribute can only be used in conjunction with the brokername attributeThe certificate with subject %s is not validThe cluster XRSL attribute is currently unsupported.The credential to be signed contains no requestThe credential to be signed is NULLThe 
credential's private key has already been initializedThe default configuration file (%s) is not a regular file.The delegated credential got from delegation service is stored into path: %sThe delegated credential got from path: %sThe downtime of the target (%s) is not published. Keeping target.The end time that you set: %s can't be recognized.The end time that you set: %s is before start time: %s.The endpoint (%s) is not supported by this plugin (%s)The endpoint of delegation service should be configuredThe file %s is currently locked with a valid lockThe first supported interface of the plugin %s is an empty string, skipping the plugin.The following jobs were not submitted:The interface of this endpoint (%s) is unspecified, will try all possible pluginsThe job description also can be a file or a string in ADL or XRSL format.The keybits constraint is wrong: %s.The name of the private key to delete is emptyThe old GSI proxies are not supported anymore. Please do not use -O/--old option.The payload of incoming message is emptyThe payload of outgoing message is emptyThe period that you set: %s can't be recognized.The plugin %s does not support any interfaces, skipping it.The policy file setup for simplelist.pdp does not exist, please check location attribute for simplelist PDP node in service configurationThe policy language: %s is not supportedThe private key for signing is not initializedThe process owning the lock on %s is no longer running, will remove lockThe request has passed the policy evaluationThe signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign certificate requestsThe specified Globus attribute (%s) is not supported. %s ignored.The start time that you set: %s can't be recognized.The start, end and period can't be set simultaneouslyThe subject does not match the issuer name + proxy CN entryThe value of the acl XRSL attribute isn't valid XML.The value of the ftpthreads attribute must be a number from 1 to 10The value of the keysize attribute in the configuration file (%s) was only partially parsedThe value of the timeout attribute in the configuration file (%s) was only partially parsedThere are %d NSS base directories where the certificate, key, and module databases liveThere are %d RequestItemsThere are %d requests, which satisfy at least one policyThere are %d servers with the same name: %s in your vomses file, but none of them can be reached, or can return a valid message.There are %d user certificates existing in the NSS databaseThere are no endpoints in registry that match requested info endpoint typeThere are no endpoints in registry that match requested submission endpoint typeThere is %d subjects, which satisfy at least one policyThere is no Delegated X509 token in the responseThere is no Format delegated token in the responseThere is no Format request in the responseThere is no Id or X509 request value in the responseThere is no Id or X509 token value in the responseThere is no SOAP connection chain configuredThere is no SOAP responseThere is no UpdateCredentialsResponse in responseThere is no X509 request in the responseThere is no certificate named %s found, the certificate could be removed when generating CSRThere is no digest in issuer's private key objectThere is no local LRMS ID. 
Message will not be written to BLAH log.There is no responseThere was a problem during post-transfer destination handling after error: %sThere was a problem during post-transfer source handling: %sThere was no HTTP responseThere was no SOAP responseThird party transfer is not supported for these endpointsThird party transfer was requested but the corresponding plugin could not be loaded. Is the GFAL plugin installed? If not, please install the packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. Depending on your type of installation the package names might differ.This INFO message should also be seenThis INFO message should be seenThis VERBOSE message should not be seenThis VERBOSE message should now be seenThis instance was already deletedThis message goes to initial destinationThis message goes to per-thread destinationThis process already owns the lock on %sThis seems like a temporary error, please try again laterThis tiny tool can be used for testing the JobDescription's conversion abilities.Thread exited with Glib error: %sThread exited with generic exception: %sTime left for AC: %sTime left for AC: AC has expiredTime left for AC: AC is not valid yetTime left for proxy: %sTime left for proxy: Proxy expiredTime left for proxy: Proxy not valid yetTimed out while waiting for cache lockTimeout connecting to %s(%s):%i - %i sTimeout has expired, will remove lock file %sTimeout waiting for Globus callback - leaking connectionTimeout waiting for mkdirTo recover missing jobs, run arcsyncToo many arguments in configurationToo many connections - dropping new oneToo many connections - waiting for old to closeToo many failures to obtain checksum - giving upToo many files in one request - please try again with fewer filesTool for writing the grami file representation of a job description file.Total jobs: %iTotal logical CPUs: %iTotal number of jobs found: Total number of new jobs found: Total physical CPUs: %iTotal slots: %iTransfer FAILED: %sTransfer cancelled successfullyTransfer completeTransfer failedTransfer failed: %sTransfer finished: %llu bytes transferred %sTransfer from %s to %sTransfer killed after %i seconds without communicationTransfer succeededTransfer timed outTrusted CAs:Trying all available interfacesTrying next replicaTrying to check X509 cert with check_cert_typeTrying to connect %s(%s):%dTrying to listen on %s:%s(%s)Trying to listen on TCP port %s(%s)Trying to start suspended endpoint (%s)Trying to submit directly to endpoint (%s)Trying to submit endpoint (%s) using interface (%s) with plugin (%s).Two input files have identical name '%s'.Type is dir, calling srmRmDirType is file, calling srmRmType: %sTypes of execution services that %s is able to submit jobs to:Types of local information services that %s is able to collect information from:Types of local information services that %s is able to collect job information from:Types of registry services that %s is able to collect information from:Types of services that %s is able to manage jobs at:URLURL %s disagrees with stored SRM info, testing new infoURL is mapped to local access - checking permissions on original URLURL is mapped to: %sURL is not valid: %sURL option %s does not have format name=valueURL protocol is not urllist: %sURL: %sUnAuthorized from xacml.pdpUnable to adapt job description to any resource, no resource information could be obtained.Unable to add event: cannot find AAR for job %s in accounting database.Unable to copy example configuration from existing configuration (%s)Unable to create %s 
directory.Unable to create data base (%s)Unable to create directory for storing results (%s) - %sUnable to create index for jobs table in data base (%s)Unable to create jobs table in data base (%s)Unable to create jobs_new table in data base (%s)Unable to detect format of job record.Unable to detect if issuer certificate is installed.Unable to determine certificate informationUnable to determine error (%d)Unable to download job (%s), no JobControllerPlugin plugin was set to handle the job.Unable to drop jobs in data base (%s)Unable to find file size of %sUnable to handle job (%s), no interface specified.Unable to handle job (%s), no plugin associated with the specified interface (%s)Unable to initialise connection to destination: %sUnable to initialise connection to source: %sUnable to initialize handler for %sUnable to list files at %sUnable to load ARC configuration file.Unable to load BrokerPlugin (%s)Unable to load plugin (%s) for interface (%s) when trying to submit job description.Unable to locate the "%s" plugin. Please refer to installation instructions and check if package providing support for "%s" plugin is installedUnable to match target, marking it as not matching. Broker not valid.Unable to open job list file (%s), unknown formatUnable to parse job description input: %sUnable to parse the specified verbosity (%s) to one of the allowed levelsUnable to parse.Unable to prepare job description according to needs of the target resource (%s).Unable to prepare job description according to needs of the target resource.Unable to read job information from file (%s)Unable to register job submission. Can't get JobDescription object from Broker, Broker is invalid.Unable to rename jobs table in data base (%s)Unable to retrieve list of job files to download for job %sUnable to select middlewareUnable to select operating system.Unable to select runtime environmentUnable to sort ExecutionTarget objects - Invalid Broker object.Unable to sort added jobs. The BrokerPlugin plugin has not been loaded.Unable to submit job. Failed to assign delegation to job description.Unable to submit job. 
Job description is not valid in the %s format: %sUnable to transfer from jobs to jobs_new in data base (%s)Unable to truncate job database (%s)Unable to write 'output' file: %sUnable to write grami file: %sUnable to write records into job database (%s): Id "%s"Unable to write to p12 fileUnauthorizedUnauthorized from remote pdp serviceUnexpected RSL typeUnexpected argument for 'all' rule - %sUnexpected argumentsUnexpected arguments suppliedUnexpected delegation location from delegation endpoint - %s.Unexpected immediate completion: %sUnexpected name returned in Rucio response: %sUnexpected path %s returned from serverUnexpected response code from delegation endpoint - %uUnexpected response code from delegation endpoint: %u, %s.Uniq is adding service coming from %sUniq is ignoring service coming from %sUniq is replacing service coming from %s with service coming from %sUnknown LDAP scope %s - using baseUnknown XRSL attribute: %s - Ignoring it.Unknown attribute %s in common section of configuration file (%s), ignoring itUnknown channel %s for stdio protocolUnknown credential type %s for URL pattern %sUnknown element in Globus signing policyUnknown errorUnknown key or hash typeUnknown key or hash type of issuerUnknown log level %sUnknown option %sUnknown rights in Globus signing policy - %sUnknown section %s, ignoring itUnknown transfer option: %sUnknown user name mapping rule %sUnregistering %sUnregistering from index service failedUnsupported URL givenUnsupported URL given: %sUnsupported destination url: %sUnsupported information endpoint type: %sUnsupported mapping policy action: %sUnsupported mapping policy option: %sUnsupported protocol in url %sUnsupported proxy policy language is requested - %sUnsupported proxy version is requested - %sUnsupported source url: %sUnsupported submission endpoint type: %sUnsupported submission interface %s. Seems arc-blahp-logger need to be updated accordingly. Please submit the bug to bugzilla.Untrusted self-signed certificate in chain with subject %s and hash: %luUpdateCredentials failedUpdateCredentials: EPR contains no JobIDUpdateCredentials: failed to update credentialsUpdateCredentials: missing ReferenceUpdateCredentials: no job found: %sUpdateCredentials: request = %sUpdateCredentials: response = %sUpdateCredentials: wrong number of ReferenceUpdateCredentials: wrong number of elements inside ReferenceUsage:Usage: copy source destinationUse --help option for detailed usage informationUse -? 
to get usage descriptionUsed configuration file %sUsed slots: %iUser configuration file (%s) contains errors.User configuration file (%s) does not exist or cannot be loaded.User for helper program is missingUser name direct mapping is missing user name: %s.User name mapping command is emptyUser name mapping has empty authgroup: %sUser name should be specified.User pool at %s can't be opened.User pool at %s failed to perform user mapping.User pool mapping is missing user subject.User subject match is missing user subject.UserConfig class is not an objectUserConfiguration saved to file (%s)Username Token handler is not configuredUsing A-REX config file %sUsing CA certificate directory: %sUsing DH parameters from file: %sUsing OTokenUsing Rucio account %sUsing buffered transfer methodUsing cache %sUsing cached local account '%s'Using cert %sUsing certificate file: %sUsing cipher list: %sUsing cipher: %sUsing configuration at %sUsing curve with NID: %uUsing insecure data transferUsing internal transfer method of %sUsing key %sUsing key file: %sUsing local account '%s'Using next %s replicaUsing protocol options: 0x%xUsing proxy %sUsing proxy file: %sUsing secure data transferUsing session dir %sUsing space token %sUsing space token description %sVO %s doesn't match %sVOMS AC attribute is a tagVOMS AC attribute is the FQANVOMS attr %s doesn't match %sVOMS attr %s matches %sVOMS attribute is ignored due to processing/validation errorVOMS attribute parsing failedVOMS attribute validation failedVOMS: AC has expiredVOMS: AC is not complete - missing Serial or Issuer informationVOMS: AC is not yet validVOMS: AC signature verification failedVOMS: Can not allocate memory for parsing ACVOMS: Can not allocate memory for storing the order of ACVOMS: Can not find AC_ATTR with IETFATTR typeVOMS: Can not parse ACVOMS: Cannot find certificate of AC issuer for VO %sVOMS: DN of holder in AC: %sVOMS: DN of holder: %sVOMS: DN of issuer: %sVOMS: FQDN of this host %s does not match any target in ACVOMS: The lsc file %s can not be openVOMS: The lsc file %s does not existVOMS: authorityKey is wrongVOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions must be presentVOMS: can not verify the signature of the ACVOMS: cannot validate AC issuer for VO %sVOMS: case of multiple IETFATTR attributes not supportedVOMS: case of multiple policyAuthority not supportedVOMS: create FQAN: %sVOMS: create attribute: %sVOMS: directory for trusted service certificates: %sVOMS: failed to parse attributes from ACVOMS: failed to verify AC signatureVOMS: missing AC partsVOMS: problems while parsing information in ACVOMS: the DN in certificate: %s does not match that in trusted DN list: %sVOMS: the Issuer identity in certificate: %s does not match that in trusted DN list: %sVOMS: the attribute name is emptyVOMS: the attribute qualifier is emptyVOMS: the attribute value for %s is emptyVOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRINGVOMS: the format of policyAuthority is unsupported - expecting URIVOMS: the grantor attribute is emptyVOMS: the holder information in AC is wrongVOMS: the holder issuer name is not the same as that in ACVOMS: the holder issuerUID is not the same as that in ACVOMS: the holder name in AC is not related to the distinguished name in holder certificateVOMS: the holder serial number %lx is not the same as the serial number in AC %lx, the holder certificate that is used to create a voms proxy could be a proxy certificate with a different serial number as the original EEC 
certVOMS: the holder serial number is: %lxVOMS: the issuer information in AC is wrongVOMS: the issuer name %s is not the same as that in AC - %sVOMS: the only supported critical extension of the AC is idceTargetsVOMS: the serial number in AC is: %lxVOMS: the serial number of AC INFO is too long - expecting no more than 20 octetsVOMS: there is no constraints of trusted voms DNs, the certificates stack in AC will not be checked.VOMS: trust chain to check: %s VOMS: unable to determine hostname of AC from VO name: %sVOMS: unable to extract VO name from ACVOMS: unable to match certificate chain against VOMS trusted DNsVOMS: unable to verify certificate chainVOMS: unsupported time format in AC - expecting GENERALIZED TIMEValid for: %sValid for: Proxy expiredValid for: Proxy not validValid until: %sValue of 'count' attribute must be an integerValue of 'countpernode' attribute must be an integerValue of 'exclusiveexecution' attribute must either be 'yes' or 'no'Value of attribute '%s' expected not to be emptyValue of attribute '%s' expected to be a stringValue of attribute '%s' expected to be single valueValue of attribute '%s' has wrong sequence length: Expected %d, found %dValue of attribute '%s' is not a stringValue of attribute '%s' is not sequenceVariable name (%s) contains invalid character (%s)Variable name expectedVersion in Listen element can't be recognizedWARNING: The end time that you set: %s is before current time: %sWARNING: The start time that you set: %s is before current time: %sWaiting ends.Waiting for bufferWaiting for globus handle to settleWaiting for lock on file %sWaiting for lock on job list file %sWaiting for main job processing thread to exitWaiting for responseWaiting jobs: %iWaking upWarning: Failed listing files but some information is obtainedWarning: Failed removing jobs from file (%s)Warning: Failed to write job information to file (%s)Warning: Failed to write local list of jobs into file (%s), jobs list is destroyedWarning: Job not found in job list: %sWarning: Some jobs were not removed from serverWarning: Unable to create job list file (%s), jobs list is destroyedWarning: Unable to open job list file (%s), unknown formatWarning: Unable to read local list of jobs from file (%s)Warning: Unable to truncate local list of jobs in file (%s)Warning: Using SRM protocol v1 which does not support space tokensWas expecting %s at the beginning of "%s"Watchdog (re)starting applicationWatchdog detected application exitWatchdog detected application exit due to signal %uWatchdog detected application exited with code %uWatchdog detected application timeout or error - killing processWatchdog exiting because application was purposely killed or exited itselfWatchdog failed to kill application - giving up and exitingWatchdog failed to wait till application exited - sending KILLWatchdog fork failed: %sWatchdog starting monitoringWe only support CAs in Globus signing policy - %s is not supportedWe only support X509 CAs in Globus signing policy - %s is not supportedWe only support globus conditions in Globus signing policy - %s is not supportedWe only support subjects conditions in Globus signing policy - %s is not supportedWhen specifying 'countpernode' attribute, 'count' attribute must also be specifiedWill %s in destination index serviceWill calculate %s checksumWill clean up pre-registered destinationWill download to cache file %sWill not map to 'root' account by defaultWill process cacheWill release cache locksWill remove %s on service %s.Will retry without cachingWill use bulk 
requestWill wait 10sWill wait around %isWiping and re-creating whole storageWorking area free size: %i GBWorking area is not shared among jobsWorking area is shared among jobsWorking area life time: %sWorking area total size: %i GBWriting the info to the BLAH parser log: %sWrong directory in %sWrong format of the "FreeSlotsWithDuration" = "%s" ("%s")Wrong language requested: %sWrong number in defaultttl commandWrong number in maxjobdesc commandWrong number in maxjobs: %sWrong number in maxrerun commandWrong number in urdelivery_frequency: %sWrong number in wakeupperiod: %sWrong number of arguments givenWrong number of arguments!Wrong number of objects (%i) for stat from ftp: %sWrong number of parameters specifiedWrong option in %sWrong option in delegationdbWrong option in fixdirectoriesWrong ownership of certificate file: %sWrong ownership of key file: %sWrong ownership of proxy file: %sWrong permissions of certificate file: %sWrong permissions of key file: %sWrong permissions of proxy file: %sWrong service record field "%s" found in the "%s"Wrote request into a fileWrote signed EEC certificate into a fileWrote signed proxy certificate into a fileX509 Token handler is not configuredXACML request: %sXML config file %s does not existYou are about to remove jobs from the job list for which no information could be found. NOTE: Recently submitted jobs might not have appeared in the information system, and this action will also remove such jobs.You may try to increase verbosity to get more information.Your identity: %sYour issuer's certificate is not installedYour proxy is valid until: %s[ADLParser] %s element must be boolean.[ADLParser] AccessControl isn't valid XML.[ADLParser] Benchmark is not supported yet.[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number.[ADLParser] CreationFlag value %s is not supported.[ADLParser] CredentialService must contain valid URL.[ADLParser] Missing Name element or value in ParallelEnvironment/Option element.[ADLParser] Missing or empty Name in InputFile.[ADLParser] Missing or empty Name in OutputFile.[ADLParser] Missing or wrong value in DiskSpaceRequirement.[ADLParser] Missing or wrong value in IndividualCPUTime.[ADLParser] Missing or wrong value in IndividualPhysicalMemory.[ADLParser] Missing or wrong value in IndividualVirtualMemory.[ADLParser] Missing or wrong value in NumberOfSlots.[ADLParser] Missing or wrong value in ProcessesPerSlot.[ADLParser] Missing or wrong value in SlotsPerHost.[ADLParser] Missing or wrong value in ThreadsPerProcess.[ADLParser] Missing or wrong value in TotalCPUTime.[ADLParser] Missing or wrong value in WallTime.[ADLParser] NetworkInfo is not supported yet.[ADLParser] NodeAccess value %s is not supported yet.[ADLParser] Only email Prorocol for Notification is supported yet.[ADLParser] Optional for %s elements are not supported yet.[ADLParser] Root element is not ActivityDescription [ADLParser] The NumberOfSlots element should be specified, when the value of useNumberOfSlots attribute of SlotsPerHost element is "true".[ADLParser] Unsupported EMI ES state %s.[ADLParser] Unsupported URL %s for RemoteLogging.[ADLParser] Unsupported internal state %s.[ADLParser] Wrong URI specified in Source - %s.[ADLParser] Wrong URI specified in Target - %s.[ADLParser] Wrong time %s in ExpirationTime.[ADLParser] priority is too large - using max value 100[filename ...][job ...][job description ...][job description input][resource ...]a file containing a list of jobIDsadd_word failureadvertisedvo parameter is emptyall for 
attentionall jobsarc.confbrokerbuffer: error : %s, read: %s, write: %sbuffer: read EOF : %sbuffer: write EOF: %scache file: %scancelledceceID prefix is set to %scheck readability of object, does not show any information about objectcheck_ftp: failed to get file's modification timecheck_ftp: failed to get file's sizecheck_ftp: globus_ftp_client_get failedcheck_ftp: globus_ftp_client_modification_time failedcheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size failedcheck_ftp: obtained modification date: %scheck_ftp: obtained size: %llicheck_ftp: timeout waiting for modification_timecheck_ftp: timeout waiting for partial getcheck_ftp: timeout waiting for sizeclass name: %sclose failed: %sclosing file %s failed: %scomputingconfiguration file (default ~/.arc/client.conf)d2i_X509_REQ_bio faileddata chunk: %llu %lludebugleveldefine the requested format (nordugrid:xrsl, emies:adl)delete_ftp: globus_ftp_client_delete faileddelete_ftp: globus_ftp_client_rmdir faileddelete_ftp: timeout waiting for deletedestinationdestination.next_locationdirdirectorydirnamedisplay all available metadatadisplay more information on each jobdndo not ask for verificationdo not collect information, only convert jobs storage formatdo not print list of jobsdo not print number of jobs in each statedo not submit - dump job description in the language accepted by the targetdo not transfer, but register source into destination. destination must be a meta-url.do not try to force passive transferdownloaddownload directory (the job directory will be created in this directory)downloadsdroppedecho: Unauthorizedempty input payloadempty next chain elementend of string encountered while processing type of subject name element #%derror converting number from bin to BIGNUMerror converting serial to ASN.1 formatescape character at end of stringexitfailed to read data chunkfailed to read data tagfilefile %s is not accessiblefile namefile name too longfilenamefilepathfinishedforce download (overwrite existing job directory)forcedefaultvoms parameter is emptyformatfrom the following endpoints:fsync of file %s failed: %sftp_check_callbackftp_complete_callback: error: %sftp_complete_callback: successftp_get_complete_callback: Failed to get ftp fileftp_get_complete_callback: successftp_put_complete_callback: successftp_read_callback: Globus error: %sftp_read_callback: delayed data chunk: %llu %lluftp_read_callback: failure: %sftp_read_callback: successftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%uftp_read_callback: too many unexpected out of order chunksftp_read_callback: unexpected data out of order: %llu != %lluftp_read_thread: Globus error: %sftp_read_thread: data callback failed - aborting: %sftp_read_thread: exitingftp_read_thread: failed to register Globus buffer - will try later: %sftp_read_thread: failed to register buffersftp_read_thread: failed to release buffersftp_read_thread: failed to release buffers - leakingftp_read_thread: for_read failed - aborting: %sftp_read_thread: get and register buffersftp_read_thread: too many registration failures - abort: %sftp_read_thread: waiting for buffers releasedftp_read_thread: waiting for eofftp_write_callback: failure: %sftp_write_callback: success %sftp_write_thread: Globus error: %sftp_write_thread: data callback failed - abortingftp_write_thread: data out of order in stream mode: %llu != %lluftp_write_thread: exitingftp_write_thread: failed to release buffers - leakingftp_write_thread: for_write failed - abortingftp_write_thread: get and 
register buffersftp_write_thread: too many out of order chunks in stream modeftp_write_thread: waiting for buffers releasedftp_write_thread: waiting for eofftp_write_thread: waiting for transfer completegfal_close failed: %sgfal_closedir failed: %sgfal_listxattr failed, no replica information can be obtained: %sgfal_mkdir failed (%s), trying to write anywaygfal_mkdir failed: %sgfal_open failed: %sgfal_opendir failed: %sgfal_read failed: %sgfal_rename failed: %sgfal_rmdir failed: %sgfal_stat failed: %sgfal_unlink failed: %sgfal_write failed: %sglobalid is set to %sglobus_ftp_client_operationattr_set_authorization: error: %sgm-jobs displays information on current jobs in the system.gmetric_bin_path empty in arc.conf (should never happen the default value should be used)headnode is set to %shostname[:port] of MyProxy serverhourhoursidincoming message is not SOAPindexinform about changes in particular job (can be used multiple times)init_handle: globus_ftp_client_handle_init failedinit_handle: globus_ftp_client_handleattr_init failedinit_handle: globus_ftp_client_handleattr_set_gridftp2 failedinit_handle: globus_ftp_client_operationattr_init failedinit_handle: globus_ftp_client_operationattr_set_allow_ipv6 failedinit_handle: globus_ftp_client_operationattr_set_delayed_pasv failedinmsg.Attributes().getAll() = %s inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %sinput does not define operationinput is not SOAPinputcheck checks that input files specified in the job description are available and accessible using the credentials in the given proxy file.instead of the status only the IDs of the selected jobs will be printedintinterfaceinterface is set to %sjob idjob_description_file [proxy_file]jobdescription file describing the job to be submittedjobdescription string describing the job to be submittedkeep the files on the server (do not clean)levellist record: %slist the available pluginslist the available plugins (protocols supported)list_files_ftp: checksum %slist_files_ftp: failed to get file's modification timelist_files_ftp: failed to get file's sizelist_files_ftp: globus_ftp_client_cksm failedlist_files_ftp: globus_ftp_client_modification_time failedlist_files_ftp: globus_ftp_client_size failedlist_files_ftp: looking for checksum of %slist_files_ftp: looking for modification time of %slist_files_ftp: looking for size of %slist_files_ftp: no checksum information possiblelist_files_ftp: no checksum information returnedlist_files_ftp: no checksum information supportedlist_files_ftp: timeout waiting for cksumlist_files_ftp: timeout waiting for modification_timelist_files_ftp: timeout waiting for sizeload serial from %s failurelocalid is set to %slong format (more information)lrms is emptymail parameter is emptymake parent directories as neededmalloc errormeta file %s is emptyminuteminutesmkdir_ftp: making %smkdir_ftp: timeout waiting for mkdirmodule name: %snnew_payload %snext chain element callednext element of the chain returned empty payloadnext element of the chain returned error statusnext element of the chain returned error status: %snext element of the chain returned invalid payloadnext element of the chain returned invalid/unsupported payloadnext element of the chain returned no payloadnext element of the chain returned unknown payload - passing throughnumbernumber of retries before failing file transferold_url new_urlonly select jobs whose status is statusstroperate recursivelyoperate recursively up to specified levelorderoutpayload %soutput is not SOAPoutput requested elements (jobs list, 
delegation ids and tokens) to fileowner subject is set to %sp12 file is emptypassword destination=password sourcepathpath to local cache (use to put file into cache)path to the VOMS server configuration filepath to the certificate file, it can be either PEM, DER, or PKCS12 formattedpath to the private key file, if the certificate is in PKCS12 format, then no need to give private keypath to the proxy filepath to the top directory of VOMS *.lsc files, only needed for the VOMS client functionalitypath to the trusted certificate directory, only needed for the VOMS client functionalityperform third party transfer, where the destination pulls from the source (only available with GFAL plugin)physical location to write to when destination is an indexing service. Must be specified for indexing services which do not automatically generate physical locations. Can be specified multiple times - locations will be tried in order until one succeeds.pkey and rsa_key exist!plugin for transport protocol %s is not installedprint all information about this proxy.print delegation token of specified ID(s)print list of available delegation IDsprint main delegation token of specified Job ID(s)print selected information about this proxy.print state of the serviceprint summary of jobs in each transfer shareprint version informationprints info about installed user- and CA-certificatespriority is too large - using max value 100process: DELETEprocess: GETprocess: HEADprocess: POSTprocess: PUTprocess: action %s is not supported for subpath %sprocess: endpoint: %sprocess: factory endpointprocess: id: %sprocess: method %s is not supportedprocess: method %s is not supported for subpath %sprocess: method is not definedprocess: method: %sprocess: operation: %sprocess: request=%sprocess: response=%sprocess: schema %s is not supported for subpath %sprocess: subop: %sprocess: subpath: %sproxy constraintsput on holdqueue name is set to %sread information from specified control directoryread_thread: data read error from external process - aborting: %sread_thread: exitingread_thread: for_read failed - aborting: %sread_thread: get and register buffersread_thread: non-data tag '%c' from external process - leaving: %sregisterregistryregistry service URL with optional specification of protocolremove logical file name registration even if not all physical instances were removedremove proxyremove the job from the local list of jobs even if the job is not found in the infosysrequest to cancel job(s) with specified ID(s)request to cancel jobs belonging to user(s) with specified subject name(s)request to clean job(s) with specified ID(s)request to clean jobs belonging to user(s) with specified subject name(s)reverse sorting of jobs according to jobid, submissiontime or jobnamesave serial to %s failuresecondsecondssecondsselect broker method (list available brokers with --listplugins flag)setting file %s to size %llushow URLs of file locationsshow jobs where status information is unavailableshow only description of requested object, do not list content of directoriesshow only jobs of user(s) with specified subject name(s)show only jobs with specified ID(s)show progress indicatorshow status information in JSON formatshow the CE's error log of the jobshow the original job descriptionshow the specified file from job's session directoryshow the stderr of the jobshow the stdout of the job (default)shutdownskip jobs that are on a computing element with a given URLskip the service with the given URL during service discoverysort jobs according to 
jobid, submissiontime or jobnamesourcesource destinationsource.next_locationstart_readingstart_reading: helper start failedstart_reading: thread create failedstart_reading_ftpstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get failedstart_reading_ftp: globus_thread_create failedstart_writing_ftp: data chunk: %llu %llustart_writing_ftp: delayed data chunk: %llu %llustart_writing_ftp: failed to read data chunkstart_writing_ftp: failed to read data tagstart_writing_ftp: globus_thread_create failedstart_writing_ftp: helper start failedstart_writing_ftp: mkdirstart_writing_ftp: mkdir failed - still trying to writestart_writing_ftp: putstart_writing_ftp: put failedstart_writing_ftp: thread create failedstart_writing_ftp: waiting for data chunkstart_writing_ftp: waiting for data tagstart_writing_ftp: waiting for some buffers sentstatusstatusstrstop_reading: exiting: %sstop_reading: waiting for transfer to finishstop_reading_ftp: aborting connectionstop_reading_ftp: exiting: %sstop_reading_ftp: waiting for transfer to finishstringsubmit jobs as dry run (no submission to batch system)submit test job given by the numbertest job runtime specified by the numberthe IDs of the submitted jobs will be appended to this filethe file storing information about active jobs (default %s)this option is not functional (old GSI proxies are not supported anymore)timeout in seconds (default 20)treat requested object as directory and always try to list contenttruncate the joblist before synchronizingtypeunable to load number from: %sunregisteruploaduploadsurlurl [url ...]urllist %s contains invalid URL: %suse GSI communication protocol for contacting VOMS servicesuse old communication protocol for contacting VOMS services instead of RESTful access use passive transfer (off by default if secure is on, on by default if secure is not requested)use secure transfer (insecure by default)use specified configuration fileuse the jobname instead of the short ID as the job directory nameusername to MyProxy server (if missing subject of user certificate is used)vomswaiting for data chunkwrite_thread: exitingwrite_thread: for_write eofwrite_thread: for_write failed - abortingwrite_thread: get and pass bufferswrite_thread: out failed - abortingxrootd close failed: %sxrootd open failed: %sxrootd write failed: %sy~DataPoint: destroy ftp_handle~DataPoint: destroy ftp_handle failed - retrying~DataPoint: failed to destroy ftp_handle - leakingProject-Id-Version: Arc Report-Msgid-Bugs-To: support@nordugrid.org PO-Revision-Date: 2021-11-26 12:09+0100 Last-Translator: Oxana Smirnova Language-Team: Russian Language: ru MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-Generator: Poedit 2.3 Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);
X-Poedit-KeywordsList: msg:2;IString:1;istring:1;FindNTrans:1,2
X-Poedit-Basepath: /home/oxana/GITROOT/arc6
X-Poedit-SearchPath-0: src

[Russian (ru) msgstr translations of the msgid catalog above, by Oxana Smirnova / Russian Language-Team — the Cyrillic text in this dump is mojibake-damaged (UTF-8 decoded as Latin-1) and is not reproduced here.]
запрошенного, Ñравнение невозможноМониторинг дочерних процеÑÑов: процеÑÑ %d завершилÑÑМониторинг дочерних процеÑÑов: игнорируетÑÑ Ð·Ð°Ð±Ñ€Ð¾ÑˆÐµÐ½Ð½Ñ‹Ð¹ процеÑÑ %d (%d)Мониторинг дочерних процеÑÑов: ошибка: %iМониторинг дочерних процеÑÑов: внутренний Ñбой взаимодейÑтвиÑМониторинг дочерних процеÑÑов: обнаружен запуÑкМониторинг дочерних процеÑÑов: потерÑн процеÑÑ %d (%d)Мониторинг дочерних процеÑÑов: обнаружен ÑигналМониторинг дочерних процеÑÑов: stderr закрытМониторинг дочерних процеÑÑов: stdin закрытМониторинг дочерних процеÑÑов: stdout закрытДочерний процеÑÑ ÑƒÐ¶Ðµ запущенÐазвание клаÑÑа: %sОчиÑтка поÑле ÑбоÑ: уничтожаетÑÑ %sОтÑутÑтует точка входа в клиентÑкую цепьОтÑутÑтвует точка входа в клиентÑкую цепьПодгружены клиентÑкие компоненты цепи ÑообщенийУÑпешное прекращениеПрекращение ÑвÑзиЗакрываетÑÑ Ñоединение Ñ ÑƒÑ‡Ñ‘Ñ‚Ð½Ð¾Ð¹ базой данных SQLiteВозможно, был Ñбой про закрытииЗакрываетÑÑ ÐºÐ°Ð½Ð°Ð» чтениÑЗакрываетÑÑ ÐºÐ°Ð½Ð°Ð» Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ÐŸÐ¾Ð»ÑƒÑ‡ÐµÐ½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°: %sСобираетÑÑ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ задаче (задачи на A-REX REST)ПоÑылаетÑÑ Ð¸Ð½ÑтрукциÑКоманда: %sКомпонента %s(%s) не может быть ÑÐ¾Ð·Ð´Ð°Ð½Ð°Ð”Ð»Ñ ÐºÐ¾Ð¼Ð¿Ð¾Ð½ÐµÐ½Ñ‚Ð° не задан атрибут IDÐ”Ð»Ñ ÐºÐ¾Ð¼Ð¿Ð¾Ð½ÐµÐ½Ñ‚Ð° не задан атрибут nameÐ”Ð»Ñ ÐºÐ¾Ð¼Ð¿Ð¾Ð½ÐµÐ½Ñ‚Ð° %s(%s) отÑутÑтвует атрибут ID Ñледующей целиВычиÑÐ»Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° входа %s (тип %s) добавлена в ÑпиÑок Ð´Ð»Ñ Ð¿Ð»Ð°Ð½Ð¸Ñ€Ð¾Ð²ÐºÐ¸ заÑылкиВычиÑлительный ÑервиÑ:ВычиÑлительный ÑервиÑ: %sЦель ComputingShare (%s) Ñвно отклоненаÐе определён параметр ComputingShareName атрибута ExecutionTarget (%s)КлаÑÑ Config не ÑвлÑетÑÑ Ð¾Ð±ÑŠÐµÐºÑ‚Ð¾Ð¼ÐаÑтройки (%s) подгруженыОшибка наÑтройкиСоздан шаблонный файл наÑтроек (%s)Ðевозможно прочеÑть файл наÑтроекФайл наÑтроек иÑпорчен - название блока не заканчиваетÑÑ ]: %sФайл наÑтроек иÑпорчен - Ñлишком короткое название блока: %sÐе указан файл наÑтроекÐе указан файл наÑтроек в ConfigBlockИÑпользуемый файл конфигурацииКорневой Ñлемент наÑтроек не ÑвлÑетÑÑ Ð¡Ð¾ÐµÐ´Ð¸Ð½ÑемÑÑ Ñо Ñлужбой доÑтавки на %sСоединение Ñ %s: %sУÑтанавливаетÑÑ ÑвÑзь Ñ Ñервером VOMS (по имени %s): %s по порту: %sСодержимое: %sПреобразование не удалоÑÑŒ: %sСбой копированиÑ: %sÐевозможно уÑтановить блокировку на мета-файл %sÐе удалоÑÑŒ ÑоединитьÑÑ Ñо Ñлужбой %s: %sÐе удалоÑÑŒ преобразовать входную информацию!Ðевозможно преобразовать нагрузку!Ðе удалоÑÑŒ преобразовать значение атрибута slcs (%s) в файле наÑтроек в URL (%s)Ðе удалоÑÑŒ Ñоздать PayloadSOAP!Ðевозможно Ñоздать ÑÑылку на файл блокировки %s, потому что она уже ÑущеÑтвуетÐевозможно Ñоздать файл блокировки %s, потому что он уже ÑущеÑтвуетÐе удалоÑÑŒ Ñоздать временный файл "%s"Ðевозможно определить тип файла наÑтроек, или же он пуÑÑ‚Ðе удалоÑÑŒ определить каталог ÑеÑÑии из имени файла %sÐе удалоÑÑŒ определить верÑию ÑервераÐе удалоÑÑŒ обнаружить подходÑщую Ñлужбу доÑтавки, вынужденно иÑпользуетÑÑ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð¿ÐµÑ€ÐµÑылкаÐе удалоÑÑŒ найти подгружаемый модуль %s (%s)Ðевозможно найти подгружаемые модули по имени %s и %s (%s)Ðе удалоÑÑŒ найти деÑкриптор подгружаемого Ð¼Ð¾Ð´ÑƒÐ»Ñ Ð¿Ð¾ имени %sÐе удалоÑÑŒ найти деÑкрипторы подгружаемых модулей по имени %s или типу %sÐе удалоÑÑŒ получить контрольную Ñумму %s: %sÐевозможно обработать контрольную Ñумму %s: пропуÑкаетÑÑ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° контрольной ÑуммыÐевозможно обработать точку входа %sÐе удалоÑÑŒ подгрузить наÑтройки (%s)Ðевозможно найти модуль %s в Ñледующих меÑтах:Ðевозможно Ñоздать новый Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¿ÐµÑ€ÐµÑылки: %s: %sÐе удалоÑÑŒ получить информацию об иÑточнике: %sÐевозможно открыть файл %s Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ: %sÐе удалоÑÑŒ прочитать 
наÑтройки Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… в %sÐе удалоÑÑŒ определить ÑоÑтоÑние файла %s: %sÐе удалоÑÑŒ подтвердить доÑтоверноÑть ÑообщениÑ!Ðе удалоÑÑŒ иÑпользовать Ñертификат: %sÐевозможно разобрать Ñталонный XML: %sÐевозможно подтвердить доÑтупноÑть ÑпиÑков отзыва Ñертификатов (CRL)Страна: %sСоздан ключ RSA, теперь обрабатываетÑÑ Ð·Ð°Ð¿Ñ€Ð¾ÑСоздание клиента SOAP Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð°Ñ†Ð¸Ð¸Ð¡Ð¾Ð·Ð´Ð°Ñ‘Ñ‚ÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ HTTPСоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ pdpserviceСоздаётÑÑ ÐºÐ»Ð¸ÐµÐ½Ñ‚ SOAPСоздание и заÑылка запроÑаСоздаётÑÑ Ð±ÑƒÑ„ÐµÑ€: %lli x %iСоздаётÑÑ Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð°Ð¡Ð¾Ð·Ð´Ð°Ð½Ð¸Ðµ цепи на Ñтороне клиентаСоздание делегируемых параметров доÑтупа Ð´Ð»Ñ Ñлужбы Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ARCСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ CREAMСоздание Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð´Ð»Ñ Ñлужбы Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ CREAMСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð´Ð»Ñ Ñлужбы Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ CREAMСоздаетÑÑ Ð´Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ %sСоздаетÑÑ Ð´Ð¸Ñ€ÐµÐºÑ‚Ð¾Ñ€Ð¸Ñ %sСоздание цепи на Ñтороне ÑервиÑаСрок дейÑÑ‚Ð²Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа иÑтекает в %sПрерывание при обработке параметров доÑтупа: %sПараметры доÑтупа не инициализированыПараметры доÑтупа Ñохранены во временном файле %sСбой обработки критичеÑкого атрибута VOMSТекущие задачи в ÑиÑтеме (от PREPARING до FINISHING) на DN (%i запиÑей)Ð¢ÐµÐºÑƒÑ‰Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° ÐЕ СОСТОЯЛÐСЬ: %sÐ¢ÐµÐºÑƒÑ‰Ð°Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð° завершенаИнÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ DCAU не прошлаИнÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ DCAU не прошла: %sПрименены параметры DHDN %s не Ñовпадает Ñ %sВыделенное Ð¸Ð¼Ñ %s Ð´Ð»Ñ URL %s кÑшировано, и дейÑтвительно до %sВыделенное Ð¸Ð¼Ñ %s Ð´Ð»Ñ URL %s кÑшировано, но уже проÑроченоDN: %sÐ—Ð°Ð¿Ñ€Ð¾Ñ DTR %s Ð¾Ñ‚Ð¼ÐµÐ½Ñ‘Ð½Ð—Ð°Ð¿Ñ€Ð¾Ñ DTR %s не может быть прерванСбой запроÑа DTR %s: %sÐ—Ð°Ð¿Ñ€Ð¾Ñ DTR %s уÑпешно завершёнDTR %s завершилÑÑ Ð² ÑоÑтоÑнии %sDTR %s запроÑил прерывание, но активные передачи отÑутÑÑ‚Ð²ÑƒÑŽÑ‚Ð—Ð°Ð¿Ñ€Ð¾Ñ DTR %s ещё в процеÑÑе (передано %lluB)Ð—Ð°Ð¿Ñ€Ð¾Ñ DTR %s уже был прерванDTRGenerator обработал: %d отменённых задач, %d DTR, %d новых задачDTRGenerator ожидает обработки: %d отменённых задач, %d DTR, %d новых задачDTR готов к переÑылке, переводитÑÑ Ð² очередь на доÑтавкуDTRGenerator получил Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ñ‚Ð¼ÐµÐ½Ð¸Ñ‚ÑŒ ноль задачDTRGenerator запрошен о нуле задачDTRGenerator получил Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€Ð¸Ñ‚ÑŒ файлы Ð½ÑƒÐ»Ñ Ð·Ð°Ð´Ð°Ñ‡DTRGenerator не запущен!DTRGenerator опрошен о нуле задачDTRGenerator получил Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾Ñ‡Ð¸Ñтить ÑÑылки Ð½ÑƒÐ»Ñ Ð·Ð°Ð´Ð°Ñ‡DTRGenerator получил Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° обработку Ð½ÑƒÐ»Ñ Ð·Ð°Ð´Ð°Ñ‡DTRGenerator получил Ð·Ð°Ð¿Ñ€Ð¾Ñ ÑƒÐ´Ð°Ð»Ð¸Ñ‚ÑŒ ноль задачDTRGenerator получил ноль задачЗапроÑÑ‹ DTR Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %s вÑÑ‘ ещё иÑполнÑÑŽÑ‚ÑÑÐе удалоÑÑŒ Ñоздать дочерний демон: %sКанал передачи данных: %d.%d.%d.%d:%dКанал передачи данных: [%s]:%dПрерван цикл Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ…ÐŸÐµÑ€ÐµÐ´Ð°Ñ‡Ð° данных прерванаПередача данных прервана: %sДанные уже запиÑаны в кÑщПоÑледние запиÑи журнала DataDelivery: %sDataDelivery: %sDataMove::Transfer: ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма Ð´Ð»Ñ %s не будет вычиÑленаDataMove::Transfer: иÑпользуетÑÑ Ð·Ð°Ð´Ð°Ð½Ð½Ð°Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %sDataMove::Transfer: иÑпользуетÑÑ Ð·Ð°Ð´Ð°Ð½Ð½Ð°Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма %s:%sDataMove::Transfer: будет вычиÑлена ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма Ð´Ð»Ñ %sDataMover: циклDataMover: закончилиÑÑŒ попытки поиÑка назначений - завершениеDataMover: не запрошено повторных попыток, выходDataMover: закончилиÑÑŒ попытки поиÑка иÑточника - завершениеDataMover::Transfer : запуÑк нового 
потокаDataMover::Transfer: попытка Ñтереть/перезапиÑать назначение: %sDataPointGFAL::write_file получил на входе Ð°Ð´Ñ€ÐµÑ %d и Ñдвиг %d, проводитÑÑ Ð¿Ð¾Ð¸ÑкDataPointXrootd::write_file получил Ð°Ð´Ñ€ÐµÑ %d и Ñдвиг %d, проводитÑÑ Ð¿Ð¾Ð¸ÑкПроцеÑÑ DataStagingDelivery завершилÑÑ Ñ ÐºÐ¾Ð´Ð¾Ð¼ %iДлительноÑть по умолчанию (процеÑÑорнаÑ): %sКонÑтруктор по умолчанию клиента INTERNALХранилище по умолчанию: %sПланировщик по умолчанию (%s) недоÑтупен. При иÑпользовании %s планировщик должен быть указан Ñвным образом (Ð¾Ð¿Ñ†Ð¸Ñ -b).ДлительноÑть по умолчанию (по чаÑам): %sСбой в DelegateCredentialsInitСбой в DelegateProxyДелегированные параметры доÑтупа от Ñлужбы делегации: %sОтличительные признаки делегированных параметров доÑтупа: %sID делегированиÑ: %sÐÐ²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð½Ð° делегирование не выданаÐÐ²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ðµ пройденаСбой запроÑа Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ getProxyReqОбработчик Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð²Ð°Ð½Ð¸Ñ Ð½Ðµ наÑтроенЗавершена обработка Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ñ€Ð¾Ð»ÑŒÑŽ делегатаЗапущен обработчик Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ñ€Ð¾Ð»ÑŒÑŽ делегатаЗапущен обработчик Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ñ€Ð¾Ð»ÑŒÑŽ поручителÑСбой запроÑа Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ putProxyÐÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð°Ñ Ñ€Ð¾Ð»ÑŒ делегированиÑ: %sСлужба делегированиÑ: %sСбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñлужбе Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ARCСбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñлужбе Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ GridsiteÐеподдерживаемый тип делегированиÑ: %sDelegationStore: Ñбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð¾Ñ†ÐµÑÑом PeriodicCheckConsumers уÑтаревшего Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %s - %sDelegationStore: Ñбой Ð²Ð¾Ð·Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¸Ñ‚ÐµÑ€Ð°Ñ‚Ð¾Ñ€Ð° процеÑÑом PeriodicCheckConsumersDelegationStore: TouchConsumer не Ñмог Ñоздать файл %sОшибка удалениÑУдалён, но оÑталиÑÑŒ копии в %sСлужба доÑтавки получила новый Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR %s Ñ Ð¸Ñточником %s и назначением %sСлужба доÑтавки в %s может копировать из %sСлужба доÑтавки в %s может копировать в %sОтÑутÑтвует URL назначениÑÐеподдерживаемый URL назначениÑ: %sÐедейÑтвительный URL назначениÑ: %sФайл Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñан в кÑшÐедопуÑтимый URL целиÐазначение не ÑвлÑетÑÑ ÑƒÐºÐ°Ð·Ð°Ñ‚ÐµÐ»ÐµÐ¼, пропуÑкаетÑÑ Ñ€ÐµÐ³Ð¸ÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¸Ðазначение неготово, ÑÐ»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° через %u ÑекÐазначение: %sКаталог %s допуÑкаетÑÑ Ð´Ð»Ñ Ñлужбы %sУÑпешно удалён каталог %sСоздан каталог %s Ð´Ð»Ñ Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ ÑƒÑ‡Ñ‘Ñ‚Ð½Ð¾Ð¹ базы данных.Ðе удалоÑÑŒ вывеÑти ÑпиÑок каталогаКаталог доверÑемых агентÑтв не указан/найден; в качеÑтве такового иÑпользуетÑÑ Ñ‚ÐµÐºÑƒÑ‰Ð¸Ð¹ путьРазмер директории превышает %i файлов, придётÑÑ Ð´ÐµÐ»Ð°Ñ‚ÑŒ неÑколько запроÑовРазмер директории Ñлишком велик Ð´Ð»Ñ Ñ€Ð°Ñпечатки в одном запроÑе, придётÑÑ Ð´ÐµÐ»Ð°Ñ‚ÑŒ неÑколько запроÑовКаталог: %sСортировка Ñ Ð¸Ñпользованием пользовательÑкого python-Ñкрипта планировщикаÐет поддержки предварительного бронированиÑÐе поддерживает групповую заÑылкуУпреждение не поддерживаетÑÑЗагружаетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð°: %sКонец проÑтоÑ: %sÐачало проÑтоÑ: %sРаÑпечатка опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ оборвана: Ðевозможно подгрузить планировщик %sÐ’ LFC обнаружена Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ‡Ð½Ð°Ñ ÐºÐ¾Ð¿Ð¸Ñ: %sEACCES Ошибка Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° блокировки %s: %sПрименены параметры ECDHСбой EPSVСбой EPSV: %sОшибка: %sОШИБКÐ: не удалоÑÑŒ получить информациюОШИБКÐ: Ðе удалоÑÑŒ получить информацию через Ñледующие точки входа:ОШИБКÐ: Сбой запиÑи информации о задаче в файл (%s)ОШИБКÐ: Обрыв заÑылки задачи, так как ни один из реÑурÑов не предоÑтавил информациюОШИБКÐ: Одна или неÑколько задач не были запущены.ОШИБКÐ: не удалоÑÑŒ подгрузить 
планировщик %sERROR: файл наÑтроек VOMS %s Ñодержит Ñлишком длинную Ñтроку. МакÑимально допуÑÑ‚Ð¸Ð¼Ð°Ñ Ð´Ð»Ð¸Ð½Ð°: %i знаков.ERROR: файл наÑтроек VOMS %s Ñодержит Ñлишком много Ñтрок. МакÑимально допуÑтимое количеÑтво: %i.ERROR: Ñтрока наÑтройки VOMS Ñодержит избыточное чиÑло Ñлементов. ОжидаетÑÑ 5 или 6. Строка: %sERROR: Ñбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° %s при Ñканировании наÑтроек VOMS.ERROR: каталог Ñодержит Ñлишком много уровней Ð´Ð»Ñ ÑÐºÐ°Ð½Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ð°Ñтроек VOMS. МакÑимально допуÑтимое чиÑло уровней: %i.Вызван 'Process' EchoService (Python)Вызван Python-конÑтруктор EchoServiceВызван Python-деÑтруктор EchoServiceEchoService (python) получил: %s EchoService (Python) Ñодержит приÑтавку %(prefix)s и ÑÑƒÑ„Ñ„Ð¸ÐºÑ %(suffix)sEchoService (python) request_namespace: %sЗапуÑк теÑта потоков Ñлужбы EchoService (python)ЗапуÑк теÑта потоков Ñлужбы EchoService (python), Ð¸Ñ‚ÐµÑ€Ð°Ñ†Ð¸Ñ %(iteration)s %(status)sЭлемент "%s" в профиле игнорируетÑÑ: значение атрибута "inidefaultvalue" не может быть задано, когда не заданы Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð¾Ð² "inisections" и "initag".Элемент "%s" в профиле игнорируетÑÑ: значение атрибута "inisections" не может быть пуÑтой Ñтрокой.Элемент "%s" в профиле игнорируетÑÑ: значение атрибута "initag" не может быть пуÑтой Ñтрокой.Элемент "%s" в профиле игнорируетÑÑ: значение атрибута "initype" не может быть пуÑтой Ñтрокой.FileCache возвратил пуÑтое Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð°ÐŸÑƒÑÑ‚Ð°Ñ Ð½Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° на входе!ПуÑтое иÑходное опиÑание задачиПуÑÑ‚Ð°Ñ Ð½Ð°Ð³Ñ€ÑƒÐ·ÐºÐ°!ПуÑÑ‚Ð°Ñ ÑтрокаÐе найдено окончание комментариÑÐе обнаружено конца Ñтроки в двойных кавычкахÐе обнаружено конца Ñтроки в одиночных кавычкахÐе обнаружено конца Ñтроки, выделенной пользовательÑким ограничителем (%s)Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ точке входа:Ошибка доÑтупа к кÑшированному файлу %s: %sСбой при добавлении интерфейÑа ÑвÑзи в %s. Возможно, уже запущен другой процеÑÑ A-REX.Сбой при добавлении интерфейÑа ÑвÑзи в %s. Возможно, отÑутÑтвует доÑтуп к директории.Ошибка при Ñоздании кÑшаОшибка про Ñоздании кÑша. Возможно, оÑталиÑÑŒ Ñтарые блокировки.Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s: %sОшибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° блокировки %s: %sОшибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð½ÐµÐ¾Ð±Ñ…Ð¾Ð´Ð¸Ð¼Ñ‹Ñ… каталогов Ð´Ð»Ñ %sОшибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ‚Ñ€ÐµÐ±ÑƒÐµÐ¼Ñ‹Ñ… каталогов: %sОшибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð³Ð¾ файла %s: %sОбнаружена ошибка при разборе Ñертификата атрибутаОшибка в ÑвÑзи Ñ Ð¸Ñтечением Ñрока годноÑти предоÑтавленных параметров доÑтупаОшибка при проверке файла. Ðевозможно выполнить операцию stat Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s: %sОшибка при Ñверке: размер локального файла %llu не ÑоответÑтвует размеру файла-иÑточника %llu Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %sОшибка проверки профилÑОшибка SQLite: %sОшибка SQLite: %s: %sОшибка Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ от statvfs Ð´Ð»Ñ Ð¿ÑƒÑ‚Ð¸ %s: %sÐе удалоÑÑŒ получить ÑпиÑок файлов (в list)Ошибка при обработке кÑша, попытаемÑÑ Ð±ÐµÐ· кÑшированиÑОшибка кÑшированиÑОшибка в файле блокировки %s, неÑÐ¼Ð¾Ñ‚Ñ€Ñ Ð½Ð° то, что Ñоздание ÑÑылки прошло без ÑбоевОшибка при инициализации хранилища X509Сбой при Ñоздании базы данных Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð² %s. Возможно, отÑутÑтвует доÑтуп к директории. 
Возвращена ошибка %s.Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÑÑылки на файл из кÑша из %s.Ðе удалоÑÑŒ ÑвÑзать временный файл %s Ñ Ñ„Ð°Ð¹Ð»Ð¾Ð¼ блокировки %s: %sОшибка перечиÑÐ»ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° блокировки %s: %sОшибка загрузки Ñгенерированных наÑтроекОшибка поиÑка атрибутов мета-файла кÑша %s: %sОшибка поиÑка маркёров проÑтранÑтва памÑти, ÑоответÑтвующих опиÑанию %sÐомер ошибки в контекÑте хранилища: %iОшибка Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð±Ð°Ð·Ñ‹ данных учёта задачОшибка при открытии файла блокировки %s при предварительной проверке: %sОшибка Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð¼ÐµÑ‚Ð°-файла %sОшибка Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ð¼ÐµÑ‚Ð°-файла Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи %sОшибка разбора переопределённого ÑиÑтемой атрибута executables.Ошибка ÑвÑзи Ñо Ñлужбой доÑтавки на %s: %s: %sОшибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ из файла %s:%sОшибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° блокировки %s: %sОшибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¼ÐµÑ‚Ð°-файла %s: %sОшибка региÑтрации копии, переход к завершению размещениÑОшибка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÑшированного файла %s: %sОшибка Ñмены uidОшибка ÑброÑа иÑходÑщей нагрузкиОшибка при извлечении открытого ключа из запроÑаОшибка при загрузке файла наÑтроек раÑширений: %sОшибка при загрузке файла наÑтроек раÑширений: %s в Ñтроке: %dОшибка при чтении каталога %s: %sОшибка при наÑтройке кÑшаОшибка при наÑтройке кÑша: %sОшибка формата в файле блокировки %sОшибка в файле такта: %sОшибка обÑÐ»ÑƒÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾Ñле переÑылки: %sОшибка в файле иÑточника, пробуем другую копиюОшибка запиÑи иÑходного ÑертификатаОшибка запиÑи файла информации SRM %sОшибка запиÑи в файл блокировки %s: %sОшибка: невозможно открыть файл политик: %sОшибка: меÑтонахождение политик: %s не ÑвлÑетÑÑ Ñтандартным файломОпиÑание ошибкиОценка уÑреднённого времени ожиданиÑ: %sОценка худшего времени ожиданиÑ: %sОбработчик не поддерживает подгружаемые алгоритмы комбинированиÑОбработчик не поддерживает указанный алгоритм ÐºÐ¾Ð¼Ð±Ð¸Ð½Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ - %sОбработчик Ð´Ð»Ñ ArcPDP не был загруженОбработчик Ð´Ð»Ñ GACLPDP не был загруженОбработчик Ð´Ð»Ñ XACMLPDP не был загруженШаблон наÑтроек (%s) не Ñоздан.Сбой при попытке запуÑка внешнего процеÑÑа: %sПри проверке прав доÑтупа к файлу получены избыточные данныеОтбраÑываетÑÑ ÐºÐ¾Ð¿Ð¸Ñ %s ÑоответÑÑ‚Ð²ÑƒÑŽÑ‰Ð°Ñ ÑˆÐ°Ð±Ð»Ð¾Ð½Ñƒ !%sИÑполнÑющий реÑÑƒÑ€Ñ Ð²Ñ‹Ñ‡Ð¸Ñлительного ÑервиÑа: %sСреда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ поддерживает входÑщие ÑоединениÑСреда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ðµ поддерживает иÑходÑщие ÑоединениÑÐ Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда - Ñ€ÐµÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð°Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда - Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð°Ð¡Ñ€ÐµÐ´Ð° иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ входÑщие ÑоединениÑСреда иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ иÑходÑщие ÑоединениÑКлаÑÑ ExecutionTarget не ÑвлÑетÑÑ Ð¾Ð±ÑŠÐµÐºÑ‚Ð¾Ð¼ÐžÑтанавливаетÑÑ Ð¿Ð¾Ñ‚Ð¾Ðº GeneratorОÑтанавливаетÑÑ Ð¿Ð¾Ñ‚Ð¾Ðº обработки задачиОдним из аргументов должна быть командаЗадайте команду и URLОдним из аргументов должно быть название Command moduleОдним из аргументов должен быть путь к Command moduleОжидаетÑÑ ÑƒÐºÐ°Ð·Ð°Ð½Ð¸Ðµ модулÑ, команды и URLОдним из аргументов должен быть URLВнешний Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° обÑлуживание %sВыделен пÑевдоним %s Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа, иÑпользуемых в RUCIO_ACCOUNTFATAL, ERROR, WARNING, INFO, VERBOSE или DEBUGÐе удалоÑÑŒ приÑвоить раÑширение hostnameÐе удалоÑÑŒ зарезервировать памÑть под ÑÑылкуОшибка проверки подлинноÑтиОшибка проверки подлинноÑти: %sСбой проверки базы данных (%s)Сбой проверки копии иÑточникаСбой проверки копии иÑточника %s: %sСбой проверки копии иÑточника: %sОшибка очиÑтки цели %sÐе удалоÑÑŒ загрузить наÑтройкиÐе удалоÑÑŒ загрузить наÑтройки.Сбой 
уÑÑ‚Ð°Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ ÑвÑзи Ñ Ñервером %s:%dÐе удалоÑÑŒ уничтожить ÑÑылку: %s. Ðевозможно ÑправитьÑÑ Ñ Ñ‚Ð°ÐºÐ¸Ð¼ положением.Ошибка загрузки %s в %sОшибка загрузки %s в %s, файл-приёмник уже ÑущеÑтвуетОшибка загрузки %s в %s, Ñбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÑущеÑтвующего файла-приёмникаСбой в globus_cond_initСбой в globus_ftp_control_handle_initСбой в globus_mutex_initСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÑÑылки на файл из кÑша из %sСбой Ð¾Ð±Ð½Ð°Ñ€ÑƒÐ¶ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупаОшибка поиÑка атрибутов кÑшированного файла: %sÐе удалоÑÑŒ подготовить опиÑание задачиСбой работы команды ÑоответÑÑ‚Ð²Ð¸Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ: %s %sСбой при чтении управлÑющего каталога: %sСбой при чтении управлÑющего каталога: %s: %sОшибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ…ÐžÑˆÐ¸Ð±ÐºÐ° Ñ‡Ñ‚ÐµÐ½Ð¸Ñ ÑпиÑка файловÐе удалоÑÑŒ прочеÑть локальную информациюСбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о задаче: %sСбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¾Ð¿Ð¸ÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸: %sÐе удалоÑÑŒ запуÑтить Ñлужбу раÑÑылкиÐе удалоÑÑŒ задать владельца файла: %sСбой заÑылки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸Ð¡Ð±Ð¾Ð¹ Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ файла по ftp: %sÐе удалоÑÑŒ принÑть Ñоединение SSLÐе удалоÑÑŒ принÑть Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° ÑоединениеÐе удалоÑÑŒ принÑть делегированиеСбой при приёме нового файла/направлениÑСбой доÑтупа к доверенноÑти указанной задачи %s в %sÐе удалоÑÑŒ получить наÑтройки A-REXÐе удалоÑÑŒ извлечь контекÑÑ‚ делегированиÑСбой уÑтановки блокировки на кÑшированный мета-файл %sСбой уÑтановки блокировки на файл %sÐе удалоÑÑŒ получить иÑточник: %sÐе удалоÑÑŒ активировать объект обработки задач, закрываетÑÑ Ð¿Ð¾Ñ‚Ð¾Ðº Grid ManagerСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ '%s' URL (Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ Ñ‚Ð¸Ð¿Ð° %s) в таблицу Endpoints базы данных учёта задачСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ '%s' в таблицу %s базы данных учёта задачÐе удалоÑÑŒ добавить Independent OIDÐе удалоÑÑŒ добавить OID доверенноÑти RFCСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ VOMS AC. 
Ваша доверенноÑть может быть неполной.Ðе удалоÑÑŒ добавить OID поÑледовательноÑти VOMS ACÐе удалоÑÑŒ добавить anyLanguage OIDÐе удалоÑÑŒ добавить закрытый ключ и ÑертификатСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñертификата к маркёру или базе данныхÐе удалоÑÑŒ добавить раÑширение к раÑширениÑм параметров доÑтупаÐе удалоÑÑŒ добавить inheritAll OIDСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð²Ñ‹Ð´Ð°ÑŽÑ‰ÐµÐ³Ð¾ агентÑтва в доверенноÑтьСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð¾Ð± иÑпользовании ключаСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð¾Ð± информации Ñертификата доверенноÑтиСбой Ð´Ð¾Ð±Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ VOMS ACСбой Ñ€ÐµÐ·ÐµÑ€Ð²Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð´Ð¾Ð²ÐµÑ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ñ‹Ñ… отношений ÑертификатаÐе удалоÑÑŒ зарезервировать Ñлемент Ð´Ð»Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… о ÑертификатеÐе удалоÑÑŒ зарезервировать памÑть Ð´Ð»Ñ Ð¸Ð¼ÐµÐ½Ð¸ Ñубъекта Ñертификата при Ñверке Ñ Ð¿Ð¾Ð»Ð¸Ñ‚Ð¸ÐºÐ°Ð¼Ð¸.Ðе удалоÑÑŒ зарезервировать контекÑÑ‚ p12Ðе удалоÑÑŒ применить параметры DHСбой Ð¿Ñ€Ð¸Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² ECDHÐе удалоÑÑŒ применить локальный Ð°Ð´Ñ€ÐµÑ Ðº Ñоединению передачи данныхÐе удалоÑÑŒ уÑтановить подлинноÑть токена SAML во входÑщем документе SOAPÐе удалоÑÑŒ уÑтановить подлинноÑть токена Username во входÑщем документе SOAPÐе удалоÑÑŒ уÑтановить подлинноÑть токена X509 во входÑщем документе SOAPСбой проверки подлинноÑти Ð´Ð»Ñ Ñчейки PKCS11 %sСбой проверки подлинноÑти на базе данных ключейÐе удалоÑÑŒ аутентифицироватьÑÑ Ðº маркёру %sÐе удалоÑÑŒ ÑвÑзать Ñокет Ñ %s:%s(%s): %sÐе удалоÑÑŒ ÑвÑзать Ñокет Ñ Ð¿Ð¾Ñ€Ñ‚Ð¾Ð¼ TCP %s(%s): %sÐе удалоÑÑŒ вызвать PORT_NewArenaСбой Ð¿Ñ€ÐµÑ€Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа на передачу: %sОшибка отмены: %sСбой прерываниÑ: нет ответа SOAPÐе удалоÑÑŒ Ñоздать PayloadSOAP из входÑщей нагрузкиÐе удалоÑÑŒ Ñоздать PayloadSOAP из иÑходÑщей нагрузкиÐе удалоÑÑŒ изменить политики обработки Ñтека ÑоответÑтвий в: %s = %sÐевозможно заменить владельца Ñимвольной ÑÑылки %s на %iÐе удалоÑÑŒ поменÑть владельца временной доверенноÑти в %s на %i:%i: %sÐевозможно изменить права доÑтупа к %s: %sÐе удалоÑÑŒ Ñменить права доÑтупа или владельца жёÑткой ÑÑылки %s: %sÐе удалоÑÑŒ проверить %sСбой при очиÑтке файла %s: %sСбой ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ñо Ñлужбой делегированиÑ.Сбой Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñи в назначениеÐе удалоÑÑŒ уÑтановить Ñоединение Ñ %s(%s):%iÐе удалоÑÑŒ уÑтановить Ñоединение Ñ %s(%s):%i - %sÐе удалоÑÑŒ уÑтановить ÑвÑзь Ñ Ñервером %s:%dÐе удалоÑÑŒ преобразовать ASCII в DERÐе удалоÑÑŒ преобразовать EVP_PKEY в PKCS8Ðе удалоÑÑŒ преобразовать параметры доÑтупа GSI в GSS (major: %d, minor: %d)Ðе удалоÑÑŒ преобразовать параметры доÑтупа GSI в GSS (major: %d, minor: %d)%s:%sСбой Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ PrivateKeyInfo в EVP_PKEYÐе удалоÑÑŒ преобразовать информацию о безопаÑноÑти в политику ARCÐе удалоÑÑŒ преобразовать информацию о защите в Ð·Ð°Ð¿Ñ€Ð¾Ñ ARCÐе удалоÑÑŒ преобразовать информацию о защите в Ð·Ð°Ð¿Ñ€Ð¾Ñ XACMLСбой ÐºÐ¾Ð¿Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %s: %sСбой ÐºÐ¾Ð¿Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° %s в %s: %sСбой ÐºÐ¾Ð¿Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð²Ñ…Ð¾Ð´Ð½Ð¾Ð³Ð¾ файла: %s в размещение: %sÐе удалоÑÑŒ Ñоздать поток ÑброÑа DTRÐе удалоÑÑŒ Ñоздать атрибуты безопаÑноÑти OTokensСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° OpenSSL %s %s - %u %sÐе удалоÑÑŒ Ñоздать контейеры SOAPÐе удалоÑÑŒ Ñоздать каталоги кÑша Ð´Ð»Ñ %sÐе удалоÑÑŒ Ñоздать каталог кÑша Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s: %sСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¼ÐµÑ‚Ð°-файла кÑша %sÐе удалоÑÑŒ Ñоздать Ð·Ð°Ð¿Ñ€Ð¾Ñ ÑертификатаОшибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð¾Ð³Ð¾ каталога %sÐе удалоÑÑŒ Ñоздать каталогÐе удалоÑÑŒ Ñоздать каталог %sСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s! 
Задача пропуÑкаетÑÑ.Сбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° %s: %sÐе удалоÑÑŒ Ñоздать контекÑÑ‚ Ð´Ð»Ñ ÑкÑпортаСбой при Ñоздании файла %s: %sÐевозможно Ñоздать жёÑткую ÑÑылку Ñ %s на %s: %sÐе удалоÑÑŒ Ñоздать входной контейнер SOAPÐе удалоÑÑŒ Ñоздать безопаÑное хранилище Ð´Ð»Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¾Ð³Ð¾ ключа или ÑертификатаСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ ÑÑылки %s. ПрипиÑанный URL не будет иÑпользованСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð´Ð»Ð¸Ð½Ñ‹ путиСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñзыка политикÐе удалоÑÑŒ Ñоздать Ñокет Ð´Ð»Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%d - %sÐе удалоÑÑŒ Ñоздать Ñокет Ð´Ð»Ñ Ð¿Ñ€Ð¾Ñлушки %s:%s(%s): %sÐе удалоÑÑŒ Ñоздать Ñокет Ð´Ð»Ñ Ð¿Ñ€Ð¾Ñлушки порта TCP %s(%s): %sÐе удалоÑÑŒ Ñформировать Ð¸Ð¼Ñ ÑубъектаÐевозможно Ñоздать Ñимвольную ÑÑылку Ñ %s на %s: %sÐе удалоÑÑŒ Ñоздать временную доверенноÑть в %s: %sÐе удалоÑÑŒ Ñоздать потокСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ ÐºÐ¾Ð¿Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ xrootd %sСбой при Ñоздании/открытии файла %s: %sСбой раÑшифровки опиÑÐ°Ð½Ð¸Ñ Ð´Ð¾Ð²ÐµÑ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ñ‹Ñ… отношенийСбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа на Ñервер - %sСбой Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð² доÑтупа на Ñервер - не обнаружен Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸ÑСбой при удалении %sСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ %s, вÑÑ‘ равно попытаемÑÑ ÑкопироватьÐе удалоÑÑŒ уничтожить ÑертификатСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° доÑтавки, или иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸ÑÐе удалоÑÑŒ уничтожить назначение, новые попытки могут быть безуÑпешнымиСбой при удалении логичеÑкого файлаСбой при удалении мета-информацииСбой при удалении физичеÑкого файлаÐе удалоÑÑŒ уничтожить закрытый ключÐе удалоÑÑŒ уничтожить закрытый ключ и ÑертификатСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¸ %s: %sÐе удалоÑÑŒ удалить уÑтаревший файл кÑша %s: %sÐе удалоÑÑŒ Ñкопировать Ñтруктуру X509Ðе удалоÑÑŒ Ñкопировать раÑширениеÐе удалоÑÑŒ включить IPv6Ðе удалоÑÑŒ шифрование в формат PKCS12Ошибка ÑˆÐ¸Ñ„Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÑертификатаСбой ÑˆÐ¸Ñ„Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа Ñертификата в формате DERÐе удалоÑÑŒ уÑтановить Ñоединение SSLСбой уÑÑ‚Ð°Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ ÑоединениÑ: %sÐе удалоÑÑŒ Ñохранить закрытый ключСбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¿Ñевдонима VOMS из Ñертификата доверенноÑтиСбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о параметрах доÑтупаСбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… из таблицы %s базы данных учёта задачСбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… из таблицы Endpoints базы данных учёта задачСбой Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¸Ð· иÑточникаСбой Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð·Ð°Ð¿Ð¸Ñи в цельÐевозможно найти Ñертификаты CAÐе удалоÑÑŒ обнаружить Ñертификат и/или закрытый ключ, либо у файлов неподходÑщие параметры доÑтупа.Ðе удалоÑÑŒ обнаружить Ñертификат по краткому имени: %sÐе удалоÑÑŒ найти раÑширениеÐе удалоÑÑŒ обнаружить агентÑтво, выдавшее Ñертификат доверенноÑтиÐе удалоÑÑŒ найти информацию о типе %s, чтобы определить, ÑтираетÑÑ Ñ„Ð°Ð¹Ð» или каталогСбой про Ñоздании ключа ECÐе удалоÑÑŒ Ñоздать токен SAML Ð´Ð»Ñ Ð¸ÑходÑщего ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ SOAPÐе удалоÑÑŒ Ñоздать токен имени Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð´Ð»Ñ Ð¸ÑходÑщего ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ SOAPÐе удалоÑÑŒ Ñоздать токен X509 Ð´Ð»Ñ Ð¸ÑходÑщего ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ SOAPСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¿Ð°Ñ€Ñ‹ открытого/закрытого ключейÐе удалоÑÑŒ извлечь информацию о DN из файла .local задачи %sÐе удалоÑÑŒ получить параметры TCP-Ñокета Ð´Ð»Ñ ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%d - прерывание по времени не будет работать - %sСбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ñертификата из файлаÐе удалоÑÑŒ получить параметры доÑтупаÐе удалоÑÑŒ получить файл ftpÐе удалоÑÑŒ получить ÑÑылку параметра GFAL2: %sÐе удалоÑÑŒ получить новый контекÑÑ‚ GFAL2: %sСбой вычиÑÐ»ÐµÐ½Ð¸Ñ 
ÑƒÑреднённой загруженноÑти: %sÐе удалоÑÑŒ получить закрытый ключÐе удалоÑÑŒ получить открытый ключÐевозможно извлечь открытый ключ из объекта RSAÐевозможно извлечь открытый ключ из объекта X509Ðе удалоÑÑŒ обнаружить файл наÑтроек grid-managerСбой Ð¸Ð¼Ð¿Ð¾Ñ€Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñертификата из файла: %sÐе удалоÑÑŒ получить закрытый ключСбой Ð¸Ð¼Ð¿Ð¾Ñ€Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¾Ð³Ð¾ ключа из файла: %sСбой инициализации LCASСбой инициализации LCMAPSОшибка инициализации библиотеки OpenSSLÐе удалоÑÑŒ инициализировать файл PKCS12: %sÐе удалоÑÑŒ инициализировать Ñтруктуру X509Сбой инициализации базы данных учёта задачСбой инициализации раздела раÑширений параметров доÑтупаСбой запуÑка головного потока PythonСбой при инициализации кÑшаСбой запуÑка ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð¾Ð¼Ð¡Ð±Ð¾Ð¹ инициализации параметров доÑтупа Ð´Ð»Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸ÑÐе удалоÑÑŒ добавить AAR в базу данных Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sÐе удалоÑÑŒ ограничить Ñокет под IPv6 на %s:%s - может привеÑти к ошибкам Ð´Ð»Ñ IPv4 по Ñтому же портуÐе удалоÑÑŒ ограничить Ñокет под IPv6 на порте TCP %s - может привеÑти к ошибкам Ð´Ð»Ñ IPv4 по Ñтому же портуÐе удалоÑÑŒ проÑлушать %s:%s(%s): %sÐе удалоÑÑŒ проÑлушать порт TCP %s(%s): %sÐе удалоÑÑŒ загрузить наÑтройки клиентаСбой загрузки раздела раÑширений: %sÐе удалоÑÑŒ подгрузить файл наÑтроек grid-managerÐе удалоÑÑŒ подгрузить файл наÑтроек grid-manager из %sÐе удалоÑÑŒ подгрузить файл наÑтроек grid-managerСбой подгрузки подключаемого Ð¼Ð¾Ð´ÑƒÐ»Ñ Ð´Ð»Ñ URL %sÐе удалоÑÑŒ загрузить закрытый ключÐе удалоÑÑŒ загрузить наÑтройки ÑервиÑаÐе удалоÑÑŒ загрузить наÑтройки ÑервиÑа ни из какого файла наÑтроекÐе удалоÑÑŒ загрузить наÑтройки ÑервиÑа из файла %sÐе удалоÑÑŒ загрузить компоненты MCC ÑервераÐевозможно заблокировать библиотеку arccredential в памÑтиÐевозможно заблокировать библиотеку arccrypto в памÑтиСбой при Ñоздании ÑимволичеÑкой ÑÑылки %s на %s : %sÐе удалоÑÑŒ перемеÑтить %s в %s: %sÐе удалоÑÑŒ перемеÑтить файл %s в %sСбой Ð²Ñ‹Ð´ÐµÐ»ÐµÐ½Ð¸Ñ Ð½Ð¾Ð²Ð¾Ð¹ облаÑтиСбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€Ð° OpenSSL Ð´Ð»Ñ %sСбой Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ ÐºÐ¾Ð»Ð¸Ñ‡ÐµÑтва переданных байтов: %sÐе удалоÑÑŒ получить блоки Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð´Ð»Ñ Ð¾Ñ‡Ð¸Ñтки неиÑпользуемых блоковСбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ о файлеÐе удалоÑÑŒ получить ÑпиÑок файлов через FTP: %sÐе удалоÑÑŒ получить локальный Ð°Ð´Ñ€ÐµÑ Ð´Ð»Ñ %s:%s - %sÐе удалоÑÑŒ получить локальный Ð°Ð´Ñ€ÐµÑ Ð´Ð»Ñ Ð¿Ð¾Ñ€Ñ‚Ð° %s - %sÐевозможно заблокировать файл в кÑше %sÐе удалоÑÑŒ получить ÑпиÑок ÑÑ‚Ð°Ñ‚ÑƒÑ Ñ‡ÐµÑ€ÐµÐ· FTP: %sÐевозможно открыть %s Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ: %sСбой Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ %s, попытка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€Ð¾Ð´Ð¸Ñ‚ÐµÐ»ÑŒÑких каталоговÐе удалоÑÑŒ открыть канал передачи данныхÐе удалоÑÑŒ открыть каталог %s: %sÐе удалоÑÑŒ открыть файл %sÐе удалоÑÑŒ открыть на чтение файл Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð°Ð¼Ð¸ DHÐе удалоÑÑŒ открыть мониторинговый файл %sСбой Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° входного Ñертификата %sÐе удалоÑÑŒ открыть журнальный файл: %sÐе удалоÑÑŒ открыть выходной файл '%s'Сбой при открытии файла pk12Ðе удалоÑÑŒ открыть канал stdio %dÐе удалоÑÑŒ открыть канал stdio %sСбой вывода запроÑа Ñертификата в формате ASCIIСбой вывода запроÑа Ñертификата в формате DERСбой разбора заголовка HTTPÐе удалоÑÑŒ разобрать отзыв Rucio: %sÐе удалоÑÑŒ разобрать токен SAML из входÑщего документа SOAPÐе удалоÑÑŒ разобрать токен Username из входÑщего документа SOAPÐе удалоÑÑŒ разобрать команду VOMS: %sÐе удалоÑÑŒ разобрать токен X509 из входÑщего документа SOAPСбой обработки запроÑа Ñертификата из файла CSR %sСбой при разборе файла 
наÑтроек %sСбой разборки указанного времени дейÑÑ‚Ð²Ð¸Ñ VOMS: %sСбой разборки указанного номера порта Ñервера VOMS: %sÐе удалоÑÑŒ зарегиÑтрировать назначение: %sСбой предварительной очиÑтки назначениÑ: %sСбой предварительного Ñ€ÐµÐ·ÐµÑ€Ð²Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¼ÐµÑта Ð´Ð»Ñ %sÐе удалоÑÑŒ подготовить назначениеÐе удалоÑÑŒ подготовить назначение: %sСбой подготовки опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸Ðе удалоÑÑŒ подготовить иÑточникÐе удалоÑÑŒ подготовить иÑточник: %sÐе удалоÑÑŒ предварительно зарегиÑтрировать назначение: %sÐе удалоÑÑŒ обработать наÑтройки A-REX в %sÐе удалоÑÑŒ обработать наÑтройки VOMS, или не найдены приемлемые Ñтроки конфигурации.Ðе удалоÑÑŒ обработать наÑтройки в %sСбой обработки задачи: %sСбой разборки задачи: %s - %s %sСбой обработки задач - Ñбой разборки откликаСбой обработки задач - неверный отклик: %uÐе удалоÑÑŒ обработать атрибуты безопаÑноÑти в TLS MCC Ð´Ð»Ñ Ð²Ñ…Ð¾Ð´Ñщего ÑообщениÑÐе удалоÑÑŒ опроÑить базу данных о AAR Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sСбой опроÑа ÑоÑтоÑниÑ: %sÐе удалоÑÑŒ прочеÑть атрибут %x из закрытого ключа.Сбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¼ÐµÑ‚Ð°-файла кÑша %sСбой при чтении файла Ñертификата: %sÐевозможно прочитать данные из входного файлаСбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° Ñхемы базы данных в %sСбой при чтении файла %sСбой при чтении файла Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð°Ð¼Ð¸ DHСбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° входного ÑертификатаСбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° %s: %sСбой при чтении файла личного ключа: %sСбой при чтении файла доверенноÑти: %sПроизошёл Ñбой при чтении запроÑа из файлаСбой при чтении запроÑа из ÑтрокиСбой региÑтрации копии назначениÑ: %sСбой при региÑтрации нового файла/цели: %sСбой региÑтрации подключаемого Ð¼Ð¾Ð´ÑƒÐ»Ñ Ð´Ð»Ñ ÑоÑтоÑÐ½Ð¸Ñ %sÐе удалоÑÑŒ оÑвободить параметры доÑтупа GSS (major: %d, minor: %d):%s:%sСбой ÑброÑа завершившегоÑÑ Ð·Ð°Ð¿Ñ€Ð¾ÑаÐевозможно разблокировать файл в кÑше %sÐевозможно разблокировать файл %sСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° .meta %s: %sСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð²Ñех фактичеÑких копийСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° кÑша задач %s: %sÐевозможно удалить ÑущеÑтвующую жёÑткую ÑÑылку на %s: %sÐевозможно удалить ÑущеÑтвующую Ñимвольную ÑÑылку на %s: %sÐе удалоÑÑŒ удалить файл %s: %sÐе удалоÑÑŒ удалить копиюСбой Ñ€Ð°Ð·Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° на %s. Возможно, необходимо ручное вмешательÑтвоСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÑƒÑтаревшего файла блокировки %s: %sÐе удалоÑÑŒ удалить временную доверенноÑть %s: %sÐе удалоÑÑŒ переименовать URLÐе удалоÑÑŒ разрешить %sСбой при разрешении %s (%s)Ðе удалоÑÑŒ определить назначение: %sÐе удалоÑÑŒ определить иÑточник: %sÐе удалоÑÑŒ получить данные о приложении через OpenSSLÐе удалоÑÑŒ получить ÑÑылку на поток TLS. 
Ð”Ð¾Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñверка политики пропуÑкаетÑÑ.Сбой Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° закрытого ключа издателÑСбой запуÑка потока Grid ManagerСбой иÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ %sСбой запуÑка разборщика файла наÑтроек %s.Ðе удалоÑÑŒ запуÑтить внешний подключаемый модуль: %sСбой отправки запроÑа на прерывание: %sÐе удалоÑÑŒ отправить Ñодержимое буфераСбой уÑтановки обратного вызова монитора GFAL2: %sСбой уÑтановки времени Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ GFAL2, будет иÑпользоватьÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ по умолчанию: %sСбой ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ входа INTERNALСбой Ð·Ð°Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¿Ð¸Ð¹ в LFC: %sÐе удалоÑÑŒ уÑтановить параметры доÑтупа Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸ данных по GridFTPÐевозможно выÑтавить иÑполнÑемый бит Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %sÐевозможно выÑтавить иÑполнÑемый бит Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s: %sСбой уÑтановки опции перезапиÑи в GFAL2: %sСбой Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð°Ð² доÑтупа к %sСбой Ð·Ð°Ð´Ð°Ð½Ð¸Ñ ID алгоритма подпиÑиÐе удалоÑÑŒ задать открытый ключ Ð´Ð»Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° X509 иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ñ‹Ð¹ ключ из X509_REQСбой уÑтановки Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð°Ð² доÑтупа Ñ %sÐе удалоÑÑŒ прервать Ñоединение SSL: %sСбой подпиÑи данных зашифрованного ÑертификатаÐе удалоÑÑŒ подпиÑать Ð·Ð°Ð¿Ñ€Ð¾Ñ ÑертификатаÐе удалоÑÑŒ подпиÑать доверенноÑтьÐе удалоÑÑŒ размеÑтить файл(Ñ‹)Ðе удалоÑÑŒ запуÑтить поток архивированиÑÐе удалоÑÑŒ запуÑтить Ñкрипт очиÑтки кÑшаСбой начала ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°ÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ ÑертификатаÐе удалоÑÑŒ запуÑтить потоки Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ…Ðе удалоÑÑŒ начать проÑлушивание ни по какому адреÑу Ð´Ð»Ñ %s:%sÐе удалоÑÑŒ начать проÑлушивание ни по какому адреÑу Ð´Ð»Ñ %s:%s(IPv%s)Ðе удалоÑÑŒ запуÑтить новый Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR Ð´Ð»Ñ %sÐе удалоÑÑŒ запуÑтить поток Ð´Ð»Ñ Ð¾Ñ‚ÑÐ»ÐµÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñов на задачиÐе удалоÑÑŒ запуÑтить новый поток: кÑш не будет очищенÐе удалоÑÑŒ начать Ð¾Ð¿Ñ€Ð¾Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ входа на %sСбой начала опроÑа точки доÑтупа по %s (не удалоÑÑŒ Ñоздать подпоток)Ðе удалоÑÑŒ начать чтение из иÑточника: %sÐе удалоÑÑŒ запуÑтить поток Ð´Ð»Ñ Ð¾Ð±Ð¼ÐµÐ½Ð° информациейÐе удалоÑÑŒ запуÑтить поток Ð´Ð»Ñ Ð¿Ñ€Ð¾ÑлушиваниÑСбой запуÑка запроÑа на передачу: %sСбой начала запиÑи в кÑшСбой начала запиÑи в назначение: %sÐе удалоÑÑŒ проверить ÑоÑтоÑние каталога ÑеÑÑии %sСбой проверки ÑтатуÑа иÑточника %sÐе удалоÑÑŒ запиÑать данные приложениÑÐе удалоÑÑŒ Ñохранить файл ftpСбой заÑылки вÑех задач.Сбой заÑылки вÑех задач: %sСбой заÑылки вÑех задач: %s %sСбой заÑылки вÑех задач: %u %sОшибка запуÑка задачиÐе удалоÑÑŒ изменить идентификатор Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð½Ð° %d/%dСбой оÑтановки LCASСбой оÑтановки LCMAPSÐе удалоÑÑŒ передать данныеÐе удалоÑÑŒ разблокировать файл %s: %s. Возможно, необходимо ручное вмешательÑтвоСбой Ñ€Ð°Ð·Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° Ñ Ð±Ð»Ð¾ÐºÐ¾Ð¼ %s: %sÐе удалоÑÑŒ отменить предварительную региÑтрацию Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ %s. Возможно, Вам придётÑÑ Ñделать Ñто вручнуюÐе удалоÑÑŒ отменить предварительную региÑтрацию Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ %s: %s. Возможно, Вам придётÑÑ Ñделать Ñто вручнуюСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ запиÑи LFN. Возможно, необходимо удалить её вручнуюСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ запиÑи LFN. Возможно, необходимо удалить её вручнуюСбой ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€ÐµÐ´Ð²Ð°Ñ€Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ запиÑи LFN. 
Возможно, необходимо удалить её вручную: %sÐе удалоÑÑŒ обновить AAR в базе данных Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sÐе удалоÑÑŒ подтвердить токен X509 во входÑщем документе SOAPÐе удалоÑÑŒ подтвердить запроÑПодпиÑÑŒ не подтвержденаПодпиÑÑŒ не подтвержденаСбой проверки подпиÑанного ÑертификатаСбой запиÑи информации о RTE Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sСбой запиÑи атрибутов authtoken Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sСбой при запиÑи тела в выходной потокСбой запиÑи информации о передаче данных Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sСбой запиÑи информации о ÑобытиÑÑ… Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ %sСбой при запиÑи заголовка в выходной потокСбой запиÑи информации о задаче в базу данных (%s)Ðе удалоÑÑŒ запиÑать Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð² файлÐе удалоÑÑŒ запиÑать Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð² ÑтрокуСбой запиÑи подпиÑанного Ñертификата EEC в файлСбой запиÑи подпиÑанной доверенноÑти в файлОшибка запиÑи в локальный файл ÑпиÑка задач %sСбой Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¼ÐµÑ‚ÐºÐ¸ времени файла блокировки кÑша %s Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s: %sÐе удалоÑÑŒ отгрузить файл %s в %s: %sСбой выгрузки локальных входных файловСбой Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¸Ð· иÑточникаСбой при завершении запиÑи в назначениеОшибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¸Ð· иÑточникаСбой при передаче данныхСбой при ожидании запроÑа на ÑоединениеСбой при ожидании ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ %s(%s):%i - %sОшибка при запиÑи в цельОшибка при разборе отзыва Ñ Ñервера - Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¼Ð¾Ð¶ÐµÑ‚ быть чаÑтично невернойОшибка: %sЭта Ñ„ÑƒÐ½ÐºÑ†Ð¸Ñ Ð½Ðµ реализованаИзвлечение: тело отклика: %sИзвлечение: код отклика: %u %sФайл %s в ÑоÑтоÑнии NEARLINE, будет Ñделан Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¾ размещении на диÑкеФайл %s уже находитÑÑ Ð² кÑше %s Ñ Ð´Ñ€ÑƒÐ³Ð¸Ð¼ URL: %s - Ñтот файл не будет кÑшированФайл %s уже кÑширован в %s Ñ Ð´Ñ€ÑƒÐ³Ð¸Ð¼ URL: %s - выделенное Ð¸Ð¼Ñ Ð½Ðµ будет добавлено в кÑшированный ÑпиÑокФайл %s приÑутÑтвует в кÑше (%s) - проверÑетÑÑ Ð´Ð¾Ð¿ÑƒÑкУÑпешно удалён файл %sФайл '%s' перечиÑленный в атрибуте 'executables' отÑутÑтвует в атрибуте 'inputfiles'Файл уже ÑущеÑтвует: %sФайл не может быть переведён в ÑоÑтоÑние DoneФайл не может быть переведён в ÑоÑтоÑние Running: %sСбой при удалении файла, попытка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð°Ð¡Ð±Ð¾Ð¹ при удалении файла, попытка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð° Ð´Ð»Ñ %sÐевозможно загрузить файл: %sФайл может быть кÑширован, проверÑетÑÑ ÐºÑшФайл ещё кÑшируетÑÑ, ожидание %i ÑекФайл недоÑтупен %s: %sФайл недоÑтупен: %sФайл не может быть кÑширован, пропуÑкаетÑÑ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚ÐºÐ° кÑшаФайл либо не может быть кÑширован, либо кÑширование не было запрошено, либо кÑша нет; пропуÑкаетÑÑ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° кÑшаФайл готов! 
TURL: %sФайл меньше %llu байт, будет иÑпользована Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð´Ð¾ÑтавкаТип файла недоÑтупен, попытка Ñтереть файлВ отзыве Rucio отÑутÑтвует Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð°: %sФайлы, аÑÑоциированные Ñ Ð¼Ð°Ñ€ÐºÑ‘Ñ€Ð¾Ð¼ запроÑа %s, уÑпешно прерваныФайлы, аÑÑоциированные Ñ Ð¼Ð°Ñ€ÐºÑ‘Ñ€Ð¾Ð¼ запроÑа %s, уÑпешно отгруженыФайлы, аÑÑоциированные Ñ Ð¼Ð°Ñ€ÐºÑ‘Ñ€Ð¾Ð¼ запроÑа %s, уÑпешно разблокированыКопирование набора файлов в отдельный объект пока не поддерживаетÑÑРегиÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð½Ð°Ð±Ð¾Ñ€Ð¾Ð² файлов пока не поддерживаетÑÑОбнаружение ÑущеÑтвующих копий назначениÑFinishWriting: поиÑк метаданных: %sFinishWriting: получена ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»ÑŒÐ½Ð°Ñ Ñумма: %sУÑпешное завершениеСбой первого шага региÑтрации в ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³ÐµÐŸÐµÑ€Ð²Ð°Ñ Ñ‡Ð°Ñть Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð° 'inputfiles' (filename) не может быть пуÑÑ‚Ð¾Ð¹ÐŸÐµÑ€Ð²Ð°Ñ Ñ‡Ð°Ñть Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð° 'outputfiles' (filename) не может быть пуÑÑ‚Ð¾Ð¹Ð”Ð»Ñ Ñ€ÐµÐ³Ð¸Ñтрации, иÑточник должен быть задан обычным URL, а назначением должен быть каталог реÑурÑÐ¾Ð²Ð”Ð»Ñ Ñ‚ÐµÑтовой задачи номер 1 необходимо задать Ð²Ñ€ÐµÐ¼Ñ Ð¸ÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ опции -r (--runtime).ÐŸÑ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° иÑточника кÑшированного файла %sÐŸÑ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° файла %sÐайден подключаемый модуль %s %s (уже подгружен)%s обнаружен в кÑшеÐайден Ð·Ð°Ð¿Ñ€Ð¾Ñ DTR %s Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %s, оÑтавшийÑÑ Ð² ÑоÑтоÑнии передачи поÑле предыдущего запуÑкаОбнаружен атрибут VOMS AC: %sÐайден рееÑтр, который будет опрошен рекурÑивно: %sОбнаружен ÑущеÑтвующий маркер Ð´Ð»Ñ %s в кÑше маркеров Rucio, иÑтекающий %sОбнаружена ÐºÐ¾Ð½ÐµÑ‡Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° Ñлужбы %s (тип %s)Ðайдена точка входа в ÑоÑтоÑнии STARTED или SUCCESSFUL (%s)Обнаружена временно иÑÐºÐ»ÑŽÑ‡Ñ‘Ð½Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° входа (%s)Обнаружены Ñледующие задачи:Обнаружены Ñледующие новые задачи:Ðайден непредвиденный пуÑтой файл блокировки %s. Ðеобходимо вернутьÑÑ Ð² acquire()Ðайдены незаконченные процеÑÑÑ‹ DTR. ВероÑтно, предыдущий процеÑÑ A-REX завершилÑÑ ÑбоемДоÑтупные меÑта Ñгруппированы по предельному времени (предел: доÑтупные меÑта):Свободные Ñдра: %iСтрока иÑпользована неполноÑтью: %sÐ—Ð°Ð¿Ñ€Ð¾Ñ Ð°Ð²Ñ‚Ð¾Ñ€Ð¸Ð·Ð°Ñ†Ð¸Ð¸ GACL: %sGET: идентификатор %s путь %sСоздайте новый Ð·Ð°Ð¿Ñ€Ð¾Ñ X509!СоздаётÑÑ Ð¾Ð¿Ð¸Ñание задачи в формате %sÐвтоматичеÑкое Ñоздание префикÑа ceID из имени узлаГенератор запущенÐеÑпецифичеÑÐºÐ°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°ÐŸÐ¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ðµ делегированных параметров доÑтупа от Ñлужбы делегированиÑ: %sПолучение из кÑша: КÑшированный файл забклокированПолучение из кÑша: Ошибка наÑтроек кÑшаПолучение из кÑша: Файла в кÑше нетПолучение из кÑша: ÐедопуÑтимый URL %sПолучение из кÑша: ПоиÑк %s в кÑшеПолучение из кÑша: не удалоÑÑŒ получить доÑтуп к кÑшированному файлу: %sÐ—Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° получение %s вÑÑ‘ ещё в очереди, Ñледует подождать %i ÑекундGet: отÑутÑвует задача %s - %sСоздание текущей метки времени Ð´Ð»Ñ Ð¶ÑƒÑ€Ð½Ð°Ð»Ð° программы разбора BLAH: %sПолучение делегированных параметров доÑтупа от Ñлужбы Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ARCОшибка Globus: %sСÑылка globus заÑтрÑлаПеременнаÑ, ÑƒÐºÐ°Ð·Ñ‹Ð²Ð°ÑŽÑ‰Ð°Ñ Ð½Ð° раÑположение Globus, больше не поддерживаетÑÑ. 
ПожалуйÑта, укажите полный путь.Опознавательные признаки Грид поÑтавлены в ÑоответÑтвие меÑтной учётной запиÑи '%s'HEAD: идентификатор %s путь %sОшибка HTTP: %d %sОшибка HTTP %u - %sÐÐºÑ‚Ð¸Ð²Ð¸Ð·Ð°Ñ†Ð¸Ñ HTTP Ñ SAML2SSO не выполненаHTTP:PUT %s: запиÑÑŒ файла %s: %sСÑылка в недопуÑтимом ÑоÑтоÑии %u/%uHead: отÑутÑвует задача %s - %sÐ˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ ÑоÑтоÑнии здоровьÑ: %sСоÑтоÑние здоровьÑ: %sСоÑтоÑние Ð·Ð´Ð¾Ñ€Ð¾Ð²ÑŒÑ Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ (%s) неудовлетворительное (%s)Параметры Ñправки:Сбой при запуÑке вÑпомогательного процеÑÑа: %sОтÑутÑтвует вÑÐ¿Ð¾Ð¼Ð¾Ð³Ð°Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¾Ð³Ñ€Ð°Ð¼Ð¼Ð°ÐžÐ´Ð½Ð¾Ñ€Ð¾Ð´Ð½Ñ‹Ð¹ реÑурÑID: %sФайл наÑтроек INI %s не ÑущеÑтвуетКлиент INTERNALClient не запущенId= %s,Тип= %s,Издатель= %s,Значение= %sСлужба IdP выдала Ñообщение об ошибке: %sЛичные данные: %sВыделенное имÑ: %sЛичные данные: %sЕÑли пара Ñертификат/ключ или файл Ñертификата доверенноÑти ÑущеÑтвуют, Ð’Ñ‹ можете вручную указать их раÑположение Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ переменных Ñреды '%s'/'%s' или '%s', или Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ атрибутов '%s'/'%s' или '%s' в файле наÑтроек клиента (например, '%s')Ð£ÐºÐ°Ð·Ñ‹Ð²Ð°Ñ Ð¿Ð¾Ð»Ð¸Ñ‚Ð¸ÐºÑƒ, указывайте также её ÑзыкИгнорируетÑÑ Ñ‚Ð¾Ñ‡ÐºÐ° входа (%s), Ñ‚.к. она уже зарегиÑтрирована в загрузчике.ИгнорируетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), Ð¿Ñ€ÐµÐ´Ñ‹Ð´ÑƒÑ‰Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° подгрузить JobControllerPlugin завершилаÑÑŒ неудачейИгнорируетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), отÑутÑтвует URL интерфейÑа управлениÑИгнорируетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), отÑутÑтвует URL ÑоÑтоÑÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸Ð˜Ð³Ð½Ð¾Ñ€Ð¸Ñ€ÑƒÐµÑ‚ÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), отÑутÑтвует название интерфейÑа управлениÑИгнорируетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), отÑутÑтвует название интерфейÑа ÑоÑтоÑниÑИгнорируетÑÑ Ð·Ð°Ð´Ð°Ñ‡Ð° (%s), невозможно подгрузить JobControllerPlugin Ð´Ð»Ñ %sЗадача игнорируетÑÑ, так как её Ñрлык пуÑÑ‚ÐедопуÑтимый URL - за закрывающей Ñкобкой ] Ð´Ð»Ñ Ð°Ð´Ñ€ÐµÑа IPv6 Ñледует недопуÑтимый маркёр: %sÐедопуÑтимый URL - отÑутÑтвует Ð·Ð°ÐºÑ€Ñ‹Ð²Ð°ÑŽÑ‰Ð°Ñ Ñкобка ] Ð´Ð»Ñ Ð°Ð´Ñ€ÐµÑа IPv6: %sÐедопуÑтимый Ð°Ð´Ñ€ÐµÑ - не ÑодержитÑÑ Ð¸Ð¼Ñ ÑƒÐ·Ð»Ð°: %sÐедопуÑтимый URL - путь должен быть абÑолютным или пуÑтым: %sÐедопуÑтимый URL - путь должен быть абÑолютным: %sÐедопуÑтимый формат времени: %sОжидаетÑÑ Ð½ÐµÐ¼ÐµÐ´Ð»ÐµÐ½Ð½Ð¾Ðµ ÑоединениеОжидаетÑÑ Ð½ÐµÐ¼ÐµÐ´Ð»ÐµÐ½Ð½Ð¾Ðµ Ñоединение: %sÐемедленное завершение: %sÐ˜Ð¼Ñ Ñ€ÐµÐ°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ð¸: %sВнедритель: %sÐ’ доÑтупном ÑпиÑке отзыва Ñертификатов (CRL) значение lastUpdate недейÑтвительноВ доÑтупном ÑпиÑке отзыва Ñертификатов (CRL) значение nextUpdate недейÑтвительноВ профиле наÑтроек атрибут 'initype' Ñлемента "%s" имеет ÑобÑтвенное значение "%s".ВходÑщее Ñообщение не в формате SOAPЗапрошены неÑовмеÑтимые опции --nolist и --forcelistПротиворечивые метаданныеÐезавиÑÐ¸Ð¼Ð°Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñть - права не выделеныТочка входа Ð´Ð»Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ð¸ÐеизвеÑтный тип информации '%s'ПуÑтой информационный документЗапущена %u-Ñ Ñлужба PythonÐ˜Ð½Ð¸Ñ†Ð¸Ð°Ð»Ð¸Ð·Ð°Ñ†Ð¸Ñ Ð¿Ñ€Ð¾Ñ†ÐµÐ´ÑƒÑ€Ñ‹ делегированиÑВвод не в формате SOAPВходные данные не Ñодержат Ñтроки Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð²Ð²Ð¾Ð´Ð° из файла: Request.xmlÐ—Ð°Ð¿Ñ€Ð¾Ñ Ð²Ð²Ð¾Ð´Ð° из программыВвод: метаданные: %sУÑтановленные рабочие Ñреды:Задан Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ (%s), заÑылка производитÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ через негоРаÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð¸Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñа:Ð˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ Ñ‚Ð¾Ñ‡ÐºÐ¸ входа (%s) %s.ВерÑии интерфейÑа:Ð˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ %sВнутренний метод передачи данных не поддерживаетÑÑ Ð´Ð»Ñ %sÐедейÑтвительный Ð·Ð°Ð¿Ñ€Ð¾Ñ DTRÐедопуÑтимый DTR Ð´Ð»Ñ Ð¸Ñточника %s, Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ %sÐедопуÑтимый ÑффектÐедопуÑтимый объект HTTP не может дать результатÐеверный 
nordugrid-arc-7.1.1/po (Russian message catalog, mis-encoded): compiled gettext string table containing the Russian (ru) translations of the NorduGrid ARC client and service diagnostics -- messages for the command-line tools (arccat, arcclean, arccp, arcget, arcinfo, arckill, arcls, arcmkdir, arcproxy, arcrename, arcrm, arcstat, arcsub, arctest) and for A-REX, DTR data staging, SRM, Rucio, VOMS, LCMAPS/LCAS and delegation handling. Binary catalog content; not representable as plain text.
ÑекУничтожение и воÑÑоздание вÑего хранилищаСвободное рабочее проÑтранÑтво: %i GBРабочее проÑтранÑтво иÑпользуетÑÑ Ð¾Ð´Ð½Ð¾Ð¹ задачейРабочее проÑтранÑтво иÑпользуетÑÑ Ñ€Ð°Ð·Ð½Ñ‹Ð¼Ð¸ Ð·Ð°Ð´Ð°Ñ‡Ð°Ð¼Ð¸Ð’Ñ€ÐµÐ¼Ñ Ð¶Ð¸Ð·Ð½Ð¸ рабочего проÑтранÑтва: %sОбщий объём рабочего проÑтранÑтва: %i GBЗапиÑÑŒ информации в журнал программы разбора BLAH: %sÐеверный каталог в %sÐеверный формат "FreeSlotsWithDuration" = "%s" ("%s")Запрошен неверный Ñзык: %sÐедопуÑтимое чиÑло в команде defaultttlÐедопуÑтимое чиÑло в команде maxjobdescÐедопуÑтимое чиÑло в maxjobs: %sÐедопуÑтимое чиÑло в команде maxrerunÐедопуÑтимое значение в urdelivery_frequency: %sÐедопуÑтимое чиÑло в wakeupperiod: %sУказано неверное количеÑтво аргументовÐедопуÑтимое чиÑло аргументов!Ðеверное количеÑтво объектов (%i) Ð´Ð»Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸ stat от ftp: %sЗадано неверное количеÑтво параметровÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² %sÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð´Ð»Ñ Ð±Ð°Ð·Ñ‹ данных delegationdbÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¾Ð¿Ñ†Ð¸Ñ Ð² fixdirectoriesÐÐµÐ¿Ñ€Ð°Ð²Ð¸Ð»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¸Ð½Ð°Ð´Ð»ÐµÐ¶Ð½Ð¾Ñть файла открытого ключа: %sÐÐµÐ¿Ñ€Ð°Ð²Ð¸Ð»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¸Ð½Ð°Ð´Ð»ÐµÐ¶Ð½Ð¾Ñть файла личного ключа: %sÐÐµÐ¿Ñ€Ð°Ð²Ð¸Ð»ÑŒÐ½Ð°Ñ Ð¿Ñ€Ð¸Ð½Ð°Ð´Ð»ÐµÐ¶Ð½Ð¾Ñть файла доверенноÑти: %sÐеправильные права доÑтупа к файлу открытого ключа: %sÐеправильные права доÑтупа к файлу личного ключа: %sÐеправильные права доÑтупа к файлу доверенноÑти: %sОбнаружено недопуÑтимое поле запиÑи "%s" в "%s"Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð·Ð°Ð¿Ð¸Ñан в файлПодпиÑанный Ñертификат EEC запиÑан в файлПодпиÑÐ°Ð½Ð½Ð°Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ñть запиÑана в файлОбработчик токена X509 не наÑÑ‚Ñ€Ð¾ÐµÐ½Ð·Ð°Ð¿Ñ€Ð¾Ñ XACML: %sФайл наÑтроек XML %s не ÑущеÑтвуетИз ÑпиÑка задач будут удалены задачи, о которых не обнаружена информациÑ. Ð’ÐИМÐÐИЕ: задачи, запущенные недавно, могли ещё не поÑвитьÑÑ Ð² информационной ÑиÑтеме, и Ñта Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ ÑƒÐ´Ð°Ð»Ð¸Ñ‚ также Ñти задачи.Ð’Ñ‹ можете попытатьÑÑ ÑƒÐ²ÐµÐ»Ð¸Ñ‡Ð¸Ñ‚ÑŒ уровень детальноÑти Ð´Ð»Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ информации.Ваши личные данные: %sÐе уÑтановлен Ñертификат Вашего центра ÑертификацииВаша доверенноÑть дейÑтвительна до: %s[ADLParser] Ñлемент %s должен быть логичеÑким.[ADLParser] AccessControl не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым XML.[ADLParser] Benchmark пока что не поддерживаетÑÑ.[ADLParser] Код в FailIfExitCodeNotEqualTo в %s не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым чиÑлом.[ADLParser] Значение CreationFlag %s не поддерживаетÑÑ.[ADLParser] CredentialService должен Ñодержать допуÑтимый URL.[ADLParser] ОтÑутÑтвует Ñлемент Name или значение Ñлемента ParallelEnvironment/Option.[ADLParser] Значение Name в InputFile отÑутÑтвует или неверно.[ADLParser] Значение Name в OutputFile отÑутÑтвует или неверно.[ADLParser] Значение DiskSpaceRequirement отÑутÑтвует или неверно.[ADLParser] Значение IndividualCPUTime отÑутÑтвует или неверно.[ADLParser] Значение IndividualPhysicalMemory отÑутÑтвует или неверно.[ADLParser] Значение IndividualVirtualMemory отÑутÑтвует или неверно.[ADLParser] Значение NumberOfSlots отÑутÑтвует или неверно.[ADLParser] Значение ProcessesPerSlot отÑутÑтвует или неверно.[ADLParser] Значение SlotsPerHost отÑутÑтвует или неверно.[ADLParser] Значение ThreadsPerProcess отÑутÑтвует или неверно.[ADLParser] Значение TotalCPUTime отÑутÑтвует или неверно.[ADLParser] Значение WallTime отÑутÑтвует или неверно.[ADLParser] NetworkInfo пока что не поддерживаетÑÑ.[ADLParser] Значение NodeAccess %s пока что не поддерживаетÑÑ.[ADLParser] Пока что поддерживаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ email Prorocol Ð´Ð»Ñ Notification.[ADLParser] Ðтрибут optional Ð´Ð»Ñ Ñлементов %s пока не поддерживаетÑÑ.[ADLParser] Корневой Ñлемент не 
ÑвлÑетÑÑ ActivityDescription [ADLParser] Значение Ñлемента NumberOfSlots должно быть указано, еÑли значение атрибута useNumberOfSlots Ñлемента SlotsPerHost - "true".[ADLParser] Ðеподдерживаемое ÑоÑтоÑние EMI ES %s.[ADLParser] Ðеподдерживаемый URL %s в RemoteLogging.[ADLParser] Ðеподдерживаемое внутреннее ÑоÑтоÑние %s.[ADLParser] Указан неверный URI в Source - %s.[ADLParser] Указан неверный URI в Target - %s.[ADLParser] ExpirationTime Ñодержит недопуÑтимое Ð²Ñ€ÐµÐ¼Ñ %s.[ADLParser] Ñлишком выÑокий приоритет - иÑпользуетÑÑ Ð¼Ð°ÐºÑимальное значение 100[файл ...][задача ...][опиÑание задачи...][ввод опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸][реÑÑƒÑ€Ñ ...]файл, Ñодержащий Ñрлыки задачСбой add_wordПараметр authorizedvo пуÑтвÑе Ð´Ð»Ñ Ð¾Ð±ÑлуживаниÑвÑе задачиarc.confпланировщикбуфер: ошибка: %s, чтение: %s, запиÑÑŒ: %sбуфер: чтение конца файла : %sбуфер: запиÑÑŒ конца файла: %sкÑш-файл: %sотмененоceÐŸÑ€ÐµÑ„Ð¸ÐºÑ ceID задан как %sпроверить читаемоÑть объекта, не показывать информацию об объектеcheck_ftp: Ñбой при определении времени Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°check_ftp: не удалоÑÑŒ определить размер файлаcheck_ftp: Ñбой в globus_ftp_client_getcheck_ftp: Ñбой в globus_ftp_client_modification_timecheck_ftp: globus_ftp_client_register_readcheck_ftp: Ñбой в globus_ftp_client_sizecheck_ftp: получена дата изменениÑ: %scheck_ftp: получен размер: %llicheck_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ modification_timecheck_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ñ‡Ð°Ñтичной загрузкиcheck_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ sizeназвание клаÑÑа: %sСбой при закрытии: %sÑбой при закрытии файла %s: %scomputingфайл наÑтроек (по умолчанию ~/.arc/client.conf)Сбой d2i_X509_REQ_bioкуÑок данных: %llu %lluуровеньукажите запрашиваемый формат (nordugrid:xrsl, emies:adl)delete_ftp: Ñбой в globus_ftp_client_deletedelete_ftp: Ñбой в globus_ftp_client_rmdirdelete_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ deleteназначениеdestination.next_locationкаталогкаталогкаталогпоказать вÑе доÑтупные метаданныевывеÑти больше информации о каждом заданииDNне запрашивать подтверждениÑне Ñобирать информацию, а лишь конвертировать формат Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð½Ðµ выводить ÑпиÑок задачне выводить количеÑтво задач в каждом ÑоÑтоÑниине выполнÑть заÑылку: раÑпечатка опиÑÐ°Ð½Ð¸Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸ на Ñзыке, приемлемом назначениемзарегиÑтрировать файл, не Ð¿ÐµÑ€ÐµÐ´Ð°Ð²Ð°Ñ ÐµÐ³Ð¾ - назначением должен быть мета-URL.не пытатьÑÑ Ñ„Ð¾Ñ€Ñировать паÑÑивный ÑпоÑоб передачи данныхпередачакаталог загрузки (подкаталог задачи будет Ñоздан в Ñтом каталоге)загрузокигнорируетÑÑecho: ДоÑтуп закрытпуÑÑ‚Ð°Ñ Ð½Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° на входеÑледующий Ñлемент в цепи пуÑтойдоÑтигнут конец Ñтроки при обработке типа Ñлемента имени Ñубъекта #%dошибка Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ‡Ð¸Ñла из bin в BIGNUMошибка Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ð½Ð¸Ñ Ñерийного номера в формат ASN.1Ñимвол выхода в конце ÑтрокивыходÑбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ ÐºÑƒÑка данныхÑбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¼ÐµÑ‚ÐºÐ¸ данныхфайлфайл %s недоÑтупенÐазвание файлаÑлишком длинное Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð°Ñ„Ð°Ð¹Ð»Ð¿ÑƒÑ‚ÑŒ к Ñ„Ð°Ð¹Ð»ÑƒÐ³Ð¾Ñ‚Ð¾Ð²Ð¾Ð¿Ñ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° (перезапиÑать ÑущеÑтвующий каталог задачи)Параметр forcedefaultvoms пуÑтформатчерез Ñледующие точки входа:Сбой операции fsync на файле %s: %sftp_check_callbackftp_complete_callback: ошибка: %sftp_complete_callback: уÑпехftp_get_complete_callback: Сбой Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° ftpftp_get_complete_callback: уÑпехftp_put_complete_callback: уÑпехftp_read_callback: ошибка Globus: %sftp_read_callback: задержанный блок данных: %llu %lluftp_read_callback: Ñбой: 
%sftp_read_callback: уÑпехftp_read_callback: уÑпех - offset=%u, length=%u, eof=%u, allow oof=%uftp_read_callback: избыток неверных неупорÑдоченных блоковftp_read_callback: неверные неупорÑдоченные данные: %llu != %lluftp_read_thread: ошибка Globus: %sftp_read_thread: Ñбой обратного вызова данных - прерывание: %sftp_read_thread: выходftp_read_thread: Ñбой при региÑтрации буфера Globus - попробуем попозже: %sftp_read_thread: Ñбой региÑтрации буферовftp_read_thread: Ñбой ÑброÑа буферовftp_read_thread: Ñбой ÑброÑа буферов - утечкаftp_read_thread: Ñбой for_read - прерывание: %sftp_read_thread: получение и региÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÑ„ÐµÑ€Ð¾Ð²ftp_read_thread: Ñлишком много Ñбоев региÑтрации - отмена: %sftp_read_thread: ожидание разблокировки буферовftp_read_thread: ожидание конца файлаftp_write_callback: Ñбой: %sftp_write_callback: уÑпех %sftp_write_thread: ошибка Globus: %sftp_write_thread: Ñбой обратного вызова данных - прерываниеftp_write_thread: неупорÑдоченные данные в поточном режиме: %llu != %lluftp_write_thread: выходftp_write_thread: Ñбой ÑброÑа буферов - утечкаftp_write_thread: Ñбой for_write - прерываниеftp_write_thread: получение и региÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÑ„ÐµÑ€Ð¾Ð²ftp_write_thread: избыток неупорÑдоченных блоков в поточном режимеftp_write_thread: ожидание разблокировки буферовftp_read_thread: ожидание конца файлаftp_write_thread: ожидание Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸Ð¡Ð±Ð¾Ð¹ gfal_close: %sСбой gfal_closedir: %sСбой в gfal_listxattr, невозможно получить информацию о копиÑÑ…: %sСбой в gfal_mkdir (%s), вÑÑ‘ же попытаемÑÑ Ð·Ð°Ð¿Ð¸ÑатьСбой gfal_mkdir: %sСбой gfal_open: %sСбой gfal_opendir: %sСбой gfal_read: %sСбой gfal_rename: %sСбой gfal_rmdir: %sСбой gfal_stat: %sСбой gfal_unlink: %sСбой gfal_write: %sglobalid задан как %sglobus_ftp_client_operationattr_set_authorization: ошибка: %sgm-jobs выводит информацию о текущих заданиÑÑ… в ÑиÑтеме.Значение gmetric_bin_path пуÑто в arc.conf (никогда не должно ÑлучатьÑÑ, должно иÑпользоватьÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ по умолчанию)Головной узел задан как %shostname[:port] Ñервера MyProxyчаÑчаÑачаÑовIDвходÑщее Ñообщение не в формате SOAPindexинформировать об изменениÑÑ… в заданной задаче (допуÑкаетÑÑ Ð¼Ð½Ð¾Ð³Ð¾ÐºÑ€Ð°Ñ‚Ð½Ð¾Ðµ иÑпользование)init_handle: Ñбой в globus_ftp_client_handlea_initinit_handle: Ñбой в globus_ftp_client_handleattr_initinit_handle: Ñбой в globus_ftp_client_handleattr_set_gridftp2init_handle: Ñбой в globus_ftp_client_operationattr_initinit_handle: Ñбой globus_ftp_client_operationattr_set_allow_ipv6init_handle: Ñбой globus_ftp_client_operationattr_set_delayed_pasvinmsg.Attributes().getAll() = %s inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %sне задана Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ð½Ð° вводеввод не в формате SOAPinputcheck проверÑет, доÑтупны ли входные файлы, указанные в опиÑании задачи, иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ñ‹ доÑтупа в указанном файле доверенноÑти.вмеÑто ÑоÑтоÑÐ½Ð¸Ñ Ð±ÑƒÐ´ÑƒÑ‚ выведены только Ñрлыки указанных задаччиÑлоинтерфейÑÐ˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ Ð·Ð°Ð´Ð°Ð½ как %sID заданиÑjob_description_file [proxy_file]файл, Ñодержащий опиÑание запуÑкаемой задачиÑтрока, ÑÐ¾Ð´ÐµÑ€Ð¶Ð°Ñ‰Ð°Ñ Ð¾Ð¿Ð¸Ñание запуÑкаемой задачиÑохранÑть файлы на Ñервере (не удалÑть)уровеньперечиÑление запиÑи: %sперечиÑление доÑтупных подключаемых модулейпоказать ÑпиÑок доÑтупных модулей (поддерживаемые протоколы)list_files_ftp: Ð¿Ñ€Ð¾Ð²ÐµÑ€Ð¾Ñ‡Ð½Ð°Ñ Ñумма %slist_files_ftp: не удалоÑÑŒ определить Ð²Ñ€ÐµÐ¼Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð°list_files_ftp: не удалоÑÑŒ определить размер файлаlist_files_ftp: Ñбой globus_ftp_client_cksmlist_files_ftp: Ñбой 
globus_ftp_client_modification_timelist_files_ftp: Ñбой в globus_ftp_client_sizelist_files_ftp: поиÑк проверочной Ñуммы %slist_files_ftp: определение времени Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ %slist_files_ftp: поиÑк размера %slist_files_ftp: Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ контрольных Ñуммах недоÑтупнаlist_files_ftp: не получена Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ контрольных Ñуммахlist_files_ftp: Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ контрольных Ñуммах не поддерживаетÑÑlist_files_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€Ð¾Ñ‡Ð½Ð¾Ð¹ Ñуммыlist_files_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ modification_timelist_files_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°Ð·Ð¼ÐµÑ€Ð°Ñбой Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñерийного номера из %slocalid задан как %sраÑширенный формат (Ð´Ð¾Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ)пуÑтое значение lrmsПараметр mail пуÑÑ‚Ñоздавать родительÑкие директории по мере необходимоÑтиошибка mallocМета-файл %s пуÑтминутаминутыминутmkdir_ftp: ÑоздаётÑÑ %smkdir_ftp: иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ mkdirназвание модулÑ: %snnew_payload %sвызван Ñледующий Ñлемент в цепиÑледующий Ñлемент в цепи возвратил пуÑтую нагрузкуÑледующий Ñлемент цепи возвратил ÑÑ‚Ð°Ñ‚ÑƒÑ Ð¾ÑˆÐ¸Ð±ÐºÐ¸Ñледующий Ñлемент цепи возвратил ÑÑ‚Ð°Ñ‚ÑƒÑ Ð¾ÑˆÐ¸Ð±ÐºÐ¸: %sÑледующий Ñлемент в цепи возвратил пуÑтую нагрузкуÑледующий Ñлемент в цепи возвратил недопуÑтимую или неподдерживаемую нагрузкуÑледующий Ñлемент в цепочке возвратил пуÑтую нагрузкуÑледующий Ñлемент в цепи возвратил неопознанную нагрузку - пропуÑкаетÑÑчиÑлоколичеÑтво попыток передачи файлаold_url new_urlвыполнить дейÑтвие лишь над задачами в указанном ÑоÑтоÑнииобработать рекурÑивнорекурÑивное иÑполнение до указанного уровнÑпорÑдокoutpayload %sвывод не в формате SOAPзапиÑать указанные Ñлементы (ÑпиÑок задач, идентификаторы и токены делегированиÑ) в Ñ„Ð°Ð¹Ð»Ð˜Ð¼Ñ Ñубъекта владельца задано как %sФайл Ñертификата p12 пуÑтназначение паролÑ=иÑточник паролÑпутьпуть к локальному кÑшу (иÑпользуетÑÑ Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи файла в кÑш)путь к файлу наÑтроек Ñерверов VOMSпуть к файлу Ñертификата, который может быть в формате PEM, DER, или PKCS12путь к закрытому ключу; еÑли Ñертификат указан в формате PKCS12, закрытый ключ не нуженпуть к файлу доверенноÑтипуть к корневому каталогу Ñ Ñ„Ð°Ð¹Ð»Ð°Ð¼Ð¸ VOMS *.lsc, иÑпользуетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ клиентом VOMSпуть к каталогу Ñ Ð´Ð¾Ð²ÐµÑ€Ñемыми Ñертификатами, иÑпользуетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ клиентом VOMSвыполнить Ñтороннюю переÑылку, когда назначение закачивает файл из иÑточника (доÑтупно только Ñ Ð¼Ð¾Ð´ÑƒÐ»ÐµÐ¼ GFAL)физичеÑкий Ð°Ð´Ñ€ÐµÑ Ð´Ð»Ñ Ð·Ð°Ð¿Ð¸Ñи, еÑли в качеÑтве Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ ÑƒÐºÐ°Ð·Ð°Ð½ каталог реÑурÑов. Должен быть указан Ð´Ð»Ñ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð¾Ð², не генерирующих физичеÑкие адреÑа автоматичеÑки. 
ÐеÑколько значений может быть указано - адреÑа будут перебиратьÑÑ, пока не будет доÑтигнут уÑпех.pkey и rsa_key ÑущеÑтвуют!не уÑтановлен подключаемый модуль Ð´Ð»Ñ Ð¿Ñ€Ð¾Ñ‚Ð¾ÐºÐ¾Ð»Ð° транÑпортного ÑƒÑ€Ð¾Ð²Ð½Ñ %sвывеÑти вÑÑŽ информацию об Ñтой доверенноÑти.вывеÑти токен Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ð¾Ð³Ð¾ идентификаторавывеÑти ÑпиÑок доÑтупных идентификаторов делегированиÑвывеÑти оÑновной токен Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ð¾Ð³Ð¾ идентификаторавывеÑти избранную информацию об Ñтой доверенноÑти.вывеÑти ÑоÑтоÑние ÑервиÑавывеÑти Ñводку о задачах в каждой из транÑферных квотвывеÑти информацию о верÑиивывеÑти информацию об уÑтановленных Ñертификатах Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð¸ Ñертификационных агентÑтвÑлишком выÑокий приоритет - иÑпользуетÑÑ Ð¼Ð°ÐºÑимальное значение 100процеÑÑ: DELETEпроцеÑÑ: GETпроцеÑÑ: HEADпроцеÑÑ: POSTпроцеÑ: PUTобработка: дейÑтвие %s не поддерживаетÑÑ Ð´Ð»Ñ Ñ‡Ð°Ñти пути %sпроцеÑÑ: ÐºÐ¾Ð½ÐµÑ‡Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ°: %sпроцеÑÑ: ÐºÐ¾Ð½ÐµÑ‡Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° фабрикипроцеÑÑ: идентификатор: %sпроцеÑÑ: метод %s не поддерживаетÑÑобработка: метод %s не поддерживаетÑÑ Ð´Ð»Ñ Ñ‡Ð°Ñти пути %sпроцеÑÑ: неопределённый методпроцеÑÑ: метод: %sпроцеÑÑ: операциÑ: %sпроцеÑÑ: запроÑ=%sпроцеÑÑ: отзыв=%sобработка: Ñхема %s не поддерживаетÑÑ Ð´Ð»Ñ Ñ‡Ð°Ñти пути %sпроцеÑÑ: подопциÑ: %sпроцеÑÑ: подкаталог: %sÐ¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾ÑтиприоÑÑ‚Ð°Ð½Ð¾Ð²Ð»ÐµÐ½Ð˜Ð¼Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´Ð¸ задано как %sчитать информацию из указанного контрольного каталогаread_thread: ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð´Ð°Ð½Ð½Ñ‹Ñ… из внешнего процеÑÑа - прерывание: %sread_thread: выходread_thread: Ñбой for_read - прерывание: %sread_thread: получение и региÑÑ‚Ñ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÑ„ÐµÑ€Ð¾Ð²read_thread: неÑоответÑÑ‚Ð²ÑƒÑŽÑ‰Ð°Ñ Ð´Ð°Ð½Ð½Ñ‹Ð¼ метка '%c' из внешнего процеÑÑа - выход: %sрегиÑтрациÑучётный ÑпиÑокURL Ñлужбы учёта Ñ Ð½ÐµÐ¾Ð±Ñзательным указанием протоколаудалить логичеÑкое Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð°, даже еÑли не вÑе физичеÑкие копии удаленыудаление доверенноÑтиудалить задачу из локального ÑпиÑка, даже еÑли Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ ней отÑутÑтвуетзапроÑить обрыв задач Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼Ð¸ ÑрлыкамизапроÑить обрыв задач, принадлежащих пользователÑм Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼Ð¸ именами ÑубъектазапроÑить удаление задач Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼Ð¸ ÑрлыкамизапроÑить удаление задач, принадлежащих пользователÑм Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼Ð¸ именами ÑубъектаÑортировать задачи в обратном порÑдке по идентификатору, времени запуÑка или имениÑбой запиÑи Ñерийного номера в %sÑекундаÑекундыÑекундÑекунд(а/Ñ‹)выбрать ÑпоÑоб планировки (ÑпиÑок доÑтупных планировщиков выводитÑÑ Ð¾Ð¿Ñ†Ð¸ÐµÐ¹ --listplugins)файлу %s приÑваиваетÑÑ Ñ€Ð°Ð·Ð¼ÐµÑ€ %lluвывеÑти адреÑа физичеÑких файловперечиÑлить задачи, Ð´Ð»Ñ ÐºÐ¾Ñ‚Ð¾Ñ€Ñ‹Ñ… отÑутÑтвует Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ ÑоÑтоÑниипоказывать только опиÑание запрашиваемого объекта, не выводить Ñодержимое каталоговпоказать задачи, принадлежащие пользователÑм Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼Ð¸ именами Ñубъектапоказать задачи Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼Ð¸ Ñрлыкамипоказать индикатор выполнениÑвывеÑти информацию о ÑоÑтоÑнии в формате JSONвывеÑти ошибки ÑиÑтемы при иÑполнении задачипоказать изначальное опиÑание задачипоказать заданный файл из рабочего каталога задачивывеÑти Ñтандартную ошибку задачивывеÑти Ñтандартный выход задачи (по умолчанию)ВыключениепропуÑтить задачи, находÑщиеÑÑ Ð½Ð° вычиÑлительном реÑурÑе Ñ Ð·Ð°Ð´Ð°Ð½Ð½Ñ‹Ð¼ URLпропуÑтить Ñлужбу Ñ Ñтим URL при обнаружении ÑлужбÑортировать задачи по идентификатору, времени запуÑка или именииÑточникиÑточник назначениеsource.next_locationstart_readingstart_reading: Ñбой запуÑка аÑÑиÑтентаstart_reading: Ñбой 
nordugrid-arc-7.1.1/po/PaxHeaders/sv.gmo0000644000000000000000000000013215067751432015122 xustar0030 mtime=1759499034.446505353 30 atime=1759499034.444505322 30 ctime=1759499034.635919592 nordugrid-arc-7.1.1/po/sv.gmo0000644000175000002070000143723115067751432017037 0ustar00mockbuildmock00000000000000
[binary content of po/sv.gmo: GNU .mo catalog header, hash table and string-offset tables — omitted]
þÖãX ¬qt ] •  Ê›N N 2½ ÎXc*  Úò Žp º.S A(Hné1¸Å¼ hbH>ð÷ àvåA ¨· €QŒ£c W Ü ¥ QÆ % g}â /h%­!« ¾ %K# 6Foä õ»]¡Æ ƒ® Gó òÕ šUŒ:£ Ÿ Ã@•[ mé z – ) `ï¢Ö£F » ¡ $Ô +èW &ó f õ P ¼ B Ì‚ B— À ¼Q ¼¢ ìè g Éê ö<P _ pt¶' 1/Ii³ ¹ ´ˆ `ä÷ „4ºyµ 7 KCD j ;ë ÷wÒ aüÆe g= ó|7ð Ó on^vø æ§ û@ Sß Y ô…Á: Û^ G Ø Ä / |Ò¶ Ÿ< ‚#ÊÊ  ŠNèýÏN‚F-‡@ ° § yî¹  0 d< ! ø ¬š öýV Ú :xI  • @ˆ f \ Ë ö¿ëº ª ÇÜ"f× \ Š Ë ! C »ˆ ò Aø—\• .jéÚô Håm ”,ú €••B¬ ? ñ Î A ' ó  r‡ø¨ñZ’Ú üB‘ F© ­"mû ¥ .õç³Ðæq b [[ö Í X © y=  ó²õ &W¤ý ز S¡ b Ž i6{ øcÙjÌ“. s þ Q” ­ ͆¶ ³ÊÁ ò8ýlÉ( ä¬ B§-a"\t è ‚ 4 Ö < é_ð² ß ¶¦»G"ä¢ Nuu ÔH "¢e%4 = ` p±h اJ NžD1 H ƒ7† ½ Ó § CZ “Î c" µk õèfã ¶ $“¢ï ñr Th s â¬1«¾þ… ·ïB *â sh ½ Œ µèÜÆWø9 b 4v “ ÝY ºØ Ð í½" s 2È!¯ I_ “^‡#ÏÇ“ ¯ š ®5RœnV#´| ¸ s06 ¸ œ È ÐÆ: ¾õ OÙ ©ü(ÒÃH 2D mFë D ú ` ¦ ;é ìy¨Â0FÈ dRÑ­PC@æU‚Ðô ‰´;Ú¢c ½¤ Ùi c¥ð[ EUeU ¸O  ²¢¾ ÂþÕ ¦9  Ï  Ä);è> ñü †žl ™ ‡  » »Ù "ç Z.Ö¬ {D ‡9ã ™Ð&Þ÷ É›ˆ¾¦…çQ @ ù i Ðírc ‚Yá ëæ[ [ ù eô? ,˜»žú = +Ñ'à ó—XäÈb nËœ H “C fnì¿4©oò ß K+P– zÐ þ O APø í L ¸ „u¯ #q ³Á œ˜C A  »<\ 8*B ¾3  ßûqŒ ‘Fq7¦°/ Ã)îý—û>1 3 Û ÔÙ ¿ Ç › /L &u~CQ t àpûµ Û óì )   æ ï‡! š  ” #·K|ª ûdz- à•CÔ> ™zƒ l  ¿Ð Ñ 3è³ Ù§˜ u2Æ e ê > ê ] Ë Ü’ À ¡äu2 à Y  ùÍÒ —Õ N Å8 V  ¤­ awš ¬> Œú3´ ’6?ï , ˬ86 /Ÿ w{ Àá› `â ™ å dù— ½ \ °€ ‘ŽXÝ ¨ ú7É ßÓ r –Øíl^ ®R¶9è d  ƒ ‡ 8'W¨Aåz8Ê/“™9  -… ¨ ú…à >¹ p ˆ ’[‡ Í=7|˜ *Í õ ú˜/‹ ž ‚r ׯ f “ë¸ Ë31î¿ÔT ÷Çé- bñ J  üúÕWD¤ÎÈ :8D¦ãÌ Îí †Ãx#Óx îñ Ñ êI @ƒ J4 Q{ S­õ+ Š ™  >KP õT -± Œ˜ú ¢· „ ˜ÎJêS í ðá ‘ ¼ ãä Þ¾ ¼Hê ôÂÓLë HŠ  %s Cache : %s Cache (read-only): %s Cache cleaning disabled Cache cleaning enabled Cache link dir : %s Control dir : %s Session root dir : %s default LRMS : %s default queue : %s default ttl : %u Run 'arcclean -s Undefined' to remove cleaned jobs from job list Run 'arcclean -s Undefined' to remove killed jobs from job list Use arcclean to remove retrieved jobs from job list %s Is executable: true Name: %s Sources.DelegationID: %s Sources.Options: %s = %s Sources: %s Targets.DelegationID: %s Targets.Options: %s = %s Targets: %s certificate dn: %s expiration time: %s issuer dn: %s serial number: %d %s: %i Delivery service: %s Delivery service: LOCAL Delivery slots: %u Emergency slots: %u Post-processor slots: %u Pre-processor slots: %u Prepared slots: %u Shares configuration: %s Status of endpoint (%s) is %s This endpoint (%s) is STARTED or SUCCESSFUL unspecified: %i %s -> %s (%s) --- DRY RUN --- Access control: %s Annotation: %s Argument: %s Benchmark information: Computing Service Log Directory: %s Computing endpoint URL: %s Computing endpoint interface name: %s Computing endpoint requirements: Credential service: %s Delegation IDs: DelegationID element: %s End Time: %s Entry valid for: %s Entry valid from: %s Environment.name: %s Environment: %s Exit Code: %d Exit code for successful execution: %d Health state: %s ID on service: %s Inputfile element: Installed application environments: Job Error: %s Job does not require exclusive execution Job management URL: %s (%s) Job requires exclusive execution Job status URL: %s (%s) Mapping queue: %s Name: %s No exit code for successful execution specified. 
Node access: inbound Node access: inbound and outbound Node access: outbound Notify: Old activity ID: %s Old job IDs: Operating system requirements: Other Messages: %s Other attributes: [%s], %s Outputfile element: Owner: %s PostExecutable.Argument: %s PreExecutable.Argument: %s Processing start time: %s Proxy valid until: %s Queue: %s RemoteLogging (optional): %s (%s) RemoteLogging: %s (%s) Requested CPU Time: %s Requested Slots: %d Results must be retrieved before: %s Results were deleted: %s Run time environment requirements: Service information URL: %s (%s) Session directory URL: %s Specific state: %s Stagein directory URL: %s Stageout directory URL: %s State: %s Stderr: %s Stdin: %s Stdout: %s Submitted from: %s Submitted: %s Submitting client: %s Used CPU Time: %s Used CPU Time: %s (%s per slot) Used Memory: %d Used Wall Time: %s Used Wall Time: %s (%s per slot) Waiting Position: %d [ JobDescription tester ] [ Parsing the original text ] [ emies:adl ] [ nordugrid:xrsl ] $X509_VOMS_FILE, and $X509_VOMSES are not set; User has not specified the location for vomses information; There is also not vomses location information in user's configuration file; Can not find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory%5u s: %10.1f kB %8.1f kB/s%d Batch Systems%d Endpoints%d Shares%d mapping policies%d of %d jobs were submitted%i retries left, will wait until %s before next attempt%li seconds since lock file %s was created%s%s %s could not be created.%s > %s => false%s > %s => false: %s contains non numbers in the version part.%s > %s => true%s class is not an object%s directory created%s directory exist! Skipping job.%s failed%s is an unsupported digest type%s is not a directory%s is not an object%s made persistent%s parsing error%s plugin "%s" not found.%s version %s%s->%s%s. Cannot copy fileset%s. SQLite database error: %s%s:%s: %i%s: %s%s: %s: New job belongs to %i/%i%s: %s: Adding new output file %s: %s%s: All %s %s successfully%s: Bring online request %s in SRM queue. 
Sleeping for %i seconds%s: Cache cleaning takes too long - %u.%06u seconds%s: Can't convert checksum %s to int for %s%s: Can't convert filesize %s to int for %s%s: Can't read list of input files%s: Can't rerun on request%s: Can't rerun on request - not a suitable state%s: Canceling job because of user request%s: Cancelling active DTRs%s: Cancelling other DTRs%s: Cannot upload two different files %s and %s to same LFN: %s%s: Checking user uploadable file: %s%s: Checksum %llu verified for %s%s: Critical error for uploadable file %s%s: DTR %s to copy file %s failed%s: DTR %s to copy to %s failed but is not mandatory%s: Delete request due to internal problems%s: Destination file %s was possibly left unfinished from previous A-REX run, will overwrite%s: Duplicate file in list of input files: %s%s: Error accessing file %s%s: Error reading file %s%s: Error reading user generated output file list in %s%s: Failed creating grami file%s: Failed obtaining local job information.%s: Failed obtaining lrms id%s: Failed parsing job request.%s: Failed reading .local and changing state, job and A-REX may be left in an inconsistent state%s: Failed reading job description: %s%s: Failed reading local information%s: Failed running cancellation process%s: Failed running submission process%s: Failed setting executable permissions%s: Failed storing failure reason: %s%s: Failed to cancel running job%s: Failed to clean up session dir%s: Failed to list output directory %s: %s%s: Failed to load evaluator for user policy %s: Failed to open file %s for reading%s: Failed to parse user policy%s: Failed to read dynamic output files in %s%s: Failed to read list of input files%s: Failed to read list of input files, can't clean up session dir%s: Failed to read list of output files%s: Failed to read list of output files, can't clean up session dir%s: Failed to read reprocessed list of input files%s: Failed to read reprocessed list of output files%s: Failed to receive job in DTR generator%s: Failed to switch user ID to %d/%d to read file %s%s: Failed to turn job into failed during cancel processing.%s: Failed to write back dynamic output files in %s%s: Failed to write list of input files%s: Failed to write list of output files%s: Failed to write list of output status files%s: Failed writing changed input file.%s: Failed writing list of output files: %s%s: Failed writing local information%s: Failed writing local information: %s%s: Failure creating data storage for child process%s: Failure creating slot for child process%s: Failure starting child process%s: Failure waiting for child process to finish%s: File %s has wrong checksum: %llu. Expected %lli%s: File request %s in SRM queue. Sleeping for %i seconds%s: Going through files in list %s%s: Invalid DTR%s: Invalid file: %s is too big.%s: Invalid size/checksum information (%s) for %s%s: Job cancel request from DTR generator to scheduler%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded.%s: Job cancellation takes too long. Failing.%s: Job failed in unknown state. Won't rerun.%s: Job failure detected%s: Job finished%s: Job has completed already. 
No action taken to cancel%s: Job is ancient - delete rest of information%s: Job is not allowed to be rerun anymore%s: Job is requested to clean - deleting%s: Job is too old - deleting%s: Job monitoring counter is broken%s: Job monitoring is lost due to removal from queue%s: Job monitoring is unintentionally lost%s: Job monitoring stop requested with %u active references%s: Job monitoring stop requested with %u active references and %s queue associated%s: Job monitoring stop success%s: Job submission to LRMS failed%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done.%s: Job submission to LRMS takes too long. Failing.%s: Job's helper exited%s: LRMS scripts limit of %u is reached - suspending submit/cancel%s: Plugin at state %s : %s%s: Plugin execution failed%s: Processing job description failed%s: PushSorted failed to find job where expected%s: Re-requesting attention from DTR generator%s: Reading output files from user generated list in %s%s: Reading status of new job failed%s: Received DTR %s to copy file %s in state %s%s: Received DTR belongs to inactive job%s: Received DTR with two remote endpoints!%s: Received data staging request to %s files%s: Received job in DTR generator%s: Received job in a bad state: %s%s: Removing %s from dynamic output file %s%s: Reprocessing job description failed%s: Requesting attention from DTR generator%s: Returning canceled job from DTR generator%s: Session directory processing takes too long - %u.%06u seconds%s: Some %s failed%s: State: %s from %s%s: State: %s: data staging finished%s: State: %s: still in data staging%s: State: ACCEPTED%s: State: ACCEPTED: dryrun%s: State: ACCEPTED: has process time %s%s: State: ACCEPTED: moving to PREPARING%s: State: ACCEPTED: parsing job description%s: State: CANCELING%s: State: FINISHING%s: State: INLRMS%s: State: INLRMS - checking for not pending%s: State: INLRMS - checking for pending(%u) and mark%s: State: INLRMS - no mark found%s: State: INLRMS: exit message is %i %s%s: State: PREPARING%s: State: SUBMIT%s: Trying remove job from data staging which does not exist%s: Trying to remove job from data staging which is still active%s: Two identical output destinations: %s%s: Unknown user policy '%s'%s: Uploadable files timed out%s: User has NOT uploaded file %s%s: User has uploaded file %s%s: checksum %s%s: delete file %s: failed to obtain file path: %s%s: delete file %s: failed to open file/dir: %s%s: job assigned for slow polling%s: job being processed%s: job for attention%s: job found while scanning%s: job will wait for external process%s: new job is accepted%s: old job is accepted%s: put file %s: %s%s: put file %s: failed to create file: %s%s: put file %s: there is no payload%s: put file %s: unrecognized payload%s: put log %s: there is no payload%s: put log %s: unrecognized payload%s: replica type %s%s: restarted FINISHING job%s: restarted INLRMS job%s: restarted PREPARING job%s: size %llu%s: state CANCELING: child exited with code %i%s: state CANCELING: job diagnostics collected%s: state CANCELING: starting child: %s%s: state CANCELING: timeout waiting for cancellation%s: state SUBMIT: child exited with code %i%s: state SUBMIT: starting child: %s%s: there is no such job: %s%s: unexpected failed job add request: %s%s: unexpected job add request: %s'(' expected')' expected'action' attribute not allowed in user-side job description'control' configuration option is no longer supported, please use 'controldir' instead'stdout' attribute must be specified when 'join' attribute 
is specified(Re)Trying next destination(Re)Trying next source(empty)(null): %d: %s: Accounting records reporter tool is not specified: Failure creating accounting database connection: Failure creating slot for accounting reporter child process: Failure starting accounting reporter child process: Metrics tool returned error code %i: %s: writing accounting record took %llu ms< %s<< %s> %sA computing resource using the GridFTP interface was requested, but %sthe corresponding plugin could not be loaded. Is the plugin installed? %sIf not, please install the package 'nordugrid-arc-plugins-globus'. %sDepending on your type of installation the package name might differ.A-REX REST: Failed to resume jobA-REX REST: State change not allowed: from %s to %sAC extension information for VO AC is invalid: ARC Auth. request: %sARC delegation policy: %sAccess list location: %sAccounting database cannot be created. Failed to create parent directory %s.Accounting database cannot be created: %s is not a directoryAccounting database connection has been establishedAccounting database file (%s) is not a regular fileAccounting database initialized successfullyAcquired auth token for %s: %sAdd location: metadata: %sAdd location: url: %sAdding endpoint (%s) to ServiceEndpointRetrieverAdding endpoint (%s) to TargetInformationRetrieverAdding endpoint (%s) to both ServiceEndpointRetriever and TargetInformationRetrieverAdding location: %s - %sAdding request token %sAdding space token %sAdding to bulk requestAddress: %sAll %u process slots usedAll DTRs finished for job %sAll results obtained are invalidAll software requirements satisfied.Allow specified entity to retrieve credential without passphrase. This option is specific to the PUT command when contacting a Myproxy server.Already reading from sourceAlready writing to destinationAn error occurred during the generation of job description to be sent to %sAnother process (%s) owns the lock on file %sArc policy cannot be carried by SAML2.0 profile of XACMLArcAuthZ: failed to initiate all PDPs - this instance will be non-functionalArchiving DTR %s, state %sArchiving DTR %s, state ERRORAre you sure you want to clean jobs missing information?Are you sure you want to synchronize your local job list?Assembling BLAH parser log entry: %sAssigned to authorization group %sAssigned to userlist %sAssuming - file not foundAssuming transfer is already aborted or failed.At least two values are needed for the 'inputfiles' attributeAt least two values are needed for the 'outputfiles' attributeAttempt to assign relative path to URL - making it absoluteAttempting to contact %s on port %iAttribute '%s' multiply definedAttribute 'join' cannot be specified when both 'stdout' and 'stderr' attributes are specifiedAttribute Value (1): %sAttribute Value (2): %sAttribute Value inside Subject: %sAttribute name (%s) contains invalid character (%s)Attribute name expectedAttributes 'gridtime' and 'cputime' cannot be specified togetherAttributes 'gridtime' and 'walltime' cannot be specified togetherAuthentication Request URL: %sAuthorized by arc.pdpAuthorized from remote pdp serviceAuthorized from simplelist.pdp: %sAuthorized from xacml.pdpBN_new || RSA_new failedBN_set_word failedBad URL in deliveryservice: %sBad authentication information: %sBad checksum format %sBad credential value %s in cache access rulesBad format detected in file %s, in line %sBad format in XML response from delivery service at %s: %sBad format in XML response from service at %s: %sBad format in XML response: %sBad label: 
"%s"Bad logicBad logic for %s - bringOnline returned ok but SRM request is not finished successfully or on goingBad logic for %s - getTURLs returned ok but SRM request is not finished successfully or on goingBad logic for %s - putTURLs returned ok but SRM request is not finished successfully or on goingBad name for executable: %sBad name for runtime environment: %sBad name for stderr: %sBad name for stdout: %sBad number in definedshare %sBad number in maxdeliveryBad number in maxemergencyBad number in maxpreparedBad number in maxprocessorBad number in maxtransfertriesBad number in priority element: %sBad number in remotesizelimitBad number in speedcontrolBad or old format detected in file %s, in line %sBad path for %s: Format should be /replicas//Bad value for loglevelBadly formatted pid %s in lock file %sBatch System Information:Batch system information:Bearer token is available. It is preferred for job submission.Behaviour tuningBlock %s not found in configuration file %sBlockName is emptyBoosting priority from %i to %i due to incoming higher priority DTRBoth URLs must have the same protocol, host and portBoth of CACertificatePath and CACertificatesDir elements missing or emptyBring online request %s finished successfully, file is now ONLINEBring online request %s is still in queue, should waitBroken stringBroker %s loadedBroker plugin "%s" not found.Brokering and filteringBrokers available to %s:Buffer creation failed !Busy plugins found while unloading Module Manager. Waiting for them to be released.CA certificate and CA private key do not matchCA name: %sCA-certificates installed:CONTENT %u: %sCPU clock speed: %iCPU model: %sCPU vendor: %sCPU version: %sCache %s: Free space %f GBCache access allowed to %s by DN %sCache access allowed to %s by VO %sCache access allowed to %s by VO %s and group %sCache access allowed to %s by VO %s and role %sCache area free size: %i GBCache area total size: %i GBCache cleaning script failedCache creation date: %sCache file %s does not existCache file %s not foundCache file %s was deleted during link/copy, must start againCache file %s was locked during link/copy, must start againCache file %s was modified in the last second, sleeping 1 second to avoid race conditionCache file %s was modified while linking, must start againCache file is %sCache meta file %s is empty, will recreateCache meta file %s possibly corrupted, will recreateCache not found for file %sCached copy is still validCached file is locked - should retryCached file is outdated, will re-downloadCalculated checksum %s matches checksum reported by serverCalculated transfer checksum %s matches source checksumCalculated/supplied transfer checksum %s matches checksum reported by SRM destination %sCallback got failureCalling PrepareReading when request was already prepared!Calling PrepareWriting when request was already prepared!Calling http://localhost:60000/Echo using ClientSOAPCalling http://localhost:60000/Echo using httplibCalling https://localhost:60000/Echo using ClientSOAPCalling plugin %s to query endpoint on %sCan not access CA certificate directory: %s. 
The certificates will not be verified.Can not access VOMS file/directory: %s.Can not access VOMSES file/directory: %s.Can not access certificate file: %sCan not access key file: %sCan not access proxy file: %sCan not add X509 extended KeyUsage extension to new proxy certificateCan not add X509 extension to proxy certCan not allocate memoryCan not allocate memory for extension for proxy certificateCan not compute digest of public keyCan not convert DER encoded PROXY_CERT_INFO_EXTENSION extension to internal formatCan not convert PROXY_CERT_INFO_EXTENSION struct from internal to DER encoded formatCan not convert keyUsage struct from DER encoded formatCan not convert keyUsage struct from internal to DER formatCan not convert private key to DER formatCan not convert signed EEC cert into DER formatCan not convert signed proxy cert into DER formatCan not convert signed proxy cert into PEM formatCan not convert string into ASN1_OBJECTCan not copy extended KeyUsage extensionCan not copy the subject name from issuer for proxy certificateCan not create ASN1_OCTET_STRINGCan not create BIO for parsing requestCan not create BIO for requestCan not create BIO for signed EEC certificateCan not create BIO for signed proxy certificateCan not create PROXY_CERT_INFO_EXTENSION extensionCan not create PolicyStore objectCan not create a new X509_NAME_ENTRY for the proxy certificate requestCan not create delegation credential to delegation service: %sCan not create extension for PROXY_CERT_INFOCan not create extension for keyUsageCan not create extension for proxy certificateCan not create function %sCan not create function: FunctionId does not existCan not create name entry CN for proxy certificateCan not create the SSL Context objectCan not create the SSL objectCan not determine the install location. Using %s. Please set ARC_LOCATION if this is not correct.Can not duplicate serial number for proxy certificateCan not duplicate the subject name for the self-signing proxy certificate requestCan not dynamically produce AlgFactoryCan not dynamically produce AttributeFactoryCan not dynamically produce EvaluatorCan not dynamically produce FnFactoryCan not dynamically produce PolicyCan not dynamically produce RequestCan not find element with proper namespaceCan not find element with proper namespaceCan not find ArcPDPContextCan not find CA certificates directory in default locations: ~/.arc/certificates, ~/.globus/certificates, %s/etc/certificates, %s/etc/grid-security/certificates, %s/share/certificates, /etc/grid-security/certificates. The certificate will not be verified. 
If the CA certificates directory does exist, please manually specify the locations via env X509_CERT_DIR, or the cacertificatesdirectory item in client.conf Can not find XACMLPDPContextCan not find certificate file: %sCan not find certificate with name %sCan not find issuer certificate for the certificate with subject %s and hash: %luCan not find key file: %sCan not find key with name: %sCan not find voms service configuration file (vomses) in default locations: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomsesCan not generate X509 requestCan not generate policy objectCan not get SAMLAssertion SecAttr from message contextCan not get extended KeyUsage extension from issuer certificateCan not get policy from PROXY_CERT_INFO_EXTENSION extensionCan not get policy language from PROXY_CERT_INFO_EXTENSION extensionCan not get the certificate typeCan not get the delegation credential: %s from delegation service: %sCan not get the issuer's private keyCan not load ARC evaluator object: %sCan not load ARC request object: %sCan not load policy objectCan not load policy object: %sCan not load request objectCan not open job description file: %sCan not open key file %sCan not parse classname for AttributeFactory from configurationCan not parse classname for CombiningAlgorithmFactory from configurationCan not parse classname for FunctionFactory from configurationCan not parse classname for Policy from configurationCan not parse classname for Request from configurationCan not parse date: %sCan not parse month: %sCan not parse time zone offset: %sCan not parse time: %sCan not read PEM private keyCan not read PEM private key: failed to decryptCan not read PEM private key: failed to obtain passwordCan not read PEM private key: probably bad passwordCan not read certificate file: %sCan not read certificate stringCan not read certificate/key stringCan not read information from the local job status fileCan not read key stringCan not set CN in proxy certificateCan not set issuer's subject for proxy certificateCan not set private keyCan not set pubkey for proxy certificateCan not set readable file for request BIOCan not set serial number for proxy certificateCan not set the lifetime for proxy certificateCan not set version number for proxy certificateCan not set writable file for request BIOCan not set writable file for signed EEC certificate BIOCan not set writable file for signed proxy certificate BIOCan not sign a EECCan't allocate memory for CA policy pathCan't convert DER encoded PROXYCERTINFO extension to internal formatCan't convert X509 request from internal to DER encoded formatCan't create delegation contextCan't delete directory %s: %sCan't delete file %s: %sCan't extract object's name from source urlCan't find LCAS functions in a library %sCan't find LCMAPS functions in a library %sCan't get policy from PROXYCERTINFO extensionCan't get policy language from PROXYCERTINFO extensionCan't get the first byte of input BIO to get its formatCan't get the first byte of input to determine its formatCan't handle URL %sCan't handle location %sCan't load LCAS library %s: %sCan't load LCMAPS library %s: %sCan't obtain configuration. Only public information is provided.Can't obtain configuration. 
Public information is disallowed for this user.Can't open configuration fileCan't parse host and/or port in response to EPSV/PASVCan't read configuration fileCan't read configuration file at %sCan't read from sourceCan't read list of destinations from file %sCan't read list of locations from file %sCan't read list of sources from file %sCan't read policy namesCan't read transfer states from %s. Perhaps A-REX is not running?Can't recognize type of configuration fileCan't recognize type of configuration file at %sCan't rename file %s: %sCan't reset the inputCan't retrieve job files for job (%s) - unable to determine URL of log directoryCan't retrieve job files for job (%s) - unable to determine URL of stage out directoryCan't set OpenSSL verify flagsCan't stat file: %s: %sCan't stat stdio channel %sCan't submit multiple instances for multiple job descriptions. Not implemented yet.Can't use URL %sCan't write to destinationCancellation completeCancelling DTR %s with source: %s, destination: %sCancelling active transferCancelling synchronization requestCandyPond: UnauthorizedCannot adapt job description to the submission target when information discovery is turned offCannot change owner of %s: %s Cannot change permission of %s: %s Cannot compare empty checksumCannot convert ARC module name to Python stringCannot convert ExecutionTarget (%s) to python objectCannot convert JobDescription to python objectCannot convert UserConfig to Python objectCannot convert config to Python objectCannot convert inmsg to Python objectCannot convert module name to Python stringCannot convert outmsg to Python objectCannot convert string %s to int in line %sCannot copy example configuration (%s), it is not a regular fileCannot create ExecutionTarget argumentCannot create JobDescription argumentCannot create UserConfig argumentCannot create argument of the constructorCannot create config argumentCannot create directories for log file %s. Messages will be logged to this logCannot create directory %s for per-job hard linksCannot create http payloadCannot create inmsg argumentCannot create instance of Python classCannot create outmsg argumentCannot create output of %s for any jobsCannot create output of %s for job (%s): Invalid source %sCannot create resolver from /etc/resolv.confCannot determine hostname from gethostname()Cannot determine hostname from gethostname() to generate ceID automatically.Cannot determine replica type for %sCannot determine the %s location: %sCannot find under response soap message:Cannot find ARC Config classCannot find ARC ExecutionTarget classCannot find ARC JobDescription classCannot find ARC Message classCannot find ARC UserConfig classCannot find any proxy. This application currently cannot run without a proxy. If you have the proxy file in a non-default location, please make sure the path is specified in the client configuration file. If you don't have a proxy yet, please run 'arcproxy'!Cannot find any token. Please run 'oidc-token' or use similar utility to obtain authentication token!Cannot find content under response soap messageCannot find custom broker classCannot find file at %s for getting the proxy. 
Please make sure this file exists.Cannot find information about job submission endpointCannot find local input file '%s' (%s)Cannot find service classCannot find the CA certificates directory path, please set environment variable X509_CERT_DIR, or cacertificatesdirectory in a configuration file.Cannot find the path of the proxy file, please setup environment X509_USER_PROXY, or proxypath in a configuration fileCannot find the user certificate path, please setup environment X509_USER_CERT, or certificatepath in a configuration fileCannot find the user private key path, please setup environment X509_USER_KEY, or keypath in a configuration fileCannot get VOMS server address information from vomses line: "%s"Cannot get dictionary of ARC moduleCannot get dictionary of custom broker moduleCannot get dictionary of moduleCannot handle local user %sCannot import ARC moduleCannot import moduleCannot initialize ARCHERY domain name for queryCannot link to a remote destination. Will not use mapped URLCannot link to source which can be modified, will copy insteadCannot open BLAH log file '%s'Cannot open cache log file %s: %s. Cache cleaning messages will be logged to this logCannot output XRSL representation: The Resources.SlotRequirement.NumberOfSlots attribute must be specified when the Resources.SlotRequirement.SlotsPerHost attribute is specified.Cannot parse integer value '%s' for -%cCannot parse password source %s it must be of source_type or source_type:data format. Supported source types are int, stdin, stream, file.Cannot parse password source expression %s it must be of type=source formatCannot parse password source type %s. Supported source types are int, stdin, stream, file.Cannot parse password type %s. Currently supported values are 'key','myproxy','myproxynew' and 'all'.Cannot parse schema!Cannot parse service endpoint TXT records.Cannot process proxy file at %s.Cannot query service endpoint TXT records from DNSCannot read specified jobid file: %sCannot remove proxy file at %sCannot remove proxy file at %s, because it's not thereCannot rename to or from root directoryCannot rename to the same URLCannot stat local input file '%s'Cannot switch to group (%s)Cannot switch to primary group for user (%s)Cannot switch to user (%s)Cannot update AAR. 
Cannot find registered AAR for job %s in accounting database.Cannot use supplied --size optionCannot write job IDs to file (%s)Cannot write jobid (%s) to file (%s)Capabilities:Catting %s for job %sCause of failure unclear - choosing randomlyCert Type: %dCertificate %s already expiredCertificate %s will expire in %sCertificate and key ('%s' and '%s') not found in any of the paths: %sCertificate does not have a slotCertificate format is DERCertificate format is PEMCertificate format is PKCSCertificate format is unknownCertificate has unknown extension with numeric ID %u and SN %sCertificate information collection failedCertificate information:Certificate issuer: %sCertificate request is invalidCertificate to use is: %sCertificate verification error: %sCertificate verification failedCertificate verification succeededCertificate with serial number %s and subject "%s" is revokedCertificate with subject %s has expiredCertificate/Proxy path is emptyCertificate: %sCertificate chain number %dChain(s) configuration failedCheck: looking for metadata: %sCheck: obtained access latency: high (NEARLINE)Check: obtained access latency: low (ONLINE)Check: obtained checksum: %sCheck: obtained modification date: %sCheck: obtained modification time %sCheck: obtained size %lluCheck: obtained size: %lliCheckOperationAllowed: allowed due to matching scopesCheckOperationAllowed: allowed due to missing configuration scopesCheckOperationAllowed: allowed for TLS connectionCheckOperationAllowed: configuration scopes: %sCheckOperationAllowed: missing configurationCheckOperationAllowed: no supported identity foundCheckOperationAllowed: token scopes do not match required scopesCheckOperationAllowed: token scopes: %sChecking %sChecking URL returned by SRM: %sChecking cache againChecking cache permissions: DN: %sChecking cache permissions: VO: %sChecking cache permissions: VOMS attr: %sChecking for existence of %sChecking for suspended endpoints which should be started.Checking replica %sChecking source file is presentChecksum %sChecksum mismatchChecksum mismatch between calculated checksum %s and source checksum %sChecksum mismatch between calculated checksum %s and checksum reported by server %sChecksum mismatch between calculated checksum %s and source checksum %sChecksum mismatch between calculated/supplied checksum (%s) and checksum reported by SRM destination (%s)Checksum mismatch between checksum given as meta option (%s:%s) and calculated checksum (%s)Checksum not computedChecksum of %s is not availableChecksum type of SRM (%s) and calculated/supplied checksum (%s) differ, cannot compareChecksum type of source and calculated checksum differ, cannot compareChecksum type returned by server is different to requested type, cannot compareChecksum types of index and replica are different, skipping comparisonChild monitoring child %d exitedChild monitoring drops abandoned child %d (%d)Child monitoring error: %iChild monitoring internal communication errorChild monitoring kick detectedChild monitoring lost child %d (%d)Child monitoring signal detectedChild monitoring stderr is closedChild monitoring stdin is closedChild monitoring stdout is closedChild was already startedClass name: %sCleaning up after failure: deleting %sClient chain does not have entry pointClient connection has no entry pointClient side MCCs are loadedClosed successfullyClosing connectionClosing connection to SQLite accounting databaseClosing may have failedClosing read channelClosing write channelCollected error is: %sCollecting Job (A-REX REST jobs) 
information.Command is being sentCommand: %sComponent %s(%s) could not be createdComponent has no ID attribute definedComponent has no name attribute definedComponent's %s(%s) next has no ID attribute definedComputing endpoint %s (type %s) added to the list for submission brokeringComputing service:Computing service: %sComputingShare (%s) explicitly rejectedComputingShareName of ExecutionTarget (%s) is not definedConfig class is not an objectConfiguration (%s) loadedConfiguration errorConfiguration example file created (%s)Configuration file can not be readConfiguration file is broken - block name does not end with ]: %sConfiguration file is broken - block name is too short: %sConfiguration file not specifiedConfiguration file not specified in ConfigBlockConfiguration file to loadConfiguration root element is not Conflicting authentication types specified.Conflicting delegation types specified.Connecting to Delivery service at %sConnection from %s: %sContacting VOMS server (named %s): %s on port: %sContent: %sConversion failed: %sCopy failed: %sCould not acquire lock on meta file %sCould not connect to service %s: %sCould not convert incoming payload!Could not convert payload!Could not convert the slcs attribute value (%s) to an URL instance in configuration file (%s)Could not create PayloadSOAP!Could not create link to lock file %s as it already existsCould not create lock file %s as it already existsCould not create temporary file "%s"Could not determine configuration type or configuration is emptyCould not determine session directory from filename %sCould not determine version of serverCould not find any useable delivery service, forcing local transferCould not find checksum: %sCould not find loadable module by name %s (%s)Could not find loadable module by names %s and %s (%s)Could not find loadable module descriptor by name %sCould not find loadable module descriptor by name %s or kind %sCould not find matching RSE to %sCould not get checksum of %s: %sCould not handle checksum %s: skip checksum checkCould not handle endpoint %sCould not load configuration (%s)Could not locate module %s in following paths:Could not make new transfer request: %s: %sCould not obtain information about source: %sCould not open file %s for reading: %sCould not read data staging configuration from %sCould not stat file %s: %sCould not validate message!Couldn't handle certificate: %sCouldn't parse benchmark XML: %sCouldn't verify availability of CRLCountry: %sCreated RSA key, proceeding with requestCreated entry for JWT issuer %sCreating a delegation soap clientCreating a http clientCreating a pdpservice clientCreating a soap clientCreating and sending requestCreating buffer: %lli x %iCreating client interfaceCreating client side chainCreating delegation credential to ARC delegation serviceCreating delegation to CREAM delegation failedCreating delegation to CREAM delegation serviceCreating delegation to CREAM delegation service failedCreating directory %sCreating directory: %sCreating service side chainCredential expires at %sCredential handling exception: %sCredential is not initializedCredentials stored in temporary file %sCritical VOMS attribute processing failedCurrent jobs in system (PREPARING to FINISHING) per-DN (%i entries)Current transfer FAILED: %sCurrent transfer completeDCAU failedDCAU failed: %sDH parameters appliedDN %s doesn't match %sDN %s is cached and is valid until %s for URL %sDN %s is cached but has expired for URL %sDN is %sDTR %s cancelledDTR %s could not be cancelledDTR %s failed: %sDTR 
%s finished successfullyDTR %s finished with state %sDTR %s requested cancel but no active transferDTR %s still in progress (%lluB transferred)DTR %s was already cancelledDTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobsDTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobsDTR is ready for transfer, moving to delivery queueDTRGenerator got request to cancel null jobDTRGenerator is asked about null jobDTRGenerator is asked to check files for null jobDTRGenerator is not running!DTRGenerator is queried about null jobDTRGenerator is requested to clean links for null jobDTRGenerator is requested to process null jobDTRGenerator is requested to remove null jobDTRGenerator was sent null jobDTRs still running for job %sDaemonization fork failed: %sData channel: %d.%d.%d.%d:%dData channel: [%s]:%dData delivery loop exitedData transfer abortedData transfer aborted: %sData was already cachedDataDelivery log tail: %sDataDelivery: %sDataMove::Transfer: no checksum calculation for %sDataMove::Transfer: using supplied checksum %sDataMove::Transfer: using supplied checksum %s:%sDataMove::Transfer: will calculate %s checksumDataMover: cycleDataMover: destination out of tries - exitDataMover: no retries requested - exitDataMover: source out of tries - exitDataMover::Transfer : starting new threadDataMover::Transfer: trying to destroy/overwrite destination: %sDataPointGFAL::write_file got position %d and offset %d, has to seekDataPointXrootd::write_file got position %d and offset %d, has to seekDataStagingDelivery exited with code %iDefault CPU time: %sDefault INTERNAL client constructorDefault Storage Service: %sDefault broker (%s) is not available. When using %s a broker should be specified explicitly (-b option).Default wall-time: %sDelegateCredentialsInit failedDelegateProxy failedDelegated credential from delegation service: %sDelegated credential identity: %sDelegation ID: %sDelegation authorization failedDelegation authorization passedDelegation getProxyReq request failedDelegation handler is not configuredDelegation handler with delegatee role endsDelegation handler with delegatee role starts to processDelegation handler with delegator role starts to processDelegation putProxy request failedDelegation role not supported: %sDelegation service: %sDelegation to ARC delegation service failedDelegation to gridsite delegation service failedDelegation type not supported: %sDelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - %sDelegationStore: PeriodicCheckConsumers failed to resume iteratorDelegationStore: TouchConsumer failed to create file %sDelete errorDeleted but still have locations at %sDelivery received new DTR %s with source: %s, destination: %sDelivery service at %s can copy from %sDelivery service at %s can copy to %sDestination URL missingDestination URL not supported: %sDestination URL not valid: %sDestination already existsDestination file is in cacheDestination is invalid URLDestination is not index service, skipping replica registrationDestination is not ready, will wait %u secondsDestination: %sDir %s allowed at service %sDirectory %s removed successfullyDirectory %s to store accounting database has been created.Directory listing failedDirectory of trusted CAs is not specified/found; Using current path as the CA directoryDirectory size is larger than %i files, will have to call multiple timesDirectory size is too large to list in one call, will have to call multiple timesDirectory: %sDo sorting using user created python brokerDoesn't 
support advance reservationsDoesn't support bulk SubmissionDoesn't support preemptionDownloading job: %sDowntime ends: %sDowntime starts: %sDumping job description aborted: Unable to load broker %sDuplicate replica found in LFC: %sEACCES Error opening lock file %s: %sECDH parameters appliedEPSV failedEPSV failed: %sERROR: %sERROR: Failed to retrieve informationERROR: Failed to retrieve information from the following endpoints:ERROR: Failed to write job information to file (%s)ERROR: Job submission aborted because no resource returned any informationERROR: One or multiple job descriptions was not submitted.ERROR: Unable to load broker %sERROR: VOMS configuration file %s contains too long line(s). Max supported length is %i characters.ERROR: VOMS configuration file %s contains too many lines. Max supported number is %i.ERROR: VOMS configuration line contains too many tokens. Expecting 5 or 6. Line was: %sERROR: failed to read file %s while scanning VOMS configuration.ERROR: file tree is too deep while scanning VOMS configuration. Max allowed nesting is %i.EchoService (python) 'Process' calledEchoService (python) constructor calledEchoService (python) destructor calledEchoService (python) got: %s EchoService (python) has prefix %(prefix)s and suffix %(suffix)sEchoService (python) request_namespace: %sEchoService (python) thread test startingEchoService (python) thread test, iteration %(iteration)s %(status)sElement "%s" in the profile ignored: the "inidefaultvalue" attribute cannot be specified when the "inisections" and "initag" attributes have not been specified.Element "%s" in the profile ignored: the value of the "inisections" attribute cannot be the empty string.Element "%s" in the profile ignored: the value of the "initag" attribute cannot be the empty string.Element "%s" in the profile ignored: the value of the "initype" attribute cannot be the empty string.Empty data for JWT issuer %sEmpty filename returned from FileCacheEmpty input payload!Empty job description source stringEmpty payload!Empty stringEnd of comment not foundEnd of double quoted string not foundEnd of single quoted string not foundEnd of user delimiter (%s) quoted string not foundEndpoint Information:Error accessing cache file %s: %sError adding communication interface in %s. Maybe another instance of A-REX is already running.Error adding communication interface in %s. Maybe permissions are not suitable.Error creating cacheError creating cache. Stale locks may remain.Error creating directory %s: %sError creating directory: %sError creating lock file %s: %sError creating required directories for %sError creating required dirs: %sError creating temporary file %s: %sError detected while parsing this ACError due to expiration of provided credentialsError during file validation. Can't stat file %s: %sError during file validation: Local file size %llu does not match source file size %llu for file %sError evaluating profileError extracting RSE for %sError from SQLite: %sError from SQLite: %s: %sError getting info from statvfs for the path %s: %sError getting list of files (in list)Error in cache processing, will retry without cachingError in caching procedureError in lock file %s, even though linking did not return an errorError initialising X509 storeError initiating delegation database in %s. Maybe permissions are not suitable. 
Returned error is: %s.Error linking cache file to %s.Error linking tmp file %s to lock file %s: %sError listing lock file %s: %sError loading generated configurationError looking up attributes of cache meta file %s: %sError looking up space tokens matching description %sError number in store context: %iError opening accounting databaseError opening lock file %s in initial check: %sError opening meta file %sError opening meta file for writing %sError parsing the internally set executables attribute.Error pinging delivery service at %s: %s: %sError reading info from file %s:%sError reading lock file %s: %sError reading meta file %s: %sError registering replica, moving to end of data stagingError removing cache file %s: %sError switching uidError flushing output payloadError when extracting public key from requestError when loading the extension config file: %sError when loading the extension config file: %s on line: %dError while reading dir %s: %sError with cache configurationError with cache configuration: %sError with formatting in lock file %sError with heartbeat file: %sError with post-transfer destination handling: %sError with source file, moving to next replicaError writing raw certificateError writing srm info file %sError writing to lock file %s: %sError: can't open policy file: %sError: policy location: %s is not a regular fileErrorDescriptionEstimated average waiting time: %sEstimated worst waiting time: %sEvaluate operator =: left from context: %sEvaluate operator =: left: %sEvaluate operator =: right: %sEvaluator does not support loadable Combining AlgorithmsEvaluator does not support specified Combining Algorithm - %sEvaluator for ArcPDP was not loadedEvaluator for GACLPDP was not loadedEvaluator for XACMLPDP was not loadedExample configuration (%s) not created.Exception while trying to start external process: %sExcessive data received while checking file accessExcluding replica %s matching pattern !%sExecution Target on Computing Service: %sExecution environment does not support inbound connectionsExecution environment does not support outbound connectionsExecution environment is a physical machineExecution environment is a virtual machineExecution environment supports inbound connectionsExecution environment supports outbound connectionsExecutionTarget class is not an objectExiting Generator threadExiting jobs processing threadExpecting Command among argumentsExpecting Command and URL providedExpecting Command module name among argumentsExpecting Command module path among argumentsExpecting Module, Command and URL providedExpecting URL among argumentsExpression failed to matchExpression matchedExternal request for attention %sExtracted nickname %s from credentials to use for RUCIO_ACCOUNTFATAL, ERROR, WARNING, INFO, VERBOSE or DEBUGFailed to assign hostname extensionFailed allocating memory for handleFailed authenticatingFailed authenticating: %sFailed checking database (%s)Failed checking source replicaFailed checking source replica %s: %sFailed checking source replica: %sFailed cleaning up destination %sFailed configuration initializationFailed configuration initialization.Failed connecting to server %s:%dFailed destroying handle: %s. 
Can't handle such a situation.Failed downloading %s to %sFailed downloading %s to %s, destination already existsFailed downloading %s to %s, unable to remove existing destinationFailed in globus_cond_initFailed in globus_ftp_control_handle_initFailed in globus_mutex_initFailed linking cache file to %sFailed locating credentialsFailed looking up attributes of cached file: %sFailed preparing job descriptionFailed processing user mapping command: %s %sFailed reading control directory: %sFailed reading control directory: %s: %sFailed reading dataFailed reading file %sFailed reading list of filesFailed reading local informationFailed retrieving information for job: %sFailed retrieving job description for job: %sFailed running mailerFailed setting file owner: %sFailed submitting job descriptionFailed to abort transfer of ftp file: %sFailed to accept SSL connectionFailed to accept connection requestFailed to accept delegationFailed to accept new file/destinationFailed to access proxy of given job id %s at %sFailed to acquire A-REX's configurationFailed to acquire delegation contextFailed to acquire lock on cache meta file %sFailed to acquire lock on file %sFailed to acquire source: %sFailed to activate Jobs Processing object, exiting Grid Manager threadFailed to add '%s' URL (interface type %s) into the accounting database Endpoints tableFailed to add '%s' into the accounting database %s tableFailed to add Independent OIDFailed to add RFC proxy OIDFailed to add VOMS AC extension. Your proxy may be incomplete.Failed to add VOMS AC sequence OIDFailed to add anyLanguage OIDFailed to add certificate and keyFailed to add certificate to token or databaseFailed to add extension into credential extensionsFailed to add inheritAll OIDFailed to add issuer's extension into proxyFailed to add key usage extensionFailed to add proxy certificate information extensionFailed to add voms AC extensionFailed to allocate certificate trustFailed to allocate item for certificate dataFailed to allocate memory for certificate subject while matching policy.Failed to allocate p12 contextFailed to apply DH parametersFailed to apply ECDH parametersFailed to apply local address to data connectionFailed to authenticate SAML Token inside the incoming SOAPFailed to authenticate Username Token inside the incoming SOAPFailed to authenticate X509 Token inside the incoming SOAPFailed to authenticate to PKCS11 slot %sFailed to authenticate to key databaseFailed to authenticate to token %sFailed to bind socket for %s:%s(%s): %sFailed to bind socket for TCP port %s(%s): %sFailed to call PORT_NewArenaFailed to cancel transfer request: %sFailed to cancel: %sFailed to cancel: No SOAP responseFailed to cast PayloadSOAP from incoming payloadFailed to cast PayloadSOAP from outgoing payloadFailed to change mapping stack processing policy in: %s = %sFailed to change owner of symbolic link %s to %iFailed to change owner of temp proxy at %s to %i:%i: %sFailed to change permissions on %s: %sFailed to change permissions or set owner of hard link %s: %sFailed to check %sFailed to clean up file %s: %sFailed to communicate to delegation endpoint.Failed to complete writing to destinationFailed to connect to %s(%s):%iFailed to connect to %s(%s):%i - %sFailed to connect to server %s:%dFailed to convert ASCII to DERFailed to convert EVP_PKEY to PKCS8Failed to convert GSI credential to GSS credential (major: %d, minor: %d)Failed to convert GSI credential to GSS credential (major: %d, minor: %d):%s:%sFailed to convert PrivateKeyInfo to EVP_PKEYFailed to 
convert security information to ARC policyFailed to convert security information to ARC requestFailed to convert security information to XACML requestFailed to copy %s: %sFailed to copy file %s to %s: %sFailed to copy input file: %s to path: %sFailed to create %s, trying to create parent directoriesFailed to create DTR dump threadFailed to create OTokens security attributesFailed to create OpenSSL object %s %s - %u %sFailed to create SOAP containersFailed to create any cache directories for %sFailed to create cache directory for file %s: %sFailed to create cache meta file %sFailed to create certificate requestFailed to create control directory %sFailed to create directoryFailed to create directory %sFailed to create directory %s! Skipping job.Failed to create directory %s: %sFailed to create entry for JWT issuer %sFailed to create export contextFailed to create file %s: %sFailed to create hard link from %s to %s: %sFailed to create input SOAP containerFailed to create job in %sFailed to create key or certificate safeFailed to create link: %s. Will not use mapped URLFailed to create path lengthFailed to create policy languageFailed to create socket for connecting to %s(%s):%d - %sFailed to create socket for listening at %s:%s(%s): %sFailed to create socket for listening at TCP port %s(%s): %sFailed to create subject nameFailed to create symbolic link from %s to %s: %sFailed to create temp proxy at %s: %sFailed to create threadFailed to create xrootd copy job: %sFailed to create/open file %s: %sFailed to decode trust stringFailed to delegate credentials to server - %sFailed to delegate credentials to server - no delegation interface foundFailed to delete %sFailed to delete %s but will still try to copyFailed to delete certificateFailed to delete delivery object or deletion timed outFailed to delete destination, retry may failFailed to delete logical fileFailed to delete meta-informationFailed to delete physical fileFailed to delete private keyFailed to delete private key and certificateFailed to delete replica %s: %sFailed to delete stale cache file %s: %sFailed to duplicate X509 structureFailed to duplicate extensionFailed to enable IPv6Failed to encode PKCS12Failed to encode certificateFailed to encode the certificate request with DER formatFailed to establish SSL connectionFailed to establish connection: %sFailed to evaluate expression: %sFailed to export private keyFailed to extract VOMS nickname from proxyFailed to extract credential informationFailed to fetch data from %s accounting database tableFailed to fetch data from accounting database Endpoints tableFailed to finalize reading from sourceFailed to finalize writing to destinationFailed to find CA certificatesFailed to find certificate and/or private key or files have improper permissions or ownership.Failed to find certificates by nickname: %sFailed to find extensionFailed to find issuer certificate for proxy certificateFailed to find metadata info on %s for determining file or directory deleteFailed to generate EC keyFailed to generate SAML Token for outgoing SOAPFailed to generate Username Token for outgoing SOAPFailed to generate X509 Token for outgoing SOAPFailed to generate public/private key pairFailed to get DN information from .local file for job %sFailed to get TCP socket options for connection to %s(%s):%d - timeout won't work - %sFailed to get certificate from certificate fileFailed to get credentialFailed to get ftp fileFailed to get initiate GFAL2 parameter handle: %sFailed to get initiate new GFAL2 context: %sFailed 
to get load average: %sFailed to get private keyFailed to get public keyFailed to get public key from RSA objectFailed to get public key from X509 objectFailed to identify grid-manager config fileFailed to import certificate from file: %sFailed to import private keyFailed to import private key from file: %sFailed to initialize LCASFailed to initialize LCMAPSFailed to initialize OpenSSL libraryFailed to initialize PKCS12 file: %sFailed to initialize S3 to %s: %sFailed to initialize X509 structureFailed to initialize accounting databaseFailed to initialize extensions member for CredentialFailed to initialize main Python threadFailed to initiate cacheFailed to initiate client connectionFailed to initiate delegation credentialsFailed to insert AAR into the database for job %sFailed to limit socket to IPv6 at %s:%s - may cause errors for IPv4 at same portFailed to limit socket to IPv6 at TCP port %s - may cause errors for IPv4 at same portFailed to listen at %s:%s(%s): %sFailed to listen at TCP port %s(%s): %sFailed to load client configurationFailed to load extension section: %sFailed to load grid-manager config fileFailed to load grid-manager config file from %sFailed to load grid-manager configfileFailed to load plugin for URL %sFailed to load private keyFailed to load service configurationFailed to load service configuration from any default config fileFailed to load service configuration from file %sFailed to load service side MCCsFailed to lock arccredential library in memoryFailed to lock arccrypto library in memoryFailed to make symbolic link %s to %s : %sFailed to move %s to %s: %sFailed to move file %s to %sFailed to new arenaFailed to obtain OpenSSL identifier for %sFailed to obtain bytes transferred: %sFailed to obtain delegation locks for cleaning orphaned locksFailed to obtain information about fileFailed to obtain listing from FTP: %sFailed to obtain local address for %s:%s - %sFailed to obtain local address for port %s - %sFailed to obtain lock on cache file %sFailed to obtain stat from FTP: %sFailed to open %s for reading: %sFailed to open %s, trying to create parent directoriesFailed to open data channelFailed to open directory %s: %sFailed to open file %sFailed to open file with DH parameters for readingFailed to open heartbeat file %sFailed to open input certificate file %sFailed to open log file: %sFailed to open output file '%s'Failed to open p12 fileFailed to open stdio channel %dFailed to open stdio channel %sFailed to output the certificate request as ASCII formatFailed to output the certificate request as DER formatFailed to parse HTTP headerFailed to parse Rucio info: %sFailed to parse Rucio response: %sFailed to parse SAML Token from incoming SOAPFailed to parse Username Token from incoming SOAPFailed to parse VOMS command: %sFailed to parse X509 Token from incoming SOAPFailed to parse certificate request from CSR file %sFailed to parse configuration file %sFailed to parse expressionFailed to parse requested VOMS lifetime: %sFailed to parse requested VOMS server port number: %sFailed to postregister destination %sFailed to pre-clean destination: %sFailed to preallocate space for %sFailed to prepare destinationFailed to prepare destination: %sFailed to prepare job descriptionFailed to prepare sourceFailed to prepare source: %sFailed to preregister destination: %sFailed to process A-REX configuration in %sFailed to process VOMS configuration or no suitable configuration lines found.Failed to process configuration in %sFailed to process job: %sFailed to process job: %s 
- %s %sFailed to process jobs - error response: %sFailed to process jobs - failed to parse responseFailed to process jobs - wrong response: %uFailed to process security attributes in TLS MCC for incoming messageFailed to query AAR database ID for job %sFailed to query parent DIDs: %sFailed to query state: %sFailed to read attribute %x from private key.Failed to read cache meta file %sFailed to read certificate file: %sFailed to read data for JWT issuer %sFailed to read data from input fileFailed to read database schema file at %sFailed to read file %sFailed to read file with DH parametersFailed to read input certificate fileFailed to read object %s: %sFailed to read object %s: %s; %sFailed to read private key file: %sFailed to read proxy file: %sFailed to read request from a fileFailed to read request from a stringFailed to register destination replica: %sFailed to register new file/destination: %sFailed to register plugin for state %sFailed to release GSS credential (major: %d, minor: %d):%s:%sFailed to release completed requestFailed to release lock on cache file %sFailed to release lock on file %sFailed to remove .meta file %s: %sFailed to remove all physical instancesFailed to remove cache per-job dir %s: %sFailed to remove existing hard link at %s: %sFailed to remove existing symbolic link at %s: %sFailed to remove file %s: %sFailed to remove instanceFailed to remove lock on %s. Some manual intervention may be requiredFailed to remove stale lock file %s: %sFailed to remove temporary proxy %s: %sFailed to rename URLFailed to resolve %sFailed to resolve %s (%s)Failed to resolve destination: %sFailed to resolve source: %sFailed to retrieve application data from OpenSSLFailed to retrieve link to TLS stream. Additional policy matching is skipped.Failed to retrieve private key for issuerFailed to run Grid Manager threadFailed to run command: %sFailed to run configuration parser at %s.Failed to run controldir update tool. 
Exit code: %iFailed to run external plugin: %sFailed to send cancel request: %sFailed to send content of bufferFailed to send traces to Rucio: %sFailed to set GFAL2 monitor callback: %sFailed to set GFAL2 transfer timeout, will use default: %sFailed to set INTERNAL endpointFailed to set LFC replicas: %sFailed to set credentials for GridFTP transferFailed to set executable bit on file %sFailed to set executable bit on file %s: %sFailed to set overwrite option in GFAL2: %sFailed to set permissions on: %sFailed to set signature algorithm IDFailed to set the pubkey for X509 object by using pubkey from X509_REQFailed to set up credential delegation with %sFailed to shut down SSL: %sFailed to sign encoded certificate dataFailed to sign the certificate requestFailed to sign the proxy certificateFailed to stage file(s)Failed to start GM threadsFailed to start archival threadFailed to start cache clean scriptFailed to start certificate extensionFailed to start controldir update tool.Failed to start data staging threadsFailed to start listening on any address for %s:%sFailed to start listening on any address for %s:%s(IPv%s)Failed to start new DTR for %sFailed to start new thread for monitoring job requestsFailed to start new thread: cache won't be cleanedFailed to start querying the endpoint on %sFailed to start querying the endpoint on %s (unable to create sub-thread)Failed to start reading from source: %sFailed to start thread for communicationFailed to start thread for listeningFailed to start transfer request: %sFailed to start writing to cacheFailed to start writing to destination: %sFailed to stat session dir %sFailed to stat source %sFailed to store application dataFailed to store ftp fileFailed to submit all jobs.Failed to submit all jobs: %sFailed to submit all jobs: %s %sFailed to submit all jobs: %u %sFailed to submit jobFailed to switch user id to %d/%dFailed to terminate LCASFailed to terminate LCMAPSFailed to transfer dataFailed to unlock file %s: %s. Manual intervention may be requiredFailed to unlock file with lock %s: %sFailed to unregister pre-registered destination %s. You may need to unregister it manuallyFailed to unregister pre-registered destination %s: %s. You may need to unregister it manuallyFailed to unregister preregistered lfn, You may need to unregister it manuallyFailed to unregister preregistered lfn. You may need to unregister it manuallyFailed to unregister preregistered lfn. 
You may need to unregister it manually: %sFailed to update AAR in the database for job %sFailed to update control directory %sFailed to verify X509 Token inside the incoming SOAPFailed to verify the requestFailed to verify the signature under Failed to verify the signature under Failed to verify the signed certificateFailed to write RTEs information for the job %sFailed to write authtoken attributes for job %sFailed to write body to output streamFailed to write data transfers information for the job %sFailed to write event records for job %sFailed to write header to output streamFailed to write job information to database (%s)Failed to write object %s: %s; %sFailed to write request into a fileFailed to write request into stringFailed to write signed EEC certificate into a fileFailed to write signed proxy certificate into a fileFailed to write to local job list %sFailed updating timestamp on cache lock file %s for file %s: %sFailed uploading file %s to %s: %sFailed uploading local input filesFailed while finishing reading from sourceFailed while finishing writing to destinationFailed while reading from sourceFailed while transferring dataFailed while waiting for connection requestFailed while waiting for connection to %s(%s):%i - %sFailed while writing to destinationFailure in parsing response from server - some information may be inaccurateFailure: %sFeature is not implementedFetch: response body: %sFetch: response code: %u %sFile %s is NEARLINE, will make request to bring onlineFile %s is already cached at %s under a different URL: %s - this file will not be cachedFile %s is already cached at %s under a different URL: %s - will not add DN to cached listFile %s is cached (%s) - checking permissionsFile %s removed successfullyFile '%s' in the 'executables' attribute is not present in the 'inputfiles' attributeFile already exists: %sFile could not be moved to Done stateFile could not be moved to Running state: %sFile delete failed, attempting directory deleteFile delete failed, attempting directory delete for %sFile download failed: %sFile is cacheable, will check cacheFile is currently being cached, will wait %isFile is not accessible %s: %sFile is not accessible: %sFile is not cacheable, skipping cache processingFile is not cacheable, was requested not to be cached or no cache available, skipping cache checkFile is ready! 
TURL is %sFile is smaller than %llu bytes, will use local deliveryFile type is not available, attempting file deleteFilename not returned in Rucio response: %sFiles associated with request token %s aborted successfullyFiles associated with request token %s put done successfullyFiles associated with request token %s released successfullyFileset copy to single object is not supported yetFileset registration is not supported yetFinalising current replica %sFinding existing destination replicasFinishWriting: looking for metadata: %sFinishWriting: obtained checksum: %sFinished successfullyFirst stage of registration to index service failedFirst value of 'inputfiles' attribute (filename) cannot be emptyFirst value of 'outputfiles' attribute (filename) cannot be emptyFor registration source must be ordinary URL and destination must be indexing serviceFor the 1st test job you also have to specify a runtime value with -r (--runtime) option.Force-checking source of cache file %sForcing re-download of file %sFound %s %s (it was loaded already)Found %s in cacheFound DTR %s for file %s left in transferring state from previous runFound VOMS AC attribute: %sFound a registry, will query it recursively: %sFound existing token for %s in Rucio token cache with expiry time %sFound service endpoint %s (type %s)Found started or successful endpoint (%s)Found suspended endpoint (%s)Found the following jobs:Found the following new jobs:Found unexpected empty lock file %s. Must go back to acquire()Found unfinished DTR transfers. It is possible the previous A-REX process did not shut down normallyFree slots grouped according to time limits (limit: free slots):Free slots: %iFull string not used: %sGACL Auth. request: %sGET: id %s path %sGenerate new X509 request!Generating %s job description outputGenerating ceID prefix from hostname automaticallyGenerator startedGeneric errorGet delegated credential from delegation service: %sGet from cache: Cached file is lockedGet from cache: Error in cache configurationGet from cache: File not in cacheGet from cache: Invalid URL %sGet from cache: Looking in cache for %sGet from cache: could not access cached file: %sGet request %s is still in queue, should wait %i secondsGet: there is no job %s - %sGetting current timestamp for BLAH parser log: %sGetting delegation credential from ARC delegation serviceGlobus error: %sGlobus handle is stuckGlobus location variable substitution is not supported anymore. Please specify path directly.Grid identity is mapped to local identity '%s'HEAD: id %s path %sHTTP Error: %d %sHTTP failure %u - %sHTTP with SAML2SSO invocation failedHTTP:PUT %s: put file %s: %sHandle is not in proper state %u/%uHead: there is no job %s - %sHealth state info: %sHealth state: %sHealthState of ExecutionTarget (%s) is not OK or WARNING (%s)Help Options:Helper process start failed: %sHelper program is missingHomogeneous resourceID: %sINI config file %s does not existINTERNALClient is not initializedId= %s,Type= %s,Issuer= %s,Value= %sIdP return some error message: %sIdentity is %sIdentity name: %sIdentity: %sIf the proxy or certificate/key does exist, you can manually specify the locations via environment variables '%s'/'%s' or '%s', or the '%s'/'%s' or '%s' attributes in the client configuration file (e.g. 
'%s')If you specify a policy you also need to specify a policy languageIgnoring endpoint (%s), it is already registered in retriever.Ignoring job (%s), already tried and were unable to load JobControllerPluginIgnoring job (%s), the job management URL is unknownIgnoring job (%s), the job status URL is unknownIgnoring job (%s), the management interface name is unknownIgnoring job (%s), the status interface name is unknownIgnoring job (%s), unable to load JobControllerPlugin for %sIgnoring job, the job ID is emptyIgnoring verification error due to insecure connection allowed: %sIllegal URL - closing ] for IPv6 address is followed by illegal token: %sIllegal URL - no closing ] for IPv6 address found: %sIllegal URL - no hostname given: %sIllegal URL - path must be absolute or empty: %sIllegal URL - path must be absolute: %sIllegal time format: %sImmediate completion expectedImmediate completion expected: %sImmediate completion: %sImplementation name: %sImplementor: %sIn the available CRL the lastUpdate field is not validIn the available CRL, the nextUpdate field is not validIn the configuration profile the 'initype' attribute on the "%s" element has a invalid value "%s".Incoming Message is not SOAPIncompatible options --nolist and --forcelist requestedInconsistent metadataIndependent proxy - no rights grantedInformation endpointInformation item '%s' is not knownInformational document is emptyInitialized %u-th Python serviceInitializing S3 connection to %sInitiating delegation procedureInput is not SOAPInput is without trailer Input request from a file: Request.xmlInput request from codeInput: metadata: %sInstalled application environments:Interface (%s) specified, submitting only to that interfaceInterface extensions:Interface on endpoint (%s) %s.Interface versions:Interface: %sInternal transfer method is not supported for %sInvalid DTRInvalid DTR for source %s, destination %sInvalid EffectInvalid HTTP object can't produce resultInvalid ID: %sInvalid ISO duration format: %sInvalid JobDescription:Invalid URL '%s' for input file '%s'Invalid URL '%s' for output file '%s'Invalid URL option syntax in option '%s' for input file '%s'Invalid URL option syntax in option '%s' for output file '%s'Invalid URL option: %sInvalid URL: %sInvalid URL: '%s' in input file '%s'Invalid URL: '%s' in output file '%s'Invalid action value %sInvalid class nameInvalid class name. The broker argument for the PythonBroker should be Filename.Class.args (args is optional), for example SampleBroker.MyBrokerInvalid comparison operator '%s' used at 'delegationid' attribute, only "=" is allowed.Invalid comparison operator '%s' used at 'queue' attribute in 'GRIDMANAGER' dialect, only "=" is allowedInvalid comparison operator '%s' used at 'queue' attribute, only "!=" or "=" are allowed.Invalid configuration - no allowed IP address specifiedInvalid configuration - no transfer dirs specifiedInvalid credentials, please check proxy and/or CA certificatesInvalid destination URL %sInvalid download destination path specified (%s)Invalid job descriptionInvalid lock on file %sInvalid log level. Using default %s.Invalid nodeaccess value: %sInvalid old log level. 
Using default %s.Invalid period string: %sInvalid port number in %sInvalid stage out path specified (%s)Invalid url: %sIssuer CA: %sIssuer: %sJWSE::ExtractPublicKey: deleting outdated info: %sJWSE::ExtractPublicKey: external jwk keyJWSE::ExtractPublicKey: fetching jws key from %sJWSE::ExtractPublicKey: jwk keyJWSE::ExtractPublicKey: key parsing errorJWSE::ExtractPublicKey: no supported keyJWSE::ExtractPublicKey: x5c keyJWSE::Input: JWE: not supported yetJWSE::Input: JWS content: %sJWSE::Input: JWS: signature algorithm: %sJWSE::Input: JWS: signature algorithn not supported: %sJWSE::Input: JWS: signature verification failedJWSE::Input: JWS: token too oldJWSE::Input: JWS: token too youngJWSE::Input: header: %sJWSE::Input: token: %sJWSE::SignECDSA: failed to add message to hash: %iJWSE::SignECDSA: failed to create ECDSA signatureJWSE::SignECDSA: failed to create EVP contextJWSE::SignECDSA: failed to finalize hash: %iJWSE::SignECDSA: failed to initialize hash: %iJWSE::SignECDSA: failed to parse signatureJWSE::SignECDSA: failed to recognize digest: %sJWSE::SignECDSA: missing keyJWSE::SignECDSA: wrong signature size writtenJWSE::SignECDSA: wrong signature size: %i + %iJWSE::VerifyECDSA: failed to add message to hash: %iJWSE::VerifyECDSA: failed to assign ECDSA signature: %iJWSE::VerifyECDSA: failed to create ECDSA signatureJWSE::VerifyECDSA: failed to create EVP contextJWSE::VerifyECDSA: failed to finalize hash: %iJWSE::VerifyECDSA: failed to initialize hash: %iJWSE::VerifyECDSA: failed to parse signatureJWSE::VerifyECDSA: failed to recognize digest: %sJWSE::VerifyECDSA: failed to verify: %iJWSE::VerifyECDSA: missing keyJWSE::VerifyECDSA: wrong signature sizeJob %s does not report a resumable stateJob %s failed to renew delegation %s.Job %s has no delegation associated. Can't renew such job.Job %s not foundJob %s: Some downloads failedJob %s: all files downloaded successfullyJob %s: files still downloadingJob ID argument is required.Job database connection established successfully (%s)Job deleted: %sJob description file could not be read.Job description language is not specified, unable to output description.Job description languages supported by %s:Job description to be sent to %s:Job descriptions:Job did not finished successfully. 
Message will not be written to BLAH log.Job download directory from user configuration file: %sJob download directory will be created in present working directory.Job download directory: %sJob has not started yet: %sJob list file (%s) doesn't existJob list file (%s) is not a regular fileJob list file cannot be created: %s is not a directoryJob list file cannot be created: The parent directory (%s) doesn't exist.Job nr.Job resuming successfulJob submission summary:Job submitted with jobid: %sJob timestamp successfully parsed as %sJob: %sJob: %s : Cancel request put and communicated to serviceJob: %s : Cancel request put but failed to communicate to serviceJob: %s : Clean request put and communicated to serviceJob: %s : Clean request put but failed to communicate to serviceJob: %s : ERROR : Failed to put cancel markJob: %s : ERROR : Failed to put clean markJob: %s : ERROR : No local information.Job: %s : ERROR : Unrecognizable stateJobControllerPlugin %s could not be createdJobControllerPlugin plugin "%s" not found.JobDescription class is not an objectJobDescriptionParserPlugin %s could not be createdJobDescriptionParserPlugin plugin "%s" not found.Jobs missing information will not be cleaned!Jobs processed: %d, deleted: %dJobs processed: %d, renewed: %dJobs processed: %d, resumed: %dJobs processed: %d, successfully killed: %dJobs processed: %d, successfully killed: %d, successfully cleaned: %dJobs processed: %d, successfully retrieved: %dJobs processed: %d, successfully retrieved: %d, successfully cleaned: %dJunk at end of RSLJunk in sessiondir commandLCMAPS did not return any GIDLCMAPS did not return any UIDLCMAPS has getCredentialDataLCMAPS has lcmaps_runLCMAPS returned UID which has no username: %uLCMAPS returned invalid GID: %uLCMAPS returned invalid UID: %uLIST/MLST failedLIST/MLST failed: %sLanguage (%s) not recognized by any job description parsers.Last stage of registration to index service failedLatitude: %fLeft operand for RSL concatenation does not evaluate to a literalLegacyMap: no configurations blocks definedLegacyPDP: ARC Legacy Sec Attribute not recognized.LegacyPDP: there is no %s Sec Attribute defined. 
Probably ARC Legacy Sec Handler is not configured or failed.LegacySecHandler: configuration file not specifiedLine %d.%d of the attributes returned: %sLinking MCC %s(%s) to MCC (%s) under %sLinking MCC %s(%s) to Plexer (%s) under %sLinking MCC %s(%s) to Service (%s) under %sLinking Plexer %s to MCC (%s) under %sLinking Plexer %s to Plexer (%s) under %sLinking Plexer %s to Service (%s) under %sLinking local fileLinking mapped fileLinking/copying cached fileLinking/copying cached file to %sList functionality is not supported for RESTful VOMS interfaceList functionality is not supported for legacy VOMS interfaceList will stat the URL %sListFiles: looking for metadata: %sListening on %s:%s(%s)Listening on TCP port %s(%s)Listing localjobs succeeded, %d localjobs foundLoadable module %s contains no requested plugin %s of kind %sLoaded %sLoaded %s %sLoaded JobControllerPlugin %sLoaded JobDescriptionParserPlugin %sLoaded MCC %s(%s)Loaded Plexer %sLoaded Service %s(%s)Loaded SubmitterPlugin %sLoading %u-th Python serviceLoading OToken failed - ignoring its presenceLoading Python broker (%i)Loading configuration (%s)Local running jobs: %iLocal suspended jobs: %iLocal waiting jobs: %iLocation URI for file %s is invalidLocation already existsLocations are missing in destination LFC URLLock %s is owned by a different host (%s)Lock file %s doesn't existLongitude: %fLooking for current jobsLooking up URL %sLooking up source replicasMCC %s(%s) - next %s(%s) has no targetMIME is not suitable for SOAP: %sMLSD is not supported - trying NLSTMLST is not supported - trying LISTMain Python thread is not initializedMain Python thread was not initializedMain memory size: %iMalformed ARCHERY record found (endpoint type is not defined): %sMalformed ARCHERY record found (endpoint url is not defined): %sMalformed VOMS AC attribute %sMapfile at %s can't be opened.Mapping %s to %sMapping policy option has empty valueMapping policy:Mapping queue: %sMatch issuer: %sMatch vo: %sMatched nothingMatched: %s %s %sMatched: %s %s %s %sMatching tokens expression: %sMatchmaking, %s (%d) is %s than %s (%d) published by the ExecutionTarget.Matchmaking, Benchmark %s is not published by the ExecutionTarget.Matchmaking, CacheTotal problem, ExecutionTarget: %d MB (CacheTotal); JobDescription: %d MB (CacheDiskSpace)Matchmaking, Computing endpoint requirement not satisfied. 
ExecutionTarget: %sMatchmaking, ComputingShare (%s) does not match requested queue (%s): skippingMatchmaking, ComputingShare (%s) matches requested queue (%s)Matchmaking, ComputingShareName of ExecutionTarget (%s) is not defined, but requested queue is (%s)Matchmaking, ConnectivityIn problem, ExecutionTarget: %s (ConnectivityIn) JobDescription: %s (InBound)Matchmaking, ConnectivityOut problem, ExecutionTarget: %s (ConnectivityOut) JobDescription: %s (OutBound)Matchmaking, ExecutionTarget: %s, OperatingSystem is not definedMatchmaking, ExecutionTarget: %s, CacheTotal is not definedMatchmaking, ExecutionTarget: %s, HealthState is not definedMatchmaking, ExecutionTarget: %s, ImplementationName is not definedMatchmaking, ExecutionTarget: %s, MaxDiskSpace and WorkingAreaFree are not definedMatchmaking, ExecutionTarget: %s, MaxTotalCPUTime or MaxCPUTime not defined, assuming no CPU time limitMatchmaking, ExecutionTarget: %s, MinCPUTime not defined, assuming no CPU time limitMatchmaking, ExecutionTarget: %s, NetworkInfo is not definedMatchmaking, ExecutionTarget: %s, Platform is not definedMatchmaking, ExecutionTarget: %s, RunTimeEnvironment requirements not satisfiedMatchmaking, ExecutionTarget: %s, TotalSlots and MaxSlotsPerJob are not definedMatchmaking, ExecutionTarget: %s, WorkingAreaLifeTime is not definedMatchmaking, ExecutionTarget: %s matches job descriptionMatchmaking, ExecutionTarget: %s, ApplicationEnvironments not definedMatchmaking, ExecutionTarget: %s, MaxMainMemory and MainMemorySize are not definedMatchmaking, ExecutionTarget: %s, MaxVirtualMemory is not definedMatchmaking, ExecutionTarget: %s, OperatingSystem requirements not satisfiedMatchmaking, MainMemorySize problem, ExecutionTarget: %d (MainMemorySize), JobDescription: %d (IndividualPhysicalMemory)Matchmaking, MaxCPUTime problem, ExecutionTarget: %d (MaxCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (DiskSpace)Matchmaking, MaxDiskSpace problem, ExecutionTarget: %d MB (MaxDiskSpace); JobDescription: %d MB (SessionDiskSpace)Matchmaking, MaxMainMemory problem, ExecutionTarget: %d (MaxMainMemory), JobDescription: %d (IndividualPhysicalMemory)Matchmaking, MaxSlotsPerJob problem, ExecutionTarget: %d (MaxSlotsPerJob) JobDescription: %d (NumberOfProcesses)Matchmaking, MaxTotalCPUTime problem, ExecutionTarget: %d (MaxTotalCPUTime), JobDescription: %d (TotalCPUTime)Matchmaking, MaxVirtualMemory problem, ExecutionTarget: %d (MaxVirtualMemory), JobDescription: %d (IndividualVirtualMemory)Matchmaking, MinCPUTime problem, ExecutionTarget: %d (MinCPUTime), JobDescription: %d (TotalCPUTime/NumberOfSlots)Matchmaking, NetworkInfo demand not fulfilled, ExecutionTarget do not support %s, specified in the JobDescription.Matchmaking, Platform problem, ExecutionTarget: %s (Platform) JobDescription: %s (Platform)Matchmaking, The %s scaled %s (%d) is %s than the %s (%d) published by the ExecutionTarget.Matchmaking, TotalSlots problem, ExecutionTarget: %d (TotalSlots) JobDescription: %d (NumberOfProcesses)Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (DiskSpace)Matchmaking, WorkingAreaFree problem, ExecutionTarget: %d MB (WorkingAreaFree); JobDescription: %d MB (SessionDiskSpace)Matchmaking, WorkingAreaLifeTime problem, ExecutionTarget: %s (WorkingAreaLifeTime) JobDescription: %s (SessionLifeTime)Max CPU time: %sMax disk space: %iMax memory: %iMax pre-LRMS waiting jobs: %iMax running jobs: 
%iMax slots per job: %iMax stage in streams: %iMax stage out streams: %iMax total jobs: %iMax total wall-time: %sMax user running jobs: %iMax virtual memory: %iMax waiting jobs: %iMax wall-time: %sMaximum number of threads running - putting new request into queueMemory allocation errorMessage class is not an objectMeta info of source and location do not match for %sMetadata of replica and index service differMetadata of source and destination are differentMetadata of source does not match existing destination. Use the --force option to override this.Min CPU time: %sMin wall-time: %sMissing CA subject in Globus signing policyMissing CertificatePath element or ProxyPath element, or is missingMissing Host in Connect elementMissing Port in Connect elementMissing Port in Listen elementMissing VO in configurationMissing audience in configurationMissing authentication informationMissing cancel-%s-job - job cancellation may not workMissing capabilities in configurationMissing condition subjects in Globus signing policyMissing data in DER encoded PROXY_CERT_INFO_EXTENSION extensionMissing directory in controldir commandMissing file name in [arex/jura] logfileMissing final reply: %sMissing group in configurationMissing information in reply: %sMissing issuer in configurationMissing name of LCAS libraryMissing name of LCMAPS libraryMissing number in maxjobsMissing or empty CertificatePath elementMissing or empty CertificatePath or CACertificatesDir elementMissing or empty CertificatePath or CACertificatesDir element; will only check the signature, will not do message authenticationMissing or empty KeyPath elementMissing or empty KeyPath element, or is missingMissing or empty PasswordSource elementMissing or empty Username elementMissing path of credentials fileMissing reference to factory and/or module. It is unsafe to use Globus in non-persistent mode - (Grid)FTP code is disabled. Report to developers.Missing reference to factory and/or module. It is unsafe to use Xrootd in non-persistent mode - Xrootd code is disabled. Report to developers.Missing response from delegation endpoint.Missing role in configurationMissing scan-%s-job - may miss when job finished executingMissing schema! 
Skipping validation...Missing scope in configurationMissing security object in messageMissing subject in configurationMissing subject nameMissing submit-%s-job - job submission to LRMS may not workModule %s contains no plugin %sModule %s contains no requested plugin %s of kind %sModule %s does not contain plugin(s) of specified kind(s)Module %s failed to reload (%s)Module %s is not an ARC plugin (%s)Module Manager InitModule Manager Init by ModuleManager::setCfgModule name: %sMoving to end of data stagingMulti-request operator only allowed at top levelMultiple %s attributes in configuration file (%s)MyProxy failure: %sMyproxy server did not return proxy with VOMS AC includedNEW: put new job: max jobs total limit reachedNEW: put new job: there is no payloadNLST/MLSD failedNLST/MLSD failed: %sNSS database to be accessed: %s NSS initialization failed on certificate database: %sNULL BIO passed to InquireRequestNULL callback for %sName of grami fileName: %sNegative rights are not supported in Globus signing policyNeither source nor destination are index services, will skip resolving replicasNeither source nor destination were staged, skipping releasing requestsNetwork information:New endpoint is created (%s) from the one with the unspecified interface (%s)No A-REX config file found in candypond configurationNo Attribute exists, which can deal with type: %sNo Connect element specifiedNo FQAN found. Using None as userFQAN valueNo LRMS set in configurationNo RSL content in job description foundNo SOAP responseNo SOAP response from Delivery service %sNo SOAP response from delivery serviceNo active DTR %sNo active job id %sNo arguments are assigned for external processNo authorization response was returnedNo cache directory specifiedNo cachedirs found/configured for calculation of free space.No caches defined in configurationNo callback for %s definedNo checksum information from serverNo checksum information possibleNo checksum information returned in Rucio response for %sNo checksum verification possibleNo configuration file could be loaded.No control directory set in configurationNo credentials suppliedNo delegation policies in this context and message - passing throughNo delegation token in requestNo delivery endpoints available, will try laterNo destination definedNo draining cache directory specifiedNo errorNo files to retrieve for job %sNo filesize information returned in Rucio response for %sNo information returned by PROPFINDNo job ID suppliedNo job description file name provided.No job description input specifiedNo job description parser was able to interpret job descriptionNo job description parsers availableNo job description parsers suitable for handling '%s' language are availableNo jobdescription resulted at %d testNo jobsNo jobs found, try laterNo jobs givenNo left operand for concatenation operatorNo listening ports initiatedNo local account name specifiedNo local user mapping foundNo locations defined for %sNo locations for destination different from source foundNo locations for destination different from source found: %sNo locations for destination found: %sNo locations for source found: %sNo locations found - probably no more physical instancesNo locations found for %sNo match found in cache access rules for %sNo matching checksum type, using first in list %sNo more %s replicasNo more interfaces to try for endpoint %s.No more replicas, will use %sNo need to stage source or destination, skipping stagingNo new informational document assignedNo next MCC or Service at path "%s"No next 
element in the chainNo non-draining session dirs availableNo pfns returned in Rucio response: %sNo physical files found for destinationNo physical files found for sourceNo pid file is found at '%s'. Probably A-REX is not running.No policy file or DNs specified for simplelist.pdp, please set location attribute or at least one DN element for simplelist PDP node in configuration.No port succeeded for %sNo private key with nickname %s exist in NSS databaseNo proxy foundNo queue name given in queue block nameNo read-only cache directory specifiedNo remote delivery services are useable, forcing local deliveryNo replicas found for %sNo request token specified!No request tokens foundNo requested security information was collectedNo response from AA service %sNo response returned: %sNo results returned from statNo right operand for concatenation operatorNo security processing/check requested for '%s'No server config part of config fileNo services specified. Please configure default services in the client configuration, or specify a cluster or registry (-C or -Y options, see arcsync -h).No session directories found in configuration.No session directory foundNo session directory set in configurationNo source definedNo space token specifiedNo space tokens found matching description %sNo stagein URL is providedNo such DTR %sNo such file or directoryNo target available inside the policyNo target available inside the ruleNo test-job with ID %d found.No test-job, with ID "%d"No usable cachesNo user certificate by nickname %s foundNo user-certificate foundNo username suppliedNo valid caches found in configuration, caching is disabledNo valid credentials found, exitingNo valid location availableNo valid response from VOMS server: %sNo value provided for Subject Attribute %s skippedNon-homogeneous resourceNone of the requested transfer protocols are supportedNot authorized by arc.pdp - failed to get response from EvaluatorNot authorized by arc.pdp - some of the RequestItem elements do not satisfy PolicyNot authorized from simplelist.pdp: %sNot enough parameters in copyurlNot enough parameters in linkurlNot found %s in cacheNot getting checksum of zip constituentNot using delivery service %s due to previous failureNot using delivery service at %s because it is fullNot valid destinationNot valid sourceNothing to do: you have to either specify a test job id with -J (--job) or query information about the certificates with -E (--certificate) Now copying (from -> to)Number %d is with nickname: %s%sNumber %d is: %sNumbers of sources and destinations do not matchOPTION...OS family: %sOS name: %sOS version: %sOTokens: Attr: %s = %sOTokens: Attr: messageOTokens: Attr: token: %sOTokens: Attr: token: bearer: %sOTokens: HandleOTokens: Handle: attributes created: subject = %sOTokens: Handle: messageOTokens: Handle: token was not presentObject is not suitable for listingObject not initialized (internal error)Obtained XML: %sObtained host and address are not acceptableOnly POST is supported in CandyPondOnly POST is supported in DataDeliveryServiceOnly Raw Buffer payload is supported for outputOnly globus rights are supported in Globus signing policy - %s is not supportedOnly signing rights are supported in Globus signing policy - %s is not supportedOnly standard input is currently supported for password source.Only user '.' 
for helper program is supportedOpenSSL error string: %sOperating System errorOperation cancelled successfullyOperation completed successfullyOperation not supported for this kind of URLOperation on path "%s"Operator token: %cOptimizedInformationContainer created temporary file: %sOptimizedInformationContainer failed to create temporary fileOptimizedInformationContainer failed to parse XMLOptimizedInformationContainer failed to rename temporary fileOptimizedInformationContainer failed to store XML document to temporary fileOptions 'p' and 'n' can't be used simultaneouslyOptions Group %s:Options for plugin are missingOriginal job description is listed below:Orphan delegation lock detected (%s) - cleaningOther actionsOut of memory when generate random serialOut of retriesOut of tries while allocating new job ID in %sOutgoing Message is not SOAPOutput EEC certificateOutput format modifiersOutput the proxy certificateOverwrite requested - will pre-clean destinationOwner: %sPASV failedPASV failed: %sPDP: %s (%s)PDP: %s (%s) can not be loadedPDP: %s can not be loadedPDP: missing name attributePEM_read_bio_X509_REQ failedPEM_write_bio_X509_REQ failedPKCS12 add password integrity failedPKCS12 output password not providedPOST request on special path is not supportedPROPFIND response: %sParent dataset: %sParsed domains: %uParser Context creation failed!Parser failed with error code %i.Parsing .local file to obtain job-specific identifiers and infoParsing VOMS AC to get FQANs informationPassword encoding type not supported: %sPath %s is invalid, creating required directoriesPath to .local job status file is required.Path to user's proxy file should be specified.Peer name: %sPer-job POST/SOAP requests are not supportedPerforming matchmaking against target (%s).Performs neither sorting nor matchingPermanent failurePermanent service errorPermission checking failed, will try downloading without using cachePermission checking failed: %sPermission checking on original URL failed: %sPermission checking passedPermission checking passed for url %sPicking up left jobsPlace: %sPlatform: %sPlease choose the NSS database you would like to use (1-%d): Please choose the one you would use (1-%d): Plexer (%s) - next %s(%s) has no targetPlexer's (%s) next has no ID attribute definedPlugin %s error: %sPlugin %s failed to startPlugin %s printed: %sPlugin %s returned no mappingPlugin %s returned no usernamePlugin %s returned too much: %sPlugin %s returned: %uPlugin %s timeout after %u secondsPlugin (user mapping) command is emptyPlugin (user mapping) timeout is not a number: %sPlugin (user mapping) timeout is wrong number: %sPlugin response: %sPolicy Decision Service invocation failedPolicy is emptyPolicy is not gaclPolicy line: %sPolicy subject: %sPolicyId: %s Alg inside this policy is:-- %sPostal code: %sPre-LRMS waiting jobs: %iPre-clean failedPre-clean failed, will still try to copyPre-registering destinationPre-registering destination in index servicePreparing to stage destinationPreparing to stage sourceProblem accessing cache file %s: %sProblem creating dtr (source %s, destination %s)Problem loading plugin %s, skipping it.Problem with index service, will proceed to end of data stagingProblem with index service, will release cache lockProcessing thread timed out. 
Restarting DTRProcessing type not supported: %sProcessingStartTime (%s) specified in job description is inside the targets downtime period [ %s - %s ].Protocol plugins available:Protocol(s) not supported - please check that the relevant gfal2 plugins are installed (gfal2-plugin-* packages)Proxy certificate information:Proxy expiredProxy expired. Job submission aborted. Please run 'arcproxy'!Proxy expired. Please run 'arcproxy'!Proxy generation failed: Certificate has expired.Proxy generation failed: Certificate is not valid yet.Proxy generation failed: Failed to create temporary file.Proxy generation failed: Failed to retrieve VOMS information.Proxy generation failed: No valid certificate found.Proxy generation failed: No valid private key found.Proxy generation succeededProxy has expiredProxy key length: %iProxy path: %sProxy signature: %sProxy subject: %sProxy type: %sProxy with ARC PolicyProxy with all rights inheritedProxy with empty policy - fail on unrecognized policyProxy with specific policy: %sProxy with unknown policy - fail on unrecognized policyProxy-subject: %sProxy: %sPut request %s is still in queue, should wait %i secondsPython Wrapper constructor succeededPython Wrapper destructor (%d)Python broker constructor called (%d)Python broker destructor called (%d)Python interpreter lockedPython interpreter releasedPython wrapper process calledPythonBroker initQuality level: %sQuerying WSRF GLUE2 computing REST endpoint.Querying source replicas in bulkQuerying status of staging requestQueue information:REST: process %s at %sREST:CLEAN job %s - %sREST:GET job %s - %sREST:KILL job %s - %sREST:PUT job %s: file %s: there is no payloadREST:RESTART job %s - %sRESTful and old VOMS communication protocols can't be requested simultaneously.RSA_generate_key_ex failedRSL substitution is not a sequenceRSL substitution sequence is not of length 2RSL substitution variable name does not evaluate to a literalRSL substitution variable value does not evaluate to a literalRandom sortingRead %i bytesRead access check failedRead access not allowed for %s: %sRead request from a fileRead request from a stringReading %u bytes from byte %lluReal transfer from %s to %sReceived DTR %s back from scheduler in state %sReceived DTR %s during Generator shutdown - may not be processedReceived invalid DTRReceived message out-of-band (not critical, ERROR level is just for debugging purposes)Received no DTRReceived retry for DTR %s still in transferReconnectingRecord about new job successfully added to the database (%s)Redirecting to %sRedirecting to new URL: %sRegistering destination replicaRegistration of Globus FTP buffer failed - cancel checkRelation operator expectedReleasing destinationReleasing request(s) made during stagingReleasing requestsReleasing sourceRemove: deleting: %sRemoving %sRemoving logical file from metadata %sRemoving metadata in %sRemoving pre-registered destination in index serviceRename: globus_ftp_client_move failedRename: timeout waiting for operation to completeRenaming %s to %sReplacing DTR %s in state %s with new requestReplacing existing token for %s in Rucio token cacheReplacing old SRM info with new for URL %sReplacing queue '%s' with '%s'Replica %s doesn't match preferred pattern or URL mapReplica %s has high latency, but no more sources exist so will use this oneReplica %s has high latency, trying next sourceReplica %s has long latency, trying next replicaReplica %s is mappedReplica %s matches host pattern %sReplica %s matches pattern %sRequest failedRequest failed: No response 
from IdPRequest failed: No response from IdP when doing authenticationRequest failed: No response from IdP when doing redirectingRequest failed: No response from SP Service when sending SAML assertion to SPRequest failed: No response from SPServiceRequest failed: response from IdP is not as expected when doing authenticationRequest failed: response from IdP is not as expected when doing redirectingRequest failed: response from SP Service is not as expected when sending SAML assertion to SPRequest failed: response from SPService is not as expectedRequest is emptyRequest is not supported - %sRequest is reported as ABORTED, but all files are doneRequest is reported as ABORTED, since it was cancelledRequest is reported as ABORTED. Reason: %sRequest succeed!!!Request timed outRequest to push to unknown owner - %uRequest: %sRequested slots: %iRequested to skip resource discovery. Will try direct submission to arcrest endpoint type.Requesting recursion and --nolist has no senseRequesting to stop job processingRequirement "%s %s" NOT satisfied.Requirement "%s %s" satisfied by "%s".Requirement "%s %s" satisfied.Reservation policy: %sResolving destination replicasResolving of index service for destination failedResolving of index service for source failedResolving source replicas in bulkResource information provider failed to runResource information provider failed to startResource information provider failed with exit status: %i %sResource information provider log: %sResource information provider: %sResource manager: %sResponse is not SOAPResponse is not XMLResponse: %sResponse: %sResult value (0=Permit, 1=Deny, 2=Indeterminate, 3=Not_Applicable): %dResults stored at: %sResuming job: %s at state: %s (%s)Retrieving job description of INTERNAL jobs is not supportedReturning to generatorReusing connectionRight operand for RSL concatenation does not evaluate to a literalRucio returned %sRucio token for %s has expired or is about to expireRule: %sRule: audience: %sRule: capabilities: %sRule: group: %sRule: issuer: %sRule: role: %sRule: scope: %sRule: subject: %sRule: vo: %sRunning command: %sRunning jobs: %iRunning mailer command (%s)SAML Token handler is not configuredSAML2SSO process failedSOAP Request to AA service %s failedSOAP fault from delivery service at %s: %sSOAP fault: %sSOAP invocation failedSOAP operation is not supported: %sSOAP request: %sSOAP response: %sSOAP with SAML2SSO invocation failedSQL statement used: %sSQLite database error: %sSRM Client status: %sSRM did not return any informationSRM did not return any useful informationSRM returned no useful Transfer URLs: %sSSHFS mount point of cache directory (%s) is broken - waiting for reconnect ...SSHFS mount point of runtime directory (%s) is broken - waiting for reconnect ...SSHFS mount point of session directory (%s) is broken - waiting for reconnect ...SSL error: %d - %s:%s:%sSSL error: %s, libs: %s, func: %s, reason: %sScheduler configuration:Scheduler received NULL DTRScheduler received invalid DTRScheduler received new DTR %s with source: %s, destination: %s, assigned to transfer share %s with priority %dScheduler starting upScheduler stopped, exitingScheduling policy: %sSchema validation errorScheme: %sSecHandler configuration is not definedSecHandler has no configurationSecHandler has no name attribute definedSecHandler: %s(%s)Security Handler %s(%s) could not be createdSecurity Handlers processing failedSecurity Handlers processing failed: %sSecurity check failed for incoming TLS messageSecurity check failed for outgoing TLS 
messageSecurity check failed in SOAP MCC for incoming messageSecurity check failed in SOAP MCC for incoming message: %sSecurity check failed in SOAP MCC for outgoing messageSecurity check failed in SOAP MCC for outgoing message: %sSecurity check failed in TLS MCC for incoming messageSecurity processing/check failed: %sSecurity processing/check for '%s' failed: %sSecurity processing/check for '%s' passedSecurity processing/check passedSelf-signed certificateSending Rucio trace: %sSequence token parsing: %sServer SRM version: %sServer implementation: %sService %s(%s) could not be createdService Loop: Endpoint %sService endpoint %s (type %s) added to the list for direct submissionService endpoint %s (type %s) added to the list for resource discoveryService has no ID attribute definedService has no Name attribute definedService information:Service is waiting for requestsService side MCCs are loadedServing state: %sSession dir %s is owned by %i, but current mapped user is %iSession dir '%s' contains user specific substitutions - skipping itSession directory to useSession root directory is missingSessiondir %s: Free space %f GBSetting connections limit to %i, connections over limit will be %sSetting status (%s) for endpoint: %sSetting status (STARTED) for endpoint: %sSetting subject name!Setting userRequestDescription to %sShare Information:Should wait for destination to be preparedShould wait for source to be preparedShow %s help optionsShow help optionsShutdown daemonShutting down data delivery serviceShutting down data staging threadsShutting down schedulerSimpleMap: %sSimpleMap: acquired new unmap time of %u secondsSimpleMap: wrong number in unmaptime commandSkipping %s replica %sSkipping ComputingEndpoint '%s', because it has '%s' interface instead of the requested '%s'.Skipping additional policy matching due to insecure connections allowed.Skipping invalid URL option %sSkipping policyAuthority VOMS AC attributeSkipping retrieved job (%s) because it was submitted via another interface (%s).Skipping service: no SchemaPath found!Skipping service: no ServicePath found!Sockets do not match on exit %i != %iSome transfers failedSorting according to free slots in queueSorting according to input data availability at targetSorting according to specified benchmark (default "specint2000")Sorting replicas according to URL mapSorting replicas according to preferred pattern %sSource URL missingSource URL not supported: %sSource URL not valid: %sSource and/or destination is index service, will resolve replicasSource check requested but failed: %sSource is invalid URLSource is mapped to %sSource is not ready, will wait %u secondsSource is the same as destinationSource modification date: %sSource or destination requires stagingSource: %sSpecified module not found in cacheSpecified overlay file (%s) does not exist.Staging jobs: %iStaging request timed out, will release requestStaging: %sStart foregroundStart testStart waiting 10 sec...StartReadingStartReading: File was not prepared properlyStartWritingStartWriting: File was not prepared properlyStarted remote Delivery at %sStarting DTR threadsStarting controldir update tool.Starting data staging threadsStarting helper process: %sStarting jobs processing threadStarting jobs' monitoringStarting new DTR for %sStarting querying of suspended endpoint (%s) - no other endpoints for this service is being queried or has been queried successfully.Starting sub-thread to query the endpoint on %sStarting thread to query the endpoint on %sStat: obtained checksum %sStat: 
obtained modification time %sStat: obtained size %lluState name for plugin is missingStatus for service endpoint "%s" is set to inactive in ARCHERY. Skipping.Status of %d jobs was queried, %d jobs returned informationStopReading finished waiting for transfer_condition.StopReading starts waiting for transfer_condition.StopReading: aborting connectionStopWriting finished waiting for transfer_condition.StopWriting starts waiting for transfer_condition.StopWriting: Calculated checksum %sStopWriting: aborting connectionStopWriting: looking for checksum of %sStopped job processingStopping helper process %sStopping jobs processing threadStoring port %i for %sStoring temp proxy at %sStrange path in Rucio URL: %sString successfully parsed as %s.String token: %sSubject Attribute %s has no known NID, skippedSubject does not start with '/'Subject name: %sSubject to match: %sSubject: %sSubmission endpointSubmitterPlugin %s could not be createdSubmitterPlugin plugin "%s" not found.Submitting job Succeeded to add Independent OID, tag %d is returnedSucceeded to add RFC proxy OID, tag %d is returnedSucceeded to add VOMS AC sequence OID, tag %d is returnedSucceeded to add anyLanguage OID, tag %d is returnedSucceeded to add inheritAll OID, tag %d is returnedSucceeded to authenticate SAMLTokenSucceeded to authenticate UsernameTokenSucceeded to authenticate X509TokenSucceeded to change password on MyProxy serverSucceeded to change trusts to: %sSucceeded to convert PrivateKeyInfo to EVP_PKEYSucceeded to destroy credential on MyProxy serverSucceeded to export PKCS12Succeeded to generate public/private key pairSucceeded to get a proxy in %s from MyProxy server %sSucceeded to get credentialSucceeded to get info from MyProxy serverSucceeded to import certificateSucceeded to import private keySucceeded to initialize NSSSucceeded to load PrivateKeyInfoSucceeded to output certificate to %sSucceeded to output the certificate request into %sSucceeded to put a proxy onto MyProxy serverSucceeded to send DelegationService: %s and DelegationID: %s info to peer serviceSucceeded to sign the proxy certificateSucceeded to verify the signature under Succeeded to verify the signature under Succeeded to verify the signed certificateSupplied username %s does not match mapped username %sSupported Profiles:Supported constraints are: validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start from now) validityEnd=time validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and validityEnd not specified, the default is 12 hours for local proxy, and 168 hours for delegated proxy on myproxy server) vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of 12 hours and validityPeriod) myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy server, e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value of 12 hours and validityPeriod (which is lifetime of the delegated proxy on myproxy server)) proxyPolicy=policy content proxyPolicyFile=policy file keybits=number - length of the key to generate. Default is 2048 bits. Special value 'inherit' is to use key length of signing certificate. signingAlgorithm=name - signing algorithm to use for signing public key of proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256, sha384, sha512 and inherit (use algorithm of signing certificate). Default is inherit. With old systems, only sha1 is acceptable. 
Supported information item names are: subject - subject name of proxy certificate. identity - identity subject name of proxy certificate. issuer - issuer subject name of proxy certificate. ca - subject name of CA which issued initial certificate. path - file system path to file containing proxy. type - type of proxy certificate. validityStart - timestamp when proxy validity starts. validityEnd - timestamp when proxy validity ends. validityPeriod - duration of proxy validity in seconds. validityLeft - duration of proxy validity left in seconds. vomsVO - VO name represented by VOMS attribute vomsSubject - subject of certificate for which VOMS attribute is issued vomsIssuer - subject of service which issued VOMS certificate vomsACvalidityStart - timestamp when VOMS attribute validity starts. vomsACvalidityEnd - timestamp when VOMS attribute validity ends. vomsACvalidityPeriod - duration of VOMS attribute validity in seconds. vomsACvalidityLeft - duration of VOMS attribute validity left in seconds. proxyPolicy keybits - size of proxy certificate key in bits. signingAlgorithm - algorithm used to sign proxy certificate. Items are printed in requested order and are separated by newline. If item has multiple values they are printed in same line separated by |. Supported password destinations are: key - for reading private key myproxy - for accessing credentials at MyProxy service myproxynew - for creating credentials at MyProxy service all - for any purspose. Supported password sources are: quoted string ("password") - explicitly specified password int - interactively request password from console stdin - read password from standard input delimited by newline file:filename - read password from file named filename stream:# - read password from input stream number #. Currently only 0 (standard input) is supported.Supports advance reservationsSupports bulk submissionSupports preemptionSuspended jobs: %iSuspending querying of endpoint (%s) since the service at the endpoint is already being queried, or has been queried.Synchronizing the local list of active jobs with the information in the information system can result in some inconsistencies. Very recently submitted jobs might not yet be present, whereas jobs very recently scheduled for deletion can still be present.Syntax error in 'notify' attribute value ('%s'), it contains unknown state flagsSyntax error in 'notify' attribute value ('%s'), it must contain an email addressSyntax error in 'notify' attribute value ('%s'), it must only contain email addresses after state flag(s)System configuration file (%s or %s) does not exist.System configuration file (%s) contains errors.System configuration file (%s) does not exist.TCP client process calledTCP executor is removedTLS provides no identity, going for OTokensTURL %s cannot be handledTarget %s does not match requested interface(s).Target %s removed by FastestQueueBroker, doesn't report number of free slotsTarget %s removed by FastestQueueBroker, doesn't report number of total slotsTarget %s removed by FastestQueueBroker, doesn't report number of waiting jobsTarget endpoint selectionTechnology: %sTemporary service errorTest failed, no more possible targetsTest submitted with jobid: %sTest was defined with ID %d, but some error occurred during parsing it.The "FreeSlotsWithDuration" attribute published by "%s" is wrongly formatted. 
Ignoring it.The 'sort' and 'rsort' flags cannot be specified at the same time.The BIO for output is NULLThe CA certificates directory is required for contacting VOMS and MyProxy servers.The CA issuer (%s) of the credentials (%s) is not trusted by the target (%s).The ComputingEndpoint doesn't advertise its Quality Level.The ComputingEndpoint doesn't advertise its Serving State.The ComputingEndpoint has no URL.The ComputingService doesn't advertise its Interface.The ComputingService doesn't advertise its Quality Level.The MyProxy period that you set: %s can't be recognized.The NSS database can not be detected in the Firefox profileThe Response is not going to this endThe Service advertises no Health State.The Service doesn't advertise its Type.The StatusCode is SuccessThe VOMS AC period that you set: %s can't be recognized.The VOMS server with the information: %s can not be reached, please make sure it is available.The arccat command performs the cat command on the stdout, stderr or grid manager's error log of the job.The arcclean command removes a job from the computing resource.The arccp command copies files to, from and between grid storage elements.The arcget command is used for retrieving the results from a job.The arcinfo command is used for obtaining the status of computing resources on the Grid.The arckill command is used to kill running jobs.The arcls command is used for listing files in grid storage elements and file index catalogues.The arcmkdir command creates directories on grid storage elements and catalogs.The arcproxy command creates a proxy from a key/certificate pair which can then be used to access grid resources.The arcrename command renames files on grid storage elements.The arcrm command deletes files on grid storage elements.The arcstat command is used for obtaining the status of jobs that have been submitted to Grid enabled resources.The arcsub command is used for submitting jobs to Grid enabled computing resources.The arcsync command synchronizes your local job list with the information at the given CEs or registry servers.The arctest command is used for testing clusters as resources.The available CRL has expiredThe available CRL is not yet validThe brokerarguments attribute can only be used in conjunction with the brokername attributeThe certificate with subject %s is not validThe cluster XRSL attribute is currently unsupported.The credential to be signed contains no requestThe credential to be signed is NULLThe credential's private key has already been initializedThe default configuration file (%s) is not a regular file.The delegated credential got from delegation service is stored into path: %sThe delegated credential got from path: %sThe downtime of the target (%s) is not published. Keeping target.The end time that you set: %s can't be recognized.The end time that you set: %s is before start time: %s.The endpoint (%s) is not supported by this plugin (%s)The endpoint of delegation service should be configuredThe file %s is currently locked with a valid lockThe first supported interface of the plugin %s is an empty string, skipping the plugin.The following jobs were not submitted:The interface of this endpoint (%s) is unspecified, will try all possible pluginsThe job description also can be a file or a string in ADL or XRSL format.The keybits constraint is wrong: %s.The name of the private key to delete is emptyThe old GSI proxies are not supported anymore. 
Please do not use -O/--old option.The payload of incoming message is emptyThe payload of outgoing message is emptyThe period that you set: %s can't be recognized.The plugin %s does not support any interfaces, skipping it.The policy file setup for simplelist.pdp does not exist, please check location attribute for simplelist PDP node in service configurationThe policy language: %s is not supportedThe private key for signing is not initializedThe process owning the lock on %s is no longer running, will remove lockThe request has passed the policy evaluationThe signing algorithm %s is not allowed,it should be SHA1 or SHA2 to sign certificate requestsThe specified Globus attribute (%s) is not supported. %s ignored.The start time that you set: %s can't be recognized.The start, end and period can't be set simultaneouslyThe subject does not match the issuer name + proxy CN entryThe value of the acl XRSL attribute isn't valid XML.The value of the ftpthreads attribute must be a number from 1 to 10The value of the keysize attribute in the configuration file (%s) was only partially parsedThe value of the timeout attribute in the configuration file (%s) was only partially parsedThere are %d NSS base directories where the certificate, key, and module databases liveThere are %d RequestItemsThere are %d requests, which satisfy at least one policyThere are %d servers with the same name: %s in your vomses file, but none of them can be reached, or can return a valid message.There are %d user certificates existing in the NSS databaseThere are no endpoints in registry that match requested info endpoint typeThere are no endpoints in registry that match requested submission endpoint typeThere is %d subjects, which satisfy at least one policyThere is no Delegated X509 token in the responseThere is no Format delegated token in the responseThere is no Format request in the responseThere is no Id or X509 request value in the responseThere is no Id or X509 token value in the responseThere is no SOAP connection chain configuredThere is no SOAP responseThere is no UpdateCredentialsResponse in responseThere is no X509 request in the responseThere is no certificate named %s found, the certificate could be removed when generating CSRThere is no digest in issuer's private key objectThere is no local LRMS ID. Message will not be written to BLAH log.There is no responseThere was a problem during post-transfer destination handling after error: %sThere was a problem during post-transfer source handling: %sThere was no HTTP responseThere was no SOAP responseThird party transfer is not supported for these endpointsThird party transfer was requested but the corresponding plugin could not be loaded. Is the GFAL plugin installed? If not, please install the packages 'nordugrid-arc-plugins-gfal' and 'gfal2-all'. 
Depending on your type of installation the package names might differ.This INFO message should also be seenThis INFO message should be seenThis VERBOSE message should not be seenThis VERBOSE message should now be seenThis instance was already deletedThis message goes to initial destinationThis message goes to per-thread destinationThis process already owns the lock on %sThis seems like a temporary error, please try again laterThis tiny tool can be used for testing the JobDescription's conversion abilities.Thread exited with Glib error: %sThread exited with generic exception: %sTime left for AC: %sTime left for AC: AC has expiredTime left for AC: AC is not valid yetTime left for proxy: %sTime left for proxy: Proxy expiredTime left for proxy: Proxy not valid yetTimed out while waiting for cache lockTimeout connecting to %s(%s):%i - %i sTimeout has expired, will remove lock file %sTimeout waiting for Globus callback - leaking connectionTimeout waiting for mkdirTo recover missing jobs, run arcsyncToo many arguments in configurationToo many connections - dropping new oneToo many connections - waiting for old to closeToo many failures to obtain checksum - giving upToo many files in one request - please try again with fewer filesTool for writing the grami file representation of a job description file.Total jobs: %iTotal logical CPUs: %iTotal number of jobs found: Total number of new jobs found: Total physical CPUs: %iTotal slots: %iTransfer FAILED: %sTransfer cancelled successfullyTransfer completeTransfer failedTransfer failed: %sTransfer finished: %llu bytes transferred %sTransfer from %s to %sTransfer killed after %i seconds without communicationTransfer succeededTransfer timed outTrusted CAs:Trying all available interfacesTrying next replicaTrying to check X509 cert with check_cert_typeTrying to connect %s(%s):%dTrying to listen on %s:%s(%s)Trying to listen on TCP port %s(%s)Trying to start suspended endpoint (%s)Trying to submit directly to endpoint (%s)Trying to submit endpoint (%s) using interface (%s) with plugin (%s).Two input files have identical name '%s'.Type is dir, calling srmRmDirType is file, calling srmRmType: %sTypes of execution services that %s is able to submit jobs to:Types of local information services that %s is able to collect information from:Types of local information services that %s is able to collect job information from:Types of registry services that %s is able to collect information from:Types of services that %s is able to manage jobs at:URLURL %s disagrees with stored SRM info, testing new infoURL is mapped to local access - checking permissions on original URLURL is mapped to: %sURL is not valid: %sURL option %s does not have format name=valueURL protocol is not urllist: %sURL: %sUnAuthorized from xacml.pdpUnable to adapt job description to any resource, no resource information could be obtained.Unable to add event: cannot find AAR for job %s in accounting database.Unable to copy example configuration from existing configuration (%s)Unable to create %s directory.Unable to create data base (%s)Unable to create directory for storing results (%s) - %sUnable to create index for jobs table in data base (%s)Unable to create jobs table in data base (%s)Unable to create jobs_new table in data base (%s)Unable to detect format of job record.Unable to detect if issuer certificate is installed.Unable to determine certificate informationUnable to determine error (%d)Unable to download job (%s), no JobControllerPlugin plugin was set to handle the job.Unable to drop jobs in 
data base (%s)Unable to find file size of %sUnable to handle %sUnable to handle job (%s), no interface specified.Unable to handle job (%s), no plugin associated with the specified interface (%s)Unable to initialise connection to destination: %sUnable to initialise connection to source: %sUnable to initialize handler for %sUnable to list files at %sUnable to load ARC configuration file.Unable to load BrokerPlugin (%s)Unable to load plugin (%s) for interface (%s) when trying to submit job description.Unable to locate the "%s" plugin. Please refer to installation instructions and check if package providing support for "%s" plugin is installedUnable to match target, marking it as not matching. Broker not valid.Unable to open job list file (%s), unknown formatUnable to parse job description input: %sUnable to parse the specified verbosity (%s) to one of the allowed levelsUnable to parse.Unable to prepare job description according to needs of the target resource (%s).Unable to prepare job description according to needs of the target resource.Unable to read job information from file (%s)Unable to register job submission. Can't get JobDescription object from Broker, Broker is invalid.Unable to rename jobs table in data base (%s)Unable to retrieve list of job files to download for job %sUnable to retrieve list of log files to download for job %sUnable to select middlewareUnable to select operating system.Unable to select runtime environmentUnable to sort ExecutionTarget objects - Invalid Broker object.Unable to sort added jobs. The BrokerPlugin plugin has not been loaded.Unable to submit job. Failed to assign delegation to job description.Unable to submit job. Job description is not valid in the %s format: %sUnable to submit jobs. Failed to delegate X.509 credentials.Unable to submit jobs. Failed to delegate token.Unable to transfer from jobs to jobs_new in data base (%s)Unable to truncate job database (%s)Unable to write 'output' file: %sUnable to write grami file: %sUnable to write records into job database (%s): Id "%s"Unable to write to p12 fileUnauthorizedUnauthorized from remote pdp serviceUnexpected RSL typeUnexpected argument for 'all' rule - %sUnexpected argumentsUnexpected arguments suppliedUnexpected delegation location from delegation endpoint - %s.Unexpected immediate completion: %sUnexpected name returned in Rucio response: %sUnexpected path %s returned from serverUnexpected response code from delegation endpoint - %uUnexpected response code from delegation endpoint: %u, %s.Uniq is adding service coming from %sUniq is ignoring service coming from %sUniq is replacing service coming from %s with service coming from %sUnknown LDAP scope %s - using baseUnknown XRSL attribute: %s - Ignoring it.Unknown attribute %s in common section of configuration file (%s), ignoring itUnknown channel %s for stdio protocolUnknown credential type %s for URL pattern %sUnknown element in Globus signing policyUnknown errorUnknown key or hash typeUnknown key or hash type of issuerUnknown log level %sUnknown option %sUnknown rights in Globus signing policy - %sUnknown section %s, ignoring itUnknown transfer option: %sUnknown user name mapping rule %sUnregistering %sUnregistering from index service failedUnsupported URL givenUnsupported URL given: %sUnsupported destination url: %sUnsupported information endpoint type: %sUnsupported job list type '%s', using 'SQLITE'. 
Supported types are: SQLITE, XML.Unsupported mapping policy action: %sUnsupported mapping policy option: %sUnsupported protocol in url %sUnsupported proxy policy language is requested - %sUnsupported proxy version is requested - %sUnsupported source url: %sUnsupported submission endpoint type: %sUnsupported submission interface %s. Seems arc-blahp-logger need to be updated accordingly. Please submit the bug to bugzilla.Untrusted self-signed certificate in chain with subject %s and hash: %luUpdateCredentials failedUpdateCredentials: EPR contains no JobIDUpdateCredentials: failed to update credentialsUpdateCredentials: missing ReferenceUpdateCredentials: no job found: %sUpdateCredentials: request = %sUpdateCredentials: response = %sUpdateCredentials: wrong number of ReferenceUpdateCredentials: wrong number of elements inside ReferenceUsage:Usage: copy source destinationUse --help option for detailed usage informationUse -? to get usage descriptionUsed configuration file %sUsed slots: %iUser configuration file (%s) contains errors.User configuration file (%s) does not exist or cannot be loaded.User for helper program is missingUser name direct mapping is missing user name: %s.User name mapping command is emptyUser name mapping has empty authgroup: %sUser name should be specified.User pool at %s can't be opened.User pool at %s failed to perform user mapping.User pool mapping is missing user subject.User subject match is missing user subject.UserConfig class is not an objectUserConfiguration saved to file (%s)Username Token handler is not configuredUsing A-REX config file %sUsing CA certificate directory: %sUsing CA default locationUsing CA dir: %sUsing CA file: %sUsing DH parameters from file: %sUsing OTokenUsing Rucio account %sUsing buffered transfer methodUsing cache %sUsing cached local account '%s'Using cert %sUsing certificate file: %sUsing checksum %sUsing cipher list: %sUsing cipher: %sUsing configuration at %sUsing curve with NID: %uUsing insecure data transferUsing internal transfer method of %sUsing key %sUsing key file: %sUsing local account '%s'Using next %s replicaUsing protocol options: 0x%xUsing proxy %sUsing proxy file: %sUsing secure data transferUsing session dir %sUsing space token %sUsing space token description %sVO %s doesn't match %sVOMS AC attribute is a tagVOMS AC attribute is the FQANVOMS attr %s doesn't match %sVOMS attr %s matches %sVOMS attribute is ignored due to processing/validation errorVOMS attribute parsing failedVOMS attribute validation failedVOMS: AC has expiredVOMS: AC is not complete - missing Serial or Issuer informationVOMS: AC is not yet validVOMS: AC signature verification failedVOMS: CA directory or CA file must be provided or default setting enabledVOMS: Can not allocate memory for parsing ACVOMS: Can not allocate memory for storing the order of ACVOMS: Can not find AC_ATTR with IETFATTR typeVOMS: Can not parse ACVOMS: Cannot find certificate of AC issuer for VO %sVOMS: DN of holder in AC: %sVOMS: DN of holder: %sVOMS: DN of issuer: %sVOMS: FQDN of this host %s does not match any target in ACVOMS: The lsc file %s can not be openVOMS: The lsc file %s does not existVOMS: authorityKey is wrongVOMS: both idcenoRevAvail and authorityKeyIdentifier certificate extensions must be presentVOMS: can not verify the signature of the ACVOMS: cannot validate AC issuer for VO %sVOMS: case of multiple IETFATTR attributes not supportedVOMS: case of multiple policyAuthority not supportedVOMS: create FQAN: %sVOMS: create attribute: %sVOMS: directory for trusted 
service certificates: %sVOMS: failed to access IETFATTR attributeVOMS: failed to parse attributes from ACVOMS: failed to verify AC signatureVOMS: missing AC partsVOMS: problems while parsing information in ACVOMS: the DN in certificate: %s does not match that in trusted DN list: %sVOMS: the Issuer identity in certificate: %s does not match that in trusted DN list: %sVOMS: the attribute name is emptyVOMS: the attribute qualifier is emptyVOMS: the attribute value for %s is emptyVOMS: the format of IETFATTRVAL is not supported - expecting OCTET STRINGVOMS: the format of policyAuthority is unsupported - expecting URIVOMS: the grantor attribute is emptyVOMS: the holder information in AC is wrongVOMS: the holder issuer name is not the same as that in ACVOMS: the holder issuerUID is not the same as that in ACVOMS: the holder name in AC is not related to the distinguished name in holder certificateVOMS: the holder serial number %lx is not the same as the serial number in AC %lx, the holder certificate that is used to create a voms proxy could be a proxy certificate with a different serial number as the original EEC certVOMS: the holder serial number is: %lxVOMS: the issuer information in AC is wrongVOMS: the issuer name %s is not the same as that in AC - %sVOMS: the only supported critical extension of the AC is idceTargetsVOMS: the serial number in AC is: %lxVOMS: the serial number of AC INFO is too long - expecting no more than 20 octetsVOMS: there is no constraints of trusted voms DNs, the certificates stack in AC will not be checked.VOMS: trust chain to check: %s VOMS: unable to determine hostname of AC from VO name: %sVOMS: unable to extract VO name from ACVOMS: unable to match certificate chain against VOMS trusted DNsVOMS: unable to verify certificate chainVOMS: unsupported time format in AC - expecting GENERALIZED TIMEValid for: %sValid for: Proxy expiredValid for: Proxy not validValid until: %sValue of 'count' attribute must be an integerValue of 'countpernode' attribute must be an integerValue of 'exclusiveexecution' attribute must either be 'yes' or 'no'Value of attribute '%s' expected not to be emptyValue of attribute '%s' expected to be a stringValue of attribute '%s' expected to be single valueValue of attribute '%s' has wrong sequence length: Expected %d, found %dValue of attribute '%s' is not a stringValue of attribute '%s' is not sequenceVariable name (%s) contains invalid character (%s)Variable name expectedVersion in Listen element can't be recognizedWARNING: The end time that you set: %s is before current time: %sWARNING: The start time that you set: %s is before current time: %sWaiting ends.Waiting for bufferWaiting for globus handle to settleWaiting for lock on file %sWaiting for lock on job list file %sWaiting for main job processing thread to exitWaiting for responseWaiting jobs: %iWaking upWarning: Failed listing files but some information is obtainedWarning: Failed removing jobs from file (%s)Warning: Failed to write job information to file (%s)Warning: Failed to write local list of jobs into file (%s), jobs list is destroyedWarning: Job not found in job list: %sWarning: Some jobs were not removed from serverWarning: Unable to create job list file (%s), jobs list is destroyedWarning: Unable to open job list file (%s), unknown formatWarning: Unable to read local list of jobs from file (%s)Warning: Unable to truncate local list of jobs in file (%s)Warning: Using SRM protocol v1 which does not support space tokensWas expecting %s at the beginning of "%s"Watchdog (re)starting 
applicationWatchdog detected application exitWatchdog detected application exit due to signal %uWatchdog detected application exited with code %uWatchdog detected application timeout or error - killing processWatchdog exiting because application was purposely killed or exited itselfWatchdog failed to kill application - giving up and exitingWatchdog failed to wait till application exited - sending KILLWatchdog fork failed: %sWatchdog starting monitoringWe only support CAs in Globus signing policy - %s is not supportedWe only support X509 CAs in Globus signing policy - %s is not supportedWe only support globus conditions in Globus signing policy - %s is not supportedWe only support subjects conditions in Globus signing policy - %s is not supportedWhen specifying 'countpernode' attribute, 'count' attribute must also be specifiedWill %s in destination index serviceWill calculate %s checksumWill clean up pre-registered destinationWill download to cache file %sWill not map to 'root' account by defaultWill process cacheWill release cache locksWill remove %s on service %s.Will retry without cachingWill use bulk requestWill wait 10sWill wait around %isWiping and re-creating whole storageWorking area free size: %i GBWorking area is not shared among jobsWorking area is shared among jobsWorking area life time: %sWorking area total size: %i GBWriting the info to the BLAH parser log: %sWrong directory in %sWrong format of the "FreeSlotsWithDuration" = "%s" ("%s")Wrong language requested: %sWrong number in defaultttl commandWrong number in maxjobdesc commandWrong number in maxjobs: %sWrong number in maxrerun commandWrong number in urdelivery_frequency: %sWrong number in wakeupperiod: %sWrong number of arguments givenWrong number of arguments!Wrong number of objects (%i) for stat from ftp: %sWrong number of parameters specifiedWrong option in %sWrong option in delegationdbWrong option in fixdirectoriesWrong ownership of certificate file: %sWrong ownership of key file: %sWrong ownership of proxy file: %sWrong permissions of certificate file: %sWrong permissions of key file: %sWrong permissions of proxy file: %sWrong service record field "%s" found in the "%s"Wrote request into a fileWrote signed EEC certificate into a fileWrote signed proxy certificate into a fileX509 Token handler is not configuredXACML request: %sXML config file %s does not existYou are about to remove jobs from the job list for which no information could be found. 
NOTE: Recently submitted jobs might not have appeared in the information system, and this action will also remove such jobs.You may try to increase verbosity to get more information.Your identity: %sYour issuer's certificate is not installedYour proxy is valid until: %s[ADLParser] %s element must be boolean.[ADLParser] AccessControl isn't valid XML.[ADLParser] Benchmark is not supported yet.[ADLParser] Code in FailIfExitCodeNotEqualTo in %s is not valid number.[ADLParser] CreationFlag value %s is not supported.[ADLParser] CredentialService must contain valid URL.[ADLParser] Missing Name element or value in ParallelEnvironment/Option element.[ADLParser] Missing or empty Name in InputFile.[ADLParser] Missing or empty Name in OutputFile.[ADLParser] Missing or wrong value in DiskSpaceRequirement.[ADLParser] Missing or wrong value in IndividualCPUTime.[ADLParser] Missing or wrong value in IndividualPhysicalMemory.[ADLParser] Missing or wrong value in IndividualVirtualMemory.[ADLParser] Missing or wrong value in NumberOfSlots.[ADLParser] Missing or wrong value in ProcessesPerSlot.[ADLParser] Missing or wrong value in SlotsPerHost.[ADLParser] Missing or wrong value in ThreadsPerProcess.[ADLParser] Missing or wrong value in TotalCPUTime.[ADLParser] Missing or wrong value in WallTime.[ADLParser] NetworkInfo is not supported yet.[ADLParser] NodeAccess value %s is not supported yet.[ADLParser] Only email Prorocol for Notification is supported yet.[ADLParser] Optional for %s elements are not supported yet.[ADLParser] Root element is not ActivityDescription [ADLParser] The NumberOfSlots element should be specified, when the value of useNumberOfSlots attribute of SlotsPerHost element is "true".[ADLParser] Unsupported EMI ES state %s.[ADLParser] Unsupported URL %s for RemoteLogging.[ADLParser] Unsupported internal state %s.[ADLParser] Wrong URI specified in Source - %s.[ADLParser] Wrong URI specified in Target - %s.[ADLParser] Wrong time %s in ExpirationTime.[ADLParser] priority is too large - using max value 100[filename ...][job ...][job description ...][job description input][resource ...]a file containing a list of jobIDsadd_word failureadvertisedvo parameter is emptyall for attentionall jobsallow TLS connection which failed verificationarc.confbrokerbuffer: error : %s, read: %s, write: %sbuffer: read EOF : %sbuffer: write EOF: %scache file: %scancelledceceID prefix is set to %scheck readability of object, does not show any information about objectcheck_ftp: failed to get file's modification timecheck_ftp: failed to get file's sizecheck_ftp: globus_ftp_client_get failedcheck_ftp: globus_ftp_client_modification_time failedcheck_ftp: globus_ftp_client_register_readcheck_ftp: globus_ftp_client_size failedcheck_ftp: obtained modification date: %scheck_ftp: obtained size: %llicheck_ftp: timeout waiting for modification_timecheck_ftp: timeout waiting for partial getcheck_ftp: timeout waiting for sizeclass name: %sclose failed: %sclosing file %s failed: %scommand to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or DESTROY. PUT -- put a delegated credentials to the MyProxy server; GET -- get a delegated credentials from the MyProxy server; INFO -- get and present information about credentials stored at the MyProxy server; NEWPASS -- change password protecting credentials stored at the MyProxy server; DESTROY -- wipe off credentials stored at the MyProxy server; Local credentials (certificate and key) are not necessary except in case of PUT. 
MyProxy functionality can be used together with VOMS functionality. --voms and --vomses can be used for Get command if VOMS attributes is required to be included in the proxy. computingcomputing element hostname or a complete endpoint URLconfiguration file (default ~/.arc/client.conf)d2i_X509_REQ_bio faileddata chunk: %llu %lludebugleveldefine the requested format (nordugrid:xrsl, emies:adl)delete_ftp: globus_ftp_client_delete faileddelete_ftp: globus_ftp_client_rmdir faileddelete_ftp: timeout waiting for deletedestinationdestination.next_locationdirdirectorydirnamedisplay all available metadatadisplay more information on each jobdndo not ask for verificationdo not collect information, only convert jobs storage formatdo not perform any authentication for opened connectionsdo not perform any delegation for submitted jobsdo not print list of jobsdo not print number of jobs in each statedo not submit - dump job description in the language accepted by the targetdo not transfer, but register source into destination. destination must be a meta-url.do not try to force passive transferdon't prompt for a credential passphrase, when retrieving a credential from a MyProxy server. The precondition of this choice is that the credential was PUT onto the MyProxy server without a passphrase by using the -R (--retrievable_by_cert) option. This option is specific to the GET command when contacting a Myproxy server.downloaddownload directory (the job directory will be created in this directory)downloadsdroppedecho: Unauthorizedempty input payloadempty next chain elementend of string encountered while processing type of subject name element #%derror converting number from bin to BIGNUMerror converting serial to ASN.1 formatescape character at end of stringexitfailed to read data chunkfailed to read data tagfilefile %s is not accessiblefile namefile name too longfilenamefilepathfinishedforce download (overwrite existing job directory)force overwrite of existing destinationforce using CA certificates configuration for Grid services (typically IGTF)force using CA certificates configuration for Grid services (typically IGTF) and one provided by OpenSSLforce using CA certificates configuration provided by OpenSSLforce using both CA certificates configuration for Grid services (typically IGTF) and those provided by OpenSSLforcedefaultvoms parameter is emptyformatfrom the following endpoints:fsync of file %s failed: %sftp_check_callbackftp_complete_callback: error: %sftp_complete_callback: successftp_get_complete_callback: Failed to get ftp fileftp_get_complete_callback: successftp_put_complete_callback: successftp_read_callback: Globus error: %sftp_read_callback: delayed data chunk: %llu %lluftp_read_callback: failure: %sftp_read_callback: successftp_read_callback: success - offset=%u, length=%u, eof=%u, allow oof=%uftp_read_callback: too many unexpected out of order chunksftp_read_callback: unexpected data out of order: %llu != %lluftp_read_thread: Globus error: %sftp_read_thread: data callback failed - aborting: %sftp_read_thread: exitingftp_read_thread: failed to register Globus buffer - will try later: %sftp_read_thread: failed to register buffersftp_read_thread: failed to release buffersftp_read_thread: failed to release buffers - leakingftp_read_thread: for_read failed - aborting: %sftp_read_thread: get and register buffersftp_read_thread: too many registration failures - abort: %sftp_read_thread: waiting for buffers releasedftp_read_thread: waiting for eofftp_write_callback: failure: %sftp_write_callback: 
success %sftp_write_thread: Globus error: %sftp_write_thread: data callback failed - abortingftp_write_thread: data out of order in stream mode: %llu != %lluftp_write_thread: exitingftp_write_thread: failed to release buffers - leakingftp_write_thread: for_write failed - abortingftp_write_thread: get and register buffersftp_write_thread: too many out of order chunks in stream modeftp_write_thread: waiting for buffers releasedftp_write_thread: waiting for eofftp_write_thread: waiting for transfer completegfal_close failed: %sgfal_closedir failed: %sgfal_listxattr failed, no replica information can be obtained: %sgfal_mkdir failed (%s), trying to write anywaygfal_mkdir failed: %sgfal_open failed: %sgfal_opendir failed: %sgfal_read failed: %sgfal_rename failed: %sgfal_rmdir failed: %sgfal_stat failed: %sgfal_unlink failed: %sgfal_write failed: %sglobalid is set to %sglobus_ftp_client_operationattr_set_authorization: error: %sgm-jobs displays information on current jobs in the system.gm-kick wakes up the A-REX corresponding to the given control directory. If no directory is given it uses the control directory found in the configuration file.gmetric_bin_path empty in arc.conf (should never happen the default value should be used)group<:role>. Specify ordering of attributes Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester Note that it does not make sense to specify the order if you have two or more different VOMS servers specifiedheadnode is set to %shostname[:port] of MyProxy serverhourhoursidincoming message is not SOAPindexinform about changes in particular job (can be used multiple times)init_handle: globus_ftp_client_handle_init failedinit_handle: globus_ftp_client_handleattr_init failedinit_handle: globus_ftp_client_handleattr_set_gridftp2 failedinit_handle: globus_ftp_client_operationattr_init failedinit_handle: globus_ftp_client_operationattr_set_allow_ipv6 failedinit_handle: globus_ftp_client_operationattr_set_delayed_pasv failedinmsg.Attributes().getAll() = %s inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %sinput does not define operationinput is not SOAPinputcheck checks that input files specified in the job description are available and accessible using the credentials in the given proxy file.instead of the status only the IDs of the selected jobs will be printedintinterfaceinterface is set to %sjob idjob_description_file [proxy_file]jobdescription file describing the job to be submittedjobdescription string describing the job to be submittedkeep the files on the server (do not clean)levellist record: %slist the available pluginslist the available plugins (protocols supported)list_files_ftp: checksum %slist_files_ftp: failed to get file's modification timelist_files_ftp: failed to get file's sizelist_files_ftp: globus_ftp_client_cksm failedlist_files_ftp: globus_ftp_client_modification_time failedlist_files_ftp: globus_ftp_client_size failedlist_files_ftp: looking for checksum of %slist_files_ftp: looking for modification time of %slist_files_ftp: looking for size of %slist_files_ftp: no checksum information possiblelist_files_ftp: no checksum information returnedlist_files_ftp: no checksum information supportedlist_files_ftp: timeout waiting for cksumlist_files_ftp: timeout waiting for modification_timelist_files_ftp: timeout waiting for sizeload serial from %s failurelocalid is set to %slong format (more information)lrms is emptymail parameter is emptymake parent directories as 
neededmalloc errormeta file %s is emptyminuteminutesmkdir_ftp: making %smkdir_ftp: timeout waiting for mkdirmodule name: %snnew_payload %snext chain element callednext element of the chain returned empty payloadnext element of the chain returned error statusnext element of the chain returned error status: %snext element of the chain returned invalid payloadnext element of the chain returned invalid/unsupported payloadnext element of the chain returned no payloadnext element of the chain returned unknown payload - passing throughnumbernumber of retries before failing file transferold_url new_urlonly get information about executon targets that support this job submission endpoint type. Allowed values are: arcrest and internal.only select jobs that were submitted to this computing elementonly select jobs whose status is statusstroperate recursivelyoperate recursively up to specified levelorderoutpayload %soutput is not SOAPoutput requested elements (jobs list, delegation ids and tokens) to fileowner subject is set to %sp12 file is emptypassword destination=password sourcepathpath to local cache (use to put file into cache)path to the VOMS server configuration filepath to the certificate file, it can be either PEM, DER, or PKCS12 formattedpath to the private key file, if the certificate is in PKCS12 format, then no need to give private keypath to the proxy filepath to the top directory of VOMS *.lsc files, only needed for the VOMS client functionalitypath to the trusted certificate directory, only needed for the VOMS client functionalityperform X.509 authentication for opened connectionsperform X.509 delegation for submitted jobsperform third party transfer, where the destination pulls from the source (only available with GFAL plugin)perform token authentication for opened connectionsperform token delegation for submitted jobsphysical location to write to when destination is an indexing service. Must be specified for indexing services which do not automatically generate physical locations. 
Can be specified multiple times - locations will be tried in order until one succeeds.pkey and rsa_key exist!plugin for transport protocol %s is not installedprint all information about this proxy.print delegation token of specified ID(s)print list of available delegation IDsprint main delegation token of specified Job ID(s)print selected information about this proxy.print state of the serviceprint summary of jobs in each transfer shareprint version informationprints info about installed user- and CA-certificatespriority is too large - using max value 100process: DELETEprocess: GETprocess: HEADprocess: POSTprocess: PUTprocess: action %s is not supported for subpath %sprocess: endpoint: %sprocess: factory endpointprocess: id: %sprocess: method %s is not supportedprocess: method %s is not supported for subpath %sprocess: method is not definedprocess: method: %sprocess: operation: %sprocess: request=%sprocess: response=%sprocess: schema %s is not supported for subpath %sprocess: subop: %sprocess: subpath: %sproxy constraintsput on holdqueue name is set to %sread information from specified control directoryread_thread: data read error from external process - aborting: %sread_thread: exitingread_thread: for_read failed - aborting: %sread_thread: get and register buffersread_thread: non-data tag '%c' from external process - leaving: %sregisterregistryregistry service URL with optional specification of protocolremove logical file name registration even if not all physical instances were removedremove proxyremove the job from the local list of jobs even if the job is not found in the infosysrequest at least this number of job instances submitted in single submit requestrequest at most this number of job instances submitted in single submit requestrequest to cancel job(s) with specified ID(s)request to cancel jobs belonging to user(s) with specified subject name(s)request to clean job(s) with specified ID(s)request to clean jobs belonging to user(s) with specified subject name(s)require information query using the specified information endpoint type. Special value 'NONE' will disable all resource information queries and the following brokering. Allowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal.require the specified endpoint type for job submission. 
Allowed values are: arcrest and internal.reverse sorting of jobs according to jobid, submissiontime or jobnamesave serial to %s failuresecondsecondssecondsselect broker method (list available brokers with --listplugins flag)setting file %s to size %llushow URLs of file locationsshow jobs where status information is unavailableshow only description of requested object, do not list content of directoriesshow only jobs of user(s) with specified subject name(s)show only jobs with specified ID(s)show progress indicatorshow status information in JSON formatshow the CE's error log of the jobshow the original job descriptionshow the specified file from job's session directoryshow the stderr of the jobshow the stdout of the job (default)shutdownskip jobs that are on a computing element with a given URLskip the service with the given URL during service discoverysort jobs according to jobid, submissiontime or jobnamesourcesource destinationsource.next_locationstart_readingstart_reading: helper start failedstart_reading: thread create failedstart_reading_ftpstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get failedstart_reading_ftp: globus_thread_create failedstart_writing_ftp: data chunk: %llu %llustart_writing_ftp: delayed data chunk: %llu %llustart_writing_ftp: failed to read data chunkstart_writing_ftp: failed to read data tagstart_writing_ftp: globus_thread_create failedstart_writing_ftp: helper start failedstart_writing_ftp: mkdirstart_writing_ftp: mkdir failed - still trying to writestart_writing_ftp: putstart_writing_ftp: put failedstart_writing_ftp: thread create failedstart_writing_ftp: waiting for data chunkstart_writing_ftp: waiting for data tagstart_writing_ftp: waiting for some buffers sentstatusstatusstrstop_reading: exiting: %sstop_reading: waiting for transfer to finishstop_reading_ftp: aborting connectionstop_reading_ftp: exiting: %sstop_reading_ftp: waiting for transfer to finishstringsubmit jobs as dry run (no submission to batch system)submit test job given by the numbertest job runtime specified by the numberthe IDs of the submitted jobs will be appended to this filethe file storing information about active jobs (default %s)this option is not functional (old GSI proxies are not supported anymore)timeout in seconds (default 20)treat requested object as directory and always try to list contenttruncate the joblist before synchronizingtypeunable to load number from: %sunregisteruploaduploadsurlurl [url ...]urllist %s contains invalid URL: %suse GSI communication protocol for contacting VOMS servicesuse HTTP communication protocol for contacting VOMS services that provide RESTful access Note for RESTful access, 'list' command and multiple VOMS servers are not supported use NSS credential database in default Mozilla profiles, including Firefox, Seamonkey and Thunderbird.use old communication protocol for contacting VOMS services instead of RESTful access use passive transfer (off by default if secure is on, on by default if secure is not requested)use secure transfer (insecure by default)use specified configuration fileuse the jobname instead of the short ID as the job directory nameusername to MyProxy server (if missing subject of user certificate is used)vomsvoms<:command>. Specify VOMS server More than one VOMS server can be specified like this: --voms VOa:command1 --voms VOb:command2. 
:command is optional, and is used to ask for specific attributes (e.g: roles) command options are: all --- put all of this DN's attributes into AC; list --- list all of the DN's attribute, will not create AC extension; /Role=yourRole --- specify the role, if this DN has such a role, the role will be put into AC; /voname/groupname/Role=yourRole --- specify the VO, group and role; if this DN has such a role, the role will be put into AC. If this option is not specified values from configuration files are used. To avoid anything to be used specify -S with empty value. waiting for data chunkwrite_thread: exitingwrite_thread: for_write eofwrite_thread: for_write failed - abortingwrite_thread: get and pass bufferswrite_thread: out failed - abortingxrootd close failed: %sxrootd open failed: %sxrootd write failed: %sy~DataPoint: destroy ftp_handle~DataPoint: destroy ftp_handle failed - retrying~DataPoint: failed to destroy ftp_handle - leakingProject-Id-Version: Arc Report-Msgid-Bugs-To: support@nordugrid.org PO-Revision-Date: 2025-06-26 09:41+0200 Last-Translator: Mattias Ellert Language-Team: Swedish Language: sv MIME-Version: 1.0 Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=n != 1; %s Cache : %s Cache (read-only): %s Cacherensning avstängd Cacherensning påslagen Cachelänkkatalog : %s Kontrollkatalog : %s Sessionsrotkat : %s förvalt LRMS : %s förvald kö : %s förvald ttl : %u Kör 'arcclean -s Undefined' för att ta bort borttagna jobb från jobblistan Kör 'arcclean -s Undefined' för att ta bort avbrutna jobb från jobblistan Använd arcclean för att ta bort hämtade jobb från jobblistan %s Är exekverbar: sant Namn: %s Sources.DelegationID: %s Sources.Options: %s = %s Sources: %s Targets.DelegationID: %s Targets.Options: %s = %s Targets: %s certifikat-dn: %s giltig till: %s utfärdar-dn: %s serie-nummer: %d %s: %i Leveranstjänst: %s Leveranstjänst: LOKAL Leverans-slottar: %u Akutslottar: %u Efterprocesserings-slottar: %u Förprocesserings-slottar: %u Förberedda slottar: %u Andelsinställningar: %s Status för slutpunkt (%s) är %s Denna slutpunkt (%s) är STARTED eller SUCCESSFUL ospecificerad: %i %s -> %s (%s) --- TORRKÖRNING --- Åtkomstkontroll: %s Annotering: %s Argument: %s Benchmarkinformation: Beräkningstjänstens loggkatalog: %s Beräkningsslutpunkt-URL: %s Beräkningsslutpunktsgränssnittsnamn: %s Beräkningslutpunkt-villkor: Referenstjänst: %s Delegerings-ID: Delegerings-ID-element: %s Avslutningstid: %s Post giltig i: %s Post giltig från: %s Environment.name: %s Environment: %s Avslutningskod: %d Avslutningskod för framgångsrik exekvering: %d Hälsotillstånd: %s ID på tjänst: %s Indatafil-element: Installerade programmiljöer: Jobbfel: %s Jobb kräver inte exklusiv exekvering Jobbhanterings-URL: %s (%s) Jobb kräver exklusiv exekvering Jobbstatus-URL: %s (%s) Mappar till kö: %s Namn: %s Ingen avslutningkod för framgångsrik exekvering angiven.
Nod-åtkomst: inkommande Nod-åtkomst: inkommande och utgående Nod-åtkomst: utgående Avisera: Gammalt aktivitets-ID: %s Gamla jobb-id: Operativsystem-villkor: Övriga meddelanden: %s Övriga attribut: [%s], %s Utdatafil-element: Ägare: %s PostExecutable.Argument: %s PreExecutable.Argument: %s Processering starttid: %s Proxy giltig till: %s Kö: %s RemoteLogging (valfritt): %s (%s) RemoteLogging: %s (%s) Begärd CPU-tid: %s Begärda slottar: %d Resultaten måste hämtas innan: %s Resultaten har raderats: %s Runtime-miljö-villkor: Tjänsteinformation-URL: %s (%s) Sessionskatalog-URL: %s Specifikt tillstånd: %s Stage-in-katalog-URL: %s Stage-out-katalog-URL: %s Tillstånd: %s Stderr: %s Stdin: %s Stdout: %s Insänt från %s Insänt: %s Insänt med klient: %s Använd CPU-tid: %s Använd CPU-tid: %s (%s per slot) Använt minne: %i Använd klocktid: %s Använd klocktid: %s (%s per slot) Position i kö: %i [ JobDescription testare ] [ Tolkar den ursprungliga texten ] [ emies:adl ] [ nordugrid:xrsl ] $X509_VOMS_FILE och $X509_VOMSES är inte tilldelade; Användaren har inte angivit sökvägen till vomses-informationen; Det finns inte heller sökväg till vomses i användarens inställningsfil; Kan inte hitta vomses på förvalda sökvägar: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomses, och de motsvarande underkatalogerna%5u s: %10.1f kB %8.1f kB/s%d batchsystem%d Slutpunkter%d andelar%d mappningspolicyer%d av %d jobb sändes in%i försök kvar, kommer att vänta %s innan nästa försök%li sekunder sedan låsfilen %s skapades%s%s %s kunde inte skapas.%s > %s => falskt%s > %s => falskt: %s innehåller icke-nummer i versionsdelen.%s > %s => sant%s-klass är inte ett objekt%s-katalog skapad%s-katalog existerar! Hoppar över jobb.%s misslyckadesDigesttypen %s stöds inte%s är inte en katalog%s är inte ett objekt%s gjord persistent%s-tolkningsfelHittade inte %s-plugin "%s".%s version %s%s->%s%s. Kan inte kopiera filuppsättning%s. SQLite-databasfel: %s%s:%s: %i%s: %s%s: %s: Nytt jobb tillhör %i/%i%s: %s: Lägger till ny utdatafil %s: %s%s: Alla %s %s framgångsrikt%s: Begäran om att bringa online %s i SRM-kö.
Väntar i %i sekunder%s: Cacherensning tar för lång tid - %u.%06u sekunder%s: Kan inte konvertera checksumma %s till heltal för %s%s: Kan inte konvertera filstorlek %s till heltal för %s%s: Kan inte läsa lista med indatafiler%s: Kan inte starta om på begäran%s: Kan inte starta om på begäran - inte ett lämpligt tillstånd%s: Avbryter jobb på grund av användarbegäran%s: Avbryter aktiva DTRer%s: Avbryter övriga DTRer%s: Kan inte ladda upp två olika filer %s och %s till samma LFN: %s%s: Kontrollerar användaruppladdningsbar fil: %s%s: Checksumma %llu verifierad för %s%s: Kritiskt fel för uppladdningsbar fil %s%s: DTR %s att kopiera fil %s misslyckades%s: DTR %s att kopiera till %s misslyckades men är inte obligatorisk%s: Radera begäran på grund av interna problem%s: Destinationsfil %s lämnades möjligen oavslutad från tidigare A-REX-körning, kommer att skriva över%s: Duplikatfil i lista med indatafiler: %s%s: Fel vid åtkomst för fil %s%s: Fel vid läsning av fil %s%s: Fel vid läsning av användargenererad lista med utdatafiler i %s%s: Misslyckades med att skapa grami-fil%s: Misslyckades med att erhålla lokal information.%s: Misslyckades med att erhålla LRMS-id%s: Misslyckades med att tolka jobbegäran.%s: Misslyckades med att läsa .local och att ändra tillstånd, jobb och A-REX kan lämnas i ett motsägande tillstånd%s: Misslyckades med att läsa jobbeskrivning: %s%s: Misslyckades med att läsa lokal information%s: Misslyckades med att köra avbrytningsprocess%s: Misslyckades med att köra insändningsprocess%s: Misslyckades med att sätta körbar åtkomsträttighet%s: Misslyckades med lagra felorsak: %s%s: Misslyckades med att avbryta körande jobb%s: Misslyckades med att rensa upp sessionskatalog%s: Misslyckades med att lista utdatakatalog %s: %s%s: Misslyckades med ladda in utvärderare för användarpolicy %s: Misslyckades med att öppna fil %s för läsning%s: Misslyckades med att tolka användarpolicy%s: Misslyckades med att läsa dynamiska utdatafiler i %s%s: Misslyckades med att läsa lista med indatafiler%s: Misslyckades med att läsa lista med indatafiler, kan inte rensa upp sessionskatalog%s: Misslyckades med att läsa lista med utdatafiler%s: Misslyckades med att läsa lista med utdatafiler, kan inte rensa upp sessionskatalog%s: Misslyckades med att läsa omprocesserad lista med indatafiler%s: Misslyckades med att läsa omprocesserad lista med utdatafiler%s: Misslyckades med att motta jobb i DTR-generator%s: Misslyckades med att byta användar-id till %d/%d för att läsa fil %s%s: Misslyckades med sätta jobbet som misslyckat under avbrytningsprocessering%s: Misslyckades med att skriva tillbaka dynamiska utdatafiler i %s%s: Misslyckades med att skriva lista med indatafiler%s: Misslyckades med att skriva lista med utdatafiler.%s: Misslyckades med att skriva lista med utdatastatusfiler.%s: Misslyckades med att skriva ändrad indatafil.%s: Misslyckades med att skriva lista med utdatafiler: %s%s: Misslyckades med att skriva lokal information%s: Misslyckades med att skriva lokal information: %s%s: Misslyckades med att skapa datalagring för barnprocess%s: Misslyckades med skapa slot för barnprocess%s: Misslyckande med att starta av barnprocess%s: Misslyckande med att vänta på att barnprocess skall avslutas%s: Fil %s har felaktig checksumma: %llu. Förväntade %lli%s: Fil-begäran %s i SRM-kö.
Väntar i %i sekunder%s: Går igenom filer i lista %s%s: Ogiltig DTR%s: Ogiltig fil: %s är för stor.%s: Ogiltig storlek/checksumma information (%s) för %s%s: Begäran att avbryta jobb från DTR-generator till schemaläggare%s: Avbrytande av jobb tar för lång tid, men diagnostikinsamling verkar ha gjorts. Låtsas att avbrytande lyckades.%s: Avbrytande av jobb tar för lång tid. Misslyckas.%s: Jobbet misslyckades i okänt tillstånd. Kommer ej att starta om.%s: Jobbfel detekterat%s: Jobbet avslutat%s: Jobb har redan slutförts. Ingen handling tagen för att avbryta%s: Jobbet är antikt - raderar resterande information%s: Jobb har inte rätt att startas om längre%s: Jobbet har fått begäran om att tas bort - tar bort%s: Jobbet är för gammalt - raderar%s: Jobbmonitoreringsräknare är trasig%s: Jobbmonitorering förlorad på grund av borttagande från kö%s: Jobbmonitorering har oavsiktligt förlorats%s: Avslutande av jobbmonitorering begärd med %u aktiva referenser%s: Avslutande av jobbmonitorering begärd med %u aktiva referenser och kön %s associerad%s: Jobmonitorering avslutades%s: Jobbinsändning till LRMS misslyckades%s: Jobbinsändning till LRMS tar för lång tid, men ID har redan erhållits. Låtsas att insändning gjorts%s: Jobbinsändning till LRMS tar för lång tid. Misslyckas.%s: Jobbets hjälpprogram avslutades%s: LRMS-skriptets gräns på %u är nådd - suspenderar insändning/avbrytande%s: Plugin vid tillstånd %s : %s%s: Pluginexekvering misslyckades%s: Processering av jobbeskrivning misslyckades%s: PushSorted misslyckades med att hitta jobb där de förväntades%s: Begär uppmärksamhet från DTR-generator igen%s: Läser utdatafiler från användargenererad lista i %s%s: Läsandet av det nya jobbets status misslyckades%s: Mottog DTR %s att kopiera fil %s i tillstånd %s%s: Mottagen DTR tillhör inaktivt jobb%s: Mottog DTR med två fjärrslutpunkter!%s: Mottog datastagingbegäran att %s filer%s: Mottog jobb i DTR-generator%s: Mottog jobb i ett dåligt tillstånd: %s%s: Tar bort %s från dynamisk utdatafil %s%s: Omprocessering av jobbeskrivning misslyckades.%s: Begär uppmärksamhet från DTR-generator%s: Returnerar avbrutet jobb från DTR-generator%s: Sessionskatalogsprocessering tar för lång tid - %u.%06u sekunder%s: Några %s misslyckades%s: Tillstånd: %s från %s%s: tillstånd: %s: datastaging avslutad%s: tillstånd: %s: fortfarande i datastaging%s: Tillstånd: ACCEPTED%s: Tillstånd: ACCEPTED: dryrun%s: Tillstånd: ACCEPTED: har process-tid %s%s: Tillstånd: ACCEPTED: flyttar till PREPARING%s: Tillstånd: ACCEPTED: tolkar jobbeskrivning%s: Tillstånd: CANCELING%s: Tillstånd: FINISHING%s: Tillstånd: INLRMS%s: Tillstånd: INLRMS - letar efter inte pending%s: Tillstånd: INLRMS - letar efter pending(%u) och markerade%s: Tillstånd: INLRMS - hittade ingen markering%s: Tillstånd: INLRMS: avslutningsmeddelande är %i %s%s: Tillstånd: PREPARING%s: Tillstånd: SUBMIT%s: Försöker att ta bort jobb från datastaging som inte existerar%s: Försöker att ta bort jobb från datastaging som fortfarande är aktivt%s: Två identiska utdatadestinationer: %s%s: Okänd användarpolicy '%s'%s: Uppladdningsbara filer avbröts på grund av timeout%s: Användare har INTE laddat upp fil %s%s: Användare har laddat upp fil %s%s: checksumma %s%s: ta bort fil %s: misslyckades med att erhålla filsökväg: %s%s: ta bort fil %s: misslyckades med att öppna fil/katalog: %s%s: jobb tilldelat för långsam utfrågning%s: jobb processeras%s: jobb för uppmärksamhet%s: Jobb hittat vid skanning%s: jobb kommer att vänta på extern process%s: nytt jobb
har accepterats%s: gammalt jobb har accepterats<%s: put fil %s: %s%s: put fil %s: misslyckades med att skapa fil: %s%s: put fil %s: det finns ingen nyttolast%s: put fil %s: okänd nyttolast%s: put logg %s: det finns ingen nyttolast%s: put logg %s: okänd nyttolast%s: replika-typ %s%s: startade om FINISHING jobb%s: startade om INLRMS jobb%s: startade om PREPARING jobb%s: storlek %llu%s: tillstånd CANCELING: barnprocess avslutades med kod %i%s: tillstånd CANCELING: jobbdiagnostik insamlad%s: tillstånd CANCELING: startar barnprocess: %s%s: tillstånd CANCELING: timeout vid väntan på avbrytande%s: tillstånd SUBMIT: barnprocess avslutades med kod %i%s: tillstånd SUBMIT: startar barnprocess: %s%s: det finns inget jobb: %s%s: oväntad begäran att lägga till misslyckat jobb: %s%s: oväntad begäran att lägga till jobb: %s'(' förväntades')' förväntades'action'-attribut inte tillåtet i jobbeskrivning på användarsidan'control'-inställningsalternativet stöds inte längre, använd 'controldir' istället'stdout'-attributet måste anges när 'join'-attributet angesFörsöker med nästa destination (igen)Försöker med nästa källa (igen)(tom)(null): %d: %s: Bokföringspostrapporteringsverktyg är inte angivet: Misslyckades med att skapa bokföringsdatabasförbindelse: Misslyckades med skapa slot för bokföringsrapporterings-barnprocess: Misslyckande med att starta bokföringsrapporterings-barnprocess: Metrikverktyg returnerade felkod %i: %s: skrivning av bokföringspost tog %llu ms< %s<< %s> %sEn beräkningsresurs som använder GridFTP-gränssnittet begärdes, men %smotsvarande plugin kunde inte laddas in. Är pluginen installerad? %sOm inte, installera paketet 'nordugrid-arc-plugins-globus'. %sBeroende på din installationtyp kan paketnamnet variera.A-REX REST: Misslyckades med att återuppta jobbA-REX REST: Tillståndsändring inte tillåten: från %s till %sAC-tilläggsinformation för VO AC är ogiltig: ARC-auktoriseringsbegäran: %sARC delegeringspolicy: %sÅtkomstlista: %sBokföringsdatabasen kan inte skapas. Misslyckades med att skapa föräldrakatalog %s.Bokföringsdatabasen kan inte skapas. %s är inte en katalogBokföringsdatabasförbindelse har etableratsBokföringsdatabasfil (%s) är inte en vanlig filBokföringsdatabas initieradErhöll autentiserings-token för %s: %sLägg till plats: metadata: %sLägg till plats: url: %sLägger till slutpunkt (%s) till ServiceEndpointRetrieverLägger till slutpunkt (%s) till TargetInformationRetrieverLägger till slutpunkt (%s) till både ServiceEndpointRetriever och TargetInformationRetrieverLägger till plats: %s - %sLägger till begäran-token %sLägger till spacetoken: %sLägger till till massbegäranAdress: %sAlla %u processeringsslottar användsAlla DTRer avslutade för jobb %sAlla erhållna resultat är ogiltigaAlla mjukvarukrav uppfyllda.tillåt angiven entitet att hämta referens utan lösenord.
Detta alternativ är specifikt för PUT-kommandot när en myproxy-server kontaktas.Läser redan från källaSkriver redan till destinationEtt fel inträffade under skapandet av jobbeskrivningen som ska sändas till %sEn annan process (%s) äger låset på fil %sArc-policy kan inte överföras av XACMLs SAML2.0-profilArcAuthZ: misslyckades med att initiera alla PDPer - denna instans kommer inte att fungeraArkiverar DTR %s, tillstånd %sArkiverar DTR %s, tillstånd ERRORÄr du säker på att du vill ta bort jobb för vilka information saknas?Är du säker på att du vill synkronisera din lokala jobblista?Sätter samman BLAH-parser-logg-post: %sTilldelad till delegeringsgrupp %sTilldelad till användarlista %sAntar - hittade inte filenAntar att överföring redan är avbruten eller misslyckad.Minst två värden behövs för 'inputfiles'-attributetMinst två värden behövs för 'outputfiles'-attributetFörsöker tilldela relativ sökväg till URL - gör den absolutFörsöker att kontakta %s på port %iAttribut '%s' definierat mer än en gångAttributet 'join' kan inte anges när både 'stdout'- och 'stderr'-attributen angesAttributvärde (1): %sAttributvärde (2): %sAttributvärde inuti Subject: %sAttributnamn (%s) innehåller ogiltigt tecken (%s)Attributnamn förväntadesAttributen 'gridtime' och 'cputime' kan inte anges samtidigtAttributen 'gridtime' och 'walltime' kan inte anges samtidigtAutentiseringsbegäran-URL: %sAuktoriserad av arc.pdpAuktoriserad av fjärr-pdp-tjänstAuktoriserad av simplelist.pdp: %sAuktoriserad av xacml.pdpBN_new || RSA_new misslyckadesBN_set_word misslyckadesFelaktig URL i leveranstjänsten: %sFelaktig autentiseringsinformation: %sFelaktigt format för checksumma %sFelaktigt referensvärde %s i cacheåtkomstreglerFelaktigt format upptäckt i fil %s, på rad %sFelaktigt format i XML-svar från leveranstjänst på %s: %sFelaktigt format i XML-svar från tjänst på: %s: %sFelaktigt format i XML-svar: %sDålig etikett: "%s"Dålig logikDålig logik för %s - bringOnline returnerade OK men SRM-begäran har inte avslutats framgångsrikt eller pågårDålig logik för %s - getTURLs returnerade OK men SRM-begäran har inte avslutats framgångsrikt eller pågårDålig logik för %s - putTURLs returnerade OK men SRM-begäran har inte avslutats framgångsrikt eller pågårFelaktigt namn för executable: %sFelaktigt namn för runtime-miljö: %sFelaktigt namn för stderr: %sFelaktigt namn för stdout: %sFelaktigt nummer i definedshare %sFelaktigt nummer i maxdeliveryFelaktigt nummer i maxemergencyFelaktigt nummer i maxpreparedFelaktigt nummer i maxprocessorFelaktigt nummer i maxtransfertriesFelaktigt nummer i priority-element: %sFelaktigt nummer i remotesizelimitFelaktigt nummer i speedcontrolFelaktigt eller gammalt format upptäckt i fil %s, på rad %sFelaktig sökväg för %s: Format ska vara /replicas//Felaktigt värde för loglevelFelaktigt formatterat pid %s i låsfil %sBatchsysteminformation:Batchsysteminformation:Bärar-token är tillgängligt.
Det föredras för jobbinsändning.BeteendeinställningHittade inte block %s i inställningsfil %sBlocknamn är tomtBoostar prioritet från %i till %i på grund av inkommande DTR med högre prioritetBåda URLerna måste ha samma protokoll, värd och portBåde CACertificatePath- och CACertificatesDir-elementen saknas eller är tommaBegäran att bringa online %s avslutades framgångsrikt, filen är nu ONLINEBegäran att bringa online %s är fortfarande i kö, ska väntaTrasig strängMäklare %s har laddats inHittade inte mäklar-plugin "%s".Resursmatchning och filtreringMäklare tillgängliga för %s:Skapande av buffer misslyckadesUpptagna pluginer hittad när modul-hanteraren laddades ut. Väntar på att de ska frigöras.CA-certifikat och CA-privat-nyckel matchar inteCA-namn: %sInstallerade CA-certifikat:INNEHÅLL %u: %sCPU-klockhastighet: %iCPU-modell: %sCPU-tillverkare: %sCPU-version %sCache %s: Fritt utrymme %f GBÅtkomst till cache tillåtet för %s av DN %sÅtkomst till cache tillåtet för %s av VO %sÅtkomst till cache tillåtet för %s av VO %s och grupp %sÅtkomst till cache tillåtet för %s av VO %s och roll %sCacheutrymme fri storlek: %i GBCacheutrymme total storlek: %i GBCacherensningsskript misslyckadesCache skapades: %sCachefil %s existerar inteHittade inte cachefil %sCachefil %s togs bort under länkning/kopiering, måste börja omCachefil %s låstes under länkning/kopiering, måste börja omCachefil %s ändrades under den senaste sekunden, väntar 1 sekund för att undvika race conditionCachefil %s ändrades under länkning, måste börja om<Cachefil är %sCachemetafil %s är tom, kommer att återskapaCachemetafil %s är möjligen korrupt, kommer att återskapaHittade inte cache för fil %sCachad kopia är fortfarande giltigCachad fil är låst - bör försöka igenCachad fil är gammal, kommer att ladda ner igenBeräknad checksumma %s stämmer överens med checksumma rapporterad av servernBeräknad överförings-checksumma %s stämmer överens med källans checksummaBeräknad/tillhandahållen överföringschecksumma %s stämmer överens med checksumma rapporterad av SRM-destinationen %sCallback erhöll misslyckandeAnropar PrepareReading när begäran redan förberetts!Anropar PrepareWriting när begäran redan förberettsAnropar http://localhost:60000/Echo med ClientSOAPAnropar http://localhost:60000/Echo med httplibAnropar https://localhost:60000/Echo med ClientSOAPAnropar plugin %s för att fråga slutpunkt på %sKan inte komma åt CA-certifikatkatalog: %s.
Certifikaten kommer inte att verifierasKan inte komma åt VOMS-fil/katalog: %s.Kan inte komma åt VOMSES-fil/katalog: %s.Kan inte komma åt certifikatfil: %sKan inte komma åt nyckelfil: %sKan inte komma åt proxyfil: %sKan inte lägga till X509-utökat KeyUsage-tillägg till det nya proxycertifikatetKan inte lägga till X509-tillägg till proxycertifikatKan inte allokera minneKan inte allokera minne för tillägg för proxycertifikatKan inte beräkna digest för publik nyckelKan inte konvertera DER-kodat PROXY_CERT_INFO_EXTENSION-tillägg till internt formatKan inte konvertera PROXY_CERT_INFO_EXTENSION-struct från internt till DER-kodat formatKan inte konvertera keyUsage-struct från DER-kodat formatKan inte konvertera keyUsage-struct från internt till DER-formatKan inte konvertera privat nyckel till DER-formatKan inte konvertera det signerade EEC-certifikatet till DER-formatKan inte konvertera det signerade proxycertifikatet till DER-formatKan inte konvertera det signerade proxycertifikatet till PEM-formatKan inte konvertera sträng till ASN1_OBJECTKan inte kopiera det utökade KeyUsage-tilläggetKan inte kopiera subjektnamnet från utfärdaren för proxycertifikatetKan inte skapa ASN1_OCTET_STRINGKan inte skapa BIO för att tolka begäranKan inte skapa BIO för begäranKan inte skapa BIO för det signerade EEC-certifikatetKan inte skapa BIO för det signerade proxycertifikatetKan inte skapa PROXY_CERT_INFO_EXTENSION-tilläggKan inte skapa PolicyStore-objektKan inte skapa en ny X509_NAME_ENTRY för proxycertifikatbegäranKan inte skapa delegeringsreferens för delegeringstjänsten: %sKan inte skapa tillägg för PROXY_CERT_INFOKan inte skapa tillägg för keyUsageKan inte skapa tillägg för proxycertifikatKan inte skapa funktion %sKan inte skapa funktion: Funktions-id existerar inteKan inte skapa namnpost CN för proxycertifikatetKan inte skapa SSL-kontextobjektKan inte skapa SSL-objektetKan inte bestämma installationsplats. Använder %s. Ange ARC_LOCATION om detta inte är korrekt.Kan inte duplicera serienummer för proxycertifikatetKan inte duplicera subjektnamnet för den självsignerande proxycertifikatbegäranKan inte skapa AlgFactory dynamisktKan inte skapa AttributeFactory dynamisktKan inte skapa utvärderare dynamisktKan inte skapa FnFactory dynamisktKan inte skapa policy dynamisktKan inte skapa Request dynamisktKan inte hitta -element med rätt namnrymdKan inte hitta -element med rätt namnrymdKan inte hitta ArcPDPContextKan inte hitta CA-certifikatkatalogen på förvalda platser: ~/.arc/certificates, ~/.globus/certificates, %s/etc/certificates, %s/etc/grid-security/certificates, %s/share/certificates, /etc/grid-security/certificates. Certifikaten kommer inte att verifieras.
Om CA-certifikatkatalogen existerar, ange dess plats manuellt ange platsen via miljövariabeln X509_CERT_DIR, eller attributet cacertificatesdirectory i client.conf Kan inte hitta XACMLPDPContextKan inte hitta certifikatfil: %sKan inte hitta certifikat med namn %sKan inte hitta utfärdarcertifikat för certifikatet med subjekt %s och hash: %luKan inte hitta nyckelfil: %sKan inte hitta nyckel med namn: %sKan inte hitta voms-tjänst-inställningsfil (vomses) på förvalda platser: ~/.arc/vomses, ~/.voms/vomses, $ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses, /etc/vomses, /etc/grid-security/vomsesKan inte generera X509-begäranKan inte generera policyobjektKan inte erhålla SAMLAssertion SecAttr från meddelandekontextKan inte erhålla utökad KeyUsage-tillägg från utfärdarcertifikatetKan inte erhålla policy från PROXY_CERT_INFO_EXTENSION-tilläggKan inte erhålla policy-språk från PROXY_CERT_INFO_EXTENSION-tilläggKan inte erhålla certifikattypKan inte erhålla delegeringsreferens: %s från delegeringstjänst: %sKan inte erhålla utfärdarens privata nyckelKan inte ladda in ARC-utvärderingsobjekt: %sKan inte ladda in ARC-begäranobjekt: %sKan inte ladda in policyobjektKan inte ladda in policyobjekt: %sKan inte ladda in begäranobjektKan inte öppna jobbeskrivningsfil: %sKan inte öppna nyckelfil: %sKan inte tolka klassnamn för AttributeFactory från konfigurationenKan inte tolka klassnamn för CombiningAlgorithmFactory från konfigurationenKan inte tolka klassnamn för FunctionFactory från konfigurationenKan inte tolka klassnamn för Policy från konfigurationenKan inte tolka klassnamn för Request från konfigurationenKan inte tolka datum: %sKan inte tolka månad: %sKan inte tolka tidszon: %sKan inte tolka tid: %sKan inte läsa PEM privat nyckelKan inte läsa PEM privat nyckel: misslyckades med att avkodaKan inte läsa PEM privat nyckel: misslyckades med att erhålla lösenordKan inte läsa PEM privat nyckel: troligen fel lösenordKan inte läsa certifikatfil: %sKan inte läsa certifikatsträng: %sKan inte läsa certifikat/nyckel-strängKan inte läsa information från den lokala statusfilenKan inte läsa nyckelsträngKan inte ange CN i proxycertifikatetKan inte ange utfärdarens subjekt för proxycertifikatetKan inte ange privat nyckelKan inte ange publik nyckel för proxycertifikatetKunde inte ange läsbar fil för begärans BIOKan inte ange serienummer för proxycertifikatetKan inte ange livstid för proxycertifikatetKan inte ange versionsnummer för proxycertifikatetKan inte ange skrivbar fil för begärans BIOKan inte ange skrivbar fil för det signerade EEC-certifikatets BIOKan inte ange skrivbar fil för det signerade proxycertifikatets BIOKan inte signera ett EECKan inte allokera minne för CA-policy-sökvägKan inte konvertera DER-kodat PROXYCERTINFO-tillägg till internt formatKan inte konvertera X509-begäran från internt till DER-kodat formatKan inte skapa delegeringskontextKan inte ta bort katalog: %s: %sKan inte ta bort fil %s: %sKan ej extrahera objektets namn från käll-URLKan inte hitta LCAS-funktioner i bibliotek: %sKan inte hitta LCMAPS-funktioner i bibliotek %sKan inte erhålla policy från PROXYCERTINFO-tilläggKan inte erhålla policy-språk från PROXYCERTINFO-tilläggKan inte erhålla indata-BIOs första byte för att bestämma dess formatKan inte erhålla indatas första byte för att bestämma dess formatKan inte hantera URL %sKan inte hantera plats %sKan inte ladda in LCAS-bibliotek %s: %sKan inte ladda in LCMAPS-bibliotek %s: %sKan inte erhålla inställningar.
Endast publik information tillhandahålls.Kan inte erhålla inställningar. Publik information är inte tillåten för denna användare.Kan inte öppna inställningsfilKan inte tolka värd och/eller port i EPSV/PASV-svarKan inte läsa inställningsfilKan inte läsa inställningsfil på %sKan ej läsa från källaKan inte läsa lista med destinationer från filen %sKan inte läsa platslista från fil %sKan inte läsa lista med källor från filen %sKan inte läsa policynamnKan inte läsa överföringstillstånd från %s. Kanske kör inte A-REX?Känner inte igen typ av inställningsfilKänner inte igen typ av inställningsfil på %sKan inte byta namn på fil %s: %sKan inte återställa indataKan inte hämta jobbfiler för jobb (%s) - kan inte bestämma URL till logg-katalogKan inte hämta jobbfiler för jobb (%s) - kan inte bestämma URL till stage-out-katalogKan inte ange OpenSSL verifikationsflaggorKan inte göra stat på filen: %s: %sKan inte göra stat på stdio-kanal %sKan inte sända in mer än en instans för mer än en jobbeskrivning. Ännu ej implementerat.Kan inte använda URL: %sKan ej skriva till destinationAvbrytande slutförtAvbryter DTR %s med källa: %s, destination: %sAvbryter aktiv överföringAvbryter synkroniseringsbegäranCandyPond: OauktoriseradKan inte anpassa jobbeskrivning till insändnings-target när informationssökning är avslagenKan inte ändra ägare för %s: %sKan inte ändra åtkomsträttigheter för %s: %sKan inte jämföra tom checksummaKan inte konvertera ARC-modulnamn till pythonsträngKan inte konvertera ExecutionTarget (%s) till pythonobjektKan inte konvertera JobDescription till pythonobjektKan inte konvertera UserConfig till pythonobjektKan inte konvertera inställningar till pythonobjektKan inte konvertera inmsg till pythonobjektKan inte konvertera modulnamn till pythonsträngKan inte konvertera outmsg till pythonobjektKan inte konvertera sträng %s till heltal på rad %sKan inte kopiera exempelinställningar (%s), det är inte en vanlig filKan inte skapa ExecutionTarget-argumentKan inte skapa JobDescription-argumentKan inte skapa UserConfig-argumentKan inte skapa argument till konstruktornKan inte skapa inställningsargumentKan inte skapa kataloger för loggfil %s. Meddelanden kommer att loggas i denna loggKan inte skapa katalog %s för per-jobb hårda länkarKan inte skapa http-nyttolastKan inte skapa inmsg-argumentKan inte skapa instans av pythonklassKan inte skapa outmsg-argumentKan inte skapa %s-utdata för något jobbKan inte skapa %s-utdata för jobb (%s): Ogiltig källa %sKan inte skapa resolver från /etc/resolv.confKan inte bestämma värdnamn från gethostname()Kan inte bestämma värdnamn från gethostname() för att generera ceID automatiskt.Kan inte bestämma replika-typ för %sKan inte bestämma plats för %s: %sKan inte hitta under svar-soap-meddelande:Kan inte hitta ARCs inställningsklassKan inte hitta ARCs ExecutionTarget-klassKan inte hitta ARCs JobDescription-klassKan inte hitta arcmeddelandeklassKan inte hitta ARCs UserConfig-klassKan inte hitta någon proxy. Detta program kan för närvarande inte köras utan en proxy. Om du har proxyfilen på en icke-förvald plats, se till att sökvägen är angiven i klientinställningsfilen. Om du inte har en proxy än, kör 'arcproxy'"Kan inte hitta något token. Kör 'oidc-token' eller använd ett liknande verktyg för att erhålla ett autentiserings-token!Kan inte hitta innehåll under svar-soap-meddelandeKan inte hitta mäklarklassKan inte hitta fil på %s för att hämta proxyn.
Se till att denna fil existerar.Kan inte hitta information om jobbinsändningsslutpunktKan inte hitta lokal indatafil '%s' (%s)Kan inte hitta tjänsteklassKan inte hitta sökväg till CA-certifikat-katalogen, sätt miljövariabeln X509_CERT_DIR, eller cacertificatesdirectory i en inställningsfil.Kan inte hitta sökväg till proxyfilen, sätt miljövariabeln X509_USER_PROXY, eller proxypath i en inställningsfilKan inte hitta sökväg till användarcertifikatet, sätt upp miljövariabeln X509_USER_CERT, eller certificatepath i en inställningsfilKan inte hitta sökväg till privata nyckeln, sätt miljövariabeln X509_USER_KEY, eller keypath i en inställningsfilKan inte hämta VOMS-serveradressinformation från vomsesrad: "%s"Kan inte hämta ordlista för ARC-modulenKan inte hämta ordlista för mäklarmodulKan inte erhålla ordlista för modulenKan inte hantera lokal ägare %sKan inte importera ARC-modulenKan inte importera modulKan inte initiera ARCHERY-domännamn för förfråganKan inte länka till fjärrdestination. Kommer inte att använda mappad URLKan inte länka till källa som kan modifieras, kommer att kopiera iställetKan inte öppna BLAH-loggfil '%s'Kan inte öppna cacheloggfil %s: %s. Cacherensningsmeddelanden kommer att loggas till denna loggKan inte skapa XRSL-representation: Resources.SlotRequirement.NumberOfSlots-attributet måste anges när Resources.SlotRequirement.SlotsPerHost-attributet anges.Kan inte tolka heltalsvärdet '%s' för -%cKan inte tolka lösenordskälla %s det måste vara i formatet källtyp eller källtyp:data. Giltiga källtyper är int, stdin, stream, file.Kan inte tolka uttrycket för lösenordskälla %s det måste vara i formatet typ=källaKan inte tolka lösenordskälltyp %s. Giltiga källtyper är int, stdin, stream, file.Kan inte tolka lösenordstyp %s. Nuvarande giltiga värden är 'key', 'myproxy', 'myproxynew' och 'all'.Kan inte tolka schema!Kan inte tolka tjänsteslutpunkt-TXT-poster.Kan inte behandla proxyfil på %s.Kan inte fråga om tjänsteslutpunkt-TXT-poster från DNSKan inte läsa angiven jobb-id-fil: %sKan inte ta bort proxyfil på %sKan inte ta bort proxyfil på %s, eftersom den inte existerarKan inte byta namn till eller från rotkatalogenKan inte byta namn till samma URLKan inte göra stat på lokal indatafil '%s'Kan inte byta till grupp (%s)Kan inte byta till primär grupp för användare (%s)Kan inte byta till användare (%s)Kan inte uppdatera AAR.
Kan inte hitta registrerad AAR för jobb %s i bokföringsdatabasen.Kan inte använda tillhandahållet --size-alternativKan inte skriva jobb-id till fil (%s)Kan inte skriva jobb-id (%s) till fil (%s)Förmågor:Visar %s för jobb %sAnledning till misslyckande oklar - väljer slumpvisCertifikattyp: %dGiltighetstiden för certifikat %s har redan gått utGiltighetstiden för certifikat %s kommer att gå ut om %sHittade inte certifikat och nyckel ('%s' och '%s') i någon av sökvägarna: %sCertifikat har ingen slotCertifikatformat är DERCertifikatformat är PEMCertifikatformat är PKCSCertifikatformat är okäntCertifikat har okänt tillägg med numeriskt ID %u och SN %sCertifikatinformationsinsamling misslyckadesCertifikatinformation:Certifikatutfärdare: %sCertifikatbegäran är ogiltigCertifikat som kommer att användas är: %sCertifikatverifieringsfel: %sCertifikatverifiering misslyckadesCertifikatverifiering lyckadesCertifikat med serienummer %s och subjekt "%s" är revokeratGiltighetstiden för certifikat med subjekt %s har gått utCertifikat/proxy-sökväg är tomCertifikat: %sCertifikatkedja nummer %dInställning av kedja misslyckadesCheck: letar efter metadata: %sCheck: erhållen åtkomst-latency: hög (NEARLINE)Check: erhållen åtkomst-latency: låg (ONLINE)Check: erhållen checksumma: %sCheck: erhållen ändringstid: %sCheck: erhållen ändringstid %sCheck: erhållen storlek %lluCheck: erhållen storlek: %lliCheckOperationAllowed: tillåten p.g.a. matchande scopesCheckOperationAllowed: tillåten p.g.a. saknade inställnings-scopesCheckOperationAllowed: tillåten för TLS-förbindelseCheckOperationAllowed: inställnings-scopes: %sCheckOperationAllowed: inställningar saknasCheckOperationAllowed: ingen stödd identitet funnenCheckOperationAllowed: token-scopes matchar inte begärda scopesCheckOperationAllowed: token-scopes: %sKontrollerar: %sKontrollerar URS returnerad av SRM: %sKontrollerar cache igenKontrollerar cacheåtkomsträttigheter: DN: %sKontrollerar cacheåtkomsträttigheter: VO: %sKontrollerar cacheåtkomsträttigheter: VOMS attr: %sKontrollerar om %s finnsLetar efter suspenderade slutpunkter som ska startas.Kontrollerar replika %sKontrollerar att källfil är närvarandeChecksumma: %sChecksumma stämmer inte överensChecksumma stämmer inte överens mellan beräknad checksumma (%s) och källans checksumma %sBeräknad checksumma %s stämmer inte överens med checksumma rapporterad av servern %sChecksumma stämmer inte överens mellan beräknad checksumma %s och källans checksumma %sBeräknad/tillhandahållen överföringschecksumma %s stämmer inte överens med checksumma rapporterad av SRM-destinationen (%s)Checksumma stämmer inte överens mellan checksumma given som metaalternativ (%s:%s) och beräknad checksumma(%s)Checksumma ej beräknadChecksumma för %s är inte tillgängligTyp av checksumma från SRM (%s) och beräknad/tillhandahållen checksumma (%s) är olika, kan inte jämföraTyp av checksumma för källa och beräknad checksumma är olika, kan inte jämföraTyp av checksumma som returnerades av servern skiljer sig från den begärda typen, kan inte jämföraTyp av checksumma för index och replika är olika, hoppar över jämförelseBarnprocessmonitorering barnprocess %d avslutadesBarnprocessmonitorering släpper övergiven barnprocess %d (%d)Barnprocessmonitorering fel: %iBarnprocessmonitorering internt kommunikationsfelBarnprocessmonitorering kick upptäcktBarnprocessmonitorering borttappad barnprocess %d (%d)Barnprocessmonitorering signal upptäcktBarnprocessmonitorering stderr är stängdBarnprocessmonitorering stdin är
stängdBarnprocessmonitorering stdout är stängdBarnprocess har redan startatsKlassnamn: %sRensar upp efter misslyckande: tar bort %sKlientkedjan har ingen ingÃ¥ngspunktKlientförbindelsen har ingen ingÃ¥ngspunktKlientsidans MCCer har laddats inStängdes framgÃ¥ngsriktStänger förbindelseStänger förbindelse till SQLite-bokföringsdatabasStängning kan ha misslyckatsStänger läskanalStänger skrivkanalInsamlade felet är: %sSamlar in jobbinformation (A-REX REST-jobb).Kommande sändsKommando: %sKomponent %s(%s) kunde inte skapasKomponent har inget id-attribut definieratKomponent har inget namnattribut definieratKomponentens %s(%s) nästa har inget id-attribut definieratBeräkningsslutpunkt %s (typ %s) lagd till i listan för insändningsmäklingBeräkningstjänst:Beräkningstjänst: %sComputingShare (%s) explicit avvisadComputingShareName för ExecutionTarget (%s) är inte definieratInställnings-klass är inte ett objektInställningar (%s) har laddats inInställningsfelExempel-inställningsfil skapades (%s)Inställningsfil kan inte läsasInställningsfil är trasig - blocknamn slutar inte med ]: %sInställningsfil är trasig - blocknamn är för kort: %sInställningfil inte angivenInställningsfil inte angiven i ConfigBlockInställningsfil att ladda inInställningarnas rotelement är inte Motstridiga autentiseringstyper angivna.Motstridiga delegeringstyper angivna.Kopplar upp mot leveranstjänst pÃ¥ %sFörbindelse frÃ¥n %s: %sKontaktar VOMS-server (med namn %s): %s pÃ¥ port: %sInnehÃ¥ll: %sKonvertering misslyckades: %sKopiering misslyckades: %sKunde inte fÃ¥ lÃ¥s pÃ¥ metafil %sMisslyckades med att koppla upp mot tjänst %s: %sKunde inte konvertera inkommande nyttolast!Kunde inte konvertera nyttolast!Kunde inte konvertera slcs-attributvärdet (%s) till en URL-instans i inställningsfilen (%s)Kunde inte skapa SOAP nyttolast!Kunde inte skapa länk till lÃ¥sfil %s eftersom den redan existerarKunde inte skapa lÃ¥sfil %s eftersom den redan existerarKunde inte skapa temporär fil "%s"Kunde inte bestämma typ av inställningar eller inställningar är tomKunde inte bestämma sessionskatalog frÃ¥n filnamn %sKunde inte bestämma servers versionKunde inte hitta en lämplig leveranstjänst, tvingar lokal överföringKunde inte hitta checksumma: %sKunde inte hitta inladdningsbar modul med namn %s (%s)Kunde inte hitta inladdningsbar modul med namn %s och %s (%s)Kunde inte hitta inladdningsbar modulbeskrivning med namn %sKunde inte hitta inladdningsbar modulbeskrivning med namn %s eller typ %sKunde inte hitta matchande RSE till %sKunde inte hämta checksumma %s: %sKunde inte hantera checksumma %s: hoppar över kontroll av checksummaKan inte hantera slutpunkt %sKunde inte ladda in inställningar (%s)Kunde inte lokalisera modulen %s pÃ¥ följande sökvägar:Kan inte göra ny överföringsbegäran: %s: %sKunde inte erhÃ¥lla information om källa: %sKunde inte öppna fil %s för läsning: %sKunde inte läsa datastaginginställningar frÃ¥n %sKunde inte göra stat pÃ¥ filen %s: %sKunde inte validera meddelande!Kunde inte hantera certifikatfil: %sKunde inte tolka benchmark-XML: %sKunde inte verifiera tillgänglighet för CRLLand: %sSkapade RSA-nyckel, fortsätter med begäranSkapade post för JWT-utfärdare %sSkapar en delegerings-SOAP-klientSkapar en http-klientSkapar en pdp-tjänste-klientSkapar en echo-klientSkapar och skickar begäranSkapar buffer: %lli x %iSkapar klientgränssnittSkapar klientsidokedjanSkapar delegeringsreferens till ARC delegeringstjänstAtt skapa delegering till CREAM delegeringstjänst misslyckadesSkapar delegering till CREAM delegeringstjänstAtt skapa delegering till 
CREAM delegeringstjänst misslyckadesSkapar katalog %sSkapar katalog: %sSkapar tjänstesidokedjanReferensens livslängd gÃ¥r ut %sReferenshanteringsundantag: %sReferens har inte initieratsReferenser lagrade i temporär fil %sBehandling av kritiskt VOMS-attribut misslyckadesNuvarande jobb i systemet (PREPARING till FINISHING) per-DN (%i poster)Nuvarande överföring MISSLYCKADES: %sNuvarande överföring slutfördDCAU misslyckadesDCAU misslyckades: %sDH-parametrar tillämpadeDN %s matchar inte %sDN %s är cachat och är giltigt till %s för URL %sDN %s är cachat men dess giltighetstid har gÃ¥tt ut för URL %sDN är %sDTR %s avbrötsDTR %s kunde inte avbrytasDTR %s misslyckades: %sDTR %s avslutades framgÃ¥ngsriktDTR %s avslutades med tillstÃ¥nd %sDTR %s begärd att avbrytas men ingen aktiv överföringDTR %s pÃ¥gÃ¥r fortfarande (%lluB överförda)DTR %s har redan avbrutitsDTR-generator processerade: %d jobb att avbryta, %d DTRer, %d nya jobbDTR-generator väntat pÃ¥ att processera: %d jobb att avbryta, %d DTRer, %d nya jobbDTR är redo att överföra, flyttar till leveransköDTR-generator fick begäran att avsluta null-jobbDTR-generator fick frÃ¥ga om null-jobbDTR-generator fick förfrÃ¥gan att kontrollera filer för null-jobbDTR-generator kör inteDTR-generator fick frÃ¥ga om null-jobbDTR-generator fick begäran att ta bort länkar för null-jobbDTR-generator fick begäran att att processera null-jobbDTR-generator fick begäran att ta bort null-jobbDTR-generator blev tillsänd null-jobbDTRer kör fortfarande för jobb %sDemonisering av fork misslyckades: %sDatakanal: %d.%d.%d.%d:%dDatakanal: [%s]:%dDataleveransloop avslutadesDataöverföring avbrutenDataöverföring avbruten: %sData var redan cachatDataleverans-logg-svans: %sDataleverans: %sDataMove::Transfer: ingen checksumma beräknad för %sDataMove::Transfer: använder tillhandahÃ¥llen checksumma %sDataMove::Transfer: använder tillhandahÃ¥llen checksumma %s:%sDataMove::Transfer: kommer att beräkna %s-checksummaDataMover: nästa cykelDataMover: destinationen har slut pÃ¥ försök - avslutaDataMover: begärt att inte försöka igen - avslutaDataMover: källan har slut pÃ¥ försök - avslutaDataMover::Transfer : startar ny trÃ¥dDataMover::Transfer: försöker förstöra/skriva över destination: %sDataPointGFAL::write_file fick position %d och offset %d, mÃ¥ste göra seekDataPointXrootd::write_file fick position %d och offset %d, mÃ¥ste göra seekDataStagingDelivery avslutades med kod %iFörvald CPU-tid: %sFörvald INTERNAL klient-konstruktorFörvald lagringstjänst: %sFörvald mäklare (%s) är inte tillgänglig. 
När %s används mÃ¥ste en mäklare anges explicit (alternativ -b).Förvald klocktid: %sDelegateCredentialsInit misslyckadesDelegateProxy misslyckadesDelegerad referens frÃ¥n delegeringstjänst: %sDelegerad referens-identitet: %sDelegerings-ID: %sDelegeringsauktorisering misslyckadesDelegeringsauktorisering lyckadesDelegering-getProxyReq-begäran misslyckadesDelegeringshanteraren har ej ställts inDelegeringshanteraren med delegeringsmottagarroll slutarDelegeringshanterare med delegeringsmottagarroll börjar behandlaDelegeringshanteraren med delegeringssändarroll börjar behandlaDelegering-putProxy-begäran misslyckadesDelegeringsroll stöds inte: %sDelegeringstjänst: %sDelegering till ARCs delegeringstjänst misslyckadesDelegering till gridsites delegeringstjänst misslyckadesDelegeringstyp stöds inte: %sDelegationStore: PeriodicCheckConsumers misslyckades med att ta bort gammal delegering %s - %sDelegationStore: PeriodicCheckConsumers misslyckades med att Ã¥teruppta iteratorDelegationStore: TouchConsumer misslyckades med att skapa fil %sBorttagningfelBorttagen men har fortfarande platser pÃ¥ %sLeverans mottog ny DTR %s med källa: %s, destination: %sLeveranstjänst pÃ¥ %s kan kopiera frÃ¥n %sLeveranstjänst pÃ¥ %s kan kopiera till %sDestinations-URL saknasDestinations-URL stöds inte: %sDestinations-URL är inte giltig: %sDestinationen existerar redanDestinationsfil finns i cacheDestination är inte en giltig URLDestination är inte indextjänst, hoppar över replikaregistreringDestination är inte redo, kommer att vänta %u sekunderDestination: %sKatalog %s tillÃ¥ten pÃ¥ tjänst %sKatalog %s borttagen framgÃ¥ngsriktKatalog %s som ska lagra bokföringsdatabasen har skapats.Kataloglistning MisslyckadesKatalog med betrodda CA har inte angivits eller kan inte hittas; Använder nuvarande katalog som CA-katalogKatalogstorleken är större än %i filer, kommer att behöva anropa flera gÃ¥ngerKatalogstorleken är för stor för att lista i ett anrop, kommer att behöva anropa flera gÃ¥ngerKatalog: %sSortera med användarskapad python-mäklareStöder ej framtida reserveringStöder ej massinsändningStöder ej preemptionLaddar ner jobb: %sDriftstopp slutar: %sDriftstopp börjar: %sVisning av jobbeskrivning avbruten: kan inte ladda in mäklare %sDuplicerad replika hittad i LFC: %sEACCES-fel vid öppnande av lÃ¥sfil %s: %sECDH-parametrar tillämpadeEPSV misslyckadesEPSV misslyckades: %sFel: %sFel: Misslyckades med att hämta informationFel: Misslyckades med att hämta information frÃ¥n följande slutpunkter:Fel: Misslyckades med att skriva jobbinformation till fil (%s)Fel: Jobbinsändning avbröts eftersom inga resurser returnerade nÃ¥gon informationFel: En eller flera jobbeskrivningar sändes inte in.Fel: Kunde inte ladda in mäklare %sFel: VOMS-inställningsfil %s innehÃ¥ller för lÃ¥ng(a) rad(er). Max längd som stöds är %i tecken.Fel: VOMS-inställningsfil %s innehÃ¥ller för mÃ¥nga rader. Max antal som stöds är %i.Fel: VOMS-inställningsrad innehÃ¥ller för mÃ¥nga token. Förväntade 5 eller 6. Raden är: %sFel: misslyckades med att läsa fil %s vid skanning av VOMS-inställningar.Fel: filträd är för djupt vid skanning av VOMS-inställningar. 
Max tillÃ¥ten nestning är %i.EchoService (python) 'Process' anropadEchoService (python) konstruktor anropadEchoService (python) destruktor anropadEchoService (python) fick: %s EchoService (python) har prefix %(prefix)s och suffix %(suffix)sEchoService (python) request_namespace: %sEchoService (python) trÃ¥dtest startarEchoService (python) trÃ¥dtest, iteration %(iteration)s %(status)sElementet "%s" i profilen ignoreras: värdet pÃ¥ "inidefaultvalue"-attributet kan inte anges när "inisections"- and "initag"-attributen inte angivits.Elementet "%s" i profilen ignoreras: värdet pÃ¥ "inisections"-attributet kan inte vara en tom sträng.Elementet "%s" i profilen ignoreras: värdet pÃ¥ "initag"-attributet kan inte vara en tom sträng.Elementet "%s" i profilen ignoreras: värdet pÃ¥ "initype"-attributet kan inte vara en tom sträng.Ingen data för JWT-utfärdare %sTomt filnamn returnerat frÃ¥n FileCacheTom indatanyttolastTom jobbeskrivnings-källsträngTom nyttolast!Tom strängHittade inte slutet pÃ¥ kommentarHittade inte slutet pÃ¥ sträng i dubbla citatteckenHittade inte slutet pÃ¥ sträng i enkla citatteckenHittade inte slutet pÃ¥ sträng i användardefinierade citattecken (%s)Slutpunktsinformation:Ã…tkomstfel för cachefil %s: %sFel när kommunikationsgränssnitt lades till i %s. Kanske kör redan en annan instans av A-REX.Fel när kommunikationsgränssnitt lades till i %s. Kanske är Ã¥tkomsträttigheter inte lämpliga.Fel vid skapande av cacheFel vid skapande av cache. Gamle lÃ¥s kan finnas kvar.Fel vid skapande av katalog %s: %sFel vid skapande av katalog: %sFel vid skapandet av lÃ¥sfil %s: %sFel vid skapande av nödvändiga kataloger för %sFel vid skapande av nödvändiga kataloger: %sFel vid skapandet av temporär fil %s: %sFel upptäckt när denna AC tolkadesFel eftersom den tillhandahÃ¥llna referensens livstid har gÃ¥tt utFel under filvalidering. Kan inte göra stat pÃ¥ fil %s: %sFel under filvalidering. Lokal filstorlek %llu stämmer inte överens med källans filstorlek %llu för fil %sFel vid utvärdering av profilFel vid extrahering av RSE för %sFel frÃ¥n SQLite: %sFel frÃ¥n SQLite: %s: %sFel vid hämtning av information frÃ¥n statvfs för sökväg %s: %sFel vid erhÃ¥llande av fillista (i list)Fel vid cacheprocessering, kommer att försöka igen utan cachningFel i cachningsprocedurFel i lÃ¥sfil %s, trots att länkning inte returnerade ett felFel vid initiering av X509-lagerFel vid initiering av delegeringsdatabas i %s. Kanske Ã¥tkomsträttigheter inte är lämpliga. 
Returnerat fel är: %s.Fel vid länkning av cachefil till %s.Fel vid länkning av temporär fil %s till lÃ¥sfil %s: %sFel vid listning av lÃ¥sfil %s: %sFel vid inladdning av genererade inställningarFel vid uppslagning av attribut för cachemetafil %s: %sFel vid uppslagning av spacetoken som matchar beskrivning %sFelnummer i lager-kontext: %iFel vid öppnande av bokföringsdatabasFel vid öppnande av lÃ¥sfil %s i initial check: %sFel vid öppnande av metafil %sFel vid öppnande av metafil för skrivning: %sFel vid tolkning av det internt tilldelade executables-attributet.Fel vid pingning av leveranstjänst pÃ¥ %s: %s: %sFel vid läsning av information frÃ¥n fil %s:%sFel vid läsning av lÃ¥sfil %s: %sFel vid läsning av metafil %s: %sFel vid registrering av replika, hoppar till slutet av datastagingFel vid borttagande av cachefil %s: %sFel vid byte av UIDFel vid utmatning av utdatanyttolastFel när publik nyckel extraheras frÃ¥n begäranFel vid inladdning av tilläggsinställningsfilen: %sFel vid inladdning av tilläggsinställningsfilen: %s pÃ¥ rad: %dFel vid läsande av katalog %s: %sFel med cacheinställningarFel med cacheinställningar: %sFormatteringsfel i lÃ¥sfil %sFel med hjärtslagsfil: %sFel under destinationens efter-överförings-hantering: %sFel med källfil, hoppar till nästa replikaFel vid skrivning av raw certifikatFel vid skrivning av SRM-infofil %sFel vid skrivning till lÃ¥sfil %s: %sFel: kan inte öppna policyfil: %sFel: policy-plats: %s är inte en vanlig filErrorDescriptionFörväntad medelväntetid: %sFörväntad värsta väntetid: %sUtvärdera operator =: vänster frÃ¥n kontext: %sUtvärdera operator =: vänster: %sUtvärdera operator =: höger: %sUtvärderare stöder inte laddningsbara kombinerande algoritmerUtvärderare stöder inte den angivna kombinerande algoritmen - %sUtvärderare för Arc-PDP laddades inteUtvärderare för GACL-PDP laddades inteUtvärderare för XACML-PDP laddades inteExempelinställningar (%s) skapades inte.Undantag under försök att starta extern process: %sFör mycket data mottaget när filÃ¥tkomst kontrolleradesExkluderar replika %s som matchar mönster !%sExekveringstarget pÃ¥ beräkningstjänst: %sExekveringsmiljö stöder inte inkommande förbindelserExekveringsmiljö stöder inte utgÃ¥ende förbindelserExekveringsmiljö är en fysisk maskinExekveringsmiljö är en virtuell maskinExekveringsmiljö stöder inkommande förbindelserExekveringsmiljö stöder utgÃ¥ende förbindelserExecutionTarget-klass är inte ett objektAvslutar generator-trÃ¥dAvslutar jobbprocesseringstrÃ¥dSaknar kommando bland argumentenSaknar kommando och URLFörväntade kommando modul-namn bland argumenten<Förväntade kommando modul-sökväg bland argumentenFörväntade att modul, kommando och URL tillhandahÃ¥llitsSaknar URL bland argumentenUttryck misslyckades med att matchasUttryck matcharExtern begäran om uppmärksamhet %sExtraherade smeknamn %s frÃ¥n referenser att använda som Rucio-kontoFATAL, ERROR, WARNING, INFO, VERBOSE eller DEBUGMisslyckades med att tilldela värdnamnstilläggMisslyckades med att allokera minne för handtagMisslyckades med autentiseringMisslyckades med autentisering: %sMisslyckades med att kontrollera databas (%s)Misslyckades med att kontrollera källreplikaMisslyckades med att kontrollera källreplika %s: %sMisslyckades med att kontrollera källreplikor: %sMisslyckades med att rensa upp destination %sMisslyckades med att initiera inställningarMisslyckades med att initiera inställningar.Misslyckades med att ansluta till %s:%dMisslyckades med att förstöra handtag: %s. 
Kan inte hantera en sÃ¥dan situation.Misslyckades med att ladda ned %s till %sMisslyckades med att ladda ned %s till %s, destinationen existerar redanMisslyckades med att ladda ned %s till %s, kunde inte ta bort existerande destinationMisslyckades i globus_cond_initMisslyckades i globus_ftp_control_handle_initMisslyckades i globus_mutex_initMisslyckades med att länka cachefil till %sMisslyckades med att lokalisera referenserMisslyckades med att slÃ¥ upp attribut för cachad fil: %sMisslyckades med att förbereda jobbeskrivningMisslyckades med att behandla användarmappningskommando: %s %sMisslyckades med att läsa kontrollkatalog: %sMisslyckades med att läsa kontrollkatalog: %s: %sMisslyckades med att läsa dataMisslyckades med att läsa fil %sMisslyckades med att läsa fillistaMisslyckades med att läsa lokal informationMisslyckades med att hämta information om jobb: %sMisslyckades med att hämta jobbeskrivning för jobb: %sMisslyckades med att köra e-postsändareMisslyckades med ange filägare: %sMisslyckades med att skicka in jobbeskrivningMisslyckades med att avbryta överföring av ftp-fil: %sMisslyckades med att acceptera SSL-förbindelseMisslyckades med att acceptera förbindelsebegäranMisslyckades med att acceptera delegeringMisslyckades med att acceptera ny fil/destinationMisslyckades med att komma Ã¥t proxy för givet jobb-id %s pÃ¥ %sMisslyckades med att förvärva A-REX inställningarMisslyckades med att erhÃ¥lla delegeringskontextMisslyckades med att fÃ¥ lÃ¥s pÃ¥ cachemetafil %sMisslyckades med erhÃ¥lla lÃ¥s för fil %sMisslyckades med att erhÃ¥lla källa: %sMisslyckades med att aktivera jobbprocesseringsobjekt, avslutar grid-manager-trÃ¥dMisslyckades med att lägga till '%s' URL (gränssnittstyp %s) till bokföringsdatabasens Endpoints-tabellMisslyckades med att lägga till '%s' till bokföringsdatabasen %s-tabellMisslyckades med att lägga till Independent-OIDMisslyckades med att lägga till RFC-proxy-OIDMisslyckades med att lägga till VOMS-AC-tillägg. 
Din proxy kan vara ofullständig.Misslyckades med att lägga till VOMS-AC-sekvens-OIDMisslyckades med att lägga till anyLanguage-OIDMisslyckades med att lägga till certifikat och nyckelMisslyckades med att lägga till certifikat till token eller databasMisslyckades med att lägga till tillägg till referenstilläggMisslyckades med att lägga till inheritAll-OIDMisslyckades med att lägga till utfärdarens tillägg till proxynMisslyckades med att lägga till nyckelanvändningstilläggMisslyckades med att lägga till proxycertifikatsinformationstilläggMisslyckades med att lägga till VOMS-AC-tilläggMisslyckades med att allokera certifikat-tillitMisslyckades med att allokera minne för certifikatdataMisslyckades med att allokera minne för certifikatsubjekt vid policymatchning.Misslyckades med allokera p12-kontextMisslyckades med att tillämpa DH-parametrarMisslyckades med att tillämpa ECDH-parametrarMisslyckades med att tillämpa lokal adress pÃ¥ dataförbindelseMisslyckades med att autentisera SAML-token inuti inkommande SOAPMisslyckades med att autentisera användarnamnstoken inuti inkommande SOAPMisslyckades med att autentisera X509-token inuti inkommande SOAPMisslyckades med att autentisera till PKCS11 slot %sMisslyckades med att autentisera till nyckeldatabasMisslyckades med att autentisera till token %sMisslyckades med att binda socket för %s:%s(%s): %sMisslyckades med att binda socket för TCP-port %s(%s): %sMisslyckades med att anropa PORT_NewArenaMisslyckades med att avbryta överföringsbegäran: %sMisslyckades med att avbryta: %sMisslyckades med att avbryta: inget SOAP-svarMisslyckades med att konvertera till PayloadSOAP frÃ¥n inkommande nyttolastMisslyckades med att konvertera till PayloadSOAP frÃ¥n utgÃ¥ende nyttolastMisslyckades med att ändra mappningsstack-behandlingspolicy i: %s = %sMisslyckades med att ändra ägare av symbolisk länk %s till %iMisslyckades med att ändra ägare pÃ¥ temporär proxy pÃ¥ %s till %i:%i: %sMisslyckades med att ändra Ã¥tkomsträttigheter pÃ¥ %s: %sMisslyckades med att ändra Ã¥tkomsträttigheter eller ägare för hÃ¥rd länk %s: %sMisslyckades med att kontrollera %sMisslyckades med att ta bort fil %s: %sMisslyckades med att kommunicera med delegeringstjänst.Misslyckades med att slutföra skrivning till destinationMisslyckades med att koppla upp mot %s(%s):%iMisslyckades med att koppla upp mot %s(%s):%i - %sMisslyckades med att ansluta till %s:%dMisslyckades med att konvertera ASCII till DERMisslyckades med att konvertera EVP_PKEY till PKCS8Misslyckades med att konvertera GSI-referens till GSS-referens (major: %d, minor: %d)Misslyckades med att konvertera GSI-referens till GSS-referens (major: %d, minor: %d):%s:%sMisslyckades med att konvertera PrivateKeyInfo till EVP_PKEYMisslyckades med att konvertera säkerhetsinformation till ARC-policyMisslyckades med att konvertera säkerhetsinformation till ARC-begäranMisslyckades med att konvertera säkerhetsinformation till XACML-begäranMisslyckades med att kopiera %s: %sMisslyckades med att kopiera fil %s till %s: %sMisslyckades med att kopiera indatafil: %s till sökväg: %sMisslyckades med att skapa %s, försöker skapa föräldrakatalogerMisslyckades med att skapa DTR-dumpningstrÃ¥dMisslyckades med att skapa OTokens säkerhetsattributMisslyckades med att skapa OpenSSL-objekt %s %s - %u %sMisslyckades med att skapa SOAP-behÃ¥llareMisslyckades med att skapa cachekataloger för %sMisslyckades med att skapa cachekatalog för fil %s: %sMisslyckades med att skapa cachemetafil %sMisslyckades med att skapa certifikatbegäranMisslyckades med att skapa 
kontrollkatalog %sMisslyckades med att skapa katalogMisslyckades med att skapa katalog %sMisslyckades med att skapa katalog %s! Hoppar över jobb.Misslyckades med att skapa katalog %s: %s"Misslyckades med att skapa post för JWT-utfärdare %sMisslyckades med att skapa export-kontextMisslyckades med att skapa fil %s: %sMisslyckades med att skapa hÃ¥rd länk frÃ¥n %s till %s: %sMisslyckades med att skapa indata-SOAP-behÃ¥llareMisslyckades med att skapa jobb i %sMisslyckades med att skapa nyckel- eller certifikat-safeMisslyckades med att skapa länk: %s. Kommer inte att använda mappad URLMisslyckades med att skapa certifikatkedjelängdMisslyckades med att skapa policy-sprÃ¥kMisslyckades med att skapa socket för förbindelse till %s(%s):%d - %sMisslyckades med att skapa socket för att lyssna pÃ¥ %s:%s(%s): %sMisslyckades med att skapa socket för att lyssna pÃ¥ TCP-port %s(%s): %sMisslyckades med att skapa subjektnamnMisslyckades med att skapa symbolisk länk frÃ¥n %s till %s: %sMisslyckades med att skapa temporär proxy pÃ¥ %s: %sMisslyckades med att skapa trÃ¥dMisslyckades med att skapa xrootd-kopierings-jobb: %sMisslyckades med att skapa/öppna fil %s: %sMisslyckades med att avkoda tillitssträngMisslyckades med att delegera referenser till server - %sMisslyckades med att delegera referenser till server - hittade inget delegeringsgränssnittMisslyckades med att ta bort %sMisslyckades med att ta bort %s, men kommer fortfarande att försöka kopieraMisslyckades med att ta bort certifikatMisslyckades med att ta bort leverans-objekt eller borttagning avbröts pÃ¥ grund av timeoutMisslyckades med att ta bort destination, nytt försök kan misslyckasMisslyckades med att ta bort logisk filMisslyckades med att ta bort metainformationMisslyckades med att ta bort fysisk filMisslyckades med att ta bort privat nyckelMisslyckades med att ta bort privat nyckel och certifikatMisslyckades med att ta bort replika %s: %sMisslyckades med att ta bort gammal cachefil %s: %sMisslyckades med att duplicera X509-strukturMisslyckades med att duplicera tilläggMisslyckades med att aktivera IPv6Misslyckades med att koda PKCS12Misslyckades med att koda certifikatMisslyckades med att koda certifikatbegäran med DER-formatMisslyckades med att etablera SSL-förbindelseMisslyckades med att etablera förbindelse: %sMisslyckades med att utvärdera uttryck: %sMisslyckades med att exportera privat nyckelMisslyckades med att extrahera VOMS-smeknamn frÃ¥n proxyMisslyckades med att extrahera referensinformationMisslyckades med att hämta data frÃ¥n %s bokföringsdatabastabellMisslyckades med att hämta data frÃ¥n bokföringsdatabasens Endpoints-tabellMisslyckades med att slutföra läsning frÃ¥n källaMisslyckades med att slutföra skrivning till destinationMisslyckades med att hitta CA-certifikatMisslyckades med att hitta certifikat och/eller privat nyckel eller filer har olämpliga Ã¥tkomsträttigheter eller ägare.Misslyckades med att hitta certifikat med smeknamn: %sMisslyckades med att hitta tilläggMisslyckades med att hitta utfärdarcertifikat för proxycertifikatMisslyckades med att hitta metadatainformation för %s för att bestämma borttagande av fil eller katalogMisslyckades med att generera EC-nyckelMisslyckades med att generera SAML-token för utgÃ¥ende SOAPMisslyckades med att skapa användarnamnstoken för utgÃ¥ende SOAPMisslyckades med att skapa X509-token för utgÃ¥ende SOAPMisslyckades med att generera publik/privat nyckelparMisslyckades med att fÃ¥ DN-information frÃ¥n .local-fil för jobb %sMisslyckades med att erhÃ¥lla TCP-socket-alternativ för 
förbindelse till %s(%s):%d - timeout kommer inte att fungera - %sMisslyckades med att erhÃ¥lla certifikat frÃ¥n certifikatfilMisslyckades med att erhÃ¥lla referensMisslyckades med att hämta ftp-filMisslyckades med att initiera GFAL2-parameter-handtag: %sMisslyckades med att initiera ny GFAL2-kontext: %sMisslyckades med att fÃ¥ medellast: %sMisslyckades med att erhÃ¥lla privat nyckelMisslyckades med att erhÃ¥lla publik nyckelMisslyckades med att erhÃ¥lla publik nyckel frÃ¥n RSA-objektMisslyckades med att erhÃ¥lla publik nyckel frÃ¥n X509-objektMisslyckades med att identifiera grid-managerns inställningsfilMisslyckades med att importera certifikat frÃ¥n fil: %sMisslyckades med att importera privat nyckelMisslyckades med att importera privat nyckel frÃ¥n fil: %sMisslyckades med att initiera LCASMisslyckades med att initiera LCMAPSMisslyckades med att initiera OpenSSL-biblioteketMisslyckades med att initiera PKCS12-fil: %sMisslyckades med att initiera S3 till %s: %sMisslyckades med att initiera X509-strukturMisslyckades med att initiera bokföringsdatabasMisslyckades med att initiera tilläggsmedlem för referensMisslyckades med att initiera Pythons huvudtrÃ¥dMisslyckades med att initiera cacheMisslyckades med att initiera klientförbindelseMisslyckades med att initiera delegeringsreferenserMisslyckades med att sätta in AAR i databasen för jobb %sMisslyckades med att begränsa socket till IPv6 pÃ¥ %s:%s - kan orsaka fel för IPv4 pÃ¥ samma portMisslyckades med att begränsa socket till IPv6 pÃ¥ TCP-port %s - kan orsaka fel för IPv4 pÃ¥ samma portMisslyckades med att lyssna pÃ¥ %s:%s(%s): %sMisslyckades med att lyssna pÃ¥ TCP-port %s(%s): %sMisslyckades med att ladda in klientinställningarMisslyckades med att hämta tilläggssektion: %sMisslyckades med att ladda in grid-managerns inställningsfilMisslyckades med att ladda in grid-managerns inställningsfil frÃ¥n %sMisslyckades med att ladda in grid-managerns inställningsfilMisslyckades med att ladda in plugin för URL %sMisslyckades med att ladda in privat nyckelMisslyckades med att ladda in tjänsteinställningarMisslyckades med att ladda in tjänsteinställningar frÃ¥n nÃ¥gon förvald inställningsfilMisslyckades med att ladda in tjänsteinställningar frÃ¥n fil %sMisslyckades med att ladda in tjänstesidans MCCerMisslyckades med att lÃ¥sa arccredential-biblioteket i minnetMisslyckades med lÃ¥sa arccrypto-biblioteket i minnetMisslyckades med att skapa symbolisk länk %s till %s : %sMisslyckades med att flytta %s till %s: %sMisslyckades med att flytta fil %s till %sMisslyckades med ny arenaMisslyckades med att erhÃ¥lla OpenSSL-identifierare för %sMisslyckades med att erhÃ¥lla antal överförda byte: %sMisslyckades med att erhÃ¥lla delegeringslÃ¥s för att ta bort föräldralösa lÃ¥sMisslyckades med att erhÃ¥lla information om filMisslyckades med att erhÃ¥lla listning frÃ¥n FTP: %sMisslyckades med att erhÃ¥lla lokal adress för %s:%s - %sMisslyckades med att erhÃ¥lla lokal adress för port %s - %sMisslyckades med att erhÃ¥lla lÃ¥s pÃ¥ cachefil %sMisslyckades med att erhÃ¥lla stat frÃ¥n FTP: %sMisslyckades med att öppna %s för läsning: %sMisslyckades med att öppna %s, försöker skapa föräldrakatalogerMisslyckades med att öppna datakanalMisslyckades med att öppna katalog %s: %sMisslyckades med att öppna fil %sMisslyckades med att öppna fil med DH-parametrar för läsningMisslyckades med att öppna hjärtslagsfil %sMisslyckades med att öppna indata-certifikatfil %sMisslyckades med att öppna loggfil: %sMisslyckades med att öppna utdatafil '%s'Misslyckades med att öppna 
p12-filMisslyckades med att öppna stdio-kanal %dMisslyckades med att öppna stdio-kanal %sMisslyckades med att skriva ut certifikatbegäran i ASCII-formatMisslyckades med att skriva ut certifikatbegäran i DER-formatMisslyckades med att tolka HTTP-huvudMisslyckades med att tolka Rucio-information: %sMisslyckades med att tolka Rucio-svar: %sMisslyckades med att tolka SAML-token frÃ¥n inkommande SOAPMisslyckades med att tolka användarnamnstoken frÃ¥n inkommande SOAPMisslyckades med att tolka VOMS-kommando: %sMisslyckades med att tolka X509-token frÃ¥n inkommande SOAPMisslyckades med att tolka certifikatbegäran frÃ¥n CSR-fil %sMisslyckades med att tolka inställningsfil %sMisslyckades med att tolka uttryckMisslyckades med att tolka begärd VOMS-livstid: %sMisslyckades med att tolka begärt VOMS-serverportnummer: %sMisslyckades med att efterregistrera destination: %sMisslyckades med att förregistrera destination: %sMisslyckades med förallokera utrymme for %sMisslyckades med att förbereda destinationMisslyckades med att förbereda destination: %sMisslyckades med att förbereda jobbeskrivningMisslyckades med att förbereda källaMisslyckades med att förbereda källa: %sMisslyckades med att förregistrera destination: %sMisslyckades med att processera AREX-inställningsfil %sMisslyckades med att behandla VOMS-inställningar eller hittade inga lämpliga inställningsrader.Misslyckades med att processera inställningar i %sMisslyckades med att behandla jobb: %sMisslyckades med att behandla jobb: %s - %s %sMisslyckades med att behandla jobb - felsvar: %sMisslyckades med att behandla jobb - misslyckades med att tolka svarMisslyckades med att behandla jobb - felaktigt svar: %uMisslyckades med att processera säkerhetsattribut i TLS-MCC för inkommande meddelandeMisslyckades med att frÃ¥ga efter AAR-databas-ID för jobb %sMisslyckades med att frÃ¥ga föräldra-DIDer: %sMisslyckades med att frÃ¥ga om tillstÃ¥nd: %sMisslyckades med att läsa attribut %x frÃ¥n privat nyckel.Misslyckades med att läsa cachemetafil %sMisslyckades med att läsa certifikatfil: %sMisslyckades med att läsa data för JWT-utfärdare %sMisslyckades med att läsa data frÃ¥n indatabufferMisslyckades med att läsa databasschemafil pÃ¥ %sMisslyckades med att läsa fil %sMisslyckades med att läsa fil med DH-parametrarMisslyckades med att läsa indata-certifikatfilMisslyckades med att läsa objekt: %s: %sMisslyckades med att läsa objekt: %s: %s; %sMisslyckades med att läsa privat-nyckelfil: %sMisslyckades med att läsa proxy fil: %sMisslyckades med att läsa begäran frÃ¥n en filMisslyckades med att läsa begäran frÃ¥n en strängMisslyckades med att registrera destinationsreplika: %sMisslyckades med att registrera ny fil/destination: %sMisslyckades med att registrera plugin för tillstÃ¥nd %sMisslyckades med att frigöra GSS-referens (major: %d, minor: %d):%s:%sMisslyckades med att frigöra slutförd begäranMisslyckades med att frigöra lÃ¥s pÃ¥ cachefil %sMisslyckades med att frigöra lÃ¥s pÃ¥ fil %sMisslyckades med att ta bort .meta-fil %s: %sMisslyckades med att ta bort alla instanserMisslyckades med att ta bort cache per-jobb-katalog %s: %sMisslyckades med att ta bort existerande hÃ¥rd länk pÃ¥ %s: %sMisslyckades med att ta bort existerande symbolisk länk pÃ¥ %s: %sMisslyckades med att ta bort fil %s: %sMisslyckades med att ta bort instansMisslyckades med att ta bort lÃ¥s pÃ¥ %s. 
Manuell intervention kan behövasMisslyckades med att ta bort gammal lÃ¥sfil %s: %sMisslyckades med att ta bort temporär proxy %s: %sMisslyckades med byta namn pÃ¥ URLMisslyckades med att slÃ¥ upp %sMisslyckades med att slÃ¥ upp %s (%s)Misslyckades med att slÃ¥ upp destination: %sMisslyckades med att slÃ¥ upp källa: %sMisslyckades med att hämta tillämpningsdata frÃ¥n OpenSSLMisslyckades med att hämta länk till TLS-ström. Ytterligare policymatchning hoppas över.Misslyckades med att hämta privat nyckel för utfärdareMisslyckades med att köra Grid-Manager-trÃ¥dMisslyckades med att köra kommando: %sMisslyckades med att köra inställningstolk pÃ¥ %s.Misslyckades med att köra verttyget för att uppdatera kontrollkatalogen. Avslutningskod: %iMisslyckades med att köra extern plugin: %sMisslyckades med att sända begäran att avbryta: %sMisslyckades med att skicka innehÃ¥ll till bufferMisslyckades med att sända spÃ¥r till Rucio: %sMisslyckades med att sätta GFAL2-monitor-callback: %sMisslyckades med att sätta GFAL2-överförings-timeout, använder förval: %sMisslyckades med att sätta INTERNAL slutpunktMisslyckades med att sätta in LFC-replika: %sMisslyckades med att sätta referenser för GridFTP-överföringMisslyckades med att sätta exekverbar bit pÃ¥ fil %sMisslyckades med att sätta exekverbar bit pÃ¥ fil %s: %sMisslyckades med att sätta skriv-över-option i GFAL2: %sMisslyckades med att sätta Ã¥tkomsträttigheter pÃ¥: %sMisslyckades med att sätta signeringsalgoritmMisslyckades med att ange publik nyckel för X509-objekt genom att använda publik nyckel frÃ¥n X509_REQMisslyckades med att sätta upp referensdelegering med %sMisslyckades med att stänga av SSL: %sMisslyckades med att signera kodad certifikatdataMisslyckades med att signera certifikatbegäranMisslyckades med att signera proxycertifikatetMisslyckades med att staga file(er)Misslyckades med att starta GM-trÃ¥darMisslyckades med att starta arkiveringtrÃ¥dMisslyckades med att starta cacherensningsskriptMisslyckades med att pÃ¥börja certifikattilläggMisslyckades med att starta verktyget för att uppdatera kontrollkatalogen.Misslyckades med att starta datastaging-trÃ¥darMisslyckades med att börja lyssna pÃ¥ nÃ¥gon adress för %s:%sMisslyckades med att börja lyssna pÃ¥ nÃ¥gon adress för %s:%s(IPv%s)Misslyckades med att starta ny DTR för %sMisslyckades med att starta ny trÃ¥d för monitorering av jobbegärningarMisslyckades med att starta ny trÃ¥d: cache kommer ej att rensasMisslyckades med att börja frÃ¥ga slutpunkten pÃ¥ %sMisslyckades med att börja frÃ¥ga slutpunkten pÃ¥ %s (kunde inte skapa under-trÃ¥d)Misslyckades med att pÃ¥börja läsning frÃ¥n källa: %sMisslyckades med att starta trÃ¥d för kommunikationMisslyckades med att starta trÃ¥d för att lyssnaMisslyckades med att börja överföringsbegäran: %sMisslyckades med att pÃ¥börja skrivning till cacheMisslyckades med att pÃ¥börja skrivning till destination: %sMisslyckades med att göra stat pÃ¥ sessionskatalog %sMisslyckades med att göra stat pÃ¥ källa: %sMisslyckades med att lagra tillämpningsdataMisslyckades med att spara ftp-filMisslyckades med att sända in alla jobb.Misslyckades med att sända in alla jobb: %sMisslyckades med att sända in alla jobb: %s %sMisslyckades med att sända in alla jobb: %u %sMisslyckades med att sända in jobbMisslyckades med att byta användar-id till %d/%dMisslyckades med att avsluta LCASMisslyckades med att avsluta LCMAPSMisslyckades med att överföra dataMisslyckades med att lÃ¥sa upp fil %s: %s. 
Manuell intervention kan behövasMisslyckades med att lÃ¥sa upp fil med lÃ¥s %s: %sMisslyckades med att avregistrera förregistrerad destination %s. Du kan behöva avregistrera den manuelltMisslyckades med att avregistrera förregistrerad destination %s: %s. Du kan behöva avregistrera den manuelltMisslyckades med att avregistrera förregistrerad lfn, du kan behöva avregistrera den manuelltMisslyckades med att avregistrera förregistrerad lfn. Du kan behöva avregistrera det manuelltMisslyckades med att avregistrera förregistrerad lfn. Du kan behöva avregistrera det manuellt: %sMisslyckades med att uppdatera AAR i databasen för jobb %sMisslyckades med att uppdatera kontrollkatalog %sMisslyckades med att verifiera X509-token inuti inkommande SOAPMisslyckades med att verifiera begäranMisslyckades med att verifiera signaturen under Misslyckades med att verifiera signaturen under Misslyckades med att verifiera det signerade certifikatetMisslyckades med att skriva RTE-information för jobb %sMisslyckades med att skriva auktoriserings-token-attribut för jobb %sMisslyckades med att skriva body till utdataströmMisslyckades med att skriva dataöverföringsinformation för jobb %sMisslyckades med att skriva händelseposter för jobb %sMisslyckades med att skriva header till utdataströmMisslyckades med att skriva jobbinformation till databas (%s)Misslyckades med att skriva objekt: %s: %s; %sMisslyckades med att skriva begäran till en filMisslyckades med att skriva begäran till strängMisslyckades med att skriva det signerade EEC-certifikatet till en filMisslyckades med att skriva det signerade proxycertifikatet till en filMisslyckades med att skriva till lokal jobblista %sMisslyckades med att uppdatera klockslag pÃ¥ cachelÃ¥sfil %s för fil %s: %sMisslyckades med att ladda upp fil %s till %s: %sMisslyckades med att ladda upp lokala indatafilerMisslyckades med att avsluta läsning frÃ¥n källaMisslyckades med att avsluta skrivning till destinationMisslyckades under läsning frÃ¥n källaMisslyckades under överföring av dataMisslyckades under väntan pÃ¥ förbindelsebegäranMisslyckades under väntande pÃ¥ uppkoppling till %s(%s):%i - %sMisslyckades under skrivning till destinationMisslyckades med att tolka svar frÃ¥n server - en del information kan vara felaktigMisslyckande: %sFeature ej implementeradFetch: svars-body: %sFetch: svarskod: %u %sFilen %s är NEARLINE, kommer att göra begäran att bringa onlineFil %s är redan cachad pÃ¥ %s under en annan URL: %s - denna fil kommer ej att cachasFil %s är redan cachad pÃ¥ %s under en annan URL: %s - kommer ej att lägga till DN till cachad listaFil %s är cachad (%s) - kontrollerar Ã¥tkomsträttigheterFil %s borttagen framgÃ¥ngsriktFilen '%s' i 'executables'-attributet finns inte i 'inputfiles'-attributetFilen finns redan: %sFil kunde inte flyttas till tillstÃ¥nd DoneFil kunde inte flyttas till tillstÃ¥nd Running: %sBorttagande av fil misslyckades, försöker med borttagande av katalogBorttagning av fil misslyckades, försöker med borttagande av katalog för %sFilnedladdning misslyckades: %sFil kan cachas, kommer att kontrollera cacheFil hÃ¥ller pÃ¥ att cachas, kommer att vänta %i sFilen kan inte kommas Ã¥t %s: %sFilen kan inte kommas Ã¥t: %sFilen kan inte cachas, hoppar över cacheprocesseringFil kan inte cachas, begärdes att inte cachas eller inget cache tillgängligt, hoppar över cachekontrollFil är klar! 
TURL är %sFil är mindre än %llu bytes, kommer att använda lokal leveransFiltyp är inte tillgänglig, försöker med borttagande av filFilnamn returnerades inte i Rucio-svar: %sFiler associerade med begäran-token %s avbröts framgÃ¥ngsriktFiler associerade med begäran-token %s sparades framgÃ¥ngsriktFiler associerade med begäran-token %s frigjordes framgÃ¥ngsriktKopiering av filuppsättning till ett enstaka objekt stöds ej ännuFiluppsättningsregistrering stöds inte ännuSlutför nuvarande replika %sHittar existerande destinationsreplikorFinishWriting: letar efter metadata: %sFinishWriting: erhÃ¥llen checksumma: %sAvslutades framgÃ¥ngsriktFörsta steget av registrering till indextjänst misslyckadesFörsta värdet i 'inputfiles'-attributet (filnamn) kan inte vara tomtFörsta värdet i 'inputfiles'-attributet (filnamn) kan inte vara tomtFör registrering mÃ¥ste källan vara en vanlig URL och destinationen en indexeringstjänstFör det första test-jobbet mÃ¥ste du ocksÃ¥ ange en körtid med alternativet -r (--runtime).TvÃ¥ngskontrollerar källa för cachad fil %sFramtvingar Ã¥ternedladdning av fil %sHittade %s %s (den hade redan laddats in)Hittade %s i cacheHittade DTR %s för fil %s kvarlämnad i överförande tillstÃ¥nd frÃ¥n tidigare körningHittade VOMS-AC-attribut: %sHittade ett register, kommer att frÃ¥ga det rekursivt: %sHittade existerande token för %s i Rucios token-cache vars giltighetstid gÃ¥r ut %sHittade tjänsteslutpunkt %s (typ %s)Hittade STARTED eller SUCCESSFUL slutpunkt (%s)Hittade suspenderad slutpunkt (%s)Hittade följande jobb:Hittade följande nya jobb:Hittade oväntad tom lÃ¥sfil %s. MÃ¥ste gÃ¥ tillbaka till acquire()Hittade oavslutade DTR-överföringar. Det är möjligt att en tidigare A-REX-process inte stängde ned pÃ¥ normal sättLediga slottar grupperade enligt tidsgräns (gräns: lediga slottar):Lediga slottar: %iHela strängen användes inte: %sGACL-auktoriseringsbegäran: %sGET: id %s sökväg %sGenerera ny X509-begäranGenererar %s-jobbeskrivningGenererar ceID-prefix from värdnamn automatisktGenerator startadGeneriskt felHämta delegerad referens frÃ¥n delegeringstjänst: %sHämta frÃ¥n cache: Cachad fil är lÃ¥stHämta frÃ¥n cache: Fel med cacheinställningarHämta frÃ¥n cache: Fil inte i cacheHämta frÃ¥n cache: Ogiltig URL: %sHämta frÃ¥n cache: Söker i cache efter %sHämta frÃ¥n cache: kunde inte komma Ã¥t cachad fil: %sBegäran att hämta %s är fortfarande i kö, ska vänta %i sekunderGet: det finns inget jobb %s - %sHämtar nuvarande klockslag för BLAH-tolk-logg: %sHämtar delegeringsreferens frÃ¥n ARC delegeringstjänstGlobusfel: %sGlobus-handtag har fastnatGlobus platsvariabelsubstitution stöds inte längre. Ange sökväg direkt.Grididentitet mappas till lokal identitet '%s'HEAD: id %s sökväg %sHTTP-fel: %d %sHTTP-fel %u - %sHTTP med SAML2SSO-anrop misslyckadesHTTP:PUT %s: put fil %s: %sHandtag är i felaktigt tillstÃ¥nd %u/%uHead: det finns inget jobb %s - %sHälsotillstÃ¥ndsinfo: %sHälsotillstÃ¥nd: %sHealthState för ExecutionTarget (%s) är inte OK eller VARNING (%s)Hjälpalternativ:Hjälpprocesstart misslyckades: %sHjälpprogram saknasHomogen resursID: %sINI-inställningsfil %s existerar inteINTERNALClient är inte initieradId= %s,Typ= %s,Utfärdare= %s,Värde= %sIdP returnerar ett felmeddelande: %sIdentitet är: %sIdentitetsnamn: %sIdentitet: %sOm proxy eller certifikat/nyckel existerar, kan du ange deras platser manuellt via miljövariablerna '%s'/'%s' eller '%s', eller attributen '%s'/'%s' eller '%s' i klientinställningsfilen (t.ex. 
'%s')Om du anger en policy mÃ¥ste du ocksÃ¥ ange ett policysprÃ¥kIgnorerar slutpunkt (%s), den är redan registrerad i insamlare.Ignorerar jobb (%s), redan försökt och kunde inte ladda in JobControllerPluginIgnorerar jobb (%s), jobbhanterings-URL är okändIgnorerar jobb (%s), jobbstatus-URL är okändIgnorerar jobb (%s), hanteringsgränssnittsnamnet är okäntIgnorerar jobb (%s), statusgränssnittsnamnet är okäntIgnorerar jobb (%s), kunde inte ladda in JobControllerPlugin gör %sIgnorerar jobb, jobb-id är tomtIgnorerar verifieringsfel p.g.a. att osäkra anslutningar är tillÃ¥tna: %sOgiltig URL - avslutande ] för IPv6-adress följs av ogiltigt token: %sOgiltig URL - hittade ingen avslutande ] för IPv6-adress: %sOgiltig URL - inget värdnamn angivet: %sOgiltig URL - sökväg mÃ¥ste vara absolut eller tom: %sOgiltig URL - sökväg mÃ¥ste vara absolut: %sOgiltigt tidsformat: %sOmedelbart slutförande förväntasOmedelbart slutförande förväntas: %sOmedelbart färdigställande: %sImplementeringsnamn: %sImplementerare: %sI den tillgängliga CRLen är lastUpdate-fältet inte giltigtI den tillgängliga CRLen är nextUpdate-fältet inte giltigtIn inställningsprofilen har 'initype'-attributet pÃ¥ "%s"-elementet ett ogiltigt värde "%s".Inkommande meddelande är inte SOAPInkompatibla alternativ --nolist och --forcelist har begärtsInkonsistent metadataOberoende proxy - inga rättigheter beviljadeInformationsslutpunktInformationspunkt '%s' är okändInformationsdokument är tomtInitierade %ue Python-tjänstenInitierar S3-förbindelse till %sInitierar delegeringsprocessIndata är inte SOAPIndata är utan trailer Mata in begäran frÃ¥n en fil: Request.xmlMata in begäran frÃ¥n kodIndata: metadata: %sInstallerade programmiljöer:Gränssnitt (%s) angivet, sänder endast in till detta gränssnittGränssnittstillägg:Gränssnitt pÃ¥ slutpunkt (%s) %s.Gränssnittsversioner:Gränssnitt: %sIntern överföringsmetod stöds inte för %sOgiltig DTROgiltig DTR för källa %s, destination %sOgiltig effektOgiltigt HTTP-objekt kan inte producera resultatOgiltig ID: %sOgiltigt ISO-tidsperiodsformat: %sOgiltig jobbeskrivning:Ogiltig URL '%s' för indatafil '%s'Ogiltig URL '%s' för utdatafil '%s'Ogiltig URL-alternativ-syntax i alternativ '%s' för indatafil '%s'Ogiltig URL-alternativ-syntax i alternativ '%s' för utdatafil '%s'Ogiltigt URL-alternativ: %sOgiltig URL: %sOgiltig URL: '%s' i indatafil '%s'Ogiltig URL: '%s' i utdatafil '%s'Ogiltigt action-värde %sOgiltigt klassnamnOgiltigt klassnamn. broker-argumentet för Python-mäklaren ska vara Filnamn.Klass.args (args är valfritt), till exempel SampleBroker.MyBrokerOgiltig jämförelseoperator '%s' använd i 'delegationid'-attributet, endast "=" är tillÃ¥ten.Ogiltig jämförelseoperator '%s' använd i 'queue'-attributet i 'GRIDMANAGER'-dialekt, endast "=" är tillÃ¥tetOgiltig jämförelseoperator '%s' använd i 'queue'-attributet, endast "!=" eller "=" är tillÃ¥tna.Ogiltiga inställningar - ingen tillÃ¥ten IP-adress angivenOgiltiga inställningar - inga överföringskataloger angivnaOgiltiga referenser, kontrollera proxy och/eller CA-certifikatOgiltig destinations-URL %sOgiltig nedladdningsdestinationssökväg angiven (%s)Ogiltig jobbeskrivningOgiltigt lÃ¥s pÃ¥ fil %sOgiltig logg-nivÃ¥. Använder förval %s.Ogiltigt nodeaccess-värde: %sOgiltig gammal logg-nivÃ¥. 
Använder förval %s.Ogiltig periodsträng: %sOgiltigt portnummer i %sOgiltig stage-out-sökväg angiven (%s)Ogiltig URL: %sUtfärdar-CA: %sUtfärdare: %sJWSE::ExtractPublicKey: raderar inaktuell info: %sJWSE::ExtractPublicKey: extern jwk-nyckelJWSE::ExtractPublicKey: hämtar jws-nyckel frÃ¥n %sJWSE::ExtractPublicKey: jwk-nyckelJWSE::ExtractPublicKey: nyckeltolkningsfelJWSE::ExtractPublicKey: inte en nyckel som stödsJWSE::ExtractPublicKey: x5c-nyckelJWSE::Input: JWE: stöds inte änJWSE::Input: JWS innehÃ¥ll: %sJWSE::Input: JWS: signeringsalgoritm: %sJWSE::Input: JWS: signeringsalgoritm stöds inte: %sJWSE::Input: JWS: signaturverifiering misslyckadesJWSE::Input: JWS: token för gammaltJWSE::Input: JWS: token för ungtJWSE::Input: header: %sJWSE::Input: token: %sJWSE::SignECDSA: misslyckades med att lägga till meddelande till hash: %iJWSE::SignECDSA: misslyckades med att skapa ECDSA-signaturJWSE::SignECDSA: misslyckades med att skapa EVP-kontextJWSE::SignECDSA: misslyckades med att färdigställa hash: %iJWSE::SignECDSA: misslyckades med att initialisera hash: %iJWSE::SignECDSA: misslyckades med att tolka signaturJWSE::SignECDSA: misslyckades med att känna igen digest: %sJWSE::SignECDSA: saknad nyckelJWSE::SignECDSA: fel signaturstorlek skrivenJWSE::SignECDSA: fel signaturstorlek: %i + %iJWSE::VerifyECDSA: misslyckades med att lägga till meddelande till hash: %iJWSE::VerifyECDSA: misslyckades med att tilldela ECDSA-signatur: %iJWSE::VerifyECDSA: misslyckades med att skapa ECDSA-signaturJWSE::VerifyECDSA: misslyckades med att skapa EVP-kontextJWSE::VerifyECDSA: misslyckades med att färdigställa hash: %iJWSE::VerifyECDSA: misslyckades med att initialisera hash: %iJWSE::VerifyECDSA: misslyckades med att tolka signaturJWSE::VerifyECDSA: misslyckades med att känna igen digest: %sJWSE::VerifyECDSA: misslyckades med att verifiera: %iJWSE::VerifyECDSA: saknad nyckelJWSE::VerifyECDSA: fel signaturstorlekJobb %s rapporterar inte ett tillstÃ¥nd varifrÃ¥n det kan Ã¥terupptasJobb %s misslyckades med att förnya delegering %s.Jobb %s har ingen associerad delegering. Kan inte förnya sÃ¥dana jobb.Hittade inte jobb %sJobb %s: nÃ¥gra nerladdningar misslyckadesJobb %s: alla filer nerladdade framgÃ¥ngsriktJobb %s: filer laddas fortfarande nerJobb-id-alternativ är obligatorisktJobbdatabasförbindelse etablerad framgÃ¥ngsrikt (%s)Jobb borttaget: %sJobbeskrivningsfil kunde inte läsas.JobbeskrivningssprÃ¥k är inte angivet, kan inte skriva ut beskrivning.JobbeskrivningssprÃ¥k som stöds av %s:Jobbeskrivning som skall sändas till: %sJobbeskrivningar:Jobb avslutades inte framgÃ¥ngsrikt. 
Meddelande kommer inte att skrivas till BLAH-logg.Jobbnedladdningskatalog frÃ¥n användarinställningsfil: %sJobbnedladdningskatalog kommer att skapas i nuvarande arbetskatalog.Jobbnedladdningskatalog: %sJobb har inte startat än: %sJobblistfil (%s) existerar inteJobblistfil (%s) är inte en vanlig filJobblistfil kan inte skapas: %s är inte en katalogJobblistfil kan inte skapas: Föräldrakatalogen (%s) existerar inte.Jobb nr.Jobbet Ã¥terupptogs framgÃ¥ngsriktJobbinsändningssammanfattning:Jobb insänt med jobb-id: %sJobbets klockslag tolkades framgÃ¥ngsrikt som %sJobb: %sJobb: %s : Begäran att avbryta satt och meddelad till tjänstenJobb: %s : Begäran att avbryta satt men misslyckades att meddela tjänstenJobb: %s : Begäran om borttagning satt och meddelad till tjänstenJobb: %s : Begäran om borttagning satt men misslyckades att meddela tjänstenJobb: %s : Fel : Misslyckades med att sätta avbrytsmarkeringJobb: %s : Fel : Misslyckades med att sätta borttagningsmarkeringJobb: %s : Fel : Ingen lokal information.Jobb: %s : Fel : Okänt tillstÃ¥ndJobControllerPlugin %s kunde inte skapasHittade inte JobControllerPlugin-plugin "%s".JobDescription-klass är inte ett objektJobDescriptionParserPlugin %s kunde inte skapasHittade inte JobDescriptionParserPlugin-plugin "%s".Jobb som saknar information kommer inte att tas bort!Jobb behandlade: %d, borttagna; %dJobb behandlade: %d, förnyade %dJobb behandlade: %d, Ã¥terupptagna: %dJobb behandlade: %d, framgÃ¥ngsrikt avbrutna %dJobb behandlade: %d, framgÃ¥ngsrikt avbrutna %d, framgÃ¥ngsrikt borttagna %dJobb behandlade: %d, framgÃ¥ngsrikt hämtade: %dJobb behandlade: %d, framgÃ¥ngsrikt hämtade: %d, framgÃ¥ngsrikt borttagna: %dSkräp i slutet pÃ¥ RSLSkräp i sessiondir-kommandoLCMAPS returnerade ingen GIDLCMAPS returnerade ingen UIDLCMAPS har getCredentialDataLCMAPS har lcmaps_runLCMAPS returnerade UID som saknar användarnamn: %uLCMAPS returnerade ogiltig GID: %uLCMAPS returnerade ogiltig UID: %uLIST/MLST misslyckadesLIST/MLST misslyckades: %sSprÃ¥k (%s) känns inte igen av nÃ¥gon jobbeskrivningstolk.Sista steget av registrering till indextjänst misslyckadesLatitud: %fVänster operand för RSL-konkatenering utvärderas inte till en strängLegacyMap: inga inställningsblock definieradeLegacyPDP: ARC Legacy Sec Attribute känns inte igen.LegacyPDP: Det finns inga %s-säkerhetsattribut definierade. 
ARC Legacy Sec Handler troligen inte inställd eller har misslyckats.LegacySecHandler: inställningsfil inte angivenRad %d.%d i attributen returnerade: %sLänkar MCC %s(%s) till MCC (%s) under %sLänkar MCC %s(%s) till Plexer (%s) under %sLänkar MCC %s(%s) till tjänst (%s) under %sLänkar Plexer %s till MCC (%s) under %sLänkar Plexer %s till Plexer (%s) under %sLänkar Plexer %s till tjänst (%s) under %sLänkar lokal filLänkar mappad filLänkar/kopierar cachad filLänkar/kopierar cachad fil till %sListfunktionalitet stöds inte av RESTful-VOMS-gränssnittetListfunktionalitet stöds inte av legacy-VOMS-gränssnittetList kommer att göra stat pÃ¥ URL %sListFiles: letar efter metadata: %sLyssnar pÃ¥ %s:%s(%s)Lyssnar pÃ¥ TCP-port %s(%s)Listning av lokala jobb lyckades, hittade %d lokala jobbInladdningsbar modul %s innehÃ¥ller inte begärd plugin %s av typen %sLaddade in %sLaddade in %s %sLaddade in JobControllerPlugin %sLaddar in JobDescriptionParserPlugin %sLaddade in MCC %s(%s)Laddade in Plexer %sLaddade in tjänst %s(%s)Laddade in SubmitterPlugin %sLaddar %ue Python-tjänstenInladdning av OToken misslyckades - ignorerar dess närvaroLaddar in Python-mäklare (%i)Laddar in inställningar (%s)Antal lokala körande jobb: %iAntal lokala suspenderade jobb: %iAntal lokala väntade jobb: %iLocation URI för fil %s är ogiltigPlats existerar redanPlatser saknas i destinations-LFC-URLLÃ¥s %s ägs av en annan värd (%s)LÃ¥sfil %s existerar inteLongitud: %fLetar efter nuvarande jobbSlÃ¥r upp URL: %sSlÃ¥r upp källreplikorMCC %s(%s) - nästa %s(%s) saknar targetMIME är inte lämplig för SOAP: %sMSLD stöds inte - försöker med NLSTMLST stöds inte - försöker med LISTPythons huvudtrÃ¥d är inte initieradPythons huvudtrÃ¥d initierades inteHuvudminnesstorlek: %iHittade felaktig ARCHERY-post (slutpunktstyp är inte definierad): %sHittade felaktig ARCHERY-post (slutpunkts-URL är inte definierad): %sFelaktigt VOMS-AC-attribut %sMappningsfil pÃ¥ %s kan inte öppnas.Mappar %s till %sMappningspolicyalternativ har tomt värdeMappningspolicy:Mappar till kö: %sMatcha utfärdare: %sMatcha vo: %sMatchad ingentingMatchad: %s %s %sMatchad: %s %s %s %sMatchar token-uttryck: %sMatchmaking, %s (%d) är %s än %s (%d) publicerat av ExecutionTarget.Matchmaking, Benchmark %s är inte publicerat av ExecutionTarget.Matchmaking, CacheTotal-problem, ExecutionTarget: %d MB (CacheTotal); JobDescription: %d MB (CacheDiskSpace)Matchmaking, Beräkningsslutpunktsvillkor inte uppfyllt. 
[non-text payload omitted: apparently the string table of the compiled Swedish gettext message catalog (po/sv) shipped with nordugrid-arc-7.1.1, containing the translated UI, log and help messages of the ARC client tools and A-REX services; in the original file the msgstr entries are NUL-separated binary data, and the separators and parts of the UTF-8 encoding were lost in this text extraction, so the catalog is not reconstructible here]
A-REX som motsvarar den angivna kontrollkatalogen. Om ingen katalog anges används kontrollkatalogen som hittas i inställningsfilen.gmetric_bin_path tom i arc.conf (ska aldrig hända, det förvalda värdet ska användas)grupp<:roll>. Ange attributens ordning Exempel: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester eller: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester Notera att det saknar mening att ange ordningen om du har tvÃ¥ eller fler olika VOMS-servrar angivnaheadnode är satt till %svärdnamn[:port] för myproxy-servertimmetimmaridinkommande meddelande är inte SOAPindexeringinformera om ändringar i enstaka jobb (kan användas mer än en gÃ¥ng)init_handle: globus_ftp_client_handle_init misslyckadesinit_handle: globus_ftp_client_handleattr_init misslyckadesinit_handle: globus_ftp_client_handleattr_set_gridftp2 misslyckadesinit_handle: globus_ftp_client_operationattr_init misslyckadesinit_handle: globus_ftp_client_operationattr_set_allow_ipv6 misslyckadesinit_handle: globus_ftp_client_operationattr_set_delayed_pasv misslyckadesinmsg.Attributes().getAll() = %s inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %sindata definierar ej operationindata är inte SOAPinputcheck kontrollerar att indatafiler som angivits i jobbeskrivningen är tillgängliga och Ã¥tkomliga när referenserna i den givna proxyfilen används.i stället för status skriv endast de utvalda jobbens IDheltalgränssnittgränssnitt är satt till %sjobb-idjobbeskrivningsfil [proxyfil]jobbeskrivningsfil som beskriver jobbet som ska sändas injobbeskrivningssträng som beskriver jobbet som ska sändas inbehÃ¥ll filerna pÃ¥ servern (ta inte bort)nivÃ¥listpost: %slista de tillgängliga pluginernalista tillgängliga pluginer (protokoll som stöds)list_files_ftp: checksumma %slist_files_ftp: misslyckades med att erhÃ¥lla filens ändringstidlist_files_ftp: misslyckades med att erhÃ¥lla filens storleklist_files_ftp: globus_ftp_client_cksm misslyckadeslist_files_ftp: globus_ftp_client_modification_time misslyckadeslist_files_ftp: globus_ftp_client_size misslyckadeslist_files_ftp: söker efter checksumma pÃ¥ %slist_files_ftp: söker efter ändringstid för %slist_files_ftp: söker efter storlek pÃ¥ %slist_files_ftp: information om checksumma inte möjliglist_files_ftp: information om checksumma returnerades intelist_files_ftp: information om checksumma stöds intelist_files_ftp: timeout vid väntan pÃ¥ cksumlist_files_ftp: timeout vid väntan pÃ¥ ändringstidlist_files_ftp: timeout vid väntan pÃ¥ storlekhämta serienummer frÃ¥n %s misslyckadeslocalid är satt till %slÃ¥ngt format (mer information)lrms är tommail-parametern är tomskapa föräldrakataloger efter behovMinnesallokeringsfelmetafil %s är tomminutminutermkdir_ftp: skapar %smkdir_ftp: timeout vid väntan pÃ¥ mkdirmodulnamn: %snny nyttplast %snästa kedjeelement anropatnästa element i kedjan returnerade tom nyttolastnästa element i kedjan returnerade felstatusnästa element i kedjan returnerade felstatus: %snästa element i kedjan returnerade ogiltig nyttolastnästa element i kedjan returnerade ogiltig/ej stödd nyttolastnästa element i kedjan returnerade ingen nyttolastnästa element i kedjan returnerade ogiltig nyttolast - passerar igenomnummerantal försök innan överföring misslyckasgammal_url ny_urlhämta endast information om exekverings-target som stöder denna jobbinsändnings-slutpunktstyp. 
TillÃ¥tna värden är: arcrest och internalvälj endast jobb som sändes in till denna beräkningsresursvälj endast jobb vars status är statusstrarbeta rekursivtarbeta rekursivt upp till den angivna nivÃ¥nordningutnyttolast %sutdata är inte SOAPskriv ut begärda element (jobblista, delegerings-id och token) till filägarsubjekt är satt till %sp12-fil är tomlösenordsdestination=lösenordskällasökvägsökväg till lokalt cache (använd för att lägga in fil i cache)sökväg till VOMS-server-inställningsfilensökväg till certifikatfilen, kan vara i endera PEM-, DER- eller PKCS12-formatsökväg till privata-nyckel-filen, om certifikatet är i PKCS12-format behöver inte den privata nyckeln angessökväg till proxyfilensökväg till huvudkatalogen för VOMS *.lsc-filer, behövs endast för VOMS-klient-funktionalitetsökväg till katalogen med betrodda certifikat, behövs endast för VOMS-klient-funktionalitetutför X.509-autentisering för öppnade förbindelserutför X.509-delegering för insända jobbutför tredjepartsöverföring, där destinationen läser frÃ¥n källan (endast tillgänglig med GFAL-plugin)utför token-autentisering för öppnade förbindelseutför token-delegering för insända jobbfysisk plats att skriva till när destinationen är en indexeringstjänst. MÃ¥ste anges för indexeringstjänster som inte genererar fysiska platser automatiskt. Kan anges flera gÃ¥nger - platser kommer att provas i angiven ordning tills en lyckas.pkey och rsa_key existerar!plugin för överföringsprotokoll %s är inte installeradskriv ut all information om denna proxy.skriv ut delegeringstoken med angiv(et/na) IDskriv ut lista med tillgängliga delegerings-IDskriv ut huvuddelegeringstoken för angiv(et/na) jobb-idskriv ut utvald information om denna proxy.skriv ut tjänstens tillstÃ¥ndskriv ut sammanfattning av jobb i varje överföringsandelskriv ut versionsinformationskriver ut information om installerade användar- och CA-certifikatprioritet är för stor - använder maxvärde 100process: DELETEprocess: GETprocess: HEADprocess: POSTprocess: PUTprocess: handling %s stöds inte för subsökväg %sprocess: slutpunkt: %sprocess: factoryslutpunktprocess: id: %sprocess: metod %s stöds inteprocess: metod %s stöds inte för subsökväg %sprocess: metod är inte definieradprocess: metod: %sprocess: operation: %sprocess: begäran=%sprocess: svar=%sprocess: schema %s stöds inte för subsökväg %sprocess: subop: %sprocess: subsökväg: %sproxybegränsningarställas i kökönamn är satt till %släs information frÃ¥n angiven kontrollkatalogread_thread: dataläsningsfel frÃ¥n extern process - avbryter: %sread_thread: avslutarread_thread: for_read misslyckades - avbryter: %sread_thread: erhÃ¥ll och registrera buffrarread_thread: non-data-tagg '%c' frÃ¥n extern process - lämnar: %sregistreraregisterregistertjänst-URL med frivilligt angivande av protokollta bort logiska filnamnsregistreringen även om inte alla fysiska kopior tagits bortta bort proxyta bort jobbet frÃ¥n den lokala jobblistan även om jobbet inte hittas i informationssystemetbegär minst detta antal jobb-instanser inskickade i en enskild insändningsbegäranbegär högst detta antal jobb-instanser inskickade i en enskild insändningsbegäranbegär att avbryta jobb med angiv(et/na) IDbegär att avbryta jobb som ägs av användare med angiv(et/na) subjektnamnbegär att ta bort jobb med angiv(et/na) IDbegär att ta bort jobb som ägs av användare med angiv(et/na) subjektnamnkräv informationsförfrÃ¥gan med den angivna informationsslutpunktstypen. 
Särskilda värdet 'NONE' stänger av alla resursinformationsförfrÃ¥gningar och den efterföljande resursmatchningen. TillÃ¥tna värden är: ldap.nordugrid, ldap.glue2, arcrest och internal.kräv den angivna slutpunktstypen för jobbinsändning. TillÃ¥tna värden är: arcrest och internal.omvänd sortering av jobb efter jobb-id, insändningstid eller jobbnamnspara serienummer till %s misslyckadessekundsekundersekundervälj resursmatchningsmetod (lista tillgängliga mäklare med --listplugins)Sätter fil %s till storlek %lluvisa URLer till filens registrerade kopiorvisa jobb för vilka statusinformation inte är tillgängligvisa endast beskrivning av begärt objekt, lista inte innehÃ¥ll i katalogervisa endast jobb som ägs av användare med angiv(et/na) subjektnamnvisa endast jobb med angiv(et/na) ID<visa fortskridandeindikatorvisa statusinformation i JSON-formatvisa jobbets beräkningsresurs-felloggvisa den ursprungliga jobbeskrivningenvisa den angivna filen frÃ¥n jobbets sessionskatalogvisa jobbets stderrvisa jobbets stdout (förval)avstängninghoppa över jobb som är pÃ¥ en beräkningsresurs med en given URLhoppa över tjänst med den angivna URLen under tjänstesökningsortera jobb efter jobb-id, insändningstid eller jobbnamnkällkälla destinationsource.next_locationstart_readingstart_reading: start av hjälpprocess misslyckadesstart_reading: skapande av trÃ¥d misslyckadesstart_reading_ftpstart_reading_ftp: globus_ftp_client_getstart_reading_ftp: globus_ftp_client_get misslyckadesstart_reading_ftp: globus_thread_create misslyckadesstart_writing_ftp: data-chunk: %llu %llustart_writing_ftp: fördröjd data-chunk: %llu %llustart_writing_ftp: misslyckades med att läsa data-chunkstart_writing_ftp: misslyckades med att läsa data-taggstart_writing_ftp: globus_thread_create misslyckadesstart_writing_ftp: start av hjälpprocess misslyckadesstart_writing_ftp: mkdirstart_writing_ftp: mkdir misslyckades - försöker fortfarande skrivastart_writing_ftp: putstart_writing_ftp: put misslyckadesstart_writing_ftp: skapande av trÃ¥d misslyckadesstart_writing_ftp: väntar pÃ¥ data-chunkstart_writing_ftp: väntar pÃ¥ data-taggstart_writing_ftp: väntar pÃ¥ nÃ¥gra buffrar som skickatsstatusstatusstrstop_reading: avslutar: %sstop_reading: väntar pÃ¥ att överföring ska avslutasstop_reading_ftp: avbryter förbindelsestop_reading_ftp: avslutar: %sstop_reading_ftp: väntar pÃ¥ att överföring ska avslutassträngsänd in jobb som dryrun (ingen insändning till batchsystemet)sänd in test-jobb givet av numrettest-jobbets körtid anges av numretde insända jobbens ID kommer att läggas till i denna filfilen som lagrar information om aktiva jobb (förval %s)detta alternativ gör ingenting (gamla GSI-proxyer stöds inte längre)timeout i sekunder (förval 20)behandla begärt objekt som en katalog och försök alltid lista innehÃ¥lltrunkera jobblistan för synkroniseringtypkunde inre ladda in nummer frÃ¥n: %savregistreraladda uppuppladdningarurlurl [url ...]urllistan %s innehÃ¥ller ogiltig URL: %sanvänd GSI-kommunikationsprotokollet för att kontakta VOMS-tjänster.använd HTTP-kommunikationsprotokollet för att kontakta VOMS-tjänster som erbjuder RESTful Ã¥tkomst Notera att för RESTful Ã¥tkomst stöds inte 'list'-kommandot och mer än en VOMS-server använd NSS-referens-databas i förvalda Mozilla-profiler, inklusive Firefox, Seamonkey och Thunderbird.använd det gamla kommunikationsprotokollet för att kontakta VOMS-tjänster istället för RESTful Ã¥tkomst använd passiv överföring (förvalt av om säker överföring begärts, förvalt pÃ¥ om säker överföring inte begärtsanvänd säker överföring 
(osäker som förval)använd särskild inställningsfilanvänd jobbets namn i stället för dess korta ID för jobbkatalogens namnanvändarnamn till myproxy-server (om detta saknas används subjektet frÃ¥n användarcertifikatet)vomsvoms<:kommando>. Ange VOMS-server Mer än en VOMS-server kan anges pÃ¥ detta sätt: --voms VOa:kommando1 --voms VOb:kommando2). :kommando är valfritt, och används för att begära specifika attribut (t.ex. roller) kommandoalternativ är: all --- lägg till detta DNs alla attribut i AC; list --- lista detta DNs alla attribut, skapar inte AC-tillägg; /Role=dinRoll --- ange roll, om detta DN har en sÃ¥dan roll, kommer rollen att läggas till i AC; /vonamn/gruppnamn/Role=dinRoll --- ange VO, grupp och roll; om detta DN har en sÃ¥dan roll, kommer rollen att läggas till i AC. Om detta alternativ inte anges kommer värden frÃ¥n inställningsfilerna att användas. För att undvika att dessa används ange -S med tomt värde. väntar pÃ¥ data-chunkwrite_thread: avslutarwrite_thread: for_write eofwrite_thread: for_write misslyckades - avbryterwrite_thread: erhÃ¥ll och skicka vidare buffrarwrite_thread: out misslyckades - avbryterxrootd stängning misslyckades: %sxrootd öppning misslyckades: %sxrootd skrivning misslyckades: %sj~DataPoint: förstör ftp_handle~DataPoint: förstör ftp_handle misslyckades - försöker igen~DataPoint: misslyckades med att förstöra ftp_handle - läckernordugrid-arc-7.1.1/PaxHeaders/aclocal.m40000644000000000000000000000013115067751343015210 xustar0030 mtime=1759498979.417497942 30 atime=1759498979.542671086 29 ctime=1759499024.68766472 nordugrid-arc-7.1.1/aclocal.m40000644000175000002070000015634715067751343017133 0ustar00mockbuildmock00000000000000# generated automatically by aclocal 1.16.2 -*- Autoconf -*- # Copyright (C) 1996-2020 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, [m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 11 (pkg-config-0.29.1) dnl Copyright © 2004 Scott James Remnant . dnl Copyright © 2012-2015 Dan Nicholson dnl dnl This program is free software; you can redistribute it and/or modify dnl it under the terms of the GNU General Public License as published by dnl the Free Software Foundation; either version 2 of the License, or dnl (at your option) any later version. dnl dnl This program is distributed in the hope that it will be useful, but dnl WITHOUT ANY WARRANTY; without even the implied warranty of dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU dnl General Public License for more details. 
dnl dnl You should have received a copy of the GNU General Public License dnl along with this program; if not, write to the Free Software dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA dnl 02111-1307, USA. dnl dnl As a special exception to the GNU General Public License, if you dnl distribute this file as part of a program that contains a dnl configuration script generated by Autoconf, you may include it under dnl the same distribution terms that you use for the rest of that dnl program. dnl PKG_PREREQ(MIN-VERSION) dnl ----------------------- dnl Since: 0.29 dnl dnl Verify that the version of the pkg-config macros is at least dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's dnl installed version of pkg-config, this checks the developer's version dnl of pkg.m4 when generating configure. dnl dnl To ensure that this macro is defined, also add: dnl m4_ifndef([PKG_PREREQ], dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])]) dnl dnl See the "Since" comment for each macro you use to see what version dnl of the macros you require. m4_defun([PKG_PREREQ], [m4_define([PKG_MACROS_VERSION], [0.29.1]) m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1, [m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])]) ])dnl PKG_PREREQ dnl PKG_PROG_PKG_CONFIG([MIN-VERSION]) dnl ---------------------------------- dnl Since: 0.16 dnl dnl Search for the pkg-config tool and set the PKG_CONFIG variable to dnl the first one found in the path. Checks that the version of pkg-config found dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is dnl used since that's the first version where most current features of dnl pkg-config existed. AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])dnl PKG_PROG_PKG_CONFIG dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------------------------------- dnl Since: 0.18 dnl dnl Check to see whether a particular set of modules exists. Similar to dnl PKG_CHECK_MODULES(), but does not set variables or print errors.
dnl dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) dnl only at the first occurrence in configure.ac, so if the first place dnl it's called might be skipped (such as if it is within an "if"), you dnl have to call PKG_CHECK_EXISTS manually. AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) dnl --------------------------------------------- dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting dnl pkg_failed based on the result. m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])dnl _PKG_CONFIG dnl _PKG_SHORT_ERRORS_SUPPORTED dnl --------------------------- dnl Internal check to see if pkg-config supports short errors. AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])dnl _PKG_SHORT_ERRORS_SUPPORTED dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], dnl [ACTION-IF-NOT-FOUND]) dnl -------------------------------------------------------------- dnl Since: 0.4.0 dnl dnl Note that if there is a possibility the first call to dnl PKG_CHECK_MODULES might not happen, you should be sure to include an dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config.
_PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])dnl PKG_CHECK_MODULES dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], dnl [ACTION-IF-NOT-FOUND]) dnl --------------------------------------------------------------------- dnl Since: 0.29 dnl dnl Checks for existence of MODULES and gathers its build flags with dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags dnl and VARIABLE-PREFIX_LIBS from --libs. dnl dnl Note that if there is a possibility the first call to dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to dnl include an explicit call to PKG_PROG_PKG_CONFIG in your dnl configure.ac. AC_DEFUN([PKG_CHECK_MODULES_STATIC], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl _save_PKG_CONFIG=$PKG_CONFIG PKG_CONFIG="$PKG_CONFIG --static" PKG_CHECK_MODULES($@) PKG_CONFIG=$_save_PKG_CONFIG[]dnl ])dnl PKG_CHECK_MODULES_STATIC dnl PKG_INSTALLDIR([DIRECTORY]) dnl ------------------------- dnl Since: 0.27 dnl dnl Substitutes the variable pkgconfigdir as the location where a module dnl should install pkg-config .pc files. By default the directory is dnl $libdir/pkgconfig, but the default can be changed by passing dnl DIRECTORY. The user can override through the --with-pkgconfigdir dnl parameter. AC_DEFUN([PKG_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([pkgconfigdir], [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, [with_pkgconfigdir=]pkg_default) AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ])dnl PKG_INSTALLDIR dnl PKG_NOARCH_INSTALLDIR([DIRECTORY]) dnl -------------------------------- dnl Since: 0.27 dnl dnl Substitutes the variable noarch_pkgconfigdir as the location where a dnl module should install arch-independent pkg-config .pc files. By dnl default the directory is $datadir/pkgconfig, but the default can be dnl changed by passing DIRECTORY. The user can override through the dnl --with-noarch-pkgconfigdir parameter. AC_DEFUN([PKG_NOARCH_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([noarch-pkgconfigdir], [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, [with_noarch_pkgconfigdir=]pkg_default) AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ])dnl PKG_NOARCH_INSTALLDIR dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE, dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) dnl ------------------------------------------- dnl Since: 0.28 dnl dnl Retrieves the value of the pkg-config variable for the given module. AC_DEFUN([PKG_CHECK_VAR], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl _PKG_CONFIG([$1], [variable="][$3]["], [$2]) AS_VAR_COPY([$1], [pkg_cv_][$1]) AS_VAR_IF([$1], [""], [$5], [$4])dnl ])dnl PKG_CHECK_VAR dnl PKG_WITH_MODULES(VARIABLE-PREFIX, MODULES, dnl [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND], dnl [DESCRIPTION], [DEFAULT]) dnl ------------------------------------------ dnl dnl Prepare a "--with-" configure option using the lowercase dnl [VARIABLE-PREFIX] name, merging the behaviour of AC_ARG_WITH and dnl PKG_CHECK_MODULES in a single macro. 
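dnl
dnl --- Editor's note (illustrative, not part of the generated file): a
dnl minimal sketch of how the PKG_CHECK_MODULES macro defined above is
dnl typically called from configure.ac. The GLIBMM prefix and the module
dnl string "glibmm-2.4 >= 2.4.0" are assumptions chosen for illustration:
dnl
dnl   PKG_PROG_PKG_CONFIG([0.9.0])
dnl   PKG_CHECK_MODULES([GLIBMM], [glibmm-2.4 >= 2.4.0],
dnl     [AC_MSG_NOTICE([glibmm found])],
dnl     [AC_MSG_ERROR([glibmm-2.4 >= 2.4.0 is required])])
dnl
dnl On success, GLIBMM_CFLAGS and GLIBMM_LIBS are set and substituted (via
dnl the AC_ARG_VAR calls above) for use in Makefile.am.
dnl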
AC_DEFUN([PKG_WITH_MODULES], [ m4_pushdef([with_arg], m4_tolower([$1])) m4_pushdef([description], [m4_default([$5], [build with ]with_arg[ support])]) m4_pushdef([def_arg], [m4_default([$6], [auto])]) m4_pushdef([def_action_if_found], [AS_TR_SH([with_]with_arg)=yes]) m4_pushdef([def_action_if_not_found], [AS_TR_SH([with_]with_arg)=no]) m4_case(def_arg, [yes],[m4_pushdef([with_without], [--without-]with_arg)], [m4_pushdef([with_without],[--with-]with_arg)]) AC_ARG_WITH(with_arg, AS_HELP_STRING(with_without, description[ @<:@default=]def_arg[@:>@]),, [AS_TR_SH([with_]with_arg)=def_arg]) AS_CASE([$AS_TR_SH([with_]with_arg)], [yes],[PKG_CHECK_MODULES([$1],[$2],$3,$4)], [auto],[PKG_CHECK_MODULES([$1],[$2], [m4_n([def_action_if_found]) $3], [m4_n([def_action_if_not_found]) $4])]) m4_popdef([with_arg]) m4_popdef([description]) m4_popdef([def_arg]) ])dnl PKG_WITH_MODULES dnl PKG_HAVE_WITH_MODULES(VARIABLE-PREFIX, MODULES, dnl [DESCRIPTION], [DEFAULT]) dnl ----------------------------------------------- dnl dnl Convenience macro to trigger AM_CONDITIONAL after PKG_WITH_MODULES dnl check. HAVE_[VARIABLE-PREFIX] is exported as a make variable. AC_DEFUN([PKG_HAVE_WITH_MODULES], [ PKG_WITH_MODULES([$1],[$2],,,[$3],[$4]) AM_CONDITIONAL([HAVE_][$1], [test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"]) ])dnl PKG_HAVE_WITH_MODULES dnl PKG_HAVE_DEFINE_WITH_MODULES(VARIABLE-PREFIX, MODULES, dnl [DESCRIPTION], [DEFAULT]) dnl ------------------------------------------------------ dnl dnl Convenience macro to run AM_CONDITIONAL and AC_DEFINE after dnl PKG_WITH_MODULES check. HAVE_[VARIABLE-PREFIX] is exported as make dnl and preprocessor variable. AC_DEFUN([PKG_HAVE_DEFINE_WITH_MODULES], [ PKG_HAVE_WITH_MODULES([$1],[$2],[$3],[$4]) AS_IF([test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"], [AC_DEFINE([HAVE_][$1], 1, [Enable ]m4_tolower([$1])[ support])]) ])dnl PKG_HAVE_DEFINE_WITH_MODULES # Copyright (C) 2002-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.16' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.16.2], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and _AM_AUTOCONF_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.16.2])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2020 Free Software Foundation, Inc.
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason for the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will break when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevents a # configured tree from being moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved.
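#
# --- Editor's note (illustrative, not part of the generated file): a
# minimal sketch of how the AM_CONDITIONAL macro defined above is used;
# the ENABLE_DOC name and the doc subdirectory are hypothetical:
#
#   configure.ac:  AM_CONDITIONAL([ENABLE_DOC], [test "x$enable_doc" = xyes])
#
#   Makefile.am:   if ENABLE_DOC
#                  SUBDIRS += doc
#                  endif
#
# configure substitutes ENABLE_DOC_TRUE and ENABLE_DOC_FALSE so that only
# one branch of the Makefile fragment stays active.
#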
# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all its # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GCJ". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Set up a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this.
Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. # TODO: see whether this extra hack can be removed once we start # requiring Autoconf 2.70 or later. 
AS_CASE([$CONFIG_FILES], [*\'*], [eval set x "$CONFIG_FILES"], [*], [set x $CONFIG_FILES]) shift # Used to flag and report bootstrapping failures. am_rc=0 for am_mf do # Strip MF so we end up with the name of the file. am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile which includes # dependency-tracking related rules and includes. # Grep'ing the whole file directly is not great: AIX grep has a line # limit of 2048, but all sed's we know understand at least 4000. sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ || continue am_dirpart=`AS_DIRNAME(["$am_mf"])` am_filepart=`AS_BASENAME(["$am_mf"])` AM_RUN_LOG([cd "$am_dirpart" \ && sed -e '/# am--include-marker/d' "$am_filepart" \ | $MAKE -f - am--depfiles]) || am_rc=$? done if test $am_rc -ne 0; then AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments for automatic dependency tracking. If GNU make was not used, consider re-running the configure script with MAKE="gmake" (or whatever is necessary). You can also try re-running configure with the '--disable-dependency-tracking' option to at least be able to build the package (albeit without support for automatic dependency tracking).]) fi AS_UNSET([am_dirpart]) AS_UNSET([am_filepart]) AS_UNSET([am_mf]) AS_UNSET([am_rc]) rm -f conftest-deps.mk } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking is enabled. # This creates each '.Po' and '.Plo' makefile fragment that we'll need in # order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC]) [_AM_PROG_CC_C_O ]) # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I."
AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. 
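#
# --- Editor's note (illustrative): the probe below amounts to running
#
#   rm -f && rm -fr && rm -rf
#
# and aborting configure if 'rm' refuses to run with '-f' and no file
# operands (unless ACCEPT_INFERIOR_RM_PROGRAM=yes is exported).
#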
# To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) fi fi dnl The trailing newline in this macro's definition is deliberate, for dnl backward compatibility and to allow trailing 'dnl'-style comments dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. 
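#
# --- Editor's note (illustrative, not part of the generated file): the
# macro below boils down to this standalone shell probe; am__leading_dot
# mirrors the variable it sets:
#
#   rm -rf .tst 2>/dev/null
#   mkdir .tst 2>/dev/null
#   if test -d .tst; then am__leading_dot=.; else am__leading_dot=_; fi
#   rmdir .tst 2>/dev/null
#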
AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAKE_INCLUDE() # ----------------- # Check whether make has an 'include' directive that can support all # the idioms we need for our automatic dependency tracking code. AC_DEFUN([AM_MAKE_INCLUDE], [AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) cat > confinc.mk << 'END' am__doit: @echo this is the am__doit target >confinc.out .PHONY: am__doit END am__include="#" am__quote= # BSD make does it like this. echo '.include "confinc.mk" # ignored' > confmf.BSD # Other make implementations (GNU, Solaris 10, AIX) do it like this. echo 'include confinc.mk # ignored' > confmf.GNU _am_result=no for s in GNU BSD; do AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) AS_CASE([$?:`cat confinc.out 2>/dev/null`], ['0:this is the am__doit target'], [AS_CASE([$s], [BSD], [am__include='.include' am__quote='"'], [am__include='include' am__quote=''])]) if test "$am__include" != "#"; then _am_result="yes ($s style)" break fi done rm -f confinc.* confmf.* AC_MSG_RESULT([${_am_result}]) AC_SUBST([am__include]) AC_SUBST([am__quote])]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. # If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # Copyright (C) 2003-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_MKDIR_P # --------------- # Check for 'mkdir -p'. AC_DEFUN([AM_PROG_MKDIR_P], [AC_PREREQ([2.60])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl dnl FIXME we are no longer going to remove this! adjust warning dnl FIXME message accordingly. AC_DIAGNOSE([obsolete], [$0: this macro is deprecated, and will soon be removed. You should use the Autoconf-provided 'AC][_PROG_MKDIR_P' macro instead, and use '$(MKDIR_P)' instead of '$(mkdir_p)' in your Makefile.am files.]) dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, dnl while keeping a definition of mkdir_p for backward compatibility.
dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of dnl Makefile.ins that do not define MKDIR_P, so we do our own dnl adjustment using top_builddir (which is defined more often than dnl MKDIR_P). AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl case $mkdir_p in [[\\/$]]* | ?:[[\\/]]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_CC_C_O # --------------- # Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC # to automatically call this. AC_DEFUN([_AM_PROG_CC_C_O], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([compile])dnl AC_LANG_PUSH([C])dnl AC_CACHE_CHECK( [whether $CC understands -c and -o together], [am_cv_prog_cc_c_o], [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i]) if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi AC_LANG_POP([C])]) # For backward compatibility. AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) # Copyright (C) 2001-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? 
echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2020 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. 
# Copyright (C) 2001-2020 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# AM_PROG_INSTALL_STRIP
# ---------------------
# One issue with vendor 'install' (even GNU) is that you can't
# specify the program used to strip binaries.  This is especially
# annoying in cross-compiling environments, where the build's strip
# is unlikely to handle the host's binaries.
# Fortunately install-sh will honor a STRIPPROG variable, so we
# always use install-sh in "make install-strip", and initialize
# STRIPPROG with the value of the STRIP variable (set by the user).
AC_DEFUN([AM_PROG_INSTALL_STRIP],
[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
# Installed binaries are usually stripped using 'strip' when the user
# runs "make install-strip".  However 'strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
# will honor the 'STRIP' environment variable to overrule this program.
dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
if test "$cross_compiling" != no; then
  AC_CHECK_TOOL([STRIP], [strip], :)
fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])

# Copyright (C) 2006-2020 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# _AM_SUBST_NOTMAKE(VARIABLE)
# ---------------------------
# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
# This macro is traced by Automake.
AC_DEFUN([_AM_SUBST_NOTMAKE])

# AM_SUBST_NOTMAKE(VARIABLE)
# --------------------------
# Public sister of _AM_SUBST_NOTMAKE.
AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
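# In practice the install-strip machinery above means a cross build can hand
# "make install-strip" the host toolchain's strip instead of the build
# machine's.  A minimal usage sketch; the target triplet is hypothetical:
#
#   ./configure --host=arm-linux-gnueabihf
#   make
#   make install-strip STRIP=arm-linux-gnueabihf-strip
#
# Because install-strip goes through install-sh with STRIPPROG initialized
# from $(STRIP), the override on the make command line is all that is needed.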
# Check how to create a tarball.                            -*- Autoconf -*-

# Copyright (C) 2004-2020 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# _AM_PROG_TAR(FORMAT)
# --------------------
# Check how to create a tarball in format FORMAT.
# FORMAT should be one of 'v7', 'ustar', or 'pax'.
#
# Substitute a variable $(am__tar) that is a command
# writing to stdout a FORMAT-tarball containing the directory
# $tardir.
#     tardir=directory && $(am__tar) > result.tar
#
# Substitute a variable $(am__untar) that extracts such
# a tarball read from stdin.
#     $(am__untar) < result.tar
#
AC_DEFUN([_AM_PROG_TAR],
[# Always define AMTAR for backward compatibility.  Yes, it's still used
# in the wild :-(  We should find a proper way to deprecate it ...
AC_SUBST([AMTAR], ['$${TAR-tar}'])

# We'll loop over all known methods to create a tar archive until one works.
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'

m4_if([$1], [v7],
  [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],

  [m4_case([$1],
    [ustar],
     [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
      # There is notably a 21 bits limit for the UID and the GID.  In fact,
      # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
      # and bug#13588).
      am_max_uid=2097151 # 2^21 - 1
      am_max_gid=$am_max_uid
      # The $UID and $GID variables are not portable, so we need to resort
      # to the POSIX-mandated id(1) utility.  Errors in the 'id' calls
      # below are definitely unexpected, so allow the users to see them
      # (that is, avoid stderr redirection).
      am_uid=`id -u || echo unknown`
      am_gid=`id -g || echo unknown`
      AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
      if test $am_uid -le $am_max_uid; then
         AC_MSG_RESULT([yes])
      else
         AC_MSG_RESULT([no])
         _am_tools=none
      fi
      AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
      if test $am_gid -le $am_max_gid; then
         AC_MSG_RESULT([yes])
      else
        AC_MSG_RESULT([no])
        _am_tools=none
      fi],

  [pax],
    [],

  [m4_fatal([Unknown tar format])])

  AC_MSG_CHECKING([how to create a $1 tar archive])

  # Go ahead even if we have the value already cached.  We do so because we
  # need to set the values for the 'am__tar' and 'am__untar' variables.
  _am_tools=${am_cv_prog_tar_$1-$_am_tools}

  for _am_tool in $_am_tools; do
    case $_am_tool in
    gnutar)
      for _am_tar in tar gnutar gtar; do
        AM_RUN_LOG([$_am_tar --version]) && break
      done
      am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
      am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
      am__untar="$_am_tar -xf -"
      ;;
    plaintar)
      # Must skip GNU tar: if it does not support --format= it doesn't create
      # ustar tarball either.
      (tar --version) >/dev/null 2>&1 && continue
      am__tar='tar chf - "$$tardir"'
      am__tar_='tar chf - "$tardir"'
      am__untar='tar xf -'
      ;;
    pax)
      am__tar='pax -L -x $1 -w "$$tardir"'
      am__tar_='pax -L -x $1 -w "$tardir"'
      am__untar='pax -r'
      ;;
    cpio)
      am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
      am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
      am__untar='cpio -i -H $1 -d'
      ;;
    none)
      am__tar=false
      am__tar_=false
      am__untar=false
      ;;
    esac

    # If the value was cached, stop now.  We just wanted to have am__tar
    # and am__untar set.
    test -n "${am_cv_prog_tar_$1}" && break

    # tar/untar a dummy directory, and stop if the command works.
    rm -rf conftest.dir
    mkdir conftest.dir
    echo GrepMe > conftest.dir/file
    AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
    rm -rf conftest.dir
    if test -s conftest.tar; then
      AM_RUN_LOG([$am__untar <conftest.tar])
      AM_RUN_LOG([cat conftest.dir/file])
      grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
    fi
  done
  rm -rf conftest.dir

  AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
  AC_MSG_RESULT([$am_cv_prog_tar_$1])])

AC_SUBST([am__tar])
AC_SUBST([am__untar])
]) # _AM_PROG_TAR
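# A usage sketch of the two substituted commands, exactly as the header
# comment above documents them; directory and tarball names are illustrative.
# With the 'gnutar' method selected, $(am__tar) expands to something like
# "tar --format=ustar -chf - "$$tardir"", so a generated Makefile rule can do:
#
#   tardir=mypkg-1.0 && $(am__tar) > mypkg-1.0.tar    # pack a dist tree
#   $(am__untar) < mypkg-1.0.tar                      # unpack from stdin
#
# Automake's own dist and distcheck rules are built from this pair.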
m4_include([m4/ac_cxx_have_sstream.m4])
m4_include([m4/ac_cxx_namespaces.m4])
m4_include([m4/arc_api.m4])
m4_include([m4/arc_paths.m4])
m4_include([m4/fsusage.m4])
m4_include([m4/gettext.m4])
m4_include([m4/gpt.m4])
m4_include([m4/iconv.m4])
m4_include([m4/intlmacosx.m4])
m4_include([m4/lib-ld.m4])
m4_include([m4/lib-link.m4])
m4_include([m4/lib-prefix.m4])
m4_include([m4/libtool.m4])
m4_include([m4/ltoptions.m4])
m4_include([m4/ltsugar.m4])
m4_include([m4/ltversion.m4])
m4_include([m4/lt~obsolete.m4])
m4_include([m4/nls.m4])
m4_include([m4/po.m4])
m4_include([m4/progtest.m4])
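# The m4_include list closes aclocal.m4: the named fragments live in the
# package's bundled m4/ directory, and aclocal copies them into aclocal.m4 so
# configure can be regenerated without those macros being installed
# system-wide.  A sketch of the usual regeneration sequence:
#
#   aclocal -I m4
#   autoconf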
nordugrid-arc-7.1.1/include/arc/ -- public forwarding headers.  Every file in
this tree is a single-line #include that redirects to the real header in the
source tree (the uniform per-file tar metadata -- PaxHeaders mtime/atime/ctime
records, mode and mockbuild owner fields -- is omitted):

  include/arc/*.h -> src/hed/libs/common/*.h:
      ArcLocation.h ArcConfigIni.h JobPerfLog.h DateTime.h XMLNode.h
      ArcVersion.h.in (-> ArcVersion.h) User.h ArcConfigFile.h ArcConfig.h
      Profile.h IniConfig.h JSON.h UserConfig.h IString.h Watchdog.h Thread.h
      Logger.h GUID.h FileLock.h DBInterface.h Run.h Base64.h OptionParser.h
      ArcRegex.h FileUtils.h HostnameResolver.h IntraProcessCounter.h
      FileAccess.h CheckSum.h Utils.h
  include/arc/data/*.h -> src/hed/libs/data/*.h:
      DataPointDelegate.h DataExternalHelper.h DataSpeed.h DataBuffer.h
      DataMover.h FileCacheHash.h URLMap.h DataHandle.h DataPointDirect.h
      DataPointIndex.h FileCache.h DataExternalComm.h DataCallback.h
      DataStatus.h DataPoint.h FileInfo.h
  include/arc/loader/*.h -> src/hed/libs/loader/*.h:
      ModuleManager.h FinderLoader.h Loader.h Plugin.h
  include/arc/data-staging/*.h -> src/libs/data-staging/*.h:
      DTRList.h Processor.h Generator.h DataDeliveryComm.h DataDelivery.h
      TransferShares.h DTRStatus.h DTR.h Scheduler.h
  include/arc/communication/*.h -> src/hed/libs/communication/*.h:
      ClientInterface.h ClientSAML2SSO.h ClientX509Delegation.h
  include/arc/credential/*.h -> src/hed/libs/credential/*.h:
      Proxycertinfo.h VOMSAttribute.h PasswordSource.h NSSUtil.h VOMSConfig.h
      Credential.h VOMSUtil.h CertUtil.h
  include/arc/ws-security/*.h -> src/hed/libs/ws-security/*.h:
      SAMLToken.h X509Token.h UsernameToken.h
  include/arc/xmlsec/*.h -> src/hed/libs/xmlsec/*.h:
      XmlSecUtils.h XMLSecNode.h saml_util.h
  include/arc/external/cJSON/cJSON.h -> src/external/cJSON/cJSON.h
  include/arc/infosys/InformationInterface.h
      -> src/hed/libs/infosys/InformationInterface.h
  include/arc/credentialstore/*.h -> src/hed/libs/credentialstore/*.h:
      ClientVOMS.h CredentialStore.h ClientVOMSRESTful.h
  include/arc/security/*.h -> src/hed/libs/security/*.h:
      PDP.h ClassLoader.h Security.h
  include/arc/security/ArcPDP/*.h -> src/hed/libs/security/ArcPDP/*.h:
      EvaluationCtx.h RequestItem.h EvaluatorLoader.h Evaluator.h Source.h
      Request.h Result.h Response.h PolicyParser.h PolicyStore.h
  include/arc/security/ArcPDP/alg/*.h -> src/hed/libs/security/ArcPDP/alg/*.h:
      AlgFactory.h PermitOverridesAlg.h DenyOverridesAlg.h OrderedAlg.h
      CombiningAlg.h
  include/arc/security/ArcPDP/fn/*.h -> src/hed/libs/security/ArcPDP/fn/*.h:
      FnFactory.h EqualFunction.h MatchFunction.h Function.h InRangeFunction.h
  include/arc/security/ArcPDP/policy/Policy.h
      -> src/hed/libs/security/ArcPDP/policy/Policy.h
  include/arc/security/ArcPDP/attr/*.h -> src/hed/libs/security/ArcPDP/attr/*.h:
      AttributeFactory.h AnyURIAttribute.h RequestAttribute.h
      DateTimeAttribute.h StringAttribute.h GenericAttribute.h
      AttributeProxy.h X500NameAttribute.h AttributeValue.h BooleanAttribute.h
  include/arc/crypto/OpenSSL.h -> src/hed/libs/crypto/OpenSSL.h
  include/arc/globusutils/*.h -> src/hed/libs/globusutils/*.h:
      GSSCredential.h GlobusErrorUtils.h
nordugrid-arc-7.1.1/include/arc/globusutils/PaxHeaders/GlobusWorkarounds.h0000644000000000000000000000013115067751327023761 xustar0030 mtime=1759498967.637599126 29 atime=1759498967.80749277 30 ctime=1759499031.838782174 nordugrid-arc-7.1.1/include/arc/globusutils/GlobusWorkarounds.h0000644000175000002070000000010115067751327025654 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/globusutils/GlobusWorkarounds.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/message0000644000000000000000000000013215067751427017113 xustar0030 mtime=1759499031.868466179 30 atime=1759499034.762510154 30 ctime=1759499031.868466179 nordugrid-arc-7.1.1/include/arc/message/0000755000175000002070000000000015067751427021072 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/MCC_Status.h0000644000000000000000000000013115067751327021304 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.851073028 nordugrid-arc-7.1.1/include/arc/message/MCC_Status.h0000644000175000002070000000006615067751327023211 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MCC_Status.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/PayloadStream.h0000644000000000000000000000013115067751327022104 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.860403087 nordugrid-arc-7.1.1/include/arc/message/PayloadStream.h0000644000175000002070000000007115067751327024005 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/PayloadStream.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/PayloadSOAP.h0000644000000000000000000000013115067751327021413 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.858830958 nordugrid-arc-7.1.1/include/arc/message/PayloadSOAP.h0000644000175000002070000000006715067751327023321 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/PayloadSOAP.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/SecHandler.h0000644000000000000000000000013115067751327021347 xustar0030 mtime=1759498967.638490202 29 atime=1759498967.80749277 30 ctime=1759499031.868066326 nordugrid-arc-7.1.1/include/arc/message/SecHandler.h0000644000175000002070000000006615067751327023254 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SecHandler.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/Plexer.h0000644000000000000000000000013115067751327020576 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.861908329 nordugrid-arc-7.1.1/include/arc/message/Plexer.h0000644000175000002070000000006215067751327022477 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/Plexer.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/MessageAuth.h0000644000000000000000000000013115067751327021545 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.855841949 nordugrid-arc-7.1.1/include/arc/message/MessageAuth.h0000644000175000002070000000006715067751327023453 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MessageAuth.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/SecAttr.h0000644000000000000000000000013115067751327020704 xustar0030 mtime=1759498967.638490202 29 atime=1759498967.80749277 30 ctime=1759499031.866609723 nordugrid-arc-7.1.1/include/arc/message/SecAttr.h0000644000175000002070000000006315067751327022606 0ustar00mockbuildmock00000000000000#include 
"../../../src/hed/libs/message/SecAttr.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/Message.h0000644000000000000000000000013115067751327020723 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.852595194 nordugrid-arc-7.1.1/include/arc/message/Message.h0000644000175000002070000000006315067751327022625 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/Message.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/MCCLoader.h0000644000000000000000000000013115067751327021070 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.849498125 nordugrid-arc-7.1.1/include/arc/message/MCCLoader.h0000644000175000002070000000006515067751327022774 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MCCLoader.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/SOAPMessage.h0000644000000000000000000000013015067751327021405 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 29 ctime=1759499031.86518232 nordugrid-arc-7.1.1/include/arc/message/SOAPMessage.h0000644000175000002070000000006715067751327023314 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SOAPMessage.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/MessageAttributes.h0000644000000000000000000000013115067751327022772 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.854151654 nordugrid-arc-7.1.1/include/arc/message/MessageAttributes.h0000644000175000002070000000007515067751327024677 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MessageAttributes.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/MCC.h0000644000000000000000000000013115067751327017741 xustar0030 mtime=1759498967.637978549 29 atime=1759498967.80749277 30 ctime=1759499031.847876187 nordugrid-arc-7.1.1/include/arc/message/MCC.h0000644000175000002070000000005715067751327021646 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/MCC.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/PayloadRaw.h0000644000000000000000000000013115067751327021402 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.857348475 nordugrid-arc-7.1.1/include/arc/message/PayloadRaw.h0000644000175000002070000000006615067751327023307 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/PayloadRaw.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/SOAPEnvelope.h0000644000000000000000000000013115067751327021577 xustar0030 mtime=1759498967.638286539 29 atime=1759498967.80749277 30 ctime=1759499031.863388991 nordugrid-arc-7.1.1/include/arc/message/SOAPEnvelope.h0000644000175000002070000000007015067751327023477 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/SOAPEnvelope.h" nordugrid-arc-7.1.1/include/arc/message/PaxHeaders/Service.h0000644000000000000000000000013115067751327020737 xustar0030 mtime=1759498967.638490202 29 atime=1759498967.80749277 30 ctime=1759499031.869563436 nordugrid-arc-7.1.1/include/arc/message/Service.h0000644000175000002070000000006315067751327022641 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/message/Service.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/delegation0000644000000000000000000000013215067751427017602 xustar0030 mtime=1759499031.831465617 30 atime=1759499034.762510154 30 ctime=1759499031.831465617 nordugrid-arc-7.1.1/include/arc/delegation/0000755000175000002070000000000015067751427021561 
5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/include/arc/delegation/PaxHeaders/DelegationInterface.h0000644000000000000000000000013115067751327023722 xustar0030 mtime=1759498967.636490171 29 atime=1759498967.80749277 30 ctime=1759499031.832557138 nordugrid-arc-7.1.1/include/arc/delegation/DelegationInterface.h0000644000175000002070000000010215067751327025616 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/delegation/DelegationInterface.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/ws-addressing0000644000000000000000000000013215067751427020241 xustar0030 mtime=1759499031.927467076 30 atime=1759499034.762510154 30 ctime=1759499031.927467076 nordugrid-arc-7.1.1/include/arc/ws-addressing/0000755000175000002070000000000015067751427022220 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/include/arc/ws-addressing/PaxHeaders/WSA.h0000644000000000000000000000013215067751327021120 xustar0030 mtime=1759498967.641276717 30 atime=1759498967.808492785 30 ctime=1759499031.928667937 nordugrid-arc-7.1.1/include/arc/ws-addressing/WSA.h0000644000175000002070000000006515067751327023023 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/ws-addressing/WSA.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/URL.h0000644000000000000000000000013215067751327016356 xustar0030 mtime=1759498967.631490095 30 atime=1759498967.806492754 30 ctime=1759499031.723711109 nordugrid-arc-7.1.1/include/arc/URL.h0000644000175000002070000000005315067751327020256 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/URL.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/otokens0000644000000000000000000000013215067751427017151 xustar0030 mtime=1759499031.871466225 30 atime=1759499034.762510154 30 ctime=1759499031.871466225 nordugrid-arc-7.1.1/include/arc/otokens/0000755000175000002070000000000015067751427021130 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/include/arc/otokens/PaxHeaders/otokens.h0000644000000000000000000000013115067751327021057 xustar0030 mtime=1759498967.639266635 29 atime=1759498967.80749277 30 ctime=1759499031.872573672 nordugrid-arc-7.1.1/include/arc/otokens/otokens.h0000644000175000002070000000006315067751327022761 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/otokens/otokens.h" nordugrid-arc-7.1.1/include/arc/otokens/PaxHeaders/openid_metadata.h0000644000000000000000000000013115067751327022513 xustar0030 mtime=1759498967.638490202 29 atime=1759498967.80749277 30 ctime=1759499031.871019177 nordugrid-arc-7.1.1/include/arc/otokens/openid_metadata.h0000644000175000002070000000007315067751327024416 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/otokens/openid_metadata.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/compute0000644000000000000000000000013215067751427017143 xustar0030 mtime=1759499031.773464736 30 atime=1759499034.762510154 30 ctime=1759499031.773464736 nordugrid-arc-7.1.1/include/arc/compute/0000755000175000002070000000000015067751427021122 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/SubmissionStatus.h0000644000000000000000000000013215067751327022727 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.769078818 nordugrid-arc-7.1.1/include/arc/compute/SubmissionStatus.h0000644000175000002070000000007415067751327024632 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/SubmissionStatus.h" 
nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobControllerPlugin.h0000644000000000000000000000013215067751327023325 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.753442884 nordugrid-arc-7.1.1/include/arc/compute/JobControllerPlugin.h0000644000175000002070000000007715067751327025233 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobControllerPlugin.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/RSLParser.h0000644000000000000000000000013215067751327021205 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.766252709 nordugrid-arc-7.1.1/include/arc/compute/RSLParser.h0000644000175000002070000000006515067751327023110 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/RSLParser.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/GLUE2.h0000644000000000000000000000013215067751327020206 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.748835871 nordugrid-arc-7.1.1/include/arc/compute/GLUE2.h0000644000175000002070000000006115067751327022105 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/GLUE2.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobDescriptionParserPlugin.h0000644000000000000000000000013215067751327024642 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.756553218 nordugrid-arc-7.1.1/include/arc/compute/JobDescriptionParserPlugin.h0000644000175000002070000000010615067751327026541 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobDescriptionParserPlugin.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobInformationStorage.h0000644000000000000000000000013215067751327023635 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.758121469 nordugrid-arc-7.1.1/include/arc/compute/JobInformationStorage.h0000644000175000002070000000010115067751327025527 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobInformationStorage.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/GLUE2Entity.h0000644000000000000000000000013215067751327021403 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.750383571 nordugrid-arc-7.1.1/include/arc/compute/GLUE2Entity.h0000644000175000002070000000006715067751327023310 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/GLUE2Entity.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobDescription.h0000644000000000000000000000013215067751327022306 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.754464447 nordugrid-arc-7.1.1/include/arc/compute/JobDescription.h0000644000175000002070000000007215067751327024207 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobDescription.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/ComputingServiceRetriever.h0000644000000000000000000000013215067751327024546 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.739120477 nordugrid-arc-7.1.1/include/arc/compute/ComputingServiceRetriever.h0000644000175000002070000000010515067751327026444 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/ComputingServiceRetriever.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/EndpointQueryingStatus.h0000644000000000000000000000013215067751327024100 xustar0030 mtime=1759498967.633035048 30 
atime=1759498967.806492754 30 ctime=1759499031.742362297 nordugrid-arc-7.1.1/include/arc/compute/EndpointQueryingStatus.h0000644000175000002070000000010215067751327025773 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/EndpointQueryingStatus.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/Broker.h0000644000000000000000000000013215067751327020614 xustar0030 mtime=1759498967.632772835 30 atime=1759498967.806492754 30 ctime=1759499031.735844079 nordugrid-arc-7.1.1/include/arc/compute/Broker.h0000644000175000002070000000006215067751327022514 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Broker.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/SubmitterPlugin.h0000644000000000000000000000013215067751327022525 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.771713974 nordugrid-arc-7.1.1/include/arc/compute/SubmitterPlugin.h0000644000175000002070000000007315067751327024427 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/SubmitterPlugin.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobInformationStorageXML.h0000644000000000000000000000013215067751327024216 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.761160194 nordugrid-arc-7.1.1/include/arc/compute/JobInformationStorageXML.h0000644000175000002070000000010415067751327026113 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobInformationStorageXML.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/ExecutionTarget.h0000644000000000000000000000013215067751327022502 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.747350806 nordugrid-arc-7.1.1/include/arc/compute/ExecutionTarget.h0000644000175000002070000000007315067751327024404 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/ExecutionTarget.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/Job.h0000644000000000000000000000013215067751327020102 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.751853437 nordugrid-arc-7.1.1/include/arc/compute/Job.h0000644000175000002070000000005715067751327022006 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Job.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobSupervisor.h0000644000000000000000000000013115067751327022203 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 29 ctime=1759499031.76437453 nordugrid-arc-7.1.1/include/arc/compute/JobSupervisor.h0000644000175000002070000000007115067751327024104 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobSupervisor.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobInformationStorageSQLite.h0000644000000000000000000000013215067751327024717 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.759624175 nordugrid-arc-7.1.1/include/arc/compute/JobInformationStorageSQLite.h0000644000175000002070000000010715067751327026617 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobInformationStorageSQLite.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/Submitter.h0000644000000000000000000000013215067751327021346 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.770493631 nordugrid-arc-7.1.1/include/arc/compute/Submitter.h0000644000175000002070000000006515067751327023251 0ustar00mockbuildmock00000000000000#include 
"../../../src/hed/libs/compute/Submitter.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/EntityRetriever.h0000644000000000000000000000013215067751327022534 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.744098877 nordugrid-arc-7.1.1/include/arc/compute/EntityRetriever.h0000644000175000002070000000007315067751327024436 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/EntityRetriever.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/JobState.h0000644000000000000000000000013215067751327021103 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.762787735 nordugrid-arc-7.1.1/include/arc/compute/JobState.h0000644000175000002070000000006415067751327023005 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/JobState.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/WSCommonPlugin.h0000644000000000000000000000013015067751327022247 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 28 ctime=1759499031.7743146 nordugrid-arc-7.1.1/include/arc/compute/WSCommonPlugin.h0000644000175000002070000000007215067751327024152 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/WSCommonPlugin.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/Endpoint.h0000644000000000000000000000013115067751327021147 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 29 ctime=1759499031.74073697 nordugrid-arc-7.1.1/include/arc/compute/Endpoint.h0000644000175000002070000000006415067751327023052 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Endpoint.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/BrokerPlugin.h0000644000000000000000000000013215067751327021773 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.737528203 nordugrid-arc-7.1.1/include/arc/compute/BrokerPlugin.h0000644000175000002070000000007015067751327023672 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/BrokerPlugin.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/Software.h0000644000000000000000000000013215067751327021162 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.767666648 nordugrid-arc-7.1.1/include/arc/compute/Software.h0000644000175000002070000000006415067751327023064 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/Software.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/TestACCControl.h0000644000000000000000000000013215067751327022157 xustar0030 mtime=1759498967.633490126 30 atime=1759498967.806492754 30 ctime=1759499031.773099215 nordugrid-arc-7.1.1/include/arc/compute/TestACCControl.h0000644000175000002070000000007215067751327024060 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/TestACCControl.h" nordugrid-arc-7.1.1/include/arc/compute/PaxHeaders/EntityRetrieverPlugin.h0000644000000000000000000000013215067751327023713 xustar0030 mtime=1759498967.633035048 30 atime=1759498967.806492754 30 ctime=1759499031.745702375 nordugrid-arc-7.1.1/include/arc/compute/EntityRetrieverPlugin.h0000644000175000002070000000010115067751327025605 0ustar00mockbuildmock00000000000000#include "../../../src/hed/libs/compute/EntityRetrieverPlugin.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/Counter.h0000644000000000000000000000013115067751327017332 xustar0030 mtime=1759498967.630671696 30 atime=1759498967.806492754 29 ctime=1759499031.69891255 
nordugrid-arc-7.1.1/include/arc/Counter.h0000644000175000002070000000005715067751327021237 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/Counter.h" nordugrid-arc-7.1.1/include/arc/PaxHeaders/StringConv.h0000644000000000000000000000013215067751327020010 xustar0030 mtime=1759498967.631490095 30 atime=1759498967.806492754 30 ctime=1759499031.721038567 nordugrid-arc-7.1.1/include/arc/StringConv.h0000644000175000002070000000006215067751327021710 0ustar00mockbuildmock00000000000000#include "../../src/hed/libs/common/StringConv.h" nordugrid-arc-7.1.1/include/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327017032 xustar0030 mtime=1759498967.629490065 30 atime=1759498967.806492754 30 ctime=1759499031.686888406 nordugrid-arc-7.1.1/include/Makefile.am0000644000175000002070000000100115067751327020724 0ustar00mockbuildmock00000000000000HEADERFILESCHECK: ./$(DEPDIR)/HEADERFILES: HEADERFILESCHECK echo "HEADERFILES = \\" > HEADERFILES find $(srcdir) -name \*.h -a ! -name ArcVersion.h -print | sort | \ sed -e 's|^$(srcdir)/||' -e 's/$$/ \\/' >> HEADERFILES echo "./$(DEPDIR)/HEADERFILES" >> HEADERFILES if diff ./$(DEPDIR)/HEADERFILES HEADERFILES >/dev/null 2>&1 ; then \ rm -f HEADERFILES ; \ else \ mkdir -p ./$(DEPDIR) ; \ mv HEADERFILES ./$(DEPDIR)/HEADERFILES ; \ fi include ./$(DEPDIR)/HEADERFILES EXTRA_DIST = $(HEADERFILES) nordugrid-arc-7.1.1/include/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347017045 xustar0030 mtime=1759498983.045127311 30 atime=1759499020.578294625 30 ctime=1759499031.688204995 nordugrid-arc-7.1.1/include/Makefile.in0000644000175000002070000004522315067751347020755 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = include ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ 
ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = 
@GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = 
@datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ EXTRA_DIST = $(HEADERFILES) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign include/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am .PRECIOUS: Makefile HEADERFILESCHECK: ./$(DEPDIR)/HEADERFILES: HEADERFILESCHECK echo "HEADERFILES = \\" > HEADERFILES find $(srcdir) -name \*.h -a ! -name ArcVersion.h -print | sort | \ sed -e 's|^$(srcdir)/||' -e 's/$$/ \\/' >> HEADERFILES echo "./$(DEPDIR)/HEADERFILES" >> HEADERFILES if diff ./$(DEPDIR)/HEADERFILES HEADERFILES >/dev/null 2>&1 ; then \ rm -f HEADERFILES ; \ else \ mkdir -p ./$(DEPDIR) ; \ mv HEADERFILES ./$(DEPDIR)/HEADERFILES ; \ fi include ./$(DEPDIR)/HEADERFILES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/include/PaxHeaders/glibmm-compat.h0000644000000000000000000000013215067751327017677 xustar0030 mtime=1759498967.642338994 30 atime=1759498967.808492785 30 ctime=1759499031.940086637 nordugrid-arc-7.1.1/include/glibmm-compat.h0000644000175000002070000000064515067751327021606 0ustar00mockbuildmock00000000000000#ifdef HAVE_GLIBMM_268 #define FILE_TEST_IS_REGULAR FileTest::IS_REGULAR #define FILE_TEST_IS_SYMLINK FileTest::IS_SYMLINK #define FILE_TEST_IS_DIR FileTest::IS_DIR #define FILE_TEST_IS_EXECUTABLE FileTest::IS_EXECUTABLE #define FILE_TEST_EXISTS FileTest::EXISTS #define MODULE_BIND_LAZY Module::Flags::LAZY #define MODULE_BIND_LOCAL Module::Flags::LOCAL #define ModuleFlags Module::Flags #endif nordugrid-arc-7.1.1/include/PaxHeaders/.deps0000644000000000000000000000013215067751427015733 xustar0030 mtime=1759499031.940467273 30 atime=1759499034.762510154 30 ctime=1759499031.940467273 nordugrid-arc-7.1.1/include/.deps/0000755000175000002070000000000015067751427017712 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/include/.deps/PaxHeaders/HEADERFILES0000644000000000000000000000013215067751427017446 xustar0030 mtime=1759499031.650462867 30 atime=1759499031.655462943 30 ctime=1759499031.941727077 nordugrid-arc-7.1.1/include/.deps/HEADERFILES0000644000175000002070000001147515067751427021360 0ustar00mockbuildmock00000000000000HEADERFILES = \ arc/ArcConfig.h \ arc/ArcConfigFile.h \ arc/ArcConfigIni.h \ arc/ArcLocation.h \ arc/ArcRegex.h \ arc/Base64.h \ arc/CheckSum.h \ arc/Counter.h \ arc/DBInterface.h \ arc/DateTime.h \ arc/FileAccess.h \ arc/FileLock.h \ arc/FileUtils.h \ arc/GUID.h \ arc/HostnameResolver.h \ arc/IString.h \ arc/IniConfig.h \ arc/IntraProcessCounter.h \ arc/JSON.h \ arc/JobPerfLog.h \ arc/Logger.h \ arc/OptionParser.h \ arc/Profile.h \ arc/Run.h \ arc/StringConv.h \ arc/Thread.h \ arc/URL.h \ arc/User.h \ arc/UserConfig.h \ arc/Utils.h \ arc/Watchdog.h \ arc/XMLNode.h \ arc/communication/ClientInterface.h \ arc/communication/ClientSAML2SSO.h \ arc/communication/ClientX509Delegation.h \ arc/compute/Broker.h \ arc/compute/BrokerPlugin.h \ arc/compute/ComputingServiceRetriever.h \ arc/compute/Endpoint.h \ arc/compute/EndpointQueryingStatus.h \ arc/compute/EntityRetriever.h \ arc/compute/EntityRetrieverPlugin.h \ arc/compute/ExecutionTarget.h \ arc/compute/GLUE2.h \ arc/compute/GLUE2Entity.h \ arc/compute/Job.h \ arc/compute/JobControllerPlugin.h \ arc/compute/JobDescription.h \ arc/compute/JobDescriptionParserPlugin.h \ arc/compute/JobInformationStorage.h \ arc/compute/JobInformationStorageSQLite.h \ arc/compute/JobInformationStorageXML.h \ arc/compute/JobState.h \ arc/compute/JobSupervisor.h \ arc/compute/RSLParser.h \ arc/compute/Software.h \ arc/compute/SubmissionStatus.h \ arc/compute/Submitter.h \ arc/compute/SubmitterPlugin.h \ arc/compute/TestACCControl.h \ arc/compute/WSCommonPlugin.h \ arc/credential/CertUtil.h \ arc/credential/Credential.h \ arc/credential/NSSUtil.h \ arc/credential/PasswordSource.h \ arc/credential/Proxycertinfo.h \ arc/credential/VOMSAttribute.h \ arc/credential/VOMSConfig.h \ arc/credential/VOMSUtil.h \ arc/credentialstore/ClientVOMS.h \ arc/credentialstore/ClientVOMSRESTful.h \ arc/credentialstore/CredentialStore.h \ arc/crypto/OpenSSL.h \ arc/data-staging/DTR.h \ arc/data-staging/DTRList.h \ arc/data-staging/DTRStatus.h \ arc/data-staging/DataDelivery.h \ arc/data-staging/DataDeliveryComm.h \ arc/data-staging/Generator.h \ arc/data-staging/Processor.h \ arc/data-staging/Scheduler.h \ 
arc/data-staging/TransferShares.h \ arc/data/DataBuffer.h \ arc/data/DataCallback.h \ arc/data/DataExternalComm.h \ arc/data/DataExternalHelper.h \ arc/data/DataHandle.h \ arc/data/DataMover.h \ arc/data/DataPoint.h \ arc/data/DataPointDelegate.h \ arc/data/DataPointDirect.h \ arc/data/DataPointIndex.h \ arc/data/DataSpeed.h \ arc/data/DataStatus.h \ arc/data/FileCache.h \ arc/data/FileCacheHash.h \ arc/data/FileInfo.h \ arc/data/URLMap.h \ arc/delegation/DelegationInterface.h \ arc/external/cJSON/cJSON.h \ arc/globusutils/GSSCredential.h \ arc/globusutils/GlobusErrorUtils.h \ arc/globusutils/GlobusWorkarounds.h \ arc/infosys/InformationInterface.h \ arc/loader/FinderLoader.h \ arc/loader/Loader.h \ arc/loader/ModuleManager.h \ arc/loader/Plugin.h \ arc/message/MCC.h \ arc/message/MCCLoader.h \ arc/message/MCC_Status.h \ arc/message/Message.h \ arc/message/MessageAttributes.h \ arc/message/MessageAuth.h \ arc/message/PayloadRaw.h \ arc/message/PayloadSOAP.h \ arc/message/PayloadStream.h \ arc/message/Plexer.h \ arc/message/SOAPEnvelope.h \ arc/message/SOAPMessage.h \ arc/message/SecAttr.h \ arc/message/SecHandler.h \ arc/message/Service.h \ arc/otokens/openid_metadata.h \ arc/otokens/otokens.h \ arc/security/ArcPDP/EvaluationCtx.h \ arc/security/ArcPDP/Evaluator.h \ arc/security/ArcPDP/EvaluatorLoader.h \ arc/security/ArcPDP/PolicyParser.h \ arc/security/ArcPDP/PolicyStore.h \ arc/security/ArcPDP/Request.h \ arc/security/ArcPDP/RequestItem.h \ arc/security/ArcPDP/Response.h \ arc/security/ArcPDP/Result.h \ arc/security/ArcPDP/Source.h \ arc/security/ArcPDP/alg/AlgFactory.h \ arc/security/ArcPDP/alg/CombiningAlg.h \ arc/security/ArcPDP/alg/DenyOverridesAlg.h \ arc/security/ArcPDP/alg/OrderedAlg.h \ arc/security/ArcPDP/alg/PermitOverridesAlg.h \ arc/security/ArcPDP/attr/AnyURIAttribute.h \ arc/security/ArcPDP/attr/AttributeFactory.h \ arc/security/ArcPDP/attr/AttributeProxy.h \ arc/security/ArcPDP/attr/AttributeValue.h \ arc/security/ArcPDP/attr/BooleanAttribute.h \ arc/security/ArcPDP/attr/DateTimeAttribute.h \ arc/security/ArcPDP/attr/GenericAttribute.h \ arc/security/ArcPDP/attr/RequestAttribute.h \ arc/security/ArcPDP/attr/StringAttribute.h \ arc/security/ArcPDP/attr/X500NameAttribute.h \ arc/security/ArcPDP/fn/EqualFunction.h \ arc/security/ArcPDP/fn/FnFactory.h \ arc/security/ArcPDP/fn/Function.h \ arc/security/ArcPDP/fn/InRangeFunction.h \ arc/security/ArcPDP/fn/MatchFunction.h \ arc/security/ArcPDP/policy/Policy.h \ arc/security/ClassLoader.h \ arc/security/PDP.h \ arc/security/Security.h \ arc/ws-addressing/WSA.h \ arc/ws-security/SAMLToken.h \ arc/ws-security/UsernameToken.h \ arc/ws-security/X509Token.h \ arc/xmlsec/XMLSecNode.h \ arc/xmlsec/XmlSecUtils.h \ arc/xmlsec/saml_util.h \ glibmm-compat.h \ ./.deps/HEADERFILES nordugrid-arc-7.1.1/include/PaxHeaders/README0000644000000000000000000000013215067751327015656 xustar0030 mtime=1759498967.630465473 30 atime=1759498967.806492754 30 ctime=1759499031.689333784 nordugrid-arc-7.1.1/include/README0000644000175000002070000000200315067751327017553 0ustar00mockbuildmock00000000000000 This directory contains a tree of header files referring to files with the same names located in the source directory src/ - the actual header files. The referred files constitute the API of the ARC HED software. The location of the files in this tree is defined by how the API is presented to an external developer. The actual header files are located next to the corresponding source files, and their location is defined by the convenience of the ARC HED developers.
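As an illustration of this layout, each entry in the tree is a one-line redirect stub; for example, include/arc/URL.h above contains only

    #include "../../src/hed/libs/common/URL.h"

so external code can use the public layout whether it is built in-tree or against the installed headers. A minimal consumer sketch follows (hypothetical file name and build command, not part of the distribution; it assumes the Arc::URL class from the common library and an include path pointing either at this tree or at the installed prefix):

    // demo.cpp - hypothetical sketch, not shipped with the package
    #include <arc/URL.h>   // resolved through the one-line redirect stub
    #include <iostream>

    int main() {
      // Arc::URL is declared in the real header,
      // src/hed/libs/common/URL.h, reached through the stub above
      Arc::URL url("https://example.org/path");
      std::cout << url.Host() << std::endl;   // expected to print "example.org"
      return 0;
    }

This would be compiled with the include path pointing at either location, e.g. g++ -I<prefix>/include demo.cpp -larccommon (library name assumed; see the ARCCOMMON_* variables above).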
These files are not used during the installation procedure. Upon installation the actual header files are installed under the same layout as the files in this directory. The files in this directory are used throughout all ARC code. All source and header files of the entire source tree must refer to the actual header files through these ones. Code built outside the source tree should refer either to the installed actual header files or to those located in this directory. Because the two layouts are identical, switching between the two options does not require changes in the source and header files of the code; only the building procedure needs to be changed. nordugrid-arc-7.1.1/PaxHeaders/ABOUT-NLS0000644000000000000000000000013215067751331014575 xustar0030 mtime=1759498969.734941551 30 atime=1759498969.733522036 30 ctime=1759499024.696544715 nordugrid-arc-7.1.1/ABOUT-NLS0000644000175000002070000022532615067751331016505 0ustar00mockbuildmock000000000000001 Notes on the Free Translation Project *************************************** Free software is going international! The Free Translation Project is a way to get maintainers of free software, translators, and users all together, so that free software will gradually become able to speak many languages. A few packages already provide translations for their messages. If you found this `ABOUT-NLS' file inside a distribution, you may assume that the distributed package does use GNU `gettext' internally, itself available at your nearest GNU archive site. But you do _not_ need to install GNU `gettext' prior to configuring, installing or using this package with messages translated. Installers will find here some useful hints. These notes also explain how users should proceed for getting the programs to use the available translations. They tell how people wanting to contribute and work on translations can contact the appropriate team. When reporting bugs in the `intl/' directory or bugs which may be related to internationalization, you should tell about the version of `gettext' which is used. The information can be found in the `intl/VERSION' file, in internationalized packages. 1.1 Quick configuration advice ============================== If you want to exploit the full power of internationalization, you should configure it using ./configure --with-included-gettext to force usage of internationalizing routines provided within this package, despite the existence of internationalizing capabilities in the operating system where this package is being installed. So far, only the `gettext' implementation in the GNU C library version 2 provides as many features (such as locale alias, message inheritance, automatic charset conversion or plural form handling) as the implementation here. It is also not possible to offer this additional functionality on top of a `catgets' implementation. Future versions of GNU `gettext' will very likely convey even more functionality. So it might be a good idea to change to GNU `gettext' as soon as possible. So you need _not_ provide this option if you are using GNU libc 2 or you have installed a recent copy of the GNU gettext package with the included `libintl'. 1.2 INSTALL Matters =================== Some packages are "localizable" when properly installed; the programs they contain can be made to speak your own native language. Most such packages use GNU `gettext'. Other packages have their own ways to internationalization, predating GNU `gettext'. By default, this package will be installed to allow translation of messages.
It will automatically detect whether the system already provides the GNU `gettext' functions. If not, the included GNU `gettext' library will be used. This library is wholly contained within this package, usually in the `intl/' subdirectory, so prior installation of the GNU `gettext' package is _not_ required. Installers may use special options at configuration time for changing the default behaviour. The commands: ./configure --with-included-gettext ./configure --disable-nls will, respectively, bypass any pre-existing `gettext' to use the internationalizing routines provided within this package, or else, _totally_ disable translation of messages. When you already have GNU `gettext' installed on your system and run configure without an option for your new package, `configure' will probably detect the previously built and installed `libintl.a' file and will decide to use this. This might not be desirable. You should use the more recent version of the GNU `gettext' library. I.e. if the file `intl/VERSION' shows that the library which comes with this package is more recent, you should use ./configure --with-included-gettext to prevent auto-detection. The configuration process will not test for the `catgets' function and therefore it will not be used. The reason is that even an emulation of `gettext' on top of `catgets' could not provide all the extensions of the GNU `gettext' library. Internationalized packages usually have many `po/LL.po' files, where LL gives an ISO 639 two-letter code identifying the language. Unless translations have been forbidden at `configure' time by using the `--disable-nls' switch, all available translations are installed together with the package. However, the environment variable `LINGUAS' may be set, prior to configuration, to limit the installed set. `LINGUAS' should then contain a space-separated list of two-letter codes, stating which languages are allowed. 1.3 Using This Package ====================== As a user, if your language has been installed for this package, you only have to set the `LANG' environment variable to the appropriate `LL_CC' combination. If you happen to have the `LC_ALL' or some other `LC_xxx' environment variables set, you should unset them before setting `LANG', otherwise the setting of `LANG' will not have the desired effect. Here `LL' is an ISO 639 two-letter language code, and `CC' is an ISO 3166 two-letter country code. For example, let's suppose that you speak German and live in Germany. At the shell prompt, merely execute `setenv LANG de_DE' (in `csh'), `export LANG; LANG=de_DE' (in `sh') or `export LANG=de_DE' (in `bash'). This can be done from your `.login' or `.profile' file, once and for all. You might think that the country code specification is redundant. But in fact, some languages have dialects in different countries. For example, `de_AT' is used for Austria, and `pt_BR' for Brazil. The country code serves to distinguish the dialects. The locale naming convention of `LL_CC', with `LL' denoting the language and `CC' denoting the country, is the one used on systems based on GNU libc. On other systems, some variations of this scheme are used, such as `LL' or `LL_CC.ENCODING'. You can get the list of locales supported by your system for your language by running the command `locale -a | grep '^LL''. Not all programs have translations for all languages. By default, an English message is shown in place of a nonexistent translation. If you understand other languages, you can set up a priority list of languages. 
This is done through a different environment variable, called `LANGUAGE'. GNU `gettext' gives preference to `LANGUAGE' over `LANG' for the purpose of message handling, but you still need to have `LANG' set to the primary language; this is required by other parts of the system libraries. For example, some Swedish users who would rather read translations in German than in English when Swedish is not available set `LANGUAGE' to `sv:de' while leaving `LANG' set to `sv_SE'. Special advice for Norwegian users: The language code for Norwegian bokmål changed from `no' to `nb' recently (in 2003). During the transition period, while some message catalogs for this language are installed under `nb' and some older ones under `no', it's recommended for Norwegian users to set `LANGUAGE' to `nb:no' so that both newer and older translations are used. In the `LANGUAGE' environment variable, but not in the `LANG' environment variable, `LL_CC' combinations can be abbreviated as `LL' to denote the language's main dialect. For example, `de' is equivalent to `de_DE' (German as spoken in Germany), and `pt' to `pt_PT' (Portuguese as spoken in Portugal) in this context. 1.4 Translating Teams ===================== For the Free Translation Project to be a success, we need interested people who like their own language and write it well, and who are also able to synergize with other translators speaking the same language. Each translation team has its own mailing list. The up-to-date list of teams can be found at the Free Translation Project's homepage, `http://translationproject.org/', in the "Teams" area. If you'd like to volunteer to _work_ at translating messages, you should become a member of the translating team for your own language. The subscribing address is _not_ the same as the list itself; it has `-request' appended. For example, speakers of Swedish can send a message to `sv-request@li.org', having this message body: subscribe Keep in mind that team members are expected to participate _actively_ in translations, or at solving translational difficulties, rather than merely lurking around. If your team does not exist yet and you want to start one, or if you are unsure about what to do or how to get started, please write to `coordinator@translationproject.org' to reach the coordinator for all translator teams. The English team is special. It works at improving and uniformizing the terminology in use. Proven linguistic skills are praised more than programming skills, here. 1.5 Available Packages ====================== Languages are not equally supported in all packages. The following matrix shows the current state of internationalization, as of November 2007. The matrix shows, for each package, for which languages PO files have been submitted to translation coordination, with a translation percentage of at least 50%. 
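(As a brief practical aside before the matrix: the `LANG'/`LANGUAGE'
mechanics of section 1.3 reduce, for the Swedish example given there, to a
shell fragment like the following; it is a sketch for `sh'-style shells and
the language pair is only an example.)

   # Primary locale Swedish; prefer German over English whenever a
   # Swedish message catalog is not available.
   LANG=sv_SE;     export LANG
   LANGUAGE=sv:de; export LANGUAGE

Whether a given translation of this package actually reached a system can
be checked just as directly; the path below assumes the default
installation prefix:

   # List the installed message catalogs for this package.
   ls /usr/share/locale/*/LC_MESSAGES/nordugrid-arc.mo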
Ready PO files af am ar az be bg bs ca cs cy da de el en en_GB eo +----------------------------------------------------+ Compendium | [] [] [] [] | a2ps | [] [] [] [] [] | aegis | () | ant-phone | () | anubis | [] | ap-utils | | aspell | [] [] [] [] [] | bash | [] | bfd | | bibshelf | [] | binutils | | bison | [] [] | bison-runtime | [] | bluez-pin | [] [] [] [] [] | cflow | [] | clisp | [] [] [] | console-tools | [] [] | coreutils | [] [] [] [] | cpio | | cpplib | [] [] [] | cryptonit | [] | dialog | | diffutils | [] [] [] [] [] [] | doodle | [] | e2fsprogs | [] [] | enscript | [] [] [] [] | fetchmail | [] [] () [] [] | findutils | [] | findutils_stable | [] [] [] | flex | [] [] [] | fslint | | gas | | gawk | [] [] [] | gcal | [] | gcc | [] | gettext-examples | [] [] [] [] [] | gettext-runtime | [] [] [] [] [] | gettext-tools | [] [] | gip | [] | gliv | [] [] | glunarclock | [] | gmult | [] [] | gnubiff | () | gnucash | [] [] () () [] | gnuedu | | gnulib | [] | gnunet | | gnunet-gtk | | gnutls | [] | gpe-aerial | [] [] | gpe-beam | [] [] | gpe-calendar | | gpe-clock | [] [] | gpe-conf | [] [] | gpe-contacts | | gpe-edit | [] | gpe-filemanager | | gpe-go | [] | gpe-login | [] [] | gpe-ownerinfo | [] [] | gpe-package | | gpe-sketchbook | [] [] | gpe-su | [] [] | gpe-taskmanager | [] [] | gpe-timesheet | [] | gpe-today | [] [] | gpe-todo | | gphoto2 | [] [] [] [] | gprof | [] [] | gpsdrive | | gramadoir | [] [] | grep | [] [] | gretl | () | gsasl | | gss | | gst-plugins-bad | [] [] | gst-plugins-base | [] [] | gst-plugins-good | [] [] [] | gst-plugins-ugly | [] [] | gstreamer | [] [] [] [] [] [] [] | gtick | () | gtkam | [] [] [] [] | gtkorphan | [] [] | gtkspell | [] [] [] [] | gutenprint | [] | hello | [] [] [] [] [] | herrie | [] | hylafax | | idutils | [] [] | indent | [] [] [] [] | iso_15924 | | iso_3166 | [] [] [] [] [] [] [] [] [] [] [] | iso_3166_2 | | iso_4217 | [] [] [] | iso_639 | [] [] [] [] | jpilot | [] | jtag | | jwhois | | kbd | [] [] [] [] | keytouch | [] [] | keytouch-editor | [] | keytouch-keyboa... | [] | latrine | () | ld | [] | leafpad | [] [] [] [] [] | libc | [] [] [] [] | libexif | [] | libextractor | [] | libgpewidget | [] [] [] | libgpg-error | [] | libgphoto2 | [] [] | libgphoto2_port | [] [] | libgsasl | | libiconv | [] [] | libidn | [] [] [] | lifelines | [] () | lilypond | [] | lingoteach | | lprng | | lynx | [] [] [] [] | m4 | [] [] [] [] | mailfromd | | mailutils | [] | make | [] [] | man-db | [] [] [] | minicom | [] [] [] | nano | [] [] [] | opcodes | [] | parted | [] [] | pilot-qof | | popt | [] [] [] | psmisc | [] | pwdutils | | qof | | radius | [] | recode | [] [] [] [] [] [] | rpm | [] | screem | | scrollkeeper | [] [] [] [] [] [] [] [] | sed | [] [] [] | shared-mime-info | [] [] [] [] () [] [] [] | sharutils | [] [] [] [] [] [] | shishi | | skencil | [] () | solfege | | soundtracker | [] [] | sp | [] | system-tools-ba... | [] [] [] [] [] [] [] [] [] | tar | [] [] | texinfo | [] [] [] | tin | () () | tuxpaint | [] [] [] [] [] [] | unicode-han-tra... | | unicode-transla... 
| | util-linux | [] [] [] [] | util-linux-ng | [] [] [] [] | vorbis-tools | [] | wastesedge | () | wdiff | [] [] [] [] | wget | [] [] [] | xchat | [] [] [] [] [] [] [] | xkeyboard-config | [] | xpad | [] [] [] | +----------------------------------------------------+ af am ar az be bg bs ca cs cy da de el en en_GB eo 6 0 2 1 8 26 2 40 48 2 56 88 15 1 15 18 es et eu fa fi fr ga gl gu he hi hr hu id is it +--------------------------------------------------+ Compendium | [] [] [] [] [] | a2ps | [] [] [] () | aegis | | ant-phone | [] | anubis | [] | ap-utils | [] [] | aspell | [] [] [] | bash | [] | bfd | [] [] | bibshelf | [] [] [] | binutils | [] [] [] | bison | [] [] [] [] [] [] | bison-runtime | [] [] [] [] [] | bluez-pin | [] [] [] [] [] | cflow | [] | clisp | [] [] | console-tools | | coreutils | [] [] [] [] [] [] | cpio | [] [] [] | cpplib | [] [] | cryptonit | [] | dialog | [] [] [] | diffutils | [] [] [] [] [] [] [] [] [] | doodle | [] [] | e2fsprogs | [] [] [] | enscript | [] [] [] | fetchmail | [] | findutils | [] [] [] | findutils_stable | [] [] [] [] | flex | [] [] [] | fslint | | gas | [] [] | gawk | [] [] [] [] () | gcal | [] [] | gcc | [] | gettext-examples | [] [] [] [] [] [] [] | gettext-runtime | [] [] [] [] [] [] | gettext-tools | [] [] [] [] | gip | [] [] [] [] | gliv | () | glunarclock | [] [] [] | gmult | [] [] [] | gnubiff | () () | gnucash | () () () | gnuedu | [] | gnulib | [] [] [] | gnunet | | gnunet-gtk | | gnutls | | gpe-aerial | [] [] | gpe-beam | [] [] | gpe-calendar | | gpe-clock | [] [] [] [] | gpe-conf | [] | gpe-contacts | [] [] | gpe-edit | [] [] [] [] | gpe-filemanager | [] | gpe-go | [] [] [] | gpe-login | [] [] [] | gpe-ownerinfo | [] [] [] [] [] | gpe-package | [] | gpe-sketchbook | [] [] | gpe-su | [] [] [] [] | gpe-taskmanager | [] [] [] | gpe-timesheet | [] [] [] [] | gpe-today | [] [] [] [] | gpe-todo | [] | gphoto2 | [] [] [] [] [] | gprof | [] [] [] [] [] | gpsdrive | [] | gramadoir | [] [] | grep | [] [] [] | gretl | [] [] [] () | gsasl | [] [] | gss | [] [] | gst-plugins-bad | [] [] [] [] | gst-plugins-base | [] [] [] [] | gst-plugins-good | [] [] [] [] [] | gst-plugins-ugly | [] [] [] [] | gstreamer | [] [] [] | gtick | [] [] [] | gtkam | [] [] [] [] | gtkorphan | [] [] | gtkspell | [] [] [] [] [] [] [] | gutenprint | [] | hello | [] [] [] [] [] [] [] [] [] [] [] [] [] | herrie | [] | hylafax | | idutils | [] [] [] [] [] | indent | [] [] [] [] [] [] [] [] [] [] | iso_15924 | [] | iso_3166 | [] [] [] [] [] [] [] [] [] [] [] [] [] | iso_3166_2 | [] | iso_4217 | [] [] [] [] [] [] | iso_639 | [] [] [] [] [] [] | jpilot | [] [] | jtag | [] | jwhois | [] [] [] [] [] | kbd | [] [] | keytouch | [] [] [] | keytouch-editor | [] | keytouch-keyboa... 
| [] [] | latrine | [] [] | ld | [] [] [] [] | leafpad | [] [] [] [] [] [] | libc | [] [] [] [] [] | libexif | [] | libextractor | [] | libgpewidget | [] [] [] [] [] | libgpg-error | [] | libgphoto2 | [] [] [] | libgphoto2_port | [] [] | libgsasl | [] [] | libiconv | [] [] [] | libidn | [] [] | lifelines | () | lilypond | [] [] [] | lingoteach | [] [] [] | lprng | | lynx | [] [] [] | m4 | [] [] [] [] | mailfromd | | mailutils | [] [] | make | [] [] [] [] [] [] [] [] | man-db | [] | minicom | [] [] [] [] | nano | [] [] [] [] [] [] [] | opcodes | [] [] [] [] | parted | [] [] [] | pilot-qof | | popt | [] [] [] [] | psmisc | [] [] | pwdutils | | qof | [] | radius | [] [] | recode | [] [] [] [] [] [] [] [] | rpm | [] [] | screem | | scrollkeeper | [] [] [] | sed | [] [] [] [] [] | shared-mime-info | [] [] [] [] [] [] | sharutils | [] [] [] [] [] [] [] [] | shishi | [] | skencil | [] [] | solfege | [] | soundtracker | [] [] [] | sp | [] | system-tools-ba... | [] [] [] [] [] [] [] [] [] | tar | [] [] [] [] [] | texinfo | [] [] [] | tin | [] () | tuxpaint | [] [] | unicode-han-tra... | | unicode-transla... | [] [] | util-linux | [] [] [] [] [] [] [] | util-linux-ng | [] [] [] [] [] [] [] | vorbis-tools | | wastesedge | () | wdiff | [] [] [] [] [] [] [] [] | wget | [] [] [] [] [] [] [] [] | xchat | [] [] [] [] [] [] [] | xkeyboard-config | [] [] [] [] | xpad | [] [] [] | +--------------------------------------------------+ es et eu fa fi fr ga gl gu he hi hr hu id is it 85 22 14 2 48 101 61 12 2 8 2 6 53 29 1 52 ja ka ko ku ky lg lt lv mk mn ms mt nb ne nl nn +--------------------------------------------------+ Compendium | [] | a2ps | () [] [] | aegis | () | ant-phone | [] | anubis | [] [] [] | ap-utils | [] | aspell | [] [] | bash | [] | bfd | | bibshelf | [] | binutils | | bison | [] [] [] | bison-runtime | [] [] [] | bluez-pin | [] [] [] | cflow | | clisp | [] | console-tools | | coreutils | [] | cpio | [] | cpplib | [] | cryptonit | [] | dialog | [] [] | diffutils | [] [] [] | doodle | | e2fsprogs | [] | enscript | [] | fetchmail | [] [] | findutils | [] | findutils_stable | [] | flex | [] [] | fslint | | gas | | gawk | [] [] | gcal | | gcc | | gettext-examples | [] [] [] | gettext-runtime | [] [] [] | gettext-tools | [] [] | gip | [] [] | gliv | [] | glunarclock | [] [] | gmult | [] [] [] | gnubiff | | gnucash | () () () | gnuedu | | gnulib | [] [] | gnunet | | gnunet-gtk | | gnutls | [] | gpe-aerial | [] | gpe-beam | [] | gpe-calendar | [] | gpe-clock | [] [] [] | gpe-conf | [] [] [] | gpe-contacts | [] | gpe-edit | [] [] [] | gpe-filemanager | [] [] | gpe-go | [] [] [] | gpe-login | [] [] [] | gpe-ownerinfo | [] [] | gpe-package | [] [] | gpe-sketchbook | [] [] | gpe-su | [] [] [] | gpe-taskmanager | [] [] [] [] | gpe-timesheet | [] | gpe-today | [] [] | gpe-todo | [] | gphoto2 | [] [] | gprof | [] | gpsdrive | [] | gramadoir | () | grep | [] [] | gretl | | gsasl | [] | gss | | gst-plugins-bad | [] | gst-plugins-base | [] | gst-plugins-good | [] | gst-plugins-ugly | [] | gstreamer | [] | gtick | [] | gtkam | [] [] | gtkorphan | [] | gtkspell | [] [] | gutenprint | [] | hello | [] [] [] [] [] [] [] | herrie | [] | hylafax | | idutils | [] | indent | [] [] | iso_15924 | [] | iso_3166 | [] [] [] [] [] [] [] [] | iso_3166_2 | [] | iso_4217 | [] [] [] | iso_639 | [] [] [] [] | jpilot | () () | jtag | | jwhois | [] | kbd | [] | keytouch | [] | keytouch-editor | [] | keytouch-keyboa... 
| | latrine | [] | ld | | leafpad | [] [] | libc | [] [] [] | libexif | | libextractor | | libgpewidget | [] | libgpg-error | | libgphoto2 | [] | libgphoto2_port | [] | libgsasl | [] | libiconv | [] | libidn | [] [] | lifelines | [] | lilypond | [] | lingoteach | [] | lprng | | lynx | [] [] | m4 | [] [] | mailfromd | | mailutils | | make | [] [] [] | man-db | | minicom | [] | nano | [] [] [] | opcodes | [] | parted | [] [] | pilot-qof | | popt | [] [] [] | psmisc | [] [] [] | pwdutils | | qof | | radius | | recode | [] | rpm | [] [] | screem | [] | scrollkeeper | [] [] [] [] | sed | [] [] | shared-mime-info | [] [] [] [] [] [] [] | sharutils | [] [] | shishi | | skencil | | solfege | () () | soundtracker | | sp | () | system-tools-ba... | [] [] [] [] | tar | [] [] [] | texinfo | [] [] | tin | | tuxpaint | () [] [] | unicode-han-tra... | | unicode-transla... | | util-linux | [] [] | util-linux-ng | [] [] | vorbis-tools | | wastesedge | [] | wdiff | [] [] | wget | [] [] | xchat | [] [] [] [] | xkeyboard-config | [] [] [] | xpad | [] [] [] | +--------------------------------------------------+ ja ka ko ku ky lg lt lv mk mn ms mt nb ne nl nn 51 2 25 3 2 0 6 0 2 2 20 0 11 1 103 6 or pa pl pt pt_BR rm ro ru rw sk sl sq sr sv ta +--------------------------------------------------+ Compendium | [] [] [] [] [] | a2ps | () [] [] [] [] [] [] | aegis | () () | ant-phone | [] [] | anubis | [] [] [] | ap-utils | () | aspell | [] [] [] | bash | [] [] | bfd | | bibshelf | [] | binutils | [] [] | bison | [] [] [] [] [] | bison-runtime | [] [] [] [] [] | bluez-pin | [] [] [] [] [] [] [] [] [] | cflow | [] | clisp | [] | console-tools | [] | coreutils | [] [] [] [] | cpio | [] [] [] | cpplib | [] | cryptonit | [] [] | dialog | [] | diffutils | [] [] [] [] [] [] | doodle | [] [] | e2fsprogs | [] [] | enscript | [] [] [] [] [] | fetchmail | [] [] [] | findutils | [] [] [] | findutils_stable | [] [] [] [] [] [] | flex | [] [] [] [] [] | fslint | [] | gas | | gawk | [] [] [] [] | gcal | [] | gcc | [] [] | gettext-examples | [] [] [] [] [] [] [] [] | gettext-runtime | [] [] [] [] [] [] [] [] | gettext-tools | [] [] [] [] [] [] [] | gip | [] [] [] [] | gliv | [] [] [] [] [] [] | glunarclock | [] [] [] [] [] [] | gmult | [] [] [] [] | gnubiff | () [] | gnucash | () [] | gnuedu | | gnulib | [] [] [] | gnunet | | gnunet-gtk | [] | gnutls | [] [] | gpe-aerial | [] [] [] [] [] [] [] | gpe-beam | [] [] [] [] [] [] [] | gpe-calendar | [] [] [] [] | gpe-clock | [] [] [] [] [] [] [] [] | gpe-conf | [] [] [] [] [] [] [] | gpe-contacts | [] [] [] [] [] | gpe-edit | [] [] [] [] [] [] [] [] [] | gpe-filemanager | [] [] | gpe-go | [] [] [] [] [] [] [] [] | gpe-login | [] [] [] [] [] [] [] [] | gpe-ownerinfo | [] [] [] [] [] [] [] [] | gpe-package | [] [] | gpe-sketchbook | [] [] [] [] [] [] [] [] | gpe-su | [] [] [] [] [] [] [] [] | gpe-taskmanager | [] [] [] [] [] [] [] [] | gpe-timesheet | [] [] [] [] [] [] [] [] | gpe-today | [] [] [] [] [] [] [] [] | gpe-todo | [] [] [] [] | gphoto2 | [] [] [] [] [] [] | gprof | [] [] [] | gpsdrive | [] [] | gramadoir | [] [] | grep | [] [] [] [] | gretl | [] [] [] | gsasl | [] [] [] | gss | [] [] [] [] | gst-plugins-bad | [] [] [] | gst-plugins-base | [] [] | gst-plugins-good | [] [] | gst-plugins-ugly | [] [] [] | gstreamer | [] [] [] [] | gtick | [] | gtkam | [] [] [] [] [] | gtkorphan | [] | gtkspell | [] [] [] [] [] [] [] [] | gutenprint | [] | hello | [] [] [] [] [] [] [] [] | herrie | [] [] [] | hylafax | | idutils | [] [] [] [] [] | indent | [] [] [] [] [] [] [] | iso_15924 | | 
iso_3166 | [] [] [] [] [] [] [] [] [] [] [] [] [] | iso_3166_2 | | iso_4217 | [] [] [] [] [] [] [] | iso_639 | [] [] [] [] [] [] [] | jpilot | | jtag | [] | jwhois | [] [] [] [] | kbd | [] [] [] | keytouch | [] | keytouch-editor | [] | keytouch-keyboa... | [] | latrine | | ld | [] | leafpad | [] [] [] [] [] [] | libc | [] [] [] [] | libexif | [] [] | libextractor | [] [] | libgpewidget | [] [] [] [] [] [] [] [] | libgpg-error | [] [] [] | libgphoto2 | [] | libgphoto2_port | [] [] [] | libgsasl | [] [] [] [] | libiconv | [] [] [] | libidn | [] [] () | lifelines | [] [] | lilypond | | lingoteach | [] | lprng | [] | lynx | [] [] [] | m4 | [] [] [] [] [] | mailfromd | [] | mailutils | [] [] [] | make | [] [] [] [] | man-db | [] [] [] [] | minicom | [] [] [] [] [] | nano | [] [] [] [] | opcodes | [] [] | parted | [] | pilot-qof | | popt | [] [] [] [] | psmisc | [] [] | pwdutils | [] [] | qof | [] [] | radius | [] [] | recode | [] [] [] [] [] [] [] | rpm | [] [] [] [] | screem | | scrollkeeper | [] [] [] [] [] [] [] | sed | [] [] [] [] [] [] [] [] [] | shared-mime-info | [] [] [] [] [] [] | sharutils | [] [] [] [] | shishi | [] | skencil | [] [] [] | solfege | [] | soundtracker | [] [] | sp | | system-tools-ba... | [] [] [] [] [] [] [] [] [] | tar | [] [] [] [] | texinfo | [] [] [] [] | tin | () | tuxpaint | [] [] [] [] [] [] | unicode-han-tra... | | unicode-transla... | | util-linux | [] [] [] [] | util-linux-ng | [] [] [] [] | vorbis-tools | [] | wastesedge | | wdiff | [] [] [] [] [] [] [] | wget | [] [] [] [] | xchat | [] [] [] [] [] [] [] | xkeyboard-config | [] [] [] | xpad | [] [] [] | +--------------------------------------------------+ or pa pl pt pt_BR rm ro ru rw sk sl sq sr sv ta 0 5 77 31 53 4 58 72 3 45 46 9 45 122 3 tg th tk tr uk ven vi wa xh zh_CN zh_HK zh_TW zu +---------------------------------------------------+ Compendium | [] [] [] [] | 19 a2ps | [] [] [] | 19 aegis | [] | 1 ant-phone | [] [] | 6 anubis | [] [] [] | 11 ap-utils | () [] | 4 aspell | [] [] [] | 16 bash | [] | 6 bfd | | 2 bibshelf | [] | 7 binutils | [] [] [] [] | 9 bison | [] [] [] [] | 20 bison-runtime | [] [] [] [] | 18 bluez-pin | [] [] [] [] [] [] | 28 cflow | [] [] | 5 clisp | | 9 console-tools | [] [] | 5 coreutils | [] [] [] | 18 cpio | [] [] [] [] | 11 cpplib | [] [] [] [] [] | 12 cryptonit | [] | 6 dialog | [] [] [] | 9 diffutils | [] [] [] [] [] | 29 doodle | [] | 6 e2fsprogs | [] [] | 10 enscript | [] [] [] | 16 fetchmail | [] [] | 12 findutils | [] [] [] | 11 findutils_stable | [] [] [] [] | 18 flex | [] [] | 15 fslint | [] | 2 gas | [] | 3 gawk | [] [] [] | 16 gcal | [] | 5 gcc | [] [] [] | 7 gettext-examples | [] [] [] [] [] [] | 29 gettext-runtime | [] [] [] [] [] [] | 28 gettext-tools | [] [] [] [] [] | 20 gip | [] [] | 13 gliv | [] [] | 11 glunarclock | [] [] [] | 15 gmult | [] [] [] [] | 16 gnubiff | [] | 2 gnucash | () [] | 5 gnuedu | [] | 2 gnulib | [] | 10 gnunet | | 0 gnunet-gtk | [] [] | 3 gnutls | | 4 gpe-aerial | [] [] | 14 gpe-beam | [] [] | 14 gpe-calendar | [] [] | 7 gpe-clock | [] [] [] [] | 21 gpe-conf | [] [] [] | 16 gpe-contacts | [] [] | 10 gpe-edit | [] [] [] [] [] | 22 gpe-filemanager | [] [] | 7 gpe-go | [] [] [] [] | 19 gpe-login | [] [] [] [] [] | 21 gpe-ownerinfo | [] [] [] [] | 21 gpe-package | [] | 6 gpe-sketchbook | [] [] | 16 gpe-su | [] [] [] [] | 21 gpe-taskmanager | [] [] [] [] | 21 gpe-timesheet | [] [] [] [] | 18 gpe-today | [] [] [] [] [] | 21 gpe-todo | [] [] | 8 gphoto2 | [] [] [] [] | 21 gprof | [] [] | 13 gpsdrive | [] | 5 gramadoir | [] | 7 grep | [] | 12 
gretl | | 6 gsasl | [] [] [] | 9 gss | [] | 7 gst-plugins-bad | [] [] [] | 13 gst-plugins-base | [] [] | 11 gst-plugins-good | [] [] [] [] [] | 16 gst-plugins-ugly | [] [] [] | 13 gstreamer | [] [] [] | 18 gtick | [] [] | 7 gtkam | [] | 16 gtkorphan | [] | 7 gtkspell | [] [] [] [] [] [] | 27 gutenprint | | 4 hello | [] [] [] [] [] | 38 herrie | [] [] | 8 hylafax | | 0 idutils | [] [] | 15 indent | [] [] [] [] [] | 28 iso_15924 | [] [] | 4 iso_3166 | [] [] [] [] [] [] [] [] [] | 54 iso_3166_2 | [] [] | 4 iso_4217 | [] [] [] [] [] | 24 iso_639 | [] [] [] [] [] | 26 jpilot | [] [] [] [] | 7 jtag | [] | 3 jwhois | [] [] [] | 13 kbd | [] [] [] | 13 keytouch | [] | 8 keytouch-editor | [] | 5 keytouch-keyboa... | [] | 5 latrine | [] [] | 5 ld | [] [] [] [] | 10 leafpad | [] [] [] [] [] | 24 libc | [] [] [] | 19 libexif | [] | 5 libextractor | [] | 5 libgpewidget | [] [] [] | 20 libgpg-error | [] | 6 libgphoto2 | [] [] | 9 libgphoto2_port | [] [] [] | 11 libgsasl | [] | 8 libiconv | [] [] | 11 libidn | [] [] | 11 lifelines | | 4 lilypond | [] | 6 lingoteach | [] | 6 lprng | [] | 2 lynx | [] [] [] | 15 m4 | [] [] [] | 18 mailfromd | [] [] | 3 mailutils | [] [] | 8 make | [] [] [] | 20 man-db | [] | 9 minicom | [] | 14 nano | [] [] [] | 20 opcodes | [] [] | 10 parted | [] [] [] | 11 pilot-qof | [] | 1 popt | [] [] [] [] | 18 psmisc | [] [] | 10 pwdutils | [] | 3 qof | [] | 4 radius | [] [] | 7 recode | [] [] [] | 25 rpm | [] [] [] [] | 13 screem | [] | 2 scrollkeeper | [] [] [] [] | 26 sed | [] [] [] [] | 23 shared-mime-info | [] [] [] | 29 sharutils | [] [] [] | 23 shishi | [] | 3 skencil | [] | 7 solfege | [] | 3 soundtracker | [] [] | 9 sp | [] | 3 system-tools-ba... | [] [] [] [] [] [] [] | 38 tar | [] [] [] | 17 texinfo | [] [] [] | 15 tin | | 1 tuxpaint | [] [] [] | 19 unicode-han-tra... | | 0 unicode-transla... | | 2 util-linux | [] [] [] | 20 util-linux-ng | [] [] [] | 20 vorbis-tools | [] [] | 4 wastesedge | | 1 wdiff | [] [] | 23 wget | [] [] [] | 20 xchat | [] [] [] [] | 29 xkeyboard-config | [] [] [] | 14 xpad | [] [] [] | 15 +---------------------------------------------------+ 76 teams tg th tk tr uk ven vi wa xh zh_CN zh_HK zh_TW zu 163 domains 0 3 1 74 51 0 143 21 1 57 7 45 0 2036 Some counters in the preceding matrix are higher than the number of visible blocks let us expect. This is because a few extra PO files are used for implementing regional variants of languages, or language dialects. For a PO file in the matrix above to be effective, the package to which it applies should also have been internationalized and distributed as such by its maintainer. There might be an observable lag between the mere existence a PO file and its wide availability in a distribution. If November 2007 seems to be old, you may fetch a more recent copy of this `ABOUT-NLS' file on most GNU archive sites. The most up-to-date matrix with full percentage details can be found at `http://translationproject.org/extra/matrix.html'. 1.6 Using `gettext' in new packages =================================== If you are writing a freely available program and want to internationalize it you are welcome to use GNU `gettext' in your package. Of course you have to respect the GNU Library General Public License which covers the use of the GNU `gettext' library. This means in particular that even non-free programs can use `libintl' as a shared library, whereas only free software can use `libintl' as a static library or use modified versions of `libintl'. 
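To give a flavour of the source-level side of such internationalization, a
minimal sketch for a shell program follows (assuming the gettext-runtime
tools are installed; the text domain `hello-example' and its catalog
location are hypothetical):

   #! /bin/sh
   # gettext.sh supplies helpers such as eval_gettext for shell scripts.
   . gettext.sh
   # The text domain names this program's message catalog, e.g.
   # /usr/share/locale/LL/LC_MESSAGES/hello-example.mo.
   TEXTDOMAIN=hello-example
   export TEXTDOMAIN
   # The English string doubles as the msgid and the fallback; the
   # translation is selected by LANG/LANGUAGE at run time.
   gettext "Hello, world"; echo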
Once the sources are changed appropriately and the setup can handle the use of `gettext', the only thing missing is the translations. The Free Translation Project is also available for packages which are not developed inside the GNU project. Therefore the information given above applies also to every other Free Software Project. Contact `coordinator@translationproject.org' to make the `.pot' files available to the translation teams. nordugrid-arc-7.1.1/PaxHeaders/missing0000644000000000000000000000013215067751346014750 xustar0030 mtime=1759498982.911463334 30 atime=1759498992.427866878 30 ctime=1759499024.707241606 nordugrid-arc-7.1.1/missing0000755000175000002070000001533615067751346016655 0ustar00mockbuildmock00000000000000#! /bin/sh # Common wrapper for a few potentially missing GNU programs. scriptversion=2018-03-07.03; # UTC # Copyright (C) 1996-2020 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try '$0 --help' for more information" exit 1 fi case $1 in --is-lightweight) # Used by our autoconf macros to check whether the available missing # script is modern enough. exit 0 ;; --run) # Back-compat with the calling convention used by older automake. shift ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Run 'PROGRAM [ARGUMENT]...', returning proper advice when this fails due to PROGRAM being missing or too old. Options: -h, --help display this help and exit -v, --version output version information and exit Supported PROGRAM values: aclocal autoconf autoheader autom4te automake makeinfo bison yacc flex lex help2man Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and 'g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: unknown '$1' option" echo 1>&2 "Try '$0 --help' for more information" exit 1 ;; esac # Run the given program, remember its exit status. "$@"; st=$? # If it succeeded, we are done. test $st -eq 0 && exit 0 # Also exit now if it failed (or wasn't found), and '--version' was # passed; such an option is passed most likely to detect whether the # program is present and works. case $2 in --version|--help) exit $st;; esac # Exit code 63 means version mismatch. This often happens when the user # tries to use an ancient version of a tool on a file that requires a # minimum version. if test $st -eq 63; then msg="probably too old" elif test $st -eq 127; then # Program was missing. msg="missing on your system" else # Program was found and executed, but failed. Give up. 
exit $st fi perl_URL=https://www.perl.org/ flex_URL=https://github.com/westes/flex gnu_software_URL=https://www.gnu.org/software program_details () { case $1 in aclocal|automake) echo "The '$1' program is part of the GNU Automake package:" echo "<$gnu_software_URL/automake>" echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/autoconf>" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; autoconf|autom4te|autoheader) echo "The '$1' program is part of the GNU Autoconf package:" echo "<$gnu_software_URL/autoconf/>" echo "It also requires GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; esac } give_advice () { # Normalize program name to check for. normalized_program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` printf '%s\n' "'$1' is $msg." configure_deps="'configure.ac' or m4 files included by 'configure.ac'" case $normalized_program in autoconf*) echo "You should only need it if you modified 'configure.ac'," echo "or m4 files included by it." program_details 'autoconf' ;; autoheader*) echo "You should only need it if you modified 'acconfig.h' or" echo "$configure_deps." program_details 'autoheader' ;; automake*) echo "You should only need it if you modified 'Makefile.am' or" echo "$configure_deps." program_details 'automake' ;; aclocal*) echo "You should only need it if you modified 'acinclude.m4' or" echo "$configure_deps." program_details 'aclocal' ;; autom4te*) echo "You might have modified some maintainer files that require" echo "the 'autom4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) echo "You should only need it if you modified a '.y' file." echo "You may want to install the GNU Bison package:" echo "<$gnu_software_URL/bison/>" ;; lex*|flex*) echo "You should only need it if you modified a '.l' file." echo "You may want to install the Fast Lexical Analyzer package:" echo "<$flex_URL>" ;; help2man*) echo "You should only need it if you modified a dependency" \ "of a man page." echo "You may want to install the GNU Help2man package:" echo "<$gnu_software_URL/help2man/>" ;; makeinfo*) echo "You should only need it if you modified a '.texi' file, or" echo "any other file indirectly affecting the aspect of the manual." echo "You might want to install the Texinfo package:" echo "<$gnu_software_URL/texinfo/>" echo "The spurious makeinfo call might also be the consequence of" echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" echo "want to install GNU make:" echo "<$gnu_software_URL/make/>" ;; *) echo "You might have modified some files without having the proper" echo "tools for further handling them. Check the 'README' file, it" echo "often tells you about the needed prerequisites for installing" echo "this package. You may also peek at any GNU archive site, in" echo "case some other package contains this missing '$1' program." ;; esac } give_advice "$1" | sed -e '1s/^/WARNING: /' \ -e '2,$s/^/ /' >&2 # Propagate the correct exit status (expected to be 127 for a program # not found, 63 for a program that failed due to version mismatch). 
exit $st # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: nordugrid-arc-7.1.1/PaxHeaders/debian0000644000000000000000000000013215067751432014515 xustar0030 mtime=1759499034.757510078 30 atime=1759499034.762510154 30 ctime=1759499034.757510078 nordugrid-arc-7.1.1/debian/0000755000175000002070000000000015067751432016474 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-gfal.install0000644000000000000000000000013115067751327023312 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.707701628 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-gfal.install0000644000175000002070000000021215067751327025210 0ustar00mockbuildmock00000000000000/usr/lib/arc/external/libdmcgfal.so /usr/lib/arc/external/libdmcgfal.apd /usr/lib/arc/libdmcgfaldeleg.so /usr/lib/arc/libdmcgfaldeleg.apd nordugrid-arc-7.1.1/debian/PaxHeaders/libarccommon4.install0000644000000000000000000000013115067751327020715 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.688672975 nordugrid-arc-7.1.1/debian/libarccommon4.install0000644000175000002070000000260115067751327022617 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/libarccompute.so.* debian/tmp/usr/lib/libarccommunication.so.* debian/tmp/usr/lib/libarccommon.so.* debian/tmp/usr/lib/libarccredential.so.* debian/tmp/usr/lib/libarccredentialstore.so.* debian/tmp/usr/lib/libarccrypto.so.* debian/tmp/usr/lib/libarcdata.so.* debian/tmp/usr/lib/libarcdatastaging.so.* debian/tmp/usr/lib/libarcloader.so.* debian/tmp/usr/lib/libarcmessage.so.* debian/tmp/usr/lib/libarcsecurity.so.* debian/tmp/usr/lib/libarcotokens.so.* debian/tmp/usr/lib/libarcinfosys.so.* debian/tmp/usr/lib/libarcwsaddressing.so.* debian/tmp/usr/lib/libarcwssecurity.so.* debian/tmp/usr/lib/libarcxmlsec.so.* debian/tmp/usr/lib/arc/libmodcrypto.so debian/tmp/usr/lib/arc/libmodcredential.so debian/tmp/usr/lib/arc/libmodcrypto.apd debian/tmp/usr/lib/arc/libmodcredential.apd debian/tmp/usr/lib/arc/arc-file-access debian/tmp/usr/lib/arc/arc-hostname-resolver debian/tmp/usr/lib/arc/DataStagingDelivery debian/tmp/usr/lib/arc/arcconfig-parser debian/tmp/usr/lib/arc/arc-dmc debian/tmp/usr/lib/python?.*/site-packages/arc/__init__.py* debian/tmp/usr/lib/python?.*/site-packages/arc/paths.py* debian/tmp/usr/lib/python?.*/site-packages/arc/paths_dist.py* debian/tmp/usr/lib/python?.*/site-packages/arc/utils debian/tmp/usr/share/arc/arc.parser.defaults debian/tmp/usr/share/arc/schema debian/tmp/usr/share/locale/*/LC_MESSAGES/nordugrid-arc.mo debian/tmp/usr/share/arc/test-jobs/test-job-* nordugrid-arc-7.1.1/debian/PaxHeaders/libarccommon4.docs0000644000000000000000000000013015067751327020176 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 29 ctime=1759499034.72472918 nordugrid-arc-7.1.1/debian/libarccommon4.docs0000644000175000002070000000011215067751327022074 0ustar00mockbuildmock00000000000000README AUTHORS NOTICE src/doc/arc.conf.reference src/doc/arc.conf.DELETED nordugrid-arc-7.1.1/debian/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327016631 xustar0030 mtime=1759498967.624489989 30 atime=1759498967.805492739 30 ctime=1759499034.676153496 nordugrid-arc-7.1.1/debian/Makefile.am0000644000175000002070000000075315067751327020540 0ustar00mockbuildmock00000000000000EXTRA_DIST = changelog compat control copyright rules 
watch source/format \ $(srcdir)/*.install $(srcdir)/*.docs $(srcdir)/*.dirs \ $(srcdir)/*.enable $(srcdir)/*.no-enable $(srcdir)/*.logrotate \ $(srcdir)/*.postinst $(srcdir)/*.postrm $(srcdir)/*.prerm \ $(srcdir)/*.links $(srcdir)/*.lintian-overrides \ README.Debian README.source changelog: changelog.deb cp -p changelog.deb changelog MAINTAINERCLEANFILES = changelog nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.postinst0000644000000000000000000000013215067751327022077 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.749038234 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.postinst0000644000175000002070000000037615067751327024007 0ustar00mockbuildmock00000000000000#!/bin/sh set -e if [ "$1" = "configure" ] ; then # check hostcert is already generated (update vs install) if [ ! -f /etc/grid-security/testCA-hostcert.pem ] ; then arcctl test-ca init arcctl test-ca hostcert fi fi #DEBHELPER# nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arcctl.install0000644000000000000000000000013115067751327022172 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.693656674 nordugrid-arc-7.1.1/debian/nordugrid-arc-arcctl.install0000644000175000002070000000107415067751327024077 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python?.*/site-packages/arc/control/__init__.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/CertificateGenerator.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/ControlCommon.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/OSPackage.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/TestCA.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/TestJWT.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/ThirdPartyDeployment.py* debian/tmp/usr/share/man/man1/arcctl.1 debian/tmp/usr/sbin/arcctl nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-datadelivery-service.preinst.no-enable0000644000000000000000000000013215067751327026713 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.738047331 nordugrid-arc-7.1.1/debian/nordugrid-arc-datadelivery-service.preinst.no-enable0000644000175000002070000000133615067751327030620 0ustar00mockbuildmock00000000000000#!/bin/sh set -e # Version 6.7.0 starts using defaults-disabled # Remove rc.d links if upgrading from an older version and the service is # disabled in /etc/default if [ -n "$2" ] ; then if dpkg --compare-versions "$2" "<<" "6.7.0" ; then [ -r /etc/default/arc-datadelivery-service ] && . 
/etc/default/arc-datadelivery-service if [ -n "${RUN}" ] && [ "${RUN}" != "yes" ] ; then update-rc.d -f arc-datadelivery-service remove >/dev/null if [ -x "/usr/bin/deb-systemd-helper" ]; then deb-systemd-helper purge 'arc-datadelivery-service.service' >/dev/null || true deb-systemd-helper unmask 'arc-datadelivery-service.service' >/dev/null || true fi fi fi fi #DEBHELPER# nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-community-rtes.install0000644000000000000000000000013215067751327023722 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.806492754 30 ctime=1759499034.699647306 nordugrid-arc-7.1.1/debian/nordugrid-arc-community-rtes.install0000644000175000002070000000016315067751327025624 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/community_rtes.sh debian/tmp/usr/lib/python?.*/site-packages/arc/control/CommunityRTE.py* nordugrid-arc-7.1.1/debian/PaxHeaders/rules0000644000000000000000000000013215067751327015652 xustar0030 mtime=1759498967.629490065 30 atime=1759498967.806492754 30 ctime=1759499034.684883902 nordugrid-arc-7.1.1/debian/rules0000755000175000002070000001357115067751327017566 0ustar00mockbuildmock00000000000000#!/usr/bin/make -f # Filter out -Wl,-Bsymbolic-functions from default Ubuntu LDFLAGS export DEB_LDFLAGS_MAINT_STRIP=-Wl,-Bsymbolic-functions ifeq ($(shell type dh_systemd_enable > /dev/null 2> /dev/null && echo 1),1) WSD = --with systemd else WSD = endif ifeq ($(shell test -r /usr/include/systemd/sd-daemon.h && echo 1),1) ifeq ($(DEB_HOST_ARCH_OS),linux) SYSTEMD = yes else SYSTEMD = no endif else SYSTEMD = no endif ifeq ($(shell grep -q no-enable /usr/bin/dh_installinit 2>/dev/null && echo 1),1) SYSVNOENBL = --no-enable else SYSVNOENBL = endif ifeq ($(shell grep -q no-enable /usr/bin/dh_systemd_enable 2>/dev/null && echo 1),1) SYSDNOENBL = --no-enable else SYSDNOENBL = endif ifeq ($(shell test -r /usr/include/xrootd/XrdVersion.hh && echo 1),1) XROOTD = yes NX = else XROOTD = no NX = -Nnordugrid-arc-plugins-xrootd endif %: dh $@ --with autoreconf $(WSD) --with python3 $(NX) override_dh_auto_configure: if [ ! 
-f po/POTFILES.in.save ] ; then \ cp -p po/POTFILES.in po/POTFILES.in.save ; \ fi LDFLAGS="$(LDFLAGS) -Wl,-z,defs" \ dh_auto_configure -- \ --disable-doc \ --libdir=/usr/lib \ --libexecdir=/usr/lib \ --docdir=/usr/share/doc/nordugrid-arc \ --enable-gfal \ --enable-s3 \ --enable-xrootd=$(XROOTD) \ --enable-internal \ --enable-systemd=$(SYSTEMD) \ --with-systemd-units-location=/lib/systemd/system \ --with-sysv-scripts-location=/etc/init.d \ --with-python=python3 \ --disable-pylint override_dh_auto_clean: dh_auto_clean rm -f debian/nordugrid-arc-arex.arc-arex.init rm -f debian/nordugrid-arc-arex.arc-arex-ws.init rm -f debian/nordugrid-arc-hed.arched.init rm -f debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.init rm -f debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap.init rm -f debian/nordugrid-arc-arex.arc-arex.service rm -f debian/nordugrid-arc-arex.arc-arex-ws.service rm -f debian/nordugrid-arc-hed.arched.service rm -f debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.service rm -f debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap.service rm -f debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap-slapd.service rm -f debian/*.default rm -f debian/*.maintscript rm -f debian/*.preinst if [ -f po/POTFILES.in.save ] ; then \ mv po/POTFILES.in.save po/POTFILES.in ; \ fi find python src -depth -name __pycache__ -exec rm -rf {} ';' override_dh_auto_install: DEB_PYTHON_INSTALL_LAYOUT=deb dh_auto_install find debian/tmp -name \*.la -exec rm -fv '{}' ';' rm -f debian/tmp/usr/lib/arc/*.a rm -f debian/tmp/usr/lib/libarcglobusutils.so rm -f debian/tmp/usr/lib/python3*/*-packages/pyarcrest-*.*-info/direct_url.json find debian/tmp -depth -name __pycache__ -exec rm -rf '{}' ';' if [ -d debian/tmp/etc/bash_completion.d ]; then \ mkdir -p debian/tmp/usr/share/bash-completion; \ mv debian/tmp/etc/bash_completion.d \ debian/tmp/usr/share/bash-completion/completions; \ fi if [ -z "$(SYSVNOENBL)" ] ; then \ for x in debian/*.enable ; do \ cp -p $$x $${x%.enable} ; \ done ; \ else \ for x in debian/*.no-enable ; do \ cp -p $$x $${x%.no-enable} ; \ done ; \ fi mv debian/tmp/etc/init.d/arc-arex \ debian/nordugrid-arc-arex.arc-arex.init mv debian/tmp/etc/init.d/arc-arex-ws \ debian/nordugrid-arc-arex.arc-arex-ws.init mv debian/tmp/etc/init.d/arched \ debian/nordugrid-arc-hed.arched.init mv debian/tmp/etc/init.d/arc-datadelivery-service \ debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.init mv debian/tmp/etc/init.d/arc-infosys-ldap \ debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap.init mv debian/tmp/lib/systemd/system/arc-arex.service \ debian/nordugrid-arc-arex.arc-arex.service mv debian/tmp/lib/systemd/system/arc-arex-ws.service \ debian/nordugrid-arc-arex.arc-arex-ws.service mv debian/tmp/lib/systemd/system/arched.service \ debian/nordugrid-arc-hed.arched.service mv debian/tmp/lib/systemd/system/arc-datadelivery-service.service \ debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.service mv debian/tmp/lib/systemd/system/arc-infosys-ldap.service \ debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap.service mv debian/tmp/lib/systemd/system/arc-infosys-ldap-slapd.service \ debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap-slapd.service chmod 4755 debian/tmp/usr/bin/arc-job-cgroup override_dh_install: dh_install --fail-missing override_dh_installinit: dh_installinit $(SYSVNOENBL) -p nordugrid-arc-hed --name arched dh_installinit $(SYSVNOENBL) -p nordugrid-arc-arex --name arc-arex dh_installinit $(SYSVNOENBL) -p nordugrid-arc-arex --name 
arc-arex-ws dh_installinit $(SYSVNOENBL) -p nordugrid-arc-datadelivery-service --name arc-datadelivery-service dh_installinit $(SYSVNOENBL) -p nordugrid-arc-infosys-ldap --name arc-infosys-ldap override_dh_systemd_enable: dh_systemd_enable $(SYSDNOENBL) -p nordugrid-arc-hed --name arched dh_systemd_enable $(SYSDNOENBL) -p nordugrid-arc-arex --name arc-arex dh_systemd_enable $(SYSDNOENBL) -p nordugrid-arc-arex --name arc-arex-ws dh_systemd_enable $(SYSDNOENBL) -p nordugrid-arc-datadelivery-service --name arc-datadelivery-service dh_systemd_enable $(SYSDNOENBL) -p nordugrid-arc-infosys-ldap --name arc-infosys-ldap dh_systemd_enable $(SYSDNOENBL) -p nordugrid-arc-infosys-ldap --name arc-infosys-ldap-slapd override_dh_systemd_start: dh_systemd_start -p nordugrid-arc-hed --name arched dh_systemd_start -p nordugrid-arc-arex --name arc-arex dh_systemd_start -p nordugrid-arc-arex --name arc-arex-ws dh_systemd_start -p nordugrid-arc-datadelivery-service --name arc-datadelivery-service dh_systemd_start -p nordugrid-arc-infosys-ldap --name arc-infosys-ldap dh_systemd_start -p nordugrid-arc-infosys-ldap --name arc-infosys-ldap-slapd override_dh_fixperms: dh_fixperms -X /usr/bin/arc-job-cgroup override_dh_compress: dh_compress -X arc.conf.reference -X arc.conf.DELETED nordugrid-arc-7.1.1/debian/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347016644 xustar0030 mtime=1759498983.016027246 30 atime=1759499020.598294929 30 ctime=1759499034.677329548 nordugrid-arc-7.1.1/debian/Makefile.in0000644000175000002070000004554715067751347020565 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = debian ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = changelog.deb CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/changelog.deb.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = 
@ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ 
GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir 
= @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ EXTRA_DIST = changelog compat control copyright rules watch source/format \ $(srcdir)/*.install $(srcdir)/*.docs $(srcdir)/*.dirs \ $(srcdir)/*.enable $(srcdir)/*.no-enable $(srcdir)/*.logrotate \ $(srcdir)/*.postinst $(srcdir)/*.postrm $(srcdir)/*.prerm \ $(srcdir)/*.links $(srcdir)/*.lintian-overrides \ README.Debian README.source MAINTAINERCLEANFILES = changelog all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign debian/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign debian/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): changelog.deb: $(top_builddir)/config.status $(srcdir)/changelog.deb.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am .PRECIOUS: Makefile changelog: changelog.deb cp -p changelog.deb changelog # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-archery-manage.install0000644000000000000000000000013115067751327023605 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.694855052 nordugrid-arc-7.1.1/debian/nordugrid-arc-archery-manage.install0000644000175000002070000000004315067751327025505 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/archery-manage nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.logrotate0000644000000000000000000000013215067751327022214 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.744829666 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.logrotate0000644000175000002070000000162315067751327024120 0ustar00mockbuildmock00000000000000# # Logs written by A-REX # /var/log/arc/arex.log /var/log/arc/arex-jobs.log /var/log/arc/job.helper.errors /var/log/arc/datastaging.log { missingok compress delaycompress daily rotate 14 create sharedscripts postrotate kill -HUP `cat /run/arched-arex.pid 2> /dev/null` 2> /dev/null || true endscript } # # Logs written by A-REX WS interface # /var/log/arc/ws-interface.log { missingok compress delaycompress daily rotate 14 create sharedscripts postrotate kill -HUP `cat /run/arched-arex-ws.pid 2> /dev/null` 2> /dev/null || true endscript } # # External processes spawned by A-REX # /var/log/arc/infoprovider.log /var/log/arc/cache-cleaner.log /var/log/arc/jura.log { missingok compress delaycompress daily rotate 14 create } # # Developer-enabled performance logging # /var/log/arc/perfdata/*.perflog { missingok compress daily rotate 14 create } nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.logrotate0000644000000000000000000000013115067751327023664 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 29 ctime=1759499034.74764115 
nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.logrotate0000644000175000002070000000014115067751327025563 0ustar00mockbuildmock00000000000000/var/log/arc/bdii/bdii-update.log { missingok compress daily rotate 14 copytruncate } nordugrid-arc-7.1.1/debian/PaxHeaders/changelog.deb.in0000644000000000000000000000013115067751327017604 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.678598942 nordugrid-arc-7.1.1/debian/changelog.deb.in0000644000175000002070000002160715067751327021515 0ustar00mockbuildmock00000000000000nordugrid-arc (@debianversion@-1) unstable; urgency=low * Unofficial build. -- Anders Waananen @DATER@ nordugrid-arc (4.0.0~rc2-1) unstable; urgency=low * 4.0.0 Release Candidate 2 -- Anders Waananen Thu, 07 Nov 2013 11:01:24 +0100 nordugrid-arc (4.0.0~rc1-1) unstable; urgency=low * 4.0.0 Release Candidate 1 -- Anders Waananen Tue, 29 Oct 2013 23:22:57 +0100 nordugrid-arc (3.0.3-1) unstable; urgency=low * 3.0.3 Final Release -- Anders Waananen Fri, 19 Jul 2013 12:05:50 +0200 nordugrid-arc (3.0.2-1) unstable; urgency=low * 3.0.2 Final Release -- Anders Waananen Wed, 12 Jun 2013 15:09:59 +0200 nordugrid-arc (3.0.1-1) unstable; urgency=low * 3.0.1 Final Release -- Anders Waananen Tue, 30 Apr 2013 00:47:43 +0200 nordugrid-arc (3.0.1~rc2-1) unstable; urgency=low * 3.0.1 Release Candidate 2 -- Anders Waananen Fri, 12 Apr 2013 16:56:03 +0200 nordugrid-arc (3.0.1~rc1-1) unstable; urgency=low * 3.0.1 Release Candidate 1 -- Anders Waananen Fri, 12 Apr 2013 13:50:41 +0200 nordugrid-arc (3.0.0-1) unstable; urgency=low * 3.0.0 Final Release -- Anders Waananen Fri, 22 Mar 2013 12:32:51 +0100 nordugrid-arc (3.0.0~rc5-1) unstable; urgency=low * 3.0.0 Release Candidate 5 -- Anders Waananen Wed, 06 Feb 2013 12:12:48 +0100 nordugrid-arc (3.0.0~rc4-1) unstable; urgency=low * 3.0.0 Release Candidate 4 -- Anders Waananen Sat, 02 Feb 2013 01:00:33 +0100 nordugrid-arc (3.0.0~rc3-1) unstable; urgency=low * 3.0.0 Release Candidate 3 -- Anders Waananen Wed, 30 Jan 2013 09:02:17 +0100 nordugrid-arc (3.0.0~rc2-1) unstable; urgency=low * 3.0.0 Release Candidate 2 -- Anders Waananen Mon, 28 Jan 2013 07:55:14 +0100 nordugrid-arc (3.0.0~rc1-1) unstable; urgency=low * 3.0.0 Release Candidate 1 -- Anders Waananen Thu, 06 Dec 2012 22:05:31 +0100 nordugrid-arc (2.0.1-1) unstable; urgency=low * 2.0.1 Final Release -- Anders Waananen Thu, 22 Nov 2012 23:47:19 +0100 nordugrid-arc (2.0.1rc2) unstable; urgency=low * 2.0.1rc2 Release Candidate 2 -- Anders Waananen Thu, 25 Oct 2012 13:00:02 +0200 nordugrid-arc (2.0.1rc1) unstable; urgency=low * 2.0.1rc1 Release Candidate 1 -- Anders Waananen Mon, 27 Aug 2012 13:26:30 +0200 nordugrid-arc (2.0.0-1) unstable; urgency=low * 2.0.0 Final Release -- Mattias Ellert Wed, 23 May 2012 19:27:47 +0200 nordugrid-arc (2.0.0~rc4-1) unstable; urgency=low * 2.0.0 Release Candidate 4 -- Mattias Ellert Mon, 02 Apr 2012 16:06:45 +0200 nordugrid-arc (2.0.0~rc3.1-1) unstable; urgency=low * 2.0.0 Release Candidate 3.1 -- Mattias Ellert Tue, 27 Mar 2012 10:30:23 +0200 nordugrid-arc (2.0.0~rc3-1) unstable; urgency=low * 2.0.0 Release Candidate 3 -- Mattias Ellert Mon, 05 Mar 2012 16:27:32 +0100 nordugrid-arc (2.0.0~rc2-1) unstable; urgency=low * 2.0.0 Release Candidate 2 -- Mattias Ellert Wed, 15 Feb 2012 13:54:17 +0100 nordugrid-arc (1.1.0-1) unstable; urgency=low * 1.1.0 Final Release -- Mattias Ellert Mon, 03 Oct 2011 14:30:45 +0200 nordugrid-arc (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 Release Candidate 2 -- Mattias Ellert Sun, 25 Sep 2011 05:42:22 
+0200 nordugrid-arc (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 Release Candidate 1 -- Mattias Ellert Sun, 11 Sep 2011 20:08:33 +0200 nordugrid-arc (1.0.1-1) unstable; urgency=low * 1.0.1 Final Release -- Mattias Ellert Sat, 23 Jul 2011 09:32:53 +0200 nordugrid-arc (1.0.1~rc4-1) unstable; urgency=low * 1.0.1 Release Candidate 4 -- Mattias Ellert Tue, 19 Jul 2011 15:17:05 +0200 nordugrid-arc (1.0.1~rc1-1) unstable; urgency=low * 1.0.1 Release Candidate 1 -- Mattias Ellert Sat, 18 Jun 2011 18:29:09 +0200 nordugrid-arc (1.0.0-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Mon, 18 Apr 2011 08:59:55 +0200 nordugrid-arc (1.0.0~b5-1) unstable; urgency=low * 1.0.0 Beta Release 5 -- Mattias Ellert Wed, 06 Apr 2011 14:08:52 +0200 nordugrid-arc (1.0.0~b4-1) unstable; urgency=low * 1.0.0 Beta Release 4 -- Mattias Ellert Wed, 23 Mar 2011 15:19:08 +0100 nordugrid-arc (1.0.0~b3-1) unstable; urgency=low * 1.0.0 Beta Release 3 -- Mattias Ellert Thu, 10 Mar 2011 17:05:28 +0100 nordugrid-arc (1.0.0~b2-1) unstable; urgency=low * 1.0.0 Beta Release 2 -- Mattias Ellert Mon, 07 Mar 2011 05:12:30 +0100 nordugrid-arc (1.0.0~b1-1) unstable; urgency=low * 1.0.0 Beta Release 1 -- Mattias Ellert Mon, 14 Feb 2011 17:19:04 +0100 nordugrid-arc-nox (1.2.1-1) unstable; urgency=low * 1.2.1 Final release -- Mattias Ellert Tue, 21 Dec 2010 22:34:02 +0100 nordugrid-arc-nox (1.2.1~rc2-1) unstable; urgency=low * 1.2.1 Release Candidate 2 -- Mattias Ellert Tue, 21 Dec 2010 09:36:46 +0100 nordugrid-arc-nox (1.2.1~rc1-1) unstable; urgency=low * 1.2.1 Release Candidate 1 -- Mattias Ellert Wed, 08 Dec 2010 15:30:37 +0100 nordugrid-arc-nox (1.2.0-1) unstable; urgency=low * 1.2.0 Final release -- Mattias Ellert Fri, 22 Oct 2010 15:25:07 +0200 nordugrid-arc-nox (1.2.0~rc2-1) unstable; urgency=low * 1.2.0 Release Candidate 2 -- Mattias Ellert Thu, 30 Sep 2010 10:11:14 +0200 nordugrid-arc-nox (1.2.0~rc1-1) unstable; urgency=low * 1.2.0 Release Candidate 1 -- Mattias Ellert Mon, 13 Sep 2010 11:14:51 +0200 nordugrid-arc-nox (1.1.0-1) unstable; urgency=low * 1.1.0 Final release -- Mattias Ellert Wed, 05 May 2010 18:31:59 +0200 nordugrid-arc-nox (1.1.0~rc6-1) unstable; urgency=low * 1.1.0 Release Candidate 6 -- Mattias Ellert Mon, 08 Mar 2010 20:36:00 +0100 nordugrid-arc-nox (1.1.0~rc5-2) unstable; urgency=low * Rebuild for Globus Toolkit 5 -- Mattias Ellert Fri, 26 Feb 2010 16:25:39 +0100 nordugrid-arc-nox (1.1.0~rc5-1) unstable; urgency=low * 1.1.0 Release Candidate 5 -- Mattias Ellert Fri, 26 Feb 2010 15:07:39 +0100 nordugrid-arc-nox (1.1.0~rc4-1) unstable; urgency=low * 1.1.0 release candidate 4 -- Mattias Ellert Wed, 24 Feb 2010 12:34:41 +0100 nordugrid-arc-nox (1.1.0~rc3-1) unstable; urgency=low * 1.1.0 release candidate 3 -- Mattias Ellert Mon, 22 Feb 2010 10:20:27 +0100 nordugrid-arc-nox (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 release candidate 2 -- Mattias Ellert Mon, 15 Feb 2010 19:08:07 +0100 nordugrid-arc-nox (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 release candidate 1 -- Mattias Ellert Thu, 11 Feb 2010 19:48:01 +0100 nordugrid-arc-nox (1.0.0-1) unstable; urgency=low * 1.0.0 Final release -- Mattias Ellert Sun, 29 Nov 2009 23:13:41 +0100 nordugrid-arc-nox (1.0.0~rc7-1) unstable; urgency=low * 1.0.0 release candidate 7 -- Mattias Ellert Thu, 19 Nov 2009 15:30:32 +0100 nordugrid-arc-nox (1.0.0~rc6-1) unstable; urgency=low * 1.0.0 release candidate 6 -- Mattias Ellert Thu, 12 Nov 2009 10:12:45 +0100 nordugrid-arc-nox (1.0.0~rc5-1) unstable; urgency=low * 1.0.0 release candidate 5 -- Mattias Ellert Wed, 04 Nov
2009 16:45:22 +0100 nordugrid-arc1 (0.9.4~rc4-1) unstable; urgency=low * 0.9.4 release candidate 4 -- Mattias Ellert Mon, 26 Oct 2009 23:19:55 +0100 nordugrid-arc1 (0.9.4~rc3-1) unstable; urgency=low * 0.9.4 release candidate 3 -- Mattias Ellert Thu, 22 Oct 2009 19:22:31 +0200 nordugrid-arc1 (0.9.4~rc2-1) unstable; urgency=low * 0.9.4 release candidate 2 -- Mattias Ellert Thu, 15 Oct 2009 09:04:24 +0200 nordugrid-arc1 (0.9.3-1) unstable; urgency=low * Final 0.9.3 release -- Mattias Ellert Sun, 27 Sep 2009 01:27:31 +0200 nordugrid-arc1 (0.9.3~rc3-1) unstable; urgency=low * Initial release -- Mattias Ellert Mon, 5 Nov 2007 10:12:49 -0400 nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-client.install0000644000000000000000000000013215067751327022201 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.698471054 nordugrid-arc-7.1.1/debian/nordugrid-arc-client.install0000644000175000002070000000240015067751327024077 0ustar00mockbuildmock00000000000000debian/tmp/usr/bin/arccat debian/tmp/usr/bin/arcclean debian/tmp/usr/bin/arccp debian/tmp/usr/bin/arcget debian/tmp/usr/bin/arcinfo debian/tmp/usr/bin/arckill debian/tmp/usr/bin/arcls debian/tmp/usr/bin/arcmkdir debian/tmp/usr/bin/arcproxy debian/tmp/usr/bin/arcrename debian/tmp/usr/bin/arcrenew debian/tmp/usr/bin/arcresume debian/tmp/usr/bin/arcrm debian/tmp/usr/bin/arcstat debian/tmp/usr/bin/arcsub debian/tmp/usr/bin/arcsync debian/tmp/usr/bin/arctest debian/tmp/etc/arc/client.conf debian/tmp/usr/share/arc/examples/client.conf debian/tmp/usr/share/man/man1/arccat.1 debian/tmp/usr/share/man/man1/arcclean.1 debian/tmp/usr/share/man/man1/arccp.1 debian/tmp/usr/share/man/man1/arcget.1 debian/tmp/usr/share/man/man1/arcinfo.1 debian/tmp/usr/share/man/man1/arckill.1 debian/tmp/usr/share/man/man1/arcls.1 debian/tmp/usr/share/man/man1/arcmkdir.1 debian/tmp/usr/share/man/man1/arcproxy.1 debian/tmp/usr/share/man/man1/arcrename.1 debian/tmp/usr/share/man/man1/arcrenew.1 debian/tmp/usr/share/man/man1/arcresume.1 debian/tmp/usr/share/man/man1/arcrm.1 debian/tmp/usr/share/man/man1/arcstat.1 debian/tmp/usr/share/man/man1/arcsub.1 debian/tmp/usr/share/man/man1/arcsync.1 debian/tmp/usr/share/man/man1/arctest.1 debian/tmp/usr/share/bash-completion/completions/arc-client-tools nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-hed.maintscript.no-enable0000644000000000000000000000013115067751327024210 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 29 ctime=1759499034.73944142 nordugrid-arc-7.1.1/debian/nordugrid-arc-hed.maintscript.no-enable0000644000175000002070000000004615067751327026113 0ustar00mockbuildmock00000000000000rm_conffile /etc/default/arched 6.7.0 nordugrid-arc-7.1.1/debian/PaxHeaders/source0000644000000000000000000000013215067751432016015 xustar0030 mtime=1759499034.686508999 30 atime=1759499034.762510154 30 ctime=1759499034.686508999 nordugrid-arc-7.1.1/debian/source/0000755000175000002070000000000015067751432017774 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/debian/source/PaxHeaders/format0000644000000000000000000000013215067751327017310 xustar0030 mtime=1759498967.629490065 30 atime=1759498967.806492754 30 ctime=1759499034.687488441 nordugrid-arc-7.1.1/debian/source/format0000644000175000002070000000001415067751327021205 0ustar00mockbuildmock000000000000003.0 (quilt) nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.install0000644000000000000000000000013215067751327021662 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.697126832
nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.install0000644000175000002070000000641215067751327023567 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/arc-blahp-logger debian/tmp/usr/lib/arc/cache-clean debian/tmp/usr/lib/arc/cache-list debian/tmp/usr/lib/arc/gm-* debian/tmp/usr/lib/arc/inputcheck debian/tmp/usr/lib/arc/jura-ng debian/tmp/usr/lib/arc/smtp-send debian/tmp/usr/lib/arc/smtp-send.sh debian/tmp/usr/share/arc/cancel-condor-job debian/tmp/usr/share/arc/cancel-fork-job debian/tmp/usr/share/arc/cancel-SLURM-job debian/tmp/usr/share/arc/scan-condor-job debian/tmp/usr/share/arc/scan-fork-job debian/tmp/usr/share/arc/scan-SLURM-job debian/tmp/usr/share/arc/submit-condor-job debian/tmp/usr/share/arc/submit-fork-job debian/tmp/usr/share/arc/submit-SLURM-job debian/tmp/usr/share/arc/perferator debian/tmp/usr/share/arc/update-controldir debian/tmp/usr/share/arc/PerfData.pl debian/tmp/usr/share/arc/arc-arex-start debian/tmp/usr/share/arc/arc-arex-ws-start debian/tmp/usr/lib/arc/libarex.so debian/tmp/usr/lib/arc/libarex.apd debian/tmp/usr/lib/arc/libcandypond.so debian/tmp/usr/lib/arc/libcandypond.apd debian/tmp/usr/share/arc/CEinfo.pl debian/tmp/usr/share/arc/ARC0mod.pm debian/tmp/usr/share/arc/Condor.pm debian/tmp/usr/share/arc/Fork.pm debian/tmp/usr/share/arc/FORKmod.pm debian/tmp/usr/share/arc/SLURM.pm debian/tmp/usr/share/arc/SLURMmod.pm debian/tmp/usr/share/arc/XmlPrinter.pm debian/tmp/usr/share/arc/InfosysHelper.pm debian/tmp/usr/share/arc/LdifPrinter.pm debian/tmp/usr/share/arc/GLUE2xmlPrinter.pm debian/tmp/usr/share/arc/GLUE2ldifPrinter.pm debian/tmp/usr/share/arc/NGldifPrinter.pm debian/tmp/usr/share/arc/ARC0ClusterInfo.pm debian/tmp/usr/share/arc/ARC1ClusterInfo.pm debian/tmp/usr/share/arc/ConfigCentral.pm debian/tmp/usr/share/arc/GMJobsInfo.pm debian/tmp/usr/share/arc/HostInfo.pm debian/tmp/usr/share/arc/RTEInfo.pm debian/tmp/usr/share/arc/InfoChecker.pm debian/tmp/usr/share/arc/IniParser.pm debian/tmp/usr/share/arc/LRMSInfo.pm debian/tmp/usr/share/arc/Sysinfo.pm debian/tmp/usr/share/arc/LogUtils.pm debian/tmp/usr/share/arc/cancel_common.sh debian/tmp/usr/share/arc/condor_env.pm debian/tmp/usr/share/arc/configure-*-env.sh debian/tmp/usr/share/arc/submit_common.sh debian/tmp/usr/share/arc/scan_common.sh debian/tmp/usr/share/arc/lrms_common.sh debian/tmp/usr/share/arc/sql-schema/arex_accounting_db_schema_v2.sql debian/tmp/usr/share/man/man1/cache-clean.1 debian/tmp/usr/share/man/man1/cache-list.1 debian/tmp/usr/share/man/man8/a-rex-backtrace-collect.8 debian/tmp/usr/share/man/man8/arc-blahp-logger.8 debian/tmp/usr/share/man/man8/gm-*.8 debian/tmp/usr/lib/python?.*/site-packages/arc/control/AccountingDB.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/AccountingPublishing.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/Accounting.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/Cache.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/DataStaging.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/Jobs.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/RunTimeEnvironment.py* debian/tmp/usr/lib/arc/arccandypond debian/tmp/usr/share/arc/rte/ENV/LRMS-SCRATCH debian/tmp/usr/share/arc/rte/ENV/PROXY debian/tmp/usr/share/arc/rte/ENV/RTE debian/tmp/usr/share/arc/rte/ENV/CANDYPOND debian/tmp/usr/share/arc/rte/ENV/SINGULARITY debian/tmp/usr/share/arc/rte/ENV/CONDOR/DOCKER debian/tmp/usr/sbin/a-rex-backtrace-collect debian/tmp/etc/arc.conf 
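The nordugrid-arc-arex.install list that ends above is a dh_install file: each line names a path or glob (gm-*, configure-*-env.sh, python?.*, and so on) that debhelper resolves against the staged debian/tmp tree when it assembles the binary package. As a minimal illustrative sketch (not part of the packaging), such a list could be spot-checked from the unpacked source root, assuming make install DESTDIR=$(pwd)/debian/tmp has already been run:

#!/bin/sh
# Illustrative only: report dh_install patterns from the A-REX package
# that match nothing under the staged debian/tmp tree.
while read -r pattern; do
  [ -n "$pattern" ] || continue   # skip blank lines
  set -- $pattern                 # let the shell expand the glob
  [ -e "$1" ] || echo "unmatched: $pattern"
done < debian/nordugrid-arc-arex.install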
nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-s3.install0000644000000000000000000000013115067751327022726 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.716725546 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-s3.install0000644000175000002070000000010715067751327024627 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libdmcs3.so debian/tmp/usr/lib/arc/libdmcs3.apd nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.maintscript.no-enable0000644000000000000000000000013215067751327024410 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.734020581 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.maintscript.no-enable0000644000175000002070000000005015067751327026305 0ustar00mockbuildmock00000000000000rm_conffile /etc/default/arc-arex 6.7.0 nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-xrootd.install0000644000000000000000000000013015067751327023717 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 29 ctime=1759499034.71811958 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-xrootd.install0000644000175000002070000000022215067751327025617 0ustar00mockbuildmock00000000000000/usr/lib/arc/external/libdmcxrootd.so /usr/lib/arc/external/libdmcxrootd.apd /usr/lib/arc/libdmcxrootddeleg.so /usr/lib/arc/libdmcxrootddeleg.apd nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-hed.install0000644000000000000000000000013215067751327021463 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.703538956 nordugrid-arc-7.1.1/debian/nordugrid-arc-hed.install0000644000175000002070000000051015067751327023361 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/arched debian/tmp/usr/lib/arc/libecho.so debian/tmp/usr/lib/arc/libecho.apd debian/tmp/usr/share/man/man8/arched.8 debian/tmp/usr/share/man/man5/arc.conf.5 debian/tmp/usr/share/arc/arched-start debian/tmp/usr/share/arc/profiles debian/tmp/usr/share/arc/examples/config debian/tmp/usr/share/arc/examples/echo nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-hed.arched.default.enable0000644000000000000000000000013215067751327024113 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.731361764 nordugrid-arc-7.1.1/debian/nordugrid-arc-hed.arched.default.enable0000644000175000002070000000021315067751327026011 0ustar00mockbuildmock00000000000000# To enable arched, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. 
RUN=no nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-test-utils.install0000644000000000000000000000013115067751327023037 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.719394596 nordugrid-arc-7.1.1/debian/nordugrid-arc-test-utils.install0000644000175000002070000000011315067751327024735 0ustar00mockbuildmock00000000000000debian/tmp/usr/bin/arcperftest debian/tmp/usr/share/man/man1/arcperftest.1 nordugrid-arc-7.1.1/debian/PaxHeaders/libarccommon4.links0000644000000000000000000000013115067751327020367 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.754425029 nordugrid-arc-7.1.1/debian/libarccommon4.links0000644000175000002070000000007215067751327022271 0ustar00mockbuildmock00000000000000/usr/share/doc/libarccommon4 /usr/share/doc/nordugrid-arc nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-dev.docs0000644000000000000000000000013215067751327020763 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.726062112 nordugrid-arc-7.1.1/debian/nordugrid-arc-dev.docs0000644000175000002070000000003115067751327022663 0ustar00mockbuildmock00000000000000src/hed/shc/arcpdp/*.xsd nordugrid-arc-7.1.1/debian/PaxHeaders/copyright0000644000000000000000000000013115067751327016527 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.683678154 nordugrid-arc-7.1.1/debian/copyright0000644000175000002070000001057515067751327020432 0ustar00mockbuildmock00000000000000Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: nordugrid-arc Upstream-Contact: contact@nordugrid.org Source: http://download.nordugrid.org/packages/nordugrid-arc/releases Files: * Copyright: 2006-2025 David Cameron Péter Dóbé Mattias Ellert Thomas Frågåt Ali Gholami Michael Glodek Jørgen Beck Hansen Henrik Thostrup Jensen Daniel Johansson Johan Jönemo Dmytro Karpenko Tamás Kazinczy Marek Kočan Aleksandr Konstantinov Balázs Kónya Hajo Nils Krabbenhöft Andrew Lahiff Juha Lento Peter Lundgaard Rosendahl Iván Márton Luca Mazzaferro Bjarte Mohn Steffen Möller Zsombor Nagy Aleksei Nazarov Jon Kerr Nilsen Markus Nordén Weizhong Qiang Gábor Rőczei Florido Paganelli Andrii Salnikov Martin Savko Martin Skou Andersen Oxana Smirnova Ferenc Szalai Gábor Szigeti Christian Ulrik Søttrup Adrian Taga Salman Zubair Toor Olli Tourunen Petter Urkedal Wenjing Wu Anders Wäänänen Thomas Zangerl .
University of Copenhagen (Denmark) NORDUnet - Nordic Infrastructure for Research and Education (Denmark) CSC - IT Center for Science Ltd (Finland) University of Lübeck (Germany) NIIFI - National Information Infrastructure Development Institute (Hungary) University of Oslo (Norway) NordForsk (Norway) Pavol Jozef Šafárik University in Košice (Slovakia) Linköping University (Sweden) Lund University (Sweden) Royal Institute of Technology (Sweden) Uppsala University (Sweden) Taras Shevchenko National University of Kyiv (Ukraine) License: Apache-2.0 Files: src/hed/libs/data/cache-clean Copyright: 2008 Niklas Edmundsson, Tomas Ögren, David Cameron License: Apache-2.0 Files: src/external/cJSON/cJSON.c src/external/cJSON/cJSON.h Copyright: 2009 Dave Gamble License: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. License: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at . http://www.apache.org/licenses/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. . On Debian systems, the complete text of the Apache version 2.0 license can be found in /usr/share/common-licenses/Apache-2.0. nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-datadelivery-service.logrotate0000644000000000000000000000013215067751327025370 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.806492754 30 ctime=1759499034.746178794 nordugrid-arc-7.1.1/debian/nordugrid-arc-datadelivery-service.logrotate0000644000175000002070000000034015067751327027267 0ustar00mockbuildmock00000000000000/var/log/arc/datadelivery-service.log { missingok compress delaycompress daily rotate 14 create postrotate kill -HUP `cat /run/arched-datadelivery-service.pid 2> /dev/null` 2> /dev/null || true endscript } nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.arc-infosys-ldap.default.enable0000644000000000000000000000013215067751327027710 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.732699089 nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.arc-infosys-ldap.default.enable0000644000175000002070000000022515067751327031611 0ustar00mockbuildmock00000000000000# To enable arc-infosys-ldap, i.e.
to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-needed.install0000644000000000000000000000013115067751327023625 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.713328805 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-needed.install0000644000175000002070000000265015067751327025533 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libaccARCREST.so debian/tmp/usr/lib/arc/libaccBroker.so debian/tmp/usr/lib/arc/libaccJobDescriptionParser.so debian/tmp/usr/lib/arc/libaccARCHERY.so debian/tmp/usr/lib/arc/test/libaccTEST.so debian/tmp/usr/lib/arc/libarcshclegacy.so debian/tmp/usr/lib/arc/libarcshcotokens.so debian/tmp/usr/lib/arc/libarcshc.so debian/tmp/usr/lib/arc/libdmcfile.so debian/tmp/usr/lib/arc/libdmchttp.so debian/tmp/usr/lib/arc/libdmcsrm.so debian/tmp/usr/lib/arc/libdmcrucio.so debian/tmp/usr/lib/arc/libidentitymap.so debian/tmp/usr/lib/arc/libmcchttp.so debian/tmp/usr/lib/arc/libmccmsgvalidator.so debian/tmp/usr/lib/arc/libmccsoap.so debian/tmp/usr/lib/arc/libmcctcp.so debian/tmp/usr/lib/arc/libmcctls.so debian/tmp/usr/lib/arc/libaccARCREST.apd debian/tmp/usr/lib/arc/libaccBroker.apd debian/tmp/usr/lib/arc/libaccJobDescriptionParser.apd debian/tmp/usr/lib/arc/libaccARCHERY.apd debian/tmp/usr/lib/arc/test/libaccTEST.apd debian/tmp/usr/lib/arc/libarcshclegacy.apd debian/tmp/usr/lib/arc/libarcshcotokens.apd debian/tmp/usr/lib/arc/libarcshc.apd debian/tmp/usr/lib/arc/libdmcfile.apd debian/tmp/usr/lib/arc/libdmchttp.apd debian/tmp/usr/lib/arc/libmccmsgvalidator.apd debian/tmp/usr/lib/arc/libdmcsrm.apd debian/tmp/usr/lib/arc/libdmcrucio.apd debian/tmp/usr/lib/arc/libidentitymap.apd debian/tmp/usr/lib/arc/libmcchttp.apd debian/tmp/usr/lib/arc/libmccsoap.apd debian/tmp/usr/lib/arc/libmcctcp.apd debian/tmp/usr/lib/arc/libmcctls.apd nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.install0000644000000000000000000000013215067751327023333 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.704945302 nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.install0000644000175000002070000000017615067751327025241 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/create-bdii-config debian/tmp/usr/share/arc/create-slapd-config debian/tmp/usr/share/arc/ldap-schema nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-internal.install0000644000000000000000000000013115067751327024215 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.710553522 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-internal.install0000644000175000002070000000012315067751327026114 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libaccINTERNAL.so debian/tmp/usr/lib/arc/libaccINTERNAL.apd nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.arc-arex.default.enable0000644000000000000000000000013015067751327024564 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 28 ctime=1759499034.7286226 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.arc-arex.default.enable0000644000175000002070000000021515067751327026466 0ustar00mockbuildmock00000000000000# To enable arc-arex, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. 
RUN=no nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.postinst0000644000000000000000000000013115067751327023547 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 29 ctime=1759499034.75034115 nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.postinst0000644000175000002070000000223515067751327025454 0ustar00mockbuildmock00000000000000#!/bin/sh set -e if [ "$1" = "configure" ] ; then APP_PROFILE=/etc/apparmor.d/usr.sbin.slapd LOCAL_APP_PROFILE=/etc/apparmor.d/local/usr.sbin.slapd if [ ! -r "$LOCAL_APP_PROFILE" ] ; then # Create the local profile if it does not yet exist tmp=`mktemp` cat <<EOM > "$tmp" # Site-specific additions and overrides for usr.sbin.slapd. # For more details, please see /etc/apparmor.d/local/README. EOM mkdir -p `dirname $LOCAL_APP_PROFILE` 2>/dev/null || true mv -f "$tmp" "$LOCAL_APP_PROFILE" chmod 644 "$LOCAL_APP_PROFILE" fi grep -q "AppArmor profile for NorduGrid ARC ARIS" "$LOCAL_APP_PROFILE" || \ cat <<EOM >> "$LOCAL_APP_PROFILE" # AppArmor profile for NorduGrid ARC ARIS START #include /etc/bdii/* r, /usr/share/arc/ldap-schema/* r, /{,var/}run/arc/infosys/bdii-slapd.conf r, /var/lib/arc/bdii/db/** rwk, /{,var/}run/arc/bdii/db/* w, # AppArmor profile for NorduGrid ARC ARIS END EOM if [ -r "$APP_PROFILE" ] ; then # Reload the profile if aa-status --enabled 2>/dev/null ; then apparmor_parser -r -T -W "$APP_PROFILE" || true fi fi fi #DEBHELPER# nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-datadelivery-service.install0000644000000000000000000000013215067751327025036 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.806492754 30 ctime=1759499034.700875981 nordugrid-arc-7.1.1/debian/nordugrid-arc-datadelivery-service.install0000644000175000002070000000023315067751327026736 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libdatadeliveryservice.so debian/tmp/usr/lib/arc/libdatadeliveryservice.apd debian/tmp/usr/share/arc/arc-datadelivery-service-start nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-lcas-lcmaps.install0000644000000000000000000000013115067751327024600 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.712052063 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-lcas-lcmaps.install0000644000175000002070000000010215067751327026474 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/arc-lcas debian/tmp/usr/lib/arc/arc-lcmaps nordugrid-arc-7.1.1/debian/PaxHeaders/python3-arcrest.install0000644000000000000000000000013115067751327021231 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.722013704 nordugrid-arc-7.1.1/debian/python3-arcrest.install0000644000175000002070000000015015067751327023130 0ustar00mockbuildmock00000000000000/usr/lib/python3*/*-packages/pyarcrest /usr/lib/python3*/*-packages/pyarcrest-*.*-info /usr/bin/arcrest nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.preinst.no-enable0000644000000000000000000000013215067751327023537 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.735390082 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.preinst.no-enable0000644000175000002070000000121615067751327025441 0ustar00mockbuildmock00000000000000#!/bin/sh set -e # Version 6.7.0 starts using defaults-disabled # Remove rc.d links if upgrading from an older version and the service is # disabled in /etc/default if [ -n "$2" ] ; then if dpkg --compare-versions "$2" "<<" "6.7.0" ; then [ -r /etc/default/arc-arex ] && .
/etc/default/arc-arex if [ -n "${RUN}" ] && [ "${RUN}" != "yes" ] ; then update-rc.d -f arc-arex remove >/dev/null if [ -x "/usr/bin/deb-systemd-helper" ]; then deb-systemd-helper purge 'arc-arex.service' >/dev/null || true deb-systemd-helper unmask 'arc-arex.service' >/dev/null || true fi fi fi fi #DEBHELPER# nordugrid-arc-7.1.1/debian/PaxHeaders/python3-nordugrid-arc.install0000644000000000000000000000013115067751327022326 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.723438711 nordugrid-arc-7.1.1/debian/python3-nordugrid-arc.install0000644000175000002070000000111715067751327024231 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python3.*/site-packages/_arc.*so debian/tmp/usr/lib/python3.*/site-packages/arc/common.py* debian/tmp/usr/lib/python3.*/site-packages/arc/communication.py* debian/tmp/usr/lib/python3.*/site-packages/arc/compute.py* debian/tmp/usr/lib/python3.*/site-packages/arc/credential.py* debian/tmp/usr/lib/python3.*/site-packages/arc/data.py* debian/tmp/usr/lib/python3.*/site-packages/arc/delegation.py* debian/tmp/usr/lib/python3.*/site-packages/arc/loader.py* debian/tmp/usr/lib/python3.*/site-packages/arc/message.py* debian/tmp/usr/lib/python3.*/site-packages/arc/security.py* nordugrid-arc-7.1.1/debian/PaxHeaders/compat0000644000000000000000000000013115067751327016002 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.681097168 nordugrid-arc-7.1.1/debian/compat0000644000175000002070000000000215067751327017675 0ustar00mockbuildmock000000000000009 nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.prerm0000644000000000000000000000013215067751327021341 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.753087172 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.prerm0000644000175000002070000000051715067751327023246 0ustar00mockbuildmock00000000000000#!/bin/sh set -e #DEBHELPER# if [ "$1" = "remove" ] ; then # Don't regenerate the bytecode that was removed by the autogenerated # debhelper maintainer script above... PYTHONDONTWRITEBYTECODE=1 arcctl test-ca cleanup rmdir /etc/grid-security/certificates 2>/dev/null || : rmdir /etc/grid-security 2>/dev/null || : fi nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-hed.preinst.no-enable0000644000000000000000000000013215067751327023340 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.740749194 nordugrid-arc-7.1.1/debian/nordugrid-arc-hed.preinst.no-enable0000644000175000002070000000120415067751327025237 0ustar00mockbuildmock00000000000000#!/bin/sh set -e # Version 6.7.0 starts using defaults-disabled # Remove rc.d links if upgrading from an older version and the service is # disabled in /etc/default if [ -n "$2" ] ; then if dpkg --compare-versions "$2" "<<" "6.7.0" ; then [ -r /etc/default/arched ] && . 
/etc/default/arched if [ -n "${RUN}" ] && [ "${RUN}" != "yes" ] ; then update-rc.d -f arched remove >/dev/null if [ -x "/usr/bin/deb-systemd-helper" ]; then deb-systemd-helper purge 'arched.service' >/dev/null || true deb-systemd-helper unmask 'arched.service' >/dev/null || true fi fi fi fi #DEBHELPER# nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-dev.install0000644000000000000000000000013215067751327021501 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.702210526 nordugrid-arc-7.1.1/debian/nordugrid-arc-dev.install0000644000175000002070000000050315067751327023401 0ustar00mockbuildmock00000000000000debian/tmp/usr/include/arc debian/tmp/usr/lib/lib*.so debian/tmp/usr/bin/wsdl2hed debian/tmp/usr/share/man/man1/wsdl2hed.1 debian/tmp/usr/bin/arcplugin debian/tmp/usr/share/man/man1/arcplugin.1 debian/tmp/usr/share/arc/examples/sdk/*.cpp debian/tmp/usr/share/arc/examples/sdk/*.h debian/tmp/usr/share/arc/examples/sdk/*.py nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-datadelivery-service.arc-datadelivery-service.de0000644000000000000000000000031115067751327030634 xustar00111 path=nordugrid-arc-7.1.1/debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.default.enable 30 mtime=1759498967.626490019 30 atime=1759498967.806492754 30 ctime=1759499034.729905536 nordugrid-arc-7.1.1/debian/nordugrid-arc-datadelivery-service.arc-datadelivery-service.default.enabl0000644000175000002070000000023515067751327034653 0ustar00mockbuildmock00000000000000# To enable arc-datadelivery-service, i.e. to indicate that a readily usable # configuration is in place, comment out or delete the # following line. RUN=no nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arc-exporter.install0000644000000000000000000000013115067751327023335 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.691249872 nordugrid-arc-7.1.1/debian/nordugrid-arc-arc-exporter.install0000644000175000002070000000004115067751327025233 0ustar00mockbuildmock00000000000000debian/tmp/usr/sbin/arc-exporter nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.preinst.no-enable0000644000000000000000000000013115067751327025207 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.743483755 nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.preinst.no-enable0000644000175000002070000000126615067751327027117 0ustar00mockbuildmock00000000000000#!/bin/sh set -e # Version 6.7.0 starts using defaults-disabled # Remove rc.d links if upgrading from an older version and the service is # disabled in /etc/default if [ -n "$2" ] ; then if dpkg --compare-versions "$2" "<<" "6.7.0" ; then [ -r /etc/default/arc-infosys-ldap ] && . 
/etc/default/arc-infosys-ldap if [ -n "${RUN}" ] && [ "${RUN}" != "yes" ] ; then update-rc.d -f arc-infosys-ldap remove >/dev/null if [ -x "/usr/bin/deb-systemd-helper" ]; then deb-systemd-helper purge 'arc-infosys-ldap.service' >/dev/null || true deb-systemd-helper unmask 'arc-infosys-ldap.service' >/dev/null || true fi fi fi fi #DEBHELPER# nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex-lrms-contrib.install0000644000000000000000000000013115067751327024272 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.695939149 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex-lrms-contrib.install0000644000175000002070000000167015067751327026201 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/cancel-boinc-job debian/tmp/usr/share/arc/cancel-ll-job debian/tmp/usr/share/arc/cancel-lsf-job debian/tmp/usr/share/arc/cancel-pbs-job debian/tmp/usr/share/arc/cancel-pbspro-job debian/tmp/usr/share/arc/cancel-sge-job debian/tmp/usr/share/arc/scan-boinc-job debian/tmp/usr/share/arc/scan-ll-job debian/tmp/usr/share/arc/scan-lsf-job debian/tmp/usr/share/arc/scan-pbs-job debian/tmp/usr/share/arc/scan-pbspro-job debian/tmp/usr/share/arc/scan-sge-job debian/tmp/usr/share/arc/submit-boinc-job debian/tmp/usr/share/arc/submit-ll-job debian/tmp/usr/share/arc/submit-lsf-job debian/tmp/usr/share/arc/submit-pbs-job debian/tmp/usr/share/arc/submit-pbspro-job debian/tmp/usr/share/arc/submit-sge-job debian/tmp/usr/share/arc/Boinc.pm debian/tmp/usr/share/arc/LL.pm debian/tmp/usr/share/arc/LSF.pm debian/tmp/usr/share/arc/PBS.pm debian/tmp/usr/share/arc/PBSPRO.pm debian/tmp/usr/share/arc/SGE.pm debian/tmp/usr/share/arc/SGEmod.pm nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-monitor.install0000644000000000000000000000013115067751327022411 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.706266575 nordugrid-arc-7.1.1/debian/nordugrid-arc-monitor.install0000644000175000002070000000011515067751327024311 0ustar00mockbuildmock00000000000000debian/tmp/usr/share/arc/monitor/* debian/tmp/usr/share/man/man7/monitor.7* nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-python.install0000644000000000000000000000013115067751327023722 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.714719764 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-python.install0000644000175000002070000000041715067751327025627 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libaccPythonBroker.so debian/tmp/usr/lib/arc/libpythonservice.so debian/tmp/usr/lib/arc/libaccPythonBroker.apd debian/tmp/usr/lib/arc/libpythonservice.apd debian/tmp/usr/share/arc/examples/PythonBroker debian/tmp/usr/share/arc/examples/echo_python nordugrid-arc-7.1.1/debian/PaxHeaders/README.source0000644000000000000000000000013115067751327016753 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.758511467 nordugrid-arc-7.1.1/debian/README.source0000644000175000002070000000130615067751327020656 0ustar00mockbuildmock00000000000000The source code presented as .orig.tar.gz to Debian is functionally complete. The sources of all images shall be distributed with it. There is however the possibility that some files do exist in the subversion repository, that are not distributed further. This shall be considered a regular consequence of the development process, i.e. the Debian packages are not expected to be built from the development branch. 
The upstream developers, who are also maintaining this package, invite everyone to contribute actively to the future development of the ARC middleware and suggest inspecting http://svn.nordugrid.org for first steps. -- Steffen Moeller Thu, 14 Jan 2010 12:13:30 +0000 nordugrid-arc-7.1.1/debian/PaxHeaders/README.Debian0000644000000000000000000000013115067751327016635 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.757223661 nordugrid-arc-7.1.1/debian/README.Debian0000644000175000002070000000361115067751327020541 0ustar00mockbuildmock00000000000000nordugrid-arc ============= Open Source, Linux distributions and grid computing --------------------------------------------------- Grid Computing is all about having a community that is full of trust in the integrity of its contributors. Everything is logged - but you would not expect anyone intentionally evil amongst yourselves. The Debian Society is a forerunner in the formal representation of such collaborative flocks of individuals, and it is much respected throughout the scientific community for the achievements that maintain most of today's computational grids. The development of this second generation of the Advanced Resource Connector was mostly funded by the EU project "KnowARC". The aim to appeal to smaller and/or more heterogeneous communities than traditional High Energy Physics is key to the project. It was foreseen from the beginning to disseminate the development to the Linux community. The developers of ARC are found on the mailing list of NorduGrid (http://www.nordugrid.org), where the project has its roots. You may also be interested in the wiki pages (http://wiki.nordugrid.org) for a summary of first steps to adopt the technology. If you are interested in preparing your own Campus Grid, are working in a larger company with CPU time to harvest for your computations, or simply want to join with your own cluster, then please join in. Comments on the packaging ------------------------- ARC-1 was developed with Debian in mind. No special adaptations were required. For PDF generation, doxygen needs the texlive-extra-utils, texlive-latex-base, texlive-latex-recommended and texlive-latex-extra packages, which might come as a surprise to some. -- Mattias Ellert, Steffen Möller, Balazs Konya, Farid Ould-Saada, Anders Wäänänen, Aleksander Konstantinov, Peter Stefan and all other contributors of the ARC grid middleware.
Wed, 09 Dec 2009 13:34:52 +0100 nordugrid-arc-7.1.1/debian/PaxHeaders/watch0000644000000000000000000000013215067751327015626 xustar0030 mtime=1759498967.629490065 30 atime=1759498967.806492754 30 ctime=1759499034.686355832 nordugrid-arc-7.1.1/debian/watch0000644000175000002070000000022515067751327017527 0ustar00mockbuildmock00000000000000version = 3 https://download.nordugrid.org/packages/nordugrid-arc/releases/(\d+\.\d+\.\d+)/src/nordugrid-arc-(\d+\.\d+\.\d+)\.tar\.gz debian uupdate nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.postrm0000644000000000000000000000013215067751327023211 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.751626238 nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.postrm0000644000175000002070000000144615067751327025120 0ustar00mockbuildmock00000000000000#!/bin/sh set -e #DEBHELPER# if [ "$1" = "purge" ] ; then APP_PROFILE=/etc/apparmor.d/usr.sbin.slapd LOCAL_APP_PROFILE=/etc/apparmor.d/local/usr.sbin.slapd if [ -r "$LOCAL_APP_PROFILE" ] ; then sed '/# AppArmor profile for NorduGrid ARC ARIS START/,/# AppArmor profile for NorduGrid ARC ARIS END/d' -i "$LOCAL_APP_PROFILE" fi if [ ! -r "$APP_PROFILE" ] ; then if [ -r "$LOCAL_APP_PROFILE" ] ; then if [ -z "`sed '/^#/d' $LOCAL_APP_PROFILE`" ] ; then rm -f "$LOCAL_APP_PROFILE" || true fi fi rmdir /etc/apparmor.d/local 2>/dev/null || true rmdir /etc/apparmor.d 2>/dev/null || true fi if [ -r "$APP_PROFILE" ] ; then # Reload the profile if aa-status --enabled 2>/dev/null ; then apparmor_parser -r -T -W "$APP_PROFILE" || true fi fi fi nordugrid-arc-7.1.1/debian/PaxHeaders/libarcglobusutils4.install0000644000000000000000000000013115067751327022001 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.690040758 nordugrid-arc-7.1.1/debian/libarcglobusutils4.install0000644000175000002070000000005215067751327023701 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/libarcglobusutils.so.* nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-wn.lintian-overrides0000644000000000000000000000013115067751327023336 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.755832876 nordugrid-arc-7.1.1/debian/nordugrid-arc-wn.lintian-overrides0000644000175000002070000000010615067751327025236 0ustar00mockbuildmock00000000000000nordugrid-arc-wn: setuid-binary usr/bin/arc-job-cgroup 4755 root/root nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arex.dirs0000644000000000000000000000013215067751327021155 xustar0030 mtime=1759498967.626490019 30 atime=1759498967.805492739 30 ctime=1759499034.727331783 nordugrid-arc-7.1.1/debian/nordugrid-arc-arex.dirs0000644000175000002070000000010215067751327023050 0ustar00mockbuildmock00000000000000var/spool/arc var/spool/arc/ssm var/spool/arc/urs etc/arc.conf.d nordugrid-arc-7.1.1/debian/PaxHeaders/control0000644000000000000000000000013115067751327016177 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.682342354 nordugrid-arc-7.1.1/debian/control0000644000175000002070000004466215067751327020116 0ustar00mockbuildmock00000000000000Source: nordugrid-arc Section: net Priority: optional Maintainer: Mattias Ellert Uploaders: Anders Waananen Build-Depends: debhelper (>= 9), dh-autoreconf, autopoint, dh-python | python-support, dh-systemd | debhelper (>= 9.20160709) | debhelper (<< 9.20130630), libxml2-dev (>= 2.4.0), libssl-dev, libglibmm-2.68-dev | libglibmm-2.4-dev, libltdl-dev, uuid-dev, libcppunit-dev, pkg-config, libxmlsec1-dev (>= 1.2.4), 
libglobus-common-dev, libglobus-gssapi-gsi-dev, libglobus-ftp-client-dev, libglobus-ftp-control-dev, libxrootd-client-dev | dpkg (<< 1.20.9), libgfal2-dev, libs3-dev, openssl, swig, libnss3-dev, libjson-xs-perl, libxml-simple-perl, libdbi-perl, libsqlite3-dev (>= 3.6), libldns-dev, libsystemd-dev [linux-any] | debhelper (<< 9.20150101) [linux-any], bash-completion, help2man, python3-pip, python3-setuptools, python3-wheel, python3-dev Standards-Version: 4.4.1 Homepage: http://www.nordugrid.org Package: libarccommon4 Replaces: nordugrid-arc-hed (<< 1.0.1~rc2~), nordugrid-arc-arex (<< 2.0.1~), libarccommon0, libarccommon1, libarccommon2, libarccommon3 Conflicts: nordugrid-arc-chelonia (<< 2.0.0~), nordugrid-arc-hopi (<< 2.0.0~), nordugrid-arc-isis (<< 2.0.0~), nordugrid-arc-janitor (<< 2.0.0~), nordugrid-arc-doxygen (<< 4.0.0~), nordugrid-arc-arcproxyalt (<< 6.0.0~), nordugrid-arc-java (<< 6.0.0~), nordugrid-arc-egiis (<< 6.0.0~), nordugrid-arc-acix-cache (<< 6.0.0~), nordugrid-arc-acix-core (<< 7.0.0~), nordugrid-arc-acix-scanner (<< 7.0.0~), nordugrid-arc-acix-index (<< 7.0.0~), nordugrid-arc-arex-python-lrms (<< 7.0.0~), nordugrid-arc-gridftpd (<< 7.0.0~), nordugrid-arc-python (<< 7.0.0~), python-nordugrid-arc (<< 7.0.0~), nordugrid-arc-nordugridmap (<< 7.0.0~), nordugrid-arc-gridmap-utils (<< 6.0.0~), nordugrid-arc-plugins-gridftpjob (<< 7.0.0~), nordugrid-arc-plugins-ldap (<< 7.0.0~) Breaks: nordugrid-arc-hed (<< 1.0.1~rc2~), nordugrid-arc-arex (<< 2.0.1~), libarccommon0, libarccommon1, libarccommon2, libarccommon3 Architecture: any Section: libs Depends: ${shlibs:Depends}, ${misc:Depends}, ${python3:Depends}, openssl Description: Advanced Resource Connector Middleware NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . The ARC middleware is a software solution that uses distributed computing technologies to enable sharing and federation of computing resources across different administrative and application domains. ARC is used to create distributed infrastructures of various scope and complexity, from campus to national and global deployments. Package: nordugrid-arc-client Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}) Description: ARC command line clients NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This client package contains all the CLI tools that are needed to operate with x509 proxies, submit and manage jobs and handle data transfers. Package: nordugrid-arc-hed Replaces: nordugrid-arc-client (<< 1.0.1~rc2~) Breaks: nordugrid-arc-client (<< 1.0.1~rc2~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), lsb-base (>= 3.0-6) Description: ARC Hosting Environment Daemon NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . The ARC Hosting Environment Daemon (HED) is a Web Service container for ARC services. 
Package: nordugrid-arc-datadelivery-service Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), nordugrid-arc-hed (= ${binary:Version}), nordugrid-arc-arcctl-service (= ${source:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}), lsb-base (>= 3.0-6) Description: ARC data delivery service NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC data delivery service. Package: nordugrid-arc-infosys-ldap Provides: nordugrid-arc-ldap-infosys, nordugrid-arc-aris Replaces: nordugrid-arc-ldap-infosys (<< 6.0.0~), nordugrid-arc-aris (<< 6.0.0~), nordugrid-arc-ldap-egiis (<< 6.0.0~) Conflicts: nordugrid-arc-ldap-infosys (<< 6.0.0~), nordugrid-arc-aris (<< 6.0.0~), nordugrid-arc-ldap-egiis (<< 6.0.0~) Architecture: all Depends: ${misc:Depends}, ${perl:Depends}, slapd, glue-schema (>= 2.0.10), bdii, nordugrid-arc-arcctl-service (= ${source:Version}), lsb-base (>= 3.0-6) Description: ARC LDAP-based information services NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC information services relying on BDII and LDAP technologies to publish ARC CE information according to various LDAP schemas. Please note that the information collectors are part of another package, the nordugrid-arc-arex. Package: nordugrid-arc-monitor Replaces: nordugrid-arc-ldap-monitor (<< 6.0.0~), nordugrid-arc-ws-monitor (<< 6.0.0~) Conflicts: nordugrid-arc-ldap-monitor (<< 6.0.0~), nordugrid-arc-ws-monitor (<< 6.0.0~) Architecture: all Depends: ${misc:Depends}, php-common | php5-common, php-ldap | php5-ldap, php-gd | php5-gd Description: ARC LDAP monitor web application NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the PHP web application that is used to set up a web-based monitor which pulls information from the LDAP information system and visualizes it. Package: nordugrid-arc-arcctl Replaces: libarccommon3 (<< 6.5.0~), nordugrid-arc-arex (<< 6.5.0~) Breaks: libarccommon3 (<< 6.5.0~), nordugrid-arc-arex (<< 6.5.0~) Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, python3-jwcrypto, libarccommon4 (>= ${source:Version}) Description: ARC Control Tool NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC Control Tool with a basic set of control modules suitable for both server and client side. Package: nordugrid-arc-arcctl-service Replaces: libarccommon3 (<< 6.5.0~), nordugrid-arc-arcctl (<< 6.6.0~), nordugrid-arc-arex (<< 6.6.0~) Breaks: libarccommon3 (<< 6.5.0~), nordugrid-arc-arcctl (<< 6.6.0~), nordugrid-arc-arex (<< 6.6.0~) Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, libarccommon4 (>= ${source:Version}), nordugrid-arc-arcctl (= ${source:Version}) Description: ARC Control Tool - service control modules NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the service control modules for the ARC Control Tool that allow working with the server-side config and managing ARC services.
Package: nordugrid-arc-arex Provides: nordugrid-arc-cache-service, nordugrid-arc-candypond Replaces: nordugrid-arc-cache-service (<< 6.0.0~), nordugrid-arc-candypond (<< 6.0.0~), nordugrid-arc-aris (<< 6.0.0~), nordugrid-arc-infosys-ldap (<< 6.3.0~), libarccommon3 (<< 6.5.0~) Conflicts: nordugrid-arc-cache-service (<< 6.0.0~), nordugrid-arc-candypond (<< 6.0.0~) Breaks: nordugrid-arc-aris (<< 6.0.0~), nordugrid-arc-infosys-ldap (<< 6.3.0~), libarccommon3 (<< 6.5.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, ${perl:Depends}, ${python3:Depends}, libarccommon4 (= ${binary:Version}), nordugrid-arc-hed (= ${binary:Version}), nordugrid-arc-arcctl (= ${source:Version}), nordugrid-arc-arcctl-service (= ${source:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}), libjson-xs-perl, libxml-simple-perl, procps, lsb-base (>= 3.0-6) Description: ARC Resource-coupled EXecution service NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . The ARC Resource-coupled EXecution service (AREX) is the Computing Element of the ARC middleware. AREX offers a full-featured middle layer to manage computational tasks, including interfacing to local batch systems, taking care of complex environments such as data staging, data caching, software environment provisioning, information collection and exposure, accounting information gathering and publishing. Package: nordugrid-arc-arex-lrms-contrib Replaces: nordugrid-arc-arex (<< 7.0.0~) Breaks: nordugrid-arc-arex (<< 7.0.0~) Architecture: all Depends: ${misc:Depends}, ${perl:Depends}, ${python3:Depends}, nordugrid-arc-arex (>= ${source:Version}) Description: ARC Resource-coupled EXecution service - contributed LRMS backends NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . The AREX contributed LRMS backends package contains additional LRMS support scripts contributed by the ARC user community. Package: nordugrid-arc-community-rtes Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, nordugrid-arc-arex (>= ${source:Version}), nordugrid-arc-arcctl (= ${source:Version}), python3-dnspython Description: ARC community-defined RTEs support NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . Community RTEs is the framework that allows deploying software packages (tarballs, containers, etc.) provided by trusted communities to an ARC CE using simple arcctl commands. It is released as a technology preview. Package: nordugrid-arc-plugins-needed Architecture: any Provides: nordugrid-arc-plugins-arcrest Replaces: nordugrid-arc-plugins-arcrest (<< 7.0.0~) Conflicts: nordugrid-arc-plugins-arcrest (<< 7.0.0~) Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}) Description: ARC base plugins NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC base plugins. This includes the Message Chain Components (MCCs) and Data Manager Components (DMCs).
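Each plugin bundle ships shared objects together with .apd plugin-description files (visible in the *.install file contents further below, e.g. libdmcgridftpdeleg.so plus libdmcgridftpdeleg.apd). A minimal sketch for inspecting what is deployed on a host, assuming the /usr/lib/arc path used by these Debian packages: # List installed ARC plugin modules and their plugin descriptions ls /usr/lib/arc/*.so /usr/lib/arc/*.apd 2>/dev/null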
Package: nordugrid-arc-plugins-globus Architecture: all Section: oldlibs Depends: ${misc:Depends}, nordugrid-arc-plugins-gridftp (>= ${source:Version}), nordugrid-arc-plugins-lcas-lcmaps (>= ${source:Version}) Description: ARC Globus plugins (transitional package) NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC Globus plugins. This compat metapackage brings all Globus-dependent plugins at once, including: Data Manager Components (DMCs) and LCAS/LCMAPS tools. . This is a transitional package. It is meant to allow a smooth transition and will be removed in upcoming releases. Package: libarcglobusutils4 Replaces: libarcglobusutils3, nordugrid-arc-plugins-globus (<< 6.5.0~) Breaks: libarcglobusutils3, nordugrid-arc-plugins-globus (<< 6.5.0~) Architecture: any Section: libs Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}) Description: ARC Globus plugins common libraries NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . The ARC Globus plugins common libraries package includes the bundle of necessary Globus libraries needed for all other Globus-dependent ARC components. Package: nordugrid-arc-plugins-gridftp Replaces: nordugrid-arc-plugins-globus (<< 6.5.0~) Breaks: nordugrid-arc-plugins-globus (<< 6.5.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), libarcglobusutils4 (= ${binary:Version}) Description: ARC Globus-dependent DMCs NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC Globus GridFTP plugins. These allow access to data through the gridftp protocol. Package: nordugrid-arc-plugins-lcas-lcmaps Replaces: nordugrid-arc-plugins-globus (<< 6.5.0~) Breaks: nordugrid-arc-plugins-globus (<< 6.5.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), libarcglobusutils4 (= ${binary:Version}) Description: ARC LCAS/LCMAPS plugins NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC LCAS/LCMAPS tools allow configuring ARC CE to use LCAS/LCMAPS services for authorization and mapping. Package: nordugrid-arc-plugins-xrootd Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}) Description: ARC xrootd plugins NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC xrootd plugins. These allow access to data through the xrootd protocol. Package: nordugrid-arc-plugins-s3 Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}) Description: ARC S3 plugins NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC plugins for S3. These allow access to data through the S3 protocol. Package: nordugrid-arc-plugins-gfal Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}) Description: ARC GFAL2 plugins NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC plugins for GFAL2.
This allows third-party transfers and adds support for several extra transfer protocols (rfio, dcap, gsidcap). Support for specific protocols is provided by separate third-party GFAL2 plugin packages. Package: nordugrid-arc-plugins-internal Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), nordugrid-arc-arex (= ${binary:Version}) Description: ARC internal plugin NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . The ARC internal plugin. A special interface aimed at restrictive HPC sites, to be used with a local installation of the ARC Control Tower. Package: nordugrid-arc-plugins-python Replaces: nordugrid-arc-python (<< 6.6.0~) Breaks: nordugrid-arc-python (<< 6.6.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), python3-nordugrid-arc (= ${binary:Version}) Description: ARC Python dependent plugin NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . ARC plugins dependent on Python. Package: nordugrid-arc-dev Replaces: nordugrid-arc-python (<< 6.6.0~) Breaks: nordugrid-arc-python (<< 6.6.0~) Architecture: any Section: libdevel Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), libxml2-dev (>= 2.4.0), libssl-dev, libglibmm-2.68-dev | libglibmm-2.4-dev Description: ARC development files NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . Header files and libraries needed to develop applications using ARC. Package: python3-nordugrid-arc Provides: ${python3:Provides} Replaces: nordugrid-arc-python (<< 6.6.0~) Breaks: nordugrid-arc-python (<< 6.6.0~) Architecture: any Section: python Depends: ${shlibs:Depends}, ${misc:Depends}, ${python3:Depends}, libarccommon4 (= ${binary:Version}) Description: ARC Python 3 wrapper NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . Python 3 bindings for ARC. Package: nordugrid-arc-test-utils Provides: nordugrid-arc-misc-utils Replaces: nordugrid-arc-misc-utils (<< 6.0.0~) Conflicts: nordugrid-arc-misc-utils (<< 6.0.0~) Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, libarccommon4 (= ${binary:Version}), nordugrid-arc-plugins-needed (= ${binary:Version}) Description: ARC test tools NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains a few utilities useful to test various ARC subsystems. The package is not required by users or sysadmins; it is mainly for developers. Package: nordugrid-arc-archery-manage Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, python3-ldap | python3-pyldap, python3-dnspython Description: ARCHERY administration tool NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the archery-manage utility for administration of an ARCHERY DNS-embedded service endpoint registry.
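Because ARCHERY embeds the registry in DNS, its records can be examined with ordinary DNS tooling before resorting to archery-manage. An illustrative sketch; example.org is a placeholder domain and the _archery entry-point label follows the ARCHERY convention of publishing endpoints as TXT records: # Query the ARCHERY entry point of a hypothetical registry dig +short TXT _archery.example.org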
Package: nordugrid-arc-wn Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Description: ARC optional worker node components NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the optional components that provide new job management features on the worker nodes (WN). Package: python3-arcrest Provides: ${python3:Provides} Architecture: all Section: python Depends: ${misc:Depends}, ${python3:Depends} Description: ARC REST client NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the ARC REST client. Package: nordugrid-arc-arc-exporter Architecture: all Depends: ${misc:Depends}, ${python3:Depends}, python3-prometheus-client Description: ARC Prometheus exporter NorduGrid is a collaboration aiming at development, maintenance and support of the middleware, known as the Advanced Resource Connector (ARC). . This package contains the Prometheus arc-exporter, which collects and publishes metrics about jobs and data staging on the ARC-CE. nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-plugins-gridftp.install0000644000000000000000000000013115067751327024040 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.708992014 nordugrid-arc-7.1.1/debian/nordugrid-arc-plugins-gridftp.install0000644000175000002070000000020115067751327025744 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/arc/libdmcgridftpdeleg.so debian/tmp/usr/lib/arc/libdmcgridftpdeleg.apd debian/tmp/usr/lib/arc/arc-dmcgridftp nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-wn.install0000644000000000000000000000013115067751327021346 xustar0029 mtime=1759498967.62849005 30 atime=1759498967.806492754 30 ctime=1759499034.720714753 nordugrid-arc-7.1.1/debian/nordugrid-arc-wn.install0000644000175000002070000000004215067751327023245 0ustar00mockbuildmock00000000000000debian/tmp/usr/bin/arc-job-cgroup nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-arcctl-service.install0000644000000000000000000000013115067751327023630 xustar0029 mtime=1759498967.62552191 30 atime=1759498967.805492739 30 ctime=1759499034.692496646 nordugrid-arc-7.1.1/debian/nordugrid-arc-arcctl-service.install0000644000175000002070000000063415067751327025536 0ustar00mockbuildmock00000000000000debian/tmp/usr/lib/python?.*/site-packages/arc/control/Cleanup.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/Config.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/ServiceCommon.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/Services.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/OSService.py* debian/tmp/usr/lib/python?.*/site-packages/arc/control/Validator.py* nordugrid-arc-7.1.1/debian/PaxHeaders/changelog0000644000000000000000000000013215067751414016444 xustar0030 mtime=1759499020.623295308 30 atime=1759499024.524354585 30 ctime=1759499034.679819947 nordugrid-arc-7.1.1/debian/changelog0000644000175000002070000002162515067751414020354 0ustar00mockbuildmock00000000000000nordugrid-arc (7.1.1-1) unstable; urgency=low * Unofficial build.
-- Anders Waananen Fri, 03 Oct 2025 15:43:34 +0200 nordugrid-arc (4.0.0~rc2-1) unstable; urgency=low * 4.0.0 Release Candidate 2 -- Anders Waananen Thu, 07 Nov 2013 11:01:24 +0100 nordugrid-arc (4.0.0~rc1-1) unstable; urgency=low * 4.0.0 Release Candidate 1 -- Anders Waananen Tue, 29 Oct 2013 23:22:57 +0100 nordugrid-arc (3.0.3-1) unstable; urgency=low * 3.0.3 Final Release -- Anders Waananen Fri, 19 Jul 2013 12:05:50 +0200 nordugrid-arc (3.0.2-1) unstable; urgency=low * 3.0.2 Final Release -- Anders Waananen Wed, 12 Jun 2013 15:09:59 +0200 nordugrid-arc (3.0.1-1) unstable; urgency=low * 3.0.1 Final Release -- Anders Waananen Tue, 30 Apr 2013 00:47:43 +0200 nordugrid-arc (3.0.1~rc2-1) unstable; urgency=low * 3.0.1 Release Candidate 2 -- Anders Waananen Fri, 12 Apr 2013 16:56:03 +0200 nordugrid-arc (3.0.1~rc1-1) unstable; urgency=low * 3.0.1 Release Candidate 1 -- Anders Waananen Fri, 12 Apr 2013 13:50:41 +0200 nordugrid-arc (3.0.0-1) unstable; urgency=low * 3.0.0 Final Release -- Anders Waananen Fri, 22 Mar 2013 12:32:51 +0100 nordugrid-arc (3.0.0~rc5-1) unstable; urgency=low * 3.0.0 Release Candidate 5 -- Anders Waananen Wed, 06 Feb 2013 12:12:48 +0100 nordugrid-arc (3.0.0~rc4-1) unstable; urgency=low * 3.0.0 Release Candidate 4 -- Anders Waananen Sat, 02 Feb 2013 01:00:33 +0100 nordugrid-arc (3.0.0~rc3-1) unstable; urgency=low * 3.0.0 Release Candidate 3 -- Anders Waananen Wed, 30 Jan 2013 09:02:17 +0100 nordugrid-arc (3.0.0~rc2-1) unstable; urgency=low * 3.0.0 Release Candidate 2 -- Anders Waananen Mon, 28 Jan 2013 07:55:14 +0100 nordugrid-arc (3.0.0~rc1-1) unstable; urgency=low * 3.0.0 Release Candidate 1 -- Anders Waananen Thu, 06 Dec 2012 22:05:31 +0100 nordugrid-arc (2.0.1-1) unstable; urgency=low * 2.0.1 Final Release -- Anders Waananen Thu, 22 Nov 2012 23:47:19 +0100 nordugrid-arc (2.0.1rc2) unstable; urgency=low * 2.0.1rc2 Release Candidate 2 -- Anders Waananen Thu, 25 Oct 2012 13:00:02 +0200 nordugrid-arc (2.0.1rc1) unstable; urgency=low * 2.0.1rc1 Release Candidate 1 -- Anders Waananen Mon, 27 Aug 2012 13:26:30 +0200 nordugrid-arc (2.0.0-1) unstable; urgency=low * 2.0.0 Final Release -- Mattias Ellert Wed, 23 May 2012 19:27:47 +0200 nordugrid-arc (2.0.0~rc4-1) unstable; urgency=low * 2.0.0 Release Candidate 4 -- Mattias Ellert Mon, 02 Apr 2012 16:06:45 +0200 nordugrid-arc (2.0.0~rc3.1-1) unstable; urgency=low * 2.0.0 Release Candidate 3.1 -- Mattias Ellert Tue, 27 Mar 2012 10:30:23 +0200 nordugrid-arc (2.0.0~rc3-1) unstable; urgency=low * 2.0.0 Release Candidate 3 -- Mattias Ellert Mon, 05 Mar 2012 16:27:32 +0100 nordugrid-arc (2.0.0~rc2-1) unstable; urgency=low * 2.0.0 Release Candidate 2 -- Mattias Ellert Wed, 15 Feb 2012 13:54:17 +0100 nordugrid-arc (1.1.0-1) unstable; urgency=low * 1.1.0 Final Release -- Mattias Ellert Mon, 03 Oct 2011 14:30:45 +0200 nordugrid-arc (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 Release Candidate 2 -- Mattias Ellert Sun, 25 Sep 2011 05:42:22 +0200 nordugrid-arc (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 Release Candidate 1 -- Mattias Ellert Sun, 11 Sep 2011 20:08:33 +0200 nordugrid-arc (1.0.1-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Sat, 23 Jul 2011 09:32:53 +0200 nordugrid-arc (1.0.1~rc4-1) unstable; urgency=low * 1.0.1 Release Candidate 4 -- Mattias Ellert Tue, 19 Jul 2011 15:17:05 +0200 nordugrid-arc (1.0.1~rc1-1) unstable; urgency=low * 1.0.1 Release Candidate 1 -- Mattias Ellert Sat, 18 Jun 2011 18:29:09 +0200 nordugrid-arc (1.0.0-1) unstable; urgency=low * 1.0.0 Final Release -- Mattias Ellert Mon, 18 Apr 2011 
08:59:55 +0200 nordugrid-arc (1.0.0~b5-1) unstable; urgency=low * 1.0.0 Beta Release 5 -- Mattias Ellert Wed, 06 Apr 2011 14:08:52 +0200 nordugrid-arc (1.0.0~b4-1) unstable; urgency=low * 1.0.0 Beta Release 4 -- Mattias Ellert Wed, 23 Mar 2011 15:19:08 +0100 nordugrid-arc (1.0.0~b3-1) unstable; urgency=low * 1.0.0 Beta Release 3 -- Mattias Ellert Thu, 10 Mar 2011 17:05:28 +0100 nordugrid-arc (1.0.0~b2-1) unstable; urgency=low * 1.0.0 Beta Release 2 -- Mattias Ellert Mon, 07 Mar 2011 05:12:30 +0100 nordugrid-arc (1.0.0~b1-1) unstable; urgency=low * 1.0.0 Beta Release 1 -- Mattias Ellert Mon, 14 Feb 2011 17:19:04 +0100 nordugrid-arc-nox (1.2.1-1) unstable; urgency=low * 1.2.1 Final release -- Mattias Ellert Tue, 21 Dec 2010 22:34:02 +0100 nordugrid-arc-nox (1.2.1~rc2-1) unstable; urgency=low * 1.2.1 Release Candidate 2 -- Mattias Ellert Tue, 21 Dec 2010 09:36:46 +0100 nordugrid-arc-nox (1.2.1~rc1-1) unstable; urgency=low * 1.2.1 Release Candidate 1 -- Mattias Ellert Wed, 08 Dec 2010 15:30:37 +0100 nordugrid-arc-nox (1.2.0-1) unstable; urgency=low * 1.2.0 Final release -- Mattias Ellert Fri, 22 Oct 2010 15:25:07 +0200 nordugrid-arc-nox (1.2.0~rc2-1) unstable; urgency=low * 1.2.0 Release Candidate 2 -- Mattias Ellert Thu, 30 Sep 2010 10:11:14 +0200 nordugrid-arc-nox (1.2.0~rc1-1) unstable; urgency=low * 1.2.0 Release Candidate 1 -- Mattias Ellert Mon, 13 Sep 2010 11:14:51 +0200 nordugrid-arc-nox (1.1.0-1) unstable; urgency=low * 1.1.0 Final release -- Mattias Ellert Wed, 05 May 2010 18:31:59 +0200 nordugrid-arc-nox (1.1.0~rc6-1) unstable; urgency=low * 1.1.0 Release Candidate 6 -- Mattias Ellert Mon, 08 Mar 2010 20:36:00 +0100 nordugrid-arc-nox (1.1.0~rc5-2) unstable; urgency=low * Rebuild for Globus Toolkit 5 -- Mattias Ellert Fri, 26 Feb 2010 16:25:39 +0100 nordugrid-arc-nox (1.1.0~rc5-1) unstable; urgency=low * 1.1.0 Release Candidate 5 -- Mattias Ellert Fri, 26 Feb 2010 15:07:39 +0100 nordugrid-arc-nox (1.1.0~rc4-1) unstable; urgency=low * 1.1.0 release candidate 4 -- Mattias Ellert Wed, 24 Feb 2010 12:34:41 +0100 nordugrid-arc-nox (1.1.0~rc3-1) unstable; urgency=low * 1.1.0 release candidate 3 -- Mattias Ellert Mon, 22 Feb 2010 10:20:27 +0100 nordugrid-arc-nox (1.1.0~rc2-1) unstable; urgency=low * 1.1.0 release candidate 2 -- Mattias Ellert Mon, 15 Feb 2010 19:08:07 +0100 nordugrid-arc-nox (1.1.0~rc1-1) unstable; urgency=low * 1.1.0 release candidate 1 -- Mattias Ellert Thu, 11 Feb 2010 19:48:01 +0100 nordugrid-arc-nox (1.0.0-1) unstable; urgency=low * 1.0.0 Final release -- Mattias Ellert Sun, 29 Nov 2009 23:13:41 +0100 nordugrid-arc-nox (1.0.0~rc7-1) unstable; urgency=low * 1.0.0 release candidate 7 -- Mattias Ellert Thu, 19 Nov 2009 15:30:32 +0100 nordugrid-arc-nox (1.0.0~rc6-1) unstable; urgency=low * 1.0.0 release candidate 6 -- Mattias Ellert Thu, 12 Nov 2009 10:12:45 +0100 nordugrid-arc-nox (1.0.0~rc5-1) unstable; urgency=low * 1.0.0 release candidate 5 -- Mattias Ellert Wed, 04 Nov 2009 16:45:22 +0100 nordugrid-arc1 (0.9.4~rc4-1) unstable; urgency=low * 0.9.3 release candidate 4 -- Mattias Ellert Mon, 26 Oct 2009 23:19:55 +0100 nordugrid-arc1 (0.9.4~rc3-1) unstable; urgency=low * 0.9.3 release candidate 3 -- Mattias Ellert Thu, 22 Oct 2009 19:22:31 +0200 nordugrid-arc1 (0.9.4~rc2-1) unstable; urgency=low * 0.9.3 release candidate 2 -- Mattias Ellert Thu, 15 Oct 2009 09:04:24 +0200 nordugrid-arc1 (0.9.3-1) unstable; urgency=low * Final 0.9.3 release -- Mattias Ellert Sun, 27 Sep 2009 01:27:31 +0200 nordugrid-arc1 (0.9.3~rc3-1) unstable; urgency=low * Initial release -- Mattias 
Ellert Mon, 5 Nov 2007 10:12:49 -0400 nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-infosys-ldap.maintscript.no-enable0000644000000000000000000000013215067751327026061 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.742165393 nordugrid-arc-7.1.1/debian/nordugrid-arc-infosys-ldap.maintscript.no-enable0000644000175000002070000000006015067751327027757 0ustar00mockbuildmock00000000000000rm_conffile /etc/default/arc-infosys-ldap 6.7.0 nordugrid-arc-7.1.1/debian/PaxHeaders/nordugrid-arc-datadelivery-service.maintscript.no-enable0000644000000000000000000000013215067751327027564 xustar0030 mtime=1759498967.627490034 30 atime=1759498967.806492754 30 ctime=1759499034.736771507 nordugrid-arc-7.1.1/debian/nordugrid-arc-datadelivery-service.maintscript.no-enable0000644000175000002070000000007015067751327031463 0ustar00mockbuildmock00000000000000rm_conffile /etc/default/arc-datadelivery-service 6.7.0 nordugrid-arc-7.1.1/PaxHeaders/config.guess0000644000000000000000000000013215067751346015671 xustar0030 mtime=1759498982.903913345 30 atime=1759498994.253894624 30 ctime=1759499024.701096955 nordugrid-arc-7.1.1/config.guess0000755000175000002070000012617315067751346017604 0ustar00mockbuildmock00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2018 Free Software Foundation, Inc. timestamp='2018-08-29' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <https://www.gnu.org/licenses/>. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: # https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess # # Please send patches to <config-patches@gnu.org>. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to <config-patches@gnu.org>." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input.
break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. tmp= # shellcheck disable=SC2172 trap 'test -z "$tmp" || rm -fr "$tmp"' 1 2 13 15 trap 'exitcode=$?; test -z "$tmp" || rm -fr "$tmp"; exit $exitcode' 0 set_cc_for_build() { : "${TMPDIR=/tmp}" # shellcheck disable=SC2039 { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } dummy=$tmp/dummy case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in ,,) echo "int x;" > "$dummy.c" for driver in cc gcc c89 c99 ; do if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then CC_FOR_BUILD="$driver" break fi done if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac } # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if test -f /.attbin/uname ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "$UNAME_SYSTEM" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu set_cc_for_build cat <<-EOF > "$dummy.c" #include <features.h> #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" # If ldd exists, use it to detect musl libc. if command -v ldd >/dev/null && \ ldd --version 2>&1 | grep -q ^musl then LIBC=musl fi ;; esac # Note: order is significant - the case branches are not exclusive. case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown".
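# Illustrative annotation (not part of upstream config.guess): on a # NetBSD/amd64 host the logic below typically yields a triplet such as # x86_64-unknown-netbsd9.2, i.e. the CPU type, the fixed "unknown" # vendor, and the OS name with the major.minor release appended.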
sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ "/sbin/$sysctl" 2>/dev/null || \ "/usr/sbin/$sysctl" 2>/dev/null || \ echo unknown)` case "$UNAME_MACHINE_ARCH" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` machine="${arch}${endian}"-unknown ;; *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. case "$UNAME_MACHINE_ARCH" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # Determine ABI tags. case "$UNAME_MACHINE_ARCH" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "$UNAME_VERSION" in Debian*) release='-gnu' ;; *) release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "$machine-${os}${release}${abi-}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" exit ;; *:MidnightBSD:*:*) echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:Sortix:*:*) echo "$UNAME_MACHINE"-unknown-sortix exit ;; *:Redox:*:*) echo "$UNAME_MACHINE"-unknown-redox exit ;; mips:OSF1:*.*) echo mips-dec-osf1 exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. 
ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") UNAME_MACHINE=alpha ;; "EV5 (21164)") UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) UNAME_REL="`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" case `isainfo -b` in 32) echo i386-pc-solaris2"$UNAME_REL" ;; 64) echo x86_64-pc-solaris2"$UNAME_REL" ;; esac exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. 
echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include <stdio.h> /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] then if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ [ "$TARGET_BINARY_INTERFACE"x = x ] then echo m88k-dg-dgux"$UNAME_RELEASE" else echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else echo i586-dg-dgux"$UNAME_RELEASE"
fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #include <sys/systemcfg.h> main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/lslpp ] ; then IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` case "$UNAME_MACHINE" in 9000/31?) HP_ARCH=m68000 ;; 9000/[34]??)
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "$sc_cpu_version" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "$sc_kernel_bits" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi if [ "$HP_ARCH" = "" ]; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include <stdlib.h> #include <unistd.h> int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ "$HP_ARCH" = hppa2.0w ] then set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH=hppa2.0w else HP_ARCH=hppa64 fi fi echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #include <unistd.h> int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however.
*/ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo "$UNAME_MACHINE"-unknown-osf1mk else echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" exit ;; arm:FreeBSD:*:*) UNAME_PROCESSOR=`uname -p` set_cc_for_build if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi else echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf fi exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case "$UNAME_PROCESSOR" in amd64) UNAME_PROCESSOR=x86_64 ;; i386) UNAME_PROCESSOR=i586 ;; esac echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo 
"$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; i*:CYGWIN*:*) echo "$UNAME_MACHINE"-pc-cygwin exit ;; *:MINGW64*:*) echo "$UNAME_MACHINE"-pc-mingw64 exit ;; *:MINGW*:*) echo "$UNAME_MACHINE"-pc-mingw32 exit ;; *:MSYS*:*) echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) case "$UNAME_MACHINE" in x86) echo i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; i*:UWIN*:*) echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; *:GNU:*:*) # the GNU system echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" exit ;; *:Minix:*:*) echo "$UNAME_MACHINE"-unknown-minix exit ;; aarch64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" = 0 ; then LIBC=gnulibc1 ; fi echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; e2k:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; k1om:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m68*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`" test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; } ;; 
mips64el:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-"$LIBC" exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-"$LIBC" exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-"$LIBC" exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-"$LIBC" exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-"$LIBC" exit ;; riscv32:Linux:*:* | riscv64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; xtensa*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; i*86:*:4.*:*) UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. 
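# Annotation (not from upstream config.guess): "uname -X" is an # SCO/UnixWare extension printing verbose "Field = value" output; the # Machine field is matched below to map CPU names onto i486/i586/i686.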
case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL" elif /bin/uname -X 2>/dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" else echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo "$UNAME_MACHINE"-sni-sysv4
else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says <Richard.M.Bartel@ccMail.Census.GOV> echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes <hewes@openmarket.com>. # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv"$UNAME_RELEASE" else echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux"$UNAME_RELEASE" exit ;; SX-ACE:SUPER-UX:*:*) echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown set_cc_for_build if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_PPC >/dev/null then UNAME_PROCESSOR=powerpc fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub # that puts up a graphical alert prompting to install # developer tools. Any system running Mac OS X 10.7 or # later (Darwin 11 and later) is required to have a 64-bit # processor. This is not true of the ARM version of Darwin # that Apple uses in portable devices.
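# Annotation (not from upstream config.guess): on such a machine the # script settles on x86_64 without invoking a compiler and would print, # for example, x86_64-apple-darwin13.4.0 (Darwin 13 = OS X 10.9).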
UNAME_PROCESSOR=x86_64 fi echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-*:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk"$UNAME_RELEASE" exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk"$UNAME_RELEASE" exit ;; NSR-*:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk"$UNAME_RELEASE" exit ;; NSV-*:NONSTOP_KERNEL:*:*) echo nsv-tandem-nsk"$UNAME_RELEASE" exit ;; NSX-*:NONSTOP_KERNEL:*:*) echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. # shellcheck disable=SC2154 if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "$UNAME_MACHINE" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" exit ;; i*86:rdos:*:*) echo "$UNAME_MACHINE"-pc-rdos exit ;; i*86:AROS:*:*) echo "$UNAME_MACHINE"-pc-aros exit ;; x86_64:VMkernel:*:*) echo "$UNAME_MACHINE"-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; esac echo "$0: unable to guess system type" >&2 case "$UNAME_MACHINE:$UNAME_SYSTEM" in mips:Linux | mips64:Linux) # If we got here on MIPS GNU/Linux, output extra information. 
cat >&2 <<EOF NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize the system type. Please install a C compiler and try again. EOF ;; esac cat >&2 <<EOF This script (version $timestamp), has failed to recognize the operating system you are using. If your script is old, overwrite *all* copies of config.guess and config.sub with the latest versions from: https://git.savannah.gnu.org/cgit/config.git/plain/config.guess and https://git.savannah.gnu.org/cgit/config.git/plain/config.sub If $0 has already been updated, send the following data and any information you think might be pertinent to config-patches@gnu.org to provide the necessary information to handle your system. config.guess timestamp = $timestamp uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = "$UNAME_MACHINE" UNAME_RELEASE = "$UNAME_RELEASE" UNAME_SYSTEM = "$UNAME_SYSTEM" UNAME_VERSION = "$UNAME_VERSION" EOF exit 1 # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: nordugrid-arc-7.1.1/PaxHeaders/ltmain.sh0000644000000000000000000000013215067751337015174 xustar0030 mtime=1759498975.821614545 30 atime=1759499024.561355147 30 ctime=1759499024.705916858 nordugrid-arc-7.1.1/ltmain.sh0000644000175000002070000117106715067751337017103 0ustar00mockbuildmock00000000000000#! /bin/sh ## DO NOT EDIT - This file generated from ./build-aux/ltmain.in ## by inline-source v2014-01-03.01 # libtool (GNU libtool) 2.4.6 # Provide generalized library-building support services. # Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996 # Copyright (C) 1996-2015 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. PROGRAM=libtool PACKAGE=libtool VERSION=2.4.6 package_revision=2.4.6 ## ------ ## ## Usage. ## ## ------ ## # Run './libtool --help' for help with using this script from the # command line. ## ------------------------------- ## ## User overridable command paths. ## ## ------------------------------- ## # After configure completes, it has a better idea of some of the # shell tools we need than the defaults used by the functions shared # with bootstrap, so set those here where they can still be over- # ridden by the user, but otherwise take precedence. : ${AUTOCONF="autoconf"} : ${AUTOMAKE="automake"} ## -------------------------- ## ## Source external libraries. ## ## -------------------------- ## # Much of our low-level functionality needs to be sourced from external # libraries, which are installed to $pkgauxdir. # Set a version string for this script. scriptversion=2015-01-20.17; # UTC
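# The ': ${AUTOCONF="autoconf"}' style defaults above rely on the
# shell's default-value expansion: ':' is a no-op command, and
# ${VAR="value"} assigns only when VAR is unset, so callers may pre-seed
# the tool paths from the environment. An illustrative invocation (the
# path shown is an assumption, not something shipped here):
#   AUTOCONF=/opt/autoconf/bin/autoconf ./libtool --version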
# General shell script boiler plate, and helper functions. # Written by Gary V. Vaughan, 2004 # Copyright (C) 2004-2015 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # As a special exception to the GNU General Public License, if you distribute # this file as part of a program or library that is built using GNU Libtool, # you may include this file under the same distribution terms that you use # for the rest of that program. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Please report bugs or propose patches to gary@gnu.org. ## ------ ## ## Usage. ## ## ------ ## # Evaluate this file near the top of your script to gain access to # the functions and variables defined here: # # . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh # # If you need to override any of the default environment variable # settings, do that before evaluating this file. ## -------------------- ## ## Shell normalisation. ## ## -------------------- ## # Some shells need a little help to be as Bourne compatible as possible. # Before doing anything else, make sure all that help has been provided! DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # NLS nuisances: We save the old values in case they are required later. _G_user_locale= _G_safe_locale= for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test set = \"\${$_G_var+set}\"; then save_$_G_var=\$$_G_var $_G_var=C export $_G_var _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" fi" done # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Make sure IFS has a sensible default sp=' ' nl=' ' IFS="$sp $nl" # There are apparently some retarded systems that use ';' as a PATH separator! if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi ## ------------------------- ## ## Locate command utilities. ## ## ------------------------- ## # func_executable_p FILE # ---------------------- # Check that FILE is an executable regular file. func_executable_p () { test -f "$1" && test -x "$1" }
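# A quick usage sketch for func_executable_p; the candidate path is only
# an example:
#   if func_executable_p /usr/local/bin/gmake; then
#     MAKE=/usr/local/bin/gmake
#   fi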
# func_path_progs PROGS_LIST CHECK_FUNC [PATH] # -------------------------------------------- # Search for either a program that responds to --version with output # containing "GNU", or else returned by CHECK_FUNC otherwise, by # trying all the directories in PATH with each of the elements of # PROGS_LIST. # # CHECK_FUNC should accept the path to a candidate program, and # set $func_check_prog_result if it truncates its output less than # $_G_path_prog_max characters. func_path_progs () { _G_progs_list=$1 _G_check_func=$2 _G_PATH=${3-"$PATH"} _G_path_prog_max=0 _G_path_prog_found=false _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} for _G_dir in $_G_PATH; do IFS=$_G_save_IFS test -z "$_G_dir" && _G_dir=. for _G_prog_name in $_G_progs_list; do for _exeext in '' .EXE; do _G_path_prog=$_G_dir/$_G_prog_name$_exeext func_executable_p "$_G_path_prog" || continue case `"$_G_path_prog" --version 2>&1` in *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; *) $_G_check_func $_G_path_prog func_path_progs_result=$func_check_prog_result ;; esac $_G_path_prog_found && break 3 done done done IFS=$_G_save_IFS test -z "$func_path_progs_result" && { echo "no acceptable sed could be found in \$PATH" >&2 exit 1 } } # We want to be able to use the functions in this file before configure # has figured out where the best binaries are kept, which means we have # to search for them ourselves - except when the results are already set # where we skip the searches. # Unless the user overrides by setting SED, search the path for either GNU # sed, or the sed that truncates its output the least. test -z "$SED" && { _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for _G_i in 1 2 3 4 5 6 7; do _G_sed_script=$_G_sed_script$nl$_G_sed_script done echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed _G_sed_script= func_check_prog_sed () { _G_path_prog=$1 _G_count=0 printf 0123456789 >conftest.in while : do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo '' >> conftest.nl "$_G_path_prog" -f conftest.sed <conftest.nl >conftest.out 2>/dev/null || break diff conftest.out conftest.nl >/dev/null 2>&1 || break _G_count=`expr $_G_count + 1` if test "$_G_count" -gt "$_G_path_prog_max"; then # Best one so far, save it but keep looking for a better one func_check_prog_result=$_G_path_prog _G_path_prog_max=$_G_count fi # 10*(2^10) chars as input seems more than enough test 10 -lt "$_G_count" && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out } func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin rm -f conftest.sed SED=$func_path_progs_result } # Unless the user overrides by setting GREP, search the path for either GNU # grep, or the grep that truncates its output the least. test -z "$GREP" && { func_check_prog_grep () { _G_path_prog=$1 _G_count=0 _G_path_prog_max=0 printf 0123456789 >conftest.in while : do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo 'GREP' >> conftest.nl "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' <conftest.nl >conftest.out 2>/dev/null || break diff conftest.out conftest.nl >/dev/null 2>&1 || break _G_count=`expr $_G_count + 1` if test "$_G_count" -gt "$_G_path_prog_max"; then # Best one so far, save it but keep looking for a better one func_check_prog_result=$_G_path_prog _G_path_prog_max=$_G_count fi # 10*(2^10) chars as input seems more than enough test 10 -lt "$_G_count" && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out } func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin GREP=$func_path_progs_result } ## ------------------------------- ## ## User overridable command paths. ## ## ------------------------------- ## # All uppercase variable names are used for environment variables.
These # variables can be overridden by the user before calling a script that # uses them if a suitable command of that name is not already available # in the command search PATH. : ${CP="cp -f"} : ${ECHO="printf %s\n"} : ${EGREP="$GREP -E"} : ${FGREP="$GREP -F"} : ${LN_S="ln -s"} : ${MAKE="make"} : ${MKDIR="mkdir"} : ${MV="mv -f"} : ${RM="rm -f"} : ${SHELL="${CONFIG_SHELL-/bin/sh}"} ## -------------------- ## ## Useful sed snippets. ## ## -------------------- ## sed_dirname='s|/[^/]*$||' sed_basename='s|^.*/||' # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='s|\([`"$\\]\)|\\\1|g' # Same as above, but do not quote variable references. sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution that turns a string into a regex matching for the # string literally. sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' # Sed substitution that converts a w32 file name or path # that contains forward slashes, into one that contains # (escaped) backslashes. A very naive implementation. sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' # Re-'\' parameter expansions in output of sed_double_quote_subst that # were '\'-ed in input to the same. If an odd number of '\' preceded a # '$' in input to sed_double_quote_subst, that '$' was protected from # expansion. Since each input '\' is now two '\'s, look for any number # of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. _G_bs='\\' _G_bs2='\\\\' _G_bs4='\\\\\\\\' _G_dollar='\$' sed_double_backslash="\ s/$_G_bs4/&\\ /g s/^$_G_bs2$_G_dollar/$_G_bs&/ s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g s/\n//g" ## ----------------- ## ## Global variables. ## ## ----------------- ## # Except for the global variables explicitly listed below, the following # functions in the '^func_' namespace, and the '^require_' namespace # variables initialised in the 'Resource management' section, sourcing # this file will not pollute your global namespace with anything # else. There's no portable way to scope variables in Bourne shell # though, so actually running these functions will sometimes place # results into a variable named after the function, and often use # temporary variables in the '^_G_' namespace. If you are careful to # avoid using those namespaces casually in your sourcing script, things # should continue to work as you expect. And, of course, you can freely # overwrite any of the functions or variables defined here before # calling anything to customize them. EXIT_SUCCESS=0 EXIT_FAILURE=1 EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. # Allow overriding, eg assuming that you follow the convention of # putting '$debug_cmd' at the start of all your functions, you can get # bash to show function call trace with: # # debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name debug_cmd=${debug_cmd-":"} exit_cmd=: # By convention, finish your script with: # # exit $exit_status # # so that you can set exit_status to non-zero if you want to indicate # something went wrong during execution without actually bailing out at # the point of failure. exit_status=$EXIT_SUCCESS # Work around backward compatibility issue on IRIX 6.5. 
On IRIX 6.4+, sh # is ksh but when the shell is invoked as "sh" and the current value of # the _XPG environment variable is not equal to 1 (one), the special # positional parameter $0, within a function call, is the name of the # function. progpath=$0 # The name of this program. progname=`$ECHO "$progpath" |$SED "$sed_basename"` # Make sure we have an absolute progpath for reexecution: case $progpath in [\\/]*|[A-Za-z]:\\*) ;; *[\\/]*) progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` progdir=`cd "$progdir" && pwd` progpath=$progdir/$progname ;; *) _G_IFS=$IFS IFS=${PATH_SEPARATOR-:} for progdir in $PATH; do IFS=$_G_IFS test -x "$progdir/$progname" && break done IFS=$_G_IFS test -n "$progdir" || progdir=`pwd` progpath=$progdir/$progname ;; esac ## ----------------- ## ## Standard options. ## ## ----------------- ## # The following options affect the operation of the functions defined # below, and should be set appropriately depending on run-time para- # meters passed on the command line. opt_dry_run=false opt_quiet=false opt_verbose=false # Categories 'all' and 'none' are always available. Append any others # you will pass as the first argument to func_warning from your own # code. warning_categories= # By default, display warnings according to 'opt_warning_types'. Set # 'warning_func' to ':' to elide all warnings, or func_fatal_error to # treat the next displayed warning as a fatal error. warning_func=func_warn_and_continue # Set to 'all' to display all warnings, 'none' to suppress all # warnings, or a space delimited list of some subset of # 'warning_categories' to display only the listed warnings. opt_warning_types=all ## -------------------- ## ## Resource management. ## ## -------------------- ## # This section contains definitions for functions that each ensure a # particular resource (a file, or a non-empty configuration variable for # example) is available, and if appropriate to extract default values # from pertinent package files. Call them using their associated # 'require_*' variable to ensure that they are executed, at most, once. # # It's entirely deliberate that calling these functions can set # variables that don't obey the namespace limitations obeyed by the rest # of this file, in order that they be as useful as possible to # callers. # require_term_colors # ------------------- # Allow display of bold text on terminals that support it. require_term_colors=func_require_term_colors func_require_term_colors () { $debug_cmd test -t 1 && { # COLORTERM and USE_ANSI_COLORS environment variables take # precedence, because most terminfo databases neglect to describe # whether color sequences are supported. test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} if test 1 = "$USE_ANSI_COLORS"; then # Standard ANSI escape sequences tc_reset='' tc_bold=''; tc_standout='' tc_red=''; tc_green='' tc_blue=''; tc_cyan='' else # Otherwise trust the terminfo database after all. test -n "`tput sgr0 2>/dev/null`" && { tc_reset=`tput sgr0` test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` tc_standout=$tc_bold test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` } fi } require_term_colors=: } ## ----------------- ## ## Function library.
## ## ----------------- ## # This section contains a variety of useful functions to call in your # scripts. Take note of the portable wrappers for features provided by # some modern shells, which will fall back to slower equivalents on # less featureful shells. # func_append VAR VALUE # --------------------- # Append VALUE onto the existing contents of VAR. # We should try to minimise forks, especially on Windows where they are # unreasonably slow, so skip the feature probes when bash or zsh are # being used: if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then : ${_G_HAVE_ARITH_OP="yes"} : ${_G_HAVE_XSI_OPS="yes"} # The += operator was introduced in bash 3.1 case $BASH_VERSION in [12].* | 3.0 | 3.0*) ;; *) : ${_G_HAVE_PLUSEQ_OP="yes"} ;; esac fi # _G_HAVE_PLUSEQ_OP # Can be empty, in which case the shell is probed, "yes" if += is # useable or anything else if it does not work. test -z "$_G_HAVE_PLUSEQ_OP" \ && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ && _G_HAVE_PLUSEQ_OP=yes if test yes = "$_G_HAVE_PLUSEQ_OP" then # This is an XSI compatible shell, allowing a faster implementation... eval 'func_append () { $debug_cmd eval "$1+=\$2" }' else # ...otherwise fall back to using expr, which is often a shell builtin. func_append () { $debug_cmd eval "$1=\$$1\$2" } fi # func_append_quoted VAR VALUE # ---------------------------- # Quote VALUE and append to the end of shell variable VAR, separated # by a space. if test yes = "$_G_HAVE_PLUSEQ_OP"; then eval 'func_append_quoted () { $debug_cmd func_quote_for_eval "$2" eval "$1+=\\ \$func_quote_for_eval_result" }' else func_append_quoted () { $debug_cmd func_quote_for_eval "$2" eval "$1=\$$1\\ \$func_quote_for_eval_result" } fi # func_append_uniq VAR VALUE # -------------------------- # Append unique VALUE onto the existing contents of VAR, assuming # entries are delimited by the first character of VALUE. For example: # # func_append_uniq options " --another-option option-argument" # # will only append to $options if " --another-option option-argument " # is not already present somewhere in $options already (note spaces at # each end implied by leading space in second argument). func_append_uniq () { $debug_cmd eval _G_current_value='`$ECHO $'$1'`' _G_delim=`expr "$2" : '\(.\)'` case $_G_delim$_G_current_value$_G_delim in *"$2$_G_delim"*) ;; *) func_append "$@" ;; esac } # func_arith TERM... # ------------------ # Set func_arith_result to the result of evaluating TERMs. test -z "$_G_HAVE_ARITH_OP" \ && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ && _G_HAVE_ARITH_OP=yes if test yes = "$_G_HAVE_ARITH_OP"; then eval 'func_arith () { $debug_cmd func_arith_result=$(( $* )) }' else func_arith () { $debug_cmd func_arith_result=`expr "$@"` } fi # func_basename FILE # ------------------ # Set func_basename_result to FILE with everything up to and including # the last / stripped. if test yes = "$_G_HAVE_XSI_OPS"; then # If this shell supports suffix pattern removal, then use it to avoid # forking. Hide the definitions single quotes in case the shell chokes # on unsupported syntax... _b='func_basename_result=${1##*/}' _d='case $1 in */*) func_dirname_result=${1%/*}$2 ;; * ) func_dirname_result=$3 ;; esac' else # ...otherwise fall back to using sed. 
_b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` if test "X$func_dirname_result" = "X$1"; then func_dirname_result=$3 else func_append func_dirname_result "$2" fi' fi eval 'func_basename () { $debug_cmd '"$_b"' }' # func_dirname FILE APPEND NONDIR_REPLACEMENT # ------------------------------------------- # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. eval 'func_dirname () { $debug_cmd '"$_d"' }' # func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT # -------------------------------------------------------- # Perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value returned in "$func_basename_result" # For efficiency, we do not delegate to the functions above but instead # duplicate the functionality here. eval 'func_dirname_and_basename () { $debug_cmd '"$_b"' '"$_d"' }' # func_echo ARG... # ---------------- # Echo program name prefixed message. func_echo () { $debug_cmd _G_message=$* func_echo_IFS=$IFS IFS=$nl for _G_line in $_G_message; do IFS=$func_echo_IFS $ECHO "$progname: $_G_line" done IFS=$func_echo_IFS } # func_echo_all ARG... # -------------------- # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "$*" } # func_echo_infix_1 INFIX ARG... # ------------------------------ # Echo program name, followed by INFIX on the first line, with any # additional lines not showing INFIX. func_echo_infix_1 () { $debug_cmd $require_term_colors _G_infix=$1; shift _G_indent=$_G_infix _G_prefix="$progname: $_G_infix: " _G_message=$* # Strip color escape sequences before counting printable length for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" do test -n "$_G_tc" && { _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` } done _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes func_echo_infix_1_IFS=$IFS IFS=$nl for _G_line in $_G_message; do IFS=$func_echo_infix_1_IFS $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 _G_prefix=$_G_indent done IFS=$func_echo_infix_1_IFS } # func_error ARG... # ----------------- # Echo program name prefixed message to standard error. func_error () { $debug_cmd $require_term_colors func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 } # func_fatal_error ARG... # ----------------------- # Echo program name prefixed message to standard error, and exit. func_fatal_error () { $debug_cmd func_error "$*" exit $EXIT_FAILURE } # func_grep EXPRESSION FILENAME # ----------------------------- # Check whether EXPRESSION matches any line of FILENAME, without output. func_grep () { $debug_cmd $GREP "$1" "$2" >/dev/null 2>&1 }
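# Usage sketch for the path helpers above (the path is illustrative):
#   func_dirname_and_basename "/usr/lib/libfoo.la" '' '.'
#   # now $func_dirname_result  is '/usr/lib'
#   # and $func_basename_result is 'libfoo.la'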
test -z "$_G_HAVE_XSI_OPS" \ && (eval 'x=a/b/c; test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ && _G_HAVE_XSI_OPS=yes if test yes = "$_G_HAVE_XSI_OPS"; then eval 'func_len () { $debug_cmd func_len_result=${#1} }' else func_len () { $debug_cmd func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` } fi # func_mkdir_p DIRECTORY-PATH # --------------------------- # Make sure the entire path to DIRECTORY-PATH is available. func_mkdir_p () { $debug_cmd _G_directory_path=$1 _G_dir_list= if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then # Protect directory names starting with '-' case $_G_directory_path in -*) _G_directory_path=./$_G_directory_path ;; esac # While some portion of DIR does not yet exist... while test ! -d "$_G_directory_path"; do # ...make a list in topmost first order. Use a colon delimited # list incase some portion of path contains whitespace. _G_dir_list=$_G_directory_path:$_G_dir_list # If the last portion added has no slash in it, the list is done case $_G_directory_path in */*) ;; *) break ;; esac # ...otherwise throw away the child directory and loop _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` done _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` func_mkdir_p_IFS=$IFS; IFS=: for _G_dir in $_G_dir_list; do IFS=$func_mkdir_p_IFS # mkdir can fail with a 'File exist' error if two processes # try to create one of the directories concurrently. Don't # stop in that case! $MKDIR "$_G_dir" 2>/dev/null || : done IFS=$func_mkdir_p_IFS # Bail out if we (or some other process) failed to create a directory. test -d "$_G_directory_path" || \ func_fatal_error "Failed to create '$1'" fi } # func_mktempdir [BASENAME] # ------------------------- # Make a temporary directory that won't clash with other running # libtool processes, and avoids race conditions if possible. If # given, BASENAME is the basename for that directory. func_mktempdir () { $debug_cmd _G_template=${TMPDIR-/tmp}/${1-$progname} if test : = "$opt_dry_run"; then # Return a directory name, but don't create it in dry-run mode _G_tmpdir=$_G_template-$$ else # If mktemp works, use that first and foremost _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` if test ! -d "$_G_tmpdir"; then # Failing that, at least try and use $RANDOM to avoid a race _G_tmpdir=$_G_template-${RANDOM-0}$$ func_mktempdir_umask=`umask` umask 0077 $MKDIR "$_G_tmpdir" umask $func_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure test -d "$_G_tmpdir" || \ func_fatal_error "cannot create temporary directory '$_G_tmpdir'" fi $ECHO "$_G_tmpdir" } # func_normal_abspath PATH # ------------------------ # Remove doubled-up and trailing slashes, "." path components, # and cancel out any ".." path components in PATH after making # it an absolute path. func_normal_abspath () { $debug_cmd # These SED scripts presuppose an absolute path with a trailing slash. _G_pathcar='s|^/\([^/]*\).*$|\1|' _G_pathcdr='s|^/[^/]*||' _G_removedotparts=':dotsl s|/\./|/|g t dotsl s|/\.$|/|' _G_collapseslashes='s|/\{1,\}|/|g' _G_finalslash='s|/*$|/|' # Start from root dir and reassemble the path. func_normal_abspath_result= func_normal_abspath_tpath=$1 func_normal_abspath_altnamespace= case $func_normal_abspath_tpath in "") # Empty path, that just means $cwd. 
func_stripname '' '/' "`pwd`" func_normal_abspath_result=$func_stripname_result return ;; # The next three entries are used to spot a run of precisely # two leading slashes without using negated character classes; # we take advantage of case's first-match behaviour. ///*) # Unusual form of absolute path, do nothing. ;; //*) # Not necessarily an ordinary path; POSIX reserves leading '//' # and for example Cygwin uses it to access remote file shares # over CIFS/SMB, so we conserve a leading double slash if found. func_normal_abspath_altnamespace=/ ;; /*) # Absolute path, do nothing. ;; *) # Relative path, prepend $cwd. func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath ;; esac # Cancel out all the simple stuff to save iterations. We also want # the path to end with a slash for ease of parsing, so make sure # there is one (and only one) here. func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` while :; do # Processed it all yet? if test / = "$func_normal_abspath_tpath"; then # If we ascended to the root using ".." the result may be empty now. if test -z "$func_normal_abspath_result"; then func_normal_abspath_result=/ fi break fi func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$_G_pathcar"` func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$_G_pathcdr"` # Figure out what to do with it case $func_normal_abspath_tcomponent in "") # Trailing empty path component, ignore it. ;; ..) # Parent dir; strip last assembled component from result. func_dirname "$func_normal_abspath_result" func_normal_abspath_result=$func_dirname_result ;; *) # Actual path component, append it. func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" ;; esac done # Restore leading double-slash if one was found on entry. func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result } # func_notquiet ARG... # -------------------- # Echo program name prefixed message only when not in quiet mode. func_notquiet () { $debug_cmd $opt_quiet || func_echo ${1+"$@"} # A bug in bash halts the script if the last line of a function # fails when set -e is in force, so we need another command to # work around that: : } # func_relative_path SRCDIR DSTDIR # -------------------------------- # Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. func_relative_path () { $debug_cmd func_relative_path_result= func_normal_abspath "$1" func_relative_path_tlibdir=$func_normal_abspath_result func_normal_abspath "$2" func_relative_path_tbindir=$func_normal_abspath_result # Ascend the tree starting from libdir while :; do # check if we have found a prefix of bindir case $func_relative_path_tbindir in $func_relative_path_tlibdir) # found an exact match func_relative_path_tcancelled= break ;; $func_relative_path_tlibdir*) # found a matching prefix func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" func_relative_path_tcancelled=$func_stripname_result if test -z "$func_relative_path_result"; then func_relative_path_result=. fi break ;; *) func_dirname $func_relative_path_tlibdir func_relative_path_tlibdir=$func_dirname_result if test -z "$func_relative_path_tlibdir"; then # Have to descend all the way to the root! 
func_relative_path_result=../$func_relative_path_result func_relative_path_tcancelled=$func_relative_path_tbindir break fi func_relative_path_result=../$func_relative_path_result ;; esac done # Now calculate path; take care to avoid doubling-up slashes. func_stripname '' '/' "$func_relative_path_result" func_relative_path_result=$func_stripname_result func_stripname '/' '/' "$func_relative_path_tcancelled" if test -n "$func_stripname_result"; then func_append func_relative_path_result "/$func_stripname_result" fi # Normalisation. If bindir is libdir, return '.' else relative path. if test -n "$func_relative_path_result"; then func_stripname './' '' "$func_relative_path_result" func_relative_path_result=$func_stripname_result fi test -n "$func_relative_path_result" || func_relative_path_result=. : } # func_quote_for_eval ARG... # -------------------------- # Aesthetically quote ARGs to be evaled later. # This function returns two values: # i) func_quote_for_eval_result # double-quoted, suitable for a subsequent eval # ii) func_quote_for_eval_unquoted_result # has all characters that are still active within double # quotes backslashified. func_quote_for_eval () { $debug_cmd func_quote_for_eval_unquoted_result= func_quote_for_eval_result= while test 0 -lt $#; do case $1 in *[\\\`\"\$]*) _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; *) _G_unquoted_arg=$1 ;; esac if test -n "$func_quote_for_eval_unquoted_result"; then func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" else func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" fi case $_G_unquoted_arg in # Double-quote args containing shell metacharacters to delay # word splitting, command substitution and variable expansion # for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") _G_quoted_arg=\"$_G_unquoted_arg\" ;; *) _G_quoted_arg=$_G_unquoted_arg ;; esac if test -n "$func_quote_for_eval_result"; then func_append func_quote_for_eval_result " $_G_quoted_arg" else func_append func_quote_for_eval_result "$_G_quoted_arg" fi shift done } # func_quote_for_expand ARG # ------------------------- # Aesthetically quote ARG to be evaled later; same as above, # but do not quote variable references. func_quote_for_expand () { $debug_cmd case $1 in *[\\\`\"]*) _G_arg=`$ECHO "$1" | $SED \ -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; *) _G_arg=$1 ;; esac case $_G_arg in # Double-quote args containing shell metacharacters to delay # word splitting and command substitution for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") _G_arg=\"$_G_arg\" ;; esac func_quote_for_expand_result=$_G_arg } # func_stripname PREFIX SUFFIX NAME # --------------------------------- # strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). if test yes = "$_G_HAVE_XSI_OPS"; then eval 'func_stripname () { $debug_cmd # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary variable first. 
func_stripname_result=$3 func_stripname_result=${func_stripname_result#"$1"} func_stripname_result=${func_stripname_result%"$2"} }' else func_stripname () { $debug_cmd case $2 in .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; esac } fi # func_show_eval CMD [FAIL_EXP] # ----------------------------- # Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. func_show_eval () { $debug_cmd _G_cmd=$1 _G_fail_exp=${2-':'} func_quote_for_expand "$_G_cmd" eval "func_notquiet $func_quote_for_expand_result" $opt_dry_run || { eval "$_G_cmd" _G_status=$? if test 0 -ne "$_G_status"; then eval "(exit $_G_status); $_G_fail_exp" fi } } # func_show_eval_locale CMD [FAIL_EXP] # ------------------------------------ # Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. Use the saved locale for evaluation. func_show_eval_locale () { $debug_cmd _G_cmd=$1 _G_fail_exp=${2-':'} $opt_quiet || { func_quote_for_expand "$_G_cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || { eval "$_G_user_locale $_G_cmd" _G_status=$? eval "$_G_safe_locale" if test 0 -ne "$_G_status"; then eval "(exit $_G_status); $_G_fail_exp" fi } } # func_tr_sh # ---------- # Turn $1 into a string suitable for a shell variable name. # Result is stored in $func_tr_sh_result. All characters # not in the set a-zA-Z0-9_ are replaced with '_'. Further, # if $1 begins with a digit, a '_' is prepended as well. func_tr_sh () { $debug_cmd case $1 in [0-9]* | *[!a-zA-Z0-9_]*) func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` ;; * ) func_tr_sh_result=$1 ;; esac } # func_verbose ARG... # ------------------- # Echo program name prefixed message in verbose mode only. func_verbose () { $debug_cmd $opt_verbose && func_echo "$*" : } # func_warn_and_continue ARG... # ----------------------------- # Echo program name prefixed warning message to standard error. func_warn_and_continue () { $debug_cmd $require_term_colors func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 } # func_warning CATEGORY ARG... # ---------------------------- # Echo program name prefixed warning message to standard error. Warning # messages can be filtered according to CATEGORY, where this function # elides messages where CATEGORY is not listed in the global variable # 'opt_warning_types'. func_warning () { $debug_cmd # CATEGORY must be in the warning_categories list! case " $warning_categories " in *" $1 "*) ;; *) func_internal_error "invalid warning category '$1'" ;; esac _G_category=$1 shift case " $opt_warning_types " in *" $_G_category "*) $warning_func ${1+"$@"} ;; esac } # func_sort_ver VER1 VER2 # ----------------------- # 'sort -V' is not generally available. # Note this deviates from the version comparison in automake # in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a # but this should suffice as we won't be specifying old # version formats or redundant trailing .0 in bootstrap.conf. # If we did want full compatibility then we should probably # use m4_version_compare from autoconf. func_sort_ver () { $debug_cmd printf '%s\n%s\n' "$1" "$2" \ | sort -t. 
-k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n } # func_lt_ver PREV CURR # --------------------- # Return true if PREV and CURR are in the correct order according to # func_sort_ver, otherwise false. Use it like this: # # func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." func_lt_ver () { $debug_cmd test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` } # Local variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" # time-stamp-time-zone: "UTC" # End: #! /bin/sh # Set a version string for this script. scriptversion=2014-01-07.03; # UTC # A portable, pluggable option parser for Bourne shell. # Written by Gary V. Vaughan, 2010 # Copyright (C) 2010-2015 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Please report bugs or propose patches to gary@gnu.org. ## ------ ## ## Usage. ## ## ------ ## # This file is a library for parsing options in your shell scripts along # with assorted other useful supporting features that you can make use # of too. # # For the simplest scripts you might need only: # # #!/bin/sh # . relative/path/to/funclib.sh # . relative/path/to/options-parser # scriptversion=1.0 # func_options ${1+"$@"} # eval set dummy "$func_options_result"; shift # ...rest of your script... # # In order for the '--version' option to work, you will need to have a # suitably formatted comment like the one at the top of this file # starting with '# Written by ' and ending with '# warranty; '. # # For '-h' and '--help' to work, you will also need a one line # description of your script's purpose in a comment directly above the # '# Written by ' line, like the one at the top of this file. # # The default options also support '--debug', which will turn on shell # execution tracing (see the comment above debug_cmd below for another # use), and '--verbose' and the func_verbose function to allow your script # to display verbose messages only when your user has specified # '--verbose'. # # After sourcing this file, you can plug processing for additional # options by amending the variables from the 'Configuration' section # below, and following the instructions in the 'Option parsing' # section further down. ## -------------- ## ## Configuration. ## ## -------------- ## # You should override these variables in your script after sourcing this # file so that they reflect the customisations you have added to the # option parser.
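# Tying the Usage notes above together, a compact sketch of a client
# script (the file locations are illustrative):
#   #!/bin/sh
#   . ./build-aux/funclib.sh
#   . ./build-aux/options-parser
#   scriptversion=1.0
#   func_options ${1+"$@"}
#   eval set dummy "$func_options_result"; shift
#   $opt_verbose && func_echo "arguments left to process: $*"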
# The usage line for option parsing errors and the start of '-h' and # '--help' output messages. You can embed shell variables for delayed # expansion at the time the message is displayed, but you will need to # quote other shell meta-characters carefully to prevent them being # expanded when the contents are evaled. usage='$progpath [OPTION]...' # Short help message in response to '-h' and '--help'. Add to this or # override it after sourcing this library to reflect the full set of # options your script accepts. usage_message="\ --debug enable verbose shell tracing -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] -v, --verbose verbosely report processing --version print version information and exit -h, --help print short or long help message and exit " # Additional text appended to 'usage_message' in response to '--help'. long_help_message=" Warning categories include: 'all' show all warnings 'none' turn off all the warnings 'error' warnings are treated as fatal errors" # Help message printed before fatal option parsing errors. fatal_help="Try '\$progname --help' for more information." ## ------------------------- ## ## Hook function management. ## ## ------------------------- ## # This section contains functions for adding, removing, and running hooks # to the main code. A hook is just a named list of functions that can # be run in order later on. # func_hookable FUNC_NAME # ----------------------- # Declare that FUNC_NAME will run hooks added with # 'func_add_hook FUNC_NAME ...'. func_hookable () { $debug_cmd func_append hookable_fns " $1" } # func_add_hook FUNC_NAME HOOK_FUNC # --------------------------------- # Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must # first have been declared "hookable" by a call to 'func_hookable'. func_add_hook () { $debug_cmd case " $hookable_fns " in *" $1 "*) ;; *) func_fatal_error "'$1' does not accept hook functions." ;; esac eval func_append ${1}_hooks '" $2"' } # func_remove_hook FUNC_NAME HOOK_FUNC # ------------------------------------ # Remove HOOK_FUNC from the list of functions called by FUNC_NAME. func_remove_hook () { $debug_cmd eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' } # func_run_hooks FUNC_NAME [ARG]... # --------------------------------- # Run all hook functions registered to FUNC_NAME. # It is assumed that the list of hook functions contains nothing more # than a whitespace-delimited list of legal shell function names, and # no effort is wasted trying to catch shell meta-characters or preserve # whitespace. func_run_hooks () { $debug_cmd case " $hookable_fns " in *" $1 "*) ;; *) func_fatal_error "'$1' does not support hook functions." ;; esac eval _G_hook_fns=\$$1_hooks; shift for _G_hook in $_G_hook_fns; do eval $_G_hook '"$@"' # store returned options list back into positional # parameters for next 'cmd' execution. eval _G_hook_result=\$${_G_hook}_result eval set dummy "$_G_hook_result"; shift done func_quote_for_eval ${1+"$@"} func_run_hooks_result=$func_quote_for_eval_result }
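# An illustrative sketch of the hook machinery above; every name here is
# an example, not part of libtool:
#   func_hookable my_main
#   my_trace_hook ()
#   {
#     func_echo "my_trace_hook saw: $*"
#     func_quote_for_eval ${1+"$@"}
#     my_trace_hook_result=$func_quote_for_eval_result
#   }
#   func_add_hook my_main my_trace_hook
#   func_run_hooks my_main --some --args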
## --------------- ## ## Option parsing. ## ## --------------- ## # In order to add your own option parsing hooks, you must accept the # full positional parameter list in your hook function, remove any # options that you action, and then pass back the remaining unprocessed # options in the hook function's '_result' variable (for example, # 'my_options_prep_result' below), escaped suitably for 'eval'. Like this: # # my_options_prep () # { # $debug_cmd # # # Extend the existing usage message. # usage_message=$usage_message' # -s, --silent don'\''t print informational messages # ' # # func_quote_for_eval ${1+"$@"} # my_options_prep_result=$func_quote_for_eval_result # } # func_add_hook func_options_prep my_options_prep # # # my_silent_option () # { # $debug_cmd # # # Note that for efficiency, we parse as many options as we can # # recognise in a loop before passing the remainder back to the # # caller on the first unrecognised argument we encounter. # while test $# -gt 0; do # opt=$1; shift # case $opt in # --silent|-s) opt_silent=: ;; # # Separate non-argument short options: # -s*) func_split_short_opt "$_G_opt" # set dummy "$func_split_short_opt_name" \ # "-$func_split_short_opt_arg" ${1+"$@"} # shift # ;; # *) set dummy "$_G_opt" "$*"; shift; break ;; # esac # done # # func_quote_for_eval ${1+"$@"} # my_silent_option_result=$func_quote_for_eval_result # } # func_add_hook func_parse_options my_silent_option # # # my_option_validation () # { # $debug_cmd # # $opt_silent && $opt_verbose && func_fatal_help "\ # '--silent' and '--verbose' options are mutually exclusive." # # func_quote_for_eval ${1+"$@"} # my_option_validation_result=$func_quote_for_eval_result # } # func_add_hook func_validate_options my_option_validation # # You'll also need to manually amend $usage_message to reflect the extra # options you parse. It's preferable to append if you can, so that # multiple option parsing hooks can be added safely. # func_options [ARG]... # --------------------- # All the functions called inside func_options are hookable. See the # individual implementations for details. func_hookable func_options func_options () { $debug_cmd func_options_prep ${1+"$@"} eval func_parse_options \ ${func_options_prep_result+"$func_options_prep_result"} eval func_validate_options \ ${func_parse_options_result+"$func_parse_options_result"} eval func_run_hooks func_options \ ${func_validate_options_result+"$func_validate_options_result"} # save modified positional parameters for caller func_options_result=$func_run_hooks_result } # func_options_prep [ARG]... # -------------------------- # All initialisations required before starting the option parse loop. # Note that when calling hook functions, we pass through the list of # positional parameters. If a hook function modifies that list, and # needs to propagate that back to the rest of this script, then the complete # modified list must be put in 'func_run_hooks_result' before # returning. func_hookable func_options_prep func_options_prep () { $debug_cmd # Option defaults: opt_verbose=false opt_warning_types= func_run_hooks func_options_prep ${1+"$@"} # save modified positional parameters for caller func_options_prep_result=$func_run_hooks_result } # func_parse_options [ARG]... # --------------------------- # The main option parsing loop. func_hookable func_parse_options func_parse_options () { $debug_cmd func_parse_options_result= # this just eases exit handling while test $# -gt 0; do # Defer to hook functions for initial option parsing, so they # get priority in the event of reusing an option name. func_run_hooks func_parse_options ${1+"$@"} # Adjust func_parse_options positional parameters to match eval set dummy "$func_run_hooks_result"; shift # Break out of the loop if we already parsed every option.
test $# -gt 0 || break _G_opt=$1 shift case $_G_opt in --debug|-x) debug_cmd='set -x' func_echo "enabling shell trace mode" $debug_cmd ;; --no-warnings|--no-warning|--no-warn) set dummy --warnings none ${1+"$@"} shift ;; --warnings|--warning|-W) test $# = 0 && func_missing_arg $_G_opt && break case " $warning_categories $1" in *" $1 "*) # trailing space prevents matching last $1 above func_append_uniq opt_warning_types " $1" ;; *all) opt_warning_types=$warning_categories ;; *none) opt_warning_types=none warning_func=: ;; *error) opt_warning_types=$warning_categories warning_func=func_fatal_error ;; *) func_fatal_error \ "unsupported warning category: '$1'" ;; esac shift ;; --verbose|-v) opt_verbose=: ;; --version) func_version ;; -\?|-h) func_usage ;; --help) func_help ;; # Separate optargs to long options (plugins may need this): --*=*) func_split_equals "$_G_opt" set dummy "$func_split_equals_lhs" \ "$func_split_equals_rhs" ${1+"$@"} shift ;; # Separate optargs to short options: -W*) func_split_short_opt "$_G_opt" set dummy "$func_split_short_opt_name" \ "$func_split_short_opt_arg" ${1+"$@"} shift ;; # Separate non-argument short options: -\?*|-h*|-v*|-x*) func_split_short_opt "$_G_opt" set dummy "$func_split_short_opt_name" \ "-$func_split_short_opt_arg" ${1+"$@"} shift ;; --) break ;; -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; esac done # save modified positional parameters for caller func_quote_for_eval ${1+"$@"} func_parse_options_result=$func_quote_for_eval_result } # func_validate_options [ARG]... # ------------------------------ # Perform any sanity checks on option settings and/or unconsumed # arguments. func_hookable func_validate_options func_validate_options () { $debug_cmd # Display all warnings if -W was not given. test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" func_run_hooks func_validate_options ${1+"$@"} # Bail if the options were screwed! $exit_cmd $EXIT_FAILURE # save modified positional parameters for caller func_validate_options_result=$func_run_hooks_result } ## ----------------- ## ## Helper functions. ## ## ----------------- ## # This section contains the helper functions used by the rest of the # hookable option parser framework in ascii-betical order. # func_fatal_help ARG... # ---------------------- # Echo program name prefixed message to standard error, followed by # a help hint, and exit. func_fatal_help () { $debug_cmd eval \$ECHO \""Usage: $usage"\" eval \$ECHO \""$fatal_help"\" func_error ${1+"$@"} exit $EXIT_FAILURE } # func_help # --------- # Echo long help message to standard output and exit. func_help () { $debug_cmd func_usage_message $ECHO "$long_help_message" exit 0 } # func_missing_arg ARGNAME # ------------------------ # Echo program name prefixed message to standard error and set global # exit_cmd. func_missing_arg () { $debug_cmd func_error "Missing argument for '$1'." exit_cmd=exit } # func_split_equals STRING # ------------------------ # Set func_split_equals_lhs and func_split_equals_rhs shell variables after # splitting STRING at the '=' sign. test -z "$_G_HAVE_XSI_OPS" \ && (eval 'x=a/b/c; test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ && _G_HAVE_XSI_OPS=yes if test yes = "$_G_HAVE_XSI_OPS" then # This is an XSI compatible shell, allowing a faster implementation... 
eval 'func_split_equals () { $debug_cmd func_split_equals_lhs=${1%%=*} func_split_equals_rhs=${1#*=} test "x$func_split_equals_lhs" = "x$1" \ && func_split_equals_rhs= }' else # ...otherwise fall back to using expr, which is often a shell builtin. func_split_equals () { $debug_cmd func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` func_split_equals_rhs= test "x$func_split_equals_lhs" = "x$1" \ || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` } fi #func_split_equals # func_split_short_opt SHORTOPT # ----------------------------- # Set func_split_short_opt_name and func_split_short_opt_arg shell # variables after splitting SHORTOPT after the 2nd character. if test yes = "$_G_HAVE_XSI_OPS" then # This is an XSI compatible shell, allowing a faster implementation... eval 'func_split_short_opt () { $debug_cmd func_split_short_opt_arg=${1#??} func_split_short_opt_name=${1%"$func_split_short_opt_arg"} }' else # ...otherwise fall back to using expr, which is often a shell builtin. func_split_short_opt () { $debug_cmd func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` } fi #func_split_short_opt # func_usage # ---------- # Echo short help message to standard output and exit. func_usage () { $debug_cmd func_usage_message $ECHO "Run '$progname --help |${PAGER-more}' for full usage" exit 0 } # func_usage_message # ------------------ # Echo short help message to standard output. func_usage_message () { $debug_cmd eval \$ECHO \""Usage: $usage"\" echo $SED -n 's|^# || /^Written by/{ x;p;x } h /^Written by/q' < "$progpath" echo eval \$ECHO \""$usage_message"\" } # func_version # ------------ # Echo version message to standard output and exit. func_version () { $debug_cmd printf '%s\n' "$progname $scriptversion" $SED -n ' /(C)/!b go :more /\./!{ N s|\n# | | b more } :go /^# Written by /,/# warranty; / { s|^# || s|^# *$|| s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| p } /^# Written by / { s|^# || p } /^warranty; /q' < "$progpath" exit $? } # Local variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" # time-stamp-time-zone: "UTC" # End: # Set a version string. scriptversion='(GNU libtool) 2.4.6' # func_echo ARG... # ---------------- # Libtool also displays the current mode in messages, so override # funclib.sh func_echo with this custom definition. func_echo () { $debug_cmd _G_message=$* func_echo_IFS=$IFS IFS=$nl for _G_line in $_G_message; do IFS=$func_echo_IFS $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" done IFS=$func_echo_IFS } # func_warning ARG... # ------------------- # Libtool warnings are not categorized, so override funclib.sh # func_warning with this simpler definition. func_warning () { $debug_cmd $warning_func ${1+"$@"} } ## ---------------- ## ## Options parsing. ## ## ---------------- ## # Hook in the functions to make sure our own options are parsed during # the option parsing loop. usage='$progpath [OPTION]... [MODE-ARG]...' # Short help message in response to '-h'. 
usage_message="Options: --config show all configuration variables --debug enable verbose shell tracing -n, --dry-run display commands without modifying any files --features display basic configuration information and exit --mode=MODE use operation mode MODE --no-warnings equivalent to '-Wnone' --preserve-dup-deps don't remove duplicate dependency libraries --quiet, --silent don't print informational messages --tag=TAG use configuration variables from tag TAG -v, --verbose print more informational messages than default --version print version information -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] -h, --help, --help-all print short, long, or detailed help message " # Additional text appended to 'usage_message' in response to '--help'. func_help () { $debug_cmd func_usage_message $ECHO "$long_help_message MODE must be one of the following: clean remove files from the build directory compile compile a source file into a libtool object execute automatically set library path, then run a program finish complete the installation of libtool libraries install install libraries or executables link create a library or an executable uninstall remove libraries from an installed directory MODE-ARGS vary depending on the MODE. When passed as first option, '--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that. Try '$progname --help --mode=MODE' for a more detailed description of MODE. When reporting a bug, please describe a test case to reproduce it and include the following information: host-triplet: $host shell: $SHELL compiler: $LTCC compiler flags: $LTCFLAGS linker: $LD (gnu? $with_gnu_ld) version: $progname (GNU libtool) 2.4.6 automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q` autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q` Report bugs to . GNU libtool home page: . General help using GNU software: ." exit 0 } # func_lo2o OBJECT-NAME # --------------------- # Transform OBJECT-NAME from a '.lo' suffix to the platform specific # object suffix. lo2o=s/\\.lo\$/.$objext/ o2lo=s/\\.$objext\$/.lo/ if test yes = "$_G_HAVE_XSI_OPS"; then eval 'func_lo2o () { case $1 in *.lo) func_lo2o_result=${1%.lo}.$objext ;; * ) func_lo2o_result=$1 ;; esac }' # func_xform LIBOBJ-OR-SOURCE # --------------------------- # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise) # suffix to a '.lo' libtool-object suffix. eval 'func_xform () { func_xform_result=${1%.*}.lo }' else # ...otherwise fall back to using sed. func_lo2o () { func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"` } func_xform () { func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'` } fi # func_fatal_configuration ARG... # ------------------------------- # Echo program name prefixed message to standard error, followed by # a configuration failure hint, and exit. func_fatal_configuration () { func_fatal_error ${1+"$@"} \ "See the $PACKAGE documentation for more information." \ "Fatal configuration error." } # func_config # ----------- # Display the configuration for all the tags in this script. func_config () { re_begincf='^# ### BEGIN LIBTOOL' re_endcf='^# ### END LIBTOOL' # Default configuration. $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" # Now print the configurations for the tags. for tagname in $taglist; do $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" done exit $? } # func_features # ------------- # Display the features supported by this script. 
func_features () { echo "host: $host" if test yes = "$build_libtool_libs"; then echo "enable shared libraries" else echo "disable shared libraries" fi if test yes = "$build_old_libs"; then echo "enable static libraries" else echo "disable static libraries" fi exit $? } # func_enable_tag TAGNAME # ----------------------- # Verify that TAGNAME is valid, and either flag an error and exit, or # enable the TAGNAME tag. We also add TAGNAME to the global $taglist # variable here. func_enable_tag () { # Global variable: tagname=$1 re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" sed_extractcf=/$re_begincf/,/$re_endcf/p # Validate tagname. case $tagname in *[!-_A-Za-z0-9,/]*) func_fatal_error "invalid tag name: $tagname" ;; esac # Don't test for the "default" C tag, as we know it's # there but not specially marked. case $tagname in CC) ;; *) if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then taglist="$taglist $tagname" # Evaluate the configuration. Be careful to quote the path # and the sed script, to avoid splitting on whitespace, but # also don't use non-portable quotes within backquotes within # quotes we have to do it in 2 steps: extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` eval "$extractedcf" else func_error "ignoring unknown tag $tagname" fi ;; esac } # func_check_version_match # ------------------------ # Ensure that we are using m4 macros, and libtool script from the same # release of libtool. func_check_version_match () { if test "$package_revision" != "$macro_revision"; then if test "$VERSION" != "$macro_version"; then if test -z "$macro_version"; then cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from an older release. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from $PACKAGE $macro_version. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF fi else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, $progname: but the definition of this LT_INIT comes from revision $macro_revision. $progname: You should recreate aclocal.m4 with macros from revision $package_revision $progname: of $PACKAGE $VERSION and run autoconf again. _LT_EOF fi exit $EXIT_MISMATCH fi } # libtool_options_prep [ARG]... # ----------------------------- # Preparation for options parsed by libtool. 
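# Illustrative aside: a tagged configuration (e.g. for C++) is stored in
# this very script between marker lines of the following shape, where
# <TAGNAME> is a placeholder; func_enable_tag above greps for the BEGIN
# marker and evals everything up to the matching END marker:
#
#   # ### BEGIN LIBTOOL TAG CONFIG: <TAGNAME>
#   ...tag-specific variable assignments (compiler, flags, ...)...
#   # ### END LIBTOOL TAG CONFIG: <TAGNAME>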
libtool_options_prep ()
{
    $debug_cmd

    # Option defaults:
    opt_config=false
    opt_dlopen=
    opt_dry_run=false
    opt_help=false
    opt_mode=
    opt_preserve_dup_deps=false
    opt_quiet=false

    nonopt=
    preserve_args=

    # Shorthand for --mode=foo, only valid as the first argument
    case $1 in
    clean|clea|cle|cl)
      shift; set dummy --mode clean ${1+"$@"}; shift
      ;;
    compile|compil|compi|comp|com|co|c)
      shift; set dummy --mode compile ${1+"$@"}; shift
      ;;
    execute|execut|execu|exec|exe|ex|e)
      shift; set dummy --mode execute ${1+"$@"}; shift
      ;;
    finish|finis|fini|fin|fi|f)
      shift; set dummy --mode finish ${1+"$@"}; shift
      ;;
    install|instal|insta|inst|ins|in|i)
      shift; set dummy --mode install ${1+"$@"}; shift
      ;;
    link|lin|li|l)
      shift; set dummy --mode link ${1+"$@"}; shift
      ;;
    uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
      shift; set dummy --mode uninstall ${1+"$@"}; shift
      ;;
    esac

    # Pass back the list of options.
    func_quote_for_eval ${1+"$@"}
    libtool_options_prep_result=$func_quote_for_eval_result
}
func_add_hook func_options_prep libtool_options_prep


# libtool_parse_options [ARG]...
# ---------------------------------
# Provide handling for libtool specific options.
libtool_parse_options ()
{
    $debug_cmd

    # Perform our own loop to consume as many options as possible in
    # each iteration.
    while test $# -gt 0; do
      _G_opt=$1
      shift
      case $_G_opt in
        --dry-run|--dryrun|-n)
                        opt_dry_run=: ;;

        --config)       func_config ;;

        --dlopen|-dlopen)
                        opt_dlopen="${opt_dlopen+$opt_dlopen
}$1"
                        shift
                        ;;

        --preserve-dup-deps)
                        opt_preserve_dup_deps=: ;;

        --features)     func_features ;;

        --finish)       set dummy --mode finish ${1+"$@"}; shift ;;

        --help)         opt_help=: ;;

        --help-all)     opt_help=': help-all' ;;

        --mode)         test $# = 0 && func_missing_arg $_G_opt && break
                        opt_mode=$1
                        case $1 in
                          # Valid mode arguments:
                          clean|compile|execute|finish|install|link|relink|uninstall) ;;

                          # Catch anything else as an error
                          *) func_error "invalid argument for $_G_opt"
                             exit_cmd=exit
                             break
                             ;;
                        esac
                        shift
                        ;;

        --no-silent|--no-quiet)
                        opt_quiet=false
                        func_append preserve_args " $_G_opt"
                        ;;

        --no-warnings|--no-warning|--no-warn)
                        opt_warning=false
                        func_append preserve_args " $_G_opt"
                        ;;

        --no-verbose)
                        opt_verbose=false
                        func_append preserve_args " $_G_opt"
                        ;;

        --silent|--quiet)
                        opt_quiet=:
                        opt_verbose=false
                        func_append preserve_args " $_G_opt"
                        ;;

        --tag)          test $# = 0 && func_missing_arg $_G_opt && break
                        opt_tag=$1
                        func_append preserve_args " $_G_opt $1"
                        func_enable_tag "$1"
                        shift
                        ;;

        --verbose|-v)   opt_quiet=false
                        opt_verbose=:
                        func_append preserve_args " $_G_opt"
                        ;;

        # An option not handled by this hook function:
        *)              set dummy "$_G_opt" ${1+"$@"}; shift; break ;;
      esac
    done

    # save modified positional parameters for caller
    func_quote_for_eval ${1+"$@"}
    libtool_parse_options_result=$func_quote_for_eval_result
}
func_add_hook func_parse_options libtool_parse_options


# libtool_validate_options [ARG]...
# ---------------------------------
# Perform any sanity checks on option settings and/or unconsumed
# arguments.
libtool_validate_options () { # save first non-option argument if test 0 -lt $#; then nonopt=$1 shift fi # preserve --debug test : = "$debug_cmd" || func_append preserve_args " --debug" case $host in # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) # don't eliminate duplications in $postdeps and $predeps opt_duplicate_compiler_generated_deps=: ;; *) opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps ;; esac $opt_help || { # Sanity checks first: func_check_version_match test yes != "$build_libtool_libs" \ && test yes != "$build_old_libs" \ && func_fatal_configuration "not configured to build any kind of library" # Darwin sucks eval std_shrext=\"$shrext_cmds\" # Only execute mode is allowed to have -dlopen flags. if test -n "$opt_dlopen" && test execute != "$opt_mode"; then func_error "unrecognized option '-dlopen'" $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help=$help help="Try '$progname --help --mode=$opt_mode' for more information." } # Pass back the unparsed argument list func_quote_for_eval ${1+"$@"} libtool_validate_options_result=$func_quote_for_eval_result } func_add_hook func_validate_options libtool_validate_options # Process options as early as possible so that --help and --version # can return quickly. func_options ${1+"$@"} eval set dummy "$func_options_result"; shift ## ----------- ## ## Main. ## ## ----------- ## magic='%%%MAGIC variable%%%' magic_exe='%%%MAGIC EXE variable%%%' # Global variables. extracted_archives= extracted_serial=0 # If this variable is set in any of the actions, the command in it # will be execed at the end. This prevents here-documents from being # left over by shells. exec_cmd= # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } # func_generated_by_libtool # True iff stdin has been generated by Libtool. This function is only # a basic sanity check; it will hardly flush out determined imposters. func_generated_by_libtool_p () { $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 } # func_lalib_p file # True iff FILE is a libtool '.la' library or '.lo' object file. # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_lalib_p () { test -f "$1" && $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p } # func_lalib_unsafe_p file # True iff FILE is a libtool '.la' library or '.lo' object file. # This function implements the same check as func_lalib_p without # resorting to external programs. To this end, it redirects stdin and # closes it afterwards, without saving the original file descriptor. # As a safety measure, use it only where a negative result would be # fatal anyway. Works if 'file' does not exist. func_lalib_unsafe_p () { lalib_p=no if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then for lalib_p_l in 1 2 3 4 do read lalib_p_line case $lalib_p_line in \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; esac done exec 0<&5 5<&- fi test yes = "$lalib_p" } # func_ltwrapper_script_p file # True iff FILE is a libtool wrapper script # This function is only a basic sanity check; it will hardly flush out # determined imposters. 
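# Illustrative aside: func_lalib_p and func_lalib_unsafe_p above accept a
# file when one of its first four lines carries the generated-by banner,
# e.g. a hypothetical '.la' file beginning:
#
#   # libfoo.la - a libtool library file
#   # Generated by libtool (GNU libtool) 2.4.6
#
# The second line is what matches the '^# Generated by .*libtool' probe.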
func_ltwrapper_script_p () { test -f "$1" && $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p } # func_ltwrapper_executable_p file # True iff FILE is a libtool wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_executable_p () { func_ltwrapper_exec_suffix= case $1 in *.exe) ;; *) func_ltwrapper_exec_suffix=.exe ;; esac $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 } # func_ltwrapper_scriptname file # Assumes file is an ltwrapper_executable # uses $file to determine the appropriate filename for a # temporary ltwrapper_script. func_ltwrapper_scriptname () { func_dirname_and_basename "$1" "" "." func_stripname '' '.exe' "$func_basename_result" func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper } # func_ltwrapper_p file # True iff FILE is a libtool wrapper script or wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_p () { func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" } # func_execute_cmds commands fail_cmd # Execute tilde-delimited COMMANDS. # If FAIL_CMD is given, eval that upon failure. # FAIL_CMD may read-access the current command in variable CMD! func_execute_cmds () { $debug_cmd save_ifs=$IFS; IFS='~' for cmd in $1; do IFS=$sp$nl eval cmd=\"$cmd\" IFS=$save_ifs func_show_eval "$cmd" "${2-:}" done IFS=$save_ifs } # func_source file # Source FILE, adding directory component if necessary. # Note that it is not necessary on cygwin/mingw to append a dot to # FILE even if both FILE and FILE.exe exist: automatic-append-.exe # behavior happens only for exec(3), not for open(2)! Also, sourcing # 'FILE.' does not work on cygwin managed mounts. func_source () { $debug_cmd case $1 in */* | *\\*) . "$1" ;; *) . "./$1" ;; esac } # func_resolve_sysroot PATH # Replace a leading = in PATH with a sysroot. Store the result into # func_resolve_sysroot_result func_resolve_sysroot () { func_resolve_sysroot_result=$1 case $func_resolve_sysroot_result in =*) func_stripname '=' '' "$func_resolve_sysroot_result" func_resolve_sysroot_result=$lt_sysroot$func_stripname_result ;; esac } # func_replace_sysroot PATH # If PATH begins with the sysroot, replace it with = and # store the result into func_replace_sysroot_result. func_replace_sysroot () { case $lt_sysroot:$1 in ?*:"$lt_sysroot"*) func_stripname "$lt_sysroot" '' "$1" func_replace_sysroot_result='='$func_stripname_result ;; *) # Including no sysroot. func_replace_sysroot_result=$1 ;; esac } # func_infer_tag arg # Infer tagged configuration to use if any are available and # if one wasn't chosen via the "--tag" command line option. # Only attempt this if the compiler in the base compile # command doesn't match the default compiler. # arg is usually of the form 'gcc ...' func_infer_tag () { $debug_cmd if test -n "$available_tags" && test -z "$tagname"; then CC_quoted= for arg in $CC; do func_append_quoted CC_quoted "$arg" done CC_expanded=`func_echo_all $CC` CC_quoted_expanded=`func_echo_all $CC_quoted` case $@ in # Blanks in the command may have been stripped by the calling shell, # but not from the CC environment variable when configure was run. " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; # Blanks at the start of $base_compile will cause this to fail # if we don't check for them as well. 
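# Illustrative aside: with CC='gcc', the patterns above match a base compile
# command such as 'gcc -c foo.c' (via the '"$CC "*' case), so the default
# untagged configuration is used; any other compiler, e.g. 'g++ -c foo.cc',
# falls through to the tag scan below.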
      *)
        for z in $available_tags; do
          if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
            # Evaluate the configuration.
            eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
            CC_quoted=
            for arg in $CC; do
              # Double-quote args containing other shell metacharacters.
              func_append_quoted CC_quoted "$arg"
            done
            CC_expanded=`func_echo_all $CC`
            CC_quoted_expanded=`func_echo_all $CC_quoted`
            case "$@ " in
              " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
              " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
                # The compiler in the base compile command matches
                # the one in the tagged configuration.
                # Assume this is the tagged configuration we want.
                tagname=$z
                break
                ;;
            esac
          fi
        done
        # If $tagname still isn't set, then no tagged configuration
        # was found and let the user know that the "--tag" command
        # line option must be used.
        if test -z "$tagname"; then
          func_echo "unable to infer tagged configuration"
          func_fatal_error "specify a tag with '--tag'"
#       else
#         func_verbose "using $tagname tagged configuration"
        fi
        ;;
      esac
    fi
}


# func_write_libtool_object output_name pic_name nonpic_name
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
func_write_libtool_object ()
{
    write_libobj=$1
    if test yes = "$build_libtool_libs"; then
      write_lobj=\'$2\'
    else
      write_lobj=none
    fi

    if test yes = "$build_old_libs"; then
      write_oldobj=\'$3\'
    else
      write_oldobj=none
    fi

    $opt_dry_run || {
      cat >${write_libobj}T <<EOF
# $write_libobj - a libtool object file
# Generated by $PROGRAM (GNU $PACKAGE) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.

# Name of the PIC object.
pic_object=$write_lobj

# Name of the non-PIC object
non_pic_object=$write_oldobj

EOF
      $MV "${write_libobj}T" "$write_libobj"
    }
}


# func_convert_core_file_wine_to_w32 ARG
# Helper function used by file name conversion functions when $build is *nix,
# and $host is mingw, cygwin, or some other w32 environment.  Relies on a
# correctly configured wine environment, with the winepath program in
# $build's $PATH.
#
# ARG is the $build file name to be converted to w32 format.
# Result is available in $func_convert_core_file_wine_to_w32_result, and will
# be empty on error (or when ARG is empty).
func_convert_core_file_wine_to_w32 ()
{
  $debug_cmd

  func_convert_core_file_wine_to_w32_result=
  if test -n "$1"; then
    # winepath does not exit with a non-zero status on conversion errors;
    # it prints an error message to stdout instead, while a missing
    # winepath yields status 127.  So check both the exit status and
    # that stdout is non-empty.
    func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
    if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then
      func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | $SED -e "$sed_naive_backslashify"`
    else
      func_convert_core_file_wine_to_w32_result=
    fi
  fi
}
# end: func_convert_core_file_wine_to_w32


# func_convert_core_path_wine_to_w32 ARG
# Helper function used by path conversion functions when $build is *nix, and
# $host is mingw, cygwin, or some other w32 environment.  Relies on a
# correctly configured wine environment, with the winepath program in
# $build's $PATH.  Assumes ARG has no leading or trailing path separator
# characters.
#
# ARG is path to be converted from $build format to win32.
# Result is available in $func_convert_core_path_wine_to_w32_result.
# Unconvertible file (directory) names in ARG are skipped; if no directory
# names are convertible, then the result may be empty.
func_convert_core_path_wine_to_w32 ()
{
  $debug_cmd

  # unfortunately, winepath doesn't convert paths, only file names
  func_convert_core_path_wine_to_w32_result=
  if test -n "$1"; then
    oldIFS=$IFS
    IFS=:
    for func_convert_core_path_wine_to_w32_f in $1; do
      IFS=$oldIFS
      func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
      if test -n "$func_convert_core_file_wine_to_w32_result"; then
        if test -z "$func_convert_core_path_wine_to_w32_result"; then
          func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result
        else
          func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
        fi
      fi
    done
    IFS=$oldIFS
  fi
}
# end: func_convert_core_path_wine_to_w32


# func_cygpath ARGS...
# Wrapper around calling the cygpath program via LT_CYGPATH.  This is used
# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin.
In case (1) or # (2), returns the Cygwin file name or path in func_cygpath_result (input # file name or path is assumed to be in w32 format, as previously converted # from $build's *nix or MSYS format). In case (3), returns the w32 file name # or path in func_cygpath_result (input file name or path is assumed to be in # Cygwin format). Returns an empty string on error. # # ARGS are passed to cygpath, with the last one being the file name or path to # be converted. # # Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH # environment variable; do not put it in $PATH. func_cygpath () { $debug_cmd if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` if test "$?" -ne 0; then # on failure, ensure result is empty func_cygpath_result= fi else func_cygpath_result= func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" fi } #end: func_cygpath # func_convert_core_msys_to_w32 ARG # Convert file name or path ARG from MSYS format to w32 format. Return # result in func_convert_core_msys_to_w32_result. func_convert_core_msys_to_w32 () { $debug_cmd # awkward: cmd appends spaces to result func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` } #end: func_convert_core_msys_to_w32 # func_convert_file_check ARG1 ARG2 # Verify that ARG1 (a file name in $build format) was converted to $host # format in ARG2. Otherwise, emit an error message, but continue (resetting # func_to_host_file_result to ARG1). func_convert_file_check () { $debug_cmd if test -z "$2" && test -n "$1"; then func_error "Could not determine host file name corresponding to" func_error " '$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback: func_to_host_file_result=$1 fi } # end func_convert_file_check # func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH # Verify that FROM_PATH (a path in $build format) was converted to $host # format in TO_PATH. Otherwise, emit an error message, but continue, resetting # func_to_host_file_result to a simplistic fallback value (see below). func_convert_path_check () { $debug_cmd if test -z "$4" && test -n "$3"; then func_error "Could not determine the host path corresponding to" func_error " '$3'" func_error "Continuing, but uninstalled executables may not work." # Fallback. This is a deliberately simplistic "conversion" and # should not be "improved". See libtool.info. if test "x$1" != "x$2"; then lt_replace_pathsep_chars="s|$1|$2|g" func_to_host_path_result=`echo "$3" | $SED -e "$lt_replace_pathsep_chars"` else func_to_host_path_result=$3 fi fi } # end func_convert_path_check # func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG # Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT # and appending REPL if ORIG matches BACKPAT. func_convert_path_front_back_pathsep () { $debug_cmd case $4 in $1 ) func_to_host_path_result=$3$func_to_host_path_result ;; esac case $4 in $2 ) func_append func_to_host_path_result "$3" ;; esac } # end func_convert_path_front_back_pathsep ################################################## # $build to $host FILE NAME CONVERSION FUNCTIONS # ################################################## # invoked via '$to_host_file_cmd ARG' # # In each case, ARG is the path to be converted from $build to $host format. # Result will be available in $func_to_host_file_result. 
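# Illustrative aside (hypothetical values): on an MSYS $build targeting a
# Cygwin $host, with LT_CYGPATH=/usr/bin/cygpath, the core converters defined
# above compose like this:
#
#   func_convert_core_msys_to_w32 '/c/build/libfoo.la'
#     # => func_convert_core_msys_to_w32_result='c:\build\libfoo.la'
#   func_cygpath -u 'c:\build\libfoo.la'
#     # => func_cygpath_result='/cygdrive/c/build/libfoo.la'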
# func_to_host_file ARG # Converts the file name ARG from $build format to $host format. Return result # in func_to_host_file_result. func_to_host_file () { $debug_cmd $to_host_file_cmd "$1" } # end func_to_host_file # func_to_tool_file ARG LAZY # converts the file name ARG from $build format to toolchain format. Return # result in func_to_tool_file_result. If the conversion in use is listed # in (the comma separated) LAZY, no conversion takes place. func_to_tool_file () { $debug_cmd case ,$2, in *,"$to_tool_file_cmd",*) func_to_tool_file_result=$1 ;; *) $to_tool_file_cmd "$1" func_to_tool_file_result=$func_to_host_file_result ;; esac } # end func_to_tool_file # func_convert_file_noop ARG # Copy ARG to func_to_host_file_result. func_convert_file_noop () { func_to_host_file_result=$1 } # end func_convert_file_noop # func_convert_file_msys_to_w32 ARG # Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic # conversion to w32 is not available inside the cwrapper. Returns result in # func_to_host_file_result. func_convert_file_msys_to_w32 () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then func_convert_core_msys_to_w32 "$1" func_to_host_file_result=$func_convert_core_msys_to_w32_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_msys_to_w32 # func_convert_file_cygwin_to_w32 ARG # Convert file name ARG from Cygwin to w32 format. Returns result in # func_to_host_file_result. func_convert_file_cygwin_to_w32 () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then # because $build is cygwin, we call "the" cygpath in $PATH; no need to use # LT_CYGPATH in this case. func_to_host_file_result=`cygpath -m "$1"` fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_cygwin_to_w32 # func_convert_file_nix_to_w32 ARG # Convert file name ARG from *nix to w32 format. Requires a wine environment # and a working winepath. Returns result in func_to_host_file_result. func_convert_file_nix_to_w32 () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then func_convert_core_file_wine_to_w32 "$1" func_to_host_file_result=$func_convert_core_file_wine_to_w32_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_nix_to_w32 # func_convert_file_msys_to_cygwin ARG # Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. # Returns result in func_to_host_file_result. func_convert_file_msys_to_cygwin () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then func_convert_core_msys_to_w32 "$1" func_cygpath -u "$func_convert_core_msys_to_w32_result" func_to_host_file_result=$func_cygpath_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_msys_to_cygwin # func_convert_file_nix_to_cygwin ARG # Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed # in a wine environment, working winepath, and LT_CYGPATH set. Returns result # in func_to_host_file_result. func_convert_file_nix_to_cygwin () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. 
func_convert_core_file_wine_to_w32 "$1" func_cygpath -u "$func_convert_core_file_wine_to_w32_result" func_to_host_file_result=$func_cygpath_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_nix_to_cygwin ############################################# # $build to $host PATH CONVERSION FUNCTIONS # ############################################# # invoked via '$to_host_path_cmd ARG' # # In each case, ARG is the path to be converted from $build to $host format. # The result will be available in $func_to_host_path_result. # # Path separators are also converted from $build format to $host format. If # ARG begins or ends with a path separator character, it is preserved (but # converted to $host format) on output. # # All path conversion functions are named using the following convention: # file name conversion function : func_convert_file_X_to_Y () # path conversion function : func_convert_path_X_to_Y () # where, for any given $build/$host combination the 'X_to_Y' value is the # same. If conversion functions are added for new $build/$host combinations, # the two new functions must follow this pattern, or func_init_to_host_path_cmd # will break. # func_init_to_host_path_cmd # Ensures that function "pointer" variable $to_host_path_cmd is set to the # appropriate value, based on the value of $to_host_file_cmd. to_host_path_cmd= func_init_to_host_path_cmd () { $debug_cmd if test -z "$to_host_path_cmd"; then func_stripname 'func_convert_file_' '' "$to_host_file_cmd" to_host_path_cmd=func_convert_path_$func_stripname_result fi } # func_to_host_path ARG # Converts the path ARG from $build format to $host format. Return result # in func_to_host_path_result. func_to_host_path () { $debug_cmd func_init_to_host_path_cmd $to_host_path_cmd "$1" } # end func_to_host_path # func_convert_path_noop ARG # Copy ARG to func_to_host_path_result. func_convert_path_noop () { func_to_host_path_result=$1 } # end func_convert_path_noop # func_convert_path_msys_to_w32 ARG # Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic # conversion to w32 is not available inside the cwrapper. Returns result in # func_to_host_path_result. func_convert_path_msys_to_w32 () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # Remove leading and trailing path separator characters from ARG. MSYS # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; # and winepath ignores them completely. func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" func_to_host_path_result=$func_convert_core_msys_to_w32_result func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_msys_to_w32 # func_convert_path_cygwin_to_w32 ARG # Convert path ARG from Cygwin to w32 format. Returns result in # func_to_host_file_result. func_convert_path_cygwin_to_w32 () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_cygwin_to_w32 # func_convert_path_nix_to_w32 ARG # Convert path ARG from *nix to w32 format. 
Requires a wine environment and
# a working winepath.  Returns result in func_to_host_path_result.
func_convert_path_nix_to_w32 ()
{
  $debug_cmd

  func_to_host_path_result=$1
  if test -n "$1"; then
    # See func_convert_path_msys_to_w32:
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
    func_to_host_path_result=$func_convert_core_path_wine_to_w32_result
    func_convert_path_check : ";" \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
  fi
}
# end func_convert_path_nix_to_w32


# func_convert_path_msys_to_cygwin ARG
# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
# Returns result in func_to_host_path_result.
func_convert_path_msys_to_cygwin ()
{
  $debug_cmd

  func_to_host_path_result=$1
  if test -n "$1"; then
    # See func_convert_path_msys_to_w32:
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
    func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
    func_to_host_path_result=$func_cygpath_result
    func_convert_path_check : : \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
  fi
}
# end func_convert_path_msys_to_cygwin


# func_convert_path_nix_to_cygwin ARG
# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in
# a wine environment, working winepath, and LT_CYGPATH set.  Returns result
# in func_to_host_path_result.
func_convert_path_nix_to_cygwin ()
{
  $debug_cmd

  func_to_host_path_result=$1
  if test -n "$1"; then
    # Remove leading and trailing path separator characters from
    # ARG. msys behavior is inconsistent here, cygpath turns them
    # into '.;' and ';.', and winepath ignores them completely.
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
    func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
    func_to_host_path_result=$func_cygpath_result
    func_convert_path_check : : \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
  fi
}
# end func_convert_path_nix_to_cygwin


# func_dll_def_p FILE
# True iff FILE is a Windows DLL '.def' file.
# Keep in sync with _LT_DLL_DEF_P in libtool.m4
func_dll_def_p ()
{
  $debug_cmd

  func_dll_def_p_tmp=`$SED -n \
    -e 's/^[ ]*//' \
    -e '/^\(;.*\)*$/d' \
    -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \
    -e q \
    "$1"`
  test DEF = "$func_dll_def_p_tmp"
}


# func_mode_compile arg...
func_mode_compile ()
{
    $debug_cmd

    # Get the compilation command and the source file.
    base_compile=
    srcfile=$nonopt  #  always keep a non-empty value in "srcfile"
    suppress_opt=yes
    suppress_output=
    arg_mode=normal
    libobj=
    later=
    pie_flag=

    for arg
    do
      case $arg_mode in
      arg  )
        # do not "continue".  Instead, add this to base_compile
        lastarg=$arg
        arg_mode=normal
        ;;

      target )
        libobj=$arg
        arg_mode=normal
        continue
        ;;

      normal )
        # Accept any command-line options.
        case $arg in
        -o)
          test -n "$libobj" && \
            func_fatal_error "you cannot specify '-o' more than once"
          arg_mode=target
          continue
          ;;

        -pie | -fpie | -fPIE)
          func_append pie_flag " $arg"
          continue
          ;;

        -shared | -static | -prefer-pic | -prefer-non-pic)
          func_append later " $arg"
          continue
          ;;

        -no-suppress)
          suppress_opt=no
          continue
          ;;

        -Xcompiler)
          arg_mode=arg  #  the next one goes into the "base_compile" arg list
          continue      #  The current "srcfile" will either be retained or
          ;;            #  replaced later.  I would guess that would be a bug.
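        # Illustrative aside: -Xcompiler (above) and -Wc, (below) are two
        # equivalent escape hatches for forwarding flags untouched to the
        # compiler, e.g.:
        #
        #   libtool --mode=compile gcc -c foo.c -Xcompiler -fno-common
        #   libtool --mode=compile gcc -c foo.c -Wc,-fno-common
        #
        # Both append '-fno-common' to base_compile; -Wc, also accepts a
        # comma-separated list, e.g. '-Wc,-fno-common,-pipe'.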
        -Wc,*)
          func_stripname '-Wc,' '' "$arg"
          args=$func_stripname_result
          lastarg=
          save_ifs=$IFS; IFS=,
          for arg in $args; do
            IFS=$save_ifs
            func_append_quoted lastarg "$arg"
          done
          IFS=$save_ifs
          func_stripname ' ' '' "$lastarg"
          lastarg=$func_stripname_result

          # Add the arguments to base_compile.
          func_append base_compile " $lastarg"
          continue
          ;;

        *)
          # Accept the current argument as the source file.
          # The previous "srcfile" becomes the current argument.
          #
          lastarg=$srcfile
          srcfile=$arg
          ;;
        esac  #  case $arg
        ;;
      esac    #  case $arg_mode

      # Aesthetically quote the previous argument.
      func_append_quoted base_compile "$lastarg"
    done # for arg

    case $arg_mode in
    arg)
      func_fatal_error "you must specify an argument for -Xcompiler"
      ;;
    target)
      func_fatal_error "you must specify a target with '-o'"
      ;;
    *)
      # Get the name of the library object.
      test -z "$libobj" && {
        func_basename "$srcfile"
        libobj=$func_basename_result
      }
      ;;
    esac

    # Recognize several different file suffixes.
    # If the user specifies -o file.o, it is replaced with file.lo
    case $libobj in
    *.[cCFSifmso] | \
    *.ada | *.adb | *.ads | *.asm | \
    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
      func_xform "$libobj"
      libobj=$func_xform_result
      ;;
    esac

    case $libobj in
    *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
    *)
      func_fatal_error "cannot determine name of library object from '$libobj'"
      ;;
    esac

    func_infer_tag $base_compile

    for arg in $later; do
      case $arg in
      -shared)
        test yes = "$build_libtool_libs" \
          || func_fatal_configuration "cannot build a shared library"
        build_old_libs=no
        continue
        ;;

      -static)
        build_libtool_libs=no
        build_old_libs=yes
        continue
        ;;

      -prefer-pic)
        pic_mode=yes
        continue
        ;;

      -prefer-non-pic)
        pic_mode=no
        continue
        ;;
      esac
    done

    func_quote_for_eval "$libobj"
    test "X$libobj" != "X$func_quote_for_eval_result" \
      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \
      && func_warning "libobj name '$libobj' may not contain shell special characters."
    func_dirname_and_basename "$obj" "/" ""
    objname=$func_basename_result
    xdir=$func_dirname_result
    lobj=$xdir$objdir/$objname

    test -z "$base_compile" && \
      func_fatal_help "you must specify a compilation command"

    # Delete any leftover library objects.
    if test yes = "$build_old_libs"; then
      removelist="$obj $lobj $libobj ${libobj}T"
    else
      removelist="$lobj $libobj ${libobj}T"
    fi

    # On Cygwin there's no "real" PIC flag so we must build both object types
    case $host_os in
    cygwin* | mingw* | pw32* | os2* | cegcc*)
      pic_mode=default
      ;;
    esac
    if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then
      # non-PIC code in shared libraries is not supported
      pic_mode=default
    fi

    # Calculate the filename of the output object if compiler does
    # not support -o with -c
    if test no = "$compiler_c_o"; then
      output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext
      lockfile=$output_obj.lock
    else
      output_obj=
      need_locks=no
      lockfile=
    fi

    # Lock this critical section if it is needed
    # We use this script file to make the link, it avoids creating a new file
    if test yes = "$need_locks"; then
      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
        func_echo "Waiting for $lockfile to be removed"
        sleep 2
      done
    elif test warn = "$need_locks"; then
      if test -f "$lockfile"; then
        $ECHO "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support '-c' and '-o' together.
If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi func_append removelist " $output_obj" $ECHO "$srcfile" > "$lockfile" fi $opt_dry_run || $RM $removelist func_append removelist " $lockfile" trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 srcfile=$func_to_tool_file_result func_quote_for_eval "$srcfile" qsrcfile=$func_quote_for_eval_result # Only build a PIC object if we are building libtool libraries. if test yes = "$build_libtool_libs"; then # Without this assignment, base_compile gets emptied. fbsd_hideous_sh_bug=$base_compile if test no != "$pic_mode"; then command="$base_compile $qsrcfile $pic_flag" else # Don't build PIC code command="$base_compile $qsrcfile" fi func_mkdir_p "$xdir$objdir" if test -z "$output_obj"; then # Place PIC objects in $objdir func_append command " -o $lobj" fi func_show_eval_locale "$command" \ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' if test warn = "$need_locks" && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support '-c' and '-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then func_show_eval '$MV "$output_obj" "$lobj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi # Allow error messages only from the first compilation. if test yes = "$suppress_opt"; then suppress_output=' >/dev/null 2>&1' fi fi # Only build a position-dependent object if we build old libraries. if test yes = "$build_old_libs"; then if test yes != "$pic_mode"; then # Don't build PIC code command="$base_compile $qsrcfile$pie_flag" else command="$base_compile $qsrcfile $pic_flag" fi if test yes = "$compiler_c_o"; then func_append command " -o $obj" fi # Suppress compiler output if we already did a PIC compilation. func_append command "$suppress_output" func_show_eval_locale "$command" \ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' if test warn = "$need_locks" && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support '-c' and '-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." 
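# Illustrative aside: need_locks is set by configure.  'yes' takes a real
# lock by hard-linking this script to $lockfile (the 'until ... ln' loop
# above); 'warn' only detects collisions after the fact by comparing the
# $lockfile contents with $srcfile (this branch); 'no' skips locking
# entirely.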
$opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then func_show_eval '$MV "$output_obj" "$obj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi fi $opt_dry_run || { func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" # Unlock the critical section if it was locked if test no != "$need_locks"; then removelist=$lockfile $RM "$lockfile" fi } exit $EXIT_SUCCESS } $opt_help || { test compile = "$opt_mode" && func_mode_compile ${1+"$@"} } func_mode_help () { # We need to display help for each of the modes. case $opt_mode in "") # Generic help is extracted from the usage comments # at the start of this file. func_help ;; clean) $ECHO \ "Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... Remove files from the build directory. RM is the name of the program to use to delete files associated with each FILE (typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed to RM. If FILE is a libtool library, object or program, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; compile) $ECHO \ "Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE Compile a source file into a libtool library object. This mode accepts the following additional options: -o OUTPUT-FILE set the output file name to OUTPUT-FILE -no-suppress do not suppress compiler output for multiple passes -prefer-pic try to build PIC objects only -prefer-non-pic try to build non-PIC objects only -shared do not build a '.o' file suitable for static linking -static only build a '.o' file suitable for static linking -Wc,FLAG pass FLAG directly to the compiler COMPILE-COMMAND is a command to be used in creating a 'standard' object file from the given SOURCEFILE. The output file name is determined by removing the directory component from SOURCEFILE, then substituting the C source code suffix '.c' with the library object suffix, '.lo'." ;; execute) $ECHO \ "Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... Automatically set library path, then run a program. This mode accepts the following additional options: -dlopen FILE add the directory containing FILE to the library path This mode sets the library path environment variable according to '-dlopen' flags. If any of the ARGS are libtool executable wrappers, then they are translated into their corresponding uninstalled binary, and any of their required library directories are added to the library path. Then, COMMAND is executed, with ARGS as arguments." ;; finish) $ECHO \ "Usage: $progname [OPTION]... --mode=finish [LIBDIR]... Complete the installation of libtool libraries. Each LIBDIR is a directory that contains libtool libraries. The commands that this mode executes may require superuser privileges. Use the '--dry-run' option if you just want to see what would be executed." ;; install) $ECHO \ "Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... Install executables or libraries. INSTALL-COMMAND is the installation command. The first component should be either the 'install' or 'cp' program. The following components of INSTALL-COMMAND are treated specially: -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation The rest of the components are interpreted as arguments to that command (only BSD-compatible install options are recognized)." ;; link) $ECHO \ "Usage: $progname [OPTION]... --mode=link LINK-COMMAND... 
Link object files or libraries together to form another library, or to
create an executable program.

LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.

The following components of LINK-COMMAND are treated specially:

  -all-static       do not do any dynamic linking at all
  -avoid-version    do not add a version suffix if possible
  -bindir BINDIR    specify path to binaries directory (for systems where
                    libraries must be found in the PATH setting at runtime)
  -dlopen FILE      '-dlpreopen' FILE if it cannot be dlopened at runtime
  -dlpreopen FILE   link in FILE and add its symbols to lt_preloaded_symbols
  -export-dynamic   allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
  -export-symbols SYMFILE
                    try to export only the symbols listed in SYMFILE
  -export-symbols-regex REGEX
                    try to export only the symbols matching REGEX
  -LLIBDIR          search LIBDIR for required installed libraries
  -lNAME            OUTPUT-FILE requires the installed library libNAME
  -module           build a library that can be dlopened
  -no-fast-install  disable the fast-install mode
  -no-install       link a not-installable executable
  -no-undefined     declare that a library does not refer to external symbols
  -o OUTPUT-FILE    create OUTPUT-FILE from the specified objects
  -objectlist FILE  use a list of object files found in FILE to specify objects
  -os2dllname NAME  force a short DLL name on OS/2 (no effect on other OSes)
  -precious-files-regex REGEX
                    don't remove output files matching REGEX
  -release RELEASE  specify package release information
  -rpath LIBDIR     the created library will eventually be installed in LIBDIR
  -R[ ]LIBDIR       add LIBDIR to the runtime path of programs and libraries
  -shared           only do dynamic linking of libtool libraries
  -shrext SUFFIX    override the standard shared library file extension
  -static           do not do any dynamic linking of uninstalled libtool
                    libraries
  -static-libtool-libs
                    do not do any dynamic linking of libtool libraries
  -version-info CURRENT[:REVISION[:AGE]]
                    specify library version info [each variable defaults to 0]
  -weak LIBNAME     declare that the target provides the LIBNAME interface
  -Wc,FLAG
  -Xcompiler FLAG   pass linker-specific FLAG directly to the compiler
  -Wl,FLAG
  -Xlinker FLAG     pass linker-specific FLAG directly to the linker
  -XCClinker FLAG   pass link-specific FLAG to the compiler driver (CC)

All other options (arguments beginning with '-') are ignored.

Every other argument is treated as a filename.  Files ending in '.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.

If the OUTPUT-FILE ends in '.la', then a libtool library is created,
only library objects ('.lo' files) may be specified, and '-rpath' is
required, except when creating a convenience library.

If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created
using 'ar' and 'ranlib', or on Windows using 'lib'.

If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file
is created, otherwise an executable program is created."
      ;;

    uninstall)
      $ECHO \
"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...

Remove libraries from an installation directory.

RM is the name of the program to use to delete files associated with each
FILE (typically '/bin/rm').  RM-OPTIONS are options (such as '-f') to be
passed to RM.

If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;; *) func_fatal_help "invalid operation mode '$opt_mode'" ;; esac echo $ECHO "Try '$progname --help' for more information about other modes." } # Now that we've collected a possible --mode arg, show help if necessary if $opt_help; then if test : = "$opt_help"; then func_mode_help else { func_help noexit for opt_mode in compile link execute install finish uninstall clean; do func_mode_help done } | $SED -n '1p; 2,$s/^Usage:/ or: /p' { func_help noexit for opt_mode in compile link execute install finish uninstall clean; do echo func_mode_help done } | $SED '1d /^When reporting/,/^Report/{ H d } $x /information about other modes/d /more detailed .*MODE/d s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' fi exit $? fi # func_mode_execute arg... func_mode_execute () { $debug_cmd # The first argument is the command name. cmd=$nonopt test -z "$cmd" && \ func_fatal_help "you must specify a COMMAND" # Handle -dlopen flags immediately. for file in $opt_dlopen; do test -f "$file" \ || func_fatal_help "'$file' is not a file" dir= case $file in *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "'$lib' is not a valid libtool archive" # Read the libtool library. dlname= library_names= func_source "$file" # Skip this library if it cannot be dlopened. if test -z "$dlname"; then # Warn if it was a shared library. test -n "$library_names" && \ func_warning "'$file' was not linked with '-export-dynamic'" continue fi func_dirname "$file" "" "." dir=$func_dirname_result if test -f "$dir/$objdir/$dlname"; then func_append dir "/$objdir" else if test ! -f "$dir/$dlname"; then func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" fi fi ;; *.lo) # Just add the directory containing the .lo file. func_dirname "$file" "" "." dir=$func_dirname_result ;; *) func_warning "'-dlopen' is ignored for non-libtool libraries and objects" continue ;; esac # Get the absolute pathname. absdir=`cd "$dir" && pwd` test -n "$absdir" && dir=$absdir # Now add the directory to shlibpath_var. if eval "test -z \"\$$shlibpath_var\""; then eval "$shlibpath_var=\"\$dir\"" else eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" fi done # This variable tells wrapper scripts just to set shlibpath_var # rather than running their programs. libtool_execute_magic=$magic # Check if any of the arguments is a wrapper script. args= for file do case $file in -* | *.la | *.lo ) ;; *) # Do a test to see if this is really a libtool program. if func_ltwrapper_script_p "$file"; then func_source "$file" # Transform arg to wrapped name. file=$progdir/$program elif func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" func_source "$func_ltwrapper_scriptname_result" # Transform arg to wrapped name. file=$progdir/$program fi ;; esac # Quote arguments (to preserve shell metacharacters). func_append_quoted args "$file" done if $opt_dry_run; then # Display what would be done. if test -n "$shlibpath_var"; then eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" echo "export $shlibpath_var" fi $ECHO "$cmd$args" exit $EXIT_SUCCESS else if test -n "$shlibpath_var"; then # Export the shlibpath_var. eval "export $shlibpath_var" fi # Restore saved environment variables for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${save_$lt_var+set}\" = set; then $lt_var=\$save_$lt_var; export $lt_var else $lt_unset $lt_var fi" done # Now prepare to actually exec the command. 
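# Illustrative aside: rather than exec'ing the program here, the command is
# stored in exec_cmd and exec'd at the very end of this script; as noted at
# the declaration of exec_cmd above, this prevents here-documents from
# being left over by shells.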
exec_cmd=\$cmd$args fi } test execute = "$opt_mode" && func_mode_execute ${1+"$@"} # func_mode_finish arg... func_mode_finish () { $debug_cmd libs= libdirs= admincmds= for opt in "$nonopt" ${1+"$@"} do if test -d "$opt"; then func_append libdirs " $opt" elif test -f "$opt"; then if func_lalib_unsafe_p "$opt"; then func_append libs " $opt" else func_warning "'$opt' is not a valid libtool archive" fi else func_fatal_error "invalid argument '$opt'" fi done if test -n "$libs"; then if test -n "$lt_sysroot"; then sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" else sysroot_cmd= fi # Remove sysroot references if $opt_dry_run; then for lib in $libs; do echo "removing references to $lt_sysroot and '=' prefixes from $lib" done else tmpdir=`func_mktempdir` for lib in $libs; do $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ > $tmpdir/tmp-la mv -f $tmpdir/tmp-la $lib done ${RM}r "$tmpdir" fi fi if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. func_execute_cmds "$finish_cmds" 'admincmds="$admincmds '"$cmd"'"' fi if test -n "$finish_eval"; then # Do the single finish_eval. eval cmds=\"$finish_eval\" $opt_dry_run || eval "$cmds" || func_append admincmds " $cmds" fi done fi # Exit here if they wanted silent mode. $opt_quiet && exit $EXIT_SUCCESS if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then echo "----------------------------------------------------------------------" echo "Libraries have been installed in:" for libdir in $libdirs; do $ECHO " $libdir" done echo echo "If you ever happen to want to link against installed libraries" echo "in a given directory, LIBDIR, you must either use libtool, and" echo "specify the full pathname of the library, or use the '-LLIBDIR'" echo "flag during linking and do at least one of the following:" if test -n "$shlibpath_var"; then echo " - add LIBDIR to the '$shlibpath_var' environment variable" echo " during execution" fi if test -n "$runpath_var"; then echo " - add LIBDIR to the '$runpath_var' environment variable" echo " during linking" fi if test -n "$hardcode_libdir_flag_spec"; then libdir=LIBDIR eval flag=\"$hardcode_libdir_flag_spec\" $ECHO " - use the '$flag' linker flag" fi if test -n "$admincmds"; then $ECHO " - have your system administrator run these commands:$admincmds" fi if test -f /etc/ld.so.conf; then echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" fi echo echo "See any operating system documentation about shared libraries for" case $host in solaris2.[6789]|solaris2.1[0-9]) echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" echo "pages." ;; *) echo "more information, such as the ld(1) and ld.so(8) manual pages." ;; esac echo "----------------------------------------------------------------------" fi exit $EXIT_SUCCESS } test finish = "$opt_mode" && func_mode_finish ${1+"$@"} # func_mode_install arg... func_mode_install () { $debug_cmd # There may be an optional sh(1) argument at the beginning of # install_prog (especially on Windows NT). if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || # Allow the use of GNU shtool's install command. case $nonopt in *shtool*) :;; *) false;; esac then # Aesthetically quote it. 
func_quote_for_eval "$nonopt" install_prog="$func_quote_for_eval_result " arg=$1 shift else install_prog= arg=$nonopt fi # The real first argument should be the name of the installation program. # Aesthetically quote it. func_quote_for_eval "$arg" func_append install_prog "$func_quote_for_eval_result" install_shared_prog=$install_prog case " $install_prog " in *[\\\ /]cp\ *) install_cp=: ;; *) install_cp=false ;; esac # We need to accept at least all the BSD install flags. dest= files= opts= prev= install_type= isdir=false stripme= no_mode=: for arg do arg2= if test -n "$dest"; then func_append files " $dest" dest=$arg continue fi case $arg in -d) isdir=: ;; -f) if $install_cp; then :; else prev=$arg fi ;; -g | -m | -o) prev=$arg ;; -s) stripme=" -s" continue ;; -*) ;; *) # If the previous option needed an argument, then skip it. if test -n "$prev"; then if test X-m = "X$prev" && test -n "$install_override_mode"; then arg2=$install_override_mode no_mode=false fi prev= else dest=$arg continue fi ;; esac # Aesthetically quote the argument. func_quote_for_eval "$arg" func_append install_prog " $func_quote_for_eval_result" if test -n "$arg2"; then func_quote_for_eval "$arg2" fi func_append install_shared_prog " $func_quote_for_eval_result" done test -z "$install_prog" && \ func_fatal_help "you must specify an install program" test -n "$prev" && \ func_fatal_help "the '$prev' option requires an argument" if test -n "$install_override_mode" && $no_mode; then if $install_cp; then :; else func_quote_for_eval "$install_override_mode" func_append install_shared_prog " -m $func_quote_for_eval_result" fi fi if test -z "$files"; then if test -z "$dest"; then func_fatal_help "no file or destination specified" else func_fatal_help "you must specify a destination" fi fi # Strip any trailing slash from the destination. func_stripname '' '/' "$dest" dest=$func_stripname_result # Check to see that the destination is a directory. test -d "$dest" && isdir=: if $isdir; then destdir=$dest destname= else func_dirname_and_basename "$dest" "" "." destdir=$func_dirname_result destname=$func_basename_result # Not a directory, so check to see that there is only one file specified. set dummy $files; shift test "$#" -gt 1 && \ func_fatal_help "'$dest' is not a directory" fi case $destdir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) for file in $files; do case $file in *.lo) ;; *) func_fatal_help "'$destdir' must be an absolute directory name" ;; esac done ;; esac # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic=$magic staticlibs= future_libdirs= current_libdirs= for file in $files; do # Do each installation. case $file in *.$libext) # Do the static libraries later. func_append staticlibs " $file" ;; *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "'$file' is not a valid libtool archive" library_names= old_library= relink_command= func_source "$file" # Add the libdir to current_libdirs if it is the destination. if test "X$destdir" = "X$libdir"; then case "$current_libdirs " in *" $libdir "*) ;; *) func_append current_libdirs " $libdir" ;; esac else # Note the libdir as a future libdir. 
case "$future_libdirs " in *" $libdir "*) ;; *) func_append future_libdirs " $libdir" ;; esac fi func_dirname "$file" "/" "" dir=$func_dirname_result func_append dir "$objdir" if test -n "$relink_command"; then # Determine the prefix the user has applied to our future dir. inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` # Don't allow the user to place us outside of our expected # location b/c this prevents finding dependent libraries that # are installed to the same prefix. # At present, this check doesn't affect windows .dll's that # are installed into $libdir/../bin (currently, that works fine) # but it's something to keep an eye on. test "$inst_prefix_dir" = "$destdir" && \ func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" if test -n "$inst_prefix_dir"; then # Stick the inst_prefix_dir data into the link command. relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` else relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` fi func_warning "relinking '$file'" func_show_eval "$relink_command" \ 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' fi # See the names of the shared library. set dummy $library_names; shift if test -n "$1"; then realname=$1 shift srcname=$realname test -n "$relink_command" && srcname=${realname}T # Install the shared library and build the symlinks. func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ 'exit $?' tstripme=$stripme case $host_os in cygwin* | mingw* | pw32* | cegcc*) case $realname in *.dll.a) tstripme= ;; esac ;; os2*) case $realname in *_dll.a) tstripme= ;; esac ;; esac if test -n "$tstripme" && test -n "$striplib"; then func_show_eval "$striplib $destdir/$realname" 'exit $?' fi if test "$#" -gt 0; then # Delete the old symlinks, and create new ones. # Try 'ln -sf' first, because the 'ln' binary might depend on # the symlink we replace! Solaris /bin/ln does not understand -f, # so we also need to try rm && ln -s. for linkname do test "$linkname" != "$realname" \ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done fi # Do each command in the postinstall commands. lib=$destdir/$realname func_execute_cmds "$postinstall_cmds" 'exit $?' fi # Install the pseudo-library for information purposes. func_basename "$file" name=$func_basename_result instname=$dir/${name}i func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' # Maybe install the static library, too. test -n "$old_library" && func_append staticlibs " $dir/$old_library" ;; *.lo) # Install (i.e. copy) a libtool object. # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile=$destdir/$destname else func_basename "$file" destfile=$func_basename_result destfile=$destdir/$destfile fi # Deduce the name of the destination old-style object file. case $destfile in *.lo) func_lo2o "$destfile" staticdest=$func_lo2o_result ;; *.$objext) staticdest=$destfile destfile= ;; *) func_fatal_help "cannot copy a libtool object to '$destfile'" ;; esac # Install the libtool object if requested. test -n "$destfile" && \ func_show_eval "$install_prog $file $destfile" 'exit $?' # Install the old object if enabled. if test yes = "$build_old_libs"; then # Deduce the name of the old-style object file. func_lo2o "$file" staticobj=$func_lo2o_result func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' 
fi exit $EXIT_SUCCESS ;; *) # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile=$destdir/$destname else func_basename "$file" destfile=$func_basename_result destfile=$destdir/$destfile fi # If the file is missing, and there is a .exe on the end, strip it # because it is most likely a libtool script we actually want to # install stripped_ext= case $file in *.exe) if test ! -f "$file"; then func_stripname '' '.exe' "$file" file=$func_stripname_result stripped_ext=.exe fi ;; esac # Do a test to see if this is really a libtool program. case $host in *cygwin* | *mingw*) if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" wrapper=$func_ltwrapper_scriptname_result else func_stripname '' '.exe' "$file" wrapper=$func_stripname_result fi ;; *) wrapper=$file ;; esac if func_ltwrapper_script_p "$wrapper"; then notinst_deplibs= relink_command= func_source "$wrapper" # Check the variables that should have been set. test -z "$generated_by_libtool_version" && \ func_fatal_error "invalid libtool wrapper script '$wrapper'" finalize=: for lib in $notinst_deplibs; do # Check to see that each library is installed. libdir= if test -f "$lib"; then func_source "$lib" fi libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` if test -n "$libdir" && test ! -f "$libfile"; then func_warning "'$lib' has not been installed in '$libdir'" finalize=false fi done relink_command= func_source "$wrapper" outputname= if test no = "$fast_install" && test -n "$relink_command"; then $opt_dry_run || { if $finalize; then tmpdir=`func_mktempdir` func_basename "$file$stripped_ext" file=$func_basename_result outputname=$tmpdir/$file # Replace the output file specification. relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` $opt_quiet || { func_quote_for_expand "$relink_command" eval "func_echo $func_quote_for_expand_result" } if eval "$relink_command"; then : else func_error "error: relink '$file' with the above command before installing it" $opt_dry_run || ${RM}r "$tmpdir" continue fi file=$outputname else func_warning "cannot relink '$file'" fi } else # Install the binary that we compiled earlier. file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` fi fi # remove .exe since cygwin /usr/bin/install will append another # one anyway case $install_prog,$host in */usr/bin/install*,*cygwin*) case $file:$destfile in *.exe:*.exe) # this is ok ;; *.exe:*) destfile=$destfile.exe ;; *:*.exe) func_stripname '' '.exe' "$destfile" destfile=$func_stripname_result ;; esac ;; esac func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' $opt_dry_run || if test -n "$outputname"; then ${RM}r "$tmpdir" fi ;; esac done for file in $staticlibs; do func_basename "$file" name=$func_basename_result # Set up the ranlib parameters. oldlib=$destdir/$name func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result func_show_eval "$install_prog \$file \$oldlib" 'exit $?' if test -n "$stripme" && test -n "$old_striplib"; then func_show_eval "$old_striplib $tool_oldlib" 'exit $?' fi # Do each command in the postinstall commands. func_execute_cmds "$old_postinstall_cmds" 'exit $?' done test -n "$future_libdirs" && \ func_warning "remember to run '$progname --finish$future_libdirs'" if test -n "$current_libdirs"; then # Maybe just do a dry run. 
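# A usage sketch (not emitted by this script; the directory is hypothetical):
# when installation only recorded "future" libdirs above, the user is warned
# to run finish mode by hand, e.g.
#
#   libtool --finish /usr/local/lib
#
# which executes the platform's finish_cmds (for instance ldconfig on
# GNU/Linux) so the runtime linker picks up the newly installed libraries.
# The branch below does the same thing automatically for $current_libdirs.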
$opt_dry_run && current_libdirs=" -n$current_libdirs" exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' else exit $EXIT_SUCCESS fi } test install = "$opt_mode" && func_mode_install ${1+"$@"} # func_generate_dlsyms outputname originator pic_p # Extract symbols from dlprefiles and create ${outputname}S.o with # a dlpreopen symbol table. func_generate_dlsyms () { $debug_cmd my_outputname=$1 my_originator=$2 my_pic_p=${3-false} my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` my_dlsyms= if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then if test -n "$NM" && test -n "$global_symbol_pipe"; then my_dlsyms=${my_outputname}S.c else func_error "not configured to extract global symbols from dlpreopened files" fi fi if test -n "$my_dlsyms"; then case $my_dlsyms in "") ;; *.c) # Discover the nlist of each of the dlfiles. nlist=$output_objdir/$my_outputname.nm func_show_eval "$RM $nlist ${nlist}S ${nlist}T" # Parse the name list into a source file. func_verbose "creating $output_objdir/$my_dlsyms" $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ /* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ /* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ #ifdef __cplusplus extern \"C\" { #endif #if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) #pragma GCC diagnostic ignored \"-Wstrict-prototypes\" #endif /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) /* External symbol declarations for the compiler. */\ " if test yes = "$dlself"; then func_verbose "generating symbol list for '$output'" $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" # Add our own program objects to the symbol list. 
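# For orientation, a rough sketch of the C file this function ultimately
# emits (program and symbol names here are hypothetical; the heredocs below
# are the authoritative layout):
#
#   extern int foo (void);
#   LT_DLSYM_CONST lt_dlsymlist
#   lt_prog_LTX_preloaded_symbols[] =
#   { {"prog", (void *) 0},
#     {"foo", (void *) &foo},
#     {0, (void *) 0} };
#
# Code preloaded with -dlpreopen resolves its dlsym() lookups against this
# table ("dlsym emulation") instead of asking the real dynamic loader.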
progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` for progfile in $progfiles; do func_to_tool_file "$progfile" func_convert_file_msys_to_w32 func_verbose "extracting global C symbols from '$func_to_tool_file_result'" $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" done if test -n "$exclude_expsyms"; then $opt_dry_run || { eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi if test -n "$export_symbols_regex"; then $opt_dry_run || { eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi # Prepare the list of exported symbols if test -z "$export_symbols"; then export_symbols=$output_objdir/$outputname.exp $opt_dry_run || { $RM $export_symbols eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' ;; esac } else $opt_dry_run || { eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' ;; esac } fi fi for dlprefile in $dlprefiles; do func_verbose "extracting global C symbols from '$dlprefile'" func_basename "$dlprefile" name=$func_basename_result case $host in *cygwin* | *mingw* | *cegcc* ) # if an import library, we need to obtain dlname if func_win32_import_lib_p "$dlprefile"; then func_tr_sh "$dlprefile" eval "curr_lafile=\$libfile_$func_tr_sh_result" dlprefile_dlbasename= if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then # Use subshell, to avoid clobbering current variable values dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` if test -n "$dlprefile_dlname"; then func_basename "$dlprefile_dlname" dlprefile_dlbasename=$func_basename_result else # no lafile. user explicitly requested -dlpreopen <import library>. $sharedlib_from_linklib_cmd "$dlprefile" dlprefile_dlbasename=$sharedlib_from_linklib_result fi fi $opt_dry_run || { if test -n "$dlprefile_dlbasename"; then eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' else func_warning "Could not compute DLL name from $name" eval '$ECHO ": $name " >> "$nlist"' fi func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" } else # not an import lib $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } fi ;; *) $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } ;; esac done $opt_dry_run || { # Make sure we have at least an empty file. test -f "$nlist" || : > "$nlist" if test -n "$exclude_expsyms"; then $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T $MV "$nlist"T "$nlist" fi # Try sorting and uniquifying the output.
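# The probe just below ('sort -k 3 </dev/null') only tests whether the
# running sort accepts POSIX '-k' keys; otherwise the obsolete '+2' key
# syntax is used.  Both sort on the third whitespace-separated field, which
# for typical nm output is the symbol name.  Roughly equivalent, with a
# hypothetical input file:
#
#   $GREP -v '^: ' prog.nm | sort -k 3 | uniq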
if $GREP -v "^: " < "$nlist" | if sort -k 3 </dev/null >/dev/null 2>&1; then sort -k 3 else sort +2 fi | uniq > "$nlist"S; then : else $GREP -v "^: " < "$nlist" > "$nlist"S fi if test -f "$nlist"S; then eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' else echo '/* NONE */' >> "$output_objdir/$my_dlsyms" fi func_show_eval '$RM "${nlist}I"' if test -n "$global_symbol_to_import"; then eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I' fi echo >> "$output_objdir/$my_dlsyms" "\ /* The mapping between symbol names and symbols. */ typedef struct { const char *name; void *address; } lt_dlsymlist; extern LT_DLSYM_CONST lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[];\ " if test -s "$nlist"I; then echo >> "$output_objdir/$my_dlsyms" "\ static void lt_syminit(void) { LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols; for (; symbol->name; ++symbol) {" $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms" echo >> "$output_objdir/$my_dlsyms" "\ } }" fi echo >> "$output_objdir/$my_dlsyms" "\ LT_DLSYM_CONST lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[] = { {\"$my_originator\", (void *) 0}," if test -s "$nlist"I; then echo >> "$output_objdir/$my_dlsyms" "\ {\"@INIT@\", (void *) &lt_syminit}," fi case $need_lib_prefix in no) eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; *) eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; esac echo >> "$output_objdir/$my_dlsyms" "\ {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt_${my_prefix}_LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif\ " } # !$opt_dry_run pic_flag_for_symtable= case "$compile_command " in *" -static "*) ;; *) case $host in # compiling the symbol table file with pic_flag works around # a FreeBSD bug that causes programs to crash when -lm is # linked before any other PIC object. But we must not use # pic_flag when linking with -static. The problem exists in # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; *-*-hpux*) pic_flag_for_symtable=" $pic_flag" ;; *) $my_pic_p && pic_flag_for_symtable=" $pic_flag" ;; esac ;; esac symtab_cflags= for arg in $LTCFLAGS; do case $arg in -pie | -fpie | -fPIE) ;; *) func_append symtab_cflags " $arg" ;; esac done # Now compile the dynamic symbol file. func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' # Clean up the generated files. func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' # Transform the symbol file into the correct name.
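# What follows replaces the @SYMFILE@ placeholder (appended to the link
# commands during -dlopen/-dlpreopen option parsing) with the compiled
# symbol-table object, plus the .def export file on PE hosts.
# Schematically, with hypothetical values:
#
#   before:  $CC -o prog main.o @SYMFILE@
#   after:   $CC -o prog main.o .libs/progS.o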
symfileobj=$output_objdir/${my_outputname}S.$objext case $host in *cygwin* | *mingw* | *cegcc* ) if test -f "$output_objdir/$my_outputname.def"; then compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` else compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` fi ;; *) compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` ;; esac ;; *) func_fatal_error "unknown suffix for '$my_dlsyms'" ;; esac else # We keep going just in case the user didn't refer to # lt_preloaded_symbols. The linker will fail if global_symbol_pipe # really was required. # Nullify the symbol file. compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` fi } # func_cygming_gnu_implib_p ARG # This predicate returns with zero status (TRUE) if # ARG is a GNU/binutils-style import library. Returns # with nonzero status (FALSE) otherwise. func_cygming_gnu_implib_p () { $debug_cmd func_to_tool_file "$1" func_convert_file_msys_to_w32 func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` test -n "$func_cygming_gnu_implib_tmp" } # func_cygming_ms_implib_p ARG # This predicate returns with zero status (TRUE) if # ARG is an MS-style import library. Returns # with nonzero status (FALSE) otherwise. func_cygming_ms_implib_p () { $debug_cmd func_to_tool_file "$1" func_convert_file_msys_to_w32 func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` test -n "$func_cygming_ms_implib_tmp" } # func_win32_libid arg # return the library type of file 'arg' # # Need a lot of goo to handle *both* DLLs and import libs # Has to be a shell function in order to 'eat' the argument # that is supplied when $file_magic_command is called. # Despite the name, also deal with 64 bit binaries. func_win32_libid () { $debug_cmd win32_libid_type=unknown win32_fileres=`file -L $1 2>/dev/null` case $win32_fileres in *ar\ archive\ import\ library*) # definitely import win32_libid_type="x86 archive import" ;; *ar\ archive*) # could be an import, or static # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then case $nm_interface in "MS dumpbin") if func_cygming_ms_implib_p "$1" || func_cygming_gnu_implib_p "$1" then win32_nmres=import else win32_nmres= fi ;; *) func_to_tool_file "$1" func_convert_file_msys_to_w32 win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | $SED -n -e ' 1,100{ / I /{ s|.*|import| p q } }'` ;; esac case $win32_nmres in import*) win32_libid_type="x86 archive import";; *) win32_libid_type="x86 archive static";; esac fi ;; *DLL*) win32_libid_type="x86 DLL" ;; *executable*) # but shell scripts are "executable" too... 
case $win32_fileres in *MS\ Windows\ PE\ Intel*) win32_libid_type="x86 DLL" ;; esac ;; esac $ECHO "$win32_libid_type" } # func_cygming_dll_for_implib ARG # # Platform-specific function to extract the # name of the DLL associated with the specified # import library ARG. # Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib () { $debug_cmd sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` } # func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs # # The is the core of a fallback implementation of a # platform-specific function to extract the name of the # DLL associated with the specified import library LIBNAME. # # SECTION_NAME is either .idata$6 or .idata$7, depending # on the platform and compiler that created the implib. # # Echos the name of the DLL associated with the # specified import library. func_cygming_dll_for_implib_fallback_core () { $debug_cmd match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` $OBJDUMP -s --section "$1" "$2" 2>/dev/null | $SED '/^Contents of section '"$match_literal"':/{ # Place marker at beginning of archive member dllname section s/.*/====MARK====/ p d } # These lines can sometimes be longer than 43 characters, but # are always uninteresting /:[ ]*file format pe[i]\{,1\}-/d /^In archive [^:]*:/d # Ensure marker is printed /^====MARK====/p # Remove all lines with less than 43 characters /^.\{43\}/!d # From remaining lines, remove first 43 characters s/^.\{43\}//' | $SED -n ' # Join marker and all lines until next marker into a single line /^====MARK====/ b para H $ b para b :para x s/\n//g # Remove the marker s/^====MARK====// # Remove trailing dots and whitespace s/[\. \t]*$// # Print /./p' | # we now have a list, one entry per line, of the stringified # contents of the appropriate section of all members of the # archive that possess that section. Heuristic: eliminate # all those that have a first or second character that is # a '.' (that is, objdump's representation of an unprintable # character.) This should work for all archives with less than # 0x302f exports -- but will fail for DLLs whose name actually # begins with a literal '.' or a single character followed by # a '.'. # # Of those that remain, print the first one. $SED -e '/^\./d;/^.\./d;q' } # func_cygming_dll_for_implib_fallback ARG # Platform-specific function to extract the # name of the DLL associated with the specified # import library ARG. # # This fallback implementation is for use when $DLLTOOL # does not support the --identify-strict option. 
# Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib_fallback () { $debug_cmd if func_cygming_gnu_implib_p "$1"; then # binutils import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` elif func_cygming_ms_implib_p "$1"; then # ms-generated import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` else # unknown sharedlib_from_linklib_result= fi } # func_extract_an_archive dir oldlib func_extract_an_archive () { $debug_cmd f_ex_an_ar_dir=$1; shift f_ex_an_ar_oldlib=$1 if test yes = "$lock_old_archive_extraction"; then lockfile=$f_ex_an_ar_oldlib.lock until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done fi func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ 'stat=$?; rm -f "$lockfile"; exit $stat' if test yes = "$lock_old_archive_extraction"; then $opt_dry_run || rm -f "$lockfile" fi if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then : else func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" fi } # func_extract_archives gentop oldlib ... func_extract_archives () { $debug_cmd my_gentop=$1; shift my_oldlibs=${1+"$@"} my_oldobjs= my_xlib= my_xabs= my_xdir= for my_xlib in $my_oldlibs; do # Extract the objects. case $my_xlib in [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; *) my_xabs=`pwd`"/$my_xlib" ;; esac func_basename "$my_xlib" my_xlib=$func_basename_result my_xlib_u=$my_xlib while :; do case " $extracted_archives " in *" $my_xlib_u "*) func_arith $extracted_serial + 1 extracted_serial=$func_arith_result my_xlib_u=lt$extracted_serial-$my_xlib ;; *) break ;; esac done extracted_archives="$extracted_archives $my_xlib_u" my_xdir=$my_gentop/$my_xlib_u func_mkdir_p "$my_xdir" case $host in *-darwin*) func_verbose "Extracting $my_xabs" # Do not bother doing anything if just a dry run $opt_dry_run || { darwin_orig_dir=`pwd` cd $my_xdir || exit $? 
darwin_archive=$my_xabs darwin_curdir=`pwd` func_basename "$darwin_archive" darwin_base_archive=$func_basename_result darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` if test -n "$darwin_arches"; then darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` darwin_arch= func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" for darwin_arch in $darwin_arches; do func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" cd "unfat-$$/$darwin_base_archive-$darwin_arch" func_extract_an_archive "`pwd`" "$darwin_base_archive" cd "$darwin_curdir" $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" done # $darwin_arches ## Okay now we've a bunch of thin objects, gotta fatten them up :) darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` darwin_file= darwin_files= for darwin_file in $darwin_filelist; do darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` $LIPO -create -output "$darwin_file" $darwin_files done # $darwin_filelist $RM -rf unfat-$$ cd "$darwin_orig_dir" else cd $darwin_orig_dir func_extract_an_archive "$my_xdir" "$my_xabs" fi # $darwin_arches } # !$opt_dry_run ;; *) func_extract_an_archive "$my_xdir" "$my_xabs" ;; esac my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` done func_extract_archives_result=$my_oldobjs } # func_emit_wrapper [arg=no] # # Emit a libtool wrapper script on stdout. # Don't directly open a file because we may want to # incorporate the script contents within a cygwin/mingw # wrapper executable. Must ONLY be called from within # func_mode_link because it depends on a number of variables # set therein. # # ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR # variable will take. If 'yes', then the emitted script # will assume that the directory where it is stored is # the $objdir directory. This is a cygwin/mingw-specific # behavior. func_emit_wrapper () { func_emit_wrapper_arg1=${1-no} $ECHO "\ #! $SHELL # $output - temporary wrapper script for $objdir/$outputname # Generated by $PROGRAM (GNU $PACKAGE) $VERSION # # The $output program cannot be directly executed until all the libtool # libraries that it depends on are installed. # # This wrapper script should never be moved out of the build directory. # If it is, it will not operate correctly. # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='$sed_quote_subst' # Be Bourne compatible if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" # This environment variable determines our operation mode. 
if test \"\$libtool_install_magic\" = \"$magic\"; then # install mode needs the following variables: generated_by_libtool_version='$macro_version' notinst_deplibs='$notinst_deplibs' else # When we are sourced in execute mode, \$file and \$ECHO are already set. if test \"\$libtool_execute_magic\" != \"$magic\"; then file=\"\$0\"" qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` $ECHO "\ # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } ECHO=\"$qECHO\" fi # Very basic option parsing. These options are (a) specific to # the libtool wrapper, (b) are identical between the wrapper # /script/ and the wrapper /executable/ that is used only on # windows platforms, and (c) all begin with the string "--lt-" # (application programs are unlikely to have options that match # this pattern). # # There are only two supported options: --lt-debug and # --lt-dump-script. There is, deliberately, no --lt-help. # # The first argument to this parsing function should be the # script's $0 value, followed by "$@". lt_option_debug= func_parse_lt_options () { lt_script_arg0=\$0 shift for lt_opt do case \"\$lt_opt\" in --lt-debug) lt_option_debug=1 ;; --lt-dump-script) lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` cat \"\$lt_dump_D/\$lt_dump_F\" exit 0 ;; --lt-*) \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 exit 1 ;; esac done # Print the debug banner immediately: if test -n \"\$lt_option_debug\"; then echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 fi } # Used when --lt-debug. Prints its arguments to stdout # (redirection is the responsibility of the caller) func_lt_dump_args () { lt_dump_args_N=1; for lt_arg do \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` done } # Core function for launching the target application func_exec_program_core () { " case $host in # Backslashes separate directories on plain windows *-*-mingw | *-*-os2* | *-cegcc*) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} " ;; *) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir/\$program\" \${1+\"\$@\"} " ;; esac $ECHO "\ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 exit 1 } # A function to encapsulate launching the target application # Strips options in the --lt-* namespace from \$@ and # launches target application with the remaining arguments. func_exec_program () { case \" \$* \" in *\\ --lt-*) for lt_wr_arg do case \$lt_wr_arg in --lt-*) ;; *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; esac shift done ;; esac func_exec_program_core \${1+\"\$@\"} } # Parse options func_parse_lt_options \"\$0\" \${1+\"\$@\"} # Find the directory that this script lives in. thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` test \"x\$thisdir\" = \"x\$file\" && thisdir=. # Follow symbolic links until we get to the real thisdir. 
file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` while test -n \"\$file\"; do destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` # If there was a directory component, then change thisdir. if test \"x\$destdir\" != \"x\$file\"; then case \"\$destdir\" in [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; *) thisdir=\"\$thisdir/\$destdir\" ;; esac fi file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` done # Usually 'no', except on cygwin/mingw when embedded into # the cwrapper. WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then # special case for '.' if test \"\$thisdir\" = \".\"; then thisdir=\`pwd\` fi # remove .libs from thisdir case \"\$thisdir\" in *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; $objdir ) thisdir=. ;; esac fi # Try to get the absolute directory name. absdir=\`cd \"\$thisdir\" && pwd\` test -n \"\$absdir\" && thisdir=\"\$absdir\" " if test yes = "$fast_install"; then $ECHO "\ program=lt-'$outputname'$exeext progdir=\"\$thisdir/$objdir\" if test ! -f \"\$progdir/\$program\" || { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ test \"X\$file\" != \"X\$progdir/\$program\"; }; then file=\"\$\$-\$program\" if test ! -d \"\$progdir\"; then $MKDIR \"\$progdir\" else $RM \"\$progdir/\$file\" fi" $ECHO "\ # relink executable if necessary if test -n \"\$relink_command\"; then if relink_command_output=\`eval \$relink_command 2>&1\`; then : else \$ECHO \"\$relink_command_output\" >&2 $RM \"\$progdir/\$file\" exit 1 fi fi $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || { $RM \"\$progdir/\$program\"; $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } $RM \"\$progdir/\$file\" fi" else $ECHO "\ program='$outputname' progdir=\"\$thisdir/$objdir\" " fi $ECHO "\ if test -f \"\$progdir/\$program\"; then" # fixup the dll searchpath if we need to. # # Fix the DLL searchpath if we need to. Do this before prepending # to shlibpath, because on Windows, both are PATH and uninstalled # libraries must come first. if test -n "$dllsearchpath"; then $ECHO "\ # Add the dll search path components to the executable PATH PATH=$dllsearchpath:\$PATH " fi # Export our shlibpath_var if we have one. if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then $ECHO "\ # Add our own library path to $shlibpath_var $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" # Some systems cannot cope with colon-terminated $shlibpath_var # The second colon is a workaround for a bug in BeOS R4 sed $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` export $shlibpath_var " fi $ECHO "\ if test \"\$libtool_execute_magic\" != \"$magic\"; then # Run the actual program with our arguments. func_exec_program \${1+\"\$@\"} fi else # The program doesn't exist. \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 exit 1 fi fi\ " } # func_emit_cwrapperexe_src # emit the source code for a wrapper executable on stdout # Must ONLY be called from within func_mode_link because # it depends on a number of variable set therein. 
func_emit_cwrapperexe_src () { cat <<EOF /* $cwrappersource - temporary wrapper executable for $objdir/$outputname Generated by $PROGRAM (GNU $PACKAGE) $VERSION The $output program cannot be directly executed until all the libtool libraries that it depends on are installed. This wrapper executable should never be moved out of the build directory. If it is, it will not operate correctly. */ EOF cat <<"EOF" #ifdef _MSC_VER # define _CRT_SECURE_NO_DEPRECATE 1 #endif #include <stdio.h> #include <stdlib.h> #ifdef _MSC_VER # include <direct.h> # include <process.h> # include <io.h> #else # include <unistd.h> # include <stdint.h> # ifdef __CYGWIN__ # include <io.h> # endif #endif #include <malloc.h> #include <stdarg.h> #include <assert.h> #include <string.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <sys/stat.h> #define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) /* declarations of non-ANSI functions */ #if defined __MINGW32__ # ifdef __STRICT_ANSI__ int _putenv (const char *); # endif #elif defined __CYGWIN__ # ifdef __STRICT_ANSI__ char *realpath (const char *, char *); int putenv (char *); int setenv (const char *, const char *, int); # endif /* #elif defined other_platform || defined ... */ #endif /* portability defines, excluding path handling macros */ #if defined _MSC_VER # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv # define S_IXUSR _S_IEXEC #elif defined __MINGW32__ # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv #elif defined __CYGWIN__ # define HAVE_SETENV # define FOPEN_WB "wb" /* #elif defined other platforms ... */ #endif #if defined PATH_MAX # define LT_PATHMAX PATH_MAX #elif defined MAXPATHLEN # define LT_PATHMAX MAXPATHLEN #else # define LT_PATHMAX 1024 #endif #ifndef S_IXOTH # define S_IXOTH 0 #endif #ifndef S_IXGRP # define S_IXGRP 0 #endif /* path handling portability macros */ #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # define PATH_SEPARATOR ':' #endif #if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ defined __OS2__ # define HAVE_DOS_BASED_FILE_SYSTEM # define FOPEN_WB "wb" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # ifndef PATH_SEPARATOR_2 # define PATH_SEPARATOR_2 ';' # endif #endif #ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) #else /* DIR_SEPARATOR_2 */ # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) #endif /* DIR_SEPARATOR_2 */ #ifndef PATH_SEPARATOR_2 # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) #else /* PATH_SEPARATOR_2 */ # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) #endif /* PATH_SEPARATOR_2 */ #ifndef FOPEN_WB # define FOPEN_WB "w" #endif #ifndef _O_BINARY # define _O_BINARY 0 #endif #define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) #define XFREE(stale) do { \ if (stale) { free (stale); stale = 0; } \ } while (0) #if defined LT_DEBUGWRAPPER static int lt_debug = 1; #else static int lt_debug = 0; #endif const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ void *xmalloc (size_t num); char *xstrdup (const char *string); const char *base_name (const char *name); char *find_executable (const char *wrapper); char *chase_symlinks (const char *pathspec); int make_executable (const char *path); int check_executable (const char *path); char *strendzap (char *str, const char *pat); void lt_debugprintf (const char *file, int line, const char *fmt, ...); void lt_fatal (const char *file, int line, const char *message, ...); static const char *nonnull (const char *s); static const char *nonempty (const char *s); void lt_setenv (const char *name, const char *value); char *lt_extend_str (const char *orig_value, const char *add, int to_end); void lt_update_exe_path (const char *name, const char *value); void lt_update_lib_path (const char *name, const char *value); char **prepare_spawn (char **argv); void lt_dump_script (FILE *f); EOF cat <<"EOF" int check_executable (const char *path) { struct stat st; lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n", nonempty (path)); if ((!path) || (!*path)) return 0; if ((stat (path, &st) >= 0) && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) return 1; else return 0; } int make_executable (const
char *path) { int rval = 0; struct stat st; lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", nonempty (path)); if ((!path) || (!*path)) return 0; if (stat (path, &st) >= 0) { rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); } return rval; } /* Searches for the full path of the wrapper. Returns newly allocated full path name if found, NULL otherwise Does not chase symlinks, even on platforms that support them. */ char * find_executable (const char *wrapper) { int has_slash = 0; const char *p; const char *p_next; /* static buffer for getcwd */ char tmp[LT_PATHMAX + 1]; size_t tmp_len; char *concat_name; lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", nonempty (wrapper)); if ((wrapper == NULL) || (*wrapper == '\0')) return NULL; /* Absolute path? */ #if defined HAVE_DOS_BASED_FILE_SYSTEM if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } else { #endif if (IS_DIR_SEPARATOR (wrapper[0])) { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } #if defined HAVE_DOS_BASED_FILE_SYSTEM } #endif for (p = wrapper; *p; p++) if (*p == '/') { has_slash = 1; break; } if (!has_slash) { /* no slashes; search PATH */ const char *path = getenv ("PATH"); if (path != NULL) { for (p = path; *p; p = p_next) { const char *q; size_t p_len; for (q = p; *q; q++) if (IS_PATH_SEPARATOR (*q)) break; p_len = (size_t) (q - p); p_next = (*q == '\0' ? q : q + 1); if (p_len == 0) { /* empty path: current directory */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); } else { concat_name = XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, p, p_len); concat_name[p_len] = '/'; strcpy (concat_name + p_len + 1, wrapper); } if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } } /* not found in PATH; assume curdir */ } /* Relative path | not found in path: prepend cwd */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); return NULL; } char * chase_symlinks (const char *pathspec) { #ifndef S_ISLNK return xstrdup (pathspec); #else char buf[LT_PATHMAX]; struct stat s; char *tmp_pathspec = xstrdup (pathspec); char *p; int has_symlinks = 0; while (strlen (tmp_pathspec) && !has_symlinks) { lt_debugprintf (__FILE__, __LINE__, "checking path component for symlinks: %s\n", tmp_pathspec); if (lstat (tmp_pathspec, &s) == 0) { if (S_ISLNK (s.st_mode) != 0) { has_symlinks = 1; break; } /* search backwards for last DIR_SEPARATOR */ p = tmp_pathspec + strlen (tmp_pathspec) - 1; while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) p--; if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) { /* no more DIR_SEPARATORS left */ break; } *p = '\0'; } else { lt_fatal (__FILE__, __LINE__, "error accessing file \"%s\": %s", tmp_pathspec, nonnull (strerror (errno))); } } XFREE 
(tmp_pathspec); if (!has_symlinks) { return xstrdup (pathspec); } tmp_pathspec = realpath (pathspec, buf); if (tmp_pathspec == 0) { lt_fatal (__FILE__, __LINE__, "could not follow symlinks for %s", pathspec); } return xstrdup (tmp_pathspec); #endif } char * strendzap (char *str, const char *pat) { size_t len, patlen; assert (str != NULL); assert (pat != NULL); len = strlen (str); patlen = strlen (pat); if (patlen <= len) { str += len - patlen; if (STREQ (str, pat)) *str = '\0'; } return str; } void lt_debugprintf (const char *file, int line, const char *fmt, ...) { va_list args; if (lt_debug) { (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); va_start (args, fmt); (void) vfprintf (stderr, fmt, args); va_end (args); } } static void lt_error_core (int exit_status, const char *file, int line, const char *mode, const char *message, va_list ap) { fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); vfprintf (stderr, message, ap); fprintf (stderr, ".\n"); if (exit_status >= 0) exit (exit_status); } void lt_fatal (const char *file, int line, const char *message, ...) { va_list ap; va_start (ap, message); lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); va_end (ap); } static const char * nonnull (const char *s) { return s ? s : "(null)"; } static const char * nonempty (const char *s) { return (s && !*s) ? "(empty)" : nonnull (s); } void lt_setenv (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_setenv) setting '%s' to '%s'\n", nonnull (name), nonnull (value)); { #ifdef HAVE_SETENV /* always make a copy, for consistency with !HAVE_SETENV */ char *str = xstrdup (value); setenv (name, str, 1); #else size_t len = strlen (name) + 1 + strlen (value) + 1; char *str = XMALLOC (char, len); sprintf (str, "%s=%s", name, value); if (putenv (str) != EXIT_SUCCESS) { XFREE (str); } #endif } } char * lt_extend_str (const char *orig_value, const char *add, int to_end) { char *new_value; if (orig_value && *orig_value) { size_t orig_value_len = strlen (orig_value); size_t add_len = strlen (add); new_value = XMALLOC (char, add_len + orig_value_len + 1); if (to_end) { strcpy (new_value, orig_value); strcpy (new_value + orig_value_len, add); } else { strcpy (new_value, add); strcpy (new_value + add_len, orig_value); } } else { new_value = xstrdup (add); } return new_value; } void lt_update_exe_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); /* some systems can't cope with a ':'-terminated path #' */ size_t len = strlen (new_value); while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) { new_value[--len] = '\0'; } lt_setenv (name, new_value); XFREE (new_value); } } void lt_update_lib_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); } } EOF case $host_os in mingw*) cat <<"EOF" /* Prepares an argument vector before calling spawn(). Note that spawn() does not by itself call the command interpreter (getenv ("COMSPEC") != NULL ? 
getenv ("COMSPEC") : ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); GetVersionEx(&v); v.dwPlatformId == VER_PLATFORM_WIN32_NT; }) ? "cmd.exe" : "command.com"). Instead it simply concatenates the arguments, separated by ' ', and calls CreateProcess(). We must quote the arguments since Win32 CreateProcess() interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a special way: - Space and tab are interpreted as delimiters. They are not treated as delimiters if they are surrounded by double quotes: "...". - Unescaped double quotes are removed from the input. Their only effect is that within double quotes, space and tab are treated like normal characters. - Backslashes not followed by double quotes are not special. - But 2*n+1 backslashes followed by a double quote become n backslashes followed by a double quote (n >= 0): \" -> " \\\" -> \" \\\\\" -> \\" */ #define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" #define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" char ** prepare_spawn (char **argv) { size_t argc; char **new_argv; size_t i; /* Count number of arguments. */ for (argc = 0; argv[argc] != NULL; argc++) ; /* Allocate new argument vector. */ new_argv = XMALLOC (char *, argc + 1); /* Put quoted arguments into the new argument vector. */ for (i = 0; i < argc; i++) { const char *string = argv[i]; if (string[0] == '\0') new_argv[i] = xstrdup ("\"\""); else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) { int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); size_t length; unsigned int backslashes; const char *s; char *quoted_string; char *p; length = 0; backslashes = 0; if (quote_around) length++; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') length += backslashes + 1; length++; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) length += backslashes + 1; quoted_string = XMALLOC (char, length + 1); p = quoted_string; backslashes = 0; if (quote_around) *p++ = '"'; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') { unsigned int j; for (j = backslashes + 1; j > 0; j--) *p++ = '\\'; } *p++ = c; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) { unsigned int j; for (j = backslashes; j > 0; j--) *p++ = '\\'; *p++ = '"'; } *p = '\0'; new_argv[i] = quoted_string; } else new_argv[i] = (char *) string; } new_argv[argc] = NULL; return new_argv; } EOF ;; esac cat <<"EOF" void lt_dump_script (FILE* f) { EOF func_emit_wrapper yes | $SED -n -e ' s/^\(.\{79\}\)\(..*\)/\1\ \2/ h s/\([\\"]\)/\\\1/g s/$/\\n/ s/\([^\n]*\).*/ fputs ("\1", f);/p g D' cat <<"EOF" } EOF } # end: func_emit_cwrapperexe_src # func_win32_import_lib_p ARG # True if ARG is an import lib, as indicated by $file_magic_cmd func_win32_import_lib_p () { $debug_cmd case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in *import*) : ;; *) false ;; esac } # func_suncc_cstd_abi # !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! # Several compiler flags select an ABI that is incompatible with the # Cstd library. Avoid specifying it if any are in CXXFLAGS. 
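# In other words, any of -compat=g, -std=c++NN, -library=stdcxx4 or
# -library=stlport4 already on the command line selects a non-Cstd ABI and
# makes suncc_use_cstd_abi come out 'no'.  A sketch of the outcome (flag
# values taken from the case pattern below; command lines hypothetical):
#
#   $CXX ... -std=c++11 ...     =>  suncc_use_cstd_abi=no
#   $CXX ... (no such flag) ... =>  suncc_use_cstd_abi=yes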
func_suncc_cstd_abi () { $debug_cmd case " $compile_command " in *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) suncc_use_cstd_abi=no ;; *) suncc_use_cstd_abi=yes ;; esac } # func_mode_link arg... func_mode_link () { $debug_cmd case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) # It is impossible to link a dll without this setting, and # we shouldn't force the makefile maintainer to figure out # what system we are compiling for in order to pass an extra # flag for every libtool invocation. # allow_undefined=no # FIXME: Unfortunately, there are problems with the above when trying # to make a dll that has undefined symbols, in which case not # even a static library is built. For now, we need to specify # -no-undefined on the libtool link line when we can be certain # that all symbols are satisfied, otherwise we get a static library. allow_undefined=yes ;; *) allow_undefined=yes ;; esac libtool_args=$nonopt base_compile="$nonopt $@" compile_command=$nonopt finalize_command=$nonopt compile_rpath= finalize_rpath= compile_shlibpath= finalize_shlibpath= convenience= old_convenience= deplibs= old_deplibs= compiler_flags= linker_flags= dllsearchpath= lib_search_path=`pwd` inst_prefix_dir= new_inherited_linker_flags= avoid_version=no bindir= dlfiles= dlprefiles= dlself=no export_dynamic=no export_symbols= export_symbols_regex= generated= libobjs= ltlibs= module=no no_install=no objs= os2dllname= non_pic_objects= precious_files_regex= prefer_static_libs=no preload=false prev= prevarg= release= rpath= xrpath= perm_rpath= temp_rpath= thread_safe=no vinfo= vinfo_number=no weak_libs= single_module=$wl-single_module func_infer_tag $base_compile # We need to know -static, to get the right output filenames. for arg do case $arg in -shared) test yes != "$build_libtool_libs" \ && func_fatal_configuration "cannot build a shared library" build_old_libs=no break ;; -all-static | -static | -static-libtool-libs) case $arg in -all-static) if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then func_warning "complete static linking is impossible in this configuration" fi if test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; -static) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=built ;; -static-libtool-libs) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; esac build_libtool_libs=no build_old_libs=yes break ;; esac done # See if our shared archives depend on static archives. test -n "$old_archive_from_new_cmds" && build_old_libs=yes # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg=$1 shift func_quote_for_eval "$arg" qarg=$func_quote_for_eval_unquoted_result func_append libtool_args " $func_quote_for_eval_result" # If the previous option needs an argument, assign it. if test -n "$prev"; then case $prev in output) func_append compile_command " @OUTPUT@" func_append finalize_command " @OUTPUT@" ;; esac case $prev in bindir) bindir=$arg prev= continue ;; dlfiles|dlprefiles) $preload || { # Add the symbol object into the linking commands. func_append compile_command " @SYMFILE@" func_append finalize_command " @SYMFILE@" preload=: } case $arg in *.la | *.lo) ;; # We handle these cases below. 
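# The two special tokens handled by the next case arms come straight from
# the command line, e.g. (hypothetical invocations):
#
#   libtool --mode=link $CC -o prog prog.o -dlopen self
#   libtool --mode=link $CC -o prog prog.o -dlopen force
#
# 'self' marks the program itself for dlopening; 'force' merely requests
# export-dynamic linking even though nothing in particular is dlopened.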
force) if test no = "$dlself"; then dlself=needless export_dynamic=yes fi prev= continue ;; self) if test dlprefiles = "$prev"; then dlself=yes elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then dlself=yes else dlself=needless export_dynamic=yes fi prev= continue ;; *) if test dlfiles = "$prev"; then func_append dlfiles " $arg" else func_append dlprefiles " $arg" fi prev= continue ;; esac ;; expsyms) export_symbols=$arg test -f "$arg" \ || func_fatal_error "symbol file '$arg' does not exist" prev= continue ;; expsyms_regex) export_symbols_regex=$arg prev= continue ;; framework) case $host in *-*-darwin*) case "$deplibs " in *" $qarg.ltframework "*) ;; *) func_append deplibs " $qarg.ltframework" # this is fixed later ;; esac ;; esac prev= continue ;; inst_prefix) inst_prefix_dir=$arg prev= continue ;; mllvm) # Clang does not use LLVM to link, so we can simply discard any # '-mllvm $arg' options when doing the link step. prev= continue ;; objectlist) if test -f "$arg"; then save_arg=$arg moreargs= for fil in `cat "$save_arg"` do # func_append moreargs " $fil" arg=$fil # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test none = "$pic_object" && test none = "$non_pic_object"; then func_fatal_error "cannot find name of object for '$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result if test none != "$pic_object"; then # Prepend the subdirectory the object is found in. pic_object=$xdir$pic_object if test dlfiles = "$prev"; then if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test dlprefiles = "$prev"; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg=$pic_object fi # Non-PIC object. if test none != "$non_pic_object"; then # Prepend the subdirectory the object is found in. non_pic_object=$xdir$non_pic_object # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test none = "$pic_object"; then arg=$non_pic_object fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object=$pic_object func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "'$arg' is not a valid libtool object" fi fi done else func_fatal_error "link input file '$arg' does not exist" fi arg=$save_arg prev= continue ;; os2dllname) os2dllname=$arg prev= continue ;; precious_regex) precious_files_regex=$arg prev= continue ;; release) release=-$arg prev= continue ;; rpath | xrpath) # We need an absolute path. 
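# Reminder: -rpath takes the libtool library's eventual installation
# directory and must be absolute, as enforced just below.  A hypothetical
# library link:
#
#   libtool --mode=link $CC -o libfoo.la foo.lo -rpath /usr/local/lib
#
# -R, by contrast, only records an extra run-time search path.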
case $arg in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac if test rpath = "$prev"; then case "$rpath " in *" $arg "*) ;; *) func_append rpath " $arg" ;; esac else case "$xrpath " in *" $arg "*) ;; *) func_append xrpath " $arg" ;; esac fi prev= continue ;; shrext) shrext_cmds=$arg prev= continue ;; weak) func_append weak_libs " $arg" prev= continue ;; xcclinker) func_append linker_flags " $qarg" func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xcompiler) func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xlinker) func_append linker_flags " $qarg" func_append compiler_flags " $wl$qarg" prev= func_append compile_command " $wl$qarg" func_append finalize_command " $wl$qarg" continue ;; *) eval "$prev=\"\$arg\"" prev= continue ;; esac fi # test -n "$prev" prevarg=$arg case $arg in -all-static) if test -n "$link_static_flag"; then # See comment for -static flag below, for more details. func_append compile_command " $link_static_flag" func_append finalize_command " $link_static_flag" fi continue ;; -allow-undefined) # FIXME: remove this flag sometime in the future. func_fatal_error "'-allow-undefined' must not be used because it is the default" ;; -avoid-version) avoid_version=yes continue ;; -bindir) prev=bindir continue ;; -dlopen) prev=dlfiles continue ;; -dlpreopen) prev=dlprefiles continue ;; -export-dynamic) export_dynamic=yes continue ;; -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then func_fatal_error "more than one -exported-symbols argument is not allowed" fi if test X-export-symbols = "X$arg"; then prev=expsyms else prev=expsyms_regex fi continue ;; -framework) prev=framework continue ;; -inst-prefix-dir) prev=inst_prefix continue ;; # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* # so, if we see these flags be careful not to treat them like -L -L[A-Z][A-Z]*:*) case $with_gcc/$host in no/*-*-irix* | /*-*-irix*) func_append compile_command " $arg" func_append finalize_command " $arg" ;; esac continue ;; -L*) func_stripname "-L" '' "$arg" if test -z "$func_stripname_result"; then if test "$#" -gt 0; then func_fatal_error "require no space between '-L' and '$1'" else func_fatal_error "need path for '-L' option" fi fi func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) absdir=`cd "$dir" && pwd` test -z "$absdir" && \ func_fatal_error "cannot determine absolute directory name of '$dir'" dir=$absdir ;; esac case "$deplibs " in *" -L$dir "* | *" $arg "*) # Will only happen for absolute or sysroot arguments ;; *) # Preserve sysroot, but never include relative directories case $dir in [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; *) func_append deplibs " -L$dir" ;; esac func_append lib_search_path " $dir" ;; esac case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` case :$dllsearchpath: in *":$dir:"*) ;; ::) dllsearchpath=$dir;; *) func_append dllsearchpath ":$dir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac continue ;; -l*) if test X-lc = "X$arg" || test X-lm = "X$arg"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) # These systems don't actually have a C or math library (as such) continue ;; *-*-os2*) # These systems don't actually have a C library (as such) test X-lc = "X$arg" && continue ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) # Do not include libc due to us having libc/libc_r. test X-lc = "X$arg" && continue ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C and math libraries are in the System framework func_append deplibs " System.ltframework" continue ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype test X-lc = "X$arg" && continue ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work test X-lc = "X$arg" && continue ;; esac elif test X-lc_r = "X$arg"; then case $host in *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) # Do not include libc_r directly, use -pthread flag. continue ;; esac fi func_append deplibs " $arg" continue ;; -mllvm) prev=mllvm continue ;; -module) module=yes continue ;; # Tru64 UNIX uses -model [arg] to determine the layout of C++ # classes, name mangling, and exception handling. # Darwin uses the -arch flag to determine output architecture. -model|-arch|-isysroot|--sysroot) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" prev=xcompiler continue ;; -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case "$new_inherited_linker_flags " in *" $arg "*) ;; * ) func_append new_inherited_linker_flags " $arg" ;; esac continue ;; -multi_module) single_module=$wl-multi_module continue ;; -no-fast-install) fast_install=no continue ;; -no-install) case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) # The PATH hackery in wrapper scripts is required on Windows # and Darwin in order for the loader to find any dlls it needs. 
func_warning "'-no-install' is ignored for $host" func_warning "assuming '-no-fast-install' instead" fast_install=no ;; *) no_install=yes ;; esac continue ;; -no-undefined) allow_undefined=no continue ;; -objectlist) prev=objectlist continue ;; -os2dllname) prev=os2dllname continue ;; -o) prev=output ;; -precious-files-regex) prev=precious_regex continue ;; -release) prev=release continue ;; -rpath) prev=rpath continue ;; -R) prev=xrpath continue ;; -R*) func_stripname '-R' '' "$arg" dir=$func_stripname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; =*) func_stripname '=' '' "$dir" dir=$lt_sysroot$func_stripname_result ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac continue ;; -shared) # The effects of -shared are defined in a previous loop. continue ;; -shrext) prev=shrext continue ;; -static | -static-libtool-libs) # The effects of -static are defined in a previous loop. # We used to do the same as -all-static on platforms that # didn't have a PIC flag, but the assumption that the effects # would be equivalent was wrong. It would break on at least # Digital Unix and AIX. continue ;; -thread-safe) thread_safe=yes continue ;; -version-info) prev=vinfo continue ;; -version-number) prev=vinfo vinfo_number=yes continue ;; -weak) prev=weak continue ;; -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result arg= save_ifs=$IFS; IFS=, for flag in $args; do IFS=$save_ifs func_quote_for_eval "$flag" func_append arg " $func_quote_for_eval_result" func_append compiler_flags " $func_quote_for_eval_result" done IFS=$save_ifs func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Wl,*) func_stripname '-Wl,' '' "$arg" args=$func_stripname_result arg= save_ifs=$IFS; IFS=, for flag in $args; do IFS=$save_ifs func_quote_for_eval "$flag" func_append arg " $wl$func_quote_for_eval_result" func_append compiler_flags " $wl$func_quote_for_eval_result" func_append linker_flags " $func_quote_for_eval_result" done IFS=$save_ifs func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Xcompiler) prev=xcompiler continue ;; -Xlinker) prev=xlinker continue ;; -XCClinker) prev=xcclinker continue ;; # -msg_* for osf cc -msg_*) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result ;; # Flags to be passed through unchanged, with rationale: # -64, -mips[0-9] enable 64-bit mode for the SGI compiler # -r[0-9][0-9]* specify processor for the SGI compiler # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler # +DA*, +DD* enable 64-bit mode for the HP compiler # -q* compiler args for the IBM compiler # -m*, -t[45]*, -txscale* architecture-specific flags for GCC # -F/path path to uninstalled frameworks, gcc on darwin # -p, -pg, --coverage, -fprofile-* profiling flags for GCC # -fstack-protector* stack protector flags for GCC # @file GCC response files # -tp=* Portland pgcc target processor selection # --sysroot=* for sysroot support # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization # -specs=* GCC specs files # -stdlib=* select c++ std lib with clang -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ -specs=*) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result func_append compile_command " $arg" func_append finalize_command " $arg" func_append compiler_flags " $arg" continue 
;; -Z*) if test os2 = "`expr $host : '.*\(os2\)'`"; then # OS/2 uses -Zxxx to specify OS/2-specific options compiler_flags="$compiler_flags $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case $arg in -Zlinker | -Zstack) prev=xcompiler ;; esac continue else # Otherwise treat like 'Some other compiler flag' below func_quote_for_eval "$arg" arg=$func_quote_for_eval_result fi ;; # Some other compiler flag. -* | +*) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result ;; *.$objext) # A standard object. func_append objs " $arg" ;; *.lo) # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test none = "$pic_object" && test none = "$non_pic_object"; then func_fatal_error "cannot find name of object for '$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result test none = "$pic_object" || { # Prepend the subdirectory the object is found in. pic_object=$xdir$pic_object if test dlfiles = "$prev"; then if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test dlprefiles = "$prev"; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg=$pic_object } # Non-PIC object. if test none != "$non_pic_object"; then # Prepend the subdirectory the object is found in. non_pic_object=$xdir$non_pic_object # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test none = "$pic_object"; then arg=$non_pic_object fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object=$pic_object func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "'$arg' is not a valid libtool object" fi fi ;; *.$libext) # An archive. func_append deplibs " $arg" func_append old_deplibs " $arg" continue ;; *.la) # A libtool-controlled library. func_resolve_sysroot "$arg" if test dlfiles = "$prev"; then # This library was specified with -dlopen. func_append dlfiles " $func_resolve_sysroot_result" prev= elif test dlprefiles = "$prev"; then # The library was specified with -dlpreopen. func_append dlprefiles " $func_resolve_sysroot_result" prev= else func_append deplibs " $func_resolve_sysroot_result" fi continue ;; # Some other compiler argument. *) # Unknown arguments in both finalize_command and compile_command need # to be aesthetically quoted because they are evaled later. func_quote_for_eval "$arg" arg=$func_quote_for_eval_result ;; esac # arg # Now actually substitute the argument into the commands. 
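# For reference (illustrative, made-up file names): a .lo file accepted above
# is itself a small shell fragment that func_source reads, e.g.
#   pic_object='.libs/foo.o'
#   non_pic_object='foo.o'
# giving the locations of both the PIC and the non-PIC flavor of the object.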
if test -n "$arg"; then func_append compile_command " $arg" func_append finalize_command " $arg" fi done # argument parsing loop test -n "$prev" && \ func_fatal_help "the '$prevarg' option requires an argument" if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then eval arg=\"$export_dynamic_flag_spec\" func_append compile_command " $arg" func_append finalize_command " $arg" fi oldlibs= # calculate the name of the file, without its directory func_basename "$output" outputname=$func_basename_result libobjs_save=$libobjs if test -n "$shlibpath_var"; then # get the directories listed in $shlibpath_var eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` else shlib_search_path= fi eval sys_lib_search_path=\"$sys_lib_search_path_spec\" eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" # Definition is injected by LT_CONFIG during libtool generation. func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" func_dirname "$output" "/" "" output_objdir=$func_dirname_result$objdir func_to_tool_file "$output_objdir/" tool_output_objdir=$func_to_tool_file_result # Create the object directory. func_mkdir_p "$output_objdir" # Determine the type of output case $output in "") func_fatal_help "you must specify an output file" ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; *.la) linkmode=lib ;; *) linkmode=prog ;; # Anything else should be a program. esac specialdeplibs= libs= # Find all interdependent deplibs by searching for libraries # that are linked more than once (e.g. -la -lb -la) for deplib in $deplibs; do if $opt_preserve_dup_deps; then case "$libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append libs " $deplib" done if test lib = "$linkmode"; then libs="$predeps $libs $compiler_lib_search_path $postdeps" # Compute libraries that are listed more than once in $predeps # $postdeps and mark them as special (i.e., whose duplicates are # not to be eliminated). pre_post_deps= if $opt_duplicate_compiler_generated_deps; then for pre_post_dep in $predeps $postdeps; do case "$pre_post_deps " in *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; esac func_append pre_post_deps " $pre_post_dep" done fi pre_post_deps= fi deplibs= newdependency_libs= newlib_search_path= need_relink=no # whether we're linking any uninstalled libtool libraries notinst_deplibs= # not-installed libtool libraries notinst_path= # paths that contain not-installed libtool libraries case $linkmode in lib) passes="conv dlpreopen link" for file in $dlfiles $dlprefiles; do case $file in *.la) ;; *) func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" ;; esac done ;; prog) compile_deplibs= finalize_deplibs= alldeplibs=false newdlfiles= newdlprefiles= passes="conv scan dlopen dlpreopen link" ;; *) passes="conv" ;; esac for pass in $passes; do # The preopen pass in lib mode reverses $deplibs; put it back here # so that -L comes before libs that need it for instance... 
if test lib,link = "$linkmode,$pass"; then ## FIXME: Find the place where the list is rebuilt in the wrong ## order, and fix it there properly tmp_deplibs= for deplib in $deplibs; do tmp_deplibs="$deplib $tmp_deplibs" done deplibs=$tmp_deplibs fi if test lib,link = "$linkmode,$pass" || test prog,scan = "$linkmode,$pass"; then libs=$deplibs deplibs= fi if test prog = "$linkmode"; then case $pass in dlopen) libs=$dlfiles ;; dlpreopen) libs=$dlprefiles ;; link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; esac fi if test lib,dlpreopen = "$linkmode,$pass"; then # Collect and forward deplibs of preopened libtool libs for lib in $dlprefiles; do # Ignore non-libtool-libs dependency_libs= func_resolve_sysroot "$lib" case $lib in *.la) func_source "$func_resolve_sysroot_result" ;; esac # Collect preopened libtool deplibs, except any this library # has declared as weak libs for deplib in $dependency_libs; do func_basename "$deplib" deplib_base=$func_basename_result case " $weak_libs " in *" $deplib_base "*) ;; *) func_append deplibs " $deplib" ;; esac done done libs=$dlprefiles fi if test dlopen = "$pass"; then # Collect dlpreopened libraries save_deplibs=$deplibs deplibs= fi for deplib in $libs; do lib= found=false case $deplib in -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) if test prog,link = "$linkmode,$pass"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append compiler_flags " $deplib" if test lib = "$linkmode"; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -l*) if test lib != "$linkmode" && test prog != "$linkmode"; then func_warning "'-l' is ignored for archives/objects" continue fi func_stripname '-l' '' "$deplib" name=$func_stripname_result if test lib = "$linkmode"; then searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" else searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" fi for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib=$searchdir/lib$name$search_ext if test -f "$lib"; then if test .la = "$search_ext"; then found=: else found=false fi break 2 fi done done if $found; then # deplib is a libtool library # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, # We need to do some special things here, and not later. if test yes = "$allow_libtool_libs_with_static_runtimes"; then case " $predeps $postdeps " in *" $deplib "*) if func_lalib_p "$lib"; then library_names= old_library= func_source "$lib" for l in $old_library $library_names; do ll=$l done if test "X$ll" = "X$old_library"; then # only static version available found=false func_dirname "$lib" "" "." 
ladir=$func_dirname_result
lib=$ladir/$old_library
if test prog,link = "$linkmode,$pass"; then
  compile_deplibs="$deplib $compile_deplibs"
  finalize_deplibs="$deplib $finalize_deplibs"
else
  deplibs="$deplib $deplibs"
  test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
fi
;;
*) ;;
esac
fi
else # deplib doesn't seem to be a libtool library
if test prog,link = "$linkmode,$pass"; then
  compile_deplibs="$deplib $compile_deplibs"
  finalize_deplibs="$deplib $finalize_deplibs"
else
  deplibs="$deplib $deplibs"
  test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
;; # -l
*.ltframework)
if test prog,link = "$linkmode,$pass"; then
  compile_deplibs="$deplib $compile_deplibs"
  finalize_deplibs="$deplib $finalize_deplibs"
else
  deplibs="$deplib $deplibs"
  if test lib = "$linkmode"; then
    case "$new_inherited_linker_flags " in
    *" $deplib "*) ;;
    * ) func_append new_inherited_linker_flags " $deplib" ;;
    esac
  fi
fi
continue
;;
-L*)
case $linkmode in
lib)
  deplibs="$deplib $deplibs"
  test conv = "$pass" && continue
  newdependency_libs="$deplib $newdependency_libs"
  func_stripname '-L' '' "$deplib"
  func_resolve_sysroot "$func_stripname_result"
  func_append newlib_search_path " $func_resolve_sysroot_result"
  ;;
prog)
  if test conv = "$pass"; then
    deplibs="$deplib $deplibs"
    continue
  fi
  if test scan = "$pass"; then
    deplibs="$deplib $deplibs"
  else
    compile_deplibs="$deplib $compile_deplibs"
    finalize_deplibs="$deplib $finalize_deplibs"
  fi
  func_stripname '-L' '' "$deplib"
  func_resolve_sysroot "$func_stripname_result"
  func_append newlib_search_path " $func_resolve_sysroot_result"
  ;;
*) func_warning "'-L' is ignored for archives/objects" ;;
esac # linkmode
continue
;; # -L
-R*)
if test link = "$pass"; then
  func_stripname '-R' '' "$deplib"
  func_resolve_sysroot "$func_stripname_result"
  dir=$func_resolve_sysroot_result
  # Make sure the xrpath contains only unique directories.
  case "$xrpath " in
  *" $dir "*) ;;
  *) func_append xrpath " $dir" ;;
  esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la)
func_resolve_sysroot "$deplib"
lib=$func_resolve_sysroot_result
;;
*.$libext)
if test conv = "$pass"; then
  deplibs="$deplib $deplibs"
  continue
fi
case $linkmode in
lib)
  # Linking convenience modules into shared libraries is allowed,
  # but linking other static libraries is non-portable.
  case " $dlpreconveniencelibs " in
  *" $deplib "*) ;;
  *)
    valid_a_lib=false
    case $deplibs_check_method in
    match_pattern*)
      set dummy $deplibs_check_method; shift
      match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
      if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
        | $EGREP "$match_pattern_regex" > /dev/null; then
        valid_a_lib=:
      fi
      ;;
    pass_all)
      valid_a_lib=:
      ;;
    esac
    if $valid_a_lib; then
      echo
      $ECHO "*** Warning: Linking the shared library $output against the"
      $ECHO "*** static library $deplib is not portable!"
      deplibs="$deplib $deplibs"
    else
      echo
      $ECHO "*** Warning: Trying to link with static lib archive $deplib."
      echo "*** I have the capability to make that library automatically link in when"
      echo "*** you link to this library. But I can only do this if you have a"
      echo "*** shared version of the library, which you do not appear to have"
      echo "*** because the file extension .$libext of this argument makes me believe"
      echo "*** that it is just a static archive that I should not use here."
fi ;; esac continue ;; prog) if test link != "$pass"; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi continue ;; esac # linkmode ;; # *.$libext *.lo | *.$objext) if test conv = "$pass"; then deplibs="$deplib $deplibs" elif test prog = "$linkmode"; then if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then # If there is no dlopen support or we're linking statically, # we need to preload. func_append newdlprefiles " $deplib" compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append newdlfiles " $deplib" fi fi continue ;; %DEPLIBS%) alldeplibs=: continue ;; esac # case $deplib $found || test -f "$lib" \ || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$lib" \ || func_fatal_error "'$lib' is not a valid libtool archive" func_dirname "$lib" "" "." ladir=$func_dirname_result dlname= dlopen= dlpreopen= libdir= library_names= old_library= inherited_linker_flags= # If the library was installed with an old release of libtool, # it will not redefine variables installed, or shouldnotlink installed=yes shouldnotlink=no avoidtemprpath= # Read the .la file func_source "$lib" # Convert "-framework foo" to "foo.ltframework" if test -n "$inherited_linker_flags"; then tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do case " $new_inherited_linker_flags " in *" $tmp_inherited_linker_flag "*) ;; *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; esac done fi dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` if test lib,link = "$linkmode,$pass" || test prog,scan = "$linkmode,$pass" || { test prog != "$linkmode" && test lib != "$linkmode"; }; then test -n "$dlopen" && func_append dlfiles " $dlopen" test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" fi if test conv = "$pass"; then # Only check for convenience libraries deplibs="$lib $deplibs" if test -z "$libdir"; then if test -z "$old_library"; then func_fatal_error "cannot find name of link library for '$lib'" fi # It is a libtool convenience library, so add in its objects. func_append convenience " $ladir/$objdir/$old_library" func_append old_convenience " $ladir/$objdir/$old_library" elif test prog != "$linkmode" && test lib != "$linkmode"; then func_fatal_error "'$lib' is not a convenience library" fi tmp_libs= for deplib in $dependency_libs; do deplibs="$deplib $deplibs" if $opt_preserve_dup_deps; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done continue fi # $pass = conv # Get the name of the library we link against. linklib= if test -n "$old_library" && { test yes = "$prefer_static_libs" || test built,no = "$prefer_static_libs,$installed"; }; then linklib=$old_library else for l in $old_library $library_names; do linklib=$l done fi if test -z "$linklib"; then func_fatal_error "cannot find name of link library for '$lib'" fi # This library was specified with -dlopen. 
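# Illustrative example (assumed file names): 'libtool --mode=link cc -o prog
# main.o -dlopen libplugin.la' reaches this point; if the host lacks dlopen
# support, or the library has no dlname, the plugin is preloaded via
# $dlprefiles instead of being recorded in $newdlfiles below.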
if test dlopen = "$pass"; then test -z "$libdir" \ && func_fatal_error "cannot -dlopen a convenience library: '$lib'" if test -z "$dlname" || test yes != "$dlopen_support" || test no = "$build_libtool_libs" then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't # bomb out in the load deplibs phase. func_append dlprefiles " $lib $dependency_libs" else func_append newdlfiles " $lib" fi continue fi # $pass = dlopen # We need an absolute path. case $ladir in [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; *) abs_ladir=`cd "$ladir" && pwd` if test -z "$abs_ladir"; then func_warning "cannot determine absolute directory name of '$ladir'" func_warning "passing it literally to the linker, although it might fail" abs_ladir=$ladir fi ;; esac func_basename "$lib" laname=$func_basename_result # Find the relevant object directory and library name. if test yes = "$installed"; then if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then func_warning "library '$lib' was moved." dir=$ladir absdir=$abs_ladir libdir=$abs_ladir else dir=$lt_sysroot$libdir absdir=$lt_sysroot$libdir fi test yes = "$hardcode_automatic" && avoidtemprpath=yes else if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then dir=$ladir absdir=$abs_ladir # Remove this search path later func_append notinst_path " $abs_ladir" else dir=$ladir/$objdir absdir=$abs_ladir/$objdir # Remove this search path later func_append notinst_path " $abs_ladir" fi fi # $installed = yes func_stripname 'lib' '.la' "$laname" name=$func_stripname_result # This library was specified with -dlpreopen. if test dlpreopen = "$pass"; then if test -z "$libdir" && test prog = "$linkmode"; then func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" fi case $host in # special handling for platforms with PE-DLLs. *cygwin* | *mingw* | *cegcc* ) # Linker will automatically link against shared library if both # static and shared are present. Therefore, ensure we extract # symbols from the import library if a shared library is present # (otherwise, the dlopen module name will be incorrect). We do # this by putting the import library name into $newdlprefiles. # We recover the dlopen module name by 'saving' the la file # name in a special purpose variable, and (later) extracting the # dlname from the la file. if test -n "$dlname"; then func_tr_sh "$dir/$linklib" eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" func_append newdlprefiles " $dir/$linklib" else func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" fi ;; * ) # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). if test -n "$old_library"; then func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" # Otherwise, use the dlname, so that lt_dlopen finds it. 
elif test -n "$dlname"; then func_append newdlprefiles " $dir/$dlname" else func_append newdlprefiles " $dir/$linklib" fi ;; esac fi # $pass = dlpreopen if test -z "$libdir"; then # Link the convenience library if test lib = "$linkmode"; then deplibs="$dir/$old_library $deplibs" elif test prog,link = "$linkmode,$pass"; then compile_deplibs="$dir/$old_library $compile_deplibs" finalize_deplibs="$dir/$old_library $finalize_deplibs" else deplibs="$lib $deplibs" # used for prog,scan pass fi continue fi if test prog = "$linkmode" && test link != "$pass"; then func_append newlib_search_path " $ladir" deplibs="$lib $deplibs" linkalldeplibs=false if test no != "$link_all_deplibs" || test -z "$library_names" || test no = "$build_libtool_libs"; then linkalldeplibs=: fi tmp_libs= for deplib in $dependency_libs; do case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; esac # Need to link against all dependency_libs? if $linkalldeplibs; then deplibs="$deplib $deplibs" else # Need to hardcode shared library paths # or/and link against static libraries newdependency_libs="$deplib $newdependency_libs" fi if $opt_preserve_dup_deps; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done # for deplib continue fi # $linkmode = prog... if test prog,link = "$linkmode,$pass"; then if test -n "$library_names" && { { test no = "$prefer_static_libs" || test built,yes = "$prefer_static_libs,$installed"; } || test -z "$old_library"; }; then # We need to hardcode the library path if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then # Make sure the rpath contains only unique directories. case $temp_rpath: in *"$absdir:"*) ;; *) func_append temp_rpath "$absdir:" ;; esac fi # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi # $linkmode,$pass = prog,link... if $alldeplibs && { test pass_all = "$deplibs_check_method" || { test yes = "$build_libtool_libs" && test -n "$library_names"; }; }; then # We only need to search for static libraries continue fi fi link_static=no # Whether the deplib will be linked statically use_static_libs=$prefer_static_libs if test built = "$use_static_libs" && test yes = "$installed"; then use_static_libs=no fi if test -n "$library_names" && { test no = "$use_static_libs" || test -z "$old_library"; }; then case $host in *cygwin* | *mingw* | *cegcc* | *os2*) # No point in relinking DLLs because paths are not encoded func_append notinst_deplibs " $lib" need_relink=no ;; *) if test no = "$installed"; then func_append notinst_deplibs " $lib" need_relink=yes fi ;; esac # This is a shared library # Warn about portability, can't link against -module's on some # systems (darwin). Don't bleat about dlopened modules though! 
dlopenmodule= for dlpremoduletest in $dlprefiles; do if test "X$dlpremoduletest" = "X$lib"; then dlopenmodule=$dlpremoduletest break fi done if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then echo if test prog = "$linkmode"; then $ECHO "*** Warning: Linking the executable $output against the loadable module" else $ECHO "*** Warning: Linking the shared library $output against the loadable module" fi $ECHO "*** $linklib is not portable!" fi if test lib = "$linkmode" && test yes = "$hardcode_into_libs"; then # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi if test -n "$old_archive_from_expsyms_cmds"; then # figure out the soname set dummy $library_names shift realname=$1 shift libname=`eval "\\$ECHO \"$libname_spec\""` # use dlname if we got it. it's perfectly good, no? if test -n "$dlname"; then soname=$dlname elif test -n "$soname_spec"; then # bleh windows case $host in *cygwin* | mingw* | *cegcc* | *os2*) func_arith $current - $age major=$func_arith_result versuffix=-$major ;; esac eval soname=\"$soname_spec\" else soname=$realname fi # Make a new name for the extract_expsyms_cmds to use soroot=$soname func_basename "$soroot" soname=$func_basename_result func_stripname 'lib' '.dll' "$soname" newlib=libimp-$func_stripname_result.a # If the library has no export list, then create one now if test -f "$output_objdir/$soname-def"; then : else func_verbose "extracting exported symbol list from '$soname'" func_execute_cmds "$extract_expsyms_cmds" 'exit $?' fi # Create $newlib if test -f "$output_objdir/$newlib"; then :; else func_verbose "generating import library for '$soname'" func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' 
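# Illustrative example (PE hosts such as mingw, assumed soname): with soname
# libfoo-1.dll the commands above derive newlib=libimp-foo-1.a, extracting an
# export list to $output_objdir/libfoo-1.dll-def first when none exists yet;
# the link then proceeds against the generated import library.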
fi # make sure the library variables are pointing to the new library dir=$output_objdir linklib=$newlib fi # test -n "$old_archive_from_expsyms_cmds" if test prog = "$linkmode" || test relink != "$opt_mode"; then add_shlibpath= add_dir= add= lib_linked=yes case $hardcode_action in immediate | unsupported) if test no = "$hardcode_direct"; then add=$dir/$linklib case $host in *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; *-*-sysv4*uw2*) add_dir=-L$dir ;; *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ *-*-unixware7*) add_dir=-L$dir ;; *-*-darwin* ) # if the lib is a (non-dlopened) module then we cannot # link against it, someone is ignoring the earlier warnings if /usr/bin/file -L $add 2> /dev/null | $GREP ": [^:]* bundle" >/dev/null; then if test "X$dlopenmodule" != "X$lib"; then $ECHO "*** Warning: lib $linklib is a module, not a shared library" if test -z "$old_library"; then echo echo "*** And there doesn't seem to be a static archive available" echo "*** The link will probably fail, sorry" else add=$dir/$old_library fi elif test -n "$old_library"; then add=$dir/$old_library fi fi esac elif test no = "$hardcode_minus_L"; then case $host in *-*-sunos*) add_shlibpath=$dir ;; esac add_dir=-L$dir add=-l$name elif test no = "$hardcode_shlibpath_var"; then add_shlibpath=$dir add=-l$name else lib_linked=no fi ;; relink) if test yes = "$hardcode_direct" && test no = "$hardcode_direct_absolute"; then add=$dir/$linklib elif test yes = "$hardcode_minus_L"; then add_dir=-L$absdir # Try looking first in the location we're being installed to. if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add=-l$name elif test yes = "$hardcode_shlibpath_var"; then add_shlibpath=$dir add=-l$name else lib_linked=no fi ;; *) lib_linked=no ;; esac if test yes != "$lib_linked"; then func_fatal_configuration "unsupported hardcode properties" fi if test -n "$add_shlibpath"; then case :$compile_shlibpath: in *":$add_shlibpath:"*) ;; *) func_append compile_shlibpath "$add_shlibpath:" ;; esac fi if test prog = "$linkmode"; then test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" test -n "$add" && compile_deplibs="$add $compile_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" if test yes != "$hardcode_direct" && test yes != "$hardcode_minus_L" && test yes = "$hardcode_shlibpath_var"; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac fi fi fi if test prog = "$linkmode" || test relink = "$opt_mode"; then add_shlibpath= add_dir= add= # Finalize command for both is simple: just hardcode it. if test yes = "$hardcode_direct" && test no = "$hardcode_direct_absolute"; then add=$libdir/$linklib elif test yes = "$hardcode_minus_L"; then add_dir=-L$libdir add=-l$name elif test yes = "$hardcode_shlibpath_var"; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac add=-l$name elif test yes = "$hardcode_automatic"; then if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib"; then add=$inst_prefix_dir$libdir/$linklib else add=$libdir/$linklib fi else # We cannot seem to hardcode it, guess we'll fake it. add_dir=-L$libdir # Try looking first in the location we're being installed to. 
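# Illustrative example (assumed staging directory): during a staged install
# such as
#   make install DESTDIR=/tmp/stage
# inst_prefix_dir is /tmp/stage, and -L/tmp/stage$libdir is added next to
# -L$libdir so the freshly staged copy can satisfy the relink.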
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add=-l$name fi if test prog = "$linkmode"; then test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" test -n "$add" && finalize_deplibs="$add $finalize_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" fi fi elif test prog = "$linkmode"; then # Here we assume that one of hardcode_direct or hardcode_minus_L # is not unsupported. This is valid on all known static and # shared platforms. if test unsupported != "$hardcode_direct"; then test -n "$old_library" && linklib=$old_library compile_deplibs="$dir/$linklib $compile_deplibs" finalize_deplibs="$dir/$linklib $finalize_deplibs" else compile_deplibs="-l$name -L$dir $compile_deplibs" finalize_deplibs="-l$name -L$dir $finalize_deplibs" fi elif test yes = "$build_libtool_libs"; then # Not a shared library if test pass_all != "$deplibs_check_method"; then # We're trying link a shared library against a static one # but the system doesn't support it. # Just print a warning and add the library to dependency_libs so # that the program can be linked against the static library. echo $ECHO "*** Warning: This system cannot link to static lib archive $lib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have." if test yes = "$module"; then echo "*** But as you try to build a module library, libtool will still create " echo "*** a static module, that should work as long as the dlopening application" echo "*** is linked with the -dlopen flag to resolve symbols at runtime." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using 'nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** 'nm' from GNU binutils and a full rebuild may help." fi if test no = "$build_old_libs"; then build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi else deplibs="$dir/$old_library $deplibs" link_static=yes fi fi # link shared/static library? if test lib = "$linkmode"; then if test -n "$dependency_libs" && { test yes != "$hardcode_into_libs" || test yes = "$build_old_libs" || test yes = "$link_static"; }; then # Extract -R from dependency_libs temp_deplibs= for libdir in $dependency_libs; do case $libdir in -R*) func_stripname '-R' '' "$libdir" temp_xrpath=$func_stripname_result case " $xrpath " in *" $temp_xrpath "*) ;; *) func_append xrpath " $temp_xrpath";; esac;; *) func_append temp_deplibs " $libdir";; esac done dependency_libs=$temp_deplibs fi func_append newlib_search_path " $absdir" # Link against this library test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" # ... 
and its dependency_libs tmp_libs= for deplib in $dependency_libs; do newdependency_libs="$deplib $newdependency_libs" case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result";; *) func_resolve_sysroot "$deplib" ;; esac if $opt_preserve_dup_deps; then case "$tmp_libs " in *" $func_resolve_sysroot_result "*) func_append specialdeplibs " $func_resolve_sysroot_result" ;; esac fi func_append tmp_libs " $func_resolve_sysroot_result" done if test no != "$link_all_deplibs"; then # Add the search paths of all dependency libraries for deplib in $dependency_libs; do path= case $deplib in -L*) path=$deplib ;; *.la) func_resolve_sysroot "$deplib" deplib=$func_resolve_sysroot_result func_dirname "$deplib" "" "." dir=$func_dirname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; *) absdir=`cd "$dir" && pwd` if test -z "$absdir"; then func_warning "cannot determine absolute directory name of '$dir'" absdir=$dir fi ;; esac if $GREP "^installed=no" $deplib > /dev/null; then case $host in *-*-darwin*) depdepl= eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names"; then for tmp in $deplibrary_names; do depdepl=$tmp done if test -f "$absdir/$objdir/$depdepl"; then depdepl=$absdir/$objdir/$depdepl darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` if test -z "$darwin_install_name"; then darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` fi func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" path= fi fi ;; *) path=-L$absdir/$objdir ;; esac else eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "'$deplib' is not a valid libtool archive" test "$absdir" != "$libdir" && \ func_warning "'$deplib' seems to be moved" path=-L$absdir fi ;; esac case " $deplibs " in *" $path "*) ;; *) deplibs="$path $deplibs" ;; esac done fi # link_all_deplibs != no fi # linkmode = lib done # for deplib in $libs if test link = "$pass"; then if test prog = "$linkmode"; then compile_deplibs="$new_inherited_linker_flags $compile_deplibs" finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" else compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` fi fi dependency_libs=$newdependency_libs if test dlpreopen = "$pass"; then # Link the dlpreopened libraries before other libraries for deplib in $save_deplibs; do deplibs="$deplib $deplibs" done fi if test dlopen != "$pass"; then test conv = "$pass" || { # Make sure lib_search_path contains only unique directories. 
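# Illustrative example (assumed paths): a collected search list
# '/usr/lib /opt/lib /usr/lib' collapses below to '/usr/lib /opt/lib';
# the first occurrence of each directory is kept and later duplicates
# are dropped.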
lib_search_path= for dir in $newlib_search_path; do case "$lib_search_path " in *" $dir "*) ;; *) func_append lib_search_path " $dir" ;; esac done newlib_search_path= } if test prog,link = "$linkmode,$pass"; then vars="compile_deplibs finalize_deplibs" else vars=deplibs fi for var in $vars dependency_libs; do # Add libraries to $var in reverse order eval tmp_libs=\"\$$var\" new_libs= for deplib in $tmp_libs; do # FIXME: Pedantically, this is the right thing to do, so # that some nasty dependency loop isn't accidentally # broken: #new_libs="$deplib $new_libs" # Pragmatically, this seems to cause very few problems in # practice: case $deplib in -L*) new_libs="$deplib $new_libs" ;; -R*) ;; *) # And here is the reason: when a library appears more # than once as an explicit dependence of a library, or # is implicitly linked in more than once by the # compiler, it is considered special, and multiple # occurrences thereof are not removed. Compare this # with having the same library being listed as a # dependency of multiple other libraries: in this case, # we know (pedantically, we assume) the library does not # need to be listed more than once, so we keep only the # last copy. This is not always right, but it is rare # enough that we require users that really mean to play # such unportable linking tricks to link the library # using -Wl,-lname, so that libtool does not consider it # for duplicate removal. case " $specialdeplibs " in *" $deplib "*) new_libs="$deplib $new_libs" ;; *) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$deplib $new_libs" ;; esac ;; esac ;; esac done tmp_libs= for deplib in $new_libs; do case $deplib in -L*) case " $tmp_libs " in *" $deplib "*) ;; *) func_append tmp_libs " $deplib" ;; esac ;; *) func_append tmp_libs " $deplib" ;; esac done eval $var=\"$tmp_libs\" done # for var fi # Add Sun CC postdeps if required: test CXX = "$tagname" && { case $host_os in linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 func_suncc_cstd_abi if test no != "$suncc_use_cstd_abi"; then func_append postdeps ' -library=Cstd -library=Crun' fi ;; esac ;; solaris*) func_cc_basename "$CC" case $func_cc_basename_result in CC* | sunCC*) func_suncc_cstd_abi if test no != "$suncc_use_cstd_abi"; then func_append postdeps ' -library=Cstd -library=Crun' fi ;; esac ;; esac } # Last step: remove runtime libs from dependency_libs # (they stay in deplibs) tmp_libs= for i in $dependency_libs; do case " $predeps $postdeps $compiler_lib_search_path " in *" $i "*) i= ;; esac if test -n "$i"; then func_append tmp_libs " $i" fi done dependency_libs=$tmp_libs done # for pass if test prog = "$linkmode"; then dlfiles=$newdlfiles fi if test prog = "$linkmode" || test lib = "$linkmode"; then dlprefiles=$newdlprefiles fi case $linkmode in oldlib) if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then func_warning "'-dlopen' is ignored for archives" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "'-l' and '-L' are ignored for archives" ;; esac test -n "$rpath" && \ func_warning "'-rpath' is ignored for archives" test -n "$xrpath" && \ func_warning "'-R' is ignored for archives" test -n "$vinfo" && \ func_warning "'-version-info/-version-number' is ignored for archives" test -n "$release" && \ func_warning "'-release' is ignored for archives" test -n "$export_symbols$export_symbols_regex" && \ func_warning "'-export-symbols' is ignored for archives" # Now set the variables for building old libraries. 
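# Illustrative example (assumed file names): a request like
#   libtool --mode=link cc -o libfoo.a foo.lo bar.lo
# lands in this oldlib branch; shared-library options such as -rpath or
# -version-info have no meaning for a plain archive, hence the warnings above.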
build_libtool_libs=no oldlibs=$output func_append objs "$old_deplibs" ;; lib) # Make sure we only generate libraries of the form 'libNAME.la'. case $outputname in lib*) func_stripname 'lib' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) test no = "$module" \ && func_fatal_help "libtool library '$output' must begin with 'lib'" if test no != "$need_lib_prefix"; then # Add the "lib" prefix for modules if required func_stripname '' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else func_stripname '' '.la' "$outputname" libname=$func_stripname_result fi ;; esac if test -n "$objs"; then if test pass_all != "$deplibs_check_method"; then func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" else echo $ECHO "*** Warning: Linking the shared library $output against the non-libtool" $ECHO "*** objects $objs is not portable!" func_append libobjs " $objs" fi fi test no = "$dlself" \ || func_warning "'-dlopen self' is ignored for libtool libraries" set dummy $rpath shift test 1 -lt "$#" \ && func_warning "ignoring multiple '-rpath's for a libtool library" install_libdir=$1 oldlibs= if test -z "$rpath"; then if test yes = "$build_libtool_libs"; then # Building a libtool convenience library. # Some compilers have problems with a '.al' extension so # convenience libraries should have the same extension an # archive normally would. oldlibs="$output_objdir/$libname.$libext $oldlibs" build_libtool_libs=convenience build_old_libs=yes fi test -n "$vinfo" && \ func_warning "'-version-info/-version-number' is ignored for convenience libraries" test -n "$release" && \ func_warning "'-release' is ignored for convenience libraries" else # Parse the version information argument. save_ifs=$IFS; IFS=: set dummy $vinfo 0 0 0 shift IFS=$save_ifs test -n "$7" && \ func_fatal_help "too many parameters to '-version-info'" # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible case $vinfo_number in yes) number_major=$1 number_minor=$2 number_revision=$3 # # There are really only two kinds -- those that # use the current revision as the major version # and those that subtract age and use age as # a minor version. But, then there is irix # that has an extra 1 added just for fun # case $version_type in # correct linux to gnu/linux during the next big refactor darwin|freebsd-elf|linux|osf|windows|none) func_arith $number_major + $number_minor current=$func_arith_result age=$number_minor revision=$number_revision ;; freebsd-aout|qnx|sunos) current=$number_major revision=$number_minor age=0 ;; irix|nonstopux) func_arith $number_major + $number_minor current=$func_arith_result age=$number_minor revision=$number_minor lt_irix_increment=no ;; esac ;; no) current=$1 revision=$2 age=$3 ;; esac # Check that each of the things are valid numbers. 
case $current in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "CURRENT '$current' must be a nonnegative integer" func_fatal_error "'$vinfo' is not valid version information" ;; esac case $revision in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "REVISION '$revision' must be a nonnegative integer" func_fatal_error "'$vinfo' is not valid version information" ;; esac case $age in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "AGE '$age' must be a nonnegative integer" func_fatal_error "'$vinfo' is not valid version information" ;; esac if test "$age" -gt "$current"; then func_error "AGE '$age' is greater than the current interface number '$current'" func_fatal_error "'$vinfo' is not valid version information" fi # Calculate the version variables. major= versuffix= verstring= case $version_type in none) ;; darwin) # Like Linux, but with the current version available in # verstring for coding it into the library header func_arith $current - $age major=.$func_arith_result versuffix=$major.$age.$revision # Darwin ld doesn't like 0 for these options... func_arith $current + 1 minor_current=$func_arith_result xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" # On Darwin other compilers case $CC in nagfor*) verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" ;; *) verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" ;; esac ;; freebsd-aout) major=.$current versuffix=.$current.$revision ;; freebsd-elf) func_arith $current - $age major=.$func_arith_result versuffix=$major.$age.$revision ;; irix | nonstopux) if test no = "$lt_irix_increment"; then func_arith $current - $age else func_arith $current - $age + 1 fi major=$func_arith_result case $version_type in nonstopux) verstring_prefix=nonstopux ;; *) verstring_prefix=sgi ;; esac verstring=$verstring_prefix$major.$revision # Add in all the interfaces that we are compatible with. loop=$revision while test 0 -ne "$loop"; do func_arith $revision - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring=$verstring_prefix$major.$iface:$verstring done # Before this point, $major must not contain '.'. major=.$major versuffix=$major.$revision ;; linux) # correct to gnu/linux during the next big refactor func_arith $current - $age major=.$func_arith_result versuffix=$major.$age.$revision ;; osf) func_arith $current - $age major=.$func_arith_result versuffix=.$current.$age.$revision verstring=$current.$age.$revision # Add in all the interfaces that we are compatible with. loop=$age while test 0 -ne "$loop"; do func_arith $current - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring=$verstring:$iface.0 done # Make executables depend on our current version. func_append verstring ":$current.0" ;; qnx) major=.$current versuffix=.$current ;; sco) major=.$current versuffix=.$current ;; sunos) major=.$current versuffix=.$current.$revision ;; windows) # Use '-' rather than '.', since we only want one # extension on DOS 8.3 file systems. 
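# Illustrative example (assumed library name): '-version-info 3:2:1' gives
# current=3, revision=2, age=1 (age must not exceed current); on
# version_type=linux this yields major=.2 and versuffix=.2.1.2, i.e.
# libfoo.so.2.1.2, while the windows branch below computes major=2 and
# names the DLL like libfoo-2.dll.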
func_arith $current - $age major=$func_arith_result versuffix=-$major ;; *) func_fatal_configuration "unknown library version type '$version_type'" ;; esac # Clear the version info if we defaulted, and they specified a release. if test -z "$vinfo" && test -n "$release"; then major= case $version_type in darwin) # we can't check for "0.0" in archive_cmds due to quoting # problems, so we reset it completely verstring= ;; *) verstring=0.0 ;; esac if test no = "$need_version"; then versuffix= else versuffix=.0.0 fi fi # Remove version info from name if versioning should be avoided if test yes,no = "$avoid_version,$need_version"; then major= versuffix= verstring= fi # Check to see if the archive will have undefined symbols. if test yes = "$allow_undefined"; then if test unsupported = "$allow_undefined_flag"; then if test yes = "$build_old_libs"; then func_warning "undefined symbols not allowed in $host shared libraries; building static only" build_libtool_libs=no else func_fatal_error "can't build $host shared library unless -no-undefined is specified" fi fi else # Don't allow undefined symbols. allow_undefined_flag=$no_undefined_flag fi fi func_generate_dlsyms "$libname" "$libname" : func_append libobjs " $symfileobj" test " " = "$libobjs" && libobjs= if test relink != "$opt_mode"; then # Remove our outputs, but don't remove object files since they # may have been created when compiling PIC objects. removelist= tempremovelist=`$ECHO "$output_objdir/*"` for p in $tempremovelist; do case $p in *.$objext | *.gcno) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) if test -n "$precious_files_regex"; then if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 then continue fi fi func_append removelist " $p" ;; *) ;; esac done test -n "$removelist" && \ func_show_eval "${RM}r \$removelist" fi # Now set the variables for building old libraries. if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then func_append oldlibs " $output_objdir/$libname.$libext" # Transform .lo files to .o files. oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` fi # Eliminate all temporary directories. #for path in $notinst_path; do # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` #done if test -n "$xrpath"; then # If the user specified any rpath flags, then add them. temp_xrpath= for libdir in $xrpath; do func_replace_sysroot "$libdir" func_append temp_xrpath " -R$func_replace_sysroot_result" case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then dependency_libs="$temp_xrpath $dependency_libs" fi fi # Make sure dlfiles contains only unique files that won't be dlpreopened old_dlfiles=$dlfiles dlfiles= for lib in $old_dlfiles; do case " $dlprefiles $dlfiles " in *" $lib "*) ;; *) func_append dlfiles " $lib" ;; esac done # Make sure dlprefiles contains only unique files old_dlprefiles=$dlprefiles dlprefiles= for lib in $old_dlprefiles; do case "$dlprefiles " in *" $lib "*) ;; *) func_append dlprefiles " $lib" ;; esac done if test yes = "$build_libtool_libs"; then if test -n "$rpath"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) # these systems don't actually have a c library (as such)! 
;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C library is in the System framework func_append deplibs " System.ltframework" ;; *-*-netbsd*) # Don't link with libc until the a.out ld.so is fixed. ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work ;; *) # Add libc to deplibs on all other systems if necessary. if test yes = "$build_libtool_need_lc"; then func_append deplibs " -lc" fi ;; esac fi # Transform deplibs into only deplibs that can be linked in shared. name_save=$name libname_save=$libname release_save=$release versuffix_save=$versuffix major_save=$major # I'm not sure if I'm treating the release correctly. I think # release should show up in the -l (ie -lgmp5) so we don't want to # add it in twice. Is that correct? release= versuffix= major= newdeplibs= droppeddeps=no case $deplibs_check_method in pass_all) # Don't check for shared/static. Everything works. # This might be a little naive. We might want to check # whether the library exists or not. But this is on # osf3 & osf4 and I'm not really sure... Just # implementing what was already the behavior. newdeplibs=$deplibs ;; test_compile) # This code stresses the "libraries are programs" paradigm to its # limits. Maybe even breaks it. We compile a program, linking it # against the deplibs as a proxy for the library. Then we can check # whether they linked in statically or dynamically with ldd. $opt_dry_run || $RM conftest.c cat > conftest.c </dev/null` $nocaseglob else potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` fi for potent_lib in $potential_libs; do # Follow soft links. if ls -lLd "$potent_lib" 2>/dev/null | $GREP " -> " >/dev/null; then continue fi # The statement above tries to avoid entering an # endless loop below, in case of cyclic links. # We might still enter an endless loop, since a link # loop can be closed while we follow links, # but so what? potlib=$potent_lib while test -h "$potlib" 2>/dev/null; do potliblink=`ls -ld $potlib | $SED 's/.* -> //'` case $potliblink in [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; esac done if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | $SED -e 10q | $EGREP "$file_magic_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib= break 2 fi done done fi if test -n "$a_deplib"; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib"; then $ECHO "*** with $libname but no candidates were found. (...for file magic test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a file magic. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. 
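# Illustrative note: $deplibs_check_method is chosen by configure for the
# host, e.g. 'pass_all' on typical ELF systems, 'file_magic <regex>' as
# handled above, or 'match_pattern <regex>' as handled in the next branch.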
;; match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` for a_deplib in $deplibs; do case $a_deplib in -l*) func_stripname -l '' "$a_deplib" name=$func_stripname_result if test yes = "$allow_libtool_libs_with_static_runtimes"; then case " $predeps $postdeps " in *" $a_deplib "*) func_append newdeplibs " $a_deplib" a_deplib= ;; esac fi if test -n "$a_deplib"; then libname=`eval "\\$ECHO \"$libname_spec\""` for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do potential_libs=`ls $i/$libname[.-]* 2>/dev/null` for potent_lib in $potential_libs; do potlib=$potent_lib # see symlink-check above in file_magic test if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ $EGREP "$match_pattern_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib= break 2 fi done done fi if test -n "$a_deplib"; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib"; then $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a regex pattern. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. ;; none | unknown | *) newdeplibs= tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` if test yes = "$allow_libtool_libs_with_static_runtimes"; then for i in $predeps $postdeps; do # can't use Xsed below, because $i might contain '/' tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` done fi case $tmp_deplibs in *[!\ \ ]*) echo if test none = "$deplibs_check_method"; then echo "*** Warning: inter-library dependencies are not supported in this platform." else echo "*** Warning: inter-library dependencies are not known to be supported." fi echo "*** All declared inter-library dependencies are being dropped." droppeddeps=yes ;; esac ;; esac versuffix=$versuffix_save major=$major_save release=$release_save libname=$libname_save name=$name_save case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac if test yes = "$droppeddeps"; then if test yes = "$module"; then echo echo "*** Warning: libtool could not satisfy all declared inter-library" $ECHO "*** dependencies of module $libname. Therefore, libtool will create" echo "*** a static module, that should work as long as the dlopening" echo "*** application is linked with the -dlopen flag." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using 'nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** 'nm' from GNU binutils and a full rebuild may help." 
fi if test no = "$build_old_libs"; then oldlibs=$output_objdir/$libname.$libext build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi else echo "*** The inter-library dependencies that have been dropped here will be" echo "*** automatically added whenever a program is linked with this library" echo "*** or is declared to -dlopen it." if test no = "$allow_undefined"; then echo echo "*** Since this library must not contain undefined symbols," echo "*** because either the platform does not support them or" echo "*** it was explicitly requested with -no-undefined," echo "*** libtool will only create a static version of it." if test no = "$build_old_libs"; then oldlibs=$output_objdir/$libname.$libext build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi fi fi # Done checking deplibs! deplibs=$newdeplibs fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" case $host in *-*-darwin*) newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done deplibs=$new_libs # All the library-specific variables (install_libdir is set above). library_names= old_library= dlname= # Test again, we may have decided not to build it any more if test yes = "$build_libtool_libs"; then # Remove $wl instances when linking with ld. # FIXME: should test the right _cmds variable. case $archive_cmds in *\$LD\ *) wl= ;; esac if test yes = "$hardcode_into_libs"; then # Hardcode the library paths hardcode_libdirs= dep_rpath= rpath=$finalize_rpath test relink = "$opt_mode" || rpath=$compile_rpath$rpath for libdir in $rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then func_replace_sysroot "$libdir" libdir=$func_replace_sysroot_result if test -z "$hardcode_libdirs"; then hardcode_libdirs=$libdir else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append dep_rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir=$hardcode_libdirs eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" fi if test -n "$runpath_var" && test -n "$perm_rpath"; then # We should set the runpath_var. 
rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" fi test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" fi shlibpath=$finalize_shlibpath test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath if test -n "$shlibpath"; then eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" fi # Get the real and link names of the library. eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names shift realname=$1 shift if test -n "$soname_spec"; then eval soname=\"$soname_spec\" else soname=$realname fi if test -z "$dlname"; then dlname=$soname fi lib=$output_objdir/$realname linknames= for link do func_append linknames " $link" done # Use standard objects if they are pic test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` test "X$libobjs" = "X " && libobjs= delfiles= if test -n "$export_symbols" && test -n "$include_expsyms"; then $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" export_symbols=$output_objdir/$libname.uexp func_append delfiles " $export_symbols" fi orig_export_symbols= case $host_os in cygwin* | mingw* | cegcc*) if test -n "$export_symbols" && test -z "$export_symbols_regex"; then # exporting using user supplied symfile func_dll_def_p "$export_symbols" || { # and it's NOT already a .def file. Must figure out # which of the given symbols are data symbols and tag # them as such. So, trigger use of export_symbols_cmds. # export_symbols gets reassigned inside the "prepare # the list of exported symbols" if statement, so the # include_expsyms logic still works. orig_export_symbols=$export_symbols export_symbols= always_export_symbols=yes } fi ;; esac # Prepare the list of exported symbols if test -z "$export_symbols"; then if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then func_verbose "generating symbol list for '$libname.la'" export_symbols=$output_objdir/$libname.exp $opt_dry_run || $RM $export_symbols cmds=$export_symbols_cmds save_ifs=$IFS; IFS='~' for cmd1 in $cmds; do IFS=$save_ifs # Take the normal branch if the nm_file_list_spec branch # doesn't work or if tool conversion is not needed. case $nm_file_list_spec~$to_tool_file_cmd in *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) try_normal_branch=yes eval cmd=\"$cmd1\" func_len " $cmd" len=$func_len_result ;; *) try_normal_branch=no ;; esac if test yes = "$try_normal_branch" \ && { test "$len" -lt "$max_cmd_len" \ || test "$max_cmd_len" -le -1; } then func_show_eval "$cmd" 'exit $?' skipped_export=false elif test -n "$nm_file_list_spec"; then func_basename "$output" output_la=$func_basename_result save_libobjs=$libobjs save_output=$output output=$output_objdir/$output_la.nm func_to_tool_file "$output" libobjs=$nm_file_list_spec$func_to_tool_file_result func_append delfiles " $output" func_verbose "creating $NM input file list: $output" for obj in $save_libobjs; do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > "$output" eval cmd=\"$cmd1\" func_show_eval "$cmd" 'exit $?' output=$save_output libobjs=$save_libobjs skipped_export=false else # The command line is too long to execute in one step. func_verbose "using reloadable object file for export list..." skipped_export=: # Break out early, otherwise skipped_export may be # set to false by a later but shorter cmd. 
break fi done IFS=$save_ifs if test -n "$export_symbols_regex" && test : != "$skipped_export"; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi fi if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols=$export_symbols test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test : != "$skipped_export" && test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for '$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands, which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi tmp_deplibs= for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; *) func_append tmp_deplibs " $test_deplib" ;; esac done deplibs=$tmp_deplibs if test -n "$convenience"; then if test -n "$whole_archive_flag_spec" && test yes = "$compiler_needs_object" && test -z "$libobjs"; then # extract the archives, so we have objects to list. # TODO: could optimize this to just extract one archive. whole_archive_flag_spec= fi if test -n "$whole_archive_flag_spec"; then save_libobjs=$libobjs eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= else gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $convenience func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi fi if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" func_append linker_flags " $flag" fi # Make a backup of the uninstalled library when relinking if test relink = "$opt_mode"; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi # Do each of the archive commands. if test yes = "$module" && test -n "$module_cmds"; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then eval test_cmds=\"$module_expsym_cmds\" cmds=$module_expsym_cmds else eval test_cmds=\"$module_cmds\" cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then eval test_cmds=\"$archive_expsym_cmds\" cmds=$archive_expsym_cmds else eval test_cmds=\"$archive_cmds\" cmds=$archive_cmds fi fi if test : != "$skipped_export" && func_len " $test_cmds" && len=$func_len_result && test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else # The command line is too long to link in one step, link piecewise # or, if using GNU ld and skipped_export is not :, use a linker # script. # Save the value of $output and $libobjs because we want to # use them later. 
If we have whole_archive_flag_spec, we # want to use save_libobjs as it was before # whole_archive_flag_spec was expanded, because we can't # assume the linker understands whole_archive_flag_spec. # This may have to be revisited, in case too many # convenience libraries get linked in and end up exceeding # the spec. if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then save_libobjs=$libobjs fi save_output=$output func_basename "$output" output_la=$func_basename_result # Clear the reloadable object creation command queue and # initialize k to one. test_cmds= concat_cmds= objlist= last_robj= k=1 if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then output=$output_objdir/$output_la.lnkscript func_verbose "creating GNU ld script: $output" echo 'INPUT (' > $output for obj in $save_libobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done echo ')' >> $output func_append delfiles " $output" func_to_tool_file "$output" output=$func_to_tool_file_result elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then output=$output_objdir/$output_la.lnk func_verbose "creating linker input file list: $output" : > $output set x $save_libobjs shift firstobj= if test yes = "$compiler_needs_object"; then firstobj="$1 " shift fi for obj do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done func_append delfiles " $output" func_to_tool_file "$output" output=$firstobj\"$file_list_spec$func_to_tool_file_result\" else if test -n "$save_libobjs"; then func_verbose "creating reloadable object files..." output=$output_objdir/$output_la-$k.$objext eval test_cmds=\"$reload_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 # Loop over the list of objects to be linked. for obj in $save_libobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result if test -z "$objlist" || test "$len" -lt "$max_cmd_len"; then func_append objlist " $obj" else # The command $test_cmds is almost too long, add a # command to the queue. if test 1 -eq "$k"; then # The first file doesn't have a previous command to add. reload_objs=$objlist eval concat_cmds=\"$reload_cmds\" else # All subsequent reloadable object files will link in # the last one created. reload_objs="$objlist $last_robj" eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" fi last_robj=$output_objdir/$output_la-$k.$objext func_arith $k + 1 k=$func_arith_result output=$output_objdir/$output_la-$k.$objext objlist=" $obj" func_len " $last_robj" func_arith $len0 + $func_len_result len=$func_arith_result fi done # Handle the remaining objects by creating one last # reloadable object file. All subsequent reloadable object # files will link in the last one created. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ reload_objs="$objlist $last_robj" eval concat_cmds=\"\$concat_cmds$reload_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi func_append delfiles " $output" else output= fi ${skipped_export-false} && { func_verbose "generating symbol list for '$libname.la'" export_symbols=$output_objdir/$libname.exp $opt_dry_run || $RM $export_symbols libobjs=$output # Append the command to create the export file. 
test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi } test -n "$save_libobjs" && func_verbose "creating a temporary reloadable object file: $output" # Loop through the commands generated above and execute them. save_ifs=$IFS; IFS='~' for cmd in $concat_cmds; do IFS=$save_ifs $opt_quiet || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test relink = "$opt_mode"; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS=$save_ifs if test -n "$export_symbols_regex" && ${skipped_export-false}; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi ${skipped_export-false} && { if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols=$export_symbols test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for '$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands, which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi } libobjs=$output # Restore the value of output. output=$save_output if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= fi # Expand the library linking commands again to reset the # value of $libobjs for piecewise linking. # Do each of the archive commands. if test yes = "$module" && test -n "$module_cmds"; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then cmds=$module_expsym_cmds else cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then cmds=$archive_expsym_cmds else cmds=$archive_cmds fi fi fi if test -n "$delfiles"; then # Append the command to remove temporary files to $cmds. eval cmds=\"\$cmds~\$RM $delfiles\" fi # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi save_ifs=$IFS; IFS='~' for cmd in $cmds; do IFS=$sp$nl eval cmd=\"$cmd\" IFS=$save_ifs $opt_quiet || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? 
# Restore the uninstalled library and exit if test relink = "$opt_mode"; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS=$save_ifs # Restore the uninstalled library and exit if test relink = "$opt_mode"; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? if test -n "$convenience"; then if test -z "$whole_archive_flag_spec"; then func_show_eval '${RM}r "$gentop"' fi fi exit $EXIT_SUCCESS fi # Create links to the real library. for linkname in $linknames; do if test "$realname" != "$linkname"; then func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done # If -module or -export-dynamic was specified, set the dlname. if test yes = "$module" || test yes = "$export_dynamic"; then # On all known operating systems, these are identical. dlname=$soname fi fi ;; obj) if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then func_warning "'-dlopen' is ignored for objects" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "'-l' and '-L' are ignored for objects" ;; esac test -n "$rpath" && \ func_warning "'-rpath' is ignored for objects" test -n "$xrpath" && \ func_warning "'-R' is ignored for objects" test -n "$vinfo" && \ func_warning "'-version-info' is ignored for objects" test -n "$release" && \ func_warning "'-release' is ignored for objects" case $output in *.lo) test -n "$objs$old_deplibs" && \ func_fatal_error "cannot build library object '$output' from non-libtool objects" libobj=$output func_lo2o "$libobj" obj=$func_lo2o_result ;; *) libobj= obj=$output ;; esac # Delete the old objects. $opt_dry_run || $RM $obj $libobj # Objects from convenience libraries. This assumes # single-version convenience libraries. Whenever we create # different ones for PIC/non-PIC, this we'll have to duplicate # the extraction. reload_conv_objs= gentop= # if reload_cmds runs $LD directly, get rid of -Wl from # whole_archive_flag_spec and hope we can get by with turning comma # into space. case $reload_cmds in *\$LD[\ \$]*) wl= ;; esac if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags else gentop=$output_objdir/${obj}x func_append generated " $gentop" func_extract_archives $gentop $convenience reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi # If we're not building shared, we need to use non_pic_objs test yes = "$build_libtool_libs" || libobjs=$non_pic_objects # Create the old-style object. reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs output=$obj func_execute_cmds "$reload_cmds" 'exit $?' # Exit if we aren't doing a library object file. if test -z "$libobj"; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS fi test yes = "$build_libtool_libs" || { if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi # Create an invalid libtool object if no PIC, so that we don't # accidentally link it into a program. # $show "echo timestamp > $libobj" # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? 
exit $EXIT_SUCCESS } if test -n "$pic_flag" || test default != "$pic_mode"; then # Only do commands if we really have different PIC objects. reload_objs="$libobjs $reload_conv_objs" output=$libobj func_execute_cmds "$reload_cmds" 'exit $?' fi if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS ;; prog) case $host in *cygwin*) func_stripname '' '.exe' "$output" output=$func_stripname_result.exe;; esac test -n "$vinfo" && \ func_warning "'-version-info' is ignored for programs" test -n "$release" && \ func_warning "'-release' is ignored for programs" $preload \ && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library is the System framework compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac case $host in *-*-darwin*) # Don't allow lazy linking, it breaks C++ global constructors # But is supposedly fixed on 10.4 or later (yay!). if test CXX = "$tagname"; then case ${MACOSX_DEPLOYMENT_TARGET-10.0} in 10.[0123]) func_append compile_command " $wl-bind_at_load" func_append finalize_command " $wl-bind_at_load" ;; esac fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $compile_deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $compile_deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done compile_deplibs=$new_libs func_append compile_command " $compile_deplibs" func_append finalize_command " $finalize_deplibs" if test -n "$rpath$xrpath"; then # If the user specified any rpath flags, then add them. for libdir in $rpath $xrpath; do # This is the magic to use -rpath. case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done fi # Now hardcode the library paths rpath= hardcode_libdirs= for libdir in $compile_rpath $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs=$libdir else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$libdir:"*) ;; ::) dllsearchpath=$libdir;; *) func_append dllsearchpath ":$libdir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir=$hardcode_libdirs eval rpath=\" $hardcode_libdir_flag_spec\" fi compile_rpath=$rpath rpath= hardcode_libdirs= for libdir in $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs=$libdir else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in *" $libdir "*) ;; *) func_append finalize_perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir=$hardcode_libdirs eval rpath=\" $hardcode_libdir_flag_spec\" fi finalize_rpath=$rpath if test -n "$libobjs" && test yes = "$build_old_libs"; then # Transform all the library objects into standard objects. compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` fi func_generate_dlsyms "$outputname" "@PROGRAM@" false # template prelinking step if test -n "$prelink_cmds"; then func_execute_cmds "$prelink_cmds" 'exit $?' fi wrappers_required=: case $host in *cegcc* | *mingw32ce*) # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. wrappers_required=false ;; *cygwin* | *mingw* ) test yes = "$build_libtool_libs" || wrappers_required=false ;; *) if test no = "$need_relink" || test yes != "$build_libtool_libs"; then wrappers_required=false fi ;; esac $wrappers_required || { # Replace the output file specification. compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` link_command=$compile_command$compile_rpath # We have no uninstalled library dependencies, so finalize right now. exit_status=0 func_show_eval "$link_command" 'exit_status=$?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Delete the generated files. 
if test -f "$output_objdir/${outputname}S.$objext"; then func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' fi exit $exit_status } if test -n "$compile_shlibpath$finalize_shlibpath"; then compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" fi if test -n "$finalize_shlibpath"; then finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" fi compile_var= finalize_var= if test -n "$runpath_var"; then if test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi if test -n "$finalize_perm_rpath"; then # We should set the runpath_var. rpath= for dir in $finalize_perm_rpath; do func_append rpath "$dir:" done finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi fi if test yes = "$no_install"; then # We don't need to create a wrapper script. link_command=$compile_var$compile_command$compile_rpath # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` # Delete the old output file. $opt_dry_run || $RM $output # Link the executable and exit func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi exit $EXIT_SUCCESS fi case $hardcode_action,$fast_install in relink,*) # Fast installation is not supported link_command=$compile_var$compile_command$compile_rpath relink_command=$finalize_var$finalize_command$finalize_rpath func_warning "this platform does not like uninstalled shared libraries" func_warning "'$output' will be relinked during installation" ;; *,yes) link_command=$finalize_var$compile_command$finalize_rpath relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` ;; *,no) link_command=$compile_var$compile_command$compile_rpath relink_command=$finalize_var$finalize_command$finalize_rpath ;; *,needless) link_command=$finalize_var$compile_command$finalize_rpath relink_command= ;; esac # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` # Delete the old output files. $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output_objdir/$outputname" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Now create the wrapper script. func_verbose "creating $output" # Quote the relink command for shipping. 
if test -n "$relink_command"; then # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done relink_command="(cd `pwd`; $relink_command)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` fi # Only actually do things if not in dry run mode. $opt_dry_run || { # win32 will think the script is a binary if it has # a .exe suffix, so we strip it off here. case $output in *.exe) func_stripname '' '.exe' "$output" output=$func_stripname_result ;; esac # test for cygwin because mv fails w/o .exe extensions case $host in *cygwin*) exeext=.exe func_stripname '' '.exe' "$outputname" outputname=$func_stripname_result ;; *) exeext= ;; esac case $host in *cygwin* | *mingw* ) func_dirname_and_basename "$output" "" "." output_name=$func_basename_result output_path=$func_dirname_result cwrappersource=$output_path/$objdir/lt-$output_name.c cwrapper=$output_path/$output_name.exe $RM $cwrappersource $cwrapper trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 func_emit_cwrapperexe_src > $cwrappersource # The wrapper executable is built using the $host compiler, # because it contains $host paths and files. If cross- # compiling, it, like the target executable, must be # executed on the $host or under an emulation environment. $opt_dry_run || { $LTCC $LTCFLAGS -o $cwrapper $cwrappersource $STRIP $cwrapper } # Now, create the wrapper script for func_source use: func_ltwrapper_scriptname $cwrapper $RM $func_ltwrapper_scriptname_result trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 $opt_dry_run || { # note: this script will not be executed, so do not chmod. if test "x$build" = "x$host"; then $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else func_emit_wrapper no > $func_ltwrapper_scriptname_result fi } ;; * ) $RM $output trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 func_emit_wrapper no > $output chmod +x $output ;; esac } exit $EXIT_SUCCESS ;; esac # See if we need to build an old-fashioned archive. for oldlib in $oldlibs; do case $build_libtool_libs in convenience) oldobjs="$libobjs_save $symfileobj" addlibs=$convenience build_libtool_libs=no ;; module) oldobjs=$libobjs_save addlibs=$old_convenience build_libtool_libs=no ;; *) oldobjs="$old_deplibs $non_pic_objects" $preload && test -f "$symfileobj" \ && func_append oldobjs " $symfileobj" addlibs=$old_convenience ;; esac if test -n "$addlibs"; then gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $addlibs func_append oldobjs " $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then cmds=$old_archive_from_new_cmds else # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append oldobjs " $func_extract_archives_result" fi # POSIX demands no paths to be encoded in archives. 
We have # to avoid creating archives with duplicate basenames if we # might have to extract them afterwards, e.g., when creating a # static archive out of a convenience library, or when linking # the entirety of a libtool archive into another (currently # not supported by libtool). if (for obj in $oldobjs do func_basename "$obj" $ECHO "$func_basename_result" done | sort | sort -uc >/dev/null 2>&1); then : else echo "copying selected object files to avoid basename conflicts..." gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_mkdir_p "$gentop" save_oldobjs=$oldobjs oldobjs= counter=1 for obj in $save_oldobjs do func_basename "$obj" objbase=$func_basename_result case " $oldobjs " in " ") oldobjs=$obj ;; *[\ /]"$objbase "*) while :; do # Make sure we don't pick an alternate name that also # overlaps. newobj=lt$counter-$objbase func_arith $counter + 1 counter=$func_arith_result case " $oldobjs " in *[\ /]"$newobj "*) ;; *) if test ! -f "$gentop/$newobj"; then break; fi ;; esac done func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" func_append oldobjs " $gentop/$newobj" ;; *) func_append oldobjs " $obj" ;; esac done fi func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result eval cmds=\"$old_archive_cmds\" func_len " $cmds" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then cmds=$old_archive_cmds elif test -n "$archiver_list_spec"; then func_verbose "using command file archive linking..." for obj in $oldobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > $output_objdir/$libname.libcmd func_to_tool_file "$output_objdir/$libname.libcmd" oldobjs=" $archiver_list_spec$func_to_tool_file_result" cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts func_verbose "using piecewise archive linking..." save_RANLIB=$RANLIB RANLIB=: objlist= concat_cmds= save_oldobjs=$oldobjs oldobjs= # Is there a better way of finding the last object in the list? for obj in $save_oldobjs do last_oldobj=$obj done eval test_cmds=\"$old_archive_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 for obj in $save_oldobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result func_append objlist " $obj" if test "$len" -lt "$max_cmd_len"; then : else # the above command should be used before it gets too long oldobjs=$objlist if test "$obj" = "$last_oldobj"; then RANLIB=$save_RANLIB fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" objlist= len=$len0 fi done RANLIB=$save_RANLIB oldobjs=$objlist if test -z "$oldobjs"; then eval cmds=\"\$concat_cmds\" else eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi func_execute_cmds "$cmds" 'exit $?' done test -n "$generated" && \ func_show_eval "${RM}r$generated" # Now create the libtool archive. 
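# The .la archive written below is plain shell-assignment text, so a single
# field can be pulled out of it with the same sed idiom the code itself uses
# when resolving dependencies, e.g. (libfoo.la is illustrative):
#   eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' libfoo.la`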
case $output in *.la) old_library= test yes = "$build_old_libs" && old_library=$libname.$libext func_verbose "creating $output" # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done # Quote the link command for shipping. relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` if test yes = "$hardcode_automatic"; then relink_command= fi # Only create the output if not a dry run. $opt_dry_run || { for installed in no yes; do if test yes = "$installed"; then if test -z "$install_libdir"; then break fi output=$output_objdir/${outputname}i # Replace all uninstalled libtool libraries with the installed ones newdependency_libs= for deplib in $dependency_libs; do case $deplib in *.la) func_basename "$deplib" name=$func_basename_result func_resolve_sysroot "$deplib" eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` test -z "$libdir" && \ func_fatal_error "'$deplib' is not a valid libtool archive" func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" ;; -L*) func_stripname -L '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -L$func_replace_sysroot_result" ;; -R*) func_stripname -R '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -R$func_replace_sysroot_result" ;; *) func_append newdependency_libs " $deplib" ;; esac done dependency_libs=$newdependency_libs newdlfiles= for lib in $dlfiles; do case $lib in *.la) func_basename "$lib" name=$func_basename_result eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "'$lib' is not a valid libtool archive" func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" ;; *) func_append newdlfiles " $lib" ;; esac done dlfiles=$newdlfiles newdlprefiles= for lib in $dlprefiles; do case $lib in *.la) # Only pass preopened files to the pseudo-archive (for # eventual linking with the app. 
that links it) if we # didn't already link the preopened objects directly into # the library: func_basename "$lib" name=$func_basename_result eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "'$lib' is not a valid libtool archive" func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" ;; esac done dlprefiles=$newdlprefiles else newdlfiles= for lib in $dlfiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlfiles " $abs" done dlfiles=$newdlfiles newdlprefiles= for lib in $dlprefiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlprefiles " $abs" done dlprefiles=$newdlprefiles fi $RM $output # place dlname in correct position for cygwin # In fact, it would be nice if we could use this code for all target # systems that can't hard-code library paths into their executables # and that have no shared library path variable independent of PATH, # but it turns out we can't easily determine that from inspecting # libtool variables, so we have to hard-code the OSs to which it # applies here; at the moment, that means platforms that use the PE # object format with DLL files. See the long comment at the top of # tests/bindir.at for full details. tdlname=$dlname case $host,$output,$installed,$module,$dlname in *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) # If a -bindir argument was supplied, place the dll there. if test -n "$bindir"; then func_relative_path "$install_libdir" "$bindir" tdlname=$func_relative_path_result/$dlname else # Otherwise fall back on heuristic. tdlname=../bin/$dlname fi ;; esac $ECHO > $output "\ # $outputname - a libtool library file # Generated by $PROGRAM (GNU $PACKAGE) $VERSION # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='$tdlname' # Names of this library. library_names='$library_names' # The name of the static archive. old_library='$old_library' # Linker flags that cannot go in dependency_libs. inherited_linker_flags='$new_inherited_linker_flags' # Libraries that this one depends upon. dependency_libs='$dependency_libs' # Names of additional weak libraries provided by this library weak_library_names='$weak_libs' # Version information for $libname. current=$current age=$age revision=$revision # Is this an already installed library? installed=$installed # Should we warn about portability when linking against -modules? shouldnotlink=$module # Files to dlopen/dlpreopen dlopen='$dlfiles' dlpreopen='$dlprefiles' # Directory that this library needs to be installed in: libdir='$install_libdir'" if test no,yes = "$installed,$need_relink"; then $ECHO >> $output "\ relink_command=\"$relink_command\"" fi done } # Do a symbolic link so that the libtool archive can be found in # LD_LIBRARY_PATH before the program is installed. func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' ;; esac exit $EXIT_SUCCESS } if test link = "$opt_mode" || test relink = "$opt_mode"; then func_mode_link ${1+"$@"} fi # func_mode_uninstall arg... func_mode_uninstall () { $debug_cmd RM=$nonopt files= rmforce=false exit_status=0 # This variable tells wrapper scripts just to set variables rather # than running their programs. 
libtool_install_magic=$magic for arg do case $arg in -f) func_append RM " $arg"; rmforce=: ;; -*) func_append RM " $arg" ;; *) func_append files " $arg" ;; esac done test -z "$RM" && \ func_fatal_help "you must specify an RM program" rmdirs= for file in $files; do func_dirname "$file" "" "." dir=$func_dirname_result if test . = "$dir"; then odir=$objdir else odir=$dir/$objdir fi func_basename "$file" name=$func_basename_result test uninstall = "$opt_mode" && odir=$dir # Remember odir for removal later, being careful to avoid duplicates if test clean = "$opt_mode"; then case " $rmdirs " in *" $odir "*) ;; *) func_append rmdirs " $odir" ;; esac fi # Don't error if the file doesn't exist and rm -f was used. if { test -L "$file"; } >/dev/null 2>&1 || { test -h "$file"; } >/dev/null 2>&1 || test -f "$file"; then : elif test -d "$file"; then exit_status=1 continue elif $rmforce; then continue fi rmfiles=$file case $name in *.la) # Possibly a libtool archive, so verify it. if func_lalib_p "$file"; then func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do func_append rmfiles " $odir/$n" done test -n "$old_library" && func_append rmfiles " $odir/$old_library" case $opt_mode in clean) case " $library_names " in *" $dlname "*) ;; *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; esac test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; esac fi ;; *.lo) # Possibly a libtool object, so verify it. if func_lalib_p "$file"; then # Read the .lo file func_source $dir/$name # Add PIC object to the list of files to remove. if test -n "$pic_object" && test none != "$pic_object"; then func_append rmfiles " $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. if test -n "$non_pic_object" && test none != "$non_pic_object"; then func_append rmfiles " $dir/$non_pic_object" fi fi ;; *) if test clean = "$opt_mode"; then noexename=$name case $file in *.exe) func_stripname '' '.exe' "$file" file=$func_stripname_result func_stripname '' '.exe' "$name" noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe func_append rmfiles " $file" ;; esac # Do a test to see if this is a libtool program. 
if func_ltwrapper_p "$file"; then if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" relink_command= func_source $func_ltwrapper_scriptname_result func_append rmfiles " $func_ltwrapper_scriptname_result" else relink_command= func_source $dir/$noexename fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles func_append rmfiles " $odir/$name $odir/${name}S.$objext" if test yes = "$fast_install" && test -n "$relink_command"; then func_append rmfiles " $odir/lt-$name" fi if test "X$noexename" != "X$name"; then func_append rmfiles " $odir/lt-$noexename.c" fi fi fi ;; esac func_show_eval "$RM $rmfiles" 'exit_status=1' done # Try to remove the $objdir's in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status } if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then func_mode_uninstall ${1+"$@"} fi test -z "$opt_mode" && { help=$generic_help func_fatal_help "you must specify a MODE" } test -z "$exec_cmd" && \ func_fatal_help "invalid operation mode '$opt_mode'" if test -n "$exec_cmd"; then eval exec "$exec_cmd" exit $EXIT_FAILURE fi exit $exit_status # The TAGs below are defined such that we never get into a situation # where we disable both kinds of libraries. Given conflicting # choices, we go for a static library, that is the most portable, # since we can't tell whether shared libraries were disabled because # the user asked for that or because the platform doesn't support # them. This is particularly important on AIX, because we don't # support having both static and shared libraries enabled at the same # time on that platform, so we default to a shared-only configuration. # If a disable-shared tag is given, we'll fallback to a static-only # configuration. But we'll never go from static-only to shared-only. # ### BEGIN LIBTOOL TAG CONFIG: disable-shared build_libtool_libs=no build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: nordugrid-arc-7.1.1/PaxHeaders/autogen.sh0000644000000000000000000000013215067751327015351 xustar0030 mtime=1759498967.623489974 30 atime=1759498967.805492739 30 ctime=1759499024.709596457 nordugrid-arc-7.1.1/autogen.sh0000755000175000002070000000170515067751327017261 0ustar00mockbuildmock00000000000000#!/bin/sh # # autogen.sh glue # # Requires: automake 1.9, autoconf 2.57+ # Conflicts: autoconf 2.13 set -x cleanup() { find . -type d -name autom4te.cache -print | xargs rm -rf \; find . -type f \( -name missing -o -name install-sh \ -o -name mkinstalldirs \ -o -name depcomp -o -name ltmain.sh -o -name configure \ -o -name config.sub -o -name config.guess \ -o -name Makefile.in -o -name config.h.in -o -name aclocal.m4 \ -o -name autoscan.log -o -name configure.scan -o -name config.log \ -o -name config.status -o -name config.h -o -name stamp-h1 \ -o -name Makefile -o -name libtool \) \ -print | xargs rm -f } if [ "x$1" = "xclean" ]; then cleanup exit fi # Refresh GNU autotools toolchain. echo Cleaning autotools files... cleanup type glibtoolize > /dev/null 2>&1 && export LIBTOOLIZE=glibtoolize echo Running autoreconf... 
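# Typical usage (illustrative; the install prefix is an assumption):
#   ./autogen.sh                      # regenerate configure and friends
#   ./configure --prefix=/opt/arc && make
#   ./autogen.sh clean                # remove all generated autotools files
# On macOS, libtoolize is typically installed as glibtoolize, hence the
# check above.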
autoreconf --verbose --force --install exit 0 nordugrid-arc-7.1.1/PaxHeaders/m40000644000000000000000000000013215067751420013610 xustar0030 mtime=1759499024.683357001 30 atime=1759499034.762510154 30 ctime=1759499024.683357001 nordugrid-arc-7.1.1/m4/0000755000175000002070000000000015067751420015567 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/m4/PaxHeaders/gettext.m40000644000000000000000000000013115067751331015613 xustar0030 mtime=1759498969.900639998 29 atime=1759498971.44554805 30 ctime=1759499024.669207477 nordugrid-arc-7.1.1/m4/gettext.m40000644000175000002070000003457015067751331017527 0ustar00mockbuildmock00000000000000# gettext.m4 serial 60 (gettext-0.17) dnl Copyright (C) 1995-2007 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2006. dnl Macro to add for using GNU gettext. dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]). dnl INTLSYMBOL can be one of 'external', 'no-libtool', 'use-libtool'. The dnl default (if it is not specified or empty) is 'no-libtool'. dnl INTLSYMBOL should be 'external' for packages with no intl directory, dnl and 'no-libtool' or 'use-libtool' for packages with an intl directory. dnl If INTLSYMBOL is 'use-libtool', then a libtool library dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static, dnl depending on --{enable,disable}-{shared,static} and on the presence of dnl AM-DISABLE-SHARED). If INTLSYMBOL is 'no-libtool', a static library dnl $(top_builddir)/intl/libintl.a will be created. dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext dnl implementations (in libc or libintl) without the ngettext() function dnl will be ignored. If NEEDSYMBOL is specified and is dnl 'need-formatstring-macros', then GNU gettext implementations that don't dnl support the ISO C 99 formatstring macros will be ignored. dnl INTLDIR is used to find the intl libraries. If empty, dnl the value `$(top_builddir)/intl/' is used. dnl dnl The result of the configuration is one of three cases: dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled dnl and used. dnl Catalog format: GNU --> install in $(datadir) dnl Catalog extension: .mo after installation, .gmo in source tree dnl 2) GNU gettext has been found in the system's C library. dnl Catalog format: GNU --> install in $(datadir) dnl Catalog extension: .mo after installation, .gmo in source tree dnl 3) No internationalization, always use English msgid. dnl Catalog format: none dnl Catalog extension: none dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur. dnl The use of .gmo is historical (it was needed to avoid overwriting the dnl GNU format catalogs when building on a platform with an X/Open gettext), dnl but we keep it in order not to force irrelevant filename changes on the dnl maintainers. 
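dnl A minimal configure.ac fragment using this macro (illustrative; the
dnl chosen arguments are only an example):
dnl   AM_GNU_GETTEXT([external], [need-ngettext])
dnl   AM_GNU_GETTEXT_VERSION([0.17])
dnl With 'external', only a preinstalled gettext in libc or libintl can be
dnl used (cases 2 and 3 above); no bundled intl directory is compiled.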
dnl AC_DEFUN([AM_GNU_GETTEXT], [ dnl Argument checking. ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [no-libtool], , [ifelse([$1], [use-libtool], , [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT ])])])])]) ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], , [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT ])])])]) define([gt_included_intl], ifelse([$1], [external], ifdef([AM_GNU_GETTEXT_][INTL_SUBDIR], [yes], [no]), [yes])) define([gt_libtool_suffix_prefix], ifelse([$1], [use-libtool], [l], [])) gt_NEEDS_INIT AM_GNU_GETTEXT_NEED([$2]) AC_REQUIRE([AM_PO_SUBDIRS])dnl ifelse(gt_included_intl, yes, [ AC_REQUIRE([AM_INTL_SUBDIR])dnl ]) dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) dnl Sometimes libintl requires libiconv, so first search for libiconv. dnl Ideally we would do this search only after the dnl if test "$USE_NLS" = "yes"; then dnl if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT dnl the configure script would need to contain the same shell code dnl again, outside any 'if'. There are two solutions: dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'. dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE. dnl Since AC_PROVIDE_IFELSE is only in autoconf >= 2.52 and not dnl documented, we avoid it. ifelse(gt_included_intl, yes, , [ AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) ]) dnl Sometimes, on MacOS X, libintl requires linking with CoreFoundation. gt_INTL_MACOSX dnl Set USE_NLS. AC_REQUIRE([AM_NLS]) ifelse(gt_included_intl, yes, [ BUILD_INCLUDED_LIBINTL=no USE_INCLUDED_LIBINTL=no ]) LIBINTL= LTLIBINTL= POSUB= dnl Add a version number to the cache macros. case " $gt_needs " in *" need-formatstring-macros "*) gt_api_version=3 ;; *" need-ngettext "*) gt_api_version=2 ;; *) gt_api_version=1 ;; esac gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc" gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl" dnl If we use NLS figure out what method if test "$USE_NLS" = "yes"; then gt_use_preinstalled_gnugettext=no ifelse(gt_included_intl, yes, [ AC_MSG_CHECKING([whether included gettext is requested]) AC_ARG_WITH(included-gettext, [ --with-included-gettext use the GNU gettext library included here], nls_cv_force_use_gnu_gettext=$withval, nls_cv_force_use_gnu_gettext=no) AC_MSG_RESULT($nls_cv_force_use_gnu_gettext) nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" if test "$nls_cv_force_use_gnu_gettext" != "yes"; then ]) dnl User does not insist on using GNU NLS library. Figure out what dnl to use. If GNU gettext is available we use this. Else we have dnl to fall back to GNU NLS library. if test $gt_api_version -ge 3; then gt_revision_test_code=' #ifndef __GNU_GETTEXT_SUPPORTED_REVISION #define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 
0 : -1) #endif changequote(,)dnl typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; changequote([,])dnl ' else gt_revision_test_code= fi if test $gt_api_version -ge 2; then gt_expression_test_code=' + * ngettext ("", "", 0)' else gt_expression_test_code= fi AC_CACHE_CHECK([for GNU gettext in libc], [$gt_func_gnugettext_libc], [AC_TRY_LINK([#include $gt_revision_test_code extern int _nl_msg_cat_cntr; extern int *_nl_domain_bindings;], [bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_domain_bindings], [eval "$gt_func_gnugettext_libc=yes"], [eval "$gt_func_gnugettext_libc=no"])]) if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then dnl Sometimes libintl requires libiconv, so first search for libiconv. ifelse(gt_included_intl, yes, , [ AM_ICONV_LINK ]) dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv]) dnl because that would add "-liconv" to LIBINTL and LTLIBINTL dnl even if libiconv doesn't exist. AC_LIB_LINKFLAGS_BODY([intl]) AC_CACHE_CHECK([for GNU gettext in libintl], [$gt_func_gnugettext_libintl], [gt_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $INCINTL" gt_save_LIBS="$LIBS" LIBS="$LIBS $LIBINTL" dnl Now see whether libintl exists and does not depend on libiconv. AC_TRY_LINK([#include $gt_revision_test_code extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *);], [bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("")], [eval "$gt_func_gnugettext_libintl=yes"], [eval "$gt_func_gnugettext_libintl=no"]) dnl Now see whether libintl exists and depends on libiconv. if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then LIBS="$LIBS $LIBICONV" AC_TRY_LINK([#include $gt_revision_test_code extern int _nl_msg_cat_cntr; extern #ifdef __cplusplus "C" #endif const char *_nl_expand_alias (const char *);], [bindtextdomain ("", ""); return * gettext ("")$gt_expression_test_code + _nl_msg_cat_cntr + *_nl_expand_alias ("")], [LIBINTL="$LIBINTL $LIBICONV" LTLIBINTL="$LTLIBINTL $LTLIBICONV" eval "$gt_func_gnugettext_libintl=yes" ]) fi CPPFLAGS="$gt_save_CPPFLAGS" LIBS="$gt_save_LIBS"]) fi dnl If an already present or preinstalled GNU gettext() is found, dnl use it. But if this macro is used in GNU gettext, and GNU dnl gettext is already preinstalled in libintl, we update this dnl libintl. (Cf. the install rule in intl/Makefile.in.) if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \ || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \ && test "$PACKAGE" != gettext-runtime \ && test "$PACKAGE" != gettext-tools; }; then gt_use_preinstalled_gnugettext=yes else dnl Reset the values set by searching for libintl. LIBINTL= LTLIBINTL= INCINTL= fi ifelse(gt_included_intl, yes, [ if test "$gt_use_preinstalled_gnugettext" != "yes"; then dnl GNU gettext is not found in the C library. dnl Fall back on included GNU gettext library. nls_cv_use_gnu_gettext=yes fi fi if test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Mark actions used to generate GNU NLS library. 
BUILD_INCLUDED_LIBINTL=yes USE_INCLUDED_LIBINTL=yes LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LIBICONV $LIBTHREAD" LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.[]gt_libtool_suffix_prefix[]a $LTLIBICONV $LTLIBTHREAD" LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` fi CATOBJEXT= if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Mark actions to use GNU gettext tools. CATOBJEXT=.gmo fi ]) if test -n "$INTL_MACOSX_LIBS"; then if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then dnl Some extra flags are needed during linking. LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" fi fi if test "$gt_use_preinstalled_gnugettext" = "yes" \ || test "$nls_cv_use_gnu_gettext" = "yes"; then AC_DEFINE(ENABLE_NLS, 1, [Define to 1 if translation of program messages to the user's native language is requested.]) else USE_NLS=no fi fi AC_MSG_CHECKING([whether to use NLS]) AC_MSG_RESULT([$USE_NLS]) if test "$USE_NLS" = "yes"; then AC_MSG_CHECKING([where the gettext function comes from]) if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then gt_source="external libintl" else gt_source="libc" fi else gt_source="included intl directory" fi AC_MSG_RESULT([$gt_source]) fi if test "$USE_NLS" = "yes"; then if test "$gt_use_preinstalled_gnugettext" = "yes"; then if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then AC_MSG_CHECKING([how to link with libintl]) AC_MSG_RESULT([$LIBINTL]) AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL]) fi dnl For backward compatibility. Some packages may be using this. AC_DEFINE(HAVE_GETTEXT, 1, [Define if the GNU gettext() function is already present or preinstalled.]) AC_DEFINE(HAVE_DCGETTEXT, 1, [Define if the GNU dcgettext() function is already present or preinstalled.]) fi dnl We need to process the po/ directory. POSUB=po fi ifelse(gt_included_intl, yes, [ dnl If this is used in GNU gettext we have to set BUILD_INCLUDED_LIBINTL dnl to 'yes' because some of the testsuite requires it. if test "$PACKAGE" = gettext-runtime || test "$PACKAGE" = gettext-tools; then BUILD_INCLUDED_LIBINTL=yes fi dnl Make all variables we use known to autoconf. AC_SUBST(BUILD_INCLUDED_LIBINTL) AC_SUBST(USE_INCLUDED_LIBINTL) AC_SUBST(CATOBJEXT) dnl For backward compatibility. Some configure.ins may be using this. nls_cv_header_intl= nls_cv_header_libgt= dnl For backward compatibility. Some Makefiles may be using this. DATADIRNAME=share AC_SUBST(DATADIRNAME) dnl For backward compatibility. Some Makefiles may be using this. INSTOBJEXT=.mo AC_SUBST(INSTOBJEXT) dnl For backward compatibility. Some Makefiles may be using this. GENCAT=gencat AC_SUBST(GENCAT) dnl For backward compatibility. Some Makefiles may be using this. INTLOBJS= if test "$USE_INCLUDED_LIBINTL" = yes; then INTLOBJS="\$(GETTOBJS)" fi AC_SUBST(INTLOBJS) dnl Enable libtool support if the surrounding package wishes it. INTL_LIBTOOL_SUFFIX_PREFIX=gt_libtool_suffix_prefix AC_SUBST(INTL_LIBTOOL_SUFFIX_PREFIX) ]) dnl For backward compatibility. Some Makefiles may be using this. INTLLIBS="$LIBINTL" AC_SUBST(INTLLIBS) dnl Make all documented variables known to autoconf. AC_SUBST(LIBINTL) AC_SUBST(LTLIBINTL) AC_SUBST(POSUB) ]) dnl gt_NEEDS_INIT ensures that the gt_needs variable is initialized. 
m4_define([gt_NEEDS_INIT], [ m4_divert_text([DEFAULTS], [gt_needs=]) m4_define([gt_NEEDS_INIT], []) ]) dnl Usage: AM_GNU_GETTEXT_NEED([NEEDSYMBOL]) AC_DEFUN([AM_GNU_GETTEXT_NEED], [ m4_divert_text([INIT_PREPARE], [gt_needs="$gt_needs $1"]) ]) dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version]) AC_DEFUN([AM_GNU_GETTEXT_VERSION], []) nordugrid-arc-7.1.1/m4/PaxHeaders/lt~obsolete.m40000644000000000000000000000013215067751340016502 xustar0030 mtime=1759498976.446689936 30 atime=1759498976.720628205 30 ctime=1759499024.680356955 nordugrid-arc-7.1.1/m4/lt~obsolete.m40000644000175000002070000001377415067751340020420 0ustar00mockbuildmock00000000000000# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software # Foundation, Inc. # Written by Scott James Remnant, 2004. # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 5 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. # # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. # Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. 
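# A minimal sketch of the stub pattern used below (AC_SOME_OLD_MACRO is a
# hypothetical name, not a real libtool macro):
#   m4_ifndef([AC_SOME_OLD_MACRO], [AC_DEFUN([AC_SOME_OLD_MACRO])])
# i.e. an empty AC_DEFUN is provided only when no real definition exists, so
# aclocal is satisfied without a live macro being overridden.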
AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], 
[AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) nordugrid-arc-7.1.1/m4/PaxHeaders/fsusage.m40000644000000000000000000000013215067751327015572 xustar0030 mtime=1759498967.642740224 30 atime=1759498967.808492785 30 ctime=1759499024.668173266 nordugrid-arc-7.1.1/m4/fsusage.m40000644000175000002070000001752215067751327017503 0ustar00mockbuildmock00000000000000#serial 23 # Obtaining file system usage information. # Copyright (C) 1997, 1998, 2000, 2001, 2003-2007 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Written by Jim Meyering. AC_DEFUN([gl_FSUSAGE], [ AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/vfs.h sys/fs_types.h) AC_CHECK_HEADERS(sys/mount.h, [], [], [AC_INCLUDES_DEFAULT [#if HAVE_SYS_PARAM_H #include <sys/param.h> #endif]]) gl_FILE_SYSTEM_USAGE([gl_cv_fs_space=yes], [gl_cv_fs_space=no]) if test $gl_cv_fs_space = yes; then AC_LIBOBJ(fsusage) gl_PREREQ_FSUSAGE_EXTRA fi ]) # Try to determine how a program can obtain file system usage information. # If successful, define the appropriate symbol (see fsusage.c) and # execute ACTION-IF-FOUND. Otherwise, execute ACTION-IF-NOT-FOUND. # # gl_FILE_SYSTEM_USAGE([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) AC_DEFUN([gl_FILE_SYSTEM_USAGE], [ AC_MSG_NOTICE([checking how to get file system space usage]) ac_fsusage_space=no # Perform only the link test since it seems there are no variants of the # statvfs function. This check is more than just AC_CHECK_FUNCS(statvfs) # because that got a false positive on SCO OSR5. Adding the declaration # of a `struct statvfs' causes this test to fail (as it should) on such # systems. That system is reported to work fine with STAT_STATFS4 which # is what it gets when this test fails. if test $ac_fsusage_space = no; then # SVR4 AC_CACHE_CHECK([for statvfs function (SVR4)], fu_cv_sys_stat_statvfs, [AC_TRY_LINK([#include <sys/types.h> #if defined __GLIBC__ && !defined __BEOS__ Do not use statvfs on systems with GNU libc, because that function stats all preceding entries in /proc/mounts, and that makes df hang if even one of the corresponding file systems is hard-mounted, but not available. statvfs in GNU libc on BeOS operates differently: it only makes a system call. #endif #ifdef __osf__ "Do not use Tru64's statvfs implementation" #endif #include <sys/statvfs.h>], [struct statvfs fsd; statvfs (0, &fsd);], fu_cv_sys_stat_statvfs=yes, fu_cv_sys_stat_statvfs=no)]) if test $fu_cv_sys_stat_statvfs = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATVFS, 1, [ Define if there is a function named statvfs. 
(SVR4)]) fi fi if test $ac_fsusage_space = no; then # DEC Alpha running OSF/1 AC_MSG_CHECKING([for 3-argument statfs function (DEC OSF/1)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs3_osf1, [AC_TRY_RUN([ #include <sys/param.h> #include <sys/types.h> #include <sys/mount.h> int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd, sizeof (struct statfs)) != 0; }], fu_cv_sys_stat_statfs3_osf1=yes, fu_cv_sys_stat_statfs3_osf1=no, fu_cv_sys_stat_statfs3_osf1=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs3_osf1) if test $fu_cv_sys_stat_statfs3_osf1 = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS3_OSF1, 1, [ Define if statfs takes 3 args. (DEC Alpha running OSF/1)]) fi fi if test $ac_fsusage_space = no; then # AIX AC_MSG_CHECKING([for two-argument statfs with statfs.bsize dnl member (AIX, 4.3BSD)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs2_bsize, [AC_TRY_RUN([ #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #ifdef HAVE_SYS_VFS_H #include <sys/vfs.h> #endif int main () { struct statfs fsd; fsd.f_bsize = 0; return statfs (".", &fsd) != 0; }], fu_cv_sys_stat_statfs2_bsize=yes, fu_cv_sys_stat_statfs2_bsize=no, fu_cv_sys_stat_statfs2_bsize=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs2_bsize) if test $fu_cv_sys_stat_statfs2_bsize = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS2_BSIZE, 1, [ Define if statfs takes 2 args and struct statfs has a field named f_bsize. (4.3BSD, SunOS 4, HP-UX, AIX PS/2)]) fi fi if test $ac_fsusage_space = no; then # SVR3 AC_MSG_CHECKING([for four-argument statfs (AIX-3.2.5, SVR3)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs4, [AC_TRY_RUN([#include <sys/types.h> #include <sys/statfs.h> int main () { struct statfs fsd; return statfs (".", &fsd, sizeof fsd, 0) != 0; }], fu_cv_sys_stat_statfs4=yes, fu_cv_sys_stat_statfs4=no, fu_cv_sys_stat_statfs4=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs4) if test $fu_cv_sys_stat_statfs4 = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS4, 1, [ Define if statfs takes 4 args. (SVR3, Dynix, Irix, Dolphin)]) fi fi if test $ac_fsusage_space = no; then # 4.4BSD and NetBSD AC_MSG_CHECKING([for two-argument statfs with statfs.fsize dnl member (4.4BSD and NetBSD)]) AC_CACHE_VAL(fu_cv_sys_stat_statfs2_fsize, [AC_TRY_RUN([#include <sys/types.h> #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif int main () { struct statfs fsd; fsd.f_fsize = 0; return statfs (".", &fsd) != 0; }], fu_cv_sys_stat_statfs2_fsize=yes, fu_cv_sys_stat_statfs2_fsize=no, fu_cv_sys_stat_statfs2_fsize=no)]) AC_MSG_RESULT($fu_cv_sys_stat_statfs2_fsize) if test $fu_cv_sys_stat_statfs2_fsize = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS2_FSIZE, 1, [ Define if statfs takes 2 args and struct statfs has a field named f_fsize. (4.4BSD, NetBSD)]) fi fi if test $ac_fsusage_space = no; then # Ultrix AC_MSG_CHECKING([for two-argument statfs with struct fs_data (Ultrix)]) AC_CACHE_VAL(fu_cv_sys_stat_fs_data, [AC_TRY_RUN([#include <sys/types.h> #ifdef HAVE_SYS_PARAM_H #include <sys/param.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #ifdef HAVE_SYS_FS_TYPES_H #include <sys/fs_types.h> #endif int main () { struct fs_data fsd; /* Ultrix's statfs returns 1 for success, 0 for not mounted, -1 for failure. */ return statfs (".", &fsd) != 1; }], fu_cv_sys_stat_fs_data=yes, fu_cv_sys_stat_fs_data=no, fu_cv_sys_stat_fs_data=no)]) AC_MSG_RESULT($fu_cv_sys_stat_fs_data) if test $fu_cv_sys_stat_fs_data = yes; then ac_fsusage_space=yes AC_DEFINE(STAT_STATFS2_FS_DATA, 1, [ Define if statfs takes 2 args and the second argument has type struct fs_data. 
(Ultrix)]) fi fi if test $ac_fsusage_space = no; then # SVR2 AC_TRY_CPP([#include <sys/filsys.h>], AC_DEFINE(STAT_READ_FILSYS, 1, [Define if there is no specific function for reading file systems usage information and you have the <sys/filsys.h> header file. (SVR2)]) ac_fsusage_space=yes) fi AS_IF([test $ac_fsusage_space = yes], [$1], [$2]) ]) # Check for SunOS statfs brokenness wrt partitions 2GB and larger. # If <sys/vfs.h> exists and struct statfs has a member named f_spare, # enable the work-around code in fsusage.c. AC_DEFUN([gl_STATFS_TRUNCATES], [ AC_MSG_CHECKING([for statfs that truncates block counts]) AC_CACHE_VAL(fu_cv_sys_truncating_statfs, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #if !defined(sun) && !defined(__sun) choke -- this is a workaround for a Sun-specific problem #endif #include <sys/types.h> #include <sys/vfs.h> ]], [[struct statfs t; long c = *(t.f_spare); if (c) return 0;]])], [fu_cv_sys_truncating_statfs=yes], [fu_cv_sys_truncating_statfs=no])]) if test $fu_cv_sys_truncating_statfs = yes; then AC_DEFINE(STATFS_TRUNCATES_BLOCK_COUNTS, 1, [Define if the block counts reported by statfs may be truncated to 2GB and the correct values may be stored in the f_spare array. (SunOS 4.1.2, 4.1.3, and 4.1.3_U1 are reported to have this problem. SunOS 4.1.1 seems not to be affected.)]) fi AC_MSG_RESULT($fu_cv_sys_truncating_statfs) ]) # Prerequisites of lib/fsusage.c not done by gl_FILE_SYSTEM_USAGE. AC_DEFUN([gl_PREREQ_FSUSAGE_EXTRA], [ AC_CHECK_HEADERS(dustat.h sys/fs/s5param.h sys/filsys.h sys/statfs.h) gl_STATFS_TRUNCATES ]) nordugrid-arc-7.1.1/m4/PaxHeaders/ltversion.m40000644000000000000000000000013115067751340016154 xustar0030 mtime=1759498976.362052761 29 atime=1759498976.72162822 30 ctime=1759499024.679887633 nordugrid-arc-7.1.1/m4/ltversion.m40000644000175000002070000000127315067751340020062 0ustar00mockbuildmock00000000000000# ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004, 2011-2015 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # @configure_input@ # serial 4179 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.4.6]) m4_define([LT_PACKAGE_REVISION], [2.4.6]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.4.6' macro_revision='2.4.6' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ]) nordugrid-arc-7.1.1/m4/PaxHeaders/ac_cxx_have_sstream.m40000644000000000000000000000013215067751327020143 xustar0030 mtime=1759498967.642338994 30 atime=1759498967.808492785 30 ctime=1759499024.663813334 nordugrid-arc-7.1.1/m4/ac_cxx_have_sstream.m40000644000175000002070000000134115067751327022044 0ustar00mockbuildmock00000000000000dnl @synopsis AC_CXX_HAVE_SSTREAM dnl dnl If the C++ library has a working stringstream, define HAVE_SSTREAM. 
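dnl Usage sketch (assumed configure.ac wiring, not part of this file):
dnl   AC_CXX_HAVE_SSTREAM
dnl and then, in C++ sources:
dnl   #ifdef HAVE_SSTREAM
dnl   #include <sstream>
dnl   #endif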
dnl dnl @author Ben Stanley dnl @version $Id: ac_cxx_have_sstream.m4 3830 2005-06-24 07:01:15Z waananen $ dnl AC_DEFUN([AC_CXX_HAVE_SSTREAM], [AC_CACHE_CHECK(whether the compiler has stringstream, ac_cv_cxx_have_sstream, [AC_REQUIRE([AC_CXX_NAMESPACES]) AC_LANG_SAVE AC_LANG_CPLUSPLUS AC_TRY_COMPILE([#include <sstream> #ifdef HAVE_NAMESPACES using namespace std; #endif],[stringstream message; message << "Hello"; return 0;], ac_cv_cxx_have_sstream=yes, ac_cv_cxx_have_sstream=no) AC_LANG_RESTORE ]) if test "$ac_cv_cxx_have_sstream" = yes; then AC_DEFINE(HAVE_SSTREAM,,[define if the compiler has stringstream]) fi ]) nordugrid-arc-7.1.1/m4/PaxHeaders/gpt.m40000644000000000000000000000013215067751327014727 xustar0030 mtime=1759498967.642740224 30 atime=1759498967.808492785 30 ctime=1759499024.670216727 nordugrid-arc-7.1.1/m4/gpt.m40000644000175000002070000000615715067751327016632 0ustar00mockbuildmock00000000000000# globus.m4 -*- Autoconf -*- # Macros for compiling and linking against globus/gpt packages AC_DEFUN([GPT_PROG_GPT_FLAVOR_CONFIGURATION], [ AC_ARG_VAR([GPT_FLAVOR_CONFIGURATION], [path to gpt-flavor-configuration]) if test "x$ac_cv_env_GPT_FLAVOR_CONFIGURATION_set" != "xset"; then AC_PATH_TOOL([GPT_FLAVOR_CONFIGURATION], [gpt-flavor-configuration], [], $PATH:/usr/sbin:/opt/gpt/sbin) fi if test -f "$GPT_FLAVOR_CONFIGURATION" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_FLAVOR_CONFIGURATION` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi ]) AC_DEFUN([GPT_PROG_GPT_QUERY], [ AC_ARG_VAR([GPT_QUERY], [path to gpt-query]) if test "x$ac_cv_env_GPT_QUERY_set" != "xset"; then AC_PATH_TOOL([GPT_QUERY], [gpt-query], [], $PATH:/usr/sbin:/opt/gpt/sbin) fi if test -f "$GPT_QUERY" && test "x$GPT_LOCATION" = "x"; then GPT_LOCATION=`dirname $GPT_QUERY` GPT_LOCATION=`dirname $GPT_LOCATION` export GPT_LOCATION fi ]) AC_DEFUN([GPT_PROG_GLOBUS_MAKEFILE_HEADER], [ AC_ARG_VAR([GLOBUS_MAKEFILE_HEADER], [path to globus-makefile-header]) if test "x$ac_cv_env_GLOBUS_MAKEFILE_HEADER_set" != "xset"; then AC_PATH_TOOL([GLOBUS_MAKEFILE_HEADER], [globus-makefile-header], [], $PATH:/opt/globus/bin) fi if test -f "$GLOBUS_MAKEFILE_HEADER" && test "x$GLOBUS_LOCATION" = "x"; then GLOBUS_LOCATION=`dirname $GLOBUS_MAKEFILE_HEADER` GLOBUS_LOCATION=`dirname $GLOBUS_LOCATION` export GLOBUS_LOCATION fi ]) AC_DEFUN([GPT_ARG_GPT_FLAVOR], [ AC_REQUIRE([GPT_PROG_GPT_FLAVOR_CONFIGURATION]) AC_MSG_CHECKING([for gpt flavor]) AC_ARG_WITH([flavor], AC_HELP_STRING([--with-flavor=(flavor)], [Specify the gpt build flavor [[autodetect]]]), [GPT_FLAVOR=$withval], if test -n "$GPT_FLAVOR_CONFIGURATION" ; then [GPT_FLAVOR=`$GPT_FLAVOR_CONFIGURATION | \\ grep '^[[a-zA-Z]].*:$' | cut -f1 -d: | grep thr | tail -1`] fi) if test -n "$GPT_FLAVOR"; then AC_MSG_RESULT($GPT_FLAVOR) else AC_MSG_RESULT([none detected, is globus_core-devel installed?]) fi ]) AC_DEFUN([GPT_PKG_VERSION], [ AC_REQUIRE([GPT_PROG_GPT_QUERY]) AC_REQUIRE([GPT_ARG_GPT_FLAVOR]) if test -n "$GPT_QUERY" && test -n "$GPT_FLAVOR"; then gpt_cv_[]$1[]_version=`$GPT_QUERY $1[]-[]$GPT_FLAVOR[]-dev | \\ grep 'pkg version' | sed 's%.*: *%%'` fi ]) AC_DEFUN([GPT_PKG], [ AC_REQUIRE([GPT_PROG_GLOBUS_MAKEFILE_HEADER]) AC_REQUIRE([GPT_ARG_GPT_FLAVOR]) AC_MSG_CHECKING([for $1]) GPT_PKG_VERSION($1) if test -n "$gpt_cv_[]$1[]_version"; then if test -n "$GLOBUS_MAKEFILE_HEADER" && test -n "$GPT_FLAVOR" ; then gpt_cv_tmp=`$GLOBUS_MAKEFILE_HEADER --flavor=$GPT_FLAVOR $1 | \\ sed 's% *= *\(.*\)%="\1"%'` gpt_cv_[]$1[]_cflags=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_INCLUDES'` 
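dnl As with the CFLAGS just captured above, the next eval-and-echo captures
dnl the link flags (GLOBUS_LDFLAGS, GLOBUS_PKG_LIBS, GLOBUS_LIBS) that
dnl globus-makefile-header reported for this package.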
gpt_cv_[]$1[]_libs=`eval "$gpt_cv_tmp" \\ echo '$GLOBUS_LDFLAGS $GLOBUS_PKG_LIBS $GLOBUS_LIBS'` gpt_cv_tmp= fi fi if test -n "$gpt_cv_[]$1[]_version"; then AC_MSG_RESULT($gpt_cv_[]$1[]_version) m4_toupper([$1])[]_VERSION=$gpt_cv_[]$1[]_version m4_toupper([$1])[]_LIBS=$gpt_cv_[]$1[]_libs m4_toupper([$1])[]_CFLAGS=$gpt_cv_[]$1[]_cflags else AC_MSG_RESULT(no) fi ]) nordugrid-arc-7.1.1/m4/PaxHeaders/arc_api.m40000644000000000000000000000013215067751327015533 xustar0030 mtime=1759498967.642740224 30 atime=1759498967.808492785 30 ctime=1759499024.666034611 nordugrid-arc-7.1.1/m4/arc_api.m40000644000175000002070000000435215067751327017441 0ustar00mockbuildmock00000000000000 # # ARC Public API # AC_DEFUN([ARC_API], [ ARCCLIENT_LIBS='$(top_builddir)/src/hed/libs/compute/libarccompute.la' ARCCLIENT_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCCLIENT_LIBS) AC_SUBST(ARCCLIENT_CFLAGS) ARCCOMMON_LIBS='$(top_builddir)/src/hed/libs/common/libarccommon.la' ARCCOMMON_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCCOMMON_LIBS) AC_SUBST(ARCCOMMON_CFLAGS) ARCCREDENTIAL_LIBS='$(top_builddir)/src/hed/libs/credential/libarccredential.la' ARCCREDENTIAL_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCCREDENTIAL_LIBS) AC_SUBST(ARCCREDENTIAL_CFLAGS) ARCDATA_LIBS='$(top_builddir)/src/hed/libs/data/libarcdata.la' ARCDATA_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCDATA_LIBS) AC_SUBST(ARCDATA_CFLAGS) ARCJOB_LIBS='$(top_builddir)/src/hed/libs/job/libarcjob.la' ARCJOB_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCJOB_LIBS) AC_SUBST(ARCJOB_CFLAGS) ARCLOADER_LIBS='$(top_builddir)/src/hed/libs/loader/libarcloader.la' ARCLOADER_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCLOADER_LIBS) AC_SUBST(ARCLOADER_CFLAGS) ARCMESSAGE_LIBS='$(top_builddir)/src/hed/libs/message/libarcmessage.la' ARCMESSAGE_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCMESSAGE_LIBS) AC_SUBST(ARCMESSAGE_CFLAGS) ARCSECURITY_LIBS='$(top_builddir)/src/hed/libs/security/libarcsecurity.la' ARCSECURITY_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCSECURITY_LIBS) AC_SUBST(ARCSECURITY_CFLAGS) ARCOTOKENS_LIBS='$(top_builddir)/src/hed/libs/security/libarcotokens.la' ARCOTOKENS_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCOTOKENS_LIBS) AC_SUBST(ARCOTOKENS_CFLAGS) ARCINFOSYS_LIBS='$(top_builddir)/src/hed/libs/infosys/libarcinfosys.la' ARCINFOSYS_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCINFOSYS_LIBS) AC_SUBST(ARCINFOSYS_CFLAGS) ARCWSADDRESSING_LIBS='$(top_builddir)/src/hed/libs/ws-addressing/libarcwsaddressing.la' ARCWSADDRESSING_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCWSADDRESSING_LIBS) AC_SUBST(ARCWSADDRESSING_CFLAGS) ARCWSSECURITY_LIBS='$(top_builddir)/src/hed/libs/ws-security/libarcwssecurity.la' ARCWSSECURITY_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCWSSECURITY_LIBS) AC_SUBST(ARCWSSECURITY_CFLAGS) ARCXMLSEC_LIBS='$(top_builddir)/src/hed/libs/xmlsec/libarcxmlsec.la' ARCXMLSEC_CFLAGS='-I$(top_srcdir)/include' AC_SUBST(ARCXMLSEC_LIBS) AC_SUBST(ARCXMLSEC_CFLAGS) ]) nordugrid-arc-7.1.1/m4/PaxHeaders/progtest.m40000644000000000000000000000013215067751332016000 xustar0030 mtime=1759498970.154789954 30 atime=1759498971.436547913 30 ctime=1759499024.684242422 nordugrid-arc-7.1.1/m4/progtest.m40000644000175000002070000000555015067751332017707 0ustar00mockbuildmock00000000000000# progtest.m4 serial 4 (gettext-0.14.2) dnl Copyright (C) 1996-2003, 2005 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. 
dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper <drepper@cygnus.com>, 1996. AC_PREREQ(2.50) # Search path for a program which passes the given test. dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) AC_DEFUN([AM_PATH_PROG_WITH_TEST], [ # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Find out how to test for executable files. Don't use a zero-byte file, # as systems may use methods other than mode bits to determine executability. cat >conf$$.file <<_ASEOF #! /bin/sh exit 0 _ASEOF chmod +x conf$$.file if test -x conf$$.file >/dev/null 2>&1; then ac_executable_p="test -x" else ac_executable_p="test -f" fi rm -f conf$$.file # Extract the first word of "$2", so it can be a program name with args. set dummy $2; ac_word=[$]2 AC_MSG_CHECKING([for $ac_word]) AC_CACHE_VAL(ac_cv_path_$1, [case "[$]$1" in [[\\/]]* | ?:[[\\/]]*) ac_cv_path_$1="[$]$1" # Let the user override the test with a path. ;; *) ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in ifelse([$5], , $PATH, [$5]); do IFS="$ac_save_IFS" test -z "$ac_dir" && ac_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then echo "$as_me: trying $ac_dir/$ac_word..." >&AS_MESSAGE_LOG_FD if [$3]; then ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext" break 2 fi fi done done IFS="$ac_save_IFS" dnl If no 4th arg is given, leave the cache variable unset, dnl so AC_PATH_PROGS will keep looking. ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" ])dnl ;; esac])dnl $1="$ac_cv_path_$1" if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then AC_MSG_RESULT([$]$1) else AC_MSG_RESULT(no) fi AC_SUBST($1)dnl ]) nordugrid-arc-7.1.1/m4/PaxHeaders/ltsugar.m40000644000000000000000000000013115067751340015610 xustar0030 mtime=1759498976.268621337 29 atime=1759498976.72162822 30 ctime=1759499024.678884627 nordugrid-arc-7.1.1/m4/ltsugar.m40000644000175000002070000001044015067751340017512 0ustar00mockbuildmock00000000000000# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- # # Copyright (C) 2004-2005, 2007-2008, 2011-2015 Free Software # Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltsugar.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) # lt_join(SEP, ARG1, [ARG2...]) # ----------------------------- # Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their # associated separator. # Needed until we can rely on m4_join from Autoconf 2.62, since all earlier # versions in m4sugar had bugs. 
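# Illustrative expansion (an assumption based on the description above):
#   lt_join([,], [a], [], [b])  =>  a,b
# the empty argument and its associated separator are dropped.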
m4_define([lt_join], [m4_if([$#], [1], [], [$#], [2], [[$2]], [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) m4_define([_lt_join], [m4_if([$#$2], [2], [], [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) # lt_car(LIST) # lt_cdr(LIST) # ------------ # Manipulate m4 lists. # These macros are necessary as long as we still need to support # Autoconf-2.59, which quotes differently. m4_define([lt_car], [[$1]]) m4_define([lt_cdr], [m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], [$#], 1, [], [m4_dquote(m4_shift($@))])]) m4_define([lt_unquote], $1) # lt_append(MACRO-NAME, STRING, [SEPARATOR]) # ------------------------------------------ # Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'. # Note that neither SEPARATOR nor STRING are expanded; they are appended # to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). # No SEPARATOR is output if MACRO-NAME was previously undefined (different # than defined and empty). # # This macro is needed until we can rely on Autoconf 2.62, since earlier # versions of m4sugar mistakenly expanded SEPARATOR but not STRING. m4_define([lt_append], [m4_define([$1], m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) # ---------------------------------------------------------- # Produce a SEP delimited list of all paired combinations of elements of # PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list # has the form PREFIXmINFIXSUFFIXn. # Needed until we can rely on m4_combine added in Autoconf 2.62. m4_define([lt_combine], [m4_if(m4_eval([$# > 3]), [1], [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl [[m4_foreach([_Lt_prefix], [$2], [m4_foreach([_Lt_suffix], ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) # lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) # ----------------------------------------------------------------------- # Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
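# E.g. (hypothetical names, a sketch of the behaviour described above):
#   lt_if_append_uniq([my_opts], [-lm], [ ], [added], [dup])
# expands to 'added' on first use, and to 'dup' once my_opts already
# contains -lm.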
m4_define([lt_if_append_uniq], [m4_ifdef([$1], [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], [lt_append([$1], [$2], [$3])$4], [$5])], [lt_append([$1], [$2], [$3])$4])]) # lt_dict_add(DICT, KEY, VALUE) # ----------------------------- m4_define([lt_dict_add], [m4_define([$1($2)], [$3])]) # lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) # -------------------------------------------- m4_define([lt_dict_add_subkey], [m4_define([$1($2:$3)], [$4])]) # lt_dict_fetch(DICT, KEY, [SUBKEY]) # ---------------------------------- m4_define([lt_dict_fetch], [m4_ifval([$3], m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) # lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) # ----------------------------------------------------------------- m4_define([lt_if_dict_fetch], [m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], [$5], [$6])]) # lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) # -------------------------------------------------------------- m4_define([lt_dict_filter], [m4_if([$5], [], [], [lt_join(m4_quote(m4_default([$4], [[, ]])), lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) nordugrid-arc-7.1.1/m4/PaxHeaders/lib-link.m40000644000000000000000000000013215067751332015632 xustar0030 mtime=1759498970.065302813 30 atime=1759498971.439547959 30 ctime=1759499024.674205912 nordugrid-arc-7.1.1/m4/lib-link.m40000644000175000002070000007205515067751332017545 0ustar00mockbuildmock00000000000000# lib-link.m4 serial 13 (gettext-0.17) dnl Copyright (C) 2001-2007 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. AC_PREREQ(2.54) dnl AC_LIB_LINKFLAGS(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets and AC_SUBSTs the LIB${NAME} and LTLIB${NAME} variables and dnl augments the CPPFLAGS variable. dnl Sets and AC_SUBSTs the LIB${NAME}_PREFIX variable to nonempty if libname dnl was found in ${LIB${NAME}_PREFIX}/$acl_libdirstem. AC_DEFUN([AC_LIB_LINKFLAGS], [ AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) define([Name],[translit([$1],[./-], [___])]) define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) AC_CACHE_CHECK([how to link with lib[]$1], [ac_cv_lib[]Name[]_libs], [ AC_LIB_LINKFLAGS_BODY([$1], [$2]) ac_cv_lib[]Name[]_libs="$LIB[]NAME" ac_cv_lib[]Name[]_ltlibs="$LTLIB[]NAME" ac_cv_lib[]Name[]_cppflags="$INC[]NAME" ac_cv_lib[]Name[]_prefix="$LIB[]NAME[]_PREFIX" ]) LIB[]NAME="$ac_cv_lib[]Name[]_libs" LTLIB[]NAME="$ac_cv_lib[]Name[]_ltlibs" INC[]NAME="$ac_cv_lib[]Name[]_cppflags" LIB[]NAME[]_PREFIX="$ac_cv_lib[]Name[]_prefix" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME) AC_SUBST([LIB]NAME) AC_SUBST([LTLIB]NAME) AC_SUBST([LIB]NAME[_PREFIX]) dnl Also set HAVE_LIB[]NAME so that AC_LIB_HAVE_LINKFLAGS can reuse the dnl results of this search when this library appears as a dependency. HAVE_LIB[]NAME=yes undefine([Name]) undefine([NAME]) ]) dnl AC_LIB_HAVE_LINKFLAGS(name, dependencies, includes, testcode) dnl searches for libname and the libraries corresponding to explicit and dnl implicit dependencies, together with the specified include files and dnl the ability to compile and link the specified testcode. 
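dnl Illustrative invocations (library names are examples only, not part of
dnl this file):
dnl   AC_LIB_LINKFLAGS([iconv])   -> sets LIBICONV, LTLIBICONV, INCICONV
dnl   AC_LIB_HAVE_LINKFLAGS([z], [], [#include <zlib.h>], [zlibVersion();])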
If found, it dnl sets and AC_SUBSTs HAVE_LIB${NAME}=yes and the LIB${NAME} and dnl LTLIB${NAME} variables and augments the CPPFLAGS variable, and dnl #defines HAVE_LIB${NAME} to 1. Otherwise, it sets and AC_SUBSTs dnl HAVE_LIB${NAME}=no and LIB${NAME} and LTLIB${NAME} to empty. dnl Sets and AC_SUBSTs the LIB${NAME}_PREFIX variable to nonempty if libname dnl was found in ${LIB${NAME}_PREFIX}/$acl_libdirstem. AC_DEFUN([AC_LIB_HAVE_LINKFLAGS], [ AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) define([Name],[translit([$1],[./-], [___])]) define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) dnl Search for lib[]Name and define LIB[]NAME, LTLIB[]NAME and INC[]NAME dnl accordingly. AC_LIB_LINKFLAGS_BODY([$1], [$2]) dnl Add $INC[]NAME to CPPFLAGS before performing the following checks, dnl because if the user has installed lib[]Name and not disabled its use dnl via --without-lib[]Name-prefix, he wants to use it. ac_save_CPPFLAGS="$CPPFLAGS" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INC]NAME) AC_CACHE_CHECK([for lib[]$1], [ac_cv_lib[]Name], [ ac_save_LIBS="$LIBS" LIBS="$LIBS $LIB[]NAME" AC_TRY_LINK([$3], [$4], [ac_cv_lib[]Name=yes], [ac_cv_lib[]Name=no]) LIBS="$ac_save_LIBS" ]) if test "$ac_cv_lib[]Name" = yes; then HAVE_LIB[]NAME=yes AC_DEFINE([HAVE_LIB]NAME, 1, [Define if you have the $1 library.]) AC_MSG_CHECKING([how to link with lib[]$1]) AC_MSG_RESULT([$LIB[]NAME]) else HAVE_LIB[]NAME=no dnl If $LIB[]NAME didn't lead to a usable library, we don't need dnl $INC[]NAME either. CPPFLAGS="$ac_save_CPPFLAGS" LIB[]NAME= LTLIB[]NAME= LIB[]NAME[]_PREFIX= fi AC_SUBST([HAVE_LIB]NAME) AC_SUBST([LIB]NAME) AC_SUBST([LTLIB]NAME) AC_SUBST([LIB]NAME[_PREFIX]) undefine([Name]) undefine([NAME]) ]) dnl Determine the platform dependent parameters needed to use rpath: dnl acl_libext, dnl acl_shlibext, dnl acl_hardcode_libdir_flag_spec, dnl acl_hardcode_libdir_separator, dnl acl_hardcode_direct, dnl acl_hardcode_minus_L. AC_DEFUN([AC_LIB_RPATH], [ dnl Tell automake >= 1.10 to complain if config.rpath is missing. m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([config.rpath])]) AC_REQUIRE([AC_PROG_CC]) dnl we use $CC, $GCC, $LDFLAGS AC_REQUIRE([AC_LIB_PROG_LD]) dnl we use $LD, $with_gnu_ld AC_REQUIRE([AC_CANONICAL_HOST]) dnl we use $host AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT]) dnl we use $ac_aux_dir AC_CACHE_CHECK([for shared library run path origin], acl_cv_rpath, [ CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done ]) wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" acl_libname_spec="$acl_cv_libname_spec" acl_library_names_spec="$acl_cv_library_names_spec" acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" dnl Determine whether the user wants rpath handling at all. AC_ARG_ENABLE(rpath, [ --disable-rpath do not hardcode runtime library paths], :, enable_rpath=yes) ]) dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. dnl Also, sets the LIB${NAME}_PREFIX variable to nonempty if libname was found dnl in ${LIB${NAME}_PREFIX}/$acl_libdirstem. 
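dnl Note: unlike AC_LIB_LINKFLAGS above, this _BODY variant performs no
dnl caching and AC_SUBSTs nothing; callers are expected to wrap it, as
dnl AC_LIB_LINKFLAGS and AC_LIB_HAVE_LINKFLAGS do.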
AC_DEFUN([AC_LIB_LINKFLAGS_BODY], [ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) define([NAME],[translit([$1],[abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])]) dnl Autoconf >= 2.61 supports dots in --with options. define([N_A_M_E],[m4_if(m4_version_compare(m4_defn([m4_PACKAGE_VERSION]),[2.61]),[-1],[translit([$1],[.],[_])],[$1])]) dnl By default, look in $includedir and $libdir. use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_LIB_ARG_WITH([lib]N_A_M_E[-prefix], [ --with-lib]N_A_M_E[-prefix[=DIR] search for lib$1 in DIR/include and DIR/lib --without-lib]N_A_M_E[-prefix don't search for lib$1 in includedir and libdir], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi ]) dnl Search the library and its dependencies in $additional_libdir and dnl $LDFLAGS. Using breadth-first-seach. LIB[]NAME= LTLIB[]NAME= INC[]NAME= LIB[]NAME[]_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='$1 $2' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" dnl See if it was already located by an earlier AC_LIB_LINKFLAGS dnl or AC_LIB_HAVE_LINKFLAGS call. uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./-|ABCDEFGHIJKLMNOPQRSTUVWXYZ___|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" else dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined dnl that this library doesn't exist. So just drop it. : fi else dnl Search the library lib$name in $additional_libdir and $LDFLAGS dnl and the already constructed $LIBNAME/$LTLIBNAME. found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then dir="$additional_libdir" dnl The same code as in the loop below: dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. 
if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then dnl Found the library. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then dnl Linking with a shared library. We attempt to hardcode its dnl directory into the executable's runpath, unless it's the dnl standard /usr/lib. if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then dnl No hardcoding is needed. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl Use an explicit option to hardcode DIR into the resulting dnl binary. dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi dnl The hardcoding into $LIBNAME is system dependent. if test "$acl_hardcode_direct" = yes; then dnl Using DIR/libNAME.so during linking hardcodes DIR into the dnl resulting binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode DIR into the resulting dnl binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else dnl Rely on "-L$found_dir". dnl But don't add it if it's already contained in the LDFLAGS dnl or the already constructed $LIBNAME haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. 
LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl We cannot use $acl_hardcode_runpath_var and LD_RUN_PATH dnl here, because this doesn't fit in flags passed to the dnl compiler. So give up. No hardcoding. This affects only dnl very old systems. dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then dnl Linking with a static library. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" else dnl We shouldn't come here, but anyway it's good to have a dnl fallback. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" fi fi dnl Assume the include files are nearby. additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` LIB[]NAME[]_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then dnl Potentially add $additional_includedir to $INCNAME. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's /usr/local/include and we are using GCC on Linux, dnl 3. if it's already present in $CPPFLAGS or the already dnl constructed $INCNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INC[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $INCNAME. INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" fi fi fi fi fi dnl Look for dependencies. if test -n "$found_la"; then dnl Read the .la file. It defines the variables dnl dlname, library_names, old_library, dependency_libs, current, dnl age, revision, installed, dlopen, dlpreopen, libdir. save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" dnl We use only dependency_libs. for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` dnl Potentially add $additional_libdir to $LIBNAME and $LTLIBNAME. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's /usr/local/lib and we are using GCC on Linux, dnl 3. if it's already present in $LDFLAGS or the already dnl constructed $LIBNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LIBNAME. 
LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LTLIBNAME. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) dnl Handle this in the next round. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) dnl Handle this in the next round. Throw away the .la's dnl directory; it is already contained in a preceding -L dnl option. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) dnl Most likely an immediate library name. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" ;; esac done fi else dnl Didn't find the library; assume it is in the system directories dnl known to the linker and runtime loader. (All the system dnl directories known to the linker should also be known to the dnl runtime loader, otherwise the system is severely misconfigured.) LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user must dnl pass all path elements in one option. We can arrange that for a dnl single library, but not when more than one $LIBNAMEs are used. alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done dnl Note: acl_hardcode_libdir_flag_spec uses $libdir and $wl. acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" else dnl The -rpath options are cumulative. for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then dnl When using libtool, the option that works for both libraries and dnl executables is -R. The -R options are cumulative. for found_dir in $ltrpathdirs; do LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" done fi ]) dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, dnl unless already present in VAR. dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes dnl contains two or three consecutive elements that belong together. 
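dnl For example (illustrative): AC_LIB_APPENDTOVAR([CPPFLAGS],
dnl [-I/opt/foo/include]) appends the flag only if CPPFLAGS does not
dnl already contain that exact element.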
AC_DEFUN([AC_LIB_APPENDTOVAR], [ for element in [$2]; do haveit= for x in $[$1]; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then [$1]="${[$1]}${[$1]:+ }$element" fi done ]) dnl For those cases where a variable contains several -L and -l options dnl referring to unknown libraries and directories, this macro determines the dnl necessary additional linker options for the runtime path. dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL]) dnl sets LDADDVAR to linker options needed together with LIBSVALUE. dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed, dnl otherwise linking without libtool is assumed. AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS], [ AC_REQUIRE([AC_LIB_RPATH]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) $1= if test "$enable_rpath" != no; then if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode directories into the resulting dnl binary. rpathdirs= next= for opt in $2; do if test -n "$next"; then dir="$next" dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem"; then rpathdirs="$rpathdirs $dir" fi next= else case $opt in -L) next=yes ;; -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'` dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem"; then rpathdirs="$rpathdirs $dir" fi next= ;; *) next= ;; esac fi done if test "X$rpathdirs" != "X"; then if test -n ""$3""; then dnl libtool is used for linking. Use -R options. for dir in $rpathdirs; do $1="${$1}${$1:+ }-R$dir" done else dnl The linker is used for linking directly. if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user dnl must pass all path elements in one option. alldirs= for dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="$flag" else dnl The -rpath options are cumulative. for dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="${$1}${$1:+ }$flag" done fi fi fi fi fi AC_SUBST([$1]) ]) nordugrid-arc-7.1.1/m4/PaxHeaders/intlmacosx.m40000644000000000000000000000013115067751331016310 xustar0030 mtime=1759498969.989076755 30 atime=1759498971.442548005 29 ctime=1759499024.67221604 nordugrid-arc-7.1.1/m4/intlmacosx.m40000644000175000002070000000456515067751331020225 0ustar00mockbuildmock00000000000000# intlmacosx.m4 serial 1 (gettext-0.17) dnl Copyright (C) 2004-2007 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Checks for special options needed on MacOS X. dnl Defines INTL_MACOSX_LIBS. 
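dnl Usage sketch: gt_INTL_MACOSX is normally pulled in via AC_REQUIRE from
dnl the gettext macros; when either CoreFoundation API below is detected,
dnl link lines gain "-Wl,-framework -Wl,CoreFoundation" through the
dnl substituted INTL_MACOSX_LIBS.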
AC_DEFUN([gt_INTL_MACOSX], [ dnl Check for API introduced in MacOS X 10.2. AC_CACHE_CHECK([for CFPreferencesCopyAppValue], gt_cv_func_CFPreferencesCopyAppValue, [gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" AC_TRY_LINK([#include <CoreFoundation/CFPreferences.h>], [CFPreferencesCopyAppValue(NULL, NULL)], [gt_cv_func_CFPreferencesCopyAppValue=yes], [gt_cv_func_CFPreferencesCopyAppValue=no]) LIBS="$gt_save_LIBS"]) if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then AC_DEFINE([HAVE_CFPREFERENCESCOPYAPPVALUE], 1, [Define to 1 if you have the MacOS X function CFPreferencesCopyAppValue in the CoreFoundation framework.]) fi dnl Check for API introduced in MacOS X 10.3. AC_CACHE_CHECK([for CFLocaleCopyCurrent], gt_cv_func_CFLocaleCopyCurrent, [gt_save_LIBS="$LIBS" LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" AC_TRY_LINK([#include <CoreFoundation/CFLocale.h>], [CFLocaleCopyCurrent();], [gt_cv_func_CFLocaleCopyCurrent=yes], [gt_cv_func_CFLocaleCopyCurrent=no]) LIBS="$gt_save_LIBS"]) if test $gt_cv_func_CFLocaleCopyCurrent = yes; then AC_DEFINE([HAVE_CFLOCALECOPYCURRENT], 1, [Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the CoreFoundation framework.]) fi INTL_MACOSX_LIBS= if test $gt_cv_func_CFPreferencesCopyAppValue = yes || test $gt_cv_func_CFLocaleCopyCurrent = yes; then INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" fi AC_SUBST([INTL_MACOSX_LIBS]) ]) nordugrid-arc-7.1.1/m4/PaxHeaders/lib-prefix.m40000644000000000000000000000013215067751332016172 xustar0030 mtime=1759498970.077918259 30 atime=1759498971.439547959 30 ctime=1759499024.675223031 nordugrid-arc-7.1.1/m4/lib-prefix.m40000644000175000002070000001503615067751332020101 0ustar00mockbuildmock00000000000000# lib-prefix.m4 serial 5 (gettext-0.15) dnl Copyright (C) 2001-2005 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. dnl AC_LIB_ARG_WITH is synonymous to AC_ARG_WITH in autoconf-2.13, and dnl similar to AC_ARG_WITH in autoconf 2.52...2.57 except that it doesn't dnl require excessive bracketing. ifdef([AC_HELP_STRING], [AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[[$2]],[$3],[$4])])], [AC_DEFUN([AC_][LIB_ARG_WITH], [AC_ARG_WITH([$1],[$2],[$3],[$4])])]) dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed dnl to access previously installed libraries. The basic assumption is that dnl a user will want packages to use other packages he previously installed dnl with the same --prefix option. dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate dnl libraries, but is otherwise very convenient. AC_DEFUN([AC_LIB_PREFIX], [ AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) AC_REQUIRE([AC_PROG_CC]) AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) dnl By default, look in $includedir and $libdir. 
use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_LIB_ARG_WITH([lib-prefix], [ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib --without-lib-prefix don't search for libraries in includedir and libdir], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi ]) if test $use_additional = yes; then dnl Potentially add $additional_includedir to $CPPFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's already present in $CPPFLAGS, dnl 3. if it's /usr/local/include and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= for x in $CPPFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $CPPFLAGS. CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" fi fi fi fi dnl Potentially add $additional_libdir to $LDFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's already present in $LDFLAGS, dnl 3. if it's /usr/local/lib and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= for x in $LDFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LDFLAGS. LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" fi fi fi fi fi ]) dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, dnl acl_final_exec_prefix, containing the values to which $prefix and dnl $exec_prefix will expand at the end of the configure script. AC_DEFUN([AC_LIB_PREPARE_PREFIX], [ dnl Unfortunately, prefix and exec_prefix get only finally determined dnl at the end of configure. if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" ]) dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the dnl variables prefix and exec_prefix bound to the values they will have dnl at the end of the configure script. 
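dnl For example (illustrative):
dnl   AC_LIB_WITH_FINAL_PREFIX([eval final_libdir=\"$libdir\"])
dnl expands $libdir with prefix/exec_prefix temporarily bound to their
dnl final values, restoring the real variables afterwards.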
AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], [ acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" $1 exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" ]) dnl AC_LIB_PREPARE_MULTILIB creates a variable acl_libdirstem, containing dnl the basename of the libdir, either "lib" or "lib64". AC_DEFUN([AC_LIB_PREPARE_MULTILIB], [ dnl There is no formal standard regarding lib and lib64. The current dnl practice is that on a system supporting 32-bit and 64-bit instruction dnl sets or ABIs, 64-bit libraries go under $prefix/lib64 and 32-bit dnl libraries go under $prefix/lib. We determine the compiler's default dnl mode by looking at the compiler's library search path. If at least one dnl of its elements ends in /lib64 or points to a directory whose absolute dnl pathname ends in /lib64, we assume a 64-bit ABI. Otherwise we use the dnl default, namely "lib". acl_libdirstem=lib searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` if test -n "$searchpath"; then acl_save_IFS="${IFS= }"; IFS=":" for searchdir in $searchpath; do if test -d "$searchdir"; then case "$searchdir" in */lib64/ | */lib64 ) acl_libdirstem=lib64 ;; *) searchdir=`cd "$searchdir" && pwd` case "$searchdir" in */lib64 ) acl_libdirstem=lib64 ;; esac ;; esac fi done IFS="$acl_save_IFS" fi ]) nordugrid-arc-7.1.1/m4/PaxHeaders/lib-ld.m40000644000000000000000000000013215067751332015274 xustar0030 mtime=1759498970.053236468 30 atime=1759498971.441547989 30 ctime=1759499024.673190847 nordugrid-arc-7.1.1/m4/lib-ld.m40000644000175000002070000000653115067751332017203 0ustar00mockbuildmock00000000000000# lib-ld.m4 serial 3 (gettext-0.13) dnl Copyright (C) 1996-2003 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl Subroutines of libtool.m4, dnl with replacements s/AC_/AC_LIB/ and s/lt_cv/acl_cv/ to avoid collision dnl with libtool.m4. dnl From libtool-1.4. Sets the variable with_gnu_ld to yes or no. AC_DEFUN([AC_LIB_PROG_LD_GNU], [AC_CACHE_CHECK([if the linker ($LD) is GNU ld], acl_cv_prog_gnu_ld, [# I'd rather use --version here, but apparently some GNU ld's only accept -v. case `$LD -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) acl_cv_prog_gnu_ld=yes ;; *) acl_cv_prog_gnu_ld=no ;; esac]) with_gnu_ld=$acl_cv_prog_gnu_ld ]) dnl From libtool-1.4. Sets the variable LD. AC_DEFUN([AC_LIB_PROG_LD], [AC_ARG_WITH(gnu-ld, [ --with-gnu-ld assume the C compiler uses GNU ld [default=no]], test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no) AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by GCC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]* | [A-Za-z]:[\\/]*)] [re_direlt='/[^/][^/]*/\.\./'] # Canonicalize the path of ld ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'` while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH.
with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(acl_cv_path_LD, [if test -z "$LD"; then IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" for ac_dir in $PATH; do test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some GNU ld's only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 < /dev/null` in *GNU* | *'with BFD'*) test "$with_gnu_ld" != no && break ;; *) test "$with_gnu_ld" != yes && break ;; esac fi done IFS="$ac_save_ifs" else acl_cv_path_LD="$LD" # Let the user override the test with a path. fi]) LD="$acl_cv_path_LD" if test -n "$LD"; then AC_MSG_RESULT($LD) else AC_MSG_RESULT(no) fi test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH]) AC_LIB_PROG_LD_GNU ]) nordugrid-arc-7.1.1/m4/PaxHeaders/arc_paths.m40000644000000000000000000000013115067751327016100 xustar0030 mtime=1759498967.642740224 30 atime=1759498967.808492785 29 ctime=1759499024.66706331 nordugrid-arc-7.1.1/m4/arc_paths.m40000644000175000002070000000522615067751327020010 0ustar00mockbuildmock00000000000000dnl dnl Substitute some relative paths dnl AC_DEFUN([ARC_RELATIVE_PATHS], [ AC_REQUIRE([ARC_RELATIVE_PATHS_INIT]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_LIB_WITH_FINAL_PREFIX([ eval instprefix="\"${exec_prefix}\"" eval arc_libdir="\"${libdir}\"" eval arc_bindir="\"${bindir}\"" eval arc_sbindir="\"${sbindir}\"" eval arc_pkglibdir="\"${libdir}/arc\"" eval arc_pkglibexecdir="\"${libexecdir}/arc\"" # arc_datadir must be evaluated twice to be fully expanded.
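# A worked illustration (assuming the stock Autoconf defaults
# datadir='${datarootdir}' and datarootdir='${prefix}/share'): the first
# eval of "${datadir}/arc" still leaves '${prefix}/share/arc' behind,
# and only the second eval resolves ${prefix} into a concrete path such
# as /usr/local/share/arc.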
eval arc_datadir="\"${datadir}/arc\"" eval arc_datadir="\"${arc_datadir}\"" ]) libsubdir=`get_relative_path "$instprefix" "$arc_libdir"` pkglibsubdir=`get_relative_path "$instprefix" "$arc_pkglibdir"` pkglibexecsubdir=`get_relative_path "$instprefix" "$arc_pkglibexecdir"` pkgdatasubdir=`get_relative_path "$instprefix" "$arc_datadir"` pkglibdir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_pkglibdir"` sbindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_sbindir"` bindir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_bindir"` pkgdatadir_rel_to_pkglibexecdir=`get_relative_path "$arc_pkglibexecdir" "$arc_datadir"` AC_MSG_NOTICE([pkglib subdirectory is: $pkglibsubdir]) AC_MSG_NOTICE([pkglibexec subdirectory is: $pkglibexecsubdir]) AC_MSG_NOTICE([relative path of pkglib to pkglibexec is: $pkglibdir_rel_to_pkglibexecdir]) AC_SUBST([libsubdir]) AC_SUBST([pkglibsubdir]) AC_SUBST([pkglibexecsubdir]) AC_SUBST([pkglibdir_rel_to_pkglibexecdir]) AC_SUBST([sbindir_rel_to_pkglibexecdir]) AC_SUBST([bindir_rel_to_pkglibexecdir]) AC_SUBST([pkgdatadir_rel_to_pkglibexecdir]) AC_SUBST([pkgdatasubdir]) AC_DEFINE_UNQUOTED([INSTPREFIX], ["${instprefix}"], [installation prefix]) AC_DEFINE_UNQUOTED([LIBSUBDIR], ["${libsubdir}"], [library installation subdirectory]) AC_DEFINE_UNQUOTED([PKGLIBSUBDIR], ["${pkglibsubdir}"], [plugin installation subdirectory]) AC_DEFINE_UNQUOTED([PKGLIBEXECSUBDIR], ["${pkglibexecsubdir}"], [helper programs installation subdirectory]) AC_DEFINE_UNQUOTED([PKGDATASUBDIR], ["${pkgdatasubdir}"], [package data subdirectory]) ]) AC_DEFUN([ARC_RELATIVE_PATHS_INIT], [ get_relative_path() { olddir=`echo $[]1 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` newdir=`echo $[]2 | sed -e 's|/+|/|g' -e 's|^/||' -e 's|/*$|/|'` O_IFS=$IFS IFS=/ relative="" common="" for i in $olddir; do if echo "$newdir" | grep -q "^$common$i/"; then common="$common$i/" else relative="../$relative" fi done IFS=$O_IFS echo $newdir | sed "s|^$common|$relative|" | sed 's|/*$||' } ]) nordugrid-arc-7.1.1/m4/PaxHeaders/ltoptions.m40000644000000000000000000000013115067751340016162 xustar0030 mtime=1759498976.187471832 29 atime=1759498976.72162822 30 ctime=1759499024.677857642 nordugrid-arc-7.1.1/m4/ltoptions.m40000644000175000002070000003426215067751340020074 0ustar00mockbuildmock00000000000000# Helper functions for option handling. -*- Autoconf -*- # # Copyright (C) 2004-2005, 2007-2009, 2011-2015 Free Software # Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 8 ltoptions.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) # _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) # ------------------------------------------ m4_define([_LT_MANGLE_OPTION], [[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) # _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) # --------------------------------------- # Set option OPTION-NAME for macro MACRO-NAME, and if there is a # matching handler defined, dispatch to it. Other OPTION-NAMEs are # saved as a flag. 
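dnl For illustration, derived from _LT_MANGLE_OPTION above:
dnl _LT_MANGLE_OPTION([LT_INIT], [win32-dll]) expands to the m4 symbol
dnl _LT_OPTION_LT_INIT__win32_dll, because m4_bpatsubst rewrites every
dnl character outside [a-zA-Z0-9_] to '_'.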
m4_define([_LT_SET_OPTION], [m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), _LT_MANGLE_DEFUN([$1], [$2]), [m4_warning([Unknown $1 option '$2'])])[]dnl ]) # _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) # ------------------------------------------------------------ # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. m4_define([_LT_IF_OPTION], [m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) # _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) # ------------------------------------------------------- # Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME # are set. m4_define([_LT_UNLESS_OPTIONS], [m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), [m4_define([$0_found])])])[]dnl m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ])[]dnl ]) # _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) # ---------------------------------------- # OPTION-LIST is a space-separated list of Libtool options associated # with MACRO-NAME. If any OPTION has a matching handler declared with # LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about # the unknown option and exit. m4_defun([_LT_SET_OPTIONS], [# Set options m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [_LT_SET_OPTION([$1], _LT_Option)]) m4_if([$1],[LT_INIT],[ dnl dnl Simply set some default values (i.e off) if boolean options were not dnl specified: _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ]) _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ]) dnl dnl If no reference was made to various pairs of opposing options, then dnl we run the default mode handler for the pair. For example, if neither dnl 'shared' nor 'disable-shared' was passed, we enable building of shared dnl archives by default: _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], [_LT_ENABLE_FAST_INSTALL]) _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4], [_LT_WITH_AIX_SONAME([aix])]) ]) ])# _LT_SET_OPTIONS ## --------------------------------- ## ## Macros to handle LT_INIT options. ## ## --------------------------------- ## # _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) # ----------------------------------------- m4_define([_LT_MANGLE_DEFUN], [[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) # LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) # ----------------------------------------------- m4_define([LT_OPTION_DEFINE], [m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ])# LT_OPTION_DEFINE # dlopen # ------ LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ]) AU_DEFUN([AC_LIBTOOL_DLOPEN], [_LT_SET_OPTION([LT_INIT], [dlopen]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'dlopen' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) # win32-dll # --------- # Declare package support for building win32 dll's. 
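dnl A usage sketch: LT_INIT([win32-dll]) in configure.ac selects this
dnl option; on cygwin/mingw/cegcc hosts the handler below then probes
dnl for the as, dlltool and objdump tools needed for DLL creation.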
LT_OPTION_DEFINE([LT_INIT], [win32-dll], [enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) AC_CHECK_TOOL(AS, as, false) AC_CHECK_TOOL(DLLTOOL, dlltool, false) AC_CHECK_TOOL(OBJDUMP, objdump, false) ;; esac test -z "$AS" && AS=as _LT_DECL([], [AS], [1], [Assembler program])dnl test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl ])# win32-dll AU_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_REQUIRE([AC_CANONICAL_HOST])dnl _LT_SET_OPTION([LT_INIT], [win32-dll]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'win32-dll' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) # _LT_ENABLE_SHARED([DEFAULT]) # ---------------------------- # implement the --enable-shared flag, and supports the 'shared' and # 'disable-shared' LT_INIT options. # DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. m4_define([_LT_ENABLE_SHARED], [m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS=$lt_save_ifs ;; esac], [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) _LT_DECL([build_libtool_libs], [enable_shared], [0], [Whether or not to build shared libraries]) ])# _LT_ENABLE_SHARED LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) # Old names: AC_DEFUN([AC_ENABLE_SHARED], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ]) AC_DEFUN([AC_DISABLE_SHARED], [_LT_SET_OPTION([LT_INIT], [disable-shared]) ]) AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_SHARED], []) dnl AC_DEFUN([AM_DISABLE_SHARED], []) # _LT_ENABLE_STATIC([DEFAULT]) # ---------------------------- # implement the --enable-static flag, and support the 'static' and # 'disable-static' LT_INIT options. # DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. m4_define([_LT_ENABLE_STATIC], [m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS=$lt_save_ifs ;; esac], [enable_static=]_LT_ENABLE_STATIC_DEFAULT) _LT_DECL([build_old_libs], [enable_static], [0], [Whether or not to build static libraries]) ])# _LT_ENABLE_STATIC LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) # Old names: AC_DEFUN([AC_ENABLE_STATIC], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ]) AC_DEFUN([AC_DISABLE_STATIC], [_LT_SET_OPTION([LT_INIT], [disable-static]) ]) AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_STATIC], []) dnl AC_DEFUN([AM_DISABLE_STATIC], []) # _LT_ENABLE_FAST_INSTALL([DEFAULT]) # ---------------------------------- # implement the --enable-fast-install flag, and support the 'fast-install' # and 'disable-fast-install' LT_INIT options. # DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. m4_define([_LT_ENABLE_FAST_INSTALL], [m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([fast-install], [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS=$lt_save_ifs ;; esac], [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) _LT_DECL([fast_install], [enable_fast_install], [0], [Whether or not to optimize for fast installation])dnl ])# _LT_ENABLE_FAST_INSTALL LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) # Old names: AU_DEFUN([AC_ENABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'fast-install' option into LT_INIT's first parameter.]) ]) AU_DEFUN([AC_DISABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], [disable-fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'disable-fast-install' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) # _LT_WITH_AIX_SONAME([DEFAULT]) # ---------------------------------- # implement the --with-aix-soname flag, and support the `aix-soname=aix' # and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT # is either `aix', `both' or `svr4'. If omitted, it defaults to `aix'. 
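dnl A usage sketch: LT_INIT([aix-soname=svr4]) makes svr4 the default
dnl variant; on power*-*-aix hosts the handler below still lets the
dnl builder override it with --with-aix-soname=aix|svr4|both at
dnl configure time.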
m4_define([_LT_WITH_AIX_SONAME], [m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl shared_archive_member_spec= case $host,$enable_shared in power*-*-aix[[5-9]]*,yes) AC_MSG_CHECKING([which variant of shared library versioning to provide]) AC_ARG_WITH([aix-soname], [AS_HELP_STRING([--with-aix-soname=aix|svr4|both], [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])], [case $withval in aix|svr4|both) ;; *) AC_MSG_ERROR([Unknown argument to --with-aix-soname]) ;; esac lt_cv_with_aix_soname=$with_aix_soname], [AC_CACHE_VAL([lt_cv_with_aix_soname], [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT) with_aix_soname=$lt_cv_with_aix_soname]) AC_MSG_RESULT([$with_aix_soname]) if test aix != "$with_aix_soname"; then # For the AIX way of multilib, we name the shared archive member # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, # the AIX toolchain works better with OBJECT_MODE set (default 32). if test 64 = "${OBJECT_MODE-32}"; then shared_archive_member_spec=shr_64 else shared_archive_member_spec=shr fi fi ;; *) with_aix_soname=aix ;; esac _LT_DECL([], [shared_archive_member_spec], [0], [Shared archive member basename, for filename based shared library versioning on AIX])dnl ])# _LT_WITH_AIX_SONAME LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])]) LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])]) LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])]) # _LT_WITH_PIC([MODE]) # -------------------- # implement the --with-pic flag, and support the 'pic-only' and 'no-pic' # LT_INIT options. # MODE is either 'yes' or 'no'. If omitted, it defaults to 'both'. m4_define([_LT_WITH_PIC], [AC_ARG_WITH([pic], [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], [lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. 
lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for lt_pkg in $withval; do IFS=$lt_save_ifs if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS=$lt_save_ifs ;; esac], [pic_mode=m4_default([$1], [default])]) _LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ])# _LT_WITH_PIC LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) # Old name: AU_DEFUN([AC_LIBTOOL_PICMODE], [_LT_SET_OPTION([LT_INIT], [pic-only]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'pic-only' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) ## ----------------- ## ## LTDL_INIT Options ## ## ----------------- ## m4_define([_LTDL_MODE], []) LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], [m4_define([_LTDL_MODE], [nonrecursive])]) LT_OPTION_DEFINE([LTDL_INIT], [recursive], [m4_define([_LTDL_MODE], [recursive])]) LT_OPTION_DEFINE([LTDL_INIT], [subproject], [m4_define([_LTDL_MODE], [subproject])]) m4_define([_LTDL_TYPE], []) LT_OPTION_DEFINE([LTDL_INIT], [installable], [m4_define([_LTDL_TYPE], [installable])]) LT_OPTION_DEFINE([LTDL_INIT], [convenience], [m4_define([_LTDL_TYPE], [convenience])]) nordugrid-arc-7.1.1/m4/PaxHeaders/libtool.m40000644000000000000000000000013215067751340015574 xustar0030 mtime=1759498976.100862697 30 atime=1759498976.724628266 30 ctime=1759499024.676591854 nordugrid-arc-7.1.1/m4/libtool.m40000644000175000002070000112530615067751340017506 0ustar00mockbuildmock00000000000000# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- # # Copyright (C) 1996-2001, 2003-2015 Free Software Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. m4_define([_LT_COPYING], [dnl # Copyright (C) 2014 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program or library that is built # using GNU Libtool, you may include this file under the same # distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ]) # serial 58 LT_INIT # LT_PREREQ(VERSION) # ------------------ # Complain and exit if this libtool version is less than VERSION.
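# A usage sketch (the version number is hypothetical): LT_PREREQ([2.4.2])
# near the top of configure.ac aborts with a fatal m4 error when the
# libtool macros in use are older than 2.4.2.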
m4_defun([LT_PREREQ], [m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, [m4_default([$3], [m4_fatal([Libtool version $1 or higher is required], 63)])], [$2])]) # _LT_CHECK_BUILDDIR # ------------------ # Complain if the absolute build directory name contains unusual characters m4_defun([_LT_CHECK_BUILDDIR], [case `pwd` in *\ * | *\ *) AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; esac ]) # LT_INIT([OPTIONS]) # ------------------ AC_DEFUN([LT_INIT], [AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl AC_BEFORE([$0], [LT_LANG])dnl AC_BEFORE([$0], [LT_OUTPUT])dnl AC_BEFORE([$0], [LTDL_INIT])dnl m4_require([_LT_CHECK_BUILDDIR])dnl dnl Autoconf doesn't catch unexpanded LT_ macros by default: m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 dnl unless we require an AC_DEFUNed macro: AC_REQUIRE([LTOPTIONS_VERSION])dnl AC_REQUIRE([LTSUGAR_VERSION])dnl AC_REQUIRE([LTVERSION_VERSION])dnl AC_REQUIRE([LTOBSOLETE_VERSION])dnl m4_require([_LT_PROG_LTMAIN])dnl _LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) dnl Parse OPTIONS _LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed LIBTOOL_DEPS=$ltmain # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl _LT_SETUP # Only expand once: m4_define([LT_INIT]) ])# LT_INIT # Old names: AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PROG_LIBTOOL], []) dnl AC_DEFUN([AM_PROG_LIBTOOL], []) # _LT_PREPARE_CC_BASENAME # ----------------------- m4_defun([_LT_PREPARE_CC_BASENAME], [ # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in @S|@*""; do case $cc_temp in compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } ])# _LT_PREPARE_CC_BASENAME # _LT_CC_BASENAME(CC) # ------------------- # It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME, # but that macro is also expanded into generated libtool script, which # arranges for $SED and $ECHO to be set by different means. m4_defun([_LT_CC_BASENAME], [m4_require([_LT_PREPARE_CC_BASENAME])dnl AC_REQUIRE([_LT_DECL_SED])dnl AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl func_cc_basename $1 cc_basename=$func_cc_basename_result ]) # _LT_FILEUTILS_DEFAULTS # ---------------------- # It is okay to use these file commands and assume they have been set # sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'. 
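# For illustration: after m4_require([_LT_FILEUTILS_DEFAULTS]), later code
# can safely run e.g. $RM conftest* even when RM is unset in the
# environment, because the defaults below fill in CP='cp -f', MV='mv -f'
# and RM='rm -f'.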
m4_defun([_LT_FILEUTILS_DEFAULTS], [: ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} ])# _LT_FILEUTILS_DEFAULTS # _LT_SETUP # --------- m4_defun([_LT_SETUP], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl _LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl dnl _LT_DECL([], [host_alias], [0], [The host system])dnl _LT_DECL([], [host], [0])dnl _LT_DECL([], [host_os], [0])dnl dnl _LT_DECL([], [build_alias], [0], [The build system])dnl _LT_DECL([], [build], [0])dnl _LT_DECL([], [build_os], [0])dnl dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl dnl AC_REQUIRE([AC_PROG_LN_S])dnl test -z "$LN_S" && LN_S="ln -s" _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl dnl AC_REQUIRE([LT_CMD_MAX_LEN])dnl _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl m4_require([_LT_CMD_RELOAD])dnl m4_require([_LT_CHECK_MAGIC_METHOD])dnl m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl m4_require([_LT_CMD_OLD_ARCHIVE])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_WITH_SYSROOT])dnl m4_require([_LT_CMD_TRUNCATE])dnl _LT_CONFIG_LIBTOOL_INIT([ # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi ]) if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi _LT_CHECK_OBJDIR m4_require([_LT_TAG_COMPILER])dnl case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld=$lt_cv_prog_gnu_ld old_CC=$CC old_CFLAGS=$CFLAGS # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then _LT_PATH_MAGIC fi ;; esac # Use C for the default configuration in the libtool script LT_SUPPORTED_TAG([CC]) _LT_LANG_C_CONFIG _LT_LANG_DEFAULT_CONFIG _LT_CONFIG_COMMANDS ])# _LT_SETUP # _LT_PREPARE_SED_QUOTE_VARS # -------------------------- # Define a few sed substitution that help us do robust quoting. m4_defun([_LT_PREPARE_SED_QUOTE_VARS], [# Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. 
delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ]) # _LT_PROG_LTMAIN # --------------- # Note that this code is called both from 'configure', and 'config.status' # now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, # 'config.status' has no value for ac_aux_dir unless we are using Automake, # so we pass a copy along to make sure it has a sensible value anyway. m4_defun([_LT_PROG_LTMAIN], [m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl _LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ltmain=$ac_aux_dir/ltmain.sh ])# _LT_PROG_LTMAIN ## ------------------------------------- ## ## Accumulate code for creating libtool. ## ## ------------------------------------- ## # So that we can recreate a full libtool script including additional # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS # in macros and then make a single call at the end using the 'libtool' # label. # _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) # ---------------------------------------- # Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL_INIT], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_INIT], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_INIT]) # _LT_CONFIG_LIBTOOL([COMMANDS]) # ------------------------------ # Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) # _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) # ----------------------------------------------------- m4_defun([_LT_CONFIG_SAVE_COMMANDS], [_LT_CONFIG_LIBTOOL([$1]) _LT_CONFIG_LIBTOOL_INIT([$2]) ]) # _LT_FORMAT_COMMENT([COMMENT]) # ----------------------------- # Add leading comment marks to the start of each line, and a trailing # full-stop to the whole comment if one is not present already. m4_define([_LT_FORMAT_COMMENT], [m4_ifval([$1], [ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) )]) ## ------------------------ ## ## FIXME: Eliminate VARNAME ## ## ------------------------ ## # _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) # ------------------------------------------------------------------- # CONFIGNAME is the name given to the value in the libtool script. # VARNAME is the (base) name used in the configure script. # VALUE may be 0, 1 or 2 for a computed quote escaped value based on # VARNAME. Any other value will be used directly. 
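# An example drawn from _LT_SETUP above:
#   _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])
# registers configure's LN_S under the same libtool name, with
# single-quote escaping applied to its value (value class 1) and the
# given description.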
m4_define([_LT_DECL], [lt_if_append_uniq([lt_decl_varnames], [$2], [, ], [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], [m4_ifval([$1], [$1], [$2])]) lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) m4_ifval([$4], [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) lt_dict_add_subkey([lt_decl_dict], [$2], [tagged?], [m4_ifval([$5], [yes], [no])])]) ]) # _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) # -------------------------------------------------------- m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) # lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_tag_varnames], [_lt_decl_filter([tagged?], [yes], $@)]) # _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) # --------------------------------------------------------- m4_define([_lt_decl_filter], [m4_case([$#], [0], [m4_fatal([$0: too few arguments: $#])], [1], [m4_fatal([$0: too few arguments: $#: $1])], [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], [lt_dict_filter([lt_decl_dict], $@)])[]dnl ]) # lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) # -------------------------------------------------- m4_define([lt_decl_quote_varnames], [_lt_decl_filter([value], [1], $@)]) # lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_dquote_varnames], [_lt_decl_filter([value], [2], $@)]) # lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_varnames_tagged], [m4_assert([$# <= 2])dnl _$0(m4_quote(m4_default([$1], [[, ]])), m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) m4_define([_lt_decl_varnames_tagged], [m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) # lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_all_varnames], [_$0(m4_quote(m4_default([$1], [[, ]])), m4_if([$2], [], m4_quote(lt_decl_varnames), m4_quote(m4_shift($@))))[]dnl ]) m4_define([_lt_decl_all_varnames], [lt_join($@, lt_decl_varnames_tagged([$1], lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ]) # _LT_CONFIG_STATUS_DECLARE([VARNAME]) # ------------------------------------ # Quote a variable value, and forward it to 'config.status' so that its # declaration there will have the same value as in 'configure'. VARNAME # must have a single quote delimited value for this to work. m4_define([_LT_CONFIG_STATUS_DECLARE], [$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) # _LT_CONFIG_STATUS_DECLARATIONS # ------------------------------ # We delimit libtool config variables with single quotes, so when # we write them to config.status, we have to be sure to quote all # embedded single quotes properly. 
In configure, this macro expands # each variable declared with _LT_DECL (and _LT_TAGDECL) into: # # <var>='`$ECHO "$<var>" | $SED "$delay_single_quote_subst"`' m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], [m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAGS # ---------------- # Output comment and list of tags supported by the script m4_defun([_LT_LIBTOOL_TAGS], [_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl available_tags='_LT_TAGS'dnl ]) # _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) # ----------------------------------- # Extract the dictionary values for VARNAME (optionally with TAG) and # expand to a commented shell variable setting: # # # Some comment about what VAR is for. # visible_name=$lt_internal_name m4_define([_LT_LIBTOOL_DECLARE], [_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [description])))[]dnl m4_pushdef([_libtool_name], m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), [0], [_libtool_name=[$]$1], [1], [_libtool_name=$lt_[]$1], [2], [_libtool_name=$lt_[]$1], [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ]) # _LT_LIBTOOL_CONFIG_VARS # ----------------------- # Produce commented declarations of non-tagged libtool config variables # suitable for insertion in the LIBTOOL CONFIG section of the 'libtool' # script. Tagged libtool config variables (even for the LIBTOOL CONFIG # section) are produced by _LT_LIBTOOL_TAG_VARS. m4_defun([_LT_LIBTOOL_CONFIG_VARS], [m4_foreach([_lt_var], m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAG_VARS(TAG) # ------------------------- m4_define([_LT_LIBTOOL_TAG_VARS], [m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) # _LT_TAGVAR(VARNAME, [TAGNAME]) # ------------------------------ m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) # _LT_CONFIG_COMMANDS # ------------------- # Send accumulated output to $CONFIG_STATUS. Thanks to the lists of # variables for single and double quote escaping we saved from calls # to _LT_DECL, we can put quote escaped variables declarations # into 'config.status', and then the shell code to quote escape them in # for loops in 'config.status'. Finally, any additional code accumulated # from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. m4_defun([_LT_CONFIG_COMMANDS], [AC_PROVIDE_IFELSE([LT_OUTPUT], dnl If the libtool generation code has been placed in $CONFIG_LT, dnl instead of duplicating it all over again into config.status, dnl then we will have config.status run $CONFIG_LT later, so it dnl needs to know what name is stored there: [AC_CONFIG_COMMANDS([libtool], [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], dnl If the libtool generation code is destined for config.status, dnl expand the accumulated commands and init code now: [AC_CONFIG_COMMANDS([libtool], [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ])#_LT_CONFIG_COMMANDS # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], [ # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' _LT_CONFIG_STATUS_DECLARATIONS LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$[]1 _LTECHO_EOF' } # Quote evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_quote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_dquote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done _LT_OUTPUT_LIBTOOL_INIT ]) # _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) # ------------------------------------ # Generate a child script FILE with all initialization necessary to # reuse the environment learned by the parent script, and make the # file executable. If COMMENT is supplied, it is inserted after the # '#!' sequence but before initialization text begins. After this # macro, additional text can be appended to FILE to form the body of # the child script. The macro ends with non-zero status if the # file could not be fully written (such as if the disk is full). m4_ifdef([AS_INIT_GENERATED], [m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], [m4_defun([_LT_GENERATED_FILE_INIT], [m4_require([AS_PREPARE])]dnl [m4_pushdef([AS_MESSAGE_LOG_FD])]dnl [lt_write_fail=0 cat >$1 <<_ASEOF || lt_write_fail=1 #! $SHELL # Generated by $as_me. $2 SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$1 <<\_ASEOF || lt_write_fail=1 AS_SHELL_SANITIZE _AS_PREPARE exec AS_MESSAGE_FD>&1 _ASEOF test 0 = "$lt_write_fail" && chmod +x $1[]dnl m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT # LT_OUTPUT # --------- # This macro allows early generation of the libtool script (before # AC_OUTPUT is called), in case it is used in configure for compilation # tests. AC_DEFUN([LT_OUTPUT], [: ${CONFIG_LT=./config.lt} AC_MSG_NOTICE([creating $CONFIG_LT]) _LT_GENERATED_FILE_INIT(["$CONFIG_LT"], [# Run this file to recreate a libtool stub with the current configuration.]) cat >>"$CONFIG_LT" <<\_LTEOF lt_cl_silent=false exec AS_MESSAGE_LOG_FD>>config.log { echo AS_BOX([Running $as_me.]) } >&AS_MESSAGE_LOG_FD lt_cl_help="\ '$as_me' creates a local libtool stub from the current configuration, for use in further configure time tests before the real libtool is generated. Usage: $[0] [[OPTIONS]] -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files Report bugs to <bug-libtool@gnu.org>." lt_cl_version="\ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) configured by $[0], generated by m4_PACKAGE_STRING. Copyright (C) 2011 Free Software Foundation, Inc.
This config.lt script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." while test 0 != $[#] do case $[1] in --version | --v* | -V ) echo "$lt_cl_version"; exit 0 ;; --help | --h* | -h ) echo "$lt_cl_help"; exit 0 ;; --debug | --d* | -d ) debug=: ;; --quiet | --q* | --silent | --s* | -q ) lt_cl_silent=: ;; -*) AC_MSG_ERROR([unrecognized option: $[1] Try '$[0] --help' for more information.]) ;; *) AC_MSG_ERROR([unrecognized argument: $[1] Try '$[0] --help' for more information.]) ;; esac shift done if $lt_cl_silent; then exec AS_MESSAGE_FD>/dev/null fi _LTEOF cat >>"$CONFIG_LT" <<_LTEOF _LT_OUTPUT_LIBTOOL_COMMANDS_INIT _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AC_MSG_NOTICE([creating $ofile]) _LT_OUTPUT_LIBTOOL_COMMANDS AS_EXIT(0) _LTEOF chmod +x "$CONFIG_LT" # configure is writing to config.log, but config.lt does its own redirection, # appending to config.log, which fails on DOS, as config.log is still kept # open by configure. Here we exec the FD to /dev/null, effectively closing # config.log, so it can be properly (re)opened and appended to by config.lt. lt_cl_success=: test yes = "$silent" && lt_config_lt_args="$lt_config_lt_args --quiet" exec AS_MESSAGE_LOG_FD>/dev/null $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false exec AS_MESSAGE_LOG_FD>>config.log $lt_cl_success || AS_EXIT(1) ])# LT_OUTPUT # _LT_CONFIG(TAG) # --------------- # If TAG is the built-in tag, create an initial libtool script with a # default configuration from the untagged config vars. Otherwise add code # to config.status for appending the configuration named by TAG from the # matching tagged config vars. m4_defun([_LT_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_CONFIG_SAVE_COMMANDS([ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl m4_if(_LT_TAG, [C], [ # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi cfgfile=${ofile}T trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # Generated automatically by $as_me ($PACKAGE) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # Provide generalized library-building support services. # Written by Gordon Matzigkeit, 1996 _LT_COPYING _LT_LIBTOOL_TAGS # Configured defaults for sys_lib_dlsearch_path munging. : \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} # ### BEGIN LIBTOOL CONFIG _LT_LIBTOOL_CONFIG_VARS _LT_LIBTOOL_TAG_VARS # ### END LIBTOOL CONFIG _LT_EOF cat <<'_LT_EOF' >> "$cfgfile" # ### BEGIN FUNCTIONS SHARED WITH CONFIGURE _LT_PREPARE_MUNGE_PATH_LIST _LT_PREPARE_CC_BASENAME # ### END FUNCTIONS SHARED WITH CONFIGURE _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac _LT_PROG_LTMAIN # We use sed instead of cat because bash on DJGPP gets confused if # it finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too?
sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ], [cat <<_LT_EOF >> "$ofile" dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded dnl in a comment (ie after a #). # ### BEGIN LIBTOOL TAG CONFIG: $1 _LT_LIBTOOL_TAG_VARS(_LT_TAG) # ### END LIBTOOL TAG CONFIG: $1 _LT_EOF ])dnl /m4_if ], [m4_if([$1], [], [ PACKAGE='$PACKAGE' VERSION='$VERSION' RM='$RM' ofile='$ofile'], []) ])dnl /_LT_CONFIG_SAVE_COMMANDS ])# _LT_CONFIG # LT_SUPPORTED_TAG(TAG) # --------------------- # Trace this macro to discover what tags are supported by the libtool # --tag option, using: # autoconf --trace 'LT_SUPPORTED_TAG:$1' AC_DEFUN([LT_SUPPORTED_TAG], []) # C support is built-in for now m4_define([_LT_LANG_C_enabled], []) m4_define([_LT_TAGS], []) # LT_LANG(LANG) # ------------- # Enable libtool support for the given language if not already enabled. AC_DEFUN([LT_LANG], [AC_BEFORE([$0], [LT_OUTPUT])dnl m4_case([$1], [C], [_LT_LANG(C)], [C++], [_LT_LANG(CXX)], [Go], [_LT_LANG(GO)], [Java], [_LT_LANG(GCJ)], [Fortran 77], [_LT_LANG(F77)], [Fortran], [_LT_LANG(FC)], [Windows Resource], [_LT_LANG(RC)], [m4_ifdef([_LT_LANG_]$1[_CONFIG], [_LT_LANG($1)], [m4_fatal([$0: unsupported language: "$1"])])])dnl ])# LT_LANG # _LT_LANG(LANGNAME) # ------------------ m4_defun([_LT_LANG], [m4_ifdef([_LT_LANG_]$1[_enabled], [], [LT_SUPPORTED_TAG([$1])dnl m4_append([_LT_TAGS], [$1 ])dnl m4_define([_LT_LANG_]$1[_enabled], [])dnl _LT_LANG_$1_CONFIG($1)])dnl ])# _LT_LANG m4_ifndef([AC_PROG_GO], [ ############################################################ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_GO. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # ############################################################ m4_defun([AC_PROG_GO], [AC_LANG_PUSH(Go)dnl AC_ARG_VAR([GOC], [Go compiler command])dnl AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl _AC_ARG_VAR_LDFLAGS()dnl AC_CHECK_TOOL(GOC, gccgo) if test -z "$GOC"; then if test -n "$ac_tool_prefix"; then AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) fi fi if test -z "$GOC"; then AC_CHECK_PROG(GOC, gccgo, gccgo, false) fi ])#m4_defun ])#m4_ifndef # _LT_LANG_DEFAULT_CONFIG # ----------------------- m4_defun([_LT_LANG_DEFAULT_CONFIG], [AC_PROVIDE_IFELSE([AC_PROG_CXX], [LT_LANG(CXX)], [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) AC_PROVIDE_IFELSE([AC_PROG_F77], [LT_LANG(F77)], [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) AC_PROVIDE_IFELSE([AC_PROG_FC], [LT_LANG(FC)], [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal dnl pulling things in needlessly. 
AC_PROVIDE_IFELSE([AC_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([LT_PROG_GCJ], [LT_LANG(GCJ)], [m4_ifdef([AC_PROG_GCJ], [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([A][M_PROG_GCJ], [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([LT_PROG_GCJ], [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) AC_PROVIDE_IFELSE([AC_PROG_GO], [LT_LANG(GO)], [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) AC_PROVIDE_IFELSE([LT_PROG_RC], [LT_LANG(RC)], [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ])# _LT_LANG_DEFAULT_CONFIG # Obsolete macros: AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_CXX], []) dnl AC_DEFUN([AC_LIBTOOL_F77], []) dnl AC_DEFUN([AC_LIBTOOL_FC], []) dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) dnl AC_DEFUN([AC_LIBTOOL_RC], []) # _LT_TAG_COMPILER # ---------------- m4_defun([_LT_TAG_COMPILER], [AC_REQUIRE([AC_PROG_CC])dnl _LT_DECL([LTCC], [CC], [1], [A C compiler])dnl _LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl _LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl _LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC ])# _LT_TAG_COMPILER # _LT_COMPILER_BOILERPLATE # ------------------------ # Check for compiler boilerplate output or warnings with # the simple compiler test code. m4_defun([_LT_COMPILER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ])# _LT_COMPILER_BOILERPLATE # _LT_LINKER_BOILERPLATE # ---------------------- # Check for linker boilerplate output or warnings with # the simple link test code. 
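# For illustration (an assumption about how the surrounding probes use
# these captures, not stated in this macro itself): the saved
# _lt_compiler_boilerplate and _lt_linker_boilerplate text records any
# diagnostics a chatty toolchain emits even for a trivial successful
# compile or link, so later feature tests can discount that noise rather
# than mistake it for failure.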
m4_defun([_LT_LINKER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ])# _LT_LINKER_BOILERPLATE # _LT_REQUIRED_DARWIN_CHECKS # ------------------------- m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ case $host_os in rhapsody* | darwin*) AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) AC_CHECK_TOOL([LIPO], [lipo], [:]) AC_CHECK_TOOL([OTOOL], [otool], [:]) AC_CHECK_TOOL([OTOOL64], [otool64], [:]) _LT_DECL([], [DSYMUTIL], [1], [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) _LT_DECL([], [NMEDIT], [1], [Tool to change global to local symbols on Mac OS X]) _LT_DECL([], [LIPO], [1], [Tool to manipulate fat objects and archives on Mac OS X]) _LT_DECL([], [OTOOL], [1], [ldd/readelf like tool for Mach-O binaries on Mac OS X]) _LT_DECL([], [OTOOL64], [1], [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], [lt_cv_apple_cc_single_mod=no if test -z "$LT_MULTI_MODULE"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test 0 = "$_lt_result"; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -rf libconftest.dylib* rm -f conftest.* fi]) AC_CACHE_CHECK([for -exported_symbols_list linker flag], [lt_cv_ld_exported_symbols_list], [lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [lt_cv_ld_exported_symbols_list=yes], [lt_cv_ld_exported_symbols_list=no]) LDFLAGS=$save_LDFLAGS ]) AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], [lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? 
if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then lt_cv_ld_force_load=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM ]) case $host_os in rhapsody* | darwin1.[[012]]) _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; 10.[[012]][[,.]]*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test yes = "$lt_cv_apple_cc_single_mod"; then _lt_dar_single_mod='$single_module' fi if test yes = "$lt_cv_ld_exported_symbols_list"; then _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' fi if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac ]) # _LT_DARWIN_LINKER_FEATURES([TAG]) # --------------------------------- # Checks for linker and compiler features on darwin m4_defun([_LT_DARWIN_LINKER_FEATURES], [ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported if test yes = "$lt_cv_ld_force_load"; then _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) else _LT_TAGVAR(whole_archive_flag_spec, $1)='' fi _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" m4_if([$1], [CXX], [ if test yes != "$lt_cv_apple_cc_single_mod"; then _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o 
\$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" fi ],[]) else _LT_TAGVAR(ld_shlibs, $1)=no fi ]) # _LT_SYS_MODULE_PATH_AIX([TAGNAME]) # ---------------------------------- # Links a minimal program and checks the executable # for the system default hardcoded library path. In most cases, # this is /usr/lib:/lib, but when the MPI compilers are used # the location of the communication and MPI libs are included too. # If we don't find anything, use the default library path according # to the aix ld manual. # Store the results from the different compilers for each TAGNAME. # Allow to override them for all tags through lt_cv_aix_libpath. m4_defun([_LT_SYS_MODULE_PATH_AIX], [m4_require([_LT_DECL_SED])dnl if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ lt_aix_libpath_sed='[ /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }]' _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi],[]) if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib fi ]) aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) fi ])# _LT_SYS_MODULE_PATH_AIX # _LT_SHELL_INIT(ARG) # ------------------- m4_define([_LT_SHELL_INIT], [m4_divert_text([M4SH-INIT], [$1 ])])# _LT_SHELL_INIT # _LT_PROG_ECHO_BACKSLASH # ----------------------- # Find how we can fake an echo command that does not interpret backslash. # In particular, with Autoconf 2.60 or later we add some code to the start # of the generated configure script that will find a shell with a builtin # printf (that we can use as an echo command). m4_defun([_LT_PROG_ECHO_BACKSLASH], [ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO AC_MSG_CHECKING([how to print strings]) # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $[]1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. 
func_echo_all () { $ECHO "$*" } case $ECHO in printf*) AC_MSG_RESULT([printf]) ;; print*) AC_MSG_RESULT([print -r]) ;; *) AC_MSG_RESULT([cat]) ;; esac m4_ifdef([_AS_DETECT_SUGGESTED], [_AS_DETECT_SUGGESTED([ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test "X`printf %s $ECHO`" = "X$ECHO" \ || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) ])# _LT_PROG_ECHO_BACKSLASH # _LT_WITH_SYSROOT # ---------------- AC_DEFUN([_LT_WITH_SYSROOT], [AC_MSG_CHECKING([for sysroot]) AC_ARG_WITH([sysroot], [AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@], [Search for dependent libraries within DIR (or the compiler's sysroot if not specified).])], [], [with_sysroot=no]) dnl lt_sysroot will always be passed unquoted. We quote it here dnl in case the user passed a directory name. lt_sysroot= case $with_sysroot in #( yes) if test yes = "$GCC"; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) AC_MSG_RESULT([$with_sysroot]) AC_MSG_ERROR([The sysroot must be an absolute path.]) ;; esac AC_MSG_RESULT([${lt_sysroot:-no}]) _LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl [dependent libraries, and where our libraries should be installed.])]) # _LT_ENABLE_LOCK # --------------- m4_defun([_LT_ENABLE_LOCK], [AC_ARG_ENABLE([libtool-lock], [AS_HELP_STRING([--disable-libtool-lock], [avoid locking (might break parallel builds)])]) test no = "$enable_libtool_lock" || enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out what ABI is being produced by ac_compile, and set mode # options accordingly. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE=32 ;; *ELF-64*) HPUX_IA64_MODE=64 ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then if test yes = "$lt_cv_prog_gnu_ld"; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; mips64*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. 
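# The ABI probes below all follow one pattern: compile a one-line conftest
# and classify the resulting object with /usr/bin/file. Illustrative only --
# the exact wording of file(1) output varies by platform, e.g. something
# like
#   conftest.o: ELF 64-bit MSB relocatable ...
# and the *32-bit*/*N32*/*64-bit* globs key off such strings to pick a
# matching linker emulation via LD="${LD-ld} -m <emul>".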
echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then emul=elf case `/usr/bin/file conftest.$ac_objext` in *32-bit*) emul="${emul}32" ;; *64-bit*) emul="${emul}64" ;; esac case `/usr/bin/file conftest.$ac_objext` in *MSB*) emul="${emul}btsmip" ;; *LSB*) emul="${emul}ltsmip" ;; esac case `/usr/bin/file conftest.$ac_objext` in *N32*) emul="${emul}n32" ;; esac LD="${LD-ld} -m $emul" fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. Note that the listed cases only cover the # situations where additional linker options are needed (such as when # doing 32-bit compilation for a host where ld defaults to 64-bit, or # vice versa); the common cases where no linker options are needed do # not appear in the list. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; powerpc64le-*linux*) LD="${LD-ld} -m elf32lppclinux" ;; powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; powerpcle-*linux*) LD="${LD-ld} -m elf64lppc" ;; powerpc-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS=$CFLAGS CFLAGS="$CFLAGS -belf" AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, [AC_LANG_PUSH(C) AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) AC_LANG_POP]) if test yes != "$lt_cv_cc_needs_belf"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS=$SAVE_CFLAGS fi ;; *-*solaris*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*|x86_64-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD=${LD-ld}_sol2 fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks=$enable_libtool_lock ])# _LT_ENABLE_LOCK # _LT_PROG_AR # ----------- m4_defun([_LT_PROG_AR], [AC_CHECK_TOOLS(AR, [ar], false) : ${AR=ar} : ${AR_FLAGS=cru} _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], [lt_cv_ar_at_file=no AC_COMPILE_IFELSE([AC_LANG_PROGRAM], [echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' AC_TRY_EVAL([lt_ar_try]) if test 0 -eq "$ac_status"; then # Ensure the archiver fails upon bogus file names. 
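# The @FILE support test is deliberately two-sided: the archive command
# above must succeed while conftest.lst names an existing object, and the
# same command must fail below once that object has been removed. Only an
# archiver that really expands @FILE arguments passes both halves, and only
# then is lt_cv_ar_at_file set to '@'.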
rm -f conftest.$ac_objext libconftest.a AC_TRY_EVAL([lt_ar_try]) if test 0 -ne "$ac_status"; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a ]) ]) if test no = "$lt_cv_ar_at_file"; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi _LT_DECL([], [archiver_list_spec], [1], [How to feed a file listing to the archiver]) ])# _LT_PROG_AR # _LT_CMD_OLD_ARCHIVE # ------------------- m4_defun([_LT_CMD_OLD_ARCHIVE], [_LT_PROG_AR AC_CHECK_TOOL(STRIP, strip, :) test -z "$STRIP" && STRIP=: _LT_DECL([], [STRIP], [1], [A symbol stripping program]) AC_CHECK_TOOL(RANLIB, ranlib, :) test -z "$RANLIB" && RANLIB=: _LT_DECL([], [RANLIB], [1], [Commands used to install an old-style archive]) # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in bitrig* | openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac _LT_DECL([], [old_postinstall_cmds], [2]) _LT_DECL([], [old_postuninstall_cmds], [2]) _LT_TAGDECL([], [old_archive_cmds], [2], [Commands used to build an old-style archive]) _LT_DECL([], [lock_old_archive_extraction], [0], [Whether to use a lock for old archive extraction]) ])# _LT_CMD_OLD_ARCHIVE # _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------------------- # Check whether the given compiler option works AC_DEFUN([_LT_COMPILER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$3" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi fi $RM conftest* ]) if test yes = "[$]$2"; then m4_if([$5], , :, [$5]) else m4_if([$6], , :, [$6]) fi ])# _LT_COMPILER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) # _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------- # Check whether the given linker option works AC_DEFUN([_LT_LINKER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $3" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&AS_MESSAGE_LOG_FD $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi else $2=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS ]) if test yes = "[$]$2"; then m4_if([$4], , :, [$4]) else m4_if([$5], , :, [$5]) fi ])# _LT_LINKER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) # LT_CMD_MAX_LEN #--------------- AC_DEFUN([LT_CMD_MAX_LEN], [AC_REQUIRE([AC_CANONICAL_HOST])dnl # find the maximum length of command line arguments AC_MSG_CHECKING([the maximum length of command line arguments]) AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl i=0 teststring=ABCD case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) # This has been around since 386BSD, at least. Likely further. 
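# An illustrative run (the reported value differs per system):
#   $ /sbin/sysctl -n kern.argmax
#   2097152
# The kernel's ARG_MAX is then scaled down to three quarters below (divide
# by 4, multiply by 3) as a safety margin.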
if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so let's avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test X`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test 17 != "$i" # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value.
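# A worked example of the generic branch above: teststring starts at 4
# bytes (ABCD), the warm-up for-loop doubles it 8 times to 1024 bytes, and
# the while-loop doubles it further until either the echo round-trip fails
# or i reaches 17, i.e. at most 1024 * 2^9 = 524288 bytes -- the "1/2 MB"
# ceiling. Halving the measured length below then yields roughly 256K as
# the assumed command-line limit.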
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac ]) if test -n "$lt_cv_sys_max_cmd_len"; then AC_MSG_RESULT($lt_cv_sys_max_cmd_len) else AC_MSG_RESULT(none) fi max_cmd_len=$lt_cv_sys_max_cmd_len _LT_DECL([], [max_cmd_len], [0], [What is the maximum length of a command?]) ])# LT_CMD_MAX_LEN # Old name: AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) # _LT_HEADER_DLFCN # ---------------- m4_defun([_LT_HEADER_DLFCN], [AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl ])# _LT_HEADER_DLFCN # _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, # ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) # ---------------------------------------------------------------- m4_defun([_LT_TRY_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test yes = "$cross_compiling"; then : [$4] else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF [#line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; }] _LT_EOF if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) $1 ;; x$lt_dlneed_uscore) $2 ;; x$lt_dlunknown|x*) $3 ;; esac else : # compilation failed $3 fi fi rm -fr conftest* ])# _LT_TRY_DLOPEN_SELF # LT_SYS_DLOPEN_SELF # ------------------ AC_DEFUN([LT_SYS_DLOPEN_SELF], [m4_require([_LT_HEADER_DLFCN])dnl if test yes != "$enable_dlopen"; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen=load_add_on lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen=LoadLibrary lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen=dlopen lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[ lt_cv_dlopen=dyld lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ]) ;; tpf*) # Don't try to run any link tests for TPF. We know it's impossible # because TPF is a cross-compiler, and we know how we open DSOs. 
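# A note on the _LT_TRY_DLOPEN_SELF conftest above: the program dlopens
# itself and looks up "fnord" both with and without a leading underscore,
# encoding the answer in its exit status -- 1 ($lt_dlno_uscore) when plain
# "fnord" resolves, 2 ($lt_dlneed_uscore) when "_fnord" is needed, and
# 0 ($lt_dlunknown) otherwise; the case on x$lt_status then dispatches to
# the caller's ACTION arguments.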
lt_cv_dlopen=dlopen lt_cv_dlopen_libs= lt_cv_dlopen_self=no ;; *) AC_CHECK_FUNC([shl_load], [lt_cv_dlopen=shl_load], [AC_CHECK_LIB([dld], [shl_load], [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld], [AC_CHECK_FUNC([dlopen], [lt_cv_dlopen=dlopen], [AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl], [AC_CHECK_LIB([svld], [dlopen], [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld], [AC_CHECK_LIB([dld], [dld_link], [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld]) ]) ]) ]) ]) ]) ;; esac if test no = "$lt_cv_dlopen"; then enable_dlopen=no else enable_dlopen=yes fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS=$CPPFLAGS test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS=$LDFLAGS wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS=$LIBS LIBS="$lt_cv_dlopen_libs $LIBS" AC_CACHE_CHECK([whether a program can dlopen itself], lt_cv_dlopen_self, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) ]) if test yes = "$lt_cv_dlopen_self"; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" AC_CACHE_CHECK([whether a statically linked program can dlopen itself], lt_cv_dlopen_self_static, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) ]) fi CPPFLAGS=$save_CPPFLAGS LDFLAGS=$save_LDFLAGS LIBS=$save_LIBS ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi _LT_DECL([dlopen_support], [enable_dlopen], [0], [Whether dlopen is supported]) _LT_DECL([dlopen_self], [enable_dlopen_self], [0], [Whether dlopen of programs is supported]) _LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], [Whether dlopen of statically linked programs is supported]) ])# LT_SYS_DLOPEN_SELF # Old name: AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) # _LT_COMPILER_C_O([TAGNAME]) # --------------------------- # Check to see if options -c and -o are simultaneously supported by compiler. # This macro does not hard code the compiler like AC_PROG_CC_C_O. m4_defun([_LT_COMPILER_C_O], [m4_require([_LT_DECL_SED])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:$LINENO: \$? 
= $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes fi fi chmod u+w . 2>&AS_MESSAGE_LOG_FD $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* ]) _LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], [Does compiler simultaneously support -c and -o options?]) ])# _LT_COMPILER_C_O # _LT_COMPILER_FILE_LOCKS([TAGNAME]) # ---------------------------------- # Check to see if we can do hard links to lock some files if needed m4_defun([_LT_COMPILER_FILE_LOCKS], [m4_require([_LT_ENABLE_LOCK])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_COMPILER_C_O([$1]) hard_links=nottested if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user AC_MSG_CHECKING([if we can lock with hard links]) hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no AC_MSG_RESULT([$hard_links]) if test no = "$hard_links"; then AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe]) need_locks=warn fi else need_locks=no fi _LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) ])# _LT_COMPILER_FILE_LOCKS # _LT_CHECK_OBJDIR # ---------------- m4_defun([_LT_CHECK_OBJDIR], [AC_CACHE_CHECK([for objdir], [lt_cv_objdir], [rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null]) objdir=$lt_cv_objdir _LT_DECL([], [objdir], [0], [The name of the directory that contains temporary libtool files])dnl m4_pattern_allow([LT_OBJDIR])dnl AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/", [Define to the sub-directory where libtool stores uninstalled libraries.]) ])# _LT_CHECK_OBJDIR # _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) # -------------------------------------- # Check hardcoding attributes. m4_defun([_LT_LINKER_HARDCODE_LIBPATH], [AC_MSG_CHECKING([how to hardcode library paths into programs]) _LT_TAGVAR(hardcode_action, $1)= if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || test -n "$_LT_TAGVAR(runpath_var, $1)" || test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then # We can hardcode non-existent directories. if test no != "$_LT_TAGVAR(hardcode_direct, $1)" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" && test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then # Linking always hardcodes the temporary library directory. _LT_TAGVAR(hardcode_action, $1)=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. 
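# hardcode_action thus ends up as one of three values: "relink" when
# linking would hardcode the temporary build directory (the hardcode_minus_L
# case above), "immediate" (set just below) when we can either hardcode
# correct paths or avoid hardcoding altogether, and "unsupported" when
# neither mechanism exists. A "relink" result also disables fast
# installation further down.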
_LT_TAGVAR(hardcode_action, $1)=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. _LT_TAGVAR(hardcode_action, $1)=unsupported fi AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) if test relink = "$_LT_TAGVAR(hardcode_action, $1)" || test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi _LT_TAGDECL([], [hardcode_action], [0], [How to hardcode a shared library path into an executable]) ])# _LT_LINKER_HARDCODE_LIBPATH # _LT_CMD_STRIPLIB # ---------------- m4_defun([_LT_CMD_STRIPLIB], [m4_require([_LT_DECL_EGREP]) striplib= old_striplib= AC_MSG_CHECKING([whether stripping libraries is possible]) if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" AC_MSG_RESULT([yes]) else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP"; then striplib="$STRIP -x" old_striplib="$STRIP -S" AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ;; *) AC_MSG_RESULT([no]) ;; esac fi _LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) _LT_DECL([], [striplib], [1]) ])# _LT_CMD_STRIPLIB # _LT_PREPARE_MUNGE_PATH_LIST # --------------------------- # Make sure func_munge_path_list() is defined correctly. m4_defun([_LT_PREPARE_MUNGE_PATH_LIST], [[# func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x@S|@2 in x) ;; *:) eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\" ;; x:*) eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\" ;; *::*) eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\" ;; *) eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\" ;; esac } ]])# _LT_PREPARE_PATH_LIST # _LT_SYS_DYNAMIC_LINKER([TAG]) # ----------------------------- # PORTME Fill in your ld.so characteristics m4_defun([_LT_SYS_DYNAMIC_LINKER], [AC_REQUIRE([AC_CANONICAL_HOST])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_OBJDUMP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl AC_MSG_CHECKING([dynamic linker characteristics]) m4_if([$1], [], [ if test yes = "$GCC"; then case $host_os in darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; *) lt_awk_arg='/^libraries:/' ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;; *) lt_sed_strip_eq='s|=/|/|g' ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path 
separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should be # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary... lt_tmp_lt_search_path_spec= lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` # ...but if some path component already ends with the multilib dir we assume # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). case "$lt_multi_os_dir; $lt_search_path_spec " in "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) lt_multi_os_dir= ;; esac for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" elif test -n "$lt_multi_os_dir"; then test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS = " "; FS = "/|\n";} { lt_foo = ""; lt_count = 0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo = "/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[[lt_foo]]++; } if (lt_freq[[lt_foo]] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi]) library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown AC_ARG_VAR([LT_SYS_LIBRARY_PATH], [User-defined run-time library search path.]) case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='$libname$release$shared_ext$major' ;; aix[[4-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0.
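# The aix4 check below detects those fixed snapshots with a preprocessor
# trick: it pipes
#   #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)
#    yes
#   #endif
# through "$CC -E" and greps for "yes", which survives preprocessing only
# when the compiler is GCC 2.97 or newer.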
case $host_os in aix4 | aix4.[[01]] | aix4.[[01]].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. # To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a[(]lib.so.V[)]' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. 
shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[[45]]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[[23]].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[[01]]* | freebsdelf3.[[01]]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[[3-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], [lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], [lt_cv_shlibpath_overrides_runpath=yes])]) LDFLAGS=$save_LDFLAGS libdir=$save_libdir ]) shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Ideally, we could use ldconfig to report *all* directories which are # searched for libraries, however this is still not possible. Aside from not # being certain /sbin/ldconfig is available, command # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, # even though it is searched at run-time. Try to do the best guess by # appending ld.so.conf contents (and includes) to the search path. if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use.
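# An illustrative /etc/ld.so.conf for the parsing above (paths are
# hypothetical):
#   include /etc/ld.so.conf.d/*.conf
#   /opt/example/lib
# The awk program inlines each include via cat, the $SED expression strips
# comments, hwcap lines and separators, and the surviving directories are
# appended to sys_lib_dlsearch_path_spec.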
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac AC_MSG_RESULT([$dynamic_linker]) test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH _LT_DECL([], [variables_saved_for_relink], [1], [Variables whose values should be saved in libtool wrapper scripts and restored at link time]) _LT_DECL([], [need_lib_prefix], [0], [Do we need the "lib" prefix for modules?]) _LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) _LT_DECL([], [version_type], [0], [Library versioning type]) _LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) _LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) _LT_DECL([], [shlibpath_overrides_runpath], [0], [Is shlibpath searched before the hard-coded library search path?]) _LT_DECL([], [libname_spec], [1], [Format of library name prefix]) _LT_DECL([], [library_names_spec], [1], [[List of archive names. First name is the real one, the rest are links. 
The last name is the one that the linker finds with -lNAME]]) _LT_DECL([], [soname_spec], [1], [[The coded name of the library, if different from the real name]]) _LT_DECL([], [install_override_mode], [1], [Permission mode override for installation of shared libraries]) _LT_DECL([], [postinstall_cmds], [2], [Command to use after installation of a shared archive]) _LT_DECL([], [postuninstall_cmds], [2], [Command to use after uninstallation of a shared archive]) _LT_DECL([], [finish_cmds], [2], [Commands used to finish a libtool library installation in a directory]) _LT_DECL([], [finish_eval], [1], [[As "finish_cmds", except a single script fragment to be evaled but not shown]]) _LT_DECL([], [hardcode_into_libs], [0], [Whether we should hardcode library paths into libraries]) _LT_DECL([], [sys_lib_search_path_spec], [2], [Compile-time system search path for libraries]) _LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2], [Detected run-time system search path for libraries]) _LT_DECL([], [configure_time_lt_sys_library_path], [2], [Explicit LT_SYS_LIBRARY_PATH set during ./configure time]) ])# _LT_SYS_DYNAMIC_LINKER # _LT_PATH_TOOL_PREFIX(TOOL) # -------------------------- # find a file program that can recognize shared library AC_DEFUN([_LT_PATH_TOOL_PREFIX], [m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in [[\\/*] | ?:[\\/]*]) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$1"; then lt_cv_path_MAGIC_CMD=$ac_dir/"$1" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. 
Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac]) MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then AC_MSG_RESULT($MAGIC_CMD) else AC_MSG_RESULT(no) fi _LT_DECL([], [MAGIC_CMD], [0], [Used to examine libraries when file_magic_cmd begins with "file"])dnl ])# _LT_PATH_TOOL_PREFIX # Old name: AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) # _LT_PATH_MAGIC # -------------- # find a file program that can recognize a shared library m4_defun([_LT_PATH_MAGIC], [_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi ])# _LT_PATH_MAGIC # LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker AC_DEFUN([LT_PATH_LD], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PROG_ECHO_BACKSLASH])dnl AC_ARG_WITH([gnu-ld], [AS_HELP_STRING([--with-gnu-ld], [assume the C compiler uses GNU ld @<:@default=no@:>@])], [test no = "$withval" || with_gnu_ld=yes], [with_gnu_ld=no])dnl ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(lt_cv_path_LD, [if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. 
      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
      *GNU* | *'with BFD'*)
        test no != "$with_gnu_ld" && break
        ;;
      *)
        test yes != "$with_gnu_ld" && break
        ;;
      esac
    fi
  done
  IFS=$lt_save_ifs
else
  lt_cv_path_LD=$LD # Let the user override the test with a path.
fi])
LD=$lt_cv_path_LD
if test -n "$LD"; then
  AC_MSG_RESULT($LD)
else
  AC_MSG_RESULT(no)
fi
test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
_LT_PATH_LD_GNU
AC_SUBST([LD])

_LT_TAGDECL([], [LD], [1], [The linker used to build libraries])
])# LT_PATH_LD

# Old names:
AU_ALIAS([AM_PROG_LD], [LT_PATH_LD])
AU_ALIAS([AC_PROG_LD], [LT_PATH_LD])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AM_PROG_LD], [])
dnl AC_DEFUN([AC_PROG_LD], [])

# _LT_PATH_LD_GNU
# ---------------
m4_defun([_LT_PATH_LD_GNU],
[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
[# I'd rather use --version here, but apparently some GNU lds only accept -v.
case `$LD -v 2>&1 </dev/null` in
*GNU* | *'with BFD'*)
  lt_cv_prog_gnu_ld=yes
  ;;
*)
  lt_cv_prog_gnu_ld=no
  ;;
esac])
with_gnu_ld=$lt_cv_prog_gnu_ld
])# _LT_PATH_LD_GNU

# _LT_CMD_RELOAD
# --------------
# find reload flag for linker
#  -- PORTME Some linkers may need a different reload flag.
m4_defun([_LT_CMD_RELOAD],
[AC_CACHE_CHECK([for $LD option to reload object files],
  lt_cv_ld_reload_flag,
  [lt_cv_ld_reload_flag='-r'])
reload_flag=$lt_cv_ld_reload_flag
case $reload_flag in
"" | " "*) ;;
*) reload_flag=" $reload_flag" ;;
esac
reload_cmds='$LD$reload_flag -o $output$reload_objs'
case $host_os in
  cygwin* | mingw* | pw32* | cegcc*)
    if test yes != "$GCC"; then
      reload_cmds=false
    fi
    ;;
  darwin*)
    if test yes = "$GCC"; then
      reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs'
    else
      reload_cmds='$LD$reload_flag -o $output$reload_objs'
    fi
    ;;
esac
_LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl
_LT_TAGDECL([], [reload_cmds], [2])dnl
])# _LT_CMD_RELOAD

# _LT_PATH_DD
# -----------
# find a working dd
m4_defun([_LT_PATH_DD],
[AC_CACHE_CHECK([for a working dd], [ac_cv_path_lt_DD],
[printf 0123456789abcdef0123456789abcdef >conftest.i
cat conftest.i conftest.i >conftest2.i
: ${lt_DD:=$DD}
AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd],
[if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
  cmp -s conftest.i conftest.out \
  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
fi])
rm -f conftest.i conftest2.i conftest.out])
])# _LT_PATH_DD

# _LT_CMD_TRUNCATE
# ----------------
# find command to truncate a binary pipe
m4_defun([_LT_CMD_TRUNCATE],
[m4_require([_LT_PATH_DD])
AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin],
[printf 0123456789abcdef0123456789abcdef >conftest.i
cat conftest.i conftest.i >conftest2.i
lt_cv_truncate_bin=
if "$ac_cv_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
  cmp -s conftest.i conftest.out \
  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
fi
rm -f conftest.i conftest2.i conftest.out
test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"])
_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1],
  [Command to truncate a binary pipe])
])# _LT_CMD_TRUNCATE

# _LT_CHECK_MAGIC_METHOD
# ----------------------
# how to check for library dependencies
# -- PORTME fill in with the dynamic library characteristics
m4_defun([_LT_CHECK_MAGIC_METHOD],
[m4_require([_LT_DECL_EGREP])
m4_require([_LT_DECL_OBJDUMP])
AC_CACHE_CHECK([how to recognize dependent libraries],
lt_cv_deplibs_check_method,
[lt_cv_file_magic_cmd='$MAGIC_CMD'
lt_cv_file_magic_test_file=
lt_cv_deplibs_check_method='unknown'
# Need to set the preceding variable on all platforms that support
# interlibrary dependencies.
# 'none' -- dependencies not supported.
# 'unknown' -- same as none, but documents that we really don't know.
# 'pass_all' -- all dependencies passed with no checks.
# 'test_compile' -- check by making test program.
# 'file_magic [[regex]]' -- check by looking for files in library path
# that respond to the $file_magic_cmd with a given extended regex.
# If you have 'file' or equivalent on your system and you're not sure
# whether 'pass_all' will *always* work, you probably want this one.

case $host_os in
aix[[4-9]]*)
  lt_cv_deplibs_check_method=pass_all
  ;;

beos*)
  lt_cv_deplibs_check_method=pass_all
  ;;

bsdi[[45]]*)
  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
  lt_cv_file_magic_cmd='/usr/bin/file -L'
  lt_cv_file_magic_test_file=/shlib/libc.so
  ;;

cygwin*)
  # func_win32_libid is a shell function defined in ltmain.sh
  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
  lt_cv_file_magic_cmd='func_win32_libid'
  ;;

mingw* | pw32*)
  # Base MSYS/MinGW do not provide the 'file' command needed by
  # func_win32_libid shell function, so use a weaker test based on 'objdump',
  # unless we find 'file', for example because we are cross-compiling.
  if ( file / ) >/dev/null 2>&1; then
    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
    lt_cv_file_magic_cmd='func_win32_libid'
  else
    # Keep this pattern in sync with the one in func_win32_libid.
    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
    lt_cv_file_magic_cmd='$OBJDUMP -f'
  fi
  ;;

cegcc*)
  # use the weaker test based on 'objdump'. See mingw*.
  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[[3-9]]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd* | bitrig*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; os2*) lt_cv_deplibs_check_method=pass_all ;; esac ]) 
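# For illustration only: with a 'file_magic' method chosen above, the
# dependency check amounts to running something like
#
#   /usr/bin/file -L /shlib/libc.so
#
# and accepting the library only if the output matches the stored extended
# regex, e.g. 'ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
# on bsdi; 'pass_all', in contrast, accepts every dependency unchecked.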
file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown _LT_DECL([], [deplibs_check_method], [1], [Method to check whether dependent libraries are shared objects]) _LT_DECL([], [file_magic_cmd], [1], [Command to use when deplibs_check_method = "file_magic"]) _LT_DECL([], [file_magic_glob], [1], [How to find potential files when deplibs_check_method = "file_magic"]) _LT_DECL([], [want_nocaseglob], [1], [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) ])# _LT_CHECK_MAGIC_METHOD # LT_PATH_NM # ---------- # find the pathname to a BSD- or MS-compatible name lister AC_DEFUN([LT_PATH_NM], [AC_REQUIRE([AC_PROG_CC])dnl AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM=$NM else lt_nm_to_check=${ac_tool_prefix}nm if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. tmp_nm=$ac_dir/$lt_tmp_nm if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then # Check to see if the nm accepts a BSD-compat flag. # Adding the 'sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty case $build_os in mingw*) lt_bad_file=conftest.nm/nofile ;; *) lt_bad_file=/dev/null ;; esac case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in *$lt_bad_file* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break 2 ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break 2 ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS=$lt_save_ifs done : ${lt_cv_path_NM=no} fi]) if test no != "$lt_cv_path_NM"; then NM=$lt_cv_path_NM else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. 
else AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols -headers" ;; *) DUMPBIN=: ;; esac fi AC_SUBST([DUMPBIN]) if test : != "$DUMPBIN"; then NM=$DUMPBIN fi fi test -z "$NM" && NM=nm AC_SUBST([NM]) _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], [lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) cat conftest.out >&AS_MESSAGE_LOG_FD if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest*]) ])# LT_PATH_NM # Old names: AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_PROG_NM], []) dnl AC_DEFUN([AC_PROG_NM], []) # _LT_CHECK_SHAREDLIB_FROM_LINKLIB # -------------------------------- # how to determine the name of the shared library # associated with a specific link library. # -- PORTME fill in with the dynamic library characteristics m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], [m4_require([_LT_DECL_EGREP]) m4_require([_LT_DECL_OBJDUMP]) m4_require([_LT_DECL_DLLTOOL]) AC_CACHE_CHECK([how to associate runtime and link libraries], lt_cv_sharedlib_from_linklib_cmd, [lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh; # decide which one to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd=$ECHO ;; esac ]) sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO _LT_DECL([], [sharedlib_from_linklib_cmd], [1], [Command to associate shared and link libraries]) ])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB # _LT_PATH_MANIFEST_TOOL # ---------------------- # locate the manifest tool m4_defun([_LT_PATH_MANIFEST_TOOL], [AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], [lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out cat conftest.err >&AS_MESSAGE_LOG_FD if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest*]) if test yes != "$lt_cv_path_mainfest_tool"; then MANIFEST_TOOL=: fi _LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl ])# _LT_PATH_MANIFEST_TOOL # _LT_DLL_DEF_P([FILE]) # --------------------- # True iff FILE is a Windows DLL '.def' file. 
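# For example, the first "real" (non-blank, non-comment) line of an
# accepted file begins with EXPORTS or LIBRARY, as in this sketch
# (';' starts a comment in .def syntax):
#
#   ; illustrative example only
#   LIBRARY "foo"
#   EXPORTS
#   some_function
#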
# Keep in sync with func_dll_def_p in the libtool script AC_DEFUN([_LT_DLL_DEF_P], [dnl test DEF = "`$SED -n dnl -e '\''s/^[[ ]]*//'\'' dnl Strip leading whitespace -e '\''/^\(;.*\)*$/d'\'' dnl Delete empty lines and comments -e '\''s/^\(EXPORTS\|LIBRARY\)\([[ ]].*\)*$/DEF/p'\'' dnl -e q dnl Only consider the first "real" line $1`" dnl ])# _LT_DLL_DEF_P # LT_LIB_M # -------- # check for math library AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in *-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) # These system don't have libm, or don't need it ;; *-ncr-sysv4.3*) AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw) AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ;; *) AC_CHECK_LIB(m, cos, LIBM=-lm) ;; esac AC_SUBST([LIBM]) ])# LT_LIB_M # Old name: AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_CHECK_LIBM], []) # _LT_COMPILER_NO_RTTI([TAGNAME]) # ------------------------------- m4_defun([_LT_COMPILER_NO_RTTI], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= if test yes = "$GCC"; then case $cc_basename in nvcc*) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; *) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; esac _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], lt_cv_prog_compiler_rtti_exceptions, [-fno-rtti -fno-exceptions], [], [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], [Compiler flag to turn off builtin functions]) ])# _LT_COMPILER_NO_RTTI # _LT_CMD_GLOBAL_SYMBOLS # ---------------------- m4_defun([_LT_CMD_GLOBAL_SYMBOLS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([LT_PATH_NM])dnl AC_REQUIRE([LT_PATH_LD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_TAG_COMPILER])dnl # Check for command to grab the raw symbol name followed by C symbol from nm. AC_MSG_CHECKING([command to parse $NM output from $compiler object]) AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [ # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[[BCDEGRST]]' # Regexp to match symbols that can be accessed directly from C. sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[[BCDT]]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[[ABCDGISTW]]' ;; hpux*) if test ia64 = "$host_cpu"; then symcode='[[ABCDEGRST]]' fi ;; irix* | nonstopux*) symcode='[[BCDEGRST]]' ;; osf*) symcode='[[BCDEGQRST]]' ;; solaris*) symcode='[[BDRT]]' ;; sco3.2v5*) symcode='[[DT]]' ;; sysv4.2uw2*) symcode='[[DT]]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[[ABDT]]' ;; sysv4) symcode='[[DFNSTU]]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[[ABCDGIRSTW]]' ;; esac if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Gets list of data symbols to import. lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" # Adjust the below global symbol transforms to fixup imported variables. 
lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" lt_c_name_lib_hook="\ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" else # Disable hooks by default. lt_cv_sys_global_symbol_to_import= lt_cdecl_hook= lt_c_name_hook= lt_c_name_lib_hook= fi # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n"\ $lt_cdecl_hook\ " -e 's/^T .* \(.*\)$/extern int \1();/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ $lt_c_name_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" # Transform an extracted symbol line into symbol name with lib prefix and # symbol address. lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ $lt_c_name_lib_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function, # D for any global variable and I for any imported variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK ['"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ " /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ " /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ " {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ " s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx]" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. nlist=conftest.nm if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then # Try sorting and uniquifying the output. 
if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT@&t@_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT@&t@_DLSYM_CONST #else # define LT@&t@_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT@&t@_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[[]] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS=conftstm.$ac_objext CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test yes = "$pipe_works"; then break else lt_cv_sys_global_symbol_pipe= fi done ]) if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then AC_MSG_RESULT(failed) else AC_MSG_RESULT(ok) fi # Response file support. 
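# (GNU nm, like other binutils tools, accepts '@FILE' to read additional
# command-line arguments from FILE, and MS dumpbin understands the same
# syntax; so, for example, 'nm @objects.lst' -- with objects.lst naming one
# object file per line -- lists symbols from all of them.  The probe below
# merely sniffs the tool's help output for that capability and records it
# as nm_file_list_spec.)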
if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then nm_file_list_spec='@' fi _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], [Take the output of nm and produce a listing of raw symbols and C names]) _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], [Transform the output of nm in a proper C declaration]) _LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1], [Transform the output of nm into a list of symbols to manually relocate]) _LT_DECL([global_symbol_to_c_name_address], [lt_cv_sys_global_symbol_to_c_name_address], [1], [Transform the output of nm in a C name address pair]) _LT_DECL([global_symbol_to_c_name_address_lib_prefix], [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], [Transform the output of nm in a C name address pair when lib prefix is needed]) _LT_DECL([nm_interface], [lt_cv_nm_interface], [1], [The name lister interface]) _LT_DECL([], [nm_file_list_spec], [1], [Specify filename containing input files for $NM]) ]) # _LT_CMD_GLOBAL_SYMBOLS # _LT_COMPILER_PIC([TAGNAME]) # --------------------------- m4_defun([_LT_COMPILER_PIC], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_wl, $1)= _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)= m4_if([$1], [CXX], [ # C++ specific cases for pic, static, wl, etc. if test yes = "$GXX"; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) case $host_os in os2*) _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. 
case $host_cpu in hppa*64*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else case $host_os in aix[[4-9]]*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; dgux*) case $cc_basename in ec++*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; ghcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' if test ia64 != "$host_cpu"; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' fi ;; aCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64, which still supported -KPIC. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL 8.0, 9.0 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; cxx*) # Digital/Compaq C++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; lcc*) # Lucid _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ], [ if test yes = "$GCC"; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
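      # A typical way a source tree consumes this define is a sketch like
      # the following, where FOO_API is a placeholder name:
      #
      #   #if defined DLL_EXPORT
      #   # define FOO_API __declspec(dllexport)
      #   #else
      #   # define FOO_API
      #   #endif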
# Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) case $host_os in os2*) _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' case $cc_basename in nagfor*) # NAG Fortran compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) case $host_os in os2*) _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ;; esac ;; hpux9* | hpux10* | hpux11*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC (with -KPIC) is the default. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64, which still supported -KPIC. 
ecc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # Lahey Fortran 8.1. lf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ;; nagfor*) # NAG Fortran compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; tcc*) # Fabrice Bellard et al's Tiny C Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; ccc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ;; *Sun\ F* | *Sun*Fortran*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ;; *Intel*\ [[CF]]*Compiler*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; *Portland\ Group*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; esac ;; newsos6) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All OSF/1 code is PIC. 
_LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; rdos*) _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; solaris*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; *) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; esac ;; sunos4*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; unicos*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; uts4*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ]) case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ;; esac AC_CACHE_CHECK([for $compiler option to produce PIC], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) _LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) # # Check to make sure the PIC flag actually works. # if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; esac], [_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) fi _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], [Additional compiler flags for building library objects]) _LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], [How to pass a linker flag through the compiler]) # # Check to make sure the static flag actually works. # wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" _LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], [Compiler flag to prevent dynamic linking]) ])# _LT_COMPILER_PIC # _LT_LINKER_SHLIBS([TAGNAME]) # ---------------------------- # See if the linker supports building shared libraries. 
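# On success the findings are recorded in per-tag shell variables such as
# archive_cmds and archive_expsym_cmds (the actual link recipes),
# export_symbols_cmds (how to derive the export list from nm output) and
# hardcode_libdir_flag_spec (how to embed a run path in the result); see
# the defaults initialized at the top of the macro below.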
m4_defun([_LT_LINKER_SHLIBS], [AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) m4_if([$1], [CXX], [ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] case $host_os in aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi ;; pw32*) _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ;; esac ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac ], [ runpath_var= _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_cmds, $1)= _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(old_archive_from_new_cmds, $1)= 
_LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= _LT_TAGVAR(thread_safe_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list _LT_TAGVAR(include_expsyms, $1)= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ' (' and ')$', so one must not match beginning or # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', # as well as any symbol that contains 'd'. _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test yes != "$GCC"; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd* | bitrig*) with_gnu_ld=no ;; esac _LT_TAGVAR(ld_shlibs, $1)=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test yes = "$with_gnu_ld"; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test yes = "$lt_use_gnu_ld_interface"; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='$wl' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi supports_anon_versioning=no case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. 
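  # (For reference, the version sniff above keys on banners such as
  #   GNU ld (GNU Binutils) 2.40
  # or, for gold, lines like
  #   GNU gold (GNU Binutils 2.40) 1.16
  # -- per the table, anonymous version tags are taken to work from
  # binutils 2.12 on, plus a few vendor-patched 2.11 releases.)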
case $host_os in aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test ia64 != "$host_cpu"; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... 
_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported shrext_cmds=.dll _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow, very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space.
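# A worked example of the image-base arithmetic in the command below
# (illustrative only): whatever ${RANDOM-$$} yields, `% 4096` maps it
# to 0..4095, `/ 2` to 0..2047, and `* 262144` (256 KiB) to
# 0..0x1FFC0000; adding 1342177280 (0x50000000) therefore selects a
# 256 KiB-aligned image base in 0x50000000..0x6FFC0000, exactly the
# range described above.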
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test linux-dietlibc = "$host_os"; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test no = "$tmp_diet" then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 _LT_TAGVAR(whole_archive_flag_spec, $1)= tmp_sharedflag='--shared' ;; nagfor*) # NAGFOR 5.3 tmp_sharedflag='-Wl,-shared' ;; xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi case $cc_basename in tcc*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic' ;; xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience 
--no-whole-archive' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test yes = "$supports_anon_versioning"; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
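# Despite the caveat above, the hardcode_libdir_flag_spec configured
# just below still records the library directory. As an illustration
# (assuming a GNU-compatible driver; /usr/local/lib is a placeholder),
# with $wl expanding to -Wl, the spec becomes
#   -Wl,-rpath -Wl,/usr/local/lib
# which embeds that directory as DT_RPATH/DT_RUNPATH in the output;
# `readelf -d` on the result shows the recorded entry.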
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; sunos4*) _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then runpath_var= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. _LT_TAGVAR(hardcode_minus_L, $1)=yes if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. _LT_TAGVAR(hardcode_direct, $1)=unsupported fi ;; aix[[4-9]]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. 
If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. # For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then aix_use_runtimelinking=yes break fi done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # traditional, no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no ;; esac if test yes = "$GCC"; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi ;; esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag="$shared_flag "'$wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. 
The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. _LT_TAGVAR(always_export_symbols, $1)=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared libraries. _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; bsdi[[45]]*) _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' # FIXME: Should let the user specify the lib program. _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
freebsd* | dragonfly*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; hpux9*) if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ;; hpux10*) if test yes,no = "$GCC,$with_gnu_ld"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test no = "$with_gnu_ld"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes fi ;; hpux11*) if test yes,no = "$GCC,$with_gnu_ld"; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) m4_if($1, [], [ # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) _LT_LINKER_OPTION([if $CC understands -b], _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) ;; esac fi if test no = "$with_gnu_ld"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # hardcode_minus_L: 
Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. # This should be the same for all languages, so no per-tag cache variable. AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], [lt_cv_irix_exported_symbol], [save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" AC_LINK_IFELSE( [AC_LANG_SOURCE( [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], [C++], [[int foo (void) { return 0; }]], [Fortran 77], [[ subroutine foo end]], [Fortran], [[ subroutine foo end]])])], [lt_cv_irix_exported_symbol=yes], [lt_cv_irix_exported_symbol=no]) LDFLAGS=$save_LDFLAGS]) if test yes = "$lt_cv_irix_exported_symbol"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' fi else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes _LT_TAGVAR(link_all_deplibs, $1)=yes ;; linux*) case $cc_basename in tcc*) # Fabrice Bellard et al's Tiny C Compiler _LT_TAGVAR(ld_shlibs, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; newsos6) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *nto* | *qnx*) ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags 
$wl-retain-symbols-file,$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' fi else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported shrext_cmds=.dll _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; osf3*) if test yes = "$GCC"; then _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; osf4* | osf5*) # as osf3* with the addition of the -msym flag if test yes = "$GCC"; then _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i
in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; solaris*) _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' if test yes = "$GCC"; then wlarc='$wl' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='$wl' _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. GCC discards it without '$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test yes = "$GCC"; then _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' else _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' fi ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes ;; sunos4*) if test sequent = "$host_vendor"; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4) case $host_vendor in sni) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. 
_LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' _LT_TAGVAR(hardcode_direct, $1)=no ;; motorola) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4.3*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes _LT_TAGVAR(ld_shlibs, $1)=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
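# Illustration of the distinction drawn above (assumed SVR4-style
# linker; not executed here): with the -z text set just below, a link
# such as
#   cc -G -z text -o libfoo.so foo.o
# is rejected if foo.o still contains text relocations (non-PIC code),
# whereas -z defs would additionally reject the unresolved libc
# references that are unavoidable here because -lc is not passed.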
_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(ld_shlibs, $1)=no ;; esac if test sni = "$host_vendor"; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym' ;; esac fi fi ]) AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no _LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld _LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl _LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl _LT_DECL([], [extract_expsyms_cmds], [2], [The commands to extract the exported symbol list from a shared archive]) # # Do we need to explicitly link libc? # case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in x|xyes) # Assume -lc should be added _LT_TAGVAR(archive_cmds_need_lc, $1)=yes if test yes,yes = "$GCC,$enable_shared"; then case $_LT_TAGVAR(archive_cmds, $1) in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. AC_CACHE_CHECK([whether -lc should be explicitly linked in], [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), [$RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) _LT_TAGVAR(allow_undefined_flag, $1)= if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) then lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no else lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes fi _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* ]) _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) ;; esac fi ;; esac _LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], [Whether or not to add -lc for building shared libraries]) _LT_TAGDECL([allow_libtool_libs_with_static_runtimes], [enable_shared_with_static_runtimes], [0], [Whether or not to disallow shared libs when runtime libs are static]) _LT_TAGDECL([], [export_dynamic_flag_spec], [1], [Compiler flag to allow reflexive dlopens]) _LT_TAGDECL([], [whole_archive_flag_spec], [1], [Compiler flag to generate shared objects directly from archives]) _LT_TAGDECL([], [compiler_needs_object], [1], [Whether the compiler copes with passing no objects directly]) _LT_TAGDECL([], [old_archive_from_new_cmds], [2], [Create an old-style archive from a shared archive]) _LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], [Create a temporary old-style archive to link instead of a shared archive]) _LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) _LT_TAGDECL([], [archive_expsym_cmds], [2]) _LT_TAGDECL([], [module_cmds], [2], [Commands used to build a loadable module if different from building a shared archive.]) _LT_TAGDECL([], [module_expsym_cmds], [2]) _LT_TAGDECL([], [with_gnu_ld], [1], [Whether we are building with GNU ld or not]) _LT_TAGDECL([], [allow_undefined_flag], [1], [Flag that allows shared libraries with undefined symbols to be built]) _LT_TAGDECL([], [no_undefined_flag], [1], [Flag that enforces no undefined symbols]) _LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], [Flag to hardcode $libdir into a binary during linking. 
This must work even if $libdir does not exist]) _LT_TAGDECL([], [hardcode_libdir_separator], [1], [Whether we need a single "-rpath" flag with a separated argument]) _LT_TAGDECL([], [hardcode_direct], [0], [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_direct_absolute], [0], [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes DIR into the resulting binary and the resulting library dependency is "absolute", i.e impossible to change by setting $shlibpath_var if the library is relocated]) _LT_TAGDECL([], [hardcode_minus_L], [0], [Set to "yes" if using the -LDIR flag during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_shlibpath_var], [0], [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_automatic], [0], [Set to "yes" if building a shared library automatically hardcodes DIR into the library and all subsequent libraries and executables linked against it]) _LT_TAGDECL([], [inherit_rpath], [0], [Set to yes if linker adds runtime paths of dependent libraries to runtime path list]) _LT_TAGDECL([], [link_all_deplibs], [0], [Whether libtool must link a program against all its dependency libraries]) _LT_TAGDECL([], [always_export_symbols], [0], [Set to "yes" if exported symbols are required]) _LT_TAGDECL([], [export_symbols_cmds], [2], [The commands to list exported symbols]) _LT_TAGDECL([], [exclude_expsyms], [1], [Symbols that should not be listed in the preloaded symbols]) _LT_TAGDECL([], [include_expsyms], [1], [Symbols that must always be exported]) _LT_TAGDECL([], [prelink_cmds], [2], [Commands necessary for linking programs (against libraries) with templates]) _LT_TAGDECL([], [postlink_cmds], [2], [Commands necessary for finishing linking programs]) _LT_TAGDECL([], [file_list_spec], [1], [Specify filename containing input files]) dnl FIXME: Not yet implemented dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], dnl [Compiler flag to generate thread safe objects]) ])# _LT_LINKER_SHLIBS # _LT_LANG_C_CONFIG([TAG]) # ------------------------ # Ensure that the configuration variables for a C compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to 'libtool'. m4_defun([_LT_LANG_C_CONFIG], [m4_require([_LT_DECL_EGREP])dnl lt_save_CC=$CC AC_LANG_PUSH(C) # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' _LT_TAG_COMPILER # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
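# Rough sketch of how the probes sequenced below operate (informal;
# nothing in this comment is executed): each check writes the one-line
# test code declared above into a conftest source file, compiles it
# with the candidate flag roughly as
#   $CC -c <candidate-flag> conftest.$ac_ext 2>conftest.err
# and treats the flag as supported only when conftest.err contains
# nothing beyond the saved compiler boilerplate.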
if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) LT_SYS_DLOPEN_SELF _LT_CMD_STRIPLIB # Report what library types will actually be built AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_CONFIG($1) fi AC_LANG_POP CC=$lt_save_CC ])# _LT_LANG_C_CONFIG # _LT_LANG_CXX_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a C++ compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to 'libtool'. m4_defun([_LT_LANG_CXX_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl if test -n "$CXX" && ( test no != "$CXX" && ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || (test g++ != "$CXX"))); then AC_PROG_CXXCPP else _lt_caught_CXX_error=yes fi AC_LANG_PUSH(C++) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
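# For context (an informal note, not part of the macro logic): the
# settings probed inside this guard are written to the generated
# libtool script under the CXX tag, so a later invocation along the
# lines of
#   ./libtool --tag=CXX --mode=link $CXX -o libfoo.la foo.lo -rpath /usr/local/lib
# picks up the C++-specific archive_cmds and hardcode settings
# recorded here rather than the C defaults.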
if test yes != "$_lt_caught_CXX_error"; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test yes = "$GXX"; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' else _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= fi if test yes = "$GXX"; then # Set up default GNU C++ configuration LT_PATH_LD # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test yes = "$with_gnu_ld"; then _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='$wl' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) _LT_TAGVAR(ld_shlibs, $1)=yes case $host_os in aix3*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aix[[4-9]]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. # For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no ;; esac if test yes = "$GXX"; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. 
Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag=$shared_flag' $wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. _LT_TAGVAR(always_export_symbols, $1)=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. # The "-G" linker flag allows undefined symbols. _LT_TAGVAR(no_undefined_flag, $1)='-bernotok' # Determine the default libpath from the value encoded in an empty # executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. 
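# Illustrative recap (editor's sketch, library names invented; see the
# soname table earlier in this case statement):
#   with_aix_soname=aix   ->  libfoo.a(libfoo.so.1), traditional member
#   with_aix_soname=svr4  ->  libfoo.so.1(shr.o), svr4-style archive
#   with_aix_soname=both  ->  both artifacts are produced
# which is why the commands below stage files in $realname.d and only
# assemble the final archives with $AR at the end.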
_LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ func_to_tool_file "$lt_outputfile"~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported shrext_cmds=.dll _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++
library support _LT_TAGVAR(ld_shlibs, $1)=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF _LT_TAGVAR(ld_shlibs, $1)=no ;; freebsd-elf*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_TAGVAR(ld_shlibs, $1)=yes ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; hpux9*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; hpux10*|hpux11*) if test no = "$with_gnu_ld"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) ;; *) _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
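# For instance (a sketch with invented names), the IRIX archive step is
#   CC -ar -WR,-u -o libfoo.a a.o b.o
# so templates instantiated while compiling a.o and b.o land in libfoo.a
# too, which a plain ar invocation would miss.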
_LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' fi fi _LT_TAGVAR(link_all_deplibs, $1)=yes ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
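# As a hedged example: an icpc 7.x banner contains text along the lines of
#   Intel(R) C++ Compiler ..., Version 7.1 ...
# and the version case below keys on the "Version 7." substring to decide
# whether $predep_objects/$postdep_objects may appear on the link line.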
case `$CC -V 2>&1` in *"Version 7."*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ;; cxx*) # Compaq C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' 
_LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
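# For example (editor's sketch, names invented), the Sun archive step is
#   CC -xar -o libfoo.a a.o b.o
# rather than ar followed by ranlib, so instantiated templates are pulled
# into the archive.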
_LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; m88k*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) _LT_TAGVAR(ld_shlibs, $1)=yes ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
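# e.g. (illustrative, names invented): with KCC on osf3 the archive step
# becomes
#   KCC -Bstatic -o libfoo.a a.o b.o
# while later OSF releases drop -Bstatic, as the host case below shows.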
case $host in osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; cxx*) case $host in osf3*) _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ;; *) _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ $RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ;; esac _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes,no = "$GXX,$with_gnu_ld"; then _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' case $host in osf3*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test yes,no = "$GXX,$with_gnu_ld"; then _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require '-G' NOT '-shared' on this # platform. 
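# i.e. (editor's sketch, soname invented) the link line for old g++ here
# looks like
#   g++ -G -nostdlib ... -Wl,-h -Wl,libfoo.so.1 -o libfoo.so.1.0.0
# with -G substituted for -shared.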
_LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
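# In concrete terms (editor's sketch; the exact -Wl, spelling depends on
# the driver), the resulting link resembles
#   cc -G -Wl,-z,text ... -o libfoo.so
# where -z text still catches bad relocations, while -z defs is avoided
# because libc symbols legitimately remain undefined here (no -lc).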
_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ '"$_LT_TAGVAR(old_archive_cmds, $1)" _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ '"$_LT_TAGVAR(reload_cmds, $1)" ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no _LT_TAGVAR(GCC, $1)=$GXX _LT_TAGVAR(LD, $1)=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test yes != "$_lt_caught_CXX_error" AC_LANG_POP ])# _LT_LANG_CXX_CONFIG # _LT_FUNC_STRIPNAME_CNF # ---------------------- # func_stripname_cnf prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # # This function is identical to the (non-XSI) version of func_stripname, # except this one can be used by m4 code that may be executed by configure, # rather than the libtool script. m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl AC_REQUIRE([_LT_DECL_SED]) AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) func_stripname_cnf () { case @S|@2 in .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;; *) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;; esac } # func_stripname_cnf ])# _LT_FUNC_STRIPNAME_CNF # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) # --------------------------------- # Figure out "hidden" library dependencies from verbose # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. 
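# A hedged example of the classification this macro performs (paths and
# names hypothetical): given verbose link output such as
#   collect2 -L/usr/lib/gcc crtbegin.o conftest.o -lstdc++ -lm crtend.o
# the -L directory seen before conftest.o joins compiler_lib_search_path,
# crtbegin.o is recorded as a predep object, -lstdc++ and -lm become
# postdeps, and crtend.o becomes a postdep object.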
m4_defun([_LT_SYS_HIDDEN_LIBDEPS], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl # Dependencies to place before and after the object being linked: _LT_TAGVAR(predep_objects, $1)= _LT_TAGVAR(postdep_objects, $1)= _LT_TAGVAR(predeps, $1)= _LT_TAGVAR(postdeps, $1)= _LT_TAGVAR(compiler_lib_search_path, $1)= dnl we can't use the lt_simple_compile_test_code here, dnl because it contains code intended for an executable, dnl not a library. It's possible we should let each dnl tag define a new lt_????_link_test_code variable, dnl but it's only used here... m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF int a; void foo (void) { a = 0; } _LT_EOF ], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF ], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer*4 a a=0 return end _LT_EOF ], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer a a=0 return end _LT_EOF ], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF public class foo { private int a; public void bar (void) { a = 0; } }; _LT_EOF ], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF package foo func foo() { } _LT_EOF ]) _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac dnl Parse the compiler output and extract the necessary dnl objects, libraries and library flags. if AC_TRY_EVAL(ac_compile); then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $prev$p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test x-L = "$p" || test x-R = "$p"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test no = "$pre_test_object_deps_done"; then case $prev in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p else _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$_LT_TAGVAR(postdeps, $1)"; then _LT_TAGVAR(postdeps, $1)=$prev$p else _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. 
if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test no = "$pre_test_object_deps_done"; then if test -z "$_LT_TAGVAR(predep_objects, $1)"; then _LT_TAGVAR(predep_objects, $1)=$p else _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" fi else if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then _LT_TAGVAR(postdep_objects, $1)=$p else _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling $1 test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken m4_if([$1], [CXX], [case $host_os in interix[[3-9]]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. _LT_TAGVAR(predep_objects,$1)= _LT_TAGVAR(postdep_objects,$1)= _LT_TAGVAR(postdeps,$1)= ;; esac ]) case " $_LT_TAGVAR(postdeps, $1) " in *" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; esac _LT_TAGVAR(compiler_lib_search_dirs, $1)= if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'` fi _LT_TAGDECL([], [compiler_lib_search_dirs], [1], [The directories searched by this compiler when creating a shared library]) _LT_TAGDECL([], [predep_objects], [1], [Dependencies to place before and after the objects being linked to create a shared library]) _LT_TAGDECL([], [postdep_objects], [1]) _LT_TAGDECL([], [predeps], [1]) _LT_TAGDECL([], [postdeps], [1]) _LT_TAGDECL([], [compiler_lib_search_path], [1], [The library search path used internally by the compiler when linking a shared library]) ])# _LT_SYS_HIDDEN_LIBDEPS # _LT_LANG_F77_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a Fortran 77 compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_F77_CONFIG], [AC_LANG_PUSH(Fortran 77) if test -z "$F77" || test no = "$F77"; then _lt_disable_F77=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for f77 test sources. ac_ext=f # Object file extension for compiled f77 test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the F77 compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test yes != "$_lt_disable_F77"; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${F77-"f77"} CFLAGS=$FFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) GCC=$G77 if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)=$G77 _LT_TAGVAR(LD, $1)=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS fi # test yes != "$_lt_disable_F77" AC_LANG_POP ])# _LT_LANG_F77_CONFIG # _LT_LANG_FC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for a Fortran compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_FC_CONFIG], [AC_LANG_PUSH(Fortran) if test -z "$FC" || test no = "$FC"; then _lt_disable_FC=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for fc test sources. ac_ext=${ac_fc_srcext-f} # Object file extension for compiled fc test sources. 
objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the FC compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test yes != "$_lt_disable_FC"; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${FC-"f95"} CFLAGS=$FCFLAGS compiler=$CC GCC=$ac_cv_fc_compiler_gnu _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu _LT_TAGVAR(LD, $1)=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS fi # test yes != "$_lt_disable_FC" AC_LANG_POP ])# _LT_LANG_FC_CONFIG # _LT_LANG_GCJ_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Java Compiler compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_GCJ_CONFIG], [AC_REQUIRE([LT_PROG_GCJ])dnl AC_LANG_SAVE # Source file extension for Java test sources. ac_ext=java # Object file extension for compiled Java test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="class foo {}" # Code to be used in simple link tests lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. 
lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GCJ-"gcj"} CFLAGS=$GCJFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)=$LD _LT_CC_BASENAME([$compiler]) # GCJ did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GCJ_CONFIG # _LT_LANG_GO_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Go compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_GO_CONFIG], [AC_REQUIRE([LT_PROG_GO])dnl AC_LANG_SAVE # Source file extension for Go test sources. ac_ext=go # Object file extension for compiled Go test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="package main; func main() { }" # Code to be used in simple link tests lt_simple_link_test_code='package main; func main() { }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GOC-"gccgo"} CFLAGS=$GOFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)=$LD _LT_CC_BASENAME([$compiler]) # Go did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GO_CONFIG # _LT_LANG_RC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for the Windows resource compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_RC_CONFIG], [AC_REQUIRE([LT_PROG_RC])dnl AC_LANG_SAVE # Source file extension for RC test sources. ac_ext=rc # Object file extension for compiled RC test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' # Code to be used in simple link tests lt_simple_link_test_code=$lt_simple_compile_test_code # ltmain only uses $CC for tagged configurations so make sure $CC is set. 
_LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC= CC=${RC-"windres"} CFLAGS= compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes if test -n "$compiler"; then : _LT_CONFIG($1) fi GCC=$lt_save_GCC AC_LANG_RESTORE CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_RC_CONFIG # LT_PROG_GCJ # ----------- AC_DEFUN([LT_PROG_GCJ], [m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], [AC_CHECK_TOOL(GCJ, gcj,) test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2" AC_SUBST(GCJFLAGS)])])[]dnl ]) # Old name: AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_GCJ], []) # LT_PROG_GO # ---------- AC_DEFUN([LT_PROG_GO], [AC_CHECK_TOOL(GOC, gccgo,) ]) # LT_PROG_RC # ---------- AC_DEFUN([LT_PROG_RC], [AC_CHECK_TOOL(RC, windres,) ]) # Old name: AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_RC], []) # _LT_DECL_EGREP # -------------- # If we don't have a new enough Autoconf to choose the best grep # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_EGREP], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_REQUIRE([AC_PROG_FGREP])dnl test -z "$GREP" && GREP=grep _LT_DECL([], [GREP], [1], [A grep program that handles long lines]) _LT_DECL([], [EGREP], [1], [An ERE matcher]) _LT_DECL([], [FGREP], [1], [A literal string matcher]) dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too AC_SUBST([GREP]) ]) # _LT_DECL_OBJDUMP # -------------- # If we don't have a new enough Autoconf to choose the best objdump # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_OBJDUMP], [AC_CHECK_TOOL(OBJDUMP, objdump, false) test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) AC_SUBST([OBJDUMP]) ]) # _LT_DECL_DLLTOOL # ---------------- # Ensure DLLTOOL variable is set. m4_defun([_LT_DECL_DLLTOOL], [AC_CHECK_TOOL(DLLTOOL, dlltool, false) test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program]) AC_SUBST([DLLTOOL]) ]) # _LT_DECL_SED # ------------ # Check for a fully-functional sed program, that truncates # as few characters as possible. Prefer GNU sed if found. m4_defun([_LT_DECL_SED], [AC_PROG_SED test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" _LT_DECL([], [SED], [1], [A sed program that does not truncate output]) _LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ])# _LT_DECL_SED m4_ifndef([AC_PROG_SED], [ ############################################################ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_SED. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # ############################################################ m4_defun([AC_PROG_SED], [AC_MSG_CHECKING([for a sed that does not truncate output]) AC_CACHE_VAL(lt_cv_path_SED, [# Loop through the user's path and test for sed and gsed. # Then use that list of sed's as ones to test for truncation. as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for lt_ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" fi done done done IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris # along with /bin/sed that truncates output. for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do test ! -f "$lt_ac_sed" && continue cat /dev/null > conftest.in lt_ac_count=0 echo $ECHO_N "0123456789$ECHO_C" >conftest.in # Check for GNU sed and select it if it is found. if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then lt_cv_path_SED=$lt_ac_sed break fi while true; do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo >>conftest.nl $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break cmp -s conftest.out conftest.nl || break # 10000 chars as input seems more than enough test 10 -lt "$lt_ac_count" && break lt_ac_count=`expr $lt_ac_count + 1` if test "$lt_ac_count" -gt "$lt_ac_max"; then lt_ac_max=$lt_ac_count lt_cv_path_SED=$lt_ac_sed fi done done ]) SED=$lt_cv_path_SED AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ])#AC_PROG_SED ])#m4_ifndef # Old name: AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_SED], []) # _LT_CHECK_SHELL_FEATURES # ------------------------ # Find out whether the shell is Bourne or XSI compatible, # or has some other useful features. m4_defun([_LT_CHECK_SHELL_FEATURES], [if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi _LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac _LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ])# _LT_CHECK_SHELL_FEATURES # _LT_PATH_CONVERSION_FUNCTIONS # ----------------------------- # Determine what file name conversion functions should be used by # func_to_host_file (and, implicitly, by func_to_host_path). These are needed # for certain cross-compile configurations and native mingw. 
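# For illustration, a hedged sketch of what the selected conversion does
# (the real implementations are the func_convert_file_* helpers in
# ltmain.sh; the paths below are hypothetical examples):
#   MSYS build, mingw host:  func_to_host_file /c/users/me/libfoo.def -> c:/users/me/libfoo.def
#   native build (noop):     func_to_host_file src/libfoo.def         -> src/libfoo.def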
m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_MSG_CHECKING([how to convert $build file names to $host format]) AC_CACHE_VAL(lt_cv_to_host_file_cmd, [case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac ]) to_host_file_cmd=$lt_cv_to_host_file_cmd AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) _LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], [0], [convert $build file names to $host format])dnl AC_MSG_CHECKING([how to convert $build file names to toolchain format]) AC_CACHE_VAL(lt_cv_to_tool_file_cmd, [#assume ordinary cross tools, or native build. lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac ]) to_tool_file_cmd=$lt_cv_to_tool_file_cmd AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) _LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], [0], [convert $build files to toolchain format])dnl ])# _LT_PATH_CONVERSION_FUNCTIONS nordugrid-arc-7.1.1/m4/PaxHeaders/nls.m40000644000000000000000000000013215067751332014725 xustar0030 mtime=1759498970.116153093 30 atime=1759498971.437547929 30 ctime=1759499024.682048903 nordugrid-arc-7.1.1/m4/nls.m40000644000175000002070000000226615067751332016635 0ustar00mockbuildmock00000000000000# nls.m4 serial 3 (gettext-0.15) dnl Copyright (C) 1995-2003, 2005-2006 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. AC_PREREQ(2.50) AC_DEFUN([AM_NLS], [ AC_MSG_CHECKING([whether NLS is requested]) dnl Default is enabled NLS AC_ARG_ENABLE(nls, [ --disable-nls do not use Native Language Support], USE_NLS=$enableval, USE_NLS=yes) AC_MSG_RESULT($USE_NLS) AC_SUBST(USE_NLS) ]) nordugrid-arc-7.1.1/m4/PaxHeaders/iconv.m40000644000000000000000000000013115067751331015245 xustar0030 mtime=1759498969.938693154 29 atime=1759498971.44354802 30 ctime=1759499024.671218025 nordugrid-arc-7.1.1/m4/iconv.m40000644000175000002070000001375315067751331017155 0ustar00mockbuildmock00000000000000# iconv.m4 serial AM6 (gettext-0.17) dnl Copyright (C) 2000-2002, 2007 Free Software Foundation, Inc.
dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. AC_DEFUN([AM_ICONV_LINKFLAGS_BODY], [ dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) AC_REQUIRE([AC_LIB_RPATH]) dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV dnl accordingly. AC_LIB_LINKFLAGS_BODY([iconv]) ]) AC_DEFUN([AM_ICONV_LINK], [ dnl Some systems have iconv in libc, some have it in libiconv (OSF/1 and dnl those with the standalone portable GNU libiconv installed). AC_REQUIRE([AC_CANONICAL_HOST]) dnl for cross-compiles dnl Search for libiconv and define LIBICONV, LTLIBICONV and INCICONV dnl accordingly. AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) dnl Add $INCICONV to CPPFLAGS before performing the following checks, dnl because if the user has installed libiconv and not disabled its use dnl via --without-libiconv-prefix, he wants to use it. The first dnl AC_TRY_LINK will then fail, the second AC_TRY_LINK will succeed. am_save_CPPFLAGS="$CPPFLAGS" AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCICONV]) AC_CACHE_CHECK([for iconv], am_cv_func_iconv, [ am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no AC_TRY_LINK([#include <stdlib.h> #include <iconv.h>], [iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);], am_cv_func_iconv=yes) if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" AC_TRY_LINK([#include <stdlib.h> #include <iconv.h>], [iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd);], am_cv_lib_iconv=yes am_cv_func_iconv=yes) LIBS="$am_save_LIBS" fi ]) if test "$am_cv_func_iconv" = yes; then AC_CACHE_CHECK([for working iconv], am_cv_func_iconv_works, [ dnl This tests against bugs in AIX 5.1 and HP-UX 11.11. am_save_LIBS="$LIBS" if test $am_cv_lib_iconv = yes; then LIBS="$LIBS $LIBICONV" fi AC_TRY_RUN([ #include <iconv.h> #include <string.h> int main () { /* Test against AIX 5.1 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); if (cd_utf8_to_88591 != (iconv_t)(-1)) { static const char input[] = "\342\202\254"; /* EURO SIGN */ char buf[10]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_utf8_to_88591, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) return 1; } } #if 0 /* This bug could be worked around by the caller. */ /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static const char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) return 1; } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ if (/* Try standardized names. */ iconv_open ("UTF-8", "EUC-JP") == (iconv_t)(-1) /* Try IRIX, OSF/1 names. */ && iconv_open ("UTF-8", "eucJP") == (iconv_t)(-1) /* Try AIX names. */ && iconv_open ("UTF-8", "IBM-eucJP") == (iconv_t)(-1) /* Try HP-UX names.
*/ && iconv_open ("utf8", "eucJP") == (iconv_t)(-1)) return 1; return 0; }], [am_cv_func_iconv_works=yes], [am_cv_func_iconv_works=no], [case "$host_os" in aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; *) am_cv_func_iconv_works="guessing yes" ;; esac]) LIBS="$am_save_LIBS" ]) case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then AC_DEFINE(HAVE_ICONV, 1, [Define if you have the iconv() function and it works.]) fi if test "$am_cv_lib_iconv" = yes; then AC_MSG_CHECKING([how to link with libiconv]) AC_MSG_RESULT([$LIBICONV]) else dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV dnl either. CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi AC_SUBST(LIBICONV) AC_SUBST(LTLIBICONV) ]) AC_DEFUN([AM_ICONV], [ AM_ICONV_LINK if test "$am_cv_func_iconv" = yes; then AC_MSG_CHECKING([for iconv declaration]) AC_CACHE_VAL(am_cv_proto_iconv, [ AC_TRY_COMPILE([ #include <stdlib.h> #include <iconv.h> extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif ], [], am_cv_proto_iconv_arg1="", am_cv_proto_iconv_arg1="const") am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` AC_MSG_RESULT([$]{ac_t:- }[$]am_cv_proto_iconv) AC_DEFINE_UNQUOTED(ICONV_CONST, $am_cv_proto_iconv_arg1, [Define as const if the declaration of iconv() needs const.]) fi ]) nordugrid-arc-7.1.1/m4/PaxHeaders/po.m40000644000000000000000000000013215067751332014547 xustar0030 mtime=1759498970.129073731 30 atime=1759498971.436547913 30 ctime=1759499024.683111442 nordugrid-arc-7.1.1/m4/po.m40000644000175000002070000004460615067751332016457 0ustar00mockbuildmock00000000000000# po.m4 serial 15 (gettext-0.17) dnl Copyright (C) 1995-2007 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl dnl This file can be used in projects which are not available under dnl the GNU General Public License or the GNU Library General Public dnl License but which still want to provide support for the GNU gettext dnl functionality. dnl Please note that the actual code of the GNU gettext library is covered dnl by the GNU Library General Public License, and the rest of the GNU dnl gettext package is covered by the GNU General Public License. dnl They are *not* in the public domain. dnl Authors: dnl Ulrich Drepper , 1995-2000. dnl Bruno Haible , 2000-2003. AC_PREREQ(2.50) dnl Checks for all prerequisites of the po subdirectory. AC_DEFUN([AM_PO_SUBDIRS], [ AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl AC_REQUIRE([AM_PROG_MKDIR_P])dnl defined by automake AC_REQUIRE([AM_NLS])dnl dnl Release version of the gettext macros. This is used to ensure that dnl the gettext macros and po/Makefile.in.in are in sync. AC_SUBST([GETTEXT_MACRO_VERSION], [0.17]) dnl Perform the following tests also if --disable-nls has been given, dnl because they are needed for "make dist" to work. dnl Search for GNU msgfmt in the PATH. dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions.
dnl The second test excludes FreeBSD msgfmt. AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, [$ac_dir/$ac_word --statistics /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], :) AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT) dnl Test whether it is GNU msgfmt >= 0.15. changequote(,)dnl case `$MSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) MSGFMT_015=: ;; *) MSGFMT_015=$MSGFMT ;; esac changequote([,])dnl AC_SUBST([MSGFMT_015]) changequote(,)dnl case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;; *) GMSGFMT_015=$GMSGFMT ;; esac changequote([,])dnl AC_SUBST([GMSGFMT_015]) dnl Search for GNU xgettext 0.12 or newer in the PATH. dnl The first test excludes Solaris xgettext and early GNU xgettext versions. dnl The second test excludes FreeBSD xgettext. AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], :) dnl Remove leftover from FreeBSD xgettext call. rm -f messages.po dnl Test whether it is GNU xgettext >= 0.15. changequote(,)dnl case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;; *) XGETTEXT_015=$XGETTEXT ;; esac changequote([,])dnl AC_SUBST([XGETTEXT_015]) dnl Search for GNU msgmerge 0.11 or newer in the PATH. AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, [$ac_dir/$ac_word --update -q /dev/null /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1], :) dnl Installation directories. dnl Autoconf >= 2.60 defines localedir. For older versions of autoconf, we dnl have to define it here, so that it can be used in po/Makefile. test -n "$localedir" || localedir='${datadir}/locale' AC_SUBST([localedir]) dnl Support for AM_XGETTEXT_OPTION. test -n "${XGETTEXT_EXTRA_OPTIONS+set}" || XGETTEXT_EXTRA_OPTIONS= AC_SUBST([XGETTEXT_EXTRA_OPTIONS]) AC_CONFIG_COMMANDS([po-directories], [[ for ac_file in $CONFIG_FILES; do # Support "outfile[:infile[:infile...]]" case "$ac_file" in *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; esac # PO directories have a Makefile.in generated from Makefile.in.in. case "$ac_file" in */Makefile.in) # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Treat a directory as a PO directory if and only if it has a # POTFILES.in file. This allows packages to have multiple PO # directories under different names or in different locations. 
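# As a hedged example of the transformation performed just below (the
# file name is hypothetical): a POTFILES.in line such as
#   src/clients/arcsub.cpp
# comes out of the sed pipeline as
#   $top_srcdir/src/clients/arcsub.cpp \
# with comment and blank lines dropped, each entry prefixed and
# backslash-continued, and the trailing backslash removed on the last
# line, so the result can be spliced into the PO directory's Makefile
# as the value of POTFILES.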
if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then rm -f "$ac_dir/POTFILES" test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" POMAKEFILEDEPS="POTFILES.in" # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend # on $ac_dir but don't depend on user-specified configuration # parameters. if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then # The LINGUAS file contains the set of available languages. if test -n "$OBSOLETE_ALL_LINGUAS"; then test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" fi ALL_LINGUAS_=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` # Hide the ALL_LINGUAS assigment from automake < 1.5. eval 'ALL_LINGUAS''=$ALL_LINGUAS_' POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" else # The set of available languages was given in configure.in. # Hide the ALL_LINGUAS assigment from automake < 1.5. eval 'ALL_LINGUAS''=$OBSOLETE_ALL_LINGUAS' fi # Compute POFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) # Compute UPDATEPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) # Compute DUMMYPOFILES # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) # Compute GMOFILES # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) case "$ac_given_srcdir" in .) srcdirpre= ;; *) srcdirpre='$(srcdir)/' ;; esac POFILES= UPDATEPOFILES= DUMMYPOFILES= GMOFILES= for lang in $ALL_LINGUAS; do POFILES="$POFILES $srcdirpre$lang.po" UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" DUMMYPOFILES="$DUMMYPOFILES $lang.nop" GMOFILES="$GMOFILES $srcdirpre$lang.gmo" done # CATALOGS depends on both $ac_dir and the user's LINGUAS # environment variable. INST_LINGUAS= if test -n "$ALL_LINGUAS"; then for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "$LINGUAS"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then INST_LINGUAS="$INST_LINGUAS $presentlang" fi done fi CATALOGS= if test -n "$INST_LINGUAS"; then for lang in $INST_LINGUAS; do CATALOGS="$CATALOGS $lang.gmo" done fi test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do if test -f "$f"; then case "$f" in *.orig | *.bak | *~) ;; *) cat "$f" >> "$ac_dir/Makefile" ;; esac fi done fi ;; esac done]], [# Capture the value of obsolete ALL_LINGUAS because we need it to compute # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. But hide it # from automake < 1.5. eval 'OBSOLETE_ALL_LINGUAS''="$ALL_LINGUAS"' # Capture the value of LINGUAS because we need it to compute CATALOGS. 
LINGUAS="${LINGUAS-%UNSET%}" ]) ]) dnl Postprocesses a Makefile in a directory containing PO files. AC_DEFUN([AM_POSTPROCESS_PO_MAKEFILE], [ # When this code is run, in config.status, two variables have already been # set: # - OBSOLETE_ALL_LINGUAS is the value of LINGUAS set in configure.in, # - LINGUAS is the value of the environment variable LINGUAS at configure # time. changequote(,)dnl # Adjust a relative srcdir. ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` ac_dir_suffix="/`echo "$ac_dir"|sed 's%^\./%%'`" ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` # In autoconf-2.13 it is called $ac_given_srcdir. # In autoconf-2.50 it is called $srcdir. test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" case "$ac_given_srcdir" in .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; /*) top_srcdir="$ac_given_srcdir" ;; *) top_srcdir="$ac_dots$ac_given_srcdir" ;; esac # Find a way to echo strings without interpreting backslash. if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then gt_echo='echo' else if test "X`(printf '%s\n' '\t') 2>/dev/null`" = 'X\t'; then gt_echo='printf %s\n' else echo_func () { cat < "$ac_file.tmp" if grep -l '@TCLCATALOGS@' "$ac_file" > /dev/null; then # Add dependencies that cannot be formulated as a simple suffix rule. for lang in $ALL_LINGUAS; do frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'` cat >> "$ac_file.tmp" < /dev/null; then # Add dependencies that cannot be formulated as a simple suffix rule. for lang in $ALL_LINGUAS; do frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'` cat >> "$ac_file.tmp" <> "$ac_file.tmp" <. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by 'PROGRAMS ARGS'. object Object file output by 'PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputting dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac # Get the directory component of the given path, and save it in the # global variables '$dir'. Note that this directory component will # be either empty or ending with a '/' character. This is deliberate. set_dir_from () { case $1 in */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; *) dir=;; esac } # Get the suffix-stripped basename of the given path, and save it the # global variable '$base'. set_base_from () { base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` } # If no dependency file was actually created by the compiler invocation, # we still have to create a dummy depfile, to avoid errors with the # Makefile "include basename.Plo" scheme. make_dummy_depfile () { echo "#dummy" > "$depfile" } # Factor out some common post-processing of the generated depfile. 
# Requires the auxiliary global variable '$tmpdepfile' to be set. aix_post_process_depfile () { # If the compiler actually managed to produce a dependency file, # post-process it. if test -f "$tmpdepfile"; then # Each line is of the form 'foo.o: dependency.h'. # Do two passes, one to just change these to # $object: dependency.h # and one to simply output # dependency.h: # which is needed to avoid the deleted-header problem. { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" } > "$depfile" rm -f "$tmpdepfile" else make_dummy_depfile fi } # A tabulation character. tab=' ' # A newline character. nl=' ' # Character ranges might be problematic outside the C locale. # These definitions help. upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ lower=abcdefghijklmnopqrstuvwxyz digits=0123456789 alpha=${upper}${lower} if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Avoid interferences from the environment. gccflag= dashmflag= # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvisualcpp fi if test "$depmode" = msvc7msys; then # This is just like msvc7 but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvc7 fi if test "$depmode" = xlc; then # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. gccflag=-qmakedep=gcc,-MF depmode=gcc fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. ## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. ## (see the conditional assignment to $gccflag above). ## There are various ways to get dependency output from gcc. 
Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). Also, it might not be ## supported by the other compilers which use the 'gcc' depmode. ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The second -e expression handles DOS-style file names with drive # letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the "deleted header file" problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. ## Some versions of gcc put a space before the ':'. On the theory ## that the space means something, we add a space to the output as ## well. hp depmode also adds that space, but also prefixes the VPATH ## to the object. Take care to not repeat it in the output. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like '#:fec' to the end of the # dependency line. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ | tr "$nl" ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" ;; xlc) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts '$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. 
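# Hedged illustration of the AIX flow (hypothetical file names): with
# object=sub/foo.o, the compiler's -M output in sub/foo.u (or ./foo.u
# for older versions) may contain
#   foo.o: foo.c
#   foo.o: foo.h
# and aix_post_process_depfile rewrites this into the final depfile as
#   sub/foo.o: foo.c
#   sub/foo.o: foo.h
#   foo.c:
#   foo.h:
# where the bare no-prerequisite targets guard against the
# deleted-header problem described earlier.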
set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done aix_post_process_depfile ;; tcc) # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 # FIXME: That version still under development at the moment of writing. # Make that this statement remains true also for stable, released # versions. # It will wrap lines (doesn't matter whether long or short) with a # trailing '\', as in: # # foo.o : \ # foo.c \ # foo.h \ # # It will put a trailing '\' even on the last line, and will use leading # spaces rather than leading tabs (at least since its commit 0394caf7 # "Emit spaces for -MD"). "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. # We have to change lines of the first kind to '$object: \'. sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" # And for each line of the second kind, we have to emit a 'dep.h:' # dummy dependency, to avoid the deleted-header problem. sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" rm -f "$tmpdepfile" ;; ## The order of this option in the case statement is important, since the ## shell code in configure will try each of these formats in the order ## listed in this file. A plain '-MD' option would be understood by many ## compilers, so we must ensure this comes after the gcc and icc options. pgcc) # Portland's C compiler understands '-MD'. # Will always output deps to 'file.d' where file is the root name of the # source file under compilation, even if file resides in a subdirectory. # The object file name does not affect the name of the '.d' file. # pgcc 10.2 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using '\' : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... set_dir_from "$object" # Use the source, not the object, to determine the base name, since # that's sadly what pgcc will do too. set_base_from "$source" tmpdepfile=$base.d # For projects that build the same source file twice into different object # files, the pgcc approach of using the *source* file root name can cause # problems in parallel builds. Use a locking strategy to avoid stomping on # the same $tmpdepfile. lockdir=$base.d-lock trap " echo '$0: caught signal, cleaning up...' >&2 rmdir '$lockdir' exit 1 " 1 2 13 15 numtries=100 i=$numtries while test $i -gt 0; do # mkdir is a portable test-and-set. if mkdir "$lockdir" 2>/dev/null; then # This process acquired the lock. "$@" -MD stat=$? # Release the lock. rmdir "$lockdir" break else # If the lock is being held by a different process, wait # until the winning process is done or we timeout. while test -d "$lockdir" && test $i -gt 0; do sleep 1 i=`expr $i - 1` done fi i=`expr $i - 1` done trap - 1 2 13 15 if test $i -le 0; then echo "$0: failed to acquire lock after $numtries attempts" >&2 echo "$0: check lockdir '$lockdir'" >&2 exit 1 fi if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. 
# Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" # Add 'dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in 'foo.d' instead, so we check for that too. # Subdirectories are respected. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then # Libtool generates 2 separate objects for the 2 libraries. These # two compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir$base.o.d # libtool 1.5 tmpdepfile2=$dir.libs/$base.o.d # Likewise. tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d "$@" -MD fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done # Same post-processing that is required for AIX mode. aix_post_process_depfile ;; msvc7) if test "$libtool" = yes; then showIncludes=-Wc,-showIncludes else showIncludes=-showIncludes fi "$@" $showIncludes > "$tmpdepfile" stat=$? grep -v '^Note: including file: ' "$tmpdepfile" if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The first sed program below extracts the file names and escapes # backslashes for cygpath. The second sed program outputs the file # name when reading, but also accumulates all include files in the # hold buffer in order to output them again at the end. This only # works with sed implementations that can handle large buffers. 
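# Sketch of the text being parsed here (hypothetical path, msys flavor
# of $cygpath_u shown): a compiler line such as
#   Note: including file: C:\src\arc\common.h
# is reduced to the file name, backslash-escaped, pushed through
# $cygpath_u, and finally emitted both as a dependency line
#   C:/src/arc/common.h \
# and, replayed from the hold buffer, as a bare 'C:/src/arc/common.h:'
# target.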
sed < "$tmpdepfile" -n ' /^Note: including file: *\(.*\)/ { s//\1/ s/\\/\\\\/g p }' | $cygpath_u | sort -u | sed -n ' s/ /\\ /g s/\(.*\)/'"$tab"'\1 \\/p s/.\(.*\) \\/\1:/ H $ { s/.*/'"$tab"'/ G p }' >> "$depfile" echo >> "$depfile" # make sure the fragment doesn't end with a backslash rm -f "$tmpdepfile" ;; msvc7msys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for ':' # in the target name. This is to cope with DOS-style filenames: # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. "$@" $dashmflag | sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this sed invocation # correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" # makedepend may prepend the VPATH from the source file name to the object. # No need to regex-escape $object, excess matching of '.' is harmless. sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process the last invocation # correctly. Breaking it into two sed invocations is a workaround. sed '1,2d' "$tmpdepfile" \ | tr ' ' "$nl" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. 
IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E \ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" echo "$tab" >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: nordugrid-arc-7.1.1/PaxHeaders/config.sub0000644000000000000000000000013215067751346015334 xustar0030 mtime=1759498982.907112673 30 atime=1759498994.246894518 30 ctime=1759499024.703494272 nordugrid-arc-7.1.1/config.sub0000755000175000002070000007530415067751346017252 0ustar00mockbuildmock00000000000000#! /bin/sh # Configuration validation subroutine script. # Copyright 1992-2018 Free Software Foundation, Inc. timestamp='2018-08-29' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. 
# You can get the latest version of this script from: # https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. 
echo "$1" exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Split fields of configuration type IFS="-" read -r field1 field2 field3 field4 <&2 exit 1 ;; *-*-*-*) basic_machine=$field1-$field2 os=$field3-$field4 ;; *-*-*) # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two # parts maybe_os=$field2-$field3 case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \ | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ | storm-chaos* | os2-emx* | rtmk-nova*) basic_machine=$field1 os=$maybe_os ;; android-linux) basic_machine=$field1-unknown os=linux-android ;; *) basic_machine=$field1-$field2 os=$field3 ;; esac ;; *-*) # A lone config we happen to match not fitting any patern case $field1-$field2 in decstation-3100) basic_machine=mips-dec os= ;; *-*) # Second component is usually, but not always the OS case $field2 in # Prevent following clause from handling this valid os sun*os*) basic_machine=$field1 os=$field2 ;; # Manufacturers dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ | unicom* | ibm* | next | hp | isi* | apollo | altos* \ | convergent* | ncr* | news | 32* | 3600* | 3100* \ | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ | ultra | tti* | harris | dolphin | highlevel | gould \ | cbm | ns | masscomp | apple | axis | knuth | cray \ | microblaze* | sim | cisco \ | oki | wec | wrs | winbond) basic_machine=$field1-$field2 os= ;; *) basic_machine=$field1 os=$field2 ;; esac ;; esac ;; *) # Convert single-component short-hands not valid as part of # multi-component configurations. 
case $field1 in 386bsd) basic_machine=i386-pc os=bsd ;; a29khif) basic_machine=a29k-amd os=udi ;; adobe68k) basic_machine=m68010-adobe os=scout ;; alliant) basic_machine=fx80-alliant os= ;; altos | altos3068) basic_machine=m68k-altos os= ;; am29k) basic_machine=a29k-none os=bsd ;; amdahl) basic_machine=580-amdahl os=sysv ;; amiga) basic_machine=m68k-unknown os= ;; amigaos | amigados) basic_machine=m68k-unknown os=amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=sysv4 ;; apollo68) basic_machine=m68k-apollo os=sysv ;; apollo68bsd) basic_machine=m68k-apollo os=bsd ;; aros) basic_machine=i386-pc os=aros ;; aux) basic_machine=m68k-apple os=aux ;; balance) basic_machine=ns32k-sequent os=dynix ;; blackfin) basic_machine=bfin-unknown os=linux ;; cegcc) basic_machine=arm-unknown os=cegcc ;; convex-c1) basic_machine=c1-convex os=bsd ;; convex-c2) basic_machine=c2-convex os=bsd ;; convex-c32) basic_machine=c32-convex os=bsd ;; convex-c34) basic_machine=c34-convex os=bsd ;; convex-c38) basic_machine=c38-convex os=bsd ;; cray) basic_machine=j90-cray os=unicos ;; crds | unos) basic_machine=m68k-crds os= ;; da30) basic_machine=m68k-da30 os= ;; decstation | pmax | pmin | dec3100 | decstatn) basic_machine=mips-dec os= ;; delta88) basic_machine=m88k-motorola os=sysv3 ;; dicos) basic_machine=i686-pc os=dicos ;; djgpp) basic_machine=i586-pc os=msdosdjgpp ;; ebmon29k) basic_machine=a29k-amd os=ebmon ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=ose ;; gmicro) basic_machine=tron-gmicro os=sysv ;; go32) basic_machine=i386-pc os=go32 ;; h8300hms) basic_machine=h8300-hitachi os=hms ;; h8300xray) basic_machine=h8300-hitachi os=xray ;; h8500hms) basic_machine=h8500-hitachi os=hms ;; harris) basic_machine=m88k-harris os=sysv3 ;; hp300) basic_machine=m68k-hp ;; hp300bsd) basic_machine=m68k-hp os=bsd ;; hp300hpux) basic_machine=m68k-hp os=hpux ;; hppaosf) basic_machine=hppa1.1-hp os=osf ;; hppro) basic_machine=hppa1.1-hp os=proelf ;; i386mach) basic_machine=i386-mach os=mach ;; vsta) basic_machine=i386-pc os=vsta ;; isi68 | isi) basic_machine=m68k-isi os=sysv ;; m68knommu) basic_machine=m68k-unknown os=linux ;; magnum | m3230) basic_machine=mips-mips os=sysv ;; merlin) basic_machine=ns32k-utek os=sysv ;; mingw64) basic_machine=x86_64-pc os=mingw64 ;; mingw32) basic_machine=i686-pc os=mingw32 ;; mingw32ce) basic_machine=arm-unknown os=mingw32ce ;; monitor) basic_machine=m68k-rom68k os=coff ;; morphos) basic_machine=powerpc-unknown os=morphos ;; moxiebox) basic_machine=moxie-unknown os=moxiebox ;; msdos) basic_machine=i386-pc os=msdos ;; msys) basic_machine=i686-pc os=msys ;; mvs) basic_machine=i370-ibm os=mvs ;; nacl) basic_machine=le32-unknown os=nacl ;; ncr3000) basic_machine=i486-ncr os=sysv4 ;; netbsd386) basic_machine=i386-pc os=netbsd ;; netwinder) basic_machine=armv4l-rebel os=linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=newsos ;; news1000) basic_machine=m68030-sony os=newsos ;; necv70) basic_machine=v70-nec os=sysv ;; nh3000) basic_machine=m68k-harris os=cxux ;; nh[45]000) basic_machine=m88k-harris os=cxux ;; nindy960) basic_machine=i960-intel os=nindy ;; mon960) basic_machine=i960-intel os=mon960 ;; nonstopux) basic_machine=mips-compaq os=nonstopux ;; os400) basic_machine=powerpc-ibm os=os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=ose ;; os68k) basic_machine=m68k-none os=os68k ;; paragon) basic_machine=i860-intel os=osf ;; parisc) basic_machine=hppa-unknown os=linux ;; pw32) basic_machine=i586-unknown os=pw32 ;; rdos | rdos64) 
basic_machine=x86_64-pc os=rdos ;; rdos32) basic_machine=i386-pc os=rdos ;; rom68k) basic_machine=m68k-rom68k os=coff ;; sa29200) basic_machine=a29k-amd os=udi ;; sei) basic_machine=mips-sei os=seiux ;; sequent) basic_machine=i386-sequent os= ;; sps7) basic_machine=m68k-bull os=sysv2 ;; st2000) basic_machine=m68k-tandem os= ;; stratus) basic_machine=i860-stratus os=sysv4 ;; sun2) basic_machine=m68000-sun os= ;; sun2os3) basic_machine=m68000-sun os=sunos3 ;; sun2os4) basic_machine=m68000-sun os=sunos4 ;; sun3) basic_machine=m68k-sun os= ;; sun3os3) basic_machine=m68k-sun os=sunos3 ;; sun3os4) basic_machine=m68k-sun os=sunos4 ;; sun4) basic_machine=sparc-sun os= ;; sun4os3) basic_machine=sparc-sun os=sunos3 ;; sun4os4) basic_machine=sparc-sun os=sunos4 ;; sun4sol2) basic_machine=sparc-sun os=solaris2 ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun os= ;; sv1) basic_machine=sv1-cray os=unicos ;; symmetry) basic_machine=i386-sequent os=dynix ;; t3e) basic_machine=alphaev5-cray os=unicos ;; t90) basic_machine=t90-cray os=unicos ;; toad1) basic_machine=pdp10-xkl os=tops20 ;; tpf) basic_machine=s390x-ibm os=tpf ;; udi29k) basic_machine=a29k-amd os=udi ;; ultra3) basic_machine=a29k-nyu os=sym1 ;; v810 | necv810) basic_machine=v810-nec os=none ;; vaxv) basic_machine=vax-dec os=sysv ;; vms) basic_machine=vax-dec os=vms ;; vxworks960) basic_machine=i960-wrs os=vxworks ;; vxworks68) basic_machine=m68k-wrs os=vxworks ;; vxworks29k) basic_machine=a29k-wrs os=vxworks ;; xbox) basic_machine=i686-pc os=mingw32 ;; ymp) basic_machine=ymp-cray os=unicos ;; *) basic_machine=$1 os= ;; esac ;; esac # Decode 1-component or ad-hoc basic machines case $basic_machine in # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) cpu=hppa1.1 vendor=winbond ;; op50n) cpu=hppa1.1 vendor=oki ;; op60c) cpu=hppa1.1 vendor=oki ;; ibm*) cpu=i370 vendor=ibm ;; orion105) cpu=clipper vendor=highlevel ;; mac | mpw | mac-mpw) cpu=m68k vendor=apple ;; pmac | pmac-mpw) cpu=powerpc vendor=apple ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
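# (Example, for orientation only: per the entries below, `3b1` stands for
# cpu=m68000 vendor=att, while `decsystem20` additionally implies os=tops20.)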
3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) cpu=m68000 vendor=att ;; 3b*) cpu=we32k vendor=att ;; bluegene*) cpu=powerpc vendor=ibm os=cnk ;; decsystem10* | dec10*) cpu=pdp10 vendor=dec os=tops10 ;; decsystem20* | dec20*) cpu=pdp10 vendor=dec os=tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) cpu=m68k vendor=motorola ;; dpx2*) cpu=m68k vendor=bull os=sysv3 ;; encore | umax | mmax) cpu=ns32k vendor=encore ;; elxsi) cpu=elxsi vendor=elxsi os=${os:-bsd} ;; fx2800) cpu=i860 vendor=alliant ;; genix) cpu=ns32k vendor=ns ;; h3050r* | hiux*) cpu=hppa1.1 vendor=hitachi os=hiuxwe2 ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) cpu=hppa1.0 vendor=hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) cpu=m68000 vendor=hp ;; hp9k3[2-9][0-9]) cpu=m68k vendor=hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) cpu=hppa1.0 vendor=hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) cpu=hppa1.1 vendor=hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp cpu=hppa1.1 vendor=hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp cpu=hppa1.1 vendor=hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) cpu=hppa1.1 vendor=hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) cpu=hppa1.0 vendor=hp ;; i*86v32) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc os=sysv32 ;; i*86v4*) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc os=sysv4 ;; i*86v) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc os=sysv ;; i*86sol2) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc os=solaris2 ;; j90 | j90-cray) cpu=j90 vendor=cray os=${os:-unicos} ;; iris | iris4d) cpu=mips vendor=sgi case $os in irix*) ;; *) os=irix4 ;; esac ;; miniframe) cpu=m68000 vendor=convergent ;; *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) cpu=m68k vendor=atari os=mint ;; news-3600 | risc-news) cpu=mips vendor=sony os=newsos ;; next | m*-next) cpu=m68k vendor=next case $os in nextstep* ) ;; ns2*) os=nextstep2 ;; *) os=nextstep3 ;; esac ;; np1) cpu=np1 vendor=gould ;; op50n-* | op60c-*) cpu=hppa1.1 vendor=oki os=proelf ;; pa-hitachi) cpu=hppa1.1 vendor=hitachi os=hiuxwe2 ;; pbd) cpu=sparc vendor=tti ;; pbb) cpu=m68k vendor=tti ;; pc532) cpu=ns32k vendor=pc532 ;; pn) cpu=pn vendor=gould ;; power) cpu=power vendor=ibm ;; ps2) cpu=i386 vendor=ibm ;; rm[46]00) cpu=mips vendor=siemens ;; rtpc | rtpc-*) cpu=romp vendor=ibm ;; sde) cpu=mipsisa32 vendor=sde os=${os:-elf} ;; simso-wrs) cpu=sparclite vendor=wrs os=vxworks ;; tower | tower-32) cpu=m68k vendor=ncr ;; vpp*|vx|vx-*) cpu=f301 vendor=fujitsu ;; w65) cpu=w65 vendor=wdc ;; w89k-*) cpu=hppa1.1 vendor=winbond os=proelf ;; none) cpu=none vendor=none ;; leon|leon[3-9]) cpu=sparc vendor=$basic_machine ;; leon-*|leon[3-9]-*) cpu=sparc vendor=`echo "$basic_machine" | sed 's/-.*//'` ;; *-*) IFS="-" read -r cpu vendor <&2 exit 1 ;; esac ;; esac # Here we canonicalize certain aliases for manufacturers. case $vendor in digital*) vendor=dec ;; commodore*) vendor=cbm ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if [ x$os != x ] then case $os in # First match some system type aliases that might get confused # with valid system types. # solaris* is a basic system type, with this one exception. 
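# (Illustrative: a bare `solaris` is normalized to solaris2 just below, and
# the historical `solaris1` alias is rewritten to sunos4 before the generic
# solaris* rule further down would accept it verbatim.)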
auroraux) os=auroraux ;; bluegene*) os=cnk ;; solaris1 | solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; solaris) os=solaris2 ;; unixware*) os=sysv4.2uw ;; gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # es1800 is here to avoid being matched by es* (a different OS) es1800*) os=ose ;; # Some version numbers need modification chorusos*) os=chorusos ;; isc) os=isc2.2 ;; sco6) os=sco5v6 ;; sco5) os=sco3.2v5 ;; sco4) os=sco3.2v4 ;; sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` ;; sco3.2v[4-9]* | sco5v6*) # Don't forget version if it is 3.2v4 or newer. ;; scout) # Don't match below ;; sco*) os=sco3.2v2 ;; psos*) os=psos ;; # Now accept the basic system types. # The portable systems comes first. # Each alternative MUST end in a * to match a version number. # sysv* is not here because it comes later, after sysvr4. gnu* | bsd* | mach* | minix* | genix* | ultrix* | irix* \ | *vms* | esix* | aix* | cnk* | sunos | sunos[34]*\ | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ | sym* | kopensolaris* | plan9* \ | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ | aos* | aros* | cloudabi* | sortix* \ | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \ | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \ | knetbsd* | mirbsd* | netbsd* \ | bitrig* | openbsd* | solidbsd* | libertybsd* \ | ekkobsd* | kfreebsd* | freebsd* | riscix* | lynxos* \ | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ | udi* | eabi* | lites* | ieee* | go32* | aux* | hcos* \ | chorusrdb* | cegcc* | glidix* \ | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ | midipix* | mingw32* | mingw64* | linux-gnu* | linux-android* \ | linux-newlib* | linux-musl* | linux-uclibc* \ | uxpv* | beos* | mpeix* | udk* | moxiebox* \ | interix* | uwin* | mks* | rhapsody* | darwin* \ | openstep* | oskit* | conix* | pw32* | nonstopux* \ | storm-chaos* | tops10* | tenex* | tops20* | its* \ | os2* | vos* | palmos* | uclinux* | nucleus* \ | morphos* | superux* | rtmk* | windiss* \ | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \ | skyos* | haiku* | rdos* | toppers* | drops* | es* \ | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ | midnightbsd*) # Remember, each alternative MUST END IN *, to match a version number. ;; qnx*) case $cpu in x86 | i*86) ;; *) os=nto-$os ;; esac ;; hiux*) os=hiuxwe2 ;; nto-qnx*) ;; nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; sim | xray | os68k* | v88r* \ | windows* | osx | abug | netware* | os9* \ | macos* | mpw* | magic* | mmixware* | mon960* | lnews*) ;; linux-dietlibc) os=linux-dietlibc ;; linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; lynx*178) os=lynxos178 ;; lynx*5) os=lynxos5 ;; lynx*) os=lynxos ;; mac*) os=`echo "$os" | sed -e 's|mac|macos|'` ;; opened*) os=openedition ;; os400*) os=os400 ;; sunos5*) os=`echo "$os" | sed -e 's|sunos5|solaris2|'` ;; sunos6*) os=`echo "$os" | sed -e 's|sunos6|solaris3|'` ;; wince*) os=wince ;; utek*) os=bsd ;; dynix*) os=bsd ;; acis*) os=aos ;; atheos*) os=atheos ;; syllable*) os=syllable ;; 386bsd) os=bsd ;; ctix* | uts*) os=sysv ;; nova*) os=rtmk-nova ;; ns2) os=nextstep2 ;; nsk*) os=nsk ;; # Preserve the version number of sinix5. sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; sinix*) os=sysv4 ;; tpf*) os=tpf ;; triton*) os=sysv3 ;; oss*) os=sysv3 ;; svr4*) os=sysv4 ;; svr3) os=sysv3 ;; sysvr4) os=sysv4 ;; # This must come after sysvr4. 
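# (Ordering note, illustrative: the generic sysv* pattern that opens the next
# group would otherwise swallow sysvr4, which has just been rewritten to
# sysv4 above.)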
sysv*) ;; ose*) os=ose ;; *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) os=mint ;; zvmoe) os=zvmoe ;; dicos*) os=dicos ;; pikeos*) # Until real need of OS specific support for # particular features comes up, bare metal # configurations are quite functional. case $cpu in arm*) os=eabi ;; *) os=elf ;; esac ;; nacl*) ;; ios) ;; none) ;; *-eabi) ;; *) echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. # Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $cpu-$vendor in score-*) os=elf ;; spu-*) os=elf ;; *-acorn) os=riscix1.2 ;; arm*-rebel) os=linux ;; arm*-semi) os=aout ;; c4x-* | tic4x-*) os=coff ;; c8051-*) os=elf ;; clipper-intergraph) os=clix ;; hexagon-*) os=elf ;; tic54x-*) os=coff ;; tic55x-*) os=coff ;; tic6x-*) os=coff ;; # This must come before the *-dec entry. pdp10-*) os=tops20 ;; pdp11-*) os=none ;; *-dec | vax-*) os=ultrix4.2 ;; m68*-apollo) os=domain ;; i386-sun) os=sunos4.0.2 ;; m68000-sun) os=sunos3 ;; m68*-cisco) os=aout ;; mep-*) os=elf ;; mips*-cisco) os=elf ;; mips*-*) os=elf ;; or32-*) os=coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=sysv3 ;; sparc-* | *-sun) os=sunos4.1.1 ;; pru-*) os=elf ;; *-be) os=beos ;; *-ibm) os=aix ;; *-knuth) os=mmixware ;; *-wec) os=proelf ;; *-winbond) os=proelf ;; *-oki) os=proelf ;; *-hp) os=hpux ;; *-hitachi) os=hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=sysv ;; *-cbm) os=amigaos ;; *-dg) os=dgux ;; *-dolphin) os=sysv3 ;; m68k-ccur) os=rtu ;; m88k-omron*) os=luna ;; *-next) os=nextstep ;; *-sequent) os=ptx ;; *-crds) os=unos ;; *-ns) os=genix ;; i370-*) os=mvs ;; *-gould) os=sysv ;; *-highlevel) os=bsd ;; *-encore) os=bsd ;; *-sgi) os=irix ;; *-siemens) os=sysv4 ;; *-masscomp) os=rtu ;; f30[01]-fujitsu | f700-fujitsu) os=uxpv ;; *-rom68k) os=coff ;; *-*bug) os=coff ;; *-apple) os=macos ;; *-atari*) os=mint ;; *-wrs) os=vxworks ;; *) os=none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. 
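# (Illustrative example, assuming the usual stand-alone invocation
# `sh config.sub NAME`: an input like mips-unknown-riscix reaches this point
# with vendor=unknown, the riscix* entry below fills in vendor=acorn, and the
# script finally prints mips-acorn-riscix.)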
case $vendor in unknown) case $os in riscix*) vendor=acorn ;; sunos*) vendor=sun ;; cnk*|-aix*) vendor=ibm ;; beos*) vendor=be ;; hpux*) vendor=hp ;; mpeix*) vendor=hp ;; hiux*) vendor=hitachi ;; unos*) vendor=crds ;; dgux*) vendor=dg ;; luna*) vendor=omron ;; genix*) vendor=ns ;; clix*) vendor=intergraph ;; mvs* | opened*) vendor=ibm ;; os400*) vendor=ibm ;; ptx*) vendor=sequent ;; tpf*) vendor=ibm ;; vxsim* | vxworks* | windiss*) vendor=wrs ;; aux*) vendor=apple ;; hms*) vendor=hitachi ;; mpw* | macos*) vendor=apple ;; *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) vendor=atari ;; vos*) vendor=stratus ;; esac ;; esac echo "$cpu-$vendor-$os" exit # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End:
# -*- Autoconf -*- # Process this file with autoconf to produce a configure script. AC_PREREQ(2.56) AC_INIT([nordugrid-arc],m4_normalize(m4_include(VERSION)),[http://bugzilla.nordugrid.org/]) dnl serial-tests is not recognized before 1.12, and required after 1.13 m4_define([serial_tests], [ m4_esyscmd([case `${AUTOMAKE:-automake} --version | head -n 1` in *1.11.*|*1.10.*|*1.9.*);; *) echo serial-tests;; esac]) ]) AM_INIT_AUTOMAKE([foreign 1.9 tar-pax] serial_tests) AC_CONFIG_SRCDIR([Makefile.am]) AC_CONFIG_HEADERS([config.h]) baseversion=`echo $VERSION | sed 's/[[^0-9.]].*//'` preversion=`echo $VERSION | sed 's/^[[0-9.]]*//'` if test "x$baseversion" = "x" ; then baseversion=$VERSION preversion="" fi if test "x$preversion" = "x" ; then fedorarelease="1" fedorasetupopts="-q" debianversion="$baseversion" else fedorarelease="0.$preversion" fedorasetupopts="-q -n %{name}-%{version}$preversion" debianversion="$baseversion~$preversion" fi # numeric ARC_VERSION_* used for the API fall back to the current release series (e.g. when 'master' is specified in the VERSION file, "6.0.0" will be used) ARC_VERSION_MAJOR=`echo $VERSION | awk -F. '{print match($1, /^[[0-9]]+$/) ? $1 : "6"}'` ARC_VERSION_MINOR=`echo $VERSION | awk -F. '{print match($2, /[[^ ]]/) ? $2 : "0"}'` ARC_VERSION_PATCH=`echo $VERSION | awk -F. '{print match($3, /[[^ ]]/) ? $3 : "0"}'` ARC_VERSION_NUM=`printf "0x%02x%02x%02x" $ARC_VERSION_MAJOR $ARC_VERSION_MINOR $ARC_VERSION_PATCH` ARC_VERSION=`echo $ARC_VERSION_MAJOR.$ARC_VERSION_MINOR.$ARC_VERSION_PATCH` AC_SUBST(baseversion) AC_SUBST(preversion) AC_SUBST(fedorarelease) AC_SUBST(fedorasetupopts) AC_SUBST(debianversion) AC_SUBST(ARC_VERSION_MAJOR) AC_SUBST(ARC_VERSION_MINOR) AC_SUBST(ARC_VERSION_PATCH) AC_SUBST(ARC_VERSION_NUM) AC_SUBST(ARC_VERSION) # This macro was introduced in autoconf 2.57g? but we currently only require 2.56 m4_ifdef([AC_CONFIG_MACRO_DIR], [AC_CONFIG_MACRO_DIR([m4])]) m4_pattern_allow([AC_PATH_PROG]) m4_pattern_allow([AC_MSG_WARN]) AC_PROG_CXX AC_PROG_CC AC_PROG_CPP AC_GNU_SOURCE AC_PROG_AWK AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_DISABLE_STATIC AM_PROG_LIBTOOL cat > cxxtst.cxx <<EOF
#include <cstdlib>
int main() { exit(__cplusplus < 201103L ? EXIT_FAILURE : EXIT_SUCCESS); }
EOF
cxx11=no for CXX11FLAG in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x; do $CXX -o cxxtst $CXX11FLAG $CXXFLAGS $CPPFLAGS $LDFLAGS cxxtst.cxx 2>/dev/null if test "$?"
= '0' ; then if ./cxxtst ; then if test -z "$CXX11FLAG" ; then AC_MSG_NOTICE([No extra flag for C++ 11]) else AC_MSG_NOTICE([Extra flag for C++ 11: $CXX11FLAG]) CXXFLAGS="$CXXFLAGS $CXX11FLAG" fi cxx11=yes break fi fi done rm -f cxxtst.cxx cxxtst if test x$cxx11 = xno ; then AC_MSG_ERROR([ARC requires a C++ 11 capable compiler]) fi AC_PATH_PROG(PERL, perl, /usr/bin/perl, :) # EL-5 compatibility. $(mkdir_p) is now obsolete. test -n "$MKDIR_P" || MKDIR_P="$mkdir_p" AC_SUBST([MKDIR_P]) # Use arc for "pkgdir" instead of nordugrid-arc (@PACKAGE@) pkgdatadir='${datadir}/arc' pkgincludedir='${includedir}/arc' pkglibdir='${libdir}/arc' extpkglibdir='${libdir}/arc/external' pkglibexecdir='${libexecdir}/arc' AC_SUBST(pkgdatadir) AC_SUBST(pkgincludedir) AC_SUBST(pkglibdir) AC_SUBST(extpkglibdir) AC_SUBST(pkglibexecdir) ARC_API ARC_RELATIVE_PATHS AC_ARG_WITH(systemd-units-location, AC_HELP_STRING([--with-systemd-units-location=], [Location of the systemd unit files. [[None]]]), [ unitsdir="$withval" ], [ unitsdir= ] ) AC_MSG_RESULT($unitsdir) AC_SUBST(unitsdir) AM_CONDITIONAL([SYSTEMD_UNITS_ENABLED],[test "x$unitsdir" != "x"]) AC_ARG_WITH(sysv-scripts-location, AC_HELP_STRING([--with-sysv-scripts-location=], [Location of the SYSV init scripts. [[autodetect]]]), [ initddirauto="no" initddir="$withval" ], [ initddirauto="yes" initddir= case "${host}" in *linux* | *kfreebsd* | *gnu* ) for i in init.d rc.d/init.d rc.d; do if test -d "/etc/$i" -a ! -h "/etc/$i" ; then initddir="$sysconfdir/$i" break fi done if test -z "$initddir"; then AC_MSG_WARN(could not find a suitable location for the SYSV init scripts - not installing) fi ;; esac ] ) AC_MSG_RESULT($initddir) AC_SUBST(initddir) AM_CONDITIONAL([SYSV_SCRIPTS_ENABLED],[ ( test "x$initddirauto" == "xno" || test "x$unitsdir" = "x" ) && test "x$initddir" != "x"]) AC_ARG_WITH(cron-scripts-prefix, AC_HELP_STRING([--with-cron-scripts-prefix=], [Specify the location of the cron directory. [[SYSCONFDIR/cron.d]]]), [ cronddir="$withval" ], [ cronddir="$sysconfdir/cron.d" ] ) AC_SUBST(cronddir) # gettext AM_GNU_GETTEXT([external]) AM_GNU_GETTEXT_VERSION([0.17]) [[ -r $srcdir/po/POTFILES.in ]] || touch $srcdir/po/POTFILES.in # Portable 64bit file offsets AC_SYS_LARGEFILE # pkg-config needed for many checks AC_PATH_TOOL(PKG_CONFIG, pkg-config, no) if test "x$PKG_CONFIG" = "xno"; then AC_MSG_ERROR([ *** pkg-config not found]) else pkgconfigdir=${libdir}/pkgconfig AC_SUBST(pkgconfigdir) fi # Default enable/disable switches # Features enables_systemd=no enables_swig_python=yes # Features directly related to components enables_cppunit=yes enables_python=yes enables_altpython=yes enables_pylint=yes enables_mock_dmc=no enables_gfal=no enables_s3=no enables_xrootd=yes enables_xmlsec1=yes enables_sqlitejstore=yes enables_ldns=yes # Libraries and plugins # Currently no fine-grained choice is supported. # Also this variable is used to check if source # build is needed at all because no component can # be built without HED. enables_hed=yes # Services enables_a_rex_service=yes enables_internal=no enables_ldap_service=yes enables_candypond=yes enables_datadelivery_service=yes enables_monitor=yes # Clients enables_compute_client=yes enables_credentials_client=yes enables_data_client=yes enables_arcrest_client=yes # Documentation enables_doc=yes # Handle group enable/disable switches AC_ARG_ENABLE(all, AC_HELP_STRING([--disable-all], [disables all buildable components. Can be overwritten with --enable-* for group or specific component. 
It is also possible to use --enable-all to overwrite defaults for most components.]), [ enables_a_rex_service=$enableval enables_internal=$enableval enables_ldap_service=$enableval enables_monitor=$enableval enables_candypond=$enableval enables_datadelivery_service=$enableval enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_arcrest_client=$enableval enables_hed=$enableval enables_python=$enableval enables_altpython=$enableval enables_pylint=$enableval enables_mock_dmc=$enableval enables_gfal=$enableval enables_s3=$enableval enables_xrootd=$enableval enables_xmlsec1=$enableval enables_cppunit=$enableval enables_doc=$enableval enables_sqlitejstore=$enableval enables_ldns=$enableval ], []) AC_ARG_ENABLE(all-clients, AC_HELP_STRING([--disable-all-clients], [disables all buildable client components. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-clients to overwrite defaults and --enable-all.]), [ enables_compute_client=$enableval enables_credentials_client=$enableval enables_echo_client=$enableval enables_data_client=$enableval enables_arcrest_client=$enableval enables_doc=$enableval ], []) AC_ARG_ENABLE(all-data-clients, AC_HELP_STRING([--disable-all-data-clients], [disables all buildable client components providing data handling abilities. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-data-clients to overwrite defaults, --enable-all and --enable-all-clients.]), [ enables_data_client=$enableval ], []) AC_ARG_ENABLE(all-services, AC_HELP_STRING([--disable-all-services], [disables all buildable service components. Can be overwritten with --enable-* for specific component. It is also possible to use --enable-all-services to overwrite defaults and --enable-all.]), [ enables_a_rex_service=$enableval enables_ldap_service=$enableval enables_monitor=$enableval enables_candypond=$enableval enables_datadelivery_service=$enableval ], []) AC_ARG_ENABLE(hed, AC_HELP_STRING([--disable-hed], [disable building HED libraries and plugins. Do not do that unless you do not want to build anything. Even in that case it is better to use --disable-all.]), [enables_hed=$enableval],[]) if test "$enables_hed" = "no" ; then enables_a_rex_service=no enables_candypond=no enables_datadelivery_service=no enables_compute_client=no enables_credentials_client=no enables_data_client=no enables_swig_python=no fi # Be pedantic about compiler warnings. AC_ARG_ENABLE(pedantic-compile, AC_HELP_STRING([--enable-pedantic-compile], [add pedantic compiler flags]), [enables_pedantic_compile="yes"], [enables_pedantic_compile="no"]) if test "x$enables_pedantic_compile" = "xyes"; then # This check needs to be enhanced. It won't work in case of cross-compilation # or if the path to the compiler is explicitly specified. if test x"$CXX" = x"g++"; then # GNU C/C++ flags AM_CXXFLAGS="-Wall -Wextra -Werror -Wno-sign-compare -Wno-unused" SAVE_CPPFLAGS=$CPPFLAGS AC_LANG_SAVE AC_LANG_CPLUSPLUS CPPFLAGS="$CPPFLAGS -Wno-unused-result" AC_TRY_COMPILE([],[], [ AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-result" ], [ AC_MSG_NOTICE([compilation flag -Wno-unused-result is not supported]) ] ) AC_LANG_RESTORE CPPFLAGS=$SAVE_CPPFLAGS else # TODO: set generic flags for generic compiler AM_CXXFLAGS="" fi AC_SUBST(AM_CXXFLAGS) fi AM_CONDITIONAL([PEDANTIC_COMPILE], [test "x$enables_pedantic_compile" = "xyes"]) # Enable/disable switches for third-party.
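# (Illustrative usage of the group switches above; the command line is an
# assumed example, not part of the build system. Because later switches
# override the blanket default,
#   ./configure --disable-all --enable-hed --enable-all-clients
# yields a HED-plus-clients build with all services left disabled.)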
# Swig AC_ARG_ENABLE(swig-python, AC_HELP_STRING([--disable-swig-python], [disable SWIG python bindings]), [enables_swig_python=$enableval],[]) AC_ARG_ENABLE(swig, AC_HELP_STRING([--disable-swig], [disable all bindings through SWIG]), [enables_swig_python=$enableval],[]) if test "$enables_swig_python" = "yes"; then AC_PATH_PROGS(SWIG, swig) if test "x$SWIG" = "x"; then enables_swig="no" else swigver=`$SWIG -version 2>&1 | grep Version | sed 's/.* //'` swigver1=`echo $swigver | cut -d. -f1` swigver2=`echo $swigver | cut -d. -f2` swigver3=`echo $swigver | cut -d. -f3` if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 25 ) ) ) ) ; then AC_MSG_NOTICE([swig is too old (< 1.3.25)]) SWIG="" enables_swig="no" elif test $swigver1 -eq 1 && test $swigver2 -eq 3 && test $swigver3 -eq 38 ; then AC_MSG_NOTICE([swig version 1.3.38 has bug which prevents it from being used for this software. Please upgrade or downgrade.]) SWIG="" enables_swig="no" else SWIG2="no" if test $swigver1 -ge 2 then SWIG2="yes" fi AC_SUBST(SWIG2) SWIG_PYTHON_NAMING="SwigPy" # In SWIG version 1.3.37 naming was changed from "PySwig" to "SwigPy". if test $swigver1 -lt 1 || ( test $swigver1 -eq 1 && ( \ test $swigver2 -lt 3 || ( test $swigver2 -eq 3 && ( \ test $swigver3 -lt 37 ) ) ) ) ; then SWIG_PYTHON_NAMING="PySwig" fi AC_SUBST(SWIG_PYTHON_NAMING) fi fi else SWIG="" fi AM_CONDITIONAL([SWIG_ENABLED],[test "x$enables_swig" = "xyes"]) # Python AC_ARG_ENABLE(python, AC_HELP_STRING([--disable-python], [disable Python components]), [enables_arcrest_client=$enableval enables_swig_python=$enableval], []) enables_python=yes if test "$enables_python" = "yes"; then AC_ARG_WITH(python, AC_HELP_STRING([--with-python=(PYTHON)], [specify python program from PATH])) # We do not look for python binary when cross-compiling # but we need to make the variable non-empty if test "${build}" = "${host}"; then AC_PATH_PROGS(PYTHON, $with_python python3) else PYTHON=/usr/bin/python3 fi if test "X$PYTHON" != "X"; then PYNAME=`basename $PYTHON` PKG_CHECK_MODULES(PYTHON, $PYNAME-embed, [ PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` ],[ PKG_CHECK_MODULES(PYTHON, $PYNAME, [ PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` ],[ PYNAME=python-`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[[0]].split(".")[[:2]]))'` PKG_CHECK_MODULES(PYTHON, $PYNAME-embed, [ PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME-embed` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` ],[ PKG_CHECK_MODULES(PYTHON, $PYNAME, [ PYTHON_VERSION=`$PKG_CONFIG --modversion $PYNAME` PYTHON_MAJOR=`echo $PYTHON_VERSION|cut -f1 -d.` ],[ PYTHON_VERSION=`$PYTHON -c 'import sys; print(".".join(sys.version.split(" ")[[0]].split(".")[[:2]]))'` PYTHON_MAJOR=`$PYTHON -c 'import sys; print(sys.version_info[[0]])'` PYTHON_CFLAGS=-I`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` PY_LIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` PY_SYSLIBS=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` PY_LIBDEST=`$PYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` PYTHON_LIBS="$PY_LIBS $PY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$PYTHON_LIBS $LDFLAGS" AC_CHECK_LIB([python$PYTHON_VERSION], 
[Py_Initialize],[ AC_MSG_NOTICE([No additional path to python library needed]) PYTHON_LIBS="-lpython$PYTHON_VERSION $PYTHON_LIBS"],[ LDFLAGS="-L$PY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value AC_CHECK_LIB([python$PYTHON_VERSION], [Py_Finalize],[ AC_MSG_NOTICE([Adding path to python library]) PYTHON_LIBS="-L$PY_LIBDEST/config -lpython$PYTHON_VERSION $PYTHON_LIBS"],[ PYTHON_LIBS=""])]) LDFLAGS=$SAVE_LDFLAGS ])])])]) AC_SUBST(PYTHON_VERSION) AC_SUBST(PYTHON_CFLAGS) AC_SUBST(PYTHON_LIBS) if test "${build}" = "${host}"; then PYTHON_EXT_SUFFIX=`$PYTHON -c "from distutils import sysconfig; v = sysconfig.get_config_vars(); print(v.get('EXT_SUFFIX', v.get('SO')))" | sed s/None//` else PYTHON_EXT_SUFFIX="" fi AC_SUBST(PYTHON_EXT_SUFFIX) AC_ARG_WITH(python-site-arch, AC_HELP_STRING([--with-python-site-arch=directory], [Directory where Python modules will be installed - the default is to query the Python binary])) if test "X$PYTHON_SITE_ARCH" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_ARCH=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` else PYTHON_SITE_ARCH="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi AC_SUBST(PYTHON_SITE_ARCH) AC_ARG_WITH(python-site-lib, AC_HELP_STRING([--with-python-site-lib=directory], [Directory where Python modules will be installed - the default is to query the Python binary])) if test "X$PYTHON_SITE_LIB" = "X"; then if test "${build}" = "${host}"; then PYTHON_SITE_LIB=`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` else PYTHON_SITE_LIB="${libdir}/python${PYTHON_VERSION}/site-packages" fi fi AC_SUBST(PYTHON_SITE_LIB) SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $PYTHON_LIBS" CPPFLAGS="$CPPFLAGS $PYTHON_CFLAGS" AC_CHECK_HEADER(Python.h, [pythonh="yes"], [pythonh="no"]) AC_TRY_COMPILE([#include <Python.h>], [Py_InitializeEx(0)],[ AC_MSG_NOTICE([Python supports skipping the registration of signal handlers during initialization]) AC_DEFINE(HAVE_PYTHON_INITIALIZE_EX, 1, [Define if you have Py_InitializeEx function]) enables_python_service="yes" ],[ AC_MSG_NOTICE([Python does not support skipping the registration of signal handlers during initialization, since its version is below 2.4]) enables_python_service="no" ]) LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$PYTHON" = "X"; then AC_MSG_NOTICE([Missing Python - skipping Python components]) enables_python=no elif test "X$PYTHON_SITE_ARCH" = "X" || test "X$PYTHON_SITE_LIB" = "X"; then AC_MSG_NOTICE([Missing python site packages location - skipping Python components]) enables_python=no else AC_MSG_NOTICE([Python available: $PYTHON_VERSION]) fi if test "x$enables_python" != "xyes"; then AC_MSG_NOTICE([Missing Python - skipping Python bindings]) enables_swig_python=no elif test "X$PYTHON_LIBS" = "X"; then AC_MSG_NOTICE([Missing Python library - skipping Python bindings]) enables_swig_python=no elif test "X$pythonh" != "Xyes"; then AC_MSG_NOTICE([Missing Python header - skipping Python bindings]) enables_swig_python=no elif !
test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then AC_MSG_NOTICE([Missing pre-compiled Python wrapper and SWIG - skipping Python bindings]) enables_swig_python=no fi fi AC_MSG_NOTICE([Python enabled: $enables_python]) if test "$enables_python" = "no"; then AC_MSG_ERROR([Python is not optional...]) fi AC_MSG_NOTICE([Python SWIG bindings enabled: $enables_swig_python]) AM_CONDITIONAL([PYTHON_ENABLED],[test "x$enables_python" = "xyes"]) AM_CONDITIONAL([PYTHON3], [test "x$enables_python" = "xyes" && test "x$PYTHON_MAJOR" = "x3"]) AM_CONDITIONAL([PYTHON_SWIG_ENABLED],[test "x$enables_swig_python" = "xyes"]) AM_CONDITIONAL([PYTHON_SERVICE],[test "x$enables_swig_python" = "xyes" && test "x$enables_python_service" = "xyes"]) # Alternative Python if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(altpython, AC_HELP_STRING([--disable-altpython], [disable alternative Python bindings]), [enables_altpython=$enableval], []) if test "$enables_altpython" = "yes"; then AC_ARG_WITH(altpython, AC_HELP_STRING([--with-altpython=(PYTHON)], [specify alternative python program from PATH])) AC_PATH_PROGS(ALTPYTHON, $with_altpython) if test "X$ALTPYTHON" != "X"; then ALTPYNAME=`basename $ALTPYTHON` PKG_CHECK_MODULES(ALTPYTHON, $ALTPYNAME-embed, [ ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` ],[ PKG_CHECK_MODULES(ALTPYTHON, $ALTPYNAME, [ ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` ],[ ALTPYNAME=python-`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[[0]].split(".")[[:2]]))'` PKG_CHECK_MODULES(ALTPYTHON, $ALTPYNAME-embed, [ ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME-embed` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` ],[ PKG_CHECK_MODULES(ALTPYTHON, $ALTPYNAME, [ ALTPYTHON_VERSION=`$PKG_CONFIG --modversion $ALTPYNAME` ALTPYTHON_MAJOR=`echo $ALTPYTHON_VERSION|cut -f1 -d.` ],[ ALTPYTHON_VERSION=`$ALTPYTHON -c 'import sys; print(".".join(sys.version.split(" ")[[0]].split(".")[[:2]]))'` ALTPYTHON_MAJOR=`$ALTPYTHON -c 'import sys; print(sys.version_info[[0]])'` ALTPYTHON_CFLAGS=-I`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_inc())'` ALTPY_LIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBS'))" | sed s/None//` ALTPY_SYSLIBS=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('SYSLIBS'))" | sed s/None//` ALTPY_LIBDEST=`$ALTPYTHON -c "from distutils import sysconfig; print(sysconfig.get_config_vars().get('LIBDEST'))" | sed s/None//` ALTPYTHON_LIBS="$ALTPY_LIBS $ALTPY_SYSLIBS" SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$ALTPYTHON_LIBS $LDFLAGS" AC_CHECK_LIB([python$ALTPYTHON_VERSION], [Py_Initialize],[ AC_MSG_NOTICE([No additional path to python library needed]) ALTPYTHON_LIBS="-lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS"],[ LDFLAGS="-L$ALTPY_LIBDEST/config $LDFLAGS" # check a different symbol or else configure will use the cached value AC_CHECK_LIB([python$ALTPYTHON_VERSION], [Py_Finalize],[ AC_MSG_NOTICE([Adding path to python library]) ALTPYTHON_LIBS="-L$ALTPY_LIBDEST/config -lpython$ALTPYTHON_VERSION $ALTPYTHON_LIBS"],[ ALTPYTHON_LIBS=""])]) LDFLAGS=$SAVE_LDFLAGS ])])])]) AC_SUBST(ALTPYTHON_VERSION) AC_SUBST(ALTPYTHON_CFLAGS) AC_SUBST(ALTPYTHON_LIBS) ALTPYTHON_EXT_SUFFIX=`$ALTPYTHON -c "from distutils import sysconfig; v = sysconfig.get_config_vars(); print(v.get('EXT_SUFFIX', v.get('SO')))" | sed s/None//`
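# (For orientation: on a typical CPython 3 the EXT_SUFFIX queried above
# evaluates to something like ".cpython-311-x86_64-linux-gnu.so"; older
# interpreters only provide the plain "SO" key, hence the fallback.)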
AC_SUBST(ALTPYTHON_EXT_SUFFIX) AC_ARG_WITH(altpython-site-arch, AC_HELP_STRING([--with-altpython-site-arch=directory], [Directory where Python modules will be installed - the default is to query the Python binary])) if test "X$ALTPYTHON_SITE_ARCH" = "X"; then ALTPYTHON_SITE_ARCH=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(1,0,"${prefix}"))'` fi AC_SUBST(ALTPYTHON_SITE_ARCH) AC_ARG_WITH(altpython-site-lib, AC_HELP_STRING([--with-altpython-site-lib=directory], [Directory where Python modules will be installed - the default is to query the Python binary])) if test "X$ALTPYTHON_SITE_LIB" = "X"; then ALTPYTHON_SITE_LIB=`$ALTPYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0,0,"${prefix}"))'` fi AC_SUBST(ALTPYTHON_SITE_LIB) SAVE_LDFLAGS=$LDFLAGS SAVE_CPPFLAGS=$CPPFLAGS LDFLAGS="$LDFLAGS $ALTPYTHON_LIBS" CPPFLAGS="$CPPFLAGS $ALTPYTHON_CFLAGS" AC_CHECK_HEADER(Python.h, [altpythonh="yes"], [altpythonh="no"]) LDFLAGS=$SAVE_LDFLAGS CPPFLAGS=$SAVE_CPPFLAGS fi if test "X$ALTPYTHON" = "X"; then AC_MSG_NOTICE([Missing alternative Python - skipping alternative Python]) enables_altpython=no elif test "X$ALTPYTHON_LIBS" = "X"; then AC_MSG_NOTICE([Missing alternative Python library - skipping alternative Python bindings]) enables_altpython=no elif test "X$altpythonh" != "Xyes"; then AC_MSG_NOTICE([Missing alternative Python header - skipping alternative Python bindings]) enables_altpython=no elif test "X$ALTPYTHON_SITE_ARCH" = "X" || test "X$ALTPYTHON_SITE_LIB" = "X"; then AC_MSG_NOTICE([Missing python site packages location - skipping Python bindings]) enables_altpython=no else AC_MSG_NOTICE([Alternative Python available: $ALTPYTHON_VERSION]) fi if test "x$enables_altpython" != "xyes"; then AC_MSG_NOTICE([Missing alternative Python - skipping alternative Python bindings]) enables_altpython=no elif ! test -f python/arc_wrap.cpp && test "x$enables_swig_python" != "xyes"; then AC_MSG_NOTICE([Missing pre-compiled Python wrapper and SWIG - skipping alternative Python bindings]) enables_altpython=no fi fi else enables_altpython=no fi AC_MSG_NOTICE([Alternative Python enabled: $enables_altpython]) AM_CONDITIONAL([ALTPYTHON_ENABLED],[test "x$enables_altpython" = "xyes"]) AM_CONDITIONAL([ALTPYTHON3], [test "x$enables_altpython" = "xyes" && test "x$ALTPYTHON_MAJOR" = "x3"]) # check for pylint dnl Check if pylint is explicitly disabled. if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(pylint, AC_HELP_STRING([--disable-pylint], [disable python example checking using pylint]), [enables_pylint=$enableval],[]) if test "$enables_pylint" = "yes"; then AC_PATH_PROGS(PYLINT, pylint) if test "x$PYLINT" = "x"; then enables_pylint="no" else PYLINT_VERSION=`$PYLINT --version 2> /dev/null | sed -n 's/^pylint \([[0-9.]]*\).*/\1/p'` # Check if pylint supports the following arguments, otherwise disable pylint (python example checking). # Do not generate report # Disable convention and recommendation messages - we are only interested in fatals, errors and warnings. PYLINT_ARGS="--reports=no --disable=C,R" if $PYLINT $PYLINT_ARGS /dev/null > /dev/null 2>&1 ; then AC_MSG_NOTICE([pylint version $PYLINT_VERSION found - version ok]) enables_pylint="yes" else AC_MSG_NOTICE([pylint version $PYLINT_VERSION found - bad version]) enables_pylint="no" PYLINT_ARGS="" fi AC_SUBST(PYLINT_ARGS) fi # Check if the --disable=W0221 option is supported # W0221: Disable arguments differ messages since Swig uses tuple syntax (*args).
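# (Illustrative: when both probes succeed, examples are effectively checked
# with `pylint --reports=no --disable=C,R --disable=W0221 FILE`, i.e. only
# fatal, error and warning messages are reported.)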
if test "$enables_pylint" = "yes"; then PYLINT_ARGS_ARGUMENTS_DIFFER="--disable=W0221" if ! $PYLINT $PYLINT_ARGS $PYLINT_ARGS_ARGUMENTS_DIFFER /dev/null > /dev/null 2>&1 ; then PYLINT_ARGS_ARGUMENTS_DIFFER="" fi AC_SUBST(PYLINT_ARGS_ARGUMENTS_DIFFER) fi fi fi AM_CONDITIONAL([PYLINT_ENABLED], [test "x$enables_pylint" = "xyes"]) AC_MSG_NOTICE([Python example checking with pylint enabled: $enables_pylint]) # check systemd daemon integration AC_ARG_ENABLE(systemd, AC_HELP_STRING([--enable-systemd], [enable use of the systemd daemon integration features]),[enables_systemd="$enableval"],[]) if test "x$enables_systemd" = "xyes"; then systemd_daemon_save_LIBS=$LIBS LIBS= AC_SEARCH_LIBS(sd_listen_fds,[systemd systemd-daemon], [have_sd_listen_fds=yes],[have_sd_listen_fds=no],$systemd_daemon_save_LIBS) AC_SEARCH_LIBS(sd_notify,[systemd systemd-daemon], [have_sd_notify=yes],[have_sd_notify=no],$systemd_daemon_save_LIBS) AC_CHECK_HEADERS(systemd/sd-daemon.h, [have_systemd_sd_daemon_h=yes],[have_systemd_sd_daemon_h=no]) if test x"$have_sd_listen_fds" = x"yes" && \ test x"$have_sd_notify" = x"yes" && \ test x"$have_systemd_sd_daemon_h" = x"yes"; then AC_DEFINE([HAVE_SYSTEMD_DAEMON],[1],[Define if you have systemd daemon]) SYSTEMD_DAEMON_LIBS=$LIBS else AC_MSG_FAILURE([--enable-systemd was given, but test for systemd libraries had failed]) fi LIBS=$systemd_daemon_save_LIBS fi AC_SUBST(SYSTEMD_DAEMON_LIBS) # check glibmm # check for API version 2.68 first, then API version 2.4 if test "$enables_hed" = "yes"; then "$PKG_CONFIG" glibmm-2.68 if test "$?" = '1'; then PKG_CHECK_MODULES(GLIBMM, [giomm-2.4 glibmm-2.4]) else PKG_CHECK_MODULES(GLIBMM, [giomm-2.68 glibmm-2.68]) AC_DEFINE(HAVE_GLIBMM_268, 1, [define if using glibmm 2.68 API]) fi AC_SUBST(GLIBMM_CFLAGS) AC_SUBST(GLIBMM_LIBS) SAVE_CPPFLAGS=$CPPFLAGS AC_LANG_SAVE AC_LANG_CPLUSPLUS CPPFLAGS="$CPPFLAGS $GLIBMM_CFLAGS" AC_CHECK_HEADER([glibmm/optioncontext.h], [ AC_TRY_COMPILE([#include ], [Glib::OptionContext ctx; ctx.set_summary("summary")], [ AC_DEFINE(HAVE_GLIBMM_OPTIONCONTEXT_SET_SUMMARY, 1, [define if glibmm has Glib::OptionContext::set_summary()]) AC_MSG_NOTICE([using glibmm command line parsing]) ], [ AC_MSG_NOTICE([using getopt_long command line parsing]) ] ) AC_TRY_COMPILE([#include ], [Glib::OptionContext ctx; ctx.get_help();],[ AC_DEFINE(HAVE_GLIBMM_OPTIONCONTEXT_GET_HELP, 1, [define if glibmm has Glib::OptionContext::get_help()]) ], [ ] ) ]) AC_TRY_COMPILE([#include ],[Glib::ModuleFlags flags = Glib::MODULE_BIND_LOCAL;],[glibmm_bind_local=yes],[glibmm_bind_local=no]) if test "$glibmm_bind_local" = yes; then AC_DEFINE(HAVE_GLIBMM_BIND_LOCAL, 1, [define if glibmm have support local symbol resolution in shared libraries]) else AC_MSG_NOTICE([WARNING: glibmm has no way to limit scope of symbols of shared libraries. Make sure external libraries used by plugins have no conflicting symbols. HINT: use Globus compiled against system OpenSSL library.]) fi AC_TRY_COMPILE([#include ],[Glib::getenv("");],[glibmm_getenv=yes],[glibmm_getenv=no]) if test "$glibmm_getenv" = yes; then AC_DEFINE(HAVE_GLIBMM_GETENV, 1, [define if glibmm have getenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for getenv. Usage of libc getenv is unsafe in multi-threaded applications.]) fi AC_TRY_COMPILE([#include ],[Glib::setenv("", "");],[glibmm_setenv=yes],[glibmm_setenv=no]) if test "$glibmm_setenv" = yes; then AC_DEFINE(HAVE_GLIBMM_SETENV, 1, [define if glibmm have setenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for setenv. 
Usage of libc setenv may be unsafe in multi-threaded applications.]) fi AC_TRY_COMPILE([#include <glibmm/miscutils.h>],[Glib::unsetenv("");],[glibmm_unsetenv=yes],[glibmm_unsetenv=no]) if test "$glibmm_unsetenv" = yes; then AC_DEFINE(HAVE_GLIBMM_UNSETENV, 1, [define if glibmm has unsetenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for unsetenv. Usage of libc unsetenv may be unsafe in multi-threaded applications.]) fi AC_TRY_COMPILE([#include <glibmm/miscutils.h>],[Glib::listenv();],[glibmm_listenv=yes],[glibmm_listenv=no]) if test "$glibmm_listenv" = yes; then AC_DEFINE(HAVE_GLIBMM_LISTENV, 1, [define if glibmm has listenv operations]) else AC_MSG_NOTICE([WARNING: glibmm has no support for listenv. Usage of libc environ is unsafe in multi-threaded applications.]) fi AC_LANG_RESTORE CPPFLAGS=$SAVE_CPPFLAGS fi # check libxml if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(LIBXML2, [libxml-2.0 >= 2.4.0]) AC_SUBST(LIBXML2_CFLAGS) AC_SUBST(LIBXML2_LIBS) fi # check openssl if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(OPENSSL, [openssl >= 1.1.1], [ OPENSSL_CFLAGS="$OPENSSL_CFLAGS -DOPENSSL_API_COMPAT=0x10101000L" AC_MSG_NOTICE([Forcing off deprecated functions for OpenSSL]) ], [ AC_MSG_ERROR([OpenSSL not found or is pre-1.1.1]) ]) AC_SUBST(OPENSSL_CFLAGS) AC_SUBST(OPENSSL_LIBS) fi # Check for available *_method functions in OpenSSL SAVE_CPPFLAGS=$CPPFLAGS SAVE_LIBS=$LIBS CPPFLAGS="$CPPFLAGS $OPENSSL_CFLAGS" LIBS="$LIBS $OPENSSL_LIBS" AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)SSLv3_method(); } ]])], [AC_DEFINE(HAVE_SSLV3_METHOD,1,[define if SSLv3_method is available])], [AC_MSG_NOTICE([No SSLv3_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLSv1_method(); } ]])], [AC_DEFINE(HAVE_TLSV1_METHOD,1,[define if TLSv1_method is available])], [AC_MSG_NOTICE([No TLSv1_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLSv1_1_method(); } ]])], [AC_DEFINE(HAVE_TLSV1_1_METHOD,1,[define if TLSv1_1_method is available])], [AC_MSG_NOTICE([No TLSv1_1_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLSv1_2_method(); } ]])], [AC_DEFINE(HAVE_TLSV1_2_METHOD,1,[define if TLSv1_2_method is available])], [AC_MSG_NOTICE([No TLSv1_2_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)TLS_method(); } ]])], [AC_DEFINE(HAVE_TLS_METHOD,1,[define if TLS_method is available])], [AC_MSG_NOTICE([No TLS_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)DTLSv1_method(); } ]])], [AC_DEFINE(HAVE_DTLSV1_METHOD,1,[define if DTLSv1_method is available])], [AC_MSG_NOTICE([No DTLSv1_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)DTLSv1_2_method(); } ]])], [AC_DEFINE(HAVE_DTLSV1_2_METHOD,1,[define if DTLSv1_2_method is available])], [AC_MSG_NOTICE([No DTLSv1_2_method function available])]) AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ #include <openssl/ssl.h> void _test(void) { (void)DTLS_method(); } ]])], [AC_DEFINE(HAVE_DTLS_METHOD,1,[define if DTLS_method is available])], [AC_MSG_NOTICE([No DTLS_method function available])]) AC_LANG_POP([C++]) CPPFLAGS=$SAVE_CPPFLAGS LIBS=$SAVE_LIBS #check mozilla nss enables_nss=yes NSS_INSTALLED=no dnl Check if nss lib is explicitly disabled, default is enabled.
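# (Sketch of the effect, for orientation: when pkg-config finds nss >= 3.10
# the HAVE_NSS define and the NSS_ENABLED automake conditional below are set;
# otherwise NSS support is skipped with a warning.)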
AC_ARG_ENABLE(nss, AC_HELP_STRING([--disable-nss], [disable use of the mozilla nss library]),[enables_nss="$enableval"],[]) if test "$enables_nss" = "yes"; then PKG_CHECK_MODULES(NSS, [nss >= 3.10], [NSS_INSTALLED=yes] , [ AC_MSG_WARN([Cannot locate nss lib]) NSS_INSTALLED=no enables_nss=no ]) if test "x$NSS_INSTALLED" = "xyes" ; then AC_DEFINE(HAVE_NSS, 1, [define if NSS is enabled and available]) fi fi AC_SUBST(NSS_CFLAGS) AC_SUBST(NSS_LIBS) AM_CONDITIONAL([NSS_ENABLED], test x$NSS_INSTALLED = xyes) #check SQLite SQLITE_INSTALLED=no PKG_CHECK_MODULES(SQLITE, [sqlite3 >= 3.6], [SQLITE_INSTALLED=yes] , [ AC_MSG_WARN([Cannot locate SQLite newer than 3.6]) SQLITE_INSTALLED=no ]) if test "x$SQLITE_INSTALLED" = "xyes" ; then AC_DEFINE(HAVE_SQLITE, 1, [define if SQLite is available]) # Check for function available since 3.8 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $SQLITE_CFLAGS" LIBS="$LIBS $SQLITE_LIBS" AC_CHECK_FUNCS(sqlite3_errstr) CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS fi AC_SUBST(SQLITE_CFLAGS) AC_SUBST(SQLITE_LIBS) AM_CONDITIONAL([SQLITE_ENABLED], test x$SQLITE_INSTALLED = xyes) # check cppunit if test "$enables_hed" = "yes"; then AC_ARG_ENABLE(cppunit, AC_HELP_STRING([--disable-cppunit], [disable cppunit-based UNIT testing of code]),[enables_cppunit=$enableval],[]) if test "$enables_cppunit" = "yes"; then PKG_CHECK_MODULES(CPPUNIT, [cppunit],[], [AC_PATH_PROG(CPPUNIT_CONFIG, cppunit-config, no) if test "x$CPPUNIT_CONFIG" = "xno"; then AC_MSG_WARN([cppunit-config not found - no UNIT testing will be performed]) CPPUNIT_CFLAGS= CPPUNIT_LIBS= enables_cppunit="no" else CPPUNIT_CFLAGS="`$CPPUNIT_CONFIG --cflags`" CPPUNIT_LIBS="`$CPPUNIT_CONFIG --libs`" fi]) if test "x$CPPUNIT_CONFIG" != "xno" || test "x$CPPUNIT_PKG_ERRORS" != "x" then TEST_DIR=test else enables_cppunit=no TEST_DIR= fi fi AC_SUBST(CPPUNIT_CFLAGS) AC_SUBST(CPPUNIT_LIBS) AC_SUBST(TEST_DIR) else enables_cppunit="no" fi # check ldns library if test "$enables_compute_client" = "yes"; then AC_ARG_ENABLE(ldns, AC_HELP_STRING([--disable-ldns], [disable ldns library usage (makes ARCHERY client unavailable) ]),[enables_ldns=$enableval],[]) if test "$enables_ldns" = "yes"; then PKG_CHECK_MODULES(LDNS, [ldns],[], [AC_PATH_PROG(LDNS_CONFIG, ldns-config, no) if test "x$LDNS_CONFIG" = "xno"; then AC_CHECK_HEADER([ldns/ldns.h], [AC_CHECK_LIB([ldns], [ldns_dname_new_frm_str], [ LDNS_CFLAGS="$LDNS_CFLAGS" LDNS_LIBS="$LDNS_LIBS -lldns" ], [enables_ldns="no"]) ],[enables_ldns="no"]) else LDNS_CFLAGS="`$LDNS_CONFIG --cflags`" LDNS_LIBS="`$LDNS_CONFIG --libs`" fi ]) if test "$enables_ldns" = "no"; then AC_MSG_WARN([ldns library was not found. Compute clients will be built without ARCHERY support.]) fi fi else enables_ldns="no" fi if test "x$enables_ldns" = "xyes" ; then AC_DEFINE(HAVE_LDNS, 1, [define if LDNS is enabled and available]) else LDNS_CFLAGS= LDNS_LIBS= fi AC_SUBST(LDNS_CFLAGS) AC_SUBST(LDNS_LIBS) AM_CONDITIONAL(LDNS_ENABLED, test "x$enables_ldns" = "xyes") ############################## # # Check xmlsec1 # ############################# MACOSX="" case "${host}" in *darwin*) MACOSX="yes" ;; esac if test "x$MACOSX" = "xyes"; then AC_DEFINE(_MACOSX, 1, [Define if compiling for MacOSX]) fi AM_CONDITIONAL([MACOSX], [ test "x$MACOSX" = "xyes"]) if test "$enables_hed" = "yes"; then XMLSEC_MIN_VERSION="1.2.4" XMLSEC_OPENSSL_MIN_VERSION="1.2.4" XMLSEC_CONFIG="${XMLSEC1_CONFIG:-xmlsec1-config}" XMLSEC_CFLAGS="" XMLSEC_LIBS="" XMLSEC_INSTALLED=no dnl Check if xmlsec1 is explicitly disabled, default is enable. 
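# (Context, illustrative: the xmlsec1 discovery below prefers pkg-config and
# falls back to xmlsec1-config; the backslash counting further down works
# around distributions whose xmlsec1.pc mis-escapes the quotes in e.g.
# -DXMLSEC_CRYPTO=\"openssl\", cf. the Red Hat bug referenced there.)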
AC_ARG_ENABLE(xmlsec1, AC_HELP_STRING([--disable-xmlsec1], [disable features which need xmlsec1 library]),[enables_xmlsec1=$enableval],[]) if test "x$enables_xmlsec1" = "xyes"; then AC_ARG_WITH(xmlsec1, [ --with-xmlsec1=(PATH) xmlsec1 location]) if test "x$with_xmlsec1" = "x" ; then PKG_CHECK_MODULES(XMLSEC, [xmlsec1 >= $XMLSEC_MIN_VERSION], [XMLSEC_INSTALLED=yes], [XMLSEC_INSTALLED=no]) if test "x$XMLSEC_INSTALLED" = "xyes" ; then PKG_CHECK_MODULES(XMLSEC_OPENSSL, [xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION], [XMLSEC_INSTALLED=yes],[XMLSEC_INSTALLED=no]) fi # Find number of backslashes in XMLSEC_CFLAGS n=$(echo $XMLSEC_CFLAGS|sed 's/.*-DXMLSEC_CRYPTO=\([[^ ]]*\).*/\1/'|tr -d '[[A-Za-z0-1\n"]]'| wc -c) # Fixes due to bugs in pkg-config and/or xmlsec1 # # 0: Indicates a bug in pkg-config which removes the escaping of the quotes # 2: Correct value with escaped quotes # 6: Old xmlsec1 version which used 3 back-slashes to escape quotes # See eg. https://bugzilla.redhat.com/show_bug.cgi?id=675334 # Make sure that the quotes are escaped with single backslash if test $n = 0 -o $n = 6; then AC_MSG_NOTICE([Working around bad combination of pkgconfig and xmlsec1 with $n back-slashes]) XMLSEC_CFLAGS=$(echo $XMLSEC_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([[^ \\"]]*\)\\*" \(.*\)/\1\\"\2\\" \3/') XMLSEC_OPENSSL_CFLAGS=$(echo $XMLSEC_OPENSSL_CFLAGS|sed 's/\(.*-DXMLSEC_CRYPTO=\)\\*"\([[^ \\"]]*\)\\*" \(.*\)/\1\\"\2\\" \3/') fi fi if test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" != "xyes"; then AC_MSG_CHECKING(for xmlsec1 libraries >= $XMLSEC_MIN_VERSION) if test "x$with_xmlsec1" != "x" ; then XMLSEC_CONFIG=$with_xmlsec1/bin/$XMLSEC_CONFIG fi "$XMLSEC_CONFIG" --version 2>/dev/null 1>/dev/null if test "$?" != '0' ; then AC_MSG_WARN(Could not find xmlsec1 anywhere; The xml security related functionality will not be compiled) else vers=`$XMLSEC_CONFIG --version 2>/dev/null | awk -F. '{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` minvers=`echo $XMLSEC_MIN_VERSION | awk -F. 
'{ printf "%d", ($1 * 1000 + $2) * 1000 + $3;}'` if test "$vers" -ge "$minvers" ; then XMLSEC_LIBS="`$XMLSEC_CONFIG --libs`" XMLSEC_CFLAGS="`$XMLSEC_CONFIG --cflags`" #check the xmlsec1-openssl here if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi PKG_CHECK_MODULES(XMLSEC_OPENSSL, [xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION], [XMLSEC_INSTALLED=yes],[XMLSEC_INSTALLED=no]) else AC_MSG_WARN(You need at least xmlsec1 $XMLSEC_MIN_VERSION for this version of arc) fi fi elif test "x$XMLSEC_INSTALLED" = "xno" -a "x$MACOSX" = "xyes"; then #MACOSX has no "ldd" which is needed by xmlsec1-config, so here simply we use PKG_CHECK_MODULES if test "x$PKG_CONFIG_PATH" != "x"; then PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig:$PKG_CONFIG_PATH" else PKG_CONFIG_PATH="$with_xmlsec1/lib/pkgconfig" fi PKG_CHECK_MODULES(XMLSEC, [xmlsec1 >= $XMLSEC_MIN_VERSION], [XMLSEC_INSTALLED=yes], [XMLSEC_INSTALLED=no]) if test "x$XMLSEC_INSTALLED" = "xyes" ; then PKG_CHECK_MODULES(XMLSEC_OPENSSL, [xmlsec1-openssl >= $XMLSEC_OPENSSL_MIN_VERSION], [XMLSEC_INSTALLED=yes],[XMLSEC_INSTALLED=no]) fi fi AC_SUBST(XMLSEC_CFLAGS) AC_SUBST(XMLSEC_LIBS) AC_SUBST(XMLSEC_OPENSSL_CFLAGS) AC_SUBST(XMLSEC_OPENSSL_LIBS) #AC_SUBST(XMLSEC_CONFIG) #AC_SUBST(XMLSEC_MIN_VERSION) enables_xmlsec1="$XMLSEC_INSTALLED" fi else enables_xmlsec1="no" fi # Check monitor AC_ARG_ENABLE(monitor, AC_HELP_STRING([--enable-monitor], [enable use of the monitor]),[enables_monitor="$enableval"],[]) if test "x$enables_monitor" = "xyes"; then AC_ARG_WITH(monitor, [ --with-monitor=(PATH) where to install the monitor, eg /var/www/monitor or /usr/share/arc/monitor]) AC_MSG_CHECKING(for monitor installation path) if test "x$with_monitor" != "x" ; then monitor_prefix=$with_monitor else monitor_prefix=${datadir}/arc/monitor fi AC_MSG_RESULT([$monitor_prefix]) AC_SUBST(monitor_prefix) fi # check zlib ZLIB_CFLAGS= ZLIB_LDFLAGS= ZLIB_LIBS= if test "$enables_hed" = "yes"; then SAVE_CPPFLAGS=$CPPFLAGS SAVE_LDFLAGS=$LDFLAGS AC_ARG_WITH(zlib, AC_HELP_STRING([--with-zlib=PATH], [where zlib is installed]), [ if test -d "$withval"; then ZLIB_CFLAGS="${CPPFLAGS} -I$withval/include" ZLIB_LDFLAGS="${LDFLAGS} -L$withval/lib" fi ] ) CPPFLAGS="$CPPFLAGS $ZLIB_CFLAGS" LDFLAGS="$LDFLAGS $ZLIB_LDFLAGS" AC_CHECK_HEADER([zlib.h],[ZLIB_CFLAGS="$ZLIB_CFLAGS"],AC_MSG_ERROR([unable to find zlib header files])) AC_CHECK_LIB([z],[deflateInit2_],[ZLIB_LIBS="$ZLIB_LDFLAGS -lz"],AC_MSG_ERROR([unable to link with zlib library])) CPPFLAGS=$SAVE_CPPFLAGS LDFLAGS=$SAVE_LDFLAGS fi AC_SUBST(ZLIB_CFLAGS) AC_SUBST(ZLIB_LIBS) # SQLITEJSTORE (storing job information in SQLite) AC_ARG_ENABLE(sqlitejstore, AC_HELP_STRING([--disable-sqlitejstore], [disable storing local job information in SQLite]), [enables_sqlitejstore=$enableval],[]) if test "$enables_sqlitejstore" = "yes"; then if test "x$SQLITE_INSTALLED" != "xyes" ; then AC_MSG_NOTICE([For storing jobs in SQLite install SQLite 3.6 or newer - disabling]) enables_sqlitejstore="no" fi fi AC_MSG_NOTICE([Storing jobs in SQLite enabled: $enables_sqlitejstore]) AM_CONDITIONAL([SQLITEJSTORE_ENABLED],[test "x$enables_sqlitejstore" = "xyes"]) if test "x$enables_sqlitejstore" = "xyes"; then AC_DEFINE(SQLITEJSTORE_ENABLED, 1, [define to build job information in SQLite storage]) fi # globus/gpt packages # globus/gpt packages if test "$enables_hed" = "yes"; then PKG_CHECK_MODULES(GLOBUS_COMMON, [globus-common], [ GLOBUS_COMMON_VERSION=`$PKG_CONFIG --modversion 
globus-common`], [ GPT_PKG(globus_common) ]) AC_SUBST(GLOBUS_COMMON_CFLAGS) AC_SUBST(GLOBUS_COMMON_LIBS) PKG_CHECK_MODULES(GLOBUS_GSSAPI_GSI, [globus-gssapi-gsi], [ GLOBUS_GSSAPI_GSI_VERSION=`$PKG_CONFIG --modversion globus-gssapi-gsi`], [ GPT_PKG(globus_gssapi_gsi) ]) AC_SUBST(GLOBUS_GSSAPI_GSI_CFLAGS) AC_SUBST(GLOBUS_GSSAPI_GSI_LIBS) PKG_CHECK_MODULES(GLOBUS_GSS_ASSIST, [globus-gss-assist], [ GLOBUS_GSS_ASSIST_VERSION=`$PKG_CONFIG --modversion globus-gss-assist`], [ GPT_PKG(globus_gss_assist) ]) AC_SUBST(GLOBUS_GSS_ASSIST_CFLAGS) AC_SUBST(GLOBUS_GSS_ASSIST_LIBS) PKG_CHECK_MODULES(GLOBUS_GSI_CALLBACK, [globus-gsi-callback], [ GLOBUS_GSI_CALLBACK_VERSION=`$PKG_CONFIG --modversion globus-gsi-callback`], [ GPT_PKG(globus_gsi_callback) ]) AC_SUBST(GLOBUS_GSI_CALLBACK_CFLAGS) AC_SUBST(GLOBUS_GSI_CALLBACK_LIBS) PKG_CHECK_MODULES(GLOBUS_FTP_CLIENT, [globus-ftp-client], [ GLOBUS_FTP_CLIENT_VERSION=`$PKG_CONFIG --modversion globus-ftp-client`], [ GPT_PKG(globus_ftp_client) ]) AC_SUBST(GLOBUS_FTP_CLIENT_CFLAGS) AC_SUBST(GLOBUS_FTP_CLIENT_LIBS) PKG_CHECK_MODULES(GLOBUS_FTP_CONTROL, [globus-ftp-control], [ GLOBUS_FTP_CONTROL_VERSION=`$PKG_CONFIG --modversion globus-ftp-control`], [ GPT_PKG(globus_ftp_control) ]) AC_SUBST(GLOBUS_FTP_CONTROL_CFLAGS) AC_SUBST(GLOBUS_FTP_CONTROL_LIBS) PKG_CHECK_MODULES(GLOBUS_IO, [globus-io], [ GLOBUS_IO_VERSION=`$PKG_CONFIG --modversion globus-io`], [ GPT_PKG(globus_io) ]) AC_SUBST(GLOBUS_IO_CFLAGS) AC_SUBST(GLOBUS_IO_LIBS) PKG_CHECK_MODULES(GLOBUS_GSI_CERT_UTILS, [globus-gsi-cert-utils], [ GLOBUS_GSI_CERT_UTILS_VERSION=`$PKG_CONFIG --modversion globus-gsi-cert-utils`], [ GPT_PKG(globus_gsi_cert_utils) ]) AC_SUBST(GLOBUS_GSI_CERT_UTILS_CFLAGS) AC_SUBST(GLOBUS_GSI_CERT_UTILS_LIBS) PKG_CHECK_MODULES(GLOBUS_GSI_CREDENTIAL, [globus-gsi-credential], [ GLOBUS_GSI_CREDENTIAL_VERSION=`$PKG_CONFIG --modversion globus-gsi-credential`], [ GPT_PKG(globus_gsi_credential) ]) AC_SUBST(GLOBUS_GSI_CREDENTIAL_CFLAGS) AC_SUBST(GLOBUS_GSI_CREDENTIAL_LIBS) PKG_CHECK_MODULES(GLOBUS_OPENSSL_MODULE, [globus-openssl-module], [ GLOBUS_OPENSSL_MODULE_VERSION=`$PKG_CONFIG --modversion globus-openssl-module`], [ GPT_PKG(globus_openssl_module) ]) AC_SUBST(GLOBUS_OPENSSL_MODULE_CFLAGS) AC_SUBST(GLOBUS_OPENSSL_MODULE_LIBS) # Check for new globus thread model selection SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_COMMON_CFLAGS" LIBS="$LIBS $GLOBUS_COMMON_LIBS" AC_CHECK_FUNCS(globus_thread_set_model) CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS # Check for gridftp-v2 SAVE_CFLAGS=$CFLAGS SAVE_LIBS=$LIBS CFLAGS="$CFLAGS $GLOBUS_FTP_CLIENT_CFLAGS" LIBS="$LIBS $GLOBUS_FTP_CLIENT_LIBS" AC_CHECK_FUNCS(globus_ftp_client_handleattr_set_gridftp2) CFLAGS=$SAVE_CFLAGS LIBS=$SAVE_LIBS globus_openssl_detected= PKG_CHECK_MODULES(GLOBUS_OPENSSL, [globus-openssl], [ GLOBUS_OPENSSL_VERSION=`$PKG_CONFIG --modversion globus-openssl`], [ GPT_PKG(globus_openssl) ]) if test ! "x$GLOBUS_OPENSSL_LIBS" = "x" ; then globus_openssl_detected=`echo "$GLOBUS_OPENSSL_LIBS" | grep "lssl_$GPT_FLAVOR"` if test ! "x$globus_openssl_detected" = "x" ; then globus_openssl_detected="yes" fi fi if test "x$globus_openssl_detected" = "xyes" ; then AC_MSG_RESULT([ Globus' own OpenSSL library detected. In order to avoid runtime conflicts the following components will be disabled: GridFTP DMC, SRM DMC, GSI MCC. To enable these components use Globus compiled against the system OpenSSL.
]) GLOBUS_FTP_CLIENT_VERSION= GLOBUS_FTP_CONTROL_VERSION= GLOBUS_IO_VERSION= GLOBUS_GSSAPI_GSI_VERSION= fi if test "x$GLOBUS_IO_VERSION" = "x"; then IO_VERSION_MAJOR=0 else IO_VERSION_MAJOR=`echo "$GLOBUS_IO_VERSION" | sed 's/^\([[^.]]*\).*/\1/'`; fi AC_DEFINE_UNQUOTED(GLOBUS_IO_VERSION,$IO_VERSION_MAJOR,[Globus IO version]) if test "x$GLOBUS_GSSAPI_GSI_VERSION" = "x"; then GLOBUS_GSSAPI_GSI_VERSION_MAJOR=0 GLOBUS_GSSAPI_GSI_VERSION_MINOR=0 else GLOBUS_GSSAPI_GSI_VERSION_MAJOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^\([[^.]]*\).*/\1/'`; GLOBUS_GSSAPI_GSI_VERSION_MINOR=`echo "$GLOBUS_GSSAPI_GSI_VERSION" | sed 's/^[[^.]]*\.\([[^.]]*\).*/\1/'`; fi if test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -lt "12"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 elif test "$GLOBUS_GSSAPI_GSI_VERSION_MAJOR" -eq "12"; then if test "$GLOBUS_GSSAPI_GSI_VERSION_MINOR" -lt "2"; then GLOBUS_GSSAPI_GSI_OLD_OPENSSL=1 else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi else GLOBUS_GSSAPI_GSI_OLD_OPENSSL=0 fi AC_DEFINE_UNQUOTED(GLOBUS_GSSAPI_GSI_VERSION,$GLOBUS_GSSAPI_GSI_VERSION_MAJOR,[Globus GSSAPI GSI version]) AC_DEFINE_UNQUOTED(GLOBUS_GSSAPI_GSI_OLD_OPENSSL,$GLOBUS_GSSAPI_GSI_OLD_OPENSSL,[Globus GSSAPI GSI is for OpenSSL pre-1.1]) dnl dnl DEFAULT_GLOBUS_LOCATION dnl AC_MSG_CHECKING(for DEFAULT_GLOBUS_LOCATION) # GLOBUS_LOCATION is set by GPT macros DEFAULT_GLOBUS_LOCATION="$GLOBUS_LOCATION" AC_MSG_RESULT($DEFAULT_GLOBUS_LOCATION) AC_SUBST(DEFAULT_GLOBUS_LOCATION) #check lcas DEFAULT_LCAS_LOCATION=/opt/glite LCAS_LOCATION= LCAS_CFLAGS= LCAS_LIBS= AC_ARG_WITH(lcas-location, [ --with-lcas-location= Specify the LCAS installation path. [[/opt/glite]]], [ LCAS_LOCATION=$with_lcas_location if test ! -d $LCAS_LOCATION; then AC_MSG_WARN([LCAS_LOCATION ($LCAS_LOCATION) does not exist]) LCAS_LOCATION= fi ],[ if test "x$LCAS_LOCATION" = "x"; then LCAS_LOCATION=$DEFAULT_LCAS_LOCATION fi if test ! -d $LCAS_LOCATION; then LCAS_LOCATION= fi ] ) if test "x$LCAS_LOCATION" != "x"; then LCAS_CFLAGS=$LCAS_LOCATION/include/glite/security/lcas if test ! -d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include/lcas if test ! -d $LCAS_CFLAGS; then LCAS_CFLAGS=$LCAS_LOCATION/include fi fi LCAS_CFLAGS=-I$LCAS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCAS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" AC_CHECK_HEADERS([lcas.h], LCAS_LDFLAGS= if test -d $LCAS_LOCATION/lib64; then LCAS_LDFLAGS="-L$LCAS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCAS_LDFLAGS="-L$LCAS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCAS_LDFLAGS" AC_CHECK_LIB(lcas,lcas_init, LCAS_LIBS="$LCAS_LDFLAGS -llcas",LCAS_LOCATION="",) LDFLAGS=$SAVE_LDFLAGS , LCAS_LOCATION="" ) CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCAS_LOCATION" != "x"; then AC_DEFINE(HAVE_LCAS, 1, [define if lcas is available]) AC_SUBST(LCAS_LOCATION) AC_SUBST(LCAS_CFLAGS) AC_SUBST(LCAS_LIBS) fi #check lcmaps DEFAULT_LCMAPS_LOCATION=/opt/glite LCMAPS_LOCATION= LCMAPS_CFLAGS= LCMAPS_LIBS= AC_ARG_WITH(lcmaps-location, [ --with-lcmaps-location= Specify the LCMAPS installation path. [[/opt/glite]]], [ LCMAPS_LOCATION=$with_lcmaps_location if test ! -d $LCMAPS_LOCATION; then AC_MSG_WARN([LCMAPS_LOCATION ($LCMAPS_LOCATION) does not exist]) LCMAPS_LOCATION= fi ],[ if test "x$LCMAPS_LOCATION" = "x"; then LCMAPS_LOCATION=$DEFAULT_LCMAPS_LOCATION fi if test ! -d $LCMAPS_LOCATION; then LCMAPS_LOCATION= fi ] ) if test "x$LCMAPS_LOCATION" != "x"; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/glite/security/lcmaps if test ! -d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include/lcmaps if test !
-d $LCMAPS_CFLAGS; then LCMAPS_CFLAGS=$LCMAPS_LOCATION/include fi fi LCMAPS_CFLAGS=-I$LCMAPS_CFLAGS SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$LCMAPS_CFLAGS $GLOBUS_GSSAPI_GSI_CFLAGS" AC_CHECK_HEADERS([lcmaps.h], LCMAPS_LDFLAGS= if test -d $LCMAPS_LOCATION/lib64; then LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib64 $GLOBUS_GSSAPI_GSI_LIBS" else LCMAPS_LDFLAGS="-L$LCMAPS_LOCATION/lib $GLOBUS_GSSAPI_GSI_LIBS" fi SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $LCMAPS_LDFLAGS" AC_CHECK_LIB(lcmaps,lcmaps_init, LCMAPS_LIBS="$LCMAPS_LDFLAGS -llcmaps",LCMAPS_LOCATION="",) LDFLAGS=$SAVE_LDFLAGS , LCMAPS_LOCATION="" ) CPPFLAGS=$SAVE_CPPFLAGS fi if test "x$LCMAPS_LOCATION" != "x"; then AC_DEFINE(HAVE_LCMAPS, 1, [define if lcmaps is available]) AC_SUBST(LCMAPS_LOCATION) AC_SUBST(LCMAPS_CFLAGS) AC_SUBST(LCMAPS_LIBS) fi # Check if mock DMC is enabled AC_ARG_ENABLE(mock-dmc, AC_HELP_STRING([--enable-mock-dmc], [enable mock DMC, default is disable]),[enables_mock_dmc="$enableval"],[]) # Check for GFAL2 AC_ARG_ENABLE(gfal, AC_HELP_STRING([--enable-gfal], [enable the GFAL support, default is disable]),[enables_gfal="$enableval"],[]) if test "x$enables_gfal" = "xyes"; then PKG_CHECK_MODULES(GFAL2, gfal_transfer, [], [enables_gfal="no"]) AC_SUBST(GFAL2_CFLAGS) AC_SUBST(GFAL2_LIBS) fi # Check for S3 AC_ARG_ENABLE(s3, AC_HELP_STRING([--enable-s3], [enable the S3 support, default is disable]),[enables_s3="$enableval"],[]) if test "x$enables_s3" = "xyes"; then AC_ARG_WITH(s3, [ --with-s3=(PATH) libs3 location]) if test ! "x$with_s3" = "x" ; then S3_LOCATION="$with_s3" S3_CPPFLAGS="-I$S3_LOCATION/include" if test -d $S3_LOCATION/lib64; then S3_LDFLAGS="-L$S3_LOCATION/lib64" else S3_LDFLAGS="-L$S3_LOCATION/lib" fi fi SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $S3_CPPFLAGS" AC_CHECK_HEADER(libs3.h, [], [enables_s3="no"]) CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $S3_LDFLAGS" AC_CHECK_LIB([s3], [S3_initialize], [S3_LIBS="$S3_LDFLAGS -ls3"], [enables_s3="no"]) LDFLAGS=$SAVE_LDFLAGS AC_SUBST(S3_CPPFLAGS) AC_SUBST(S3_LIBS) if test x$enables_s3 = xyes then if s3 help 2>&1 | grep -q -- '--timeout' ; then AC_DEFINE([HAVE_S3_TIMEOUT], 1, [Define if S3 API has timeouts]) fi fi fi # Check for xrootd (c++) AC_LANG_SAVE AC_LANG_CPLUSPLUS AC_ARG_ENABLE(xrootd, AC_HELP_STRING([--disable-xrootd], [disable the xrootd support, default is enable]),[enables_xrootd="$enableval"],[]) if test "x$enables_xrootd" = "xyes"; then XROOTD_CPPFLAGS="-I/usr/include/xrootd" AC_ARG_WITH(xrootd, [ --with-xrootd=(PATH) Xrootd location]) if test ! 
"x$with_xrootd" = "x" ; then XROOTD_LOCATION="$with_xrootd" XROOTD_CPPFLAGS="-I$XROOTD_LOCATION/include/xrootd" if test -d $XROOTD_LOCATION/lib64; then XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib64" else XROOTD_LDFLAGS="-L$XROOTD_LOCATION/lib" fi fi AC_MSG_CHECKING([for XROOTD headers]) SAVE_CPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS $XROOTD_CPPFLAGS" AC_TRY_COMPILE([#include ], [], [ AC_MSG_RESULT([$XROOTD_CPPFLAGS]) ], [ XROOTD_CPPFLAGS="-std=c++0x $XROOTD_CPPFLAGS" CPPFLAGS="$SAVE_CPPFLAGS $XROOTD_CPPFLAGS" AC_TRY_COMPILE([#include ], [], [ AC_MSG_RESULT([$XROOTD_CPPFLAGS]) ], [ AC_MSG_RESULT([no]) enables_xrootd="no" ]) ]) CPPFLAGS=$SAVE_CPPFLAGS SAVE_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $XROOTD_LDFLAGS" AC_CHECK_LIB([XrdPosix], [main], [XROOTD_LIBS="$XROOTD_LDFLAGS -lXrdPosix -lXrdCl"], [enables_xrootd="no"]) LDFLAGS=$SAVE_LDFLAGS fi AC_SUBST(XROOTD_CPPFLAGS) AC_SUBST(XROOTD_LIBS) fi AC_LANG_RESTORE # Setup conditionals AM_CONDITIONAL([GLOBUSUTILS_ENABLED], test -n "$GLOBUS_COMMON_VERSION") AM_CONDITIONAL([GRIDFTP_ENABLED], test -n "$GLOBUS_FTP_CLIENT_VERSION") AM_CONDITIONAL([MOCK_DMC_ENABLED], test x$enables_mock_dmc = xyes) AM_CONDITIONAL([GFAL_ENABLED], test x$enables_gfal = xyes) AM_CONDITIONAL([S3_DMC_ENABLED], test x$enables_s3 = xyes) AM_CONDITIONAL([XROOTD_ENABLED], test x$enables_xrootd = xyes) AM_CONDITIONAL([XMLSEC_ENABLED], test x$XMLSEC_INSTALLED = xyes) AM_CONDITIONAL([CPPUNIT_ENABLED], test x$enables_cppunit = xyes) enables_srm_dmc=no if test "$enables_hed" = "yes"; then enables_srm_dmc=yes fi AM_CONDITIONAL([SRM_DMC_ENABLED],[test "x$enables_srm_dmc" = "xyes"]) # Setup defines if test -n "$GLOBUS_COMMON_VERSION"; then AC_DEFINE(HAVE_GLOBUS, 1, [define if GLOBUS is available]) fi if test x"$XMLSEC_INSTALLED" = xyes; then AC_DEFINE(HAVE_XMLSEC, 1, [define if XMLSEC package is available]) fi # Setup messages for reporting enables_gridftp=no if test -n "$GLOBUS_FTP_CLIENT_VERSION" ; then enables_gridftp=yes; fi enables_sqlite=no if test "x$SQLITE_INSTALLED" = "xyes" ; then enables_sqlite=yes; fi # Check version of Test::More Perl module. min_perl_test_more_version_required="0.88" # Stable version of Test::More containing done_testing sub. PERL_TEST_DIR= perl_test_more_version_found=$(perl -MTest::More -e "print \"\$Test::More::VERSION\"") if test $(echo "$perl_test_more_version_found" | cut -d. -f1) -gt $(echo "$min_perl_test_more_version_required" | cut -d. -f1) || \ test $(echo "$perl_test_more_version_found" | cut -d. -f1) -eq $(echo "$min_perl_test_more_version_required" | cut -d. -f1) && \ test $(echo "$perl_test_more_version_found" | cut -d. -f2) -ge $(echo "$min_perl_test_more_version_required" | cut -d. 
-f2); then PERL_TEST_DIR="test" fi AC_SUBST(PERL_TEST_DIR) # Check for the uuid lib UUID_LIBS="" if test "$enables_hed" = "yes"; then AC_CHECK_HEADER(uuid/uuid.h, [ AC_CHECK_FUNC([uuid_generate], [UUID_LIBS=], [ AC_CHECK_LIB([uuid], [uuid_generate], [UUID_LIBS=-luuid], [ AC_MSG_NOTICE([Can't find library containing uuid implementation]) ]) ]) ], [AC_MSG_NOTICE([Can't find uuid header])]) AC_SUBST(UUID_LIBS) LIBS="$LIBS $UUID_LIBS" fi # Check for dlopen DLOPEN_LIBS="" if test "$enables_hed" = "yes"; then AC_CHECK_FUNC([dlopen], [DLOPEN_LIBS=], [ AC_CHECK_LIB([dl], [dlopen], [DLOPEN_LIBS=-ldl], [ AC_MSG_NOTICE([Can't find library containing dlopen implementation]) ]) ]) AC_SUBST(DLOPEN_LIBS) fi # Check for clock_gettime AC_SEARCH_LIBS([clock_gettime], [rt]) # Define bash-completion dir PKG_CHECK_MODULES([BASH_COMPLETION], [bash-completion >= 2.0], [bashcompdir="`pkg-config --variable=completionsdir --define-variable=prefix=${prefix} --define-variable=datadir=${datadir} bash-completion`"], [bashcompdir="${sysconfdir}/bash_completion.d"]) AC_SUBST([bashcompdir]) # check for fsusage if test "$enables_hed" = "yes"; then gl_FSUSAGE fi if test "$enables_hed" = "yes"; then # Checks for header files. AC_HEADER_DIRENT AC_HEADER_STDC AC_HEADER_SYS_WAIT AC_CHECK_HEADERS([arpa/inet.h fcntl.h float.h limits.h netdb.h netinet/in.h sasl.h sasl/sasl.h stdint.h stdlib.h string.h sys/file.h sys/socket.h sys/vfs.h unistd.h uuid/uuid.h getopt.h]) AC_CXX_HAVE_SSTREAM # Checks for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL AC_C_CONST AC_TYPE_UID_T AC_C_INLINE AC_TYPE_MODE_T AC_TYPE_OFF_T AC_TYPE_PID_T AC_TYPE_SIZE_T AC_CHECK_MEMBERS([struct stat.st_blksize]) AC_HEADER_TIME AC_STRUCT_TM AC_CHECK_TYPES([ptrdiff_t]) # Checks for library functions. AC_FUNC_CHOWN AC_FUNC_CLOSEDIR_VOID AC_FUNC_ERROR_AT_LINE AC_FUNC_FORK AC_FUNC_LSTAT AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK AC_FUNC_MEMCMP AC_FUNC_MKTIME AC_FUNC_MALLOC AC_FUNC_REALLOC AC_FUNC_SELECT_ARGTYPES AC_TYPE_SIGNAL AC_FUNC_STRERROR_R AC_FUNC_STAT AC_CHECK_FUNCS([acl dup2 floor ftruncate gethostname getdomainname getpid gmtime_r lchown localtime_r memchr memmove memset mkdir mkfifo regcomp rmdir select setenv socket strcasecmp strchr strcspn strdup strerror strncasecmp strstr strtol strtoul strtoull timegm tzset unsetenv getopt_long_only getgrouplist mkdtemp posix_fallocate readdir_r [mkstemp] mktemp]) AC_CHECK_LIB([resolv], [res_query], [LIBRESOLV=-lresolv], [LIBRESOLV=]) AC_CHECK_LIB([resolv], [__dn_skipname], [LIBRESOLV=-lresolv], [LIBRESOLV=]) AC_CHECK_LIB([nsl], [gethostbyname], [LIBRESOLV="$LIBRESOLV -lnsl"], []) AC_CHECK_LIB([nsl], [getdomainname]) AC_SUBST(LIBRESOLV) fi # check for platform specific flags case " $LDFLAGS " in " -Wl,--no-undefined ") ;; " -Wl,-no-undefined ") ;; " -Wl,-z -Wl,defs ") ;; " -Wl,-z,defs ") ;; *) case "${host}" in *darwin*);; *) LDFLAGS="$LDFLAGS -Wl,--no-undefined" ;; esac ;; esac AC_PATH_PROGS(PDFLATEX, pdflatex) AC_PATH_PROGS(DOXYGEN, doxygen) AC_PATH_PROGS(DOT, dot) # Check if user asks to skip documentation build AC_ARG_ENABLE(doc, AC_HELP_STRING([--disable-doc], [disable building documentation (requires doxygen and pdflatex)]),[enables_doc=$enableval],[]) #if test "x$enables_doc" = "xyes"; then # There is no point disabling docs due to missing tools since the pdf # files are both in svn and in the dist tarball # if test "x$PDFLATEX" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing pdflatex - documentation won't be built]) # elif test "x$DOXYGEN" = "x"; then # enables_doc="no" #
AC_MSG_NOTICE([WARNING: Missing doxygen - documentation won't be built]) # elif test "x$DOT" = "x"; then # enables_doc="no" # AC_MSG_NOTICE([WARNING: Missing dot - documentation won't be built]) # fi #fi AC_MSG_NOTICE([Documentation enabled: $enables_doc]) AM_CONDITIONAL([DOC_ENABLED],[test "x$enables_doc" = "xyes"]) AM_CONDITIONAL([PYDOXYGEN],[test -f python/python/arc/index.xml -o "x$DOXYGEN" != "x"]) AM_CONDITIONAL([ALTPYDOXYGEN],[test -f python/altpython/arc/index.xml -o "x$DOXYGEN" != "x"]) # Check for explicitly and implicitly disabled services # A-Rex AC_ARG_ENABLE(a_rex_service, AC_HELP_STRING([--disable-a-rex-service], [disable building A-Rex service]), [enables_a_rex_service=$enableval],[]) if test "$enables_a_rex_service" = "yes"; then if test "x$SQLITE_INSTALLED" != "xyes" ; then AC_MSG_NOTICE([A-Rex can't be built without SQLite - disabling]) enables_a_rex_service="no" fi fi AC_MSG_NOTICE([A-Rex service enabled: $enables_a_rex_service]) AM_CONDITIONAL([A_REX_SERVICE_ENABLED],[test "x$enables_a_rex_service" = "xyes"]) # Internal job plugin AC_ARG_ENABLE(internal, AC_HELP_STRING([--enable-internal], [enable building the internal job plugin]), [enables_internal=$enableval],[]) if test "$enables_internal" = "yes"; then if test "x$enables_a_rex_service" != "xyes" ; then AC_MSG_NOTICE([Internal job plugin can't be built without A-Rex - disabling]) enables_internal="no" fi fi AC_MSG_NOTICE([Internal plugin enabled: $enables_internal]) AM_CONDITIONAL([INTERNAL_ENABLED],[test "x$enables_internal" = "xyes"]) # LDAP service AC_ARG_ENABLE(ldap_service, AC_HELP_STRING([--disable-ldap-service], [disable building LDAP Infosystem Service]), [enables_ldap_service=$enableval],[]) AC_MSG_NOTICE([LDAP Infosystem service enabled: $enables_ldap_service]) AM_CONDITIONAL([LDAP_SERVICE_ENABLED],[test "x$enables_ldap_service" = "xyes"]) # LDAP monitor AC_ARG_ENABLE(monitor, AC_HELP_STRING([--disable-monitor], [disable building LDAP Monitor]), [enables_monitor=$enableval],[]) AC_MSG_NOTICE([LDAP Monitor enabled: $enables_monitor]) AM_CONDITIONAL([MONITOR_ENABLED],[test "x$enables_monitor" = "xyes"]) # Cache service AC_ARG_ENABLE(candypond, AC_HELP_STRING([--disable-candypond], [disable building candypond]), [enables_candypond=$enableval],[]) if test "$enables_candypond" = "yes"; then if test !
"x$enables_a_rex_service" = "xyes" ; then enables_candypond="no" AC_MSG_NOTICE([CandyPond can't be built without A-REX - disabling]) fi fi AC_MSG_NOTICE([CandyPond enabled: $enables_candypond]) AM_CONDITIONAL([CANDYPOND_ENABLED],[test "x$enables_candypond" = "xyes"]) # DataDelivery service AC_ARG_ENABLE(datadelivery_service, AC_HELP_STRING([--disable-datadelivery-service], [disable building DataDelivery service]), [enables_datadelivery_service=$enableval],[]) AC_MSG_NOTICE([DataDelivery service enabled: $enables_datadelivery_service]) AM_CONDITIONAL([DATADELIVERY_SERVICE_ENABLED],[test "x$enables_datadelivery_service" = "xyes"]) # Check for explicitly and implicitely disabled clients AC_ARG_ENABLE(compute_client, AC_HELP_STRING([--disable-compute-client], [disable building compute (job management) client tools]), [enables_compute_client=$enableval],[]) AC_MSG_NOTICE([Compute client tools enabled: $enables_compute_client]) AM_CONDITIONAL([COMPUTE_CLIENT_ENABLED],[test "x$enables_compute_client" = "xyes"]) AC_ARG_ENABLE(credentials_client, AC_HELP_STRING([--disable-credentials-client], [disable building client tools for handling X.509 credentials]), [enables_credentials_client=$enableval],[]) AC_MSG_NOTICE([Credentials client tools enabled: $enables_credentials_client]) AM_CONDITIONAL([CREDENTIALS_CLIENT_ENABLED],[test "x$enables_credentials_client" = "xyes"]) AC_ARG_ENABLE(data_client, AC_HELP_STRING([--disable-data-client], [disable building generic client tools for handling data]), [enables_data_client=$enableval],[]) AC_MSG_NOTICE([Data client tools enabled: $enables_data_client]) AM_CONDITIONAL([DATA_CLIENT_ENABLED],[test "x$enables_data_client" = "xyes"]) AC_ARG_ENABLE(arcrest_client, AC_HELP_STRING([--disable-arcrest-client], [disables building ARC REST python module.]), [enables_arcrest_client=$enableval],[]) if test "x$enables_arcrest_client" = "xyes" ; then $PYTHON -m pip >/dev/null if test "$?" != '0' ; then AC_MSG_WARN([PIP not avilable -- disabling ARC REST pythin module]) enables_arcrest_client=no fi fi AC_MSG_NOTICE([ARC REST python module enabled: $enables_arcrest_client]) AM_CONDITIONAL([ARCREST_ENABLED],[test "x$enables_arcrest_client" = "xyes"]) # Check for consistency among disabled components if test "$enables_hed" = "no"; then if test "$enables_a_rex_service" = "yes" -o \ "$enables_candypond" = "yes" -o \ "$enables_datadelivery_service" = "yes" -o \ "$enables_compute_client" = "yes" -o \ "$enables_credentials_client" = "yes" -o \ "$enables_data_client" = "yes" -o \ "$enables_swig_python" = "yes" ; then AC_MSG_ERROR(HED is needed for building any of the client or service tools. Please enable HED by using --enable-hed.) 
fi fi AM_CONDITIONAL([HED_ENABLED],[test "x$enables_hed" = "xyes"]) # A-Rex specific hack for backend scripts tmp_dir=/tmp gnu_time=/usr/bin/time case "${host}" in *darwin*) # hostname -f does not work on OS X nodename="hostname" ;; *) nodename="/bin/hostname -f" ;; esac arc_location=$prefix AC_SUBST(arc_location) AC_SUBST(tmp_dir) AC_SUBST(gnu_time) AC_SUBST(nodename) # Shell for the job control scripts posix_shell='/bin/sh' AC_SUBST(posix_shell) DATE=`date +%Y-%m-%d ${SOURCE_DATE_EPOCH:+-u -d @$SOURCE_DATE_EPOCH}` AC_SUBST(DATE) #DATER=`date -R` DATER=`date +'%a, %d %b %Y %H:%M:%S %z'` AC_SUBST(DATER) SPECDATE=`LANG=C date +"%a %b %d %Y"` AC_SUBST(SPECDATE) AC_CONFIG_FILES([Makefile include/arc/ArcVersion.h src/Makefile src/external/Makefile src/external/cJSON/Makefile src/hed/Makefile src/hed/libs/compute/Makefile src/hed/libs/compute/test/Makefile src/hed/libs/compute/examples/Makefile src/hed/libs/common/ArcVersion.h src/hed/libs/common/Makefile src/hed/libs/common/test/Makefile src/hed/libs/communication/Makefile src/hed/libs/credential/Makefile src/hed/libs/credential/test/Makefile src/hed/libs/credentialmod/Makefile src/hed/libs/crypto/Makefile src/hed/libs/cryptomod/Makefile src/hed/libs/data/Makefile src/hed/libs/data/cache-clean.1 src/hed/libs/data/cache-list.1 src/hed/libs/data/test/Makefile src/hed/libs/data/examples/Makefile src/hed/libs/Makefile src/hed/libs/loader/Makefile src/hed/libs/loader/schema/Makefile src/hed/libs/loader/test/Makefile src/hed/libs/message/Makefile src/hed/libs/message/test/Makefile src/hed/libs/security/Makefile src/hed/libs/security/ArcPDP/Makefile src/hed/libs/security/ArcPDP/attr/Makefile src/hed/libs/security/ArcPDP/policy/Makefile src/hed/libs/security/ArcPDP/alg/Makefile src/hed/libs/security/ArcPDP/fn/Makefile src/hed/libs/credentialstore/Makefile src/hed/libs/ws-addressing/Makefile src/hed/libs/ws-security/Makefile src/hed/libs/ws-security/test/Makefile src/hed/libs/infosys/Makefile src/hed/libs/infosys/schema/Makefile src/hed/libs/infosys/test/Makefile src/hed/libs/delegation/Makefile src/hed/libs/delegation/test/Makefile src/hed/libs/xmlsec/Makefile src/hed/libs/globusutils/Makefile src/hed/libs/otokens/Makefile src/hed/daemon/Makefile src/hed/daemon/scripts/Makefile src/hed/daemon/schema/Makefile src/hed/daemon/unix/Makefile src/hed/mcc/Makefile src/hed/mcc/soap/Makefile src/hed/mcc/tcp/Makefile src/hed/mcc/tcp/schema/Makefile src/hed/mcc/http/Makefile src/hed/mcc/http/schema/Makefile src/hed/mcc/tls/Makefile src/hed/mcc/tls/schema/Makefile src/hed/mcc/tls/test/Makefile src/hed/mcc/msgvalidator/Makefile src/hed/mcc/msgvalidator/schema/Makefile src/hed/acc/Makefile src/hed/acc/ARCREST/Makefile src/hed/acc/Broker/Makefile src/hed/acc/Broker/test/Makefile src/hed/acc/PythonBroker/Makefile src/hed/acc/JobDescriptionParser/Makefile src/hed/acc/JobDescriptionParser/test/Makefile src/hed/acc/ARCHERY/Makefile src/hed/acc/TEST/Makefile src/hed/dmc/Makefile src/hed/dmc/file/Makefile src/hed/dmc/gridftp/Makefile src/hed/dmc/http/Makefile src/hed/dmc/srm/Makefile src/hed/dmc/srm/srmclient/Makefile src/hed/dmc/gfal/Makefile src/hed/dmc/xrootd/Makefile src/hed/dmc/mock/Makefile src/hed/dmc/rucio/Makefile src/hed/dmc/s3/Makefile src/hed/profiles/general/general.xml src/hed/shc/Makefile src/hed/shc/arcpdp/Makefile src/hed/shc/arcpdp/schema/Makefile src/hed/shc/xacmlpdp/Makefile src/hed/shc/xacmlpdp/schema/Makefile src/hed/shc/delegationpdp/Makefile src/hed/shc/delegationpdp/schema/Makefile src/hed/shc/gaclpdp/Makefile 
src/hed/shc/pdpserviceinvoker/Makefile src/hed/shc/pdpserviceinvoker/schema/Makefile src/hed/shc/allowpdp/Makefile src/hed/shc/denypdp/Makefile src/hed/shc/simplelistpdp/Makefile src/hed/shc/simplelistpdp/schema/Makefile src/hed/shc/arcauthzsh/Makefile src/hed/shc/arcauthzsh/schema/Makefile src/hed/shc/usernametokensh/Makefile src/hed/shc/usernametokensh/schema/Makefile src/hed/shc/x509tokensh/Makefile src/hed/shc/x509tokensh/schema/Makefile src/hed/shc/samltokensh/Makefile src/hed/shc/samltokensh/schema/Makefile src/hed/shc/saml2sso_assertionconsumersh/Makefile src/hed/shc/delegationsh/Makefile src/hed/shc/delegationsh/schema/Makefile src/hed/shc/legacy/Makefile src/hed/shc/legacy/test/Makefile src/hed/shc/legacy/schema/Makefile src/hed/shc/otokens/Makefile src/hed/identitymap/Makefile src/hed/identitymap/schema/Makefile src/libs/Makefile src/libs/data-staging/Makefile src/libs/data-staging/test/Makefile src/libs/data-staging/examples/Makefile src/services/Makefile src/services/a-rex/Makefile src/services/a-rex/arc-arex src/services/a-rex/arc-arex.service src/services/a-rex/arc-arex-start src/services/a-rex/arc-arex-ws src/services/a-rex/arc-arex-ws.service src/services/a-rex/arc-arex-ws-start src/services/a-rex/a-rex-backtrace-collect src/services/a-rex/a-rex-backtrace-collect.8 src/services/a-rex/perferator src/services/a-rex/update-controldir src/services/a-rex/grid-manager/arc-blahp-logger.8 src/services/a-rex/grid-manager/gm-jobs.8 src/services/a-rex/rest/Makefile src/services/a-rex/rest/test/Makefile src/services/a-rex/delegation/Makefile src/services/a-rex/grid-manager/Makefile src/services/a-rex/grid-manager/accounting/Makefile src/services/a-rex/grid-manager/conf/Makefile src/services/a-rex/grid-manager/files/Makefile src/services/a-rex/grid-manager/jobs/Makefile src/services/a-rex/grid-manager/log/Makefile src/services/a-rex/grid-manager/mail/Makefile src/services/a-rex/grid-manager/misc/Makefile src/services/a-rex/grid-manager/run/Makefile src/services/a-rex/internaljobplugin/Makefile src/services/a-rex/infoproviders/Makefile src/services/a-rex/infoproviders/CEinfo.pl src/services/a-rex/infoproviders/ConfigCentral.pm src/services/a-rex/infoproviders/PerfData.pl src/services/a-rex/infoproviders/test/Makefile src/services/a-rex/lrms/Makefile src/services/a-rex/lrms/test/Makefile src/services/a-rex/lrms/lrms_common.sh src/services/a-rex/lrms/condor/Makefile src/services/a-rex/lrms/condor/scan-condor-job src/services/a-rex/lrms/condor/cancel-condor-job src/services/a-rex/lrms/condor/submit-condor-job src/services/a-rex/lrms/fork/Makefile src/services/a-rex/lrms/fork/scan-fork-job src/services/a-rex/lrms/fork/submit-fork-job src/services/a-rex/lrms/fork/cancel-fork-job src/services/a-rex/lrms/ll/Makefile src/services/a-rex/lrms/ll/submit-ll-job src/services/a-rex/lrms/ll/cancel-ll-job src/services/a-rex/lrms/ll/scan-ll-job src/services/a-rex/lrms/lsf/Makefile src/services/a-rex/lrms/lsf/submit-lsf-job src/services/a-rex/lrms/lsf/cancel-lsf-job src/services/a-rex/lrms/lsf/scan-lsf-job src/services/a-rex/lrms/pbs/Makefile src/services/a-rex/lrms/pbs/submit-pbs-job src/services/a-rex/lrms/pbs/cancel-pbs-job src/services/a-rex/lrms/pbs/scan-pbs-job src/services/a-rex/lrms/pbspro/Makefile src/services/a-rex/lrms/pbspro/submit-pbspro-job src/services/a-rex/lrms/pbspro/cancel-pbspro-job src/services/a-rex/lrms/pbspro/scan-pbspro-job src/services/a-rex/lrms/sge/Makefile src/services/a-rex/lrms/sge/submit-sge-job src/services/a-rex/lrms/sge/scan-sge-job 
src/services/a-rex/lrms/sge/cancel-sge-job src/services/a-rex/lrms/slurm/Makefile src/services/a-rex/lrms/slurm/submit-SLURM-job src/services/a-rex/lrms/slurm/scan-SLURM-job src/services/a-rex/lrms/slurm/cancel-SLURM-job src/services/a-rex/lrms/slurm/test/Makefile src/services/a-rex/lrms/slurm/test/scan/Makefile src/services/a-rex/lrms/slurm/test/submit/Makefile src/services/a-rex/lrms/boinc/Makefile src/services/a-rex/lrms/boinc/submit-boinc-job src/services/a-rex/lrms/boinc/scan-boinc-job src/services/a-rex/lrms/boinc/cancel-boinc-job src/services/a-rex/rte/Makefile src/services/a-rex/rte/ENV/PROXY src/services/a-rex/rte/ENV/CANDYPOND src/services/a-rex/schema/Makefile src/services/candypond/Makefile src/services/data-staging/Makefile src/services/data-staging/arc-datadelivery-service src/services/data-staging/arc-datadelivery-service.service src/services/data-staging/arc-datadelivery-service-start src/services/ldap-infosys/Makefile src/services/ldap-infosys/create-bdii-config src/services/ldap-infosys/create-slapd-config src/services/ldap-infosys/arc-infosys-ldap src/services/ldap-infosys/arc-infosys-ldap.service src/services/ldap-infosys/arc-infosys-ldap-slapd.service src/services/monitor/Makefile src/services/monitor/monitor src/services/monitor/README src/services/monitor/man/Makefile src/services/monitor/man/monitor.7 src/services/monitor/includes/Makefile src/services/monitor/mon-icons/Makefile src/services/monitor/lang/Makefile src/services/examples/Makefile src/services/examples/echo_python/Makefile src/services/wrappers/Makefile src/services/wrappers/python/Makefile src/services/wrappers/python/schema/Makefile src/clients/Makefile src/clients/data/Makefile src/clients/credentials/Makefile src/clients/compute/Makefile src/clients/pyarcrest/Makefile src/tests/Makefile src/tests/echo/Makefile src/tests/echo/perftest.1 src/tests/echo/echo_service.xml.example src/tests/echo/schema/Makefile src/tests/policy-delegation/Makefile src/tests/delegation/Makefile src/tests/translator/Makefile src/tests/xpath/Makefile src/tests/arcpolicy/Makefile src/tests/perf/Makefile src/tests/perf/arcperftest.1 src/tests/client/Makefile src/tests/lrms/Makefile src/utils/arc-exporter/Makefile src/utils/arc-exporter/arc-exporter src/utils/archery/Makefile src/utils/archery/archery-manage src/utils/python/Makefile src/utils/python/arccandypond src/utils/python/arcctl src/utils/python/arcctl.1 src/utils/python/jura-ng src/utils/python/arc/Makefile src/utils/python/arc/gen_paths_dist.sh src/utils/python/arc/utils/Makefile src/utils/python/arc/control/Makefile src/utils/hed/wsdl2hed.1 src/utils/hed/arcplugin.1 src/utils/hed/Makefile src/utils/Makefile src/wn/Makefile src/doc/Makefile src/doc/arc.conf.5 swig/Makefile python/Makefile python/Doxyfile.api python/python/Makefile python/python/arc/Makefile python/altpython/Makefile python/altpython/arc/Makefile python/test/Makefile python/test/python/Makefile python/test/altpython/Makefile python/examples/Makefile po/Makefile.in include/Makefile debian/Makefile debian/changelog.deb nordugrid-arc.spec src/hed/daemon/arched.8 src/hed/daemon/scripts/arched src/hed/daemon/scripts/arched.service src/hed/daemon/scripts/arched-start src/doxygen/Makefile ]) AC_CONFIG_FILES([src/utils/python/arcconfig-parser], [chmod +x src/utils/python/arcconfig-parser]) AC_OUTPUT AC_MSG_RESULT([ Unit testing: ${enables_cppunit} Python binding: ${enables_swig_python} ($PYTHON_VERSION) Alt.Python binding: ${enables_altpython} ($ALTPYTHON_VERSION) Available third-party features: GridFTP: 
${enables_gridftp} GFAL: ${enables_gfal} S3: ${enables_s3} Xrootd: ${enables_xrootd} xmlsec1: ${enables_xmlsec1} NSS: ${enables_nss} SQLite: ${enables_sqlite} LDNS: ${enables_ldns} Enabled features: Local jobs info in SQLite: ${enables_sqlitejstore} Systemd Integration: ${enables_systemd} Included components: HED: ${enables_hed} A-REX service: ${enables_a_rex_service} Internal plugin: ${enables_internal} LDAP Info service: ${enables_ldap_service} CANDYPOND service: ${enables_candypond} DATADELIVERY service: ${enables_datadelivery_service} COMPUTE clients: ${enables_compute_client} DATA clients: ${enables_data_client} CREDENTIAL clients: ${enables_credentials_client} ARC REST client: ${enables_arcrest_client} SRM client (DMC): ${enables_srm_dmc} Documentation: ${enables_doc} Monitoring: LDAP Monitor ${enables_monitor} ]) nordugrid-arc-7.1.1/PaxHeaders/compile0000644000000000000000000000013215067751346014727 xustar0030 mtime=1759498982.901737426 30 atime=1759498982.900722112 30 ctime=1759499024.700012609 nordugrid-arc-7.1.1/compile0000755000175000002070000001635015067751346016641 0ustar00mockbuildmock00000000000000#! /bin/sh # Wrapper for compilers which do not understand '-c -o'. scriptversion=2018-03-07.03; # UTC # Copyright (C) 1999-2020 Free Software Foundation, Inc. # Written by Tom Tromey . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . nl=' ' # We need space, tab and new line, in precisely that order. Quoting is # there to prevent tools from complaining about whitespace usage. IFS=" "" $nl" file_conv= # func_file_conv build_file lazy # Convert a $build file to $host form and store it in $file # Currently only supports Windows hosts. If the determined conversion # type is listed in (the comma separated) LAZY, no conversion will # take place. 
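# Illustrative sketch (added comment, not part of the original script): on a
# MINGW build host the call
#   func_file_conv /c/src/foo.o
# rewrites $file into its native Windows form (via 'cmd //C echo'), while
#   func_file_conv /c/src/foo.o mingw,wine
# passes a lazy list that contains 'mingw', so the path is left untouched.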
func_file_conv () { file=$1 case $file in / | /[!/]*) # absolute file, and not a UNC file if test -z "$file_conv"; then # lazily determine how to convert abs files case `uname -s` in MINGW*) file_conv=mingw ;; CYGWIN* | MSYS*) file_conv=cygwin ;; *) file_conv=wine ;; esac fi case $file_conv/,$2, in *,$file_conv,*) ;; mingw/*) file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` ;; cygwin/* | msys/*) file=`cygpath -m "$file" || echo "$file"` ;; wine/*) file=`winepath -w "$file" || echo "$file"` ;; esac ;; esac } # func_cl_dashL linkdir # Make cl look for libraries in LINKDIR func_cl_dashL () { func_file_conv "$1" if test -z "$lib_path"; then lib_path=$file else lib_path="$lib_path;$file" fi linker_opts="$linker_opts -LIBPATH:$file" } # func_cl_dashl library # Do a library search-path lookup for cl func_cl_dashl () { lib=$1 found=no save_IFS=$IFS IFS=';' for dir in $lib_path $LIB do IFS=$save_IFS if $shared && test -f "$dir/$lib.dll.lib"; then found=yes lib=$dir/$lib.dll.lib break fi if test -f "$dir/$lib.lib"; then found=yes lib=$dir/$lib.lib break fi if test -f "$dir/lib$lib.a"; then found=yes lib=$dir/lib$lib.a break fi done IFS=$save_IFS if test "$found" != yes; then lib=$lib.lib fi } # func_cl_wrapper cl arg... # Adjust compile command to suit cl func_cl_wrapper () { # Assume a capable shell lib_path= shared=: linker_opts= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. eat=1 case $2 in *.o | *.[oO][bB][jJ]) func_file_conv "$2" set x "$@" -Fo"$file" shift ;; *) func_file_conv "$2" set x "$@" -Fe"$file" shift ;; esac ;; -I) eat=1 func_file_conv "$2" mingw set x "$@" -I"$file" shift ;; -I*) func_file_conv "${1#-I}" mingw set x "$@" -I"$file" shift ;; -l) eat=1 func_cl_dashl "$2" set x "$@" "$lib" shift ;; -l*) func_cl_dashl "${1#-l}" set x "$@" "$lib" shift ;; -L) eat=1 func_cl_dashL "$2" ;; -L*) func_cl_dashL "${1#-L}" ;; -static) shared=false ;; -Wl,*) arg=${1#-Wl,} save_ifs="$IFS"; IFS=',' for flag in $arg; do IFS="$save_ifs" linker_opts="$linker_opts $flag" done IFS="$save_ifs" ;; -Xlinker) eat=1 linker_opts="$linker_opts $2" ;; -*) set x "$@" "$1" shift ;; *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) func_file_conv "$1" set x "$@" -Tp"$file" shift ;; *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) func_file_conv "$1" mingw set x "$@" "$file" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -n "$linker_opts"; then linker_opts="-link$linker_opts" fi exec "$@" $linker_opts exit 1 } eat= case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: compile [--help] [--version] PROGRAM [ARGS] Wrapper for compilers which do not understand '-c -o'. Remove '-o dest.o' from ARGS, run PROGRAM with the remaining arguments, and rename the output as expected. If you are trying to build a whole package this is not the right script to run: please start by reading the file 'INSTALL'. Report bugs to . EOF exit $? ;; -v | --v*) echo "compile $scriptversion" exit $? ;; cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) func_cl_wrapper "$@" # Doesn't return... ;; esac ofile= cfile= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. # So we strip '-o arg' only if arg is an object. 
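# Illustrative sketch (assumed invocation, not from the script itself): for
#   compile cc -c -o sub/foo.o foo.c
# this loop records ofile=sub/foo.o and drops '-o sub/foo.o' from the
# arguments; the tail of the script then runs 'cc -c foo.c' and moves the
# resulting ./foo.o to sub/foo.o, serialized through a lock directory so it
# stays safe under parallel make.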
eat=1 case $2 in *.o | *.obj) ofile=$2 ;; *) set x "$@" -o "$2" shift ;; esac ;; *.c) cfile=$1 set x "$@" "$1" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -z "$ofile" || test -z "$cfile"; then # If no '-o' option was seen then we might have been invoked from a # pattern rule where we don't need one. That is ok -- this is a # normal compilation that the losing compiler can handle. If no # '.c' file was seen then we are probably linking. That is also # ok. exec "$@" fi # Name of file we expect compiler to create. cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` # Create the lock directory. # Note: use '[/\\:.-]' here to ensure that we don't use the same name # that we are using for the .o file. Also, base the name on the expected # object file name, since that is what matters with a parallel build. lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d while true; do if mkdir "$lockdir" >/dev/null 2>&1; then break fi sleep 1 done # FIXME: race condition here if user kills between mkdir and trap. trap "rmdir '$lockdir'; exit 1" 1 2 15 # Run the compile. "$@" ret=$? if test -f "$cofile"; then test "$cofile" = "$ofile" || mv "$cofile" "$ofile" elif test -f "${cofile}bj"; then test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" fi rmdir "$lockdir" exit $ret # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: nordugrid-arc-7.1.1/PaxHeaders/AUTHORS0000644000000000000000000000013215067751327014423 xustar0030 mtime=1759498967.622758609 30 atime=1759498967.805492739 30 ctime=1759499024.697799998 nordugrid-arc-7.1.1/AUTHORS0000644000175000002070000000447515067751327016327 0ustar00mockbuildmock00000000000000Individual contributors to the source code ------------------------------------------ David Cameron Péter Dóbé Mattias Ellert Thomas Frågåt Ali Gholami Michael Glodek Jørgen Beck Hansen Henrik Thostrup Jensen Daniel Johansson Johan Jönemo Dmytro Karpenko Tamás Kazinczy Marek Kočan Aleksandr Konstantinov Balázs Kónya Hajo Nils Krabbenhöft Andrew Lahiff Juha Lento Peter Lundgaard Rosendahl Iván Márton Luca Mazzaferro Bjarte Mohn Steffen Möller Zsombor Nagy Aleksei Nazarov Jon Kerr Nilsen Markus Nordén Weizhong Qiang Gábor Rőczei Florido Paganelli Andrii Salnikov Martin Savko Martin Skou Andersen Oxana Smirnova Ferenc Szalai Gábor Szigeti Christian Ulrik Søttrup Adrian Taga Salman Zubair Toor Olli Tourunen Petter Urkedal Wenjing Wu Anders Wäänänen Thomas Zangerl Organisations employing contributors ------------------------------------ University of Copenhagen (Denmark) NORDUnet - Nordic Infrastructure for Research and Education (Denmark) CSC - IT Center for Science Ltd (Finland) University of Lübeck (Germany) NIIFI - National Information Infrastructure Development Institute (Hungary) University of Oslo (Norway) NordForsk (Norway) Pavol Jozef Šafárik University in Košice (Slovakia) Linköping University (Sweden) Lund University (Sweden) Royal Institute of Technology (Sweden) Uppsala University (Sweden) Taras Shevchenko National University of Kyiv (Ukraine) nordugrid-arc-7.1.1/PaxHeaders/LICENSE0000644000000000000000000000013215067751327014360 xustar0030 mtime=1759498967.622758609 30 atime=1759498967.805492739 30 ctime=1759499024.711108683 nordugrid-arc-7.1.1/LICENSE0000644000175000002070000002367615067751327016270 0ustar00mockbuildmock00000000000000
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS nordugrid-arc-7.1.1/PaxHeaders/py-compile0000644000000000000000000000013215067751360015351 xustar0030 mtime=1759498992.174918057 30 atime=1759498992.173863018 30 ctime=1759499031.418348444 nordugrid-arc-7.1.1/py-compile0000755000175000002070000001216415067751360017262 0ustar00mockbuildmock00000000000000#!/bin/sh # py-compile - Compile a Python program scriptversion=2020-02-19.23; # UTC # Copyright (C) 2000-2020 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. 
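# A minimal usage sketch (added comment; paths are illustrative): the
# interpreter is taken from the PYTHON environment variable, e.g.
#   PYTHON=python3 ./py-compile --destdir "$DESTDIR" --basedir /usr/share/test test.py
# When PYTHON is unset, the script falls back to plain 'python' below.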
# This file is maintained in Automake, please report # bugs to or send patches to # . if [ -z "$PYTHON" ]; then PYTHON=python fi me=py-compile usage_error () { echo "$me: $*" >&2 echo "Try '$me --help' for more information." >&2 exit 1 } basedir= destdir= while test $# -ne 0; do case "$1" in --basedir) if test $# -lt 2; then usage_error "option '--basedir' requires an argument" else basedir=$2 fi shift ;; --destdir) if test $# -lt 2; then usage_error "option '--destdir' requires an argument" else destdir=$2 fi shift ;; -h|--help) cat <<\EOF Usage: py-compile [--help] [--version] [--basedir DIR] [--destdir DIR] FILES..." Byte compile some python scripts FILES. Use --destdir to specify any leading directory path to the FILES that you don't want to include in the byte compiled file. Specify --basedir for any additional path information you do want to be shown in the byte compiled file. Example: py-compile --destdir /tmp/pkg-root --basedir /usr/share/test test.py test2.py Report bugs to . EOF exit $? ;; -v|--version) echo "$me $scriptversion" exit $? ;; --) shift break ;; -*) usage_error "unrecognized option '$1'" ;; *) break ;; esac shift done files=$* if test -z "$files"; then usage_error "no files given" fi # if basedir was given, then it should be prepended to filenames before # byte compilation. if [ -z "$basedir" ]; then pathtrans="path = file" else pathtrans="path = os.path.join('$basedir', file)" fi # if destdir was given, then it needs to be prepended to the filename to # byte compile but not go into the compiled file. if [ -z "$destdir" ]; then filetrans="filepath = path" else filetrans="filepath = os.path.normpath('$destdir' + os.sep + path)" fi python_major=$($PYTHON -V 2>&1 | sed -e 's/.* //;s/\..*$//;1q') if test -z "$python_major"; then echo "$me: could not determine $PYTHON major version, guessing 3" >&2 python_major=3 fi # The old way to import libraries was deprecated. if test "$python_major" -le 2; then import_lib=imp import_test="hasattr(imp, 'get_tag')" import_call=imp.cache_from_source import_arg2=', False' # needed in one call and not the other else import_lib=importlib import_test="hasattr(sys.implementation, 'cache_tag')" import_call=importlib.util.cache_from_source import_arg2= fi $PYTHON -c " import sys, os, py_compile, $import_lib files = '''$files''' sys.stdout.write('Byte-compiling python modules...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() if $import_test: py_compile.compile(filepath, $import_call(filepath), path) else: py_compile.compile(filepath, filepath + 'c', path) sys.stdout.write('\n')" || exit $? # this will fail for python < 1.5, but that doesn't matter ... 
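# Second, optimized pass (added comment): rerunning under -O byte-compiles
# the same modules to their optimized form (.pyo for python 2, opt-tagged
# __pycache__ entries on newer python 3); failures are deliberately
# tolerated ('2>/dev/null || :') since the optimized variants are
# best-effort.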
$PYTHON -O -c " import sys, os, py_compile, $import_lib # pypy does not use .pyo optimization if hasattr(sys, 'pypy_translation_info'): sys.exit(0) files = '''$files''' sys.stdout.write('Byte-compiling python modules (optimized versions) ...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() if $import_test: py_compile.compile(filepath, $import_call(filepath$import_arg2), path) else: py_compile.compile(filepath, filepath + 'o', path) sys.stdout.write('\n')" 2>/dev/null || : # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: nordugrid-arc-7.1.1/PaxHeaders/NOTICE0000644000000000000000000000013215067751327014257 xustar0030 mtime=1759498967.622758609 30 atime=1759498967.805492739 30 ctime=1759499024.712370738 nordugrid-arc-7.1.1/NOTICE0000644000175000002070000000230015067751327016154 0ustar00mockbuildmock00000000000000Advanced Resource Connector (ARC) This product includes Advanced Resource Connector (ARC) software. The software is developed by the NorduGrid collaboration (http://www.nordugrid.org) with financial support from the European Commission and Nordic Research Councils. Unless stated otherwise, the Copyright is collectively owned by individual contributors and contributing organisations as listed in the AUTHORS file. The software is licensed under the Apache License, Version 2.0 (the "License"); you may not use files from this software distribution except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Support for json parsing is provided by the cJSON library written by Dave Gamble and licensed under the MIT license. This code is in src/external/cJSON. The original software is available from http://cjson.sourceforge.net/ nordugrid-arc-7.1.1/PaxHeaders/src0000644000000000000000000000013215067751427014066 xustar0030 mtime=1759499031.597462061 30 atime=1759499034.762510154 30 ctime=1759499031.597462061 nordugrid-arc-7.1.1/src/0000755000175000002070000000000015067751427016045 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327016176 xustar0030 mtime=1759498967.675490764 30 atime=1759498967.824493028 30 ctime=1759499024.735275581 nordugrid-arc-7.1.1/src/Makefile.am0000644000175000002070000000166515067751327020110 0ustar00mockbuildmock00000000000000if HED_ENABLED if DOC_ENABLED BUILD_SOURCES = external doc hed libs tests services clients utils wn doxygen else BUILD_SOURCES = external doc hed libs tests services clients utils wn endif else BUILD_SOURCES = clients endif SUBDIRS = $(BUILD_SOURCES) DIST_SUBDIRS = external doc hed libs tests services clients utils doxygen wn # This won't work in case of cross-compilation. Please # some autotools experts fix it. 
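# Sketch of what the hook below effectively runs on a native (non-cross)
# build (added comment; the trailing arguments are abbreviated here):
#   LD_LIBRARY_PATH=$(DESTDIR)$(libdir) arcplugin -c $(DESTDIR)$(pkglibdir) ...
# i.e. the installed plugin directories are scanned and .apd descriptor
# files are generated next to the modules; uninstall-local removes those
# .apd files again.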
if HED_ENABLED install-exec-hook: if test "x$(build_triplet)" = "x$(host_triplet)"; then env LD_LIBRARY_PATH=$(DESTDIR)$(libdir):$(LD_LIBRARY_PATH) $(top_builddir)/src/utils/hed/arcplugin$(EXEEXT) -c $(DESTDIR)$(pkglibdir) -c $(DESTDIR)$(pkglibdir)/test -c $(DESTDIR)$(pkglibdir)/external; else echo "No .apd files since we are cross-compiling"; fi uninstall-local: test "x$(build_triplet)" = "x$(host_triplet)" && rm -f $(DESTDIR)$(pkglibdir)/*.apd $(DESTDIR)$(pkglibdir)/test/*.apd $(DESTDIR)$(pkglibdir)/external/*.apd endif nordugrid-arc-7.1.1/src/PaxHeaders/doc0000644000000000000000000000013215067751420014624 xustar0030 mtime=1759499024.818608797 30 atime=1759499034.762510154 30 ctime=1759499024.818608797 nordugrid-arc-7.1.1/src/doc/0000755000175000002070000000000015067751420016603 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/doc/PaxHeaders/arc.conf.5.in0000644000000000000000000000013215067751327017073 xustar0030 mtime=1759498967.682844158 30 atime=1759498967.826493058 30 ctime=1759499024.817636297 nordugrid-arc-7.1.1/src/doc/arc.conf.5.in0000644000175000002070000000677615067751327021005 0ustar00mockbuildmock00000000000000.TH arc.conf 5 "@DATE@" "NorduGrid ARC @VERSION@" "NorduGrid ARC" .SH NAME arc.conf \- ARC services configuration .SH DESCRIPTION .PP ARC has two separate configuration files - one for client tools and another for services. This man page describes the services configuration file. For client configuration please see "ARC Clients User Manual" at http://www.nordugrid.org/documents/arc-ui.pdf .PP This man page IS NOT the ultimate source of information about \fBarc.conf\fR. The reference documentation containing all configuration option description is the \fBarc.conf.reference\fR file that can be found in \fB@prefix@/@pkgdatasubdir@/doc\fR. .PP To get inline help about a particular configuration option use the \fBarcctl (1)\fR tool. .PP For example, to get the description of the \fBsessiondir\fR option in the \fB[arex]\fR block run .IP \f(CW# arcctl config describe arex sessiondir\fR .PP .SH BASIC STRUCTURE .PP A block configures an ARC service, a service interface, a utility or a subsystem. Enabling (turning on) a functionality, a service or an interface requires the presence of the appropriate configuration block. To disable a service or an interface, simply delete or comment out the related arc.conf block (you may need to rerun the corresponding startup script). .PP A block is identified by its block header. A block header may consist of keywords and optionally block identifiers. Keywords may be separated by "/" and used to label subblocks (e.g. \fI[arex/jura]\fR), while block identifiers are separated by ":" from keywords. .PP For example, in the \fI[queue:short]\fR block header \fIqueue\fR is a keyword while \fIshort\fR is an identifier, e.g. the name of the queue. Block headers must be UNIQUE. .PP A block starts with a unique \fI[keyword:identifier]\fR blockheader and ends where the next block starts, that is at the next \fI[blockheader]\fR directive. .PP A block may have sub-blocks e.g. the various interfaces of the AREX service are configured via sub-blocks (e.g. \fI[arex/ws]\fR). When a sub-block is enabled then the corresponding parent block MUST also appear in the arc.conf file. .PP Configuration blocks contain (config option, config value) pairs following the syntax: .I config_option=value element [optional value element] on a single line. .PP Each of the configuration options has a well-defined default that is specified in this reference file.
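.PP
As an illustration of the block and option syntax described above (the block and option names are taken from this page; the value is an example only), a fragment enabling A-REX with a single queue could read:
.IP \f(CW[arex]\fR
.IP \f(CWsessiondir=/var/spool/arc/sessiondir\fR
.IP \f(CW[queue:short]\fR
.PP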
The default can take either a pre-set value, a special substitution or the keyword \fIundefined\fR. Configuration options within an enabled block take their default values in case they are missing (or commented out). Configuration parameters with undefined defaults take no values. Furthermore, configuration options within disabled blocks take no values either. .PP Configuration blocks related to authorization are ORDER-DEPENDENT! The authorization blocks \fI[authgroup:name]\fR MUST be defined before they are used in the other blocks. Furthermore, the order of the authorization blocks itself may have influence over authorization decisions! .PP Note that quotes around the configuration value(s) must NOT be used any longer. .PP Note that the arc.conf is CASE-SENSITIVE! .SH FILES .I /etc/arc.conf, .I ${ARC_LOCATION}/etc/arc.conf, .SH REPORTING BUGS Report bugs to http://bugzilla.nordugrid.org/ .SH COPYRIGHT APACHE LICENSE Version 2.0 .SH AUTHOR ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org .SH SEE ALSO .BR arcctl (1), nordugrid-arc-7.1.1/src/doc/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327016742 xustar0029 mtime=1759498967.68248103 30 atime=1759498967.826493058 30 ctime=1759499024.815642976 nordugrid-arc-7.1.1/src/doc/Makefile.am0000644000175000002070000000010715067751327020643 0ustar00mockbuildmock00000000000000man_MANS = arc.conf.5 EXTRA_DIST = arc.conf.reference arc.conf.DELETED nordugrid-arc-7.1.1/src/doc/PaxHeaders/Makefile.in0000644000000000000000000000013115067751347016755 xustar0030 mtime=1759498983.778049379 30 atime=1759499020.310290552 29 ctime=1759499024.81663832 nordugrid-arc-7.1.1/src/doc/Makefile.in0000644000175000002070000005263615067751347020664 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/doc ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc.conf.5 CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| 
|;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man5dir = $(mandir)/man5 am__installdirs = "$(DESTDIR)$(man5dir)" NROFF = nroff MANS = $(man_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/arc.conf.5.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ 
GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = 
@SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ man_MANS = arc.conf.5 EXTRA_DIST = arc.conf.reference arc.conf.DELETED all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/doc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/doc/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc.conf.5: $(top_builddir)/config.status $(srcdir)/arc.conf.5.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man5: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man5dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man5dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man5dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.5[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^5][0-9a-z]*$$,5,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man5dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man5dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man5dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man5dir)" || exit $$?; }; \ done; } uninstall-man5: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man5dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.5[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^5][0-9a-z]*$$,5,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man5dir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man5dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man5 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man5 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man5 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am uninstall-man \ uninstall-man5 .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-7.1.1/src/doc/PaxHeaders/arc.conf.DELETED0000644000000000000000000000013215067751327017370 xustar0030 mtime=1759498967.682844158 30 atime=1759498967.826493058 30 ctime=1759499024.819632583 nordugrid-arc-7.1.1/src/doc/arc.conf.DELETED0000644000175000002070000000370615067751327021300 0ustar00mockbuildmock00000000000000###############################################################################
##
## This is the arc.conf DELETED file that contains all the configuration blocks
## and options that have been DELETED in ARC version 7.0.0 and later

### The [deleted:blocks] block ##########################

#[deleted:blocks]

## The following blocks and the corresponding functionality are removed completely from the ARC7
## release and should be cleaned up from previous ARC6 configuration:
##
## [authtokens] (always enabled in ARC7)
## [lrms/ssh]
## [arex/ws/publicinfo] (always enabled in ARC7)
## [arex/ws/argus]
## [gridftpd]
## [gridftpd/jobs]
## [gridftpd/filedir]
## [infosys/glue1]
## [infosys/glue1/site-bdii]
## [acix-scanner]
## [acix-index]
## [userlist:name]
## [nordugridmap]
##

### end of the [deleted:blocks] ###################

## NOTE that options marked DELETED without stating a version were deleted in version 7.0.0
## compared to the latest supported ARC6 configuration.

### The [authgroup:groupname] blocks ##########################

#[authgroup:groupname]

## userlist = ulist_name [ulist_name ...] - Match users belonging to ulist_name defined
## in an earlier [userlist:ulist_name] block. Multiple userlist names are allowed for
## this rule.
## sequenced
## default: undefined
#userlist=biousers
## CHANGE: DELETED
##
##

### end of the [authgroup:groupname] blocks ###################

### The [arex/data-staging] block ###########

#[arex/data-staging]

## use_remote_acix = URL - If configured then the
## ARC Cache Index, available at the URL, will be queried for every input file
## specified in a job description and any replicas found in sites with accessible caches
## will be added to the replica list of the input file.
## The replicas will be tried in the order specified by the preferredpattern variable.
## default: undefined
#use_remote_acix=https://cacheindex.ndgf.org:6443/data/index
## CHANGE: DELETED
##

### end of the [arex/data-staging] block ###########

nordugrid-arc-7.1.1/src/doc/PaxHeaders/arc.conf.reference0000644000000000000000000000013215067751327020260 xustar0030 mtime=1759498967.683490885 30 atime=1759498967.826493058 30 ctime=1759499024.818608797 nordugrid-arc-7.1.1/src/doc/arc.conf.reference0000644000175000002070000033343115067751327022171 0ustar00mockbuildmock00000000000000####################################################################
##
## This is the arc.conf REFERENCE DOCUMENT defining the configuration blocks and
## configuration options for the ARC services.
##
##! WARNING: this file will not work as a configuration template!
##! NEVER USE THIS DOCUMENT AS A CONFIGURATION FILE!
##!
##! WARNING: this file is machine-read to extract defaults and render the web version!
##! PLEASE BE CAREFUL EDITING IT AND FOLLOW THE MARKUP!
##!
##! Reference markup used by web-rendering:
##! - Text delimited by double-quotes("text") or square brackets([text]) is rendered monospace.
##! - Lines started with "##!" are ignored in the web-rendering.
##! - There should be an empty line before a new option definition.
##! - Lines started with "## " are rendered as option description text.
##! - Lines started with "#text" are rendered as example blocks.
##! - Line ending with colon followed by space (": ") starts a monospace block.
##! Content of the block should be indented by spaces. The block ends when the indent ends.
##! - Line ending with colon (":") starts a list. Following lines should be indented and prefixed
##! either with dash ("-") or numbers. Another option is to use definition lists with
##! further indented description text (see for example "voms_processing")
##! - Lines started with "NOTE that" are rendered as a Note block. If you need multiple lines, indent
##! subsequent lines with 2 spaces.
##! - Lines started with "CHANGE" or "TODO" are rendered as corresponding Warning blocks.
##! - Special keywords "## multivalued", "## sequenced" and "## allowedvalues:" are parsed.
##
## The arc.conf configuration file consists of the following blocks:
##
## [common]
## [authgroup:groupname]
## [mapping]
## [lrms]
## [arex]
## [arex/cache]
## [arex/cache/cleaner]
## [arex/data-staging]
## [arex/ws]
## [arex/ws/jobs]
## [arex/ws/cache]
## [arex/ws/candypond]
## [arex/jura]
## [arex/jura/sgas:targetname]
## [arex/jura/apel:targetname]
## [arex/ganglia]
## [infosys]
## [infosys/ldap]
## [infosys/nordugrid]
## [infosys/glue2]
## [infosys/glue2/ldap]
## [infosys/cluster]
## [infosys/accesscontrol]
## [queue:name]
## [datadelivery-service]
## [custom:blockname]
##
## If an "arc.conf.d" directory exists next to the "arc.conf" file, all files in this directory
## ending with ".conf" will be read in alphabetical order and their content merged with
## the arc.conf file.
##
## A block configures an ARC service, a service interface, a utility or a subsystem.
## Enabling (turning on) a functionality, a service or an interface requires the presence of the
## appropriate configuration block. To disable a service or an interface, simply delete or
## comment out the related arc.conf block (you may need to rerun the corresponding startup script).
##
## The [common] block is mandatory even if not a single option is specified within. The presence of
## the block turns on the default values for the configuration options within the block.
##
## As an example, in order to set up a minimalistic ARC CE offering no external interfaces
## you need to configure at least the [common], [mapping], [arex], [lrms],
## [infosys] and [queue:name] blocks.
##
## As another example, an ARC-based data offloader would require the [common] and the
## [datadelivery-service] blocks.
##
## A block is identified by its block header. A block header may consist of
## keywords and optionally block identifiers. Keywords may be separated by "/"
## and used to label subblocks (e.g. [arex/jura]), while block identifiers
## are separated by ":" from keywords. For example, in the [queue:short]
## block header "queue" is a keyword while "short" is an identifier, e.g. the name of the queue.
## Block headers must be UNIQUE.
##
## A block starts with a unique [keyword:identifier] blockheader and ends where the next block
## starts, that is at the next [blockheader] directive.
##
## A block may have sub-blocks, e.g. the various interfaces of the A-REX service are configured via
## sub-blocks (e.g. [arex/ws]). When a sub-block is enabled then the corresponding parent block must
## also appear in the arc.conf file.
##
## Configuration blocks contain (config option, config value) pairs following
## this syntax on a single line:
## config_option=value element [optional value element]
## NOTE that quotes around the configuration value(s) must NOT be used.
## NOTE that the arc.conf is CASE-SENSITIVE!
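##
## As an illustration only (the queue name and the comment text below are arbitrary
## examples, not recommendations), a block header followed by a single option would
## be written as:
#[queue:short]
#comment=example queue for short jobs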
##
## Space handling syntax in arc.conf
## for configuration lines: 
##   (stripped space)option(stripped space)=(stripped space)value(saved space)(value)(stripped space)
##
## and for block headers: 
##   [keyword:(stripped space)space is NOT allowed within identifier(stripped space)]
##
## Detailed textual definition:
## a) All trailing and leading spaces on each configuration line are stripped and ignored.
##    This applies both to block headers and block content.
## b) All spaces around the "=" sign in an "option=value" kind of string (after 'a' is applied)
##    are stripped and ignored. For example the line "hostname = myhost.info" is treated as
##    identical to "hostname=myhost.info".
## c) In block headers of the [keyword] kind (after 'a' is applied) no additional spaces are allowed
##    around "keyword" and inside "keyword".
## d) In block headers of the [keyword:identifier] kind (after 'a' is applied) no additional spaces
##    are allowed around "keyword" and inside both "keyword" and "identifier".
##    Spaces ARE allowed around the "identifier" part and are stripped and ignored.
##
## Mandatory configuration options are indicated by an asterisk prefix to the
## option name e.g: "*mandatory_configoption". Mandatory options with undefined values
## will result in the service stopping during the startup process.
##
## Each of the configuration options has a well-defined default that is specified in this reference
## file. The default can take either a pre-set value, a special substitution or the keyword
## "undefined". Configuration options within an enabled block take their default values in case
## they are missing (or commented out). Configuration parameters with "undefined" defaults take
## no values. Furthermore, configuration options within disabled blocks take no values either.
##
## Configuration blocks are ORDER-DEPENDENT. The order dependency is also honoured within options
## inside a certain block.
## This means for instance that configuration blocks related to authorization MUST appear before they
## are used in blocks such as [mapping], [arex/ws/jobs] or [gridftp/jobs]. Order dependency within a block is
## for instance important when it comes to authorization decisions, as the first matching rule is used.
##
## The ARC configuration parser makes sure that blocks are AUTOMATICALLY RE-ORDERED in accordance with the
## defaults-defined order of block keywords. Several blocks with the same keyword but different
## identifiers (named blocks) will be sorted according to read-out order.
##
## When the same block is defined in several files in the "arc.conf.d" directory, configuration options
## inside this block are appended in the read-out order. Complex logic (like overrides) is not supported.
## It is advised to use "arcctl config dump" to verify the desired running configuration after the merge.
##
## Below we give a detailed description of all the configuration options of the
## different configuration blocks. Every configuration option is described
## in a dedicated paragraph with the following reference syntax notation.
## This file is parsed at buildtime to assist the configuration default parsing and validation script
## and so it is important that it follows the agreed syntax: For each block or
## option please add explanatory text with two "##" followed by a space at the
## beginning of the line and then an example with a single "#" and no spaces at
## the beginning of the line.
##
## example_config_option = value [optional values] - Here comes the explanation
## of the config option.
## Mandatory configuration options are indicated by an asterisk prefix to the
## option name e.g: "*mandatory_configoption" vs. "optional_configoption".
## The explanation can be followed by the special keywords in a separate line:
## - "multivalued" - used to indicate that the config option can be specified multiple times.
##   This forms a set of values for the same configuration option irrespective of line order.
## - "sequenced" - used to indicate that the config option is a part of the sequence and its
##   effect on the configuration depends on the line order. A sequenced option can be specified
##   several times in the configuration sequence independently.
## Missing such keywords means the config option can only occur once in the arc.conf.
## By default the arc.conf config options are optional and single-valued.
## For some config options only a fixed set of values is allowed. These are
## listed in a separate line after the "allowedvalues" keyword.
## The default of every config option is explicitly given in the "default:" line.
## The default can be a pre-set value, a substitution or the "undefined" keyword.
## The last line of the paragraph is always a valid example preceded by a single "#"
## multivalued
## allowedvalues: 12 34 56
## default: 34
#example_config_option=56

###########################################################################

### The [common] block ##############################################

## Common configuration affecting all ARC components, usually related to networking, security
## or service behaviour. This block is mandatory.
## The common block options may be overridden by the specific sections of the components later.
## The [common] block always appears at the beginning of the config file. The config options set within
## this block are available for all the other blocks and are thus shared by the different components of ARC.

#[common]

## hostname = string - The FQDN of the frontend on which the ARC services are deployed.
## default: $EXEC{hostname -f}
#hostname=myhost.org

## http_proxy = url - The http proxy server.
## This setting affects all client HTTP(s) requests that are initiated by ARC core services,
## including data staging, SAML communications, and pushing SGAS accounting records.
## This variable is similar to setting the ARC_HTTP_PROXY environment variable.
## default: undefined
#http_proxy=proxy.mydomain.org:3128

### X509 related parameters

## x509_host_key = path - Server credential location.
## Sets the full path to the host private key.
## This variable is similar to the GSI environment variable "X509_USER_KEY".
## If indicated, the variable can be set individually for each service/component in the
## corresponding block.
## default: /etc/grid-security/hostkey.pem
#x509_host_key=/etc/grid-security/hostkey.pem

## x509_host_cert = path - Server credential location. Sets the full
## path to the host public certificate.
## This variable is similar to the GSI environment variable "X509_USER_CERT".
## If indicated, the variable can be set individually for each service/component in the
## corresponding block.
## default: /etc/grid-security/hostcert.pem
#x509_host_cert=/etc/grid-security/hostcert.pem

## x509_cert_policy = keyword - layout of CA certificates.
## The following keywords are supported: grid, system, any.
## This variable defines whether the server is going to use the Globus (IGTF) layout of
## CA certificates (grid), just let OpenSSL handle that (system), or allow both options (any).
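## For instance (an illustrative note, not a separate option): a host trusting only the
## OS-packaged CA bundle would typically set "system", while a site that must accept both
## the IGTF and OS CA layouts would set "any".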
## default: grid
#x509_cert_policy=grid

## x509_cert_dir = path - Location of trusted CA certificates.
## This variable is similar to the GSI environment variable "X509_CERT_DIR".
## If indicated, the variable can be set individually for each service/component in the
## corresponding block. If x509_cert_policy is set to 'system' this variable is ignored.
## default: /etc/grid-security/certificates
#x509_cert_dir=/etc/grid-security/certificates

### VOMS related parameters

## x509_voms_dir = path - the path to the directory containing *.lsc files
## needed for verification of the VOMS service signature in the proxy-certificate.
## default: /etc/grid-security/vomsdir
#x509_voms_dir=/etc/grid-security/vomsdir

## voms_processing = keyword - Defines how to behave if errors are detected in VOMS AC processing.
## The following keywords are supported:
## "relaxed"
##   use everything that passed validation.
## "standard"
##   same as relaxed but fail if parsing errors took place and the
##   VOMS extension is marked as critical. This is the default.
## "strict"
##   fail if any parsing error was discovered
## "noerrors"
##   fail if any parsing or validation error happened.
## allowedvalues: relaxed standard strict noerrors
## default: standard
#voms_processing=strict
##
##

### end of the [common] block ##############################################

### The [authgroup:groupname] blocks ##########################

## These configuration blocks contain authorization rules.
## An [authgroup:groupname] block always defines a group of users where members of the group are
## those who satisfy the authorization rules.
## The rules within the block determine which users belong to the authgroup.
## Then, access control and identity mapping of ARC services are implemented
## by associating an authgroup with an interface, queue or a mapping rule
## using one of the "allowaccess", "denyaccess" or [mapping] block parameters.
## The authgroup should not be mistaken for a virtual organisation (VO).
## An authgroup may match a single VO if only a single check (rule) on VO membership is performed.
##
## IMPORTANT: Rules in an authgroup are processed in their order of appearance.
## The first matching rule decides the membership of the user to the authgroup
## being evaluated and the processing STOPS within that authgroup. This does not mean that
## the same user is not processed for the next authgroup: all [authgroup:groupname] blocks are
## evaluated, even if a user already has a match with one of the earlier groups.
##
## All the objects used in the rules MUST be defined before they may be used. For example,
## to create a group of authgroups you must first define the child groups.
##
## There are positively and negatively matching rules.
## If a rule is matched positively then the user tested is accepted
## into the respective group and further processing is stopped. Upon a
## negative match the user would be rejected for that group - processing
## stops too. The sign of a rule is determined by prepending the rule with
## "+" (for positive) or "-" (for negative) signs. "+" is the default and can
## be omitted. A rule may also be prepended with "!" to invert the result of the rule,
## which will let the rule match the complement of users. That complement
## operator ("!") may be combined with the operator for positive or negative
## matching.

#[authgroup:prodtesters]

## subject = certificate_subject - Rule to match a specific subject of the user's
## X.509 certificate. No masks, patterns or regular expressions are allowed.
## sequenced
## default: undefined
#subject=/O=Grid/O=Big VO/CN=Main Boss
#subject=/O=Grid/O=Big VO/CN=Deputy Boss

## file = path - Processes a list of DNs stored in an external file one per line
## in grid-mapfile format (see map_with_file from the [mapping] block, unixname is ignored)
## and adds those to the authgroup.
## sequenced
## default: undefined
#file=/etc/grid-security/local_users
#file=/etc/grid-security/atlas_users

## voms = vo group role capabilities - Match a VOMS attribute in the user's credential.
## Use "*" to match any value.
## sequenced
## default: undefined
#voms=nordugrid Guests * *
#voms=atlas students prodman *

## authgroup = group_name [group_name ...] - Match a user already belonging to one
## of the specified authgroups. The authgroup referred to here must be defined earlier in
## the arc.conf configuration file. Multiple authgroup names may be specified for this rule.
## That allows creating a hierarchical structure of authorization groups like
## "all-atlas" are those which are "atlas-users" and "atlas-admins".
## sequenced
## default: undefined
#authgroup=local_admins
#authgroup=local_admins remote_users

## plugin = timeout path [arg1 [arg2 [arg3...]]] - Run an external executable or
## function from a shared library. The rule is matched if the plugin returns 0.
## Any other return code or a timeout is treated as the rule not being matched.
## In the arguments the following substitutions are supported:
## - "%D" - subject of certificate
## - "%P" - path to proxy
##
## The environment variables passed to the plugin contain basic information about user
## authentication. The following variables are set if the corresponding information is available:
## - "X509_SUBJECT_NAME" - common name of user's certificate.
## - "BEARER_TOKEN_#_SUBJECT" - user's subject (identifier) extracted from JWT token (here # is the token's index, typically 0)
## - "BEARER_TOKEN_#_ISSUER" - issuer of the token extracted from JWT token
## - "BEARER_TOKEN_#_AUDIENCE_#" - designated audiences extracted from JWT token (here the second # is the audience's index starting from 0)
## - "BEARER_TOKEN_#_SCOPE_#" - assigned scope extracted from JWT token (here the second # is the scope's index starting from 0)
## - "BEARER_TOKEN_#_GROUP_#" - assigned WLCG group extracted from JWT token
## - "BEARER_TOKEN_#_CLAIM_<name>_#" - raw claim values of the token for claim "<name>"
##
## ARC ships with an LCAS plugin that can be enabled with the following plugin configuration.
## For more information about configuring LCAS refer to the 'Using LCAS/LCMAPS' document.
## sequenced
## default: undefined
#plugin=10 /usr/libexec/arc/arc-lcas %D %P liblcas.so /usr/lib64 /etc/lcas/lcas.db
## CHANGE: NEW environment variables in 7.0.0.

## authtokens = subject issuer audience scope group - Match OIDC token claims.
## Use "*" to match any value.
## sequenced
## default: undefined
#authtokens=e83eec5a-e2e3-43c6-bb67-df8f5ec3e8d0 https://wlcg.cloud.cnaf.infn.it/ * * *

## authtokensgen = logical expression - Match OIDC token claims.
## Expression to match. The following operators are available:
## - = - match token claim value (left part represents the claim name) to a specified string (right part), produces a boolean result
## - ~ - match token claim value (left part represents the claim name) to a regex expression (right part), produces a boolean result
## - ! - boolean negation
## - & - boolean AND
## - | - boolean OR
## - ^ - boolean XOR
## - () - brackets are used to control the priority of evaluation, without brackets all operators have the same priority
## - "" - strings can be enclosed in quotes to allow special symbols in strings; to include " in such a string use \"
## All empty spaces are optional. This functionality is experimental.
## sequenced
## default: undefined
#authtokensgen = (sub=e83eec5a-e2e3-43c6-bb67-df8f5ec3e8d0) & (iss="https://wlcg.cloud.cnaf.infn.it/") & !(email~".*\.evil\.com")
## CHANGE: NEW in 7.0.0

## all = yes|no - Matches any or no user identity. For the "yes" argument this rule
## always returns a positive match. For "no" it never matches.
## sequenced
## default: undefined
#all=yes
##
##

### end of the [authgroup:name] blocks ###############################

### The [mapping] block ##############################################

## This block defines the grid-identity to local UNIX identity mapping rules
## used by various ARC components.
##
## Rules in the [mapping] block are processed IN A SEQUENCE in line order of the
## configuration file (from top to bottom).
##
## There are two kinds of rules:
## - mapping rules that define how the particular "authgroup" members are mapped
## - policy rules that modify the mapping rules sequence processing
##
## The default policy for mapping rules processing is:
## - processing CONTINUES to the next rule if the identity of the user DOES NOT match the "authgroup"
##   specified in the rule (can be redefined with the "policy_on_nogroup" option)
## - processing STOPS if the identity of the user matches the "authgroup" specified in the mapping rule.
##   Depending on whether this mapping rule returns a valid UNIX identity, the processing can be
##   redefined with the "policy_on_map" and "policy_on_nomap" options.
##
## Policy can be redefined at any point of the configuration sequence and affects
## all mapping rules defined after the policy rule.
##
## NOTE that if the mapping process STOPS and there is still no local UNIX identity
## identified, the user running A-REX will be used.
## NOTE that when a grid-identity is mapped to the "root" account, request processing fails implicitly.
##
##

#[mapping]

## map_to_user = authgroup_name unixname[:unixgroup] - the users that belong to the
## specified authgroup are mapped to the "unixname" local UNIX account that may be
## optionally followed by a "unixgroup" UNIX group.
## In case of a non-existing "unixname" account the mapping rule is treated as a rule that
## did not return a mapped identity (nomap).
## sequenced
## default: undefined
#map_to_user=authgroupA nobody:nobody

## map_to_pool = authgroup_name directory - a user that belongs to the specified
## authgroup is assigned one of the local UNIX accounts in the pool. Account names that
## are part of this pool are stored line-by-line in the "pool" file inside the "directory".
## The "directory" also contains information about used account names stored in other files.
## If there are no more available accounts in the defined pool for mapping then
## accounts not used for a configurable time period may be reassigned.
## The pool behaviour, including account reuse, is configurable with the optional
## "directory/config" file that has INI syntax (line-by-line "key=value").
## Possible keys of the "config" file are:
## "timeout"
##   Defines the timeout in days (default is "10") after which the UNIX
##   account can be reassigned to another user if not used. The "0" value
##   means no lease expiration.
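## As an illustrative sketch only (the account names and the timeout value are made-up
## examples), the directory used in the example below could contain: 
##   /etc/grid-security/pool/atlas/pool     (one account name per line, e.g. atlas001, atlas002, ...)
##   /etc/grid-security/pool/atlas/config   (optional INI file, e.g. a single line: timeout=20)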
## sequenced
## default: undefined
#map_to_pool=atlas /etc/grid-security/pool/atlas

## map_with_file = authgroup_name file - for users that belong to the specified
## authgroup the DN of the certificate is matched against a list of DNs stored in
## the specified "file", one per line followed by a local UNIX account name.
## The DN must be quoted if it contains blank spaces.
## This rule can be used to implement the legacy grid-mapfile approach.
## sequenced
## default: undefined
#map_with_file=authgroupB /etc/grid-security/grid-mapfile

## map_with_plugin = authgroup_name timeout plugin [arg1 [arg2 [...]]] - run an
## external "plugin" executable with the specified arguments to find the UNIX account
## name to which users that belong to the specified authgroup will be mapped.
## A rule matches if the exit code is "0" and there is a UNIX account name
## printed on stdout (optionally followed by a UNIX group name separated by a colon).
## The exit code 1 designates a failed mapping. Any other code or a timeout means fatal
## failure and will abort any further mapping processing. That will also cause
## rejection of the corresponding connection.
## Plugin execution time is limited to "timeout" seconds.
## The environment variables passed to the plugin contain basic information about user
## authentication. For a description of those variables see the 'plugin' command in the
## [authgroup] section.
##
## In the arguments the following substitutions are applied before the plugin is started:
## - "%D" - subject of user's certificate,
## - "%P" - path to credentials' proxy file.
##
## The environment variables passed to the plugin contain basic information about user
## authentication. The following variables are set if the corresponding information is available:
## - "X509_SUBJECT_NAME" - common name of user's certificate.
## - "BEARER_TOKEN_#_SUBJECT" - user's subject (identifier) extracted from JWT token (here # is the token's index, typically 0)
## - "BEARER_TOKEN_#_ISSUER" - issuer of the token extracted from JWT token
## - "BEARER_TOKEN_#_AUDIENCE_#" - designated audiences extracted from JWT token (here the second # is the audience's index starting from 0)
## - "BEARER_TOKEN_#_SCOPE_#" - assigned scope extracted from JWT token (here the second # is the scope's index starting from 0)
## - "BEARER_TOKEN_#_GROUP_#" - assigned WLCG group extracted from JWT token
## - "BEARER_TOKEN_#_CLAIM_<name>_#" - raw claim values of the token for claim "<name>"
##
## ARC ships with an LCMAPS plugin that can be enabled with the corresponding
## configuration. For more information about configuring LCMAPS refer to
## the 'Using LCAS/LCMAPS' document.
## sequenced
## default: undefined
#map_with_plugin=authgroupC 30 /usr/libexec/arc/arc-lcmaps %D %P liblcmaps.so /usr/lib64 /etc/lcmaps/lcmaps.db arc

## policy_on_nomap = continue/stop - redefines the mapping rules sequence processing policy
## in case the identity of the user matches the "authgroup" specified in the mapping rule and the mapping
## rule DOES NOT return a valid UNIX identity. The default policy is "stop" processing the further
## rules.
## For example this policy will be triggered if the pool is depleted, the certificate subject is
## missing in the map file used for the defined authgroup, or plugin execution failed.
## sequenced
## default: undefined
## allowedvalues: continue stop
#policy_on_nomap=continue

## policy_on_map = continue/stop - redefines the mapping rules sequence processing policy
## in case the identity of the user matches the "authgroup" specified in the mapping rule and the mapping
## rule returns a valid UNIX identity. The default policy is "stop" processing the further
## rules.
## This policy will be triggered if the rule successfully returns a result (account allocated in the pool,
## subject matched in the map file, plugin call was successful).
## sequenced
## default: undefined
## allowedvalues: continue stop
#policy_on_map=stop

## policy_on_nogroup = continue/stop - redefines the mapping rules sequence processing policy
## in case the identity of the user DOES NOT match the "authgroup" specified in the mapping rule.
## The default policy is "continue" processing the further rules.
## sequenced
## default: undefined
## allowedvalues: continue stop
#policy_on_nogroup=stop
##
##

### end of the [mapping] block ##############################################

### The [lrms] block ##############################################

## This block specifies the characteristics of the Local Resource Management System (batch system)
## underneath the ARC CE. This block contains all the lrms-specific parameters and information.
## Configuration values in this block are available for the A-REX, the backends, accounting and infosys
## ARC subsystems.
##
## ARC primarily supports SLURM, Condor and its own fork LRMS flavours, as listed below.
## Please note that the various PBS flavours have best effort community support only.

#[lrms]

## *lrms = lrmstype [defaultqueue] - Sets the type of the LRMS (queue system) and optionally the
## default queue name.
## ONLY ONE LRMS IS ALLOWED. MULTIPLE LRMS ENTRIES WILL TRIGGER UNEXPECTED BEHAVIOUR.
##
## For lrmstype, the following values can be chosen:
## - fork - simple forking of jobs to the same node as the server
## - condor - Condor
## - pbs - PBS (covers Torque and other old PBS flavours e.g. OpenPBS, older PBSPro, etc)
## - pbspro - Altair PBS Professional
## - slurm - SLURM
##
## The optional "defaultqueue" parameter specifies the name of an existing LRMS queue
## in the cluster that will be used by A-REX as the default queue to submit grid jobs in case
## the client does not specify a queue name during the job submission process.
## This queue name must match one of the [queue:queue_name] blocks.
##
## allowedvalues: fork sge condor pbs pbspro lsf ll slurm boinc
## default: undefined
## mandatory
#lrms=pbspro gridlong
#lrms=slurm

## lrmsconfig = text - An optional free text field to describe the configuration of your
## Local Resource Management System (batch system). The value is published in the infosys,
## and is not used otherwise.
## default: undefined
#lrmsconfig=single job per processor

## benchmark = string - Defines the default benchmark specification to store in
## the accounting AAR records if per-job data is missing.
## It is advised to set it to cluster-wide defaults in case of reporting to APEL
## to avoid record diversity for failed jobs or buggy backends.
## default: HEPSPEC 1.0
#benchmark=HEPSPEC 12.26
## CHANGE: MODIFIED in 7.0.0

## defaultmemory = number - The LRMS memory request of a job to be set by the LRMS backend
## scripts, if a user submits a job without specifying how much memory should be used.
## The order of precedence is: job description -> defaultmemory.
## This is the amount of memory (specified in MB) that a job will request.
## default: undefined
#defaultmemory=512

## nodename = path - Redefine the command to obtain the hostname of an LRMS worker node.
## By default the value is defined at buildtime and depends on the OS.
## In most cases "/bin/hostname -f" will be used.
## NOTE that this way of getting the WN hostname will be used only in case the
## particular LRMS backend has no native LRMS-defined way.
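## A typical override (shown in the example below) makes worker nodes report their short
## hostname; any command that prints the node's name on stdout is expected to work here.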
## default: undefined
#nodename=/bin/hostname -s

## gnu_time = path - Path to the GNU time command on the LRMS worker nodes.
## If the time command exists on the node, the jobscript will write additional diagnostic information.
## default: /usr/bin/time
#gnu_time=/usr/bin/time

## movetool = command - Redefine the command used to move files during jobscript
## execution on the LRMS worker node (the command should be available on the WNs).
## This in particular applies to file movement from sessiondir to scratchdir
## in the shared sessiondir case.
## default: mv
#movetool=rsync -av

### PBS options: set these only in case of lrms=pbs

## pbs_bin_path = path - The path to the qstat, pbsnodes, qmgr etc PBS binaries;
## no need to set if PBS is not used.
## default: /usr/bin
#pbs_bin_path=/usr/bin

## pbs_log_path = path - The path of the PBS server logfiles which are used by A-REX to determine
## whether a PBS job is completed. If not specified, A-REX will use qstat for that.
## default: /var/spool/pbs/server_logs
#pbs_log_path=/var/spool/pbs/server_logs

## pbs_dedicated_node_string = string - The string which
## is used in the PBS node config to distinguish the grid nodes from the rest.
## Suppose only a subset of nodes are available for grid jobs,
## and these nodes have a common "node property" string;
## in this case the option should be set to this value and only the
## nodes with the corresponding "pbs node property" are counted as grid-enabled
## nodes. Setting the dedicated_node_string to the value of the "pbs node
## property" of the grid-enabled nodes will influence how the totalcpus and user
## freecpus values are calculated. You don't need to set this attribute if your cluster
## is fully available for the grid and your cluster's PBS config does not use
## the "node property" method to assign certain nodes to grid queues. You
## shouldn't use this config option unless you make sure your PBS config makes
## use of the above described setup.
## default: undefined
#pbs_dedicated_node_string=gridnode

### Condor options: set these only in case of lrms=condor

## condor_bin_path = path - Path to Condor binaries. Must be set if Condor
## is used.
## default: /usr/bin
#condor_bin_path=/opt/condor/bin

## condor_config = path - Full path to the Condor config file. Must be set if Condor
## is used and the config file is not in its default location
## (/etc/condor/condor_config or ~/condor/condor_config).
## The full path to the file should be given.
## default: /etc/condor/condor_config
#condor_config=/opt/condor/etc/condor_config

## condor_rank = ClassAd_float_expression - If you are not happy with the way Condor picks nodes
## when running jobs, you can define your own ranking algorithm by optionally
## setting the condor_rank attribute. condor_rank should be set to a
## ClassAd float expression that you could use in the Rank attribute
## in a Condor job description.
## default: undefined
#condor_rank=(1-LoadAvg/2)*(1-LoadAvg/2)*Memory/1000*KFlops/1000000

## condor_requirements = string - Specify additional constraints for Condor resources.
## The value of "condor_requirements" must be a valid constraints string
## which is recognized by a "condor_status -constraint ..." command. It can
## reference pre-defined ClassAd attributes (like Memory, Opsys, Arch, HasJava,
## etc) but also custom ClassAd attributes.
## To define a custom attribute on a
## condor node, just add two lines like the ones below in the "$(hostname).local"
## config file on the node: 
##   NORDUGRID_RESOURCE=TRUE
##   STARTD_EXPRS = NORDUGRID_RESOURCE, $(STARTD_EXPRS)
## A job submitted to this resource is allowed to run on any node which satisfies
## the "condor_requirements" constraint. If "condor_requirements" is not set,
## jobs will be allowed to run on any of the nodes in the pool. When configuring
## multiple queues, you can differentiate them based on memory size or disk
## space, for example.
## default: undefined
#condor_requirements=(OpSys == "linux" && NORDUGRID_RESOURCE && Memory >= 1000 && Memory < 2000)

### SLURM options: set these only in case of lrms=slurm

## slurm_bin_path = path - Path to SLURM binaries, must be set if installed
## outside of the normal PATH.
## default: /usr/bin
#slurm_bin_path=/usr/bin

## slurm_wakeupperiod = numsec - How long infosys should wait before querying SLURM
## for new data (seconds).
## default: 30
#slurm_wakeupperiod=15

## slurm_use_sacct = yes/no - Indicates whether ARC should use sacct instead of scontrol
## to obtain information about finished jobs.
## allowedvalues: yes no
## default: yes
#slurm_use_sacct=yes

## slurm_requirements = string - Use this option to specify extra SLURM-specific parameters.
## default: undefined
#slurm_requirements=mincpus=8

## slurm_query_retries = number - Number of sacct/scontrol retries performed in scan-SLURM-job.
## If SLURM is overloaded the sacct/scontrol command call may fail.
## If retries > 1, sacct/scontrol is retried after some seconds for that (those) particular job(s).
## If all retry attempts fail, the next scan-SLURM-job invocation will pick up the job(s) from last time.
## default: 1
#slurm_query_retries=3
##
##

### end of [lrms] block #####################################################

### The [arex] block #####################

## The [arex] block, together with its various subblocks,
## configures the A-REX service hosted in "arched". A-REX takes care of
## various middleware tasks on the frontend such as job creation and management,
## stagein/stageout, LRMS job submission, data caching, etc...

#[arex]

## user = user[:group] - Switch to a non-root user/group after startup.
## Use with caution because of limited functionality when arex is not run under root.
## default: root
#user=grid:grid

## norootpower = yes|no - If set to yes, all job management processes
## will switch to the mapped user's identity while accessing the session directory.
## This is useful if the session directory is on NFS with root squashing turned on.
## allowedvalues: yes no
## default: no
#norootpower=yes

## delegationdb = db_name - specify which DB to use to store delegations.
## Currently supported db_names are bdb and sqlite.
## default: sqlite
#delegationdb=sqlite

## watchdog = yes/no - Specifies if an additional watchdog process is spawned to restart
## the main process if it is stuck or dies.
## allowedvalues: yes no
## default: no
#watchdog=no

## loglevel = level - Set the loglevel of the arched daemon hosting the A-REX service
## between 0 (FATAL) and 5 (DEBUG). Defaults to 3 (INFO).
## allowedvalues: 0 1 2 3 4 5 FATAL ERROR WARNING INFO VERBOSE DEBUG
## default: 3
#loglevel=3

## logfile = path - Specify the A-REX log file location. If using an external log
## rotation tool be careful to make sure it matches the path specified here.
## default: /var/log/arc/arex.log
#logfile=/var/log/arc/arex.log

## joblog = path - Specifies where to store a specialized log about started
## and finished jobs. If the path is empty the log is NOT written.
## Controlled by logrotate if the default name is kept.
## This log is not used by any other part of ARC so it can be safely disabled if you are
## not interested in storing the jobs log.
## default: /var/log/arc/arex-jobs.log
#joblog=

## fixdirectories = yes/missing/no - Specifies whether during startup A-REX should
## create all directories needed for its operation and set suitable default
## permissions. If "no" is specified then A-REX does nothing to prepare its
## operational environment. In case of "missing" A-REX only creates and
## sets permissions for directories which are not present yet. For "yes"
## all directories are created and permissions for all used directories are
## set to default safe values.
## allowedvalues: yes missing no
## default: yes
#fixdirectories=yes

## controldir = path - The directory of the A-REX's internal job metadata files.
## For a heavily loaded computing element you can consider locating the controldir on
## a dedicated partition optimized for small random reads and writes and for
## storing many small files. The directory is not needed on the nodes.
## default: /var/spool/arc/jobstatus
#controldir=/var/spool/arc/jobstatus

## sessiondir = path [drain] - the directory which holds the sessiondirs of the grid jobs.
## Multiple session directories may be specified.
## In this case jobs are spread evenly over the session directories.
## If "sessiondir=*" is set, the session directory will be spread over the
## "${HOME}/.jobs" directories of every locally mapped unix user. It is preferred
## to use common session directories. The path may be followed by "drain", in
## which case no new jobs will be assigned to that sessiondir, but current jobs
## will still be processed and accessible.
## multivalued
## default: /var/spool/arc/sessiondir
#sessiondir=/scratch/arcsessions drain
#sessiondir=*

## defaultttl = [ttl [ttr]] - The ttl parameter sets the time in seconds for how long a job session
## directory will survive after job execution has finished. If not specified
## the default is 1 week. The ttr parameter sets how long information about a job will be kept
## after the session directory is deleted. If not specified, the ttr default is one month.
## default: 604800 2592000
#defaultttl=2592000

## shared_filesystem = yes/no - Specifies if computing nodes can access folders mounted
## with protocols like NFS with the same pathnames as the frontend.
## NOTE that the default 'yes' assumes that the paths to the session directories
## are the same on both the frontend and the nodes.
## If these paths are not the same, then one should set the "scratchdir" option.
## The option changes the "RUNTIME_NODE_SEES_FRONTEND" variable in the submission scripts.
## allowedvalues: yes no
## default: yes
#shared_filesystem=yes

## scratchdir = path - The path on the computing node to move the session directory to before
## execution. If defined it should contain the path to the directory on the
## computing node which can be used to store a job's files during execution.
## Sets the environment variable RUNTIME_LOCAL_SCRATCH_DIR. If the variable is not set,
## then the session dir is not moved before execution. Don't set this parameter unless
## you want to move the sessiondir to the scratchdir on the node.
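## As an illustrative sketch only (the paths are made-up examples): with "scratchdir=/local/scratch"
## a session directory like "/var/spool/arc/sessiondir/<jobid>" would be moved to
## "/local/scratch/<jobid>" on the worker node before the job starts executing.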
## tmpdir = path - A temporary directory used by A-REX.
## default: /tmp
#tmpdir=/tmp
## runtimedir = path - The directory which holds the additional runtimeenvironment scripts
## added by the system administrator. Several directories can be specified.
## To enable RTEs to be advertised in the information system and used during submission
## the arcctl tool should be used.
## multivalued
## default: undefined
#runtimedir=/var/spool/arc/extraruntimes
#runtimedir=/cvmfs/vo/arcruntime
## maxjobs = number1 number2 number3 number4 number5 - Specifies the maximum allowed number of jobs:
## - number1 - jobs which are not in FINISHED state (jobs tracked in RAM)
## - number2 - jobs being run (SUBMITTING, INLRMS states)
## - number3 - jobs being processed per DN
## - number4 - jobs in whole system
## - number5 - LRMS scripts limit (jobs in SUBMITTING and CANCELING)
## A parameter set to -1 means no limit.
## default: -1 -1 -1 -1 -1
#maxjobs=10000 10 2000 -1 -1
## maxrerun = number - Specifies how many times a job can be rerun if it failed in the LRMS.
## This is only an upper limit, the actual rerun value is set by the user in their xRSL.
## default: 5
#maxrerun=5
## statecallout = state options plugin_path [plugin_arguments] - Enables a callout feature
## of A-REX: every time a job goes to "state" A-REX will run the "plugin_path" executable.
## The following states are allowed:
## ACCEPTED, PREPARING, SUBMIT, INLRMS, FINISHING, FINISHED and DELETED.
## Options consist of "key=value" pairs separated by comma. Possible keys are:
## "timeout"
## defines the timeout in seconds to wait for plugin execution ("timeout=" can be omitted).
## "onsuccess", "onfailure", "ontimeout"
## defines the action that A-REX should take on successful execution (exit code 0),
## failed execution (exit code is not 0) or execution timeout respectively.
## Possible actions are:
## - "pass" - continue executing job,
## - "fail" - cancel job,
## - "log" - write to log about the failure and continue executing job.
## It is possible to use the following substitutions to construct the plugin command line:
## - "%R" - session root (value of sessiondir in [arex] block)
## - "%C" - controldir path
## - "%U" - username of mapped UNIX account
## - "%u" - numeric UID of mapped UNIX account
## - "%g" - numeric GID of mapped UNIX account
## - "%H" - home directory of mapped UNIX account as specified in "/etc/passwd"
## - "%Q" - default queue (see "lrms" configuration option in [lrms] block)
## - "%L" - LRMS name (see "lrms" configuration option in [lrms] block)
## - "%W" - ARC installation path (corresponds to the "ARC_LOCATION" environment variable)
## - "%F" - path to configuration file for this instance
## - "%I" - job ID (substituted at runtime)
## - "%S" - job state (substituted at runtime)
## Plugins included in the ARC distribution:
## - "arc-blahp-logger" - write accounting log for every finished job in BLAH format
## multivalued
## default: undefined
#statecallout=FINISHED timeout=10,onfailure=pass /usr/libexec/arc/arc-blahp-logger -I %I -U %u -L %C/job.%I.local -P %C/job.%I.proxy
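## A second, purely illustrative example (the plugin path is hypothetical):
## run a site-local check for every newly accepted job, cancelling the job if
## the plugin fails or does not finish within 30 seconds:
#statecallout=ACCEPTED timeout=30,onfailure=fail,ontimeout=fail /usr/local/bin/check-quota %u %C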
## wakeupperiod = time - Specifies how often A-REX checks for newly arrived
## jobs, job state change requests, etc., that is, the responsiveness of
## A-REX. "time" is a time period in seconds. Default is 3 minutes.
## Usually there is no need to change this parameter because important state changes
## also trigger out-of-schedule checks.
## NOTE that this parameter does not affect the responsiveness of backend scripts -
## especially "scan-<LRMS>-job". That means that an upper estimate of the time for
## detecting that a job has finished executing is the sum of the responsiveness of the
## backend script + wakeupperiod.
## default: 180
#wakeupperiod=180
## infoproviders_timelimit = seconds - Sets the
## execution time limit of the infoprovider scripts started by the A-REX.
## Infoprovider scripts running longer than the specified timelimit are
## gracefully handled by the A-REX (the behaviour depends on
## the state of the system).
## Increase this value if you have many jobs in the controldir and
## infoproviders need more time to process them.
## default: 10800
#infoproviders_timelimit=10800
## pidfile = path - Specify the location of the file containing the PID of the daemon process.
## default: /run/arched-arex.pid
#pidfile=/run/arched-arex.pid
## mail = email_address - Specifies the email address from which the notification mails are sent.
## default: $VAR{user}@$VAR{[common]hostname}
#mail=grid.support@somewhere.org
## helper = user executable arguments - By enabling this parameter A-REX will
## run an external "helper" program under the "user" account. The program will be
## kept running; every time the executable finishes it will be started again.
## As a limitation, currently only '.' is supported as username, which corresponds
## to the user running A-REX.
## default: undefined
#helper=. /usr/local/bin/myutility
## helperlog = path - Configuration option to specify the location of the log for helpers.
## default: /var/log/arc/job.helper.errors
#helperlog=/var/log/arc/job.helper.errors
## forcedefaultvoms = VOMS_FQAN - Specify the VOMS FQAN which a user will be
## assigned if his/her credentials contain no VOMS attributes.
## To assign different values to different queues put this command
## into the [queue] block.
## default: undefined
#forcedefaultvoms=/vo/group/subgroup
## usetokenforvoms = yes/no - Whether claims from WLCG compliant tokens
## are to be used as VOMS attributes.
## allowedvalues: yes no
## default: no
#usetokenforvoms=yes
## CHANGE: NEW in 7.0.0
## tokenscopes = action=scope[,action=scope[...]] - Assigns JWT token scopes required
## to perform specific actions. Multiple tokenscopes entries are allowed.
## The following actions are supported:
## - "info" - information about server
## - "jobinfo" - information about jobs
## - "jobcreate" - create new job or restart existing
## - "jobcancel" - cancel active jobs
## - "jobdelete" - remove jobs from server
## - "datainfo" - information about files in session directory
## - "datawrite" - create new or modify files in session directory
## - "dataread" - read files in session directory
## The action=scope pairs can be replaced with an identifier which works as a shortcut for
## multiple actions and scopes. The only currently supported shortcut identifier is "wlcg" (see below).
## default: undefined
## The following example assigns scopes according to the WLCG profile and alternatively
## can be defined by "tokenscopes=wlcg".
#tokenscopes=jobinfo=compute.read,jobcreate=compute.create,jobcancel=compute.cancel,jobdelete=compute.cancel
#tokenscopes=datainfo=compute.read,datawrite=compute.modify,dataread=compute.read
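## Equivalently, using the shortcut identifier described above:
#tokenscopes=wlcg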
## - map your token claim key to a custom attribute name to be used in accounting
## multivalued
## default: name:name,email:email,preferred_username:username,wlcg_groups:group
#authtokenmap=sub:user
## CHANGE: NEW in 7.0.0
##
##
### end of the [arex] block ###################################
### The [arex/cache] block #########################################
## This subblock enables and configures the cache functionality of A-REX.
## A-REX can cache input files downloaded as part of the stage-in process of grid jobs
## so that subsequent jobs requiring the same file don't have to download it again.
## The cached file will be symlinked (or copied) into the session directory of the job.
## To disable the cache functionality simply comment out the [arex/cache] config block.
## It is a good idea to have the cache on its own separate file system that is shared with the nodes.
## For more information about the cache functionality of A-REX consult the Data Cache
## technical description in the online documentation.
#[arex/cache]
## *cachedir = cache_path [link_path] - Specifies a directory to store cached
## data. Multiple cache directories may be specified. Cached data will be distributed
## evenly over the caches.
## Optional "link_path" specifies the path at which the "cache_path" is accessible on
## computing nodes, if it is different from the path on the A-REX host.
## If "link_path" is set to "." files are not soft-linked, but copied to the session
## directory.
## If a cache directory needs to be drained, then "link_path" should specify "drain",
## in which case no new files will be added to the cache and files in the cache
## will no longer be used.
## Setting "link_path" to "readonly" ensures that no new files are written to
## this cache, but existing files can still be used.
## Draining and read-only caches are not cleaned by the A-REX cache cleaner.
## A restart of A-REX is required when changing cache options.
## multivalued
## default: undefined
#cachedir=/scratch/cache
#cachedir=/shared/cache /frontend/jobcache
#cachedir=/fs1/cache drain
##
##
### end of the [arex/cache] #############################################
### The [arex/cache/cleaner] block #########################################
## This subblock enables the cleaning functionality of the cache. If this block is not enabled
## then the cache will not be cleaned by A-REX. Either cachesize or cachelifetime should also be
## set to enable cleaning.
#[arex/cache/cleaner]
## logfile = path - Sets the filename where output of the cache-clean
## tool should be logged. Defaults to /var/log/arc/cache-clean.log.
## default: /var/log/arc/cache-clean.log
#logfile=/tmp/cache-clean.log
## loglevel = level - Specifies the level of logging by the cache-clean
## tool, between 0 (FATAL) and 5 (DEBUG). Defaults to 3 (INFO).
## allowedvalues: 0 1 2 3 4 5 FATAL ERROR WARNING INFO VERBOSE DEBUG
## default: 3
#loglevel=4
## cachesize = max min - Specifies high and low watermarks for the space used
## by the cache, as a percentage of the space on the file system on which
## the cache directory is located. When the max is exceeded, files will
## be deleted to bring the used space down to the min level. It is a
## good idea to have the cache on its own separate file system.
## default: 100 100
#cachesize=50 20
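## As a worked example (illustrative numbers): with the cache alone on a 1 TB
## file system, "cachesize=50 20" starts cleaning once more than 500 GB (50%)
## of the file system is used, and deletes cached files until usage is down
## to 200 GB (20%).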
## calculatesize = filesystem/cachedir - Specifies the way the space
## occupied by the cache will be calculated. If set to cachedir then cache-clean calculates
## the size of the cache instead of using the file system used space.
## allowedvalues: filesystem cachedir
## default: filesystem
#calculatesize=cachedir
## cachelifetime = time - Turns on time-based file cleaning. Files accessed less recently than
## the given time period will be deleted. Example values of this option are 1800, 90s, 24h, 30d.
## When no suffix is given the unit is seconds.
## default: undefined
#cachelifetime=30d
## cachespacetool = path [options] - Specifies an alternative tool to "df" that
## cache-clean should use to obtain space information on the cache file system.
## The output of this command must be "total_bytes used_bytes". The cache
## directory is passed as the last argument to this command.
## default: undefined
#cachespacetool=/etc/getspace.sh
## cachecleantimeout = time - The timeout in seconds for running the cache-clean
## tool. If using a large cache or slow file system this value can be
## increased to allow the cleaning to complete. Defaults to 3600 (1 hour).
## default: 3600
#cachecleantimeout=10000
##
##
### end of the [arex/cache/cleaner] #############################################
### The [arex/data-staging] block ###########
## This subblock enables and configures the data staging capabilities of A-REX.
## A subsystem called DTR (Data Transfer Reloaded) is responsible for collecting input data
## for a job before submission to the LRMS, and for staging out data
## after the job has finished. Automagic data staging is a very powerful feature of A-REX;
## disabling this functionality (by commenting out the subblock) is not recommended.
#[arex/data-staging]
## loglevel = number - Sets the log level for transfer logging in job.id.errors files,
## between 0 (FATAL) and 5 (DEBUG). Default is to use the value set by the loglevel option in
## the [arex] section.
## allowedvalues: 0 1 2 3 4 5 FATAL ERROR WARNING INFO VERBOSE DEBUG
## default: $VAR{[arex]loglevel}
#loglevel=4
## logfile = path - A central file in which all data staging messages
## from every job will be collected and logged in addition to their job.id.errors files.
## If this option is not present or the path is empty the log file is not created.
## This file is not automatically controlled by logrotate unless you name it
## /var/log/arc/datastaging.log.
## default: undefined
#logfile=/var/log/arc/datastaging.log
## statefile = path - A file in which data staging state information
## (for monitoring and recovery purposes) is periodically dumped.
## default: $VAR{[arex]controldir}/dtr.state
#statefile=/tmp/dtr.state
## usehostcert = yes/no - Whether the A-REX host certificate should be used for
## communication with remote hosts instead of the users' proxies.
## allowedvalues: yes no
## default: no
#usehostcert=yes
## maxtransfertries = number - The maximum number of times download and upload will
## be attempted per job (retries are only performed if an error is judged to be temporary).
## default: 10
#maxtransfertries=20
## passivetransfer = yes/no - If yes, gridftp transfers are passive. Setting
## this option to yes can solve transfer problems caused by firewalls.
## allowedvalues: yes no
## default: yes
#passivetransfer=yes
## globus_tcp_port_range = port_range - In a firewalled environment
## the software which uses GSI needs to know what ports are available.
## This parameter is only needed if "passivetransfer=no" was set.
## These variables are similar to the Globus environment variables
## "GLOBUS_TCP_PORT_RANGE" and "GLOBUS_UDP_PORT_RANGE".
## default: 9000,9300
#globus_tcp_port_range=9000,12000
## globus_udp_port_range = port_range - In a firewalled environment
## the software which uses GSI needs to know what ports are available.
## This parameter is only needed if "passivetransfer=no" was set.
## These variables are similar to the Globus environment variables
## "GLOBUS_TCP_PORT_RANGE" and "GLOBUS_UDP_PORT_RANGE".
## default: 9000,9300
#globus_udp_port_range=9000,12000
## httpgetpartial = yes/no - If yes, HTTP GET transfers may transfer data in
## chunks/parts. If no, data is always transferred in one piece.
## allowedvalues: yes no
## default: no
#httpgetpartial=no
## speedcontrol = min_speed min_time min_average_speed max_inactivity - Specifies
## how slow a data transfer must be to trigger an error. A transfer is cancelled if the
## speed is below min_speed bytes per second for at least min_time seconds,
## or if the average rate is below min_average_speed bytes per second, or if no data
## was transferred for longer than max_inactivity seconds.
## For example, "speedcontrol=0 300 100 300" cancels a transfer whose average rate
## drops below 100 bytes per second or which is inactive for more than 300 seconds.
## A value of zero turns the feature off.
## default: 0 300 0 300
#speedcontrol=0 300 100 300
#speedcontrol=
## maxdelivery = number - Maximum number of concurrent file transfers, i.e. active
## transfers using network bandwidth. This is the total number for the whole
## system including any remote staging hosts.
## default: 10
#maxdelivery=40
## maxprocessor = number - Maximum number of concurrent files in each of the DTR
## internal pre- and post-processing states, e.g. cache check or replica resolution.
## default: 10
#maxprocessor=20
## maxemergency = number - Maximum "emergency" slots which can be assigned to transfer
## shares when all slots up to the limits configured by the above two options
## are used by other shares. This ensures shares cannot be blocked by others.
## default: 1
#maxemergency=5
## maxprepared = number - Maximum number of files in a prepared state, i.e. pinned on a
## remote storage such as SRM for transfer. A good value is a small multiple of maxdelivery.
## default: 200
#maxprepared=250
## sharepolicy = grouping - Defines the mechanism to be used for the
## grouping of the job transfers. DTR assigns the transfers to shares, so that those shares
## can be assigned different priorities.
## Possible values for "grouping" are dn, voms:vo, voms:role and voms:group:
## dn
## each job is assigned to a share based on the DN of the user submitting the job.
## voms:vo
## each job is assigned to a share based on the VO specified in the proxy.
## voms:role
## each job is assigned to a share based on the role specified in the first attribute
## found in the proxy.
## voms:group
## each job is assigned to a share based on the group specified in the first attribute
## found in the proxy.
## In case of the voms schemes, if the proxy is not a VOMS proxy, then a default share is used.
## If sharepolicy is not set then the client-defined priority is applied.
## default: undefined
#sharepolicy=voms:role
## sharepriority = share priority - Defines a share with a fixed priority,
## different from the default (50). Priority is an integer between 1 (lowest) and 100 (highest).
## multivalued
## default: undefined
#sharepriority=myvo:students 20
#sharepriority=myvo:production 80
## copyurl = url_head local_path - Configures a mapping of URLs to locally-
## accessible paths. If a URL starts with "url_head", the "local_path" will be
## substituted for the actual transfer. Applies to both input and output files.
## NOTE that "local_path" can also be of URL type.
## multivalued
## default: undefined
#copyurl=gsiftp://example.org:2811/data/ /data/
#copyurl=gsiftp://example2.org:2811/data/ /data/
## CHANGE: MODIFIED in 7.0.0 - applies also to output files
## linkurl = url_head local_path [node_path] - Identical to "copyurl", but configures DTR
## so that for certain URLs files won't be downloaded or copied (in case of copyurl),
## but a soft-link will be created. The "local_path"
## specifies the way to access the file from the frontend, and is used
## to check permissions. The "node_path" specifies how the file can be
## accessed from computing nodes, and will be used for soft-link creation.
## If "node_path" is missing, "local_path" will be used. This option applies
## only to input files.
## multivalued
## default: undefined
#linkurl=gsiftp://somewhere.org/data /data
#linkurl=gsiftp://example.org:2811/data/ /scratch/data/
## preferredpattern = pattern - Specifies a preferred pattern on which
## to sort multiple replicas of an input file. It consists of one or
## more patterns separated by a pipe character (|) listed in order of
## preference. Replicas will be ordered by the earliest match. If the
## dollar character ($) is used at the end of a pattern, the pattern
## will be matched to the end of the hostname of the replica. If an
## exclamation mark (!) is used at the beginning of a pattern, any replicas
## matching the pattern will be excluded from the sorted replicas.
## default: undefined
#preferredpattern=srm://myhost.ac.uk|.uk$|ndgf.org$|!badhost.org$
## The following options are used to configure the multi-host data staging deployment scenario.
## In that setup a number of additional data staging boxes are enabled to off-load data transfers.
## deliveryservice = URL - The URL of a remote data delivery service which can perform remote
## data staging.
## multivalued
## default: undefined
#deliveryservice=https://myhost.org:443/datadeliveryservice
## localdelivery = yes/no - If any deliveryservice is defined, this option determines
## whether local data transfer is also performed.
## allowedvalues: yes no
## default: no
#localdelivery=yes
## remotesizelimit = size - Lower limit on the file size (in bytes) of files that remote
## hosts should transfer. Can be used to increase performance by transferring
## small files using local processes.
## default: undefined
#remotesizelimit=100000
##
##
### end of the [arex/data-staging] block ############################
### The [arex/ws] block #################################
## A-REX exposes a set of Web Service interfaces that can be used to create and
## manage jobs, obtain information about the CE and the jobs, handle delegations,
## access cache information, and so on. Comment out this block if you don't want to
## provide WS-interfaces for the various A-REX functionalities.
#[arex/ws]
## wsurl = url - Specifies the base URL under which
## the web service interfaces will be available. The URL argument must be a
## full URL consisting of protocol+host+port+path: e.g. "https://<hostname>:<port>/<path>".
## Make sure the chosen port is not blocked by a firewall or other security rules.
## default: https://$VAR{[common]hostname}:443/arex
#wsurl=https://piff.hep.lu.se:443/arex
## logfile = path - Specify the log file location for WS-interface operations.
## default: /var/log/arc/ws-interface.log
#logfile=/var/log/arc/ws-interface.log
## pidfile = path - Specify the location of the file containing the PID of the daemon process.
## default: /run/arched-arex-ws.pid
#pidfile=/run/arched-arex-ws.pid
## max_job_control_requests = number - The maximum number of simultaneously processed job management
## requests over the WS interface - like job submission, cancel, status check etc.
## default: 100
#max_job_control_requests=100
## max_infosys_requests = number - The maximum number of simultaneously processed info
## requests over the WS interface.
## default: 1
#max_infosys_requests=1
## max_data_transfer_requests = number - The maximum number of simultaneously processed data transfer
## requests over the WS interface - like data staging.
## default: 100
#max_data_transfer_requests=100
## tlsciphers = ciphers_list - Override the OpenSSL ciphers list enabled on the server.
## default: HIGH:!eNULL:!aNULL
#tlsciphers=HIGH:!eNULL:!aNULL
## tlsserverorder = yes - Force the priority order of ciphers for the TLS connection to be decided on the server side.
## default: no
#tlsserverorder=yes
## CHANGE: NEW in 7.0.0
## tlsprotocols = SSL/TLS protocols - Specify which protocols to enable.
## This is a space-separated list of values - SSLv2 SSLv3 TLSv1.0 TLSv1.1 TLSv1.2 TLSv1.3
## default: TLSv1.2 TLSv1.3
#tlsprotocols=TLSv1.2 TLSv1.3
## tlscurve = curve - Specify the SSL/TLS ECDH curve name (SN).
## default: secp521r1
#tlscurve=secp521r1
##
##
### end of the [arex/ws] block ##############################
### The [arex/ws/jobs] block ################################
## This block enables the job management, info query and delegation protocols through the REST interface.
## Read http://www.nordugrid.org/arc/arc7/tech/rest/rest.html for the REST interface specification.
#[arex/ws/jobs]
## allownew = yes/no - The 'allownew' config parameter sets whether the Computing Element accepts
## submission of new jobs via the WS-interface. This parameter can be used to close down the CE.
## allowedvalues: yes no
## default: yes
#allownew=yes
## allownew_override = [authgroup ...] - Defines which authorization
## groups are allowed to submit new jobs via the WS-interfaces
## when the CE is closed with "allownew=no".
## NOTE that it requires "allownew=no" to be set.
## multivalued
## default: undefined
#allownew_override=biousers atlasusers
#allownew_override=yourauthgroup
## allowaccess = authgroup - Defines that the specified authgroup members
## are authorized to access the ARC-CE via this interface. A related config option,
## "denyaccess" (see below), can be used to reject access.
## Multiple "allowaccess" and "denyaccess" authorization statements are allowed within a configuration block.
## These statements are processed sequentially in the order they are specified in the
## config block. The processing stops on the first "allowaccess" or "denyaccess" statement matching the authgroup membership.
## If there are no authorization statements specified, then no additional restrictions are applied
## for authorizing user access and the interface is open to everybody authenticated.
## default: undefined
## multivalued
#allowaccess=biousers
#allowaccess=atlasusers
## denyaccess = authgroup - Defines that the specified authgroup members
## are REJECTED, i.e. not authorized to access the ARC-CE via this interface.
## NOTE that a related config option, "allowaccess" (see above), can be used to grant access.
## Multiple "denyaccess" and "allowaccess" authorization statements are allowed within a configuration block.
## These statements are processed sequentially in the order they are specified in the
## config block. The processing stops on the first "allowaccess" or "denyaccess" statement matching the authgroup membership.
## If there are no authorization statements specified, then no additional restrictions are applied
## for authorizing user access and the interface is open to everybody authenticated.
## default: undefined
## multivalued
#denyaccess=blacklisted-users
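## For illustration (the authgroup names refer to [authgroup:...] blocks defined
## elsewhere in the configuration): reject blacklisted users first, then admit
## two communities; the first statement matching a user's authgroup membership
## wins, so the order of the lines matters:
#denyaccess=blacklisted-users
#allowaccess=biousers
#allowaccess=atlasusers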
## maxjobdesc = size - Specifies the maximum allowed size of a job description
## in bytes. The default value is 5MB. Use 0 to set unlimited size.
## default: 5242880
#maxjobdesc=0
##
##
### end of the [arex/ws/jobs] block ##############################
### The [arex/ws/cache] block ################################
## The content of the A-REX cache can be accessed via a WS-interface.
## Configuring this block will allow reading cache files through a special URL.
## For example, if the remote file gsiftp://remotehost/file1 is stored in the cache
## and the WS interfaces (configured above) are available via a wsurl of https://hostname:443/arex/,
## then the cached copy of the file can be accessed via the following special URL:
## https://hostname:443/arex/cache/gsiftp://remotehost/file1
## Comment out this block if you don't want to expose the cache content via the WS-interface.
#[arex/ws/cache]
## cacheaccess = rule - This parameter defines the access control rules for the cache WS-interface,
## the rules for allowing access to files in the cache remotely through the A-REX web interface.
## If not set, then no one can access anything; the unset default means complete denial.
## A rule has three parts:
## 1. Regular expression defining a URL pattern
## 2. Credential attribute to match against a client's credential
## 3. Regular expression defining a credential value to match against a client's
## credential
## A client is allowed to access the cached file if a URL pattern matches the
## cached file URL and the client's credential has the attribute and matches the
## value required for that pattern. Possible values for the credential attribute are
## dn, voms:vo, voms:role and voms:group.
## multivalued
## default: undefined
#cacheaccess=gsiftp://host.org/private/data/.* voms:vo myvo:production
#cacheaccess=gsiftp://host.org/private/data/bob/.* dn /O=Grid/O=NorduGrid/.*
##
##
### end of the [arex/ws/cache] block ####################
### The [arex/ws/candypond] block #############
## The CandyPond (Cache and deliver your pilot on-demand data) A-REX Web
## Service exposes various useful data-staging related operations
## for the pilot job model where input data for jobs is not known until the job
## is running on the worker node. This service is intended to be used by A-REX managed jobs.
## This service requires the [arex/data-staging] functionality.
## To use the service from the job context enable the "ENV/CANDYPOND" RTE.
##
## The CandyPond service is available via the wsurl/candypond URL
## (e.g. https://hostname:443/arex/candypond)
##
#[arex/ws/candypond]
##
##
### end of the [arex/ws/candypond] block ####################
### The [arex/jura] block ###################################
## A-REX is responsible for collecting accounting measurements from various ARC
## subsystems, including batch system backends and DTR data staging.
##
## A-REX writes all accounting data into the local accounting
## database that can be queried with "arcctl accounting".
##
## JURA is the accounting record generating and reporting ARC CE module.
## A-REX periodically executes JURA to create usage records based on the
## accounting target configuration and accounting database data.
##
## Enable and configure this block if you want to send accounting records to
## accounting services.
##
## NOTE that a dedicated "accounting target" subblock is needed for every accounting
## destination. The target subblocks are either of type "apel" or "sgas":
## "[arex/jura/apel:targetname]" or "[arex/jura/sgas:targetname]".
##
#[arex/jura]
## logfile = path - The name of the jura logfile.
## default: /var/log/arc/jura.log
#logfile=/var/log/arc/jura.log
## loglevel = number - Log level for the JURA accounting module.
## allowedvalues: 0 1 2 3 4 5 FATAL ERROR WARNING INFO VERBOSE DEBUG
## default: 3
#loglevel=3
## vomsless_vo = [authgroup ]voname[#voissuer] - This parameter allows the sysadmin to manually assign
## VOs during publishing to jobs that were submitted with "VOMS-less grid proxies".
## "voname" is the VO name to be used in the generated records (the same as expected in a voms-proxy).
## The optional "voissuer" (relevant to SGAS only) value is the VOMS server identity (certificate DN).
## If authgroup is specified then this parameter is applied only if the user belongs to the specified authgroup.
## default: undefined
#vomsless_vo=atlas
#vomsless_vo=atlas#/DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch
#vomsless_vo=atlasgroup atlas
## vo_group = group - Adds additional VO group attribute(s) to the usage records.
## multivalued
## default: undefined
#vo_group=/atlas/production
## urdelivery_frequency = seconds - Specifies the frequency of
## regular JURA process executions by the A-REX.
## The actual threshold of the records reporting frequency can be defined on a per-target basis.
## default: 3600
#urdelivery_frequency=3600
## x509_host_key = path - Optional parameter to overwrite [common] block values.
## default: $VAR{[common]x509_host_key}
#x509_host_key=/etc/grid-security/hostkey.pem
## x509_host_cert = path - Optional parameter to overwrite [common] block values.
## default: $VAR{[common]x509_host_cert}
#x509_host_cert=/etc/grid-security/hostcert.pem
## x509_cert_dir = path - Optional parameter to overwrite [common] block values.
## default: $VAR{[common]x509_cert_dir}
#x509_cert_dir=/etc/grid-security/certificates
##
##
### end of the [arex/jura] block ########################################
### The [arex/jura/sgas:targetname] blocks ##############################
## An SGAS sub-block of [arex/jura] enables and configures an SGAS accounting
## server as a target destination to which JURA will send properly formatted usage records.
## You need to define a separate block with a unique targetname for every SGAS target server.
##
## Note that the block name will be used by JURA to track the latest records sent to
## this target. Be aware that if you rename the block, the target will be handled as a new one.
## However, a "targeturl" change will not trigger new target handling.
#
#[arex/jura/sgas:neic_sgas]
## *targeturl = url - The service endpoint URL of the SGAS server.
## default: undefined
#targeturl=https://grid.uio.no:8001/logger
## localid_prefix = prefix_string - Sets a prefix value for the LocalJobID ur parameter
## for the SGAS usage records.
## default: undefined
#localid_prefix=some_text_for_SGAS
## vofilter = vo - Configures a job record filtering mechanism based on the
## VO attribute of the jobs. Only matching job records, i.e. those whose VO is one
## of the VOs set here, will be sent to the target accounting service.
## multivalued
## default: undefined
#vofilter=atlas
#vofilter=fgi.csc.fi
## urbatchsize = number - JURA sends usage records not one-by-one, but in batches.
## This option sets the size of a batch. A zero value means unlimited batch size.
## default: 50
#urbatchsize=80
## urdelivery_frequency = seconds - Adds an optional minimal threshold
## for the interval between subsequent records publishing to this target.
## NOTE that the actual delivery interval is the value divisible by "urdelivery_frequency"
## defined in the [arex/jura] block, which defines the overall JURA process invocation frequency.
## default: undefined
#urdelivery_frequency=3600
##
##
### end of the [arex/jura/sgas:targetname] blocks ########################
### The [arex/jura/apel:targetname] blocks ###############################
## An APEL sub-block of [arex/jura] enables and configures an APEL accounting
## server as a target destination to which JURA will send properly formatted usage records.
## You need to define a separate block with a unique targetname for every APEL target server.
##
## Note that the block name will be used by JURA to track the latest records sent to
## this target. Be aware that if you rename the block, the target will be handled as a new one.
## However, a "targeturl" change will not trigger new target handling.
#[arex/jura/apel:egi_prod_apel]
## *targeturl = url - The service endpoint URL of the APEL accounting server.
## default: undefined
#targeturl=https://msg.argo.grnet.gr
## topic = topic_name - Sets the name of the APEL topic to which JURA will publish the
## accounting records.
## The AMS destination topic for a compute element is 'gLite-APEL'.
## default: gLite-APEL
#topic=/queue/global.accounting.test.cpu.central
## project = project_name - Sets the name of the APEL project to use.
## default: accounting
#project=accounting-nl
## *gocdb_name = name - Can be used to specify the GOCDB name of the resource.
## This value would be seen as the Site attribute in the generated APEL records.
## default: undefined
#gocdb_name=GRID_UIO_NO
## apel_messages = type - Defines what kind of records JURA
## will send to APEL services during the regular publishing process.
## Possible cases are: per-job EMI CAR records ("urs"), APEL summary records ("summaries")
## or APEL summary records v0.4 ("summaries-v04").
## APEL Sync messages are always generated.
## allowedvalues: urs summaries summaries-v04
## default: summaries
#apel_messages=urs
## vofilter = vo - Configures a job record filtering mechanism based on the
## VO attribute of the jobs. Only matching job records, i.e. those whose VO is one
## of the VOs set here, will be sent to the target accounting service.
## multivalued
## default: undefined
#vofilter=atlas
#vofilter=fgi.csc.fi
## urbatchsize = number - JURA sends usage records not one-by-one, but in batches.
## This option sets the size of a batch. A zero value means unlimited batch size.
## A value of 500 is recommended to avoid too large messages when using AMS.
## default: 500
#urbatchsize=500
## urdelivery_frequency = seconds - Adds an optional minimal
## threshold for the interval between subsequent records publishing to this target.
## NOTE that the actual delivery interval is the value divisible by "urdelivery_frequency"
## defined in the [arex/jura] block, which defines the overall JURA process invocation frequency.
## The APEL recommended value is once per day for "summaries". Use smaller values for "urs".
## default: 86000
#urdelivery_frequency=14000
##
##
### end of the [arex/jura/apel:targetname] blocks ############################
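## A minimal, illustrative combination of the blocks above for publishing APEL
## summaries to a single target (block name and values are this file's own
## examples; only the starred options are mandatory):
#[arex/jura]
#
#[arex/jura/apel:egi_prod_apel]
#targeturl=https://msg.argo.grnet.gr
#gocdb_name=GRID_UIO_NO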
### The [arex/ganglia] block ###############################
##
## This block enables the monitoring of ARC-specific metrics.
## Earlier versions (ARC < 6.0) relied on the standalone tool gangliarc; ganglia
## support is now integrated into ARC instead, and gangliarc is obsolete.
## NOTE that A-REX ganglia (like gangliarc before it) depends on an existing ganglia installation,
## as it sends its metrics to a running gmond process.
#[arex/ganglia]
## gmetric_bin_path = path - The path to the gmetric executable.
## default: /usr/bin/gmetric
#gmetric_bin_path=/usr/local/bin/gmetric
## metrics = name_of_the_metrics - The metrics to be monitored.
## metrics takes a comma-separated list of one or more of the following metrics:
## - staging -- number of tasks in different data staging states - not yet implemented
## - cache -- free cache space
## - session -- free session directory space
## - heartbeat -- last modification time of the A-REX heartbeat
## - failedjobs -- the number of failed jobs per last 100 finished
## - jobstates -- number of jobs in different A-REX stages
## - all -- all of the above metrics
## default: all
## allowedvalues: staging cache session heartbeat failedjobs jobstates all
#metrics=all
## frequency = seconds - The period between each information gathering cycle, in seconds.
## default: 60
#frequency=300
##
##
### end of the [arex/ganglia] block ##############
### The [infosys] block ################################################
## This block enables and configures the core part of the information system.
## It enables the information collection to be used by other ARC components, including interfaces.
## Parameters in this block apply to all the infosys subsystems.
#[infosys]
## logfile = path - Specifies the log file location for the information
## provider scripts.
## default: /var/log/arc/infoprovider.log
#logfile=/var/log/arc/infoprovider.log
## loglevel = number - The loglevel for the infoprovider scripts (0-5).
## Each value corresponds to the following verbosity levels:
## FATAL => 0, ERROR => 1 , WARNING => 2, INFO => 3, VERBOSE => 4, DEBUG => 5
## allowedvalues: 0 1 2 3 4 5 FATAL ERROR WARNING INFO VERBOSE DEBUG
## default: 3
#loglevel=3
## validity_ttl = seconds - The published infosys records advertise their
## validity, i.e. how long the info should be considered up-to-date by the clients.
## Use this parameter to set the published validity value.
## NOTE that different schemas may render this information differently.
## default: 10800
#validity_ttl=10800
##
##
### end of [infosys] block #################################################
### The [infosys/ldap] block ################################################
## This infosys subblock enables and configures the LDAP hosting service
## for the infosys functionality. Using an LDAP server with some schema is one way to
## publish information about your Computing Element.
## Comment out this block if you don't want to run an LDAP-based information system.
#[infosys/ldap]
## hostname = FQDN - The hostname of the machine running the slapd service;
## this will be the bind for slapd. If not present, it will be taken from the [common] block.
## default: $VAR{[common]hostname}
#hostname=my.testbox
## slapd_hostnamebind = string - May be used to set the hostname part of the
## network interface to which the slapd process will bind. In most
## cases there is no need to set it since the hostname parameter is already
## sufficient. The example below will bind the slapd
## process to all the network interfaces available on the server.
## default: undefined
#slapd_hostnamebind=*
## port = port_number - The port on which the slapd service runs.
## The default infosys port is assumed to be 2135 by many clients; therefore think twice
## before you change it, because 3rd-party clients assume 2135 to be the LDAP infosys port.
## default: 2135
#port=2135
## user = unix_user - Overwrites the unix user running the slapd.
## By default the startup scripts search for well-known ldap users like "ldap" or "openldap",
## then fall back to "root" if none is found.
## default: undefined
#user=slapd
## slapd = path - Explicitly define the path to the slapd command.
## By default the startup scripts search for the "slapd" binary in the system PATH.
## default: undefined
#slapd=/usr/sbin/slapd
## slapd_loglevel = number - Sets the native slapd loglevel (see man slapd).
## Slapd logs via syslog. The default is set to no-logging (0) and it is
## RECOMMENDED not to change it in a production environment.
## A non-zero slapd_loglevel value causes a serious performance decrease.
## default: 0
#slapd_loglevel=0
## threads = number - The native slapd threads parameter, default is 32.
## default: 32
#threads=128
## timelimit = seconds - The native slapd timelimit parameter. Maximum number of seconds
## the slapd server will spend answering a search request.
## Default is 3600. You probably want a much lower value.
## default: 3600
#timelimit=1800
## idletimeout = seconds - The native slapd idletimeout parameter. Maximum number of
## seconds the slapd server will wait before forcibly closing idle client
## connections. Its value must be larger than the value of the "timelimit" option.
## If not set, it defaults to timelimit + 1.
## default: $EVAL{$VAR{timelimit} + 1}
#idletimeout=1801
## infosys_ldap_run_dir = path - The location where the NorduGrid/GLUE2 LDAP ldif file
## will be generated, and where the fifo to sync between infoproviders
## and BDII will be generated.
## default: /run/arc/infosys
#infosys_ldap_run_dir=/run/arc/infosys
## ldap_schema_dir = path - Allows one to explicitly specify an additional path to the schema
## files. Note that this doesn't override the standard locations, but adds
## the specified path to the standard locations /etc/ldap and /etc/openldap.
## Normally it is sufficient to use only the standard schema file locations,
## and therefore not to set this parameter.
## default: undefined
#ldap_schema_dir=/nfs/ldap/schema/
## NOTE that the following options configure the third-party BDII LDAP parameters.
## In 99% of cases there is no need to change anything and the defaults can be used.
## These variables are usually automatically set by ARC, and are here mostly for debug purposes
## and to tweak exotic BDII installations.
## bdii_debug_level = level - Set this parameter to DEBUG to check BDII errors in bdii-update.log
## while not enabling slapd logs, thus avoiding performance issues.
## default: WARNING
#bdii_debug_level=ERROR
## bdii_provider_timeout = seconds - This variable allows a system administrator to modify
## the behaviour of bdii-update.
## This is the time BDII waits for the BDII provider scripts generated by
## A-REX infosys to produce their output.
## default: 10800
#bdii_provider_timeout=10800
## NOTE that BDII5 uses these variables. These might change depending on the BDII version.
## ARC sets them by inspecting the distributed BDII configuration files.
## DO NOT change unless YOU KNOW WHAT YOU'RE DOING.
## bdii_location = path - The installation directory for the BDII.
## default: /usr
#bdii_location=/usr
## bdii_run_dir = path - Contains BDII pid files and slapd pid files.
## default: /run/arc/bdii
#bdii_run_dir=/run/arc/bdii
## bdii_log_dir = path - Contains infosys logs.
## default: /var/log/arc/bdii
#bdii_log_dir=/var/log/arc/bdii
## bdii_tmp_dir = path - Contains provider scripts.
## default: /var/tmp/arc/bdii
#bdii_tmp_dir=/var/tmp/arc/bdii
## bdii_var_dir = path - Contains slapd databases.
## default: /var/lib/arc/bdii
#bdii_var_dir=/var/lib/arc/bdii
## bdii_update_pid_file = path - Allows changing the bdii-update
## pidfile's filename and location.
## default: $VAR{bdii_run_dir}/bdii-update.pid
#bdii_update_pid_file=/run/arc/bdii/bdii-update.pid
## bdii_database = backend_type - Configure which LDAP database backend should
## be used. If left "undefined" it will default to "hdb" for openldap versions
## up to 2.4 and to "mdb" for openldap versions 2.5 and later.
## default: undefined
#bdii_database=hdb
## bdii_conf = path - Location of the BDII config file generated by ARC.
## default: $VAR{[infosys/ldap]infosys_ldap_run_dir}/bdii.conf
#bdii_conf=/run/arc/infosys/bdii.conf
## bdii_update_cmd = path - Path to the bdii-update script.
## default: $VAR{bdii_location}/sbin/bdii-update
#bdii_update_cmd=/usr/sbin/bdii-update
## bdii_db_config = path - Path to the slapd database configuration file.
## default: /etc/bdii/DB_CONFIG
#bdii_db_config=/etc/bdii/DB_CONFIG
## bdii_archive_size = number - Sets BDII_ARCHIVE_SIZE in the BDII configuration file.
## default: 0
#bdii_archive_size=0
## bdii_breathe_time = number - Sets BDII_BREATHE_TIME in the BDII configuration file.
## default: 10
#bdii_breathe_time=10
## bdii_delete_delay = number - Sets BDII_DELETE_DELAY in the BDII configuration file.
## default: 0
#bdii_delete_delay=0
## bdii_read_timeout = number - Sets BDII_READ_TIMEOUT in the BDII configuration file.
## default: $EVAL{$VAR{bdii_provider_timeout} + $VAR{[arex]infoproviders_timelimit} + $VAR{[arex]wakeupperiod}}
#bdii_read_timeout=300
##
##
### end of the [infosys/ldap] sub-block ########################################
## Infosys Schema sub-blocks: The following infosys sub-blocks enable
## information publishing according to various information schemas.
## In order to publish information in a certain schema, the corresponding
## sub-block must be defined in addition to the schema-neutral [infosys/cluster]
## and [queue:name] blocks!
## Comment out a specific schema block if you don't want to publish a specific
## information schema representation.
## Currently available information model (schema) sub-blocks:
## - [infosys/nordugrid] - The native ARC info representation of a cluster and its queues
## - [infosys/glue2] - The GLUE2 information model, both LDAP and XML (the latter is for the WS-interface)
## - [infosys/glue2/ldap] - The LDAP rendering of the GLUE2 model
### The [infosys/nordugrid] schema sub-block ###########################
## Enables the publication of the NorduGrid information model in
## the LDAP-based infosys. See NORDUGRID-TECH-4 for the schema definition.
## The configuration block does not contain any parameters. The information tree
## is populated based on the contents of the schema-neutral [infosys/cluster]
## and [queue:name] blocks.
#[infosys/nordugrid]
##
##
### end of the [infosys/nordugrid] schema block ########################
### The [infosys/glue2] schema sub-block ###########################
## Enables the publication of the GLUE2 information model both in the LDAP and
## XML renderings.
## The information tree is populated based on the contents of the schema-neutral
## [infosys/cluster] and [queue:name] blocks and the GLUE2 specific schema sub-blocks.
#[infosys/glue2]
## admindomain_name = string - The Name attribute for the admindomain. This will show
## in the top-BDII to group the resources belonging to this cluster.
## To group a bunch of clusters under the same AdminDomain, just use the same name.
## If not specified, it will default to UNDEFINEDVALUE.
## default: UNDEFINEDVALUE
#admindomain_name=ARC-TESTDOMAIN
## admindomain_description = text - The free-form description of this domain.
## default: undefined
#admindomain_description=ARC test Domain
## admindomain_www = url - The URL pointing at a site holding information about the AdminDomain.
## default: undefined
#admindomain_www=http://www.nordugrid.org/
## admindomain_distributed = yes/no - Set this to yes if the domain is distributed,
## that is, if the resources belonging to the domain
## are considered geographically distributed.
## allowedvalues: yes no
## default: no
#admindomain_distributed=yes
## admindomain_owner = email - The contact email of a person responsible for the domain.
## default: undefined
#admindomain_owner=admin@nordugrid.org
## admindomain_otherinfo = text - Free-form text that fills the OtherInfo GLUE2 field.
## No need to set it; used only for future development.
## default: undefined
#admindomain_otherinfo=Test Other info
## computingservice_qualitylevel = qlevel - Allows a sysadmin to define different
## GLUE2 QualityLevel values for A-REX.
## Refer to the GLUE2 documentation for the QualityLevel definitions.
## allowedvalues: production pre-production testing development
## default: production
#computingservice_qualitylevel=production
##
##
### end of the [infosys/glue2] schema block ########################
### The [infosys/glue2/ldap] schema sub-block ###########################
## Enables the publication of the LDAP rendering of the GLUE2 information model.
#[infosys/glue2/ldap]
## showactivities = yes/no - Enables GLUE2 ComputingActivities in the LDAP rendering.
## allowedvalues: yes no
## default: no
#showactivities=no
##
##
### end of the [infosys/glue2/ldap] schema sub-block ###########################
### The [infosys/cluster] block ###################################################
## The information schema-neutral blocks [infosys/cluster] and [queue:NAME] contain attributes
## that describe the computing cluster together with its queues. The parameters are
## available for every information model/schema representation.
##
## This block describes the cluster characteristics of a Computing Element.
## The information specified here is mostly used by the Infosys ARC component.
#[infosys/cluster]
## alias = text - An arbitrary alias name of the cluster, optional.
## default: undefined
#alias=Big Blue Cluster in Nowhere
## hostname = fqdn - Set the FQDN of the frontend.
## default: $VAR{[common]hostname}
#hostname=myhost.org
## interactive_contactstring = url - The contact URL for interactive logins; set this
## if the cluster supports some sort of grid-enabled interactive login (gsi-ssh).
## multivalued
## default: undefined
#interactive_contactstring=gsissh://frontend.cluster:2200
## CHANGE: REMOVED, obsoleted in ARC 7.0.0
## comment = text - Free text field for additional comments on the cluster in a single
## line, no newline character is allowed!
## default: undefined
#comment=This cluster is specially designed for XYZ applications: www.xyz.org
## cluster_location = formatted_string - The geographical location of the cluster, preferably
## specified as a postal code with a two letter country prefix.
## default: undefined
#cluster_location=DK-2100
## cluster_owner = text - Can be used to indicate the owner of a resource; multiple
## entries can be used.
## multivalued
## default: undefined
#cluster_owner=World Grid Project
#cluster_owner=University of NeverLand
## advertisedvo = vo_name - This attribute is used to advertise
## which VOs are authorized on the cluster.
## Add only one VO for each advertisedvo entry. Multiple VOs in the same line
## will cause errors.
## These entries will be shown in all GLUE2 AccessPolicy and MappingPolicy
## objects, that is, they will apply to all Endpoints (Interfaces) and all
## Shares (currently queues). You can override the advertised VOs per queue.
## The information is also published in the NorduGrid schema.
## NOTE that it is IMPORTANT to understand that this parameter is NOT enforcing any
## access control, it is just for information publishing!
## multivalued
## default: undefined
#advertisedvo=atlas
#advertisedvo=community.nordugrid.org
## clustersupport = email - This is the support email address of the resource.
## multivalued
## default: undefined
#clustersupport=arc.support@mysite.org
#clustersupport=arc.support@myproject.org
## homogeneity = True/False - Determines whether the cluster consists of identical NODES with
## respect to cputype, memory and installed software (opsys). The frontend does NOT
## need to be homogeneous with the nodes. In case of inhomogeneous nodes, try
## to arrange the nodes into homogeneous groups assigned to a queue and use
## queue-level attributes.
## False may trigger multiple GLUE2 ExecutionEnvironments to be published
## if applicable.
## allowedvalues: True False
## default: True
#homogeneity=True
## architecture = string - Sets the hardware architecture of the NODES. The "architecture"
## is defined as the output of "uname -m" (e.g. i686). Use this cluster
## attribute if only the NODES are homogeneous with respect to the architecture.
## Otherwise the queue-level attribute may be used for inhomogeneous nodes. If
## the frontend's architecture agrees with the nodes', "adotf" (Automatically
## Determine On The Frontend) can be used to request automatic determination.
## default: adotf
#architecture=adotf
## opsys = formatted_string - This multivalued attribute is meant to describe the operating system
## of the computing NODES. Set it to the opsys distribution of the NODES and not
## the frontend! opsys can also be used to describe the kernel or libc version
## in case those differ from the originally shipped ones. The distribution name
## should be given as distroname-version.number, where spaces are not allowed.
## Kernel versions should come in the form kernelname-version.number.
## If the NODES are inhomogeneous with respect to this attribute do NOT set it on
## cluster level, arrange your nodes into homogeneous groups assigned to a queue
## and use queue-level attributes.
## Setting opsys=adotf will result in Automatic Determination of the Operating System
## On The Frontend, which should only be used if the frontend has the same
## OS as the nodes.
## The adotf discovered values will be used to fill GLUE2 OSName, OSVersion
## and OSFamily unless these values are explicitly defined for each queue.
## See the [queue:queuename] block for their usage.
## NOTE that any custom value other than "adotf" does NOT affect values in the GLUE2 schema.
## multivalued
## default: adotf
#opsys=Linux-2.6.18
#opsys=glibc-2.5.58
#opsys=CentOS-5.6
## nodecpu = formatted_string - This is the cputype of the homogeneous nodes. The string is
## constructed from /proc/cpuinfo as the value of "model name" and "@" and the
## value of "cpu MHz". Do NOT set this attribute on cluster level if the NODES
## are inhomogeneous with respect to cputype, instead arrange the nodes into
## homogeneous groups assigned to a queue and use queue-level attributes. Setting
## nodecpu=adotf will result in Automatic Determination On The Frontend,
## which should only be used if the frontend has the same cputype as the
## homogeneous nodes.
## default: adotf
#nodecpu=AMD Duron(tm) Processor @ 700 MHz
## nodememory = number - This is the amount of memory (specified in MB) on the node
## which can be guaranteed to be available for the application. Please note that
## in most cases it is less than the physical memory installed in the nodes.
## Do NOT set this attribute on cluster level if the NODES are inhomogeneous
## with respect to their memories, instead arrange the nodes into homogeneous
## groups assigned to a queue and use queue-level attributes.
## default: undefined
#nodememory=64000
## middleware = string - This multivalued attribute shows the installed grid software on
## the cluster. Nordugrid-ARC is automatically set; no need to specify it.
## multivalued
## default: undefined
#middleware=my software
## nodeaccess = inbound/outbound - Determines how the nodes can connect to the internet.
## Not setting anything means the nodes are sitting on a private isolated network.
## "outbound" access means the nodes can connect to the outside world while
## "inbound" access means the nodes can be connected to from outside.
## inbound & outbound access together means the nodes are sitting on a fully open network.
## multivalued
## default: undefined
## allowedvalues: inbound outbound
#nodeaccess=inbound
#nodeaccess=outbound
## localse = url - This multivalued parameter tells the BROKER that certain URLs (and
## locations below that) should be considered "locally" available to the cluster.
## multivalued
## default: undefined
#localse=gsiftp://my.storage/data1/
#localse=gsiftp://my.storage/data2/
## cpudistribution = formatted_string - This is the CPU distribution over nodes
## given in the form "ncpu:m" where:
## "n" is the number of CPUs per machine
## "m" is the number of such machines
## Example: "1cpu:3,2cpu:4,4cpu:1" represents a cluster with
## 3 single CPU machines, 4 dual CPU machines and one machine with 4 CPUs.
## default: undefined
#cpudistribution=1cpu:3,2cpu:4,4cpu:1
## maxcputime = number - This is the maximum CPU time specified in seconds
## that the LRMS can allocate for the job. The default if not defined
## is that infoproviders get this value automatically from the LRMS.
## The purpose of this option is to tweak and override the discovered value,
## or to publish this value in case the LRMS module does not support automatic
## detection.
## default: undefined
#maxcputime=300000
## mincputime = number - This is the minimum CPU time specified in seconds
## that the LRMS can allocate for the job. The default if not defined
## is that infoproviders get this value automatically from the LRMS.
## The purpose of this option is to tweak and override the discovered value,
## or to publish this value in case the LRMS module does not support automatic
## detection.
## default: undefined
#mincputime=1200
## maxwalltime = number - This is the maximum wall time specified in
## seconds that the LRMS can allocate for the job. The default
## if not defined is that infoproviders get this value automatically
## from the LRMS.
## The purpose of this option is to tweak and override the discovered value,
## or to publish this value in case the LRMS module does not support automatic
## detection.
## default: undefined
#maxwalltime=600000
## minwalltime = number - This is the minimum wall time specified in
## seconds that the LRMS can allocate for the job. The default
## if not defined is that infoproviders get this value automatically
## from the LRMS.
## The purpose of this option is to tweak and override the discovered value,
## or to publish this value in case the LRMS module does not support automatic
## detection.
## default: undefined
#minwalltime=1800
##
##
### end of the [infosys/cluster] block #####################
### The [infosys/accesscontrol] block ################################
## A-REX allows controlling access to public information for non-authorized users.
## If this block contains no entries, public information is available to anyone.
#[infosys/accesscontrol]
## CHANGE: NEW in 7.0.0
## allowaccess = authgroup - Defines that only the specified authgroup members are authorized to access
## public information. For more information see the similar configuration option in the [arex/ws/jobs] block.
## default: undefined
## multivalued
#allowaccess=monitors
## denyaccess = authgroup - Defines that the specified authgroup members are REJECTED, i.e. not authorized
## to access public information. For more information see the similar configuration option in the [arex/ws/jobs] block.
## default: undefined
## multivalued
#denyaccess=badactors
##
##
### end of the [infosys/accesscontrol] block ####################
### The [queue:name] blocks #######################################
## Each grid-enabled queue on the cluster should be represented and described
## by a separate queue block. The queue_name should be used as a label in the block name.
## In case of fork, or other LRMSes with no queue names, just use any unique string.
## A queue can represent a PBS/LSF/SGE/SLURM/LL queue, an SGE pool, a Condor
## pool or a single machine in case of the 'fork' type of LRMS.
## This block describes the queue characteristics.
#[queue:gridlong]
## homogeneity = True/False - Determines whether the queue consists of identical NODES with
## respect to cputype, memory and installed software (opsys).
## In case of inhomogeneous nodes, try to arrange the nodes into homogeneous
## groups and assign them to a queue.
## Possible values are True and False; the default is True.
## allowedvalues: True False
## default: $VAR{[infosys/cluster]homogeneity}
#homogeneity=True
## comment = text - A free-form text field for additional comments on the queue in a single
## line, no newline character is allowed!
## default: undefined
#comment=This queue is nothing more than a condor pool
## pbs_queue_node = string - In PBS you can assign nodes to a queue
## (or a queue to nodes) by using the "node property" mark in the PBS config.
##
## Essentially, the "pbs_queue_node" value is used to construct the "nodes=" string in the
## PBS script, such as "nodes=count:pbs_queue_node" where "count" is taken from
## the job description (1 if not specified).
##
##
### end of the [infosys/accesscontrol] block ####################

### The [queue:name] blocks #######################################
## Each grid-enabled queue on the cluster should be represented and described
## by a separate queue block. The queue_name should be used as a label in the block name.
## In case of fork, or other LRMSes with no queue names, just use any unique string.
## A queue can represent a PBS/LSF/SGE/SLURM/LL queue, an SGE pool, a Condor
## pool or a single machine in case of the 'fork' type of LRMS.
## This block describes the queue characteristics.
#[queue:gridlong]

## homogeneity = True/False - Determines whether the queue consists of identical NODES with
## respect to cputype, memory and installed software (opsys).
## In case of inhomogeneous nodes, try to arrange the nodes into homogeneous
## groups and assign them to a queue.
## Possible values: True, False; the default is True.
## allowedvalues: True False
## default: $VAR{[infosys/cluster]homogeneity}
#homogeneity=True

## comment = text - A free-form text field for additional comments on the queue in a single
## line, no newline character is allowed!
## default: undefined
#comment=This queue is nothing more than a condor pool

## pbs_queue_node = string - In PBS you can assign nodes to a queue
## (or a queue to nodes) by using the "node property" mark in the PBS config.
##
## Essentially, the "pbs_queue_node" value is used to construct the "nodes=" string in the
## PBS script, such as "nodes=count:pbs_queue_node" where "count" is taken from
## the job description (1 if not specified).
##
## This corresponds to setting the following parameter in PBS for this queue:
## resources_default.neednodes = cpu_topology[:pbs_queue_node]
##
## Setting "pbs_queue_node" changes how the queue totalcpus and user freecpus are
## determined for this queue.
##
## You shouldn't use this option unless you are sure that your PBS configuration makes
## use of the above configuration. Read the NorduGrid PBS instructions for more information:
## http://www.nordugrid.org/documents/pbs-config.html
## default: undefined
#pbs_queue_node=gridlong_nodes
#pbs_queue_node=ppn=4:ib

## sge_jobopts = string - Per-queue override of the additional SGE options to be used when
## submitting jobs to this SGE queue.
## default: undefined
#sge_jobopts=-P atlas -r yes

## condor_requirements = string - It may be defined for each Condor queue.
## Use this option to determine which nodes belong to the current queue.
## The value of "condor_requirements" must be a valid constraint string
## which is recognized by a "condor_status -constraint ..." command. It can
## reference pre-defined ClassAd attributes (like Memory, Opsys, Arch, HasJava,
## etc.) but also custom ClassAd attributes. To define a custom attribute on a
## Condor node, just add two lines like the ones below in the "$(hostname).local"
## config file on the node:
## NORDUGRID_RESOURCE=TRUE
## STARTD_EXPRS = NORDUGRID_RESOURCE, $(STARTD_EXPRS)
## A job submitted to this queue is allowed to run on any node which satisfies
## the "condor_requirements" constraint. If "condor_requirements" is not set,
## jobs will be allowed to run on any of the nodes in the pool. When configuring
## multiple queues, you can differentiate them based on memory size or disk
## space, for example.
## default: $VAR{[lrms]condor_requirements}
#condor_requirements=(OpSys == "linux" && NORDUGRID_RESOURCE && Memory >= 1000 && Memory < 2000)

## slurm_requirements = string - Use this option to specify extra SLURM-specific parameters.
## default: undefined
#slurm_requirements=memory on node >> 200

## totalcpus = number - Manually sets the number of cpus assigned to the queue. There is no need to
## specify this parameter in case the queue_node_string method was used to assign
## nodes to the queue (in that case it is dynamically calculated and the static
## value is overwritten) or when the queue has access to the entire cluster
## (in that case the cluster-level totalcpus is the relevant parameter).
## default: undefined
#totalcpus=32

## The queue-level configuration parameters nodecpu, nodememory, architecture and opsys
## should be set if they are homogeneous over the nodes assigned
## to the queue AND they are different from the cluster-level value.
## Their meanings are described in the [infosys/cluster] block.
## Usage: this queue collects nodes with "nodememory=512" while another queue has nodes
## with "nodememory=256" -> don't set the cluster attributes but use the queue-level
## attributes. A sketch of this setup is shown below.
## When the frontend's architecture or cputype agrees with the queue
## nodes, "adotf" (Automatically Determine On The Frontend) can be used to
## request automatic determination of architecture or nodecpu.
## For GLUE2, fine-tuning of the ExecutionEnvironments' OSName, OSVersion and OSFamily
## is possible with the dedicated options osname, osversion and osfamily.
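## As a sketch of the usage described above (queue names and values are
## illustrative), two homogeneous groups of nodes could be published as
## separate queues instead of setting cluster-level attributes:
#[queue:bigmem]
#nodememory=512
#[queue:smallmem]
#nodememory=256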
## nodecpu = formatted_string - see the description in the [infosys/cluster] block
## default: $VAR{[infosys/cluster]nodecpu}
#nodecpu=AMD Duron(tm) Processor @ 700 MHz

## nodememory = number - see the description in the [infosys/cluster] block
## default: $VAR{[infosys/cluster]nodememory}
#nodememory=512

## defaultmemory = number - The LRMS memory request of the job, to be set by the LRMS backend
## scripts if a user submits a job without specifying how much memory should be used.
## The order of precedence is: job description -> [lrms-defaultmemory] -> [queue-defaultmemory].
## This is the amount of memory (specified in MB) that a job will request.
## default: undefined
#defaultmemory=512

## architecture = string - see the description in the [infosys/cluster] block
## default: $VAR{[infosys/cluster]architecture}
#architecture=adotf

## opsys = formatted_string - see the description in the [infosys/cluster] block
## If osname or osversion is present, the values in opsys are ignored.
## multivalued
## default: $VAR{[infosys/cluster]opsys}
#opsys=Linux-2.6.18
#opsys=glibc-2.5.58

## osname = string - Only for GLUE2:
## overrides the values defined in opsys for a single ExecutionEnvironment.
## Configuration of multiple ExecutionEnvironments for the same queue
## is not supported. Create a different queue for that.
## default: undefined
#osname=Ubuntu

## osversion = string - Only for GLUE2:
## overrides the values defined in opsys for a single ExecutionEnvironment.
## Configuration of multiple ExecutionEnvironments for the same queue
## is not supported. Create a different queue for that.
## default: undefined
#osversion=12.04

## osfamily = string - Only for GLUE2:
## overrides the values defined in opsys for a single ExecutionEnvironment.
## Configuration of multiple ExecutionEnvironments for the same queue
## is not supported. Create a different queue for that.
## default: undefined
#osfamily=linux

## benchmark = name value - Defines resource benchmark results for accounting and
## information publishing. The nodes in the same queue are assumed to be homogeneous
## with respect to the benchmark performance.
## In case multiple benchmarks are specified:
## - the accounting subsystem will use ONLY THE FIRST defined benchmark;
## - the infosys will publish all defined benchmark values.
##
## The values represent per-core CPU performance.
## NOTE that APEL accounting services support the "HEPscore23", "HEPSPEC" or "Si2k" benchmark types only.
## multivalued
## default: HEPSPEC 1.0
#benchmark=HEPscore23 16.5
#benchmark=HEPSPEC 12.26
#benchmark=Si2k 3065

## allowaccess = authgroup - Defines that the specified authgroup members
## are authorized to submit jobs to this queue of the ARC-CE after the user has already been granted access to the CE via one of the interfaces.
## The related config option "denyaccess" (see below) can be used to deny submission to the queue.
## Multiple "allowaccess" and "denyaccess" authorization statements are allowed within a configuration block.
## These statements are processed sequentially in the order they are specified in the
## config block. The processing stops at the first "allowaccess" or "denyaccess" statement matching the authgroup membership.
## If there are no authorization statements specified, then the queue is accessible by everyone already authorized.
## default: undefined
## multivalued
#allowaccess=biousers
#allowaccess=atlasusers

## denyaccess = authgroup - Defines that the specified authgroup members
## are NOT allowed to submit jobs to this queue of the ARC-CE, despite the user already being granted access to the CE via one of the interfaces.
## The related config option "allowaccess" (see above) can be used to grant job submission to the queue.
## Multiple "allowaccess" and "denyaccess" authorization statements are allowed within a configuration block.
## These statements are processed sequentially in the order they are specified in the
## config block. The processing stops at the first "allowaccess" or "denyaccess" statement matching the authgroup membership.
## If there are no authorization statements specified, then the queue is accessible by everyone already authorized.
## default: undefined
## multivalued
#denyaccess=blacklisted-for-the-queue
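## To illustrate the sequential, first-match processing of the two options
## above (authgroup names are the illustrative ones used above), the
## following combination rejects members of "blacklisted-for-the-queue"
## even if they also belong to "atlasusers", because the matching
## "denyaccess" statement is evaluated first:
#denyaccess=blacklisted-for-the-queue
#allowaccess=atlasusers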
## advertisedvo = vo_name - This attribute is used to advertise
## which VOs are authorized on the [queue:name] of the cluster.
## Add only one VO for each advertisedvo entry. Multiple VOs in the same line
## will cause errors.
## These entries will be shown in the MappingPolicy objects, that is,
## they will apply to the Shares that correspond to the queue.
## The information is also published in the NorduGrid schema.
## NOTE that if you have also configured "advertisedvo" in the [infosys/cluster] block,
## the resulting advertised VOs per queue will override whatever is defined in the [infosys/cluster] block!
## NOTE that it is IMPORTANT to understand that this parameter is NOT enforcing any
## access control, it is just for information publishing!
## multivalued
## default: $VAR{[infosys/cluster]advertisedvo}
#advertisedvo=atlas
#advertisedvo=community.nordugrid.org

## maxslotsperjob = number - This GLUE2-specific parameter configures the MaxSlotsPerJob value
## on a particular queue. This value is usually generated by the LRMS infocollectors,
## but there are cases in which a system administrator might like to tweak it.
## The default is to publish what is returned by the LRMS, and if nothing is
## returned, NOT to publish the MaxSlotsPerJob attribute.
## If a system administrator sets the value here, that value will be
## published instead, regardless of what the LRMS returns.
## Each LRMS might have a different meaning for this value.
## default: undefined
#maxslotsperjob=5

## forcedefaultvoms = VOMS_FQAN - Specifies the VOMS FQAN which the user will be
## assigned if their credentials contain no VOMS attributes.
## default: $VAR{[arex]forcedefaultvoms}
#forcedefaultvoms=/vo/group/subgroup

## maxcputime = number - This value overrides the one defined in
## the [infosys/cluster] block. See the description in that block.
## default: undefined
#maxcputime=300000

## mincputime = number - This value overrides the one defined in
## the [infosys/cluster] block. See the description in that block.
## default: undefined
#mincputime=1200

## maxwalltime = number - This value overrides the one defined in
## the [infosys/cluster] block. See the description in that block.
## default: undefined
#maxwalltime=600000

## minwalltime = number - This value overrides the one defined in
## the [infosys/cluster] block. See the description in that block.
## default: undefined
#minwalltime=1800
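## Putting some of the options of this block together, a minimal queue
## definition might look like the following sketch (all values are
## illustrative and only reuse the per-option examples above):
#[queue:gridlong]
#comment=Long jobs on the large-memory nodes
#homogeneity=True
#nodememory=64000
#defaultmemory=512
#benchmark=HEPSPEC 12.26
#advertisedvo=atlas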
##
##
### end of the [queue:name] blocks ########################

### The [datadelivery-service] block ###############################
## This block configures and enables the data delivery service. This service is intended to off-load
## data-staging from A-REX and is usually deployed on one or more separate machines.
##
## This service can also act as an independent data transfer service; in that case it would require
## an intelligent data manager that could replace A-REX's intelligence.
##
#[datadelivery-service]

## *transfer_dir = path - The directory (or directories) on the DDS host in which
## the service is allowed to read and write.
## When DDS is used as a remote transfer service assisting A-REX then this is usually
## one or more cache and/or session directories shared as a common mount with A-REX.
## multivalued
## default: undefined
#transfer_dir=/shared/arc/cache
#transfer_dir=/shared/arc/session

## hostname = FQDN - The hostname of the machine on which the DDS service runs.
## default: $EXEC{hostname -f}
#hostname=localhost

## port = port - The port on which the service listens.
## default: 443
#port=8443

## pidfile = path - The pid file of the daemon.
## default: /run/arched-datadelivery-service.pid
#pidfile=/run/arched-datadelivery-service.pid

## logfile = path - The log file of the daemon.
## default: /var/log/arc/datadelivery-service.log
#logfile=/tmp/delivery.log

## loglevel = level - Sets the loglevel of the data delivery service between 0
## (FATAL) and 5 (DEBUG). Defaults to 3 (INFO).
## allowedvalues: 0 1 2 3 4 5
## default: 3
#loglevel=4

## user = username - Overwrites the user under which the service runs. The default is the user
## starting the service. DDS is very limited if not run as root.
## default: undefined
#user=ddsuser

## secure = yes/no - Set to "no" if the service should run without a host certificate. In this case
## the corresponding deliveryservice option in the [arex/data-staging] A-REX configuration block
## should use http rather than https URLs.
## allowedvalues: yes no
## default: yes
#secure=no

## *allowed_ip = ip - IP address authorized to access the service. Normally this is the
## A-REX host IP. By default the delivery service listens on all available
## interfaces, so if both IPv4 and IPv6 are enabled on this and the A-REX host,
## remember to add both A-REX host IPs here.
## multivalued
## default: undefined
#allowed_ip=192.0.2.1
#allowed_ip=2001:db8:85a3::8a2e:370:7334

## allowed_dn = DN - DN authorized to access the service. This option restricts access
## to the specified DNs (of the users who submit jobs to A-REX). It is only effective if secure=yes.
## multivalued
## default: undefined
#allowed_dn=/O=Grid/O=Big VO/CN=Main Boss

### X509 related parameters
## x509_host_key = path - Optional parameter to overwrite the [common] block values.
## default: $VAR{[common]x509_host_key}
#x509_host_key=/etc/grid-security/hostkey.pem

## x509_host_cert = path - Optional parameter to overwrite the [common] block values.
## default: $VAR{[common]x509_host_cert}
#x509_host_cert=/etc/grid-security/hostcert.pem

## x509_cert_dir = path - Optional parameter to overwrite the [common] block values.
## default: $VAR{[common]x509_cert_dir}
#x509_cert_dir=/etc/grid-security/certificates
##
##
### end of the [datadelivery-service] block ##############

### The [custom:name] block ###################################################
## This optional block is for those who wish to include non-ARC configuration
## in arc.conf. Custom blocks will be ignored by ARC components, including the
## configuration validator. Any non-ARC configuration which is not in a
## custom block will be flagged as an error by the validator and A-REX will not
## start.
#[custom:mytool] ## ## ### end of the [custom] block ################################################# nordugrid-arc-7.1.1/src/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347016211 xustar0030 mtime=1759498983.452718684 30 atime=1759499015.173212494 30 ctime=1759499024.736391536 nordugrid-arc-7.1.1/src/Makefile.in0000644000175000002070000006307515067751347020126 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = 
$(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ 
GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE 
= @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @DOC_ENABLED_FALSE@@HED_ENABLED_TRUE@BUILD_SOURCES = external doc hed libs tests services clients utils wn @DOC_ENABLED_TRUE@@HED_ENABLED_TRUE@BUILD_SOURCES = external doc hed libs tests services clients utils wn doxygen @HED_ENABLED_FALSE@BUILD_SOURCES = clients SUBDIRS = $(BUILD_SOURCES) DIST_SUBDIRS = external doc hed libs tests services clients utils doxygen wn all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd 
$(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
@HED_ENABLED_FALSE@uninstall-local: @HED_ENABLED_FALSE@install-exec-hook: clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-exec-hook install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-local .MAKE: $(am__recursive_targets) install-am install-exec-am \ install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-exec-hook \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-local .PRECIOUS: Makefile # This won't work in case of cross-compilation. Please # some autotools experts fix it. @HED_ENABLED_TRUE@install-exec-hook: @HED_ENABLED_TRUE@ if test "x$(build_triplet)" = "x$(host_triplet)"; then env LD_LIBRARY_PATH=$(DESTDIR)$(libdir):$(LD_LIBRARY_PATH) $(top_builddir)/src/utils/hed/arcplugin$(EXEEXT) -c $(DESTDIR)$(pkglibdir) -c $(DESTDIR)$(pkglibdir)/test -c $(DESTDIR)$(pkglibdir)/external; else echo "No .apd files since we are cross-compiling"; fi @HED_ENABLED_TRUE@uninstall-local: @HED_ENABLED_TRUE@ test "x$(build_triplet)" = "x$(host_triplet)" && rm -f $(DESTDIR)$(pkglibdir)/*.apd $(DESTDIR)$(pkglibdir)/test/*.apd $(DESTDIR)$(pkglibdir)/external/*.apd # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/PaxHeaders/doxygen0000644000000000000000000000013215067751427015543 xustar0030 mtime=1759499031.595462031 30 atime=1759499034.762510154 30 ctime=1759499031.595462031 nordugrid-arc-7.1.1/src/doxygen/0000755000175000002070000000000015067751427017522 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327017653 xustar0030 mtime=1759498967.684692305 30 atime=1759498967.827493073 30 ctime=1759499031.586809734 nordugrid-arc-7.1.1/src/doxygen/Makefile.am0000644000175000002070000000634615067751327021566 0ustar00mockbuildmock00000000000000SWIG_DEPENDS = \ $(top_srcdir)/swig/common.i \ $(top_srcdir)/swig/credential.i \ $(top_srcdir)/swig/data.i \ $(top_srcdir)/swig/compute.i INPUT = $(top_srcdir)/src/hed/libs/common \ $(top_srcdir)/src/hed/libs/credential \ $(top_srcdir)/src/hed/libs/data \ $(top_srcdir)/src/libs/data-staging \ $(top_srcdir)/src/hed/libs/compute CPP_EXAMPLES = $(top_srcdir)/src/hed/libs/common/examples \ $(top_srcdir)/src/hed/libs/credential/examples \ $(top_srcdir)/src/hed/libs/data/examples \ $(top_srcdir)/src/libs/data-staging/examples \ $(top_srcdir)/src/hed/libs/compute/examples PYTHON_EXAMPLES = $(top_srcdir)/python/examples SPECIALISATION_MAPPINGS = JobDescription SPECIALISATION_MAPPINGS_JobDescription = \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/XRSLParser.cpp \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/ADLParser.cpp .SECONDEXPANSION: $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)): %_Mapping.dox: $(srcdir)/create-mapping-documentation.py $(top_srcdir)/src/hed/libs/compute/%.h $$(SPECIALISATION_MAPPINGS_%) $(PYTHON) $^ $*_Mapping.dox Doxyfile.SDK.build: $(top_srcdir)/src/doxygen/Doxyfile.SDK cp $(srcdir)/Doxyfile.SDK Doxyfile.SDK.build sed "s/@TOP_SRCDIR@/$(subst /,\/,$(top_srcdir))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@INPUT@/$(subst /,\/,$(INPUT) $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@EXAMPLES@/$(subst /,\/,$(CPP_EXAMPLES) $(PYTHON_EXAMPLES))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/Doxyfile.SDK.layout.xml/Doxyfile.SDK.build.layout.xml/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build for mapping in $(SPECIALISATION_MAPPINGS); do \ sed "s/^FILTER_PATTERNS[[:space:]]*=/& *\/$${mapping}.h=$(subst /,\/,$(srcdir))\/adapt-and-filter-mapping-attributes.sed/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp;\ mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build;\ done Doxyfile.SDK.build.layout.xml: $(top_srcdir)/src/doxygen/Doxyfile.SDK.layout.xml cp $(srcdir)/Doxyfile.SDK.layout.xml Doxyfile.SDK.build.layout.xml SDKDEPENDENCIES = Doxyfile.SDK.build Doxyfile.SDK.build.layout.xml \ $(srcdir)/add-bindings-deviations-to-dox.py \ $(srcdir)/images/arcsdk.png \ $(srcdir)/adapt-and-filter-mapping-attributes.sed \ $(SWIG_DEPENDS) \ $(wildcard $(addsuffix /*.h, $(INPUT))) \ $(wildcard $(addsuffix /*.cpp, $(CPP_EXAMPLES))) \ $(wildcard $(addsuffix /*.py, $(PYTHON_EXAMPLES))) \ $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)) SDK: $(SDKDEPENDENCIES) doxygen -v | awk -F . 
'{ exit !($$1 >= 2 || $$1 == 1 && $$2 >= 8) }' || (echo "doxygen version 1.8.0 or greater required (version $$(doxygen -v) found)" && exit 1) doxygen Doxyfile.SDK.build # Postprocessing: Add deviations from SDK API for language bindings (Python). for file in $(SWIG_DEPENDS); do $(PYTHON) $(srcdir)/add-bindings-deviations-to-dox.py $${file} SDK/html; done EXTRA_DIST = Doxyfile.SDK Doxyfile.SDK.layout.xml images/arcsdk.png \ add-bindings-deviations-to-dox.py \ adapt-and-filter-mapping-attributes.sed \ create-mapping-documentation.py CLEANFILES = SDK nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/images0000644000000000000000000000012715067751427017014 xustar0029 mtime=1759499031.59146197 29 atime=1759499034.76351017 29 ctime=1759499031.59146197 nordugrid-arc-7.1.1/src/doxygen/images/0000755000175000002070000000000015067751427020767 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/doxygen/images/PaxHeaders/arcsdk.png0000644000000000000000000000013215067751327021041 xustar0030 mtime=1759498967.685490916 30 atime=1759498967.827493073 30 ctime=1759499031.592115003 nordugrid-arc-7.1.1/src/doxygen/images/arcsdk.png0000644000175000002070000032432715067751327022746 0ustar00mockbuildmock00000000000000
[binary PNG image data: images/arcsdk.png]
,3|ôÑ2²³³(±:áp8p¹Ü¤¥Ù((Èp/:;;•{¾hÑEX­V>ùdyBšM›6sé¥ Ñh4¬Y³NQ(b˜L&-º“É„,ˇ\²cG5{÷îKuÞ¼Ù”——ÑÞÞÁ«¯¾‘~Ö¬¤¥Ùxë­w•-~ø ôû³çäd A z½Ž‹/¾€[o½™üàÇ<öØß¸ÿþ_* ‚_¢ºz—rÎØ±cyùåÈËëŸðzíµ×yíµ×ذa-~¿ŸÓN;3¡¬[o½™ßýî7ƒÊ"I’"ËYgÉ„ Sèé顦f?MMM\ýMôõõ)é{ìÏ\{í5<üðÿòë_ÿVYŸ5eÊdþõ¯')))QÒú|~ªªÆðóŸÿ”{îù›7oá¶ÛîPÚg£ÑÈÃ?ÈÍ7ßD€ÿýß_ó裿Qò>ûìù¼üòbêëëùÊWî`íÚu¨Õj¾ûÝ{øÑ~0èõ=øàÃüéOÁãñ0kÖLÞyçÍAÓ ¾Xá -ÍFaaM¬^½–Í›·1jÔH&NŸ0{Ÿ ååe”–³mÛÚÚÚùä“O¹üòK”ßÕj5çž;Ÿ¦¦fvì¨fûöäççSR2‚õë7ÒÕՅݞ΄ ãÙ·¯†ÆÆ&–/_Å¢E)yhµæÏŸG à'33SQ(ÒÒlLš4€ôô4ZZZÙ¶m;f³™9sÎÄétñé§+X±b5ÅÅ#èíícóæ­H’ÄĉãÉÎÎbõêµ8Τ×VY9Š;«q¹\,]ú:ÙÙYŒ?V\÷õ9X»v=š¹sçðñÇËX·nå¨Õj–-[Ï磨¨ªªQrrrü)ßã‰Ç“——̾} IDAT{HWè©S'c±X0›Mèt: ó©«kàÀZÆKCCcô™•8wôèJjjöÓÕÕÍ‹/.¡°°€qãÆ0bDQBºÌÌ Æ‹tV«½^Ïüùs°Z­¸ÝnV¯^K_Ÿƒººú%dôèJ òY¿~#½½}ìØQÍôéSÙ³gM ¦L™„V«aÙ² eŽ;šQ£F¢V«Ù½{»wïeÛ¶ ùëõzæÎ=‹@ @vv6ÕÕ»immÃh4*ns+V¬RÒ»\.jkëÐj5Ì;‹ÅD($6sŸÛn»•Çûõõõôöö’––Æõ×_‡Çã! ²xñ‹ìرƒ?ýé¯<ðÀ/”óJKK8ýôÓ°X,¤¥¥qË-7QZZJ}}=O?ý ÿü翹îºk™1cú2ž~úY bUÈÍÍaëÖ­ôõõ‘››Ãµ×^C{{§v/¿¼„‡þ_t:W]uŸ}¶–7qç_ç­·^SòT«Õ\uÕ•@DÁñz½ÜtÓ­ÔÖÖ2aÂxJJJxã7ùÎw¾Çøñã˜6m*Ï>û=ô*•Š .XÀ¨Q£=º €;ï¼›µk×QRR¬Y3XºôU}ô7TUU)ŠU<--­üþ÷D¥’¸çžoE7PÌþ|Kp\  ÂyçÍöí;Ù¾}'n·›-[¶±oß~-ºH1ÿ¥‚Íf¥¬¬”ÜÜž}v1½½}8NÅ< W”¢¢B\.755ûill¤¤dõõ‘ë´iS())¦  çž{‘ŽŽÄCÕjMBd¬˜‚`45j¤r|ûö@DYèííSÒx<\.­­mȲLff&Ó§O`ëÖíƒ*ii6-ú26lâÀZÚÛ;øè£e455sÖYg( ¥ÓÓ3q»ÝÊýèêꦫ«I’p¹\h4Î9g^ÂâïÃQrrr Ô“Q\\”`¢-//£®®ÚÚº¨‚™¥/++pn^^.‹]Ć ›¨¯o¤±±‰ÆÆ&¦M›ÂäÉ•t&Sâ=‡ˆ¢ÐØØ„ßÀd2ât:q:] i"®G¥ôôô²aÃ&åùÄ>WT”3vìh€ ‚ÍfeÿþZ‡b?8­V“pbùVUbôèJ`‚‚ ÑhP©TAöìÙËøñcÉËËMv[ eâ­Ì¤¥¥ñoÜͶmÛijjF’$yäQª«-ð3gÎà±ÇþœpìÁÅæÍ[3f »wïáÓO—S]]=¨‚ …˜6m&õõ øý‘>æÊ+/Çb±(iÊËËùùϪ|¿÷ÞÿÀw¿{÷Ý÷ÿèêꢢb4«W¯fïÞ}èõwP­V“ ß{ï½Omm-|òɇ¨T*îºënž{îžyæY¦M›ÊO< À~ð?|ÿûýë5jjö³rå*$IâƒÞ%++“ŠŠ |ðaþûß§“*ÝÝ]øý~Ìf3%%%\}õ•Ÿ+(‡àø#„µZÍĉã?~,Ô²lÙ \.;wîbêÔÉŠËI8œZ“É„^¯Ççóáñx’úfdØ©©ÙÛ1ÆÌ„6›MÉC£Ñ q¹Ü‡}M±AclpÇãU”³9u(-ÍÆüùsp»Ý¬_¿‘Ý»÷²k×&Ož¤(¬\ÙyPyå›Í: 2TìþʲL8NP¨ŽÅÅÅhµZ[Û¨¯oÄår‘››3¨Ïfff&çw}}>ûl-µµõlܸ™ Æ ZÆŽÕ¬Zµ•J…ÍfU%ysG¬ì˜¹9öLSJ=K—¾ŽÛíŽÖˆßî¡"Åò¬‰Y>V®\CmmµµuŒYμy'÷&\‚ÃGF„½¤ÎΑ¿Z­¦¼¼œÖÖ6.¹ärvîÜ™Îë:×Ûo¿Ãm·}—+q2Äë:üsMÍ~222(.Áe—]ÊwÜ>dú}û"îAÓ§O ##ƒ²²Röí«aß¾eâfày5L:E鿦M›ÆsϽ üs/3göAçFÊ,--QÖ¹ÅÊ{0cÆŒá+_¹…üã_|ç;ßãþûÉÃ?È5×\5äõ ¾8ýQŽà˜ktT*ååeŠ¿y¬Ñйõôô¦”_oo_Ü<ù ¬µµˆøÒÊâáØ`¾­­]8š-6Ø>¸á´Z#³$yy¹ÜvÛÍ Ÿìì,å÷––VCÇÂ÷ûJ:“ɤX 2pÍÊØlVn¹å†„òÊËË”kèêêVÎ1âéÆfÓ­6r?œN÷€ã•••Ȳ̧ŸFfåG®Lš‡×ëU¢5ÙlVÅj‡ñûýJ1å.Æž=‘†þì³çrùå—0fLòÎ$ÆÁhbëI“*MM͸Ýnòòr¹öÚ+Y¸ð¢i’Ëw¨MµJJйúêË™;÷,Ôj5ûöÕ¤\ß'/b‘²àÈðz½<øàÃLœ8ƒÁÀSOý—;wrÁç³mÛ&þóŸ&œë zzzŽÿêWâr¹xôÑGس§šk®¹úå«Õjº»ÛÙ·o}ô>ßüæ×1†v1bÛ·ïÀétR[[D‚èt‘óýþ@ÂÄWqqä¼ø5Û·GþÏÏŒ'rss¤‰/³®®‡Ã‘P~l,ë+býÀoûk–/ÿ„+¯¼œžž¾õ­{Ü7ÁaA8ðx<<ÿüKØíéX,f|>¿²˜3öBäÓÕÕÅÊ•«ihh¤½½=i^ÕÕ»éî¹ˆ Î-‹2› …xë­wñùütvFfÙc.*cÆŒfùò•|öÙ:ªªF)ÒdddØ‘$ ‡ÃÉk¯½‰Ùl¦ªj••£Øºu;--­,^ü2999øý>l¶4fÍšNQQQT6'Ï=·˜ÒÒâ!çÕջشi3 E~N‡ÝžNZZë×o¤¯ÏÁâÅKÈÍÍ! 
¡V«™?ÙÙYdeeÑÑÑÁâÅ¿~ˆ¬m(.."//—––VÞzë] uu:™™™ôööñé§+8p NËé§Ï`üø1ìÜYÇãA¯×¢tÆMÔÔÀnOG¯×ÑÖYm³Y1ŠëRggo¼ñ6ƒ Æa4F¢mݺîîž!'#²ÎcÍÍ-¼øâ’„(U€}¨³³“µk×§li=ºŠ={öQ_ßÈK/-°¸¼§§—+V%DœÒëõÂd- —ËÍC=Bcc}ô1èt:þð‡ßýÖÎîînjjöh#ËÊJÈ‘>ú¬V+ ^¬Xïëëë©©©0!v´X¸ðË|úér~ÿû?âõzùä“eƒA**F2fÌhT*f³—ËÅ·¾usçΡ  €3Ï<ƒŒŒ ZZZ¸á†›9²œçŸ_Íób.¼ðKìÞ½‡‡z˜ššýŒ7†@ È 7\ÇèÑ£©®®æª«®eÞ¼¹<þø|ùË‘s322x÷Ýwyýõ7˜0a<Ï=÷“&MâÒK/á•W^C¥R%(‚/6BA8p»Ý ¾ò:–qãÆ*û Lž<ÞÞš3¡N§à  •Ù†œœì¤.íííAt:-3gÎP›••x½^6mÚBcc’$1jT³fÍR~³Ù̬YÓY»vÿŸ½û lë8½ÿGX°÷"JbE‘ê½7Y²{wʼní$›MöͽÙìÝ{7»ÙÜl’ëd×Iìôb'qKlYÍ’,Y½S¢(J좗ó~yHˆE’-6q~_$àÌ9œ€óœ™y¦¹¹h!))‘””dÖ¬Yɇ°Z;éì Þ™˜4)ø±ÔhÔòöÆÆ&ÊË+äçZ¹´gM€¾?èÑÑQÌŸ?WîÅX·n{÷ ­­M>O}«V-cß¾ÔÕÕ‡œ§ôôT.œÏž={ikkÉq³yÐKJf`±Xhoï ¢â²|÷‚“ÝÒÒR¨ªª!'gªœZïZ=?´Mòsññq,\œ4g2™9³˜S§Jå2iÌšUBWWMMÍòyÒhÔ¦Htt«V­àСÃtvÚäk¦×ëQ*•¤¤$“——Cyy¥¥gäë†ËŠK9|ø‹Uî0 (Á æÍÍ-ò{ Nx/D«9èA¸qN§“üàGòã9sfóïüyQ±ü¿þõo8|øë×oì·ÿŠË™3g6GŽå{ßû>¼Ñöå/?Í×¾ö žþg<ÿüÏúíw«lÞüGŽá7ÞâûßÿìøÕ¯~!ÿ-úæ7ÿ‘û·ïòÆoñÆo±iÓFV¬X΋/þœ§Ÿ~–÷ßf’S(|ík_åŽ;ÖÉû;wž;vòóŸ¿@rr2?þ(¿øÅÏyä‘Ç9xð`ãÆ <óÌ—€`àòãÿµµµlÛ¶‹Å"ׯÇ7¿ù7”U;wî.œ7Úõn€ËåÂét¡Õj p"¿ßÃáD«Õ„d8:xð0çÏ—1cÆt¦MËG©T…Üõïê²óÚko¢R©xôчðx<ƒŽ3—$ ›­‹ðð°A°ƒíg·ÛÑh4ý²/¹Ýn§œÑçZ^¯»ÝŽN§E¯×ºø’$Iò:ƒ«çx]]]èõºß§Ï磫+øz×n÷x¼x<´Zígj ºÝn|>ƒI’hnnÁív³oß~üþ÷ß¿!d²Úµ]]vaaaÖ%ÈŸ‡¾çÂívãñxÑétŸú=¸\.Ünz½®ßõôù|¸\nT*%:î¦æl8N</ƒ>¤Î=ŸmµZuSó…ѵÿaV®\9lÇ?øöÓ´7œ–ÏßøK¢“ІíõúÚ·o©©I!ë|c“ËåâÌ™³466âtºHLL`Ò¤LyøL_N§“½{?¡¥¥™ÈÈHâããÉÍÍ!**x}>¢¦¦†èèh–,YLXXµµµ=z ‡ÃALL )))dggˇ{ØívNž<…B¡`áÂÖ·µµ²²2L&……Óúm?sæ,¥¥¥ÄÅÅ3þÜ~7a.\¸À™3çP©TÌ;›ÔÔ`Rˆööv<„Íf£¤¤„ìì©ýŽ}üø .^¼ˆßï§  €’’b¹Þ¤¹¹™‚‚fÌýžõ¬yCff&¢®®ŽÈÈH,XЯgX»vìØ%„‰¢o€0sfq¿í}„'žxdj8±]»>À¬Y%ŽbáÖ‚ Âø²cÇ.1Äh¢HHˆ'ô[5¸‡F£&77û¦z„[Ç`00cÆtü~?II‰7”&U ß$e‘ÅHá³Â‘•5©ßj¼}ét:yüº0ò""ÂìÙAAi"Í© ‚ ‚ 2 ‚ ‚ ‚ ‚ ‚ ‚ 2 ‚ ã×µ)Xá[A¸9"@AAA&AAAd7æT’$œ—s8ê#:}†°ˆþ]ç×ð¸8í6$Ñ¥>,Tj á‘&”ʡ׆ø}ØmVü~ßÕlbQ(•Â"ÑêôC–“$ §Ý†Ç-~›†‹VoÀ9èJæÂõ5Ú*¹ÐtÀ3ÚU¹-Eêc˜–¸ˆp­iÈrv•³Ÿ`sµPÍ&•RK^Â#³†,ü”·§ÆRÖoE᳓€TóT²cg£RÞ\“ÿ¦„ƚ˸]Ž›ÝM¸Ia&â“3ÝÞÙÑJ{KýÖhbêh©'5+oÐ ÁçóPw¥I ŒpÍ&–ˆML#Â8øŠµMuWp9ºF°VSX¸‘ø”ÌѮƸt¤z o•þx´«qÛûÛ¹øÖŠ?©ø÷Âæîà»ÁëwpÍ&–­e¿aÓôdNúƒ–ùãÑÿMYó‘¬ÕÄ49¶˜/ÍûÑMísSCŒ?>_p’®Z­F­îÍ"$Z[ÛimëÀ ×‘”^§ 9n  ©¹‹•ˆð0bc¢ 3\·Þ=¯«R«Ð¨Õx½>ü~?*• ¦ÿé H·§_{ÎAKk;míLÆãû½AAAK†%@ðz}|çÿþ7~èÄA½NÇÌâi¬Z¾ƒ!˜ÓüåWÞæÒ嫤§%óå/>@—ÝÁ‹¿þmí"#ÂùÊÓb6 ì;pŒöìÇãñÊÇU*Ï(àsƒ©´.\¼Ì»ìÀbé )3on1ëï\5dÝÿ¶å#;Å«—²tñ\Þx{ ¥gËØ¸~ sgÍ)k³uñç×ßãjU-kV.fÅÒùò¶§ÎòÁÖÝØ½9Ùããbxäï6s3§SA̵7]ĺ0Âx!¥Ñ9Q7d:.{»—ŽzÆxÆxíÈÕO˜Ð†§A¨T*´Z .——ÛÍþCÇ©©kàéÍ¡RõÏ-ïñxùßޤ­Ý‚^§cócŸÃl2"I¿ùM.]¾| …‚ð0§‹@ €NüÒ´wXùËëïáõzQ(ÄÆDár{°ÙºˆŒˆ¸nÕëHINè~Ü|œ”(—ñûý9vš»÷ãpô_”©«ËÎ{|„Ëí&&& ½NG}CÍ-m¼ù·r 4–I’ÄG{àr¹‘€UËÈA]@’ØñÑ>¼^aazV,]0à1ÊÊ/Sq¹ªßóÙS'‘=e’üøju-G—ÒÜÒFJR‹Ì&6¦7µ$I8|‚ŠËW±;œÄÇŰxÁlâcoí›F­m;q†‹·ÛƒV«!2"œÔ”$¦OË‘¿ @€-Ûö›3™)YÁµ0¼>í>€ÏçC§Ó²bé|yŸ¦æVŽŸ:KSS+¶.̦H&e¦Q\T@dD8 f÷:Am}#N§ “)’ôÔ$fÑ]f0»÷Ânw°hþ,Ìf#;v}‚ÛíaùÒù„_Ó+'IŸ<†Õjcé¢9DFü;tô­­íÌ›3ƒØ˜èO}^‡›$IlÝñ1~€¨( çÍìW¦ôlÕ5õ €u«–Ê=‰K'‡Žž¢¡± ‡ÃE|| S²2˜1=¿ßBc'OŸ£®¾ NËê‹ú½F]}'OŸ`íªÅh4½½­ç.”sºômtZ-11QLŸ–ËÔÉ™·ðLãÉ‘×ð¹$åF0ivï‚aÍÎnk `u,º0'Þmºîñæ}>™co5âs=¯`M,†H5¥[[èlrãuP(Á`Ö•¢'{aÚ°¡žw$èlvÓQçF­S`NÖ¥¹þ~ÀG/VQq ƒìÅÑ,:}À2¿ÄŸ¾üî¯x6c¼) áu ~-4~o€€oè`Y¥Q Ò(A»Å‹ËæC €Á¨Æ`R£TÝþ#ËíT¶0ûsIhôýGÞ7^´SyÄ2àþ %Ì8¥÷ *uP¾‹öš`Ö s²ŽôF&Í2¡PŽŸs:lCŒz¬]¹˜%‹æàv{ø`ÛnŽ;MuM=uõM¤§%‡” üéÕw¨­kD­VñèCHJŒàèñR98(.Êgý«0ô$‰«Uµ»"åWðzƒ½ Ï|áód¤/\sKááaÖÑÖe§üÒ›ZºÿmÅbµÑÞüPÔ54…^¯ãµ·> ôlÙ ï9""œûî]‹ß ¸(öLì?tœÚÚyÈÒX¦P(0›Œ¼¹ûC¼^/÷­_ ÀÁCÇÙ½÷Þ÷ Ç(¿t…‡Oô{Þ`ÐËBéÙ2^}ó}9mkMmgÎ]äK›’€—_y›óeòþÕ5õœ.½Àãl’Ðc]]}#{öpÛÖó¥ÍmÆðÉÁc##˜’•A@’xåõ÷8_VR©äñ‡ï“??ÛvîåãOŽ„¤½mhlæÂÅ˨”JΟE{‡•Ÿþâ8½)Îêê9áQfE…yƒÖÛçó³c×'H’Ī q:]|´çjµŠ;×. 
)k±vòæ;[©èþžÎž9½_€ÐÕeç÷wpö|9 ‚Ær€ P(hjn£¬ü2jµŠ™ÅÓú gܺãcÚ;¬d¦§ÈÁÁ¡£§Ø²mwHOgM]ÇOžåðÑÓ|þÁõòoß*¯Öš’D^Îä×x÷ƒÁ „àyíùn¼·e'…~Ç*¯ÖP]SÏ7¾ºùa¼9¿«w—I"$@°wx9Ó dÌ >ßóx(sLâÜÎV<Ž¡Ó gÎ2!àø_Ü~äµzÖ~})Ó"oô­Œi:8øçzVoÈóÑ©zîüÖäë ¶æàz"Í7—î³ñ’ƒ÷¾{iÐíü —£o6på¨uÈã”lHdöý‰üåÏck ]ÛD¥Q0y^³6%{ûöZ´U;åï@ɽ m5ÎA¿' Eo€`kõ°ëÅ*/ÚCÊ4Uع´¿ƒGV€>r؛ݷ̈ÕT§ÓR<=Ÿ#ÇNÁûµÂko}@yÅ”J%}n=“û4þ9 @L´™û7Ü!7” Y™ir¹¾7æ®VÕ’ž–ŒB¡rXOss+o¼½%ä¹÷·î yüö{ÛÉLOE¯×QXCSs+KÍáÃícë ý0LŸ–òX¯6*tzݘzÌ*)äÌù‹\,¯äèñRJfL#Êldû®OÈË™LñôüA÷ïiæçM é1HKI‚á[wÈÍžÌì™…|¸}/­mílûh=´‘ /ËÁÁ†{Ö’œÀ»ïï ¶®‘¶îækÏ>ÞïnìX·|É<ÂèkhâTéy¬6^{ëýA{–ÞùÛvùlºw9SƒK×=Q*j1ÑfJfL#Ì §½ÃÊù‹ògp÷Çq:]( V¯XH|\,íí.V\!?oêumlj!È=a=ÿÄ„8”Êà©Ïçg˶Ý=QŠ×;ðâ~’$±ý£}ì?t<¤Ñ<åSV~ŸÏÏ…‹—C>óµu´wÿÏ(*‚Aî»ïï@’$""™3³cd8eå•”•_æju-}wO<²I>NÏ1vï= TTVÉÁAOÙ„øXÚ;¬rp™‘Êœ™Ó‘$¨ª©#)!nxN†p[Ñ…©(\ÛûY¹t°W§c¼–ŒâÞÀB©RP°*VîA¨;o£½Æ…Z«$oyïßÖðh }—Kšº0Šø¬0¬M.îmÃë °ý¿¯òØ ÓPiÆ×ïöµ½RÏéšåÇz£É/á¶ûqÛý„™¯ß‹°ìét®³2iŽéºe‡“ßìiPªh *æàÑ“Ì*.dîìòp‹kÅÄD±nõjj8wáI‰qæqér—+«HKM¢ oª<cZ~6…9lÛ¹ïºï½¡±Y¾+<{æôë–Kî[¿–Ÿüìw¸\nþúîVâb£q»=èõ:6v÷( Æá ¿Ê™:©ßü € +°vÚØ¸~ &c$~€¿¼þÊ*èê²sùJpˆRRbóf±vå~ûÒë446cí´a6oå[v3‹ åÏ¿F£æÈ±ÓÔÖ5××üÍܱë9¨¾sí2fO‚=:lÝ ç¶üý3… ;¹kÝrùÿÍ­Áˆcc¢B†ƒ-Y4gÐ:îØõ ÇOž•{ã:;müçs¿ÀÓý¸©¹_þîžÞüjµŠŠÊ*¼^)ɉÔÕ÷¿{¨P(¨ª®ÃãñZf¬ÊÏ›*ÿ¦œ:}.$@8uæ<JYÔ½÷ÁN$IB¯ÓñÕ/=ŠÙü|ΛSÌ{ìäÀᔕ_¦¼âŠ8wÚzW®®©çju™Ý½Ÿûö ©­»lKkïjê æ–ÈaÏgD®GoT³àÑÞá »puúˆN5„<0ç$ùÿû~_K{ AÙ¯œ¥¡÷nxú #Sæë¢Rtìû}-§Ÿö'qY÷èM—ì”n Éy,ûR:‘qÁ»ì¶Ínùf¥h¹â¤¹Ò½ÍCÀ/aJÒ“¿<†®VÚ0%MnL ½=“ÖF7Õ§:éjó`NÖY—;ÿgÉy¡=µ*µ’Ïføb°á嘕=¿ª`ã¿ecN ¾ÖµAZÎÒh–lNÃï•8ýA3Gßl ³ÉMÍiS†¶Ù&ª™÷& 7öo:_:ÐACYð·yæ}‰Ìº/±_™ñfØ„?9ÌþCÇå† @Rb<)É¡'¯g•T…BFZ­®.»|gÒlºkÒl6òä£÷óÖ»[iniÃbédçîýìÝ„û7ÞÉôî†}È>&#ËÏã½vŸ;•e‹çÑÐ7â@› IDATìR*š–Ç¢³äò7sǺ®¾‘ß½ôn·‡”äV-[xÃûŽ&c$ëï\ÉëÝBsKÍ-ÁÆæ=w¬ "1.{0@¸|¹ ƒ^OFZ ¦>ׯµµÉ2ƒÏ§$ç~H’D{‡Ew‹ÙbµÉÙ®âã{ïXµ¶uŒ»¡¯Žž;Ç’Dp ØÞÏÖ‰Óçä!o+—-`ÉÂÞ}Ss.Wðñ’…sB‚ƒk%ÄÇRU]GKk;¿ù ÍŸÍ”ÉC~Ž]n]v;~ðv ß¸æ±»Ý!—_çJôúಟýò¥¹vÕÑQf¾ÿÜ‹ƒ¾öX£Q«)*Ìãð±S\º\…Ýá$<Ì€$I”ž 5ÌË™ŒÁ ÇçóËó–æÎ™!=V¯XÄÁ#'‘$‰ªê:²§LÂÙ= ‚=3M-|rà(™é)4·´Q^q½^‡N§ÅjµÉ‰âbP(H’Ä»ï“2”Ì(¸îœ’ÛÏø¾=œ¬n*vÈû6ÞGš­µ·ç00†Wâ¾çwµ!I Ò(YùÕ ÂL½¿¿‘qZ9X8ÿQŸü±6dÿðh ù+b¸°«Ê£2ФMþV\=a壟Uáó =ߣ‡R­ Î#¸†Z«„îjht½Û5zåuç¨4 ²GsôÍ,õbåéÖ&7®®ÞžrƒQƒ.BEÓ¥`ÏJ£ øž„ѪÞ-5ìBß,>jµŠÂ‚Ö­^Ú/ehtT°‹­½ÃÊŸ_GÚ@nv°›=<"¥RI  ÓÖ8ϵ2ÒSøÆW7SQYÅ'ŽR^qÇËo}@fzJ¿†mk[‡Žœ”‡qT×Ôóþ‡»¸|%q_¾R…ÅÚ9ä¤ËTU×ñ»—ƒÁARb<›{ $•ëxQ2c'N£¢2x7?+3í†îRöÜé,=w‘ÒsQ*•,˜[Â]ë–£P(°ta†Þ;$}ç‰tÚº˜œ•ξGq:]üâ·!gjkovªñØ4(»x=ójãÑh4x}½?<=ÁAfzJ¿‰«Í-­òÿ†ž¬½rÙÊ+®`±trñÒ.^ºBL´™U+ :Dìž;VpÏ+øù¯^¦¦¶Ç?9ÙYüð'¿¤½ÃÊÓ›’ç÷Léž[[7xÏ@Oy«Õ6h™±jVI!‡"Pzæóç–Py¥Z¾ó_2#ø}him—û ó1ô##°vÚhl ^î>ÖŠe xõõ÷8wá+ûG’$æÌ*âjU-V«MNŒ`6Y¹l;wïÇîp²eû¶îÜKQa.w­]>…ã»ñy+Õ”vRSÚyý‚ä©ÂÇî§­ÖEù¾`—Z«$&ýú)ÇÇ²Žº`ƒ9.Ë E©RP²!ØpÔèn û<öý®Ÿ'€Þ¨&gq4‡Ÿ »Û=î‰w¹°«·=•”AÁªÏ–ÀÃë pzKïð)c‚nˆÒË;ÿ:÷cÞCÉÝ/&Œ ºq?|®Ç°³K¦S<£€ˆð0Ì&ã  äˆˆp{h#¿øí_hmëàO¯¾ÃÜÏ”¬ ” 1ÑfZZÛ9áw¯[ÑoÍk) ¦NÎdêäLöî?–m{ðú|Ô74õ ¬ÖNyÀ¥ËWå†Ó¦ÂÀ“.cë²óÒ_þ*_|âÁZ‡a,²vÚ¨í3$¤¡©[—}Ð![=Ößµ ‡ÃI‡ÅÊ…‹—ƒwFÃdŠdñ‚Ù® !õIQ¨P(È̒͞EsødÿQªkêCÆbÄÆŽÝ ®ƒ¹v~‹^§ãsïèW®'(¾Z]Çž}‡X¶xž¼íf29šŒ‘|ã+›ùäà1Ž;µÓF[»…×Þ|›­+¤g¢‡×çÃëñÊAJT” «ÕìÕQ(0›¸ÝtºÛwòZ_i©I$%ÆÑÐØÂÉÒóÌŸ[‰î¬B&c$¹òœÞ 3Ø%ê ”ÝÙ,úNOŒ%/w*ç.”³gïaN–žC©P°`N MÝ×Âéê-¿jùB&OJçãOŽpñR%@€“§Ïs¥ª–¯åÉ~ª…‰E©™(ës°6\/ÂÙk&v*” –?“¼»=Žuµ'ô†CÛ3‡Ÿ·ÿ5˜€¡dCbȰ¥ZÁÌC;i,·ã°{ZV~9ƒÔi‘üÒBýù®Ç*µ’‚¡³¹êò! 
µ¥6lmù,ì'ÆÈ6?zÿ¸ $Iâõ¿nÁår£Óiñz¼8.Þ|ûCž|ôþ!÷–Ÿ-ÿõŠEüæ¯Qyµ†3ç.²xÁly¸Qß;¨}ÿo4¯ßk–±`N Í-ØíN.^ª¤ôlzNš4ž$%ÆŽÑAjr"Å3 lÈ-[<ÚºÊ+®°uÇ^T*‹Ì &Æ,—khh–'~F§Ó²rÙ–/™Ç™syëÝ­x<^>ÞwxÀa˶=ì“êÇ?ý­üI’øþÿ{‘˜h3ÿãë_ºé÷?^Í›SÌÛïm§º¦žšº`¶-€9³Šä`76&ZöS_ßÔ¯‡Æfë’?ã‰Ý= }ƒAÏ‚¹Åœ»PÎác§(È›ŠÙlÄ`þ†8œ¡Ýý“2Ó˜”™F[[[¶ïáÜ…KX,œ>saÀ¹?ÂÄ1e~ ë;ÇÀ>dö›[Í” #Ø@r8œ´¶uÅÕêZ¹|ltïݳوÙläreeåÁÅ gÃ;~?¸¡ß$ýèuZ}h#¿}éu®VÕòÁÖݘMF rHŒC§Óâv{øø“ÃäæLî×3&I …‚€$É X¥RIQa—¯TsäØiy‘žŒD=¢£L˜MF,ÖN ÄD›éììÂÚi#22³)r\Ïýø4Чðá¶q¹ÝüùÕwñx¼(•ÊÄšÔäDjê8|ì³gN—3¨I’Ä–m{ä߬̌T ´G@«Ñ09+ƒ¸ØhyrÏoEÏZ/®>BßkÅú;WqîB°h»á˜‚0œfÝŸ(OR¾D%ëi©tÐRéÀÖâ ™sðYhû¤Ø´6º‰›tý aÒ3)ù7>ôy(iEFŠîŒ#̤!Ú³ŸÃÇNãp8å?´Z­†Ø˜($IB­Vm¦­Ý‚×ëËO>æ]ë–ÚåÞ3”¢gòtC÷ãÄO‘.0 Iòøûñ¬ÓÖÅÖ0uJ&ÅEø|~Ξ/§¥µ-Ûv“Ÿ;cdÇOžåÍw>ÄÁ7ÿá‹ÔÖ5pöÂ%’ãÑh5T^©–׎ÈL6Œ²§dÊÐ7ßþEùìÚs€¢Â-—É“‚ #õMûÚ37káüY¼ó·íÄÇÅÈk}¨»·¹»Ë;.žûéoÉž2‰Ø3:Ž‹å•ò±&Ίíãë®0þ¬ŽåÒþv~‰m?¹Â²/¥›ñÙG ô›qfk MEc¹áX1R Æ_¯ü­òñokBæ( V~åÆÖZÊYÍ™­-´×º8»£•¦ IyhtJ,õ.Z«œ<ø£¼~ ¿eà hÔjþóßÿç •ýÂã øü«—rÇê¥!Ï©Õ*Ö®ZÂÚUK°;œtvvn ""\ÎÈR\T@qQA÷v*•jȹ=Ö¬\Ìš•‹åÇ÷ÞµŠ{ïºþ@¾oóÙ~Ï)Š~ÿcÙ[ƒ =) î¹c%@÷YËùãŸßÂãñòáöypÓ]ÔÔîÖNÖÎN**«Øßg^GØî”²üòm¸g5~í=®V×ʽÑQ¦I¹'NåRÅU4 KÍaé¢9ãf=‰ÏÊ`Ðóù×óâ¯ÿŒÓéâõ·>à©'dáü™X¬q*ôºCð†Hß!~Ëm4X“â'‡QxG<¥[ši«vòÖ¿\D­ûì ù¨T=SFqi—öwpå˜õ†We>»«ÇC–S(¸áA¡T°êk™ìz¡ŠÖ«NZ®8h¹â)ÓXn')gü$“=7"<Ì@øãú¯·]šÛí!.6šUËm¹™—3™»×­Àåv£P(ðz½,[2€$‘GlL4Ó§å¢R©hllÆére"59‘Y%…!w¬s³'ó•/=Âé3hk·’”ÀÌ’B"úd3š3³ˆâéùÄDG‘+/:7ž$&ıjy0Åm߬M×R)•r¹ôôÞ…ÓR’ø»ÏÝCss0óM{»%8¤ä®UÏ(àre-­íx<^ŒÆH’âäµ:žzâï8á MÍ8N DG™™U\’vöZù9“™”‘ʤîìC…9äåL!%eð‰vFc„\ÿÁ²èèôZ¹Lt”yÀ2cU”ÙÄÆõk±vÚ3HJ¸‡qÆô|r³'sễç=>>–I™irÏA´”$V-_ð*•Ê~ ²§LB¥RÉË‹òÑëu\­ªÅbí$0›"Éš”F~îÔq·€ pë̸;Ÿ'@Â5ãØ#c5ÌìÎÏn`XLÞŠX¯œ'0ÅF &uHúÌúH•üÑi·ïßàùŸO&qj8GßhÀÒà’‘0Æk<¿7bÉÒ3k8»½58©¼18DZ¡s†A|V˜üy½VÏOhß2êVZgã¿eSyØB[‹ÎF7j’ˆ ©Ó"IÊ?Á€bçÎÒÂ…ó®_p»4TW s•„©“òP’ß¾öJ>¯gÀm­›„)zà†`GKÖŽ—`n-­Î@rÆÀ+?{ÜNê«FnòåD—’™ƒF{cúþý‡Y¹rå°Õåл_¥­®··rÞ½?#&eÖ{Ü:ûöí#55‰èègÿÿöðz-Ÿ†ÏlÈkô*ÂLê[Ò› IÐÙäÆï•0˜Ô˜;ÄE¤ñÍe¿p[sW Ïíyr„k4qýÃ’_’lœ|ý‚ÀŽ»Æo‚ ‚ Âh޵ B² ÂHßɈAAA¸¥D€ ‚ Œ[b®… ­'Aá¶!ÝÌ2ã‚ €D€ ‚ ‚ ‚ì¦F‹b€E•„[O©R™ç_;È‚o­§Ñž–T«ûôËÚ 7g¨ë Öh\ðM¸õ”J*µÈÍþi$DfŽv&Œ¡Îµ¸#g¨sm6ÄbÐÜšU …¡iU¢ 7µÏMýEUªÔDÅ&Š?ÃL­ÑŸkI5 ÁFë=Ïb6LÜÅ7G‚IÇÝÏÜt0vÓiNæXŒæXü~ßÍî*Ü •êú—E«3”>…@À/ÆÜ¥R‰B1tCH¡P—”ŽC–>…BRyýU³#MÑDš¢ÅoÓ0º‘ß&ap ‘“øòÂçqzmøÄçtX´á¨•C/R¦P(ypÆ·Ø4ý8=öªÙÄ¢V©1h¿¹ÖcfêZJR×Ð嶈EЇƒ"tfÜ|2‡Oýk/þPŒ 7Òp†ŸB©D%¦ôŒ â·Ië šH]£N­Ô©ÿt+ ·Ž‘ºP9¢E#‚ ‚ ‚L‚ ‚ ‚ ÈD€ ‚ ‚ ‚L‚ ‚ ‚ ÈD€ ‚ Œ_Š›ÏÎ!‚ M¤ûAnc8í³ÍÝÎÎò—¹Ø|oÀ3ÚÕ¹-õ1”¤¬fqÖ¦!Ëí«|‹u;ètµPÍ&RKNüVe?J¤.zÐrWÚKùøòÔZ."‰<§·œ)¦©,ɺŸÉ±Å7µ¯A„a$þè÷ØYþ2‡ªþ6ÚÕ¸­u¹;¨·VbšJVÌôËT¶•òþùG¸fOÏg}cá? 
¸Ý/ùxõäbq6dµ&œ²æÃÔY/ñO+ÿŒZyãù••€XÜIA¸õÀuœH.·í*L•Cœë¡¶ ·ÖPŸùv{ƒFˆÍÝNKWõMí£Ôju¸\îaª’ ‚0Qµ··%@FÞPýV¢OK˜ˆnös¯,**âÄ M ‚ ·N àüùròóó‡õuˆIÊ‚ ·šÒl6“’’ÊÁƒGðxĤ)Aá³±Ûì߈‚‚|t:ÝhWGA¸Ij€œœâââ8~ü>Ÿ­VƒB¤ŽAnB áõzÐëõ,X°ððð¯ƒÈ„"‚ðÙÉYŒ¢££Y¾|^¯i §ŠAÆ¥R‰Z-’ã ‚ Œwþ’k47žIA„±Íï•è¨sÑÕæÁ`RcNÔ£‹P…”qZ}8;}ø}ÂÌÂÌšסó{ø½(@kPõ/ Çé@cP‰µìƸ‹{Û¹|ØB¸YÍÒ/¦vuÆ<) á÷I¨µ·w†6q«GA¿Dëóº*X8ðrö¯üœR¥`úqÌý»d¶ü°’šÒÎýT%YsLß›@T²^~þЫ œÝÖ‚B÷?—èT}È~e·ññojØð¯SI˜:òCÍ&²€OÂÚäÆañ’RyÝòµglÔœî$µðúe'º7þ© k“›§¿*v´«3¬nïðGA&°Ž:½P…½Ã‹Bæd=£š€_"<ªw´€ß\I¡mX°WÀï piû^]m½ILºZ‚ÿ—$8ñvcÈëüÇû<×Ù,’ŸŒ´Ã¯Õóú·ÊØ÷ûÚÑ®Êm§½Îì=›D‚ ‚pûóçBÔœî$à ž“MßË!&Ý@{‹ðèþÉs"Xÿ/Spvú8³µ…“ï5á´ú¨8haÆÝñØZ{ý•G­t6¹1&³UUè «­·§¢o`!ôò¹HhôýïÓzœ~Ô%Jõнc®.úp5Ÿ6Ó¯×@­½½zàÚªœœÙÖB[µI‚¸L MÅm÷qè•zfߟ„)QG˧?.Ô¶øÉ4tá*>‰Ò[¸zÂŠËæC¡&)7\îiëQöq;õe]ÌܘHTJ°­îœó»Ú°Ô¹ˆˆÕ2i–‰Üe1ò~µ.Îïn££Î…ÓâEcP7)Œœ%Ñ\Ü×Ns…AErn83îI@©½ë#AA¸]õ‚ÕPÖELš¦b'0ÕÝÏ©¿5!I`kî]PÕiõ Ò(ð{%Îlmaáã©”~زÍeóÝêw4æx]ÞûKØ;¼L™Å‚GSØ÷‡Z®µ7)Œuÿ_õº8ùn­W¸íÁ91éîÿ¿9Tìàè t6{Pª$L gñ“©rã‚sI޾ÙÀÅÛquùÐTdÍ53ÿáä~óA:›=¼ô•³¤YþLz¿×Qk•·ÍXúÊ£>úy•ØÛ½,}J‰­%ÀåC ׯaJÔáèðÉÏÍ8]¸Šƒ¯Ôsv[‹¼¿7JMÿFzË-W䯈%*ÎîheÿKµòŠdíµ.ªOuR¡‹ÏfÐPn9>@Ó%;g·‡>WwÖ†µÑ#_³Ñ AA¸Me9úF>O€ý/ÕQúa yËcÈ]ƒÁØ¿ I§kƒ›²½ír‡LR^D÷vpv7úó–ÅpvG+e{Û™ý¹$Z®:i«vbŒ×•¢§êd'.›ÄÞëhÑè•Ì}0™~x™3Û[˜4ÛDÀ'q~g+j’¤ P@S¹ºs6 8D’ ̼e{zçm(U ~‰†².Þþ×røA.1Zv½XEå‹\ÎãôS¶§ŽZ÷þëÔ)9R@’ƒ9·#øoÕÉN>úy•\&à—puÿ Îm÷³ïwµ|Q©zŠîŒ':M»ËS=,U'¬¤Mdñæ4> ¯+Я\þŠÒŠŒ@0ضwx9üJ=H˜Nñ= \>l¡|_;—öw0uA”\‚×îÞÿ3•Ú36޾Ù@Ö\3ù+b8ùnu绨8ØÁÂÇSN0D€0ÆyÝt¶VŒv5AÆ$¯+tbmg[J•ö–[K¸y|gu1%ê¸ãd±÷·5XÝØZ<y½Sï7³ü™t2KL!å/ÚùýÏ„<—˜NúŒ`ãÆm÷!‚QCj¡‘–«Nš.Ù)û¸†î!ÓÖÄÑZå¸-Ÿ7"µ0’µqœÙÚÂÇ¿©!àž£¤`J ],0޾ÙÈò§Óiºd—ƒƒwÇS|o÷¶sàå:š*ìT¶0yžY~c‚ŽMß͇ªœx'8?$*YϪ¿ÏÄœ¤cë+©)µ ï fõºäުŧÊíÍ Ö`kñÐzÕÉ•£Vr—FcJìß@I79³÷»Sq°Ÿ'H,y2¨T=iEF®·âqø©>Õ (”?9ŒøÉa”nmÆÝå'²OS@*µBžˆ›i`ý¿LA¡ 60=öÞm˜’i«ciºd§tK3«µNIÎ’hl 6DÝöÛ¿¡ÇÜ“©;g£½ÆÀ¤Y&ò–Çô/¨èù¥P PÓQç’©ü•±DÄhˆˆÑZIÅÁš.ÙhªiQ( x}ZƒŠikâ8þv#î.?M—ì!Bß çHÐV¬[æ,“<ÌL>þ›‚=Cà ˆŸ2tÖ¬¡¦)-x8…Ÿ«ÄiõqðÏu}³¤ |ûèl εQi”ò0…b3 Ô_袳eð¹8¦Í]ù;×7 ô¸Fïûs{ <A„[LñigŽA ¤Ï0r×?MfÖ}‰@pì|Oc¶GRNOý¾ˆÜ¥Ñ´^urõDo/ÛÑÛ`ÑèUdÍ1c0ª±wx‘SF¡ S¡ën˜z'@Pi˜û¤ƒ5% =Ï£‡«³·—%2¶·÷+¢ûÿ=wÆÝå´á*¹á¯P@Dt°œó:ó=|ž~oð.wHPx3'ÜK´×:ûm×z›»]­ƒ7Öã²Âxè¹|>žŠ1A‡Ï`ßïjèl  ªî‰ãžk†EƯßÀÚ])8B¯ëµ=ãº#ÅIÝ,AA¸M9­>.îm——€ÜÁFí@<š*g9úäµòÂggoÃH­ fÚé{wµ ;7¼Zl^L¤¡lO•‡-è"T(” N¿ßDíÙëÝ1÷™„ÜXn—ÿßT²ÕtD%ï,»»üXêƒ OÓ/7B{¶kôÁàÁiõÉf!xM ¦``Ð3âv‘05\Fuè•zªOubïðÏ¿ÖÕ3»lo;mUΪšÓ¸í> VŲòËÁ‰Å’Ô›®·'5på mÕN¬nœ>sÂå¬SÿRGˇ^­—ƒ»ä‚O7äi4Ý^!ä Õ›1ÅçŽv5Aèfi<‹×Ó%?6'LC£ Æ»®Ž*ü^'áæ4ÔÚO·0—ÓÖDWÇ•[\³ÑUq¨ƒ/× Ö*Q(‘']FÆiÀäè IDAT‰É0 ¸ŸF¯dÁÃ)ìøéU/'ßkbîƒÉxû yèI‘Y°&—ÍGx´FN£ªênŒõ·;k£›ý/ÏóÊ/gÐPfçä{Mìz¡Š~˜‹>bðæ–Á¨&%?‚ºó]»õÑMCw°0¥{ØPZ‘m˜ ÃÏÖ_!e,:J•‚I³ƒåŒ Á»Õ§Ÿ=¿®&µ0¿W"gI4YsÌœÛÑJõéN¶þ¸’)ó£èì“¡j¼ŠŒÓRtW<'ßk¢þ|õç{“z.c‚ŽüU±”ni¦î¬7ÿ×Åþ‘`çÏ«ð8ü(½C‘´•ü=™² Šï4ÑRéàÍcñ©ä¯Š¥d}ÇþÚHÕ‰Nªúôº¥äG5ÛÜïåÆ: Œ3¦ø\æÜý_£] Aºíë XšÎÉ }s´Q¬‘ðiUû+g?þaïc£§ÿ3Qk•DÆi±µxäI”ÌÒ2ÿá”!S\fÍ5“¼3‚ú ]œÝÖJáÚ8|î>=šà¾a& ‹7§…¾nwÏ„·;ßÿ51löþ¶Ÿ'@Ö3iÓ¤äGrùPÍ޼ÖÀ’/¤ ¹ÿ¢'ÓxÿûØÛ½! 
Í¥N‹”Wì5Õ,~"•=¿®ÆÚèæàŸƒ ˜µ©7Ö3ÇÞjÄÖâáâÞv.îm'̬!gI4sH¢ù²ƒ–JG¿†ìx7ç$b2 ”ín£½Ö…0%êðwOŸ÷wI"U\=ÞIg‹^…Á¨&&À֠ÂëZI{ ‡Å‹F§$6Ó@ɆD9ã×̉(” ªNX±wx 3käáZ3ïK$2^Ë…ÝmXêÝDÄhÈ(1Q²>A®c¸YCR^ª>ëÄfPjò4µV)O²­ F AA¸må-!oy ÎN‹•ZADŒVÔãžž2àþ÷ü¯Ðç§ÌbÊü¨ë¾n¢ngN«¤¼’ò"ÈYœ»¡T+Xþtµçl(Á±éI¹á̼/qÀFŸ9IÇ羟˅]m´U;Ñ”$åD0uATH :eAÑi*v`mp\Œ+1»·×L­U²é?r8·³K½ ¥JAì¤0‚ Îߙʥ4W8°·{ц«0ÆiI-Œös5Ü&Ï53yîÀwëJ3îI`Æ= nXý÷™C_©R0ë¾Dyϵ²E“½(zÐý3JŒd”Cž[ôDjÈ㈠ëÿ×ÀßÇ‘$AA€ñÚA·ÏmoƒQ=àºÂgg0©l0&æ„“˜ÓÛpOÊ )wðᇺp3îëE§é™“–4d]¸Š’{n+”Šë6dALRAAA&AAúw!Ü>=‚ 7C‚ ‚ ‚ ÈD€ ‚ Àµ=ceÁ"A„‘2cÉívÓÚÚŠÕjEj-jaĸ,õ¨õ½™ œn%çÎba$ét:bcc1™L#Ò˜ðûýX,¼^ï°¿–pc®½mmm¸¤ÆAJ #I¡P`41Îõ?Ò” qOn¤ u®Åu9Ck…¸#êf?÷j6:N:‰Ëå$::ŠˆˆO·ÈŒpëEF—ö³Ñ®†0ÇÃåËX­3¢ÓÝ6‚µ›84J-9ñs)JúˆüüŒ3ˆcáÂ…£X»Ñu³cvAnbÜ‚$IÔÖÖÒØØ„$Iäädc2™ú•«««£¾¾®®®ãt:)-=ƒF£!??_¾ãßÖÖ†ÏçÇd2¢×ëà]C·ÛƒÑÙ/ÅÞÞ½û¨ªªâŸþéò­oýŠŠf²sçG7ô^–,YÌc=ÊÙ³gyá…_pøðž{î'üë¿þo¹Luu5••W˜<9‹´´þµçÏ_Àï÷“””(ßýÍÈH磶£V«BÊÛl6Ž?Éd"??N‹ßïÀårÒÔÔ 'ë*•J555´´´¢ÕjÉÎÎÆívár¹‰ˆ'<<\®‡ÍÖ…^¯ðzc×ÓO‘ÂÂiìÝ»×_“_ÿú·¬]»†U«VÁ»Òeee´·wŸŸ×oâ¶$Iœ:u‡ÃÁ”)SBî\ÊË/ÑÚÚJAA>QQQò6›Í†Ãá”?Gn·‹Å‚V«‘ËõÜoiiA¯×“N§‚ßá³gÏáñx˜6­ äswéRõõõäææ†ÔçFÔ××ÓÐЈ×ë%;{jÈû½x18¤eÍšÕddd„ÕÕÕ\¹r•ìì©$%%…×çóQ]]Ckk+ááadggw¡6ªªª±X,dd¤“––&¿7£ÑÈ”)“Q©z¿Ç \¹r·ÛMVÖ$¹Áóå—ÏmSS3*•ŠØØ–.]̾}c0èCêd·Û9sæ,ƒ¼¼\´Zí Ÿ‹±NL’AšIhss (,,fõêu¬Ys“'çðüó½©C­V+›6=@AA«W¯ã‹_|ºß1víÚMQQ k×ÞÉŠ«))™Í‘#Gxå•×ÈÉÉgÆMr㸠 ˆ9sæ9ýòåË”••Q__Ï¢E7v—.==ûï¿ï|çÿpÿý›8{6xG×ãñðÌ3_fúô6lØDaa1Ï>ûU¹1/Ißýî÷È̜ªUkY»öN¦O/aݺ»8rä(99ù¬[w—üzO?ý,YYÙlذ‰åËWñoü!õyýõ7ÉÉÉ'''Ÿ¦¦&ÊÊ.’GQÑLV­ZË’%ËyþùŸòÓŸþœœœ|¾üå¿—÷ýú×ÿ‘œœü 7×ãvP\<ƒGy˜_þòE23ƒÍžÏaMM «W¯cÁ‚%Ü}÷½äåMã׿þ­¼ï–-’““Ïò嫸ë®õLÇårPYy…+V3oÞBî¾û^¦NÍã¹çz{¨¾ó'''_þþîÚµ‹œœ|~ø1Nž<ÅäÉ9̘1“Õ«×±xñ2~÷»ßðÒK/“••ÍêÕë¸ë®õ¬Xœ(j±X¸ÿþ™={÷Þ{yyÓøîw¿wÃçbÞ¼…äçOgåÊ5¬[wS¦äòÿñåí^o0[ÍsÏýDþ®8¬V+=ôÓ§—t¿n!_ùÊ×p»ƒå÷îÝGVV6%%³Y³æ.\Ê[oý•—^z™ôô,-ZÊÝwßKaa1wÞyùùÓY½zsç.`Ù²•´´´ÐÒÒJ^^!wÞy7ÞOQÑL6oþb¿ÿüÏÿ›œœ|–,YÀþðGrròC¾ó¯¾ú:¹¹ÓX·î.–.]ÁÌ™s9zôXŸs±¨ß¹¸™s9æÜf=ì‚ 7jBô X­lÞüÍÍ-|ðÁ¾óçî»ï"+kßÿþùè£]˜Íf6mÚÈùó8xð¼SS3O>ù‡ƒø‡¿Çn·ó›ßüŽgžù2‡à+_y–íÛw°wï>þöÎ;:ŽêìÃÏV­¤UïÕVµäÞ{lcŠé½×HB ¡}„Ð’L7ØÛ 6¸Ûr·eɲzïÒ6mßýþi¥µ$[[ºÏ9:G;sgæÝÙ)÷wßrÿýïÿðÍ70™L¼ýöÿˆ‹‹ëdÏüùó˜4i"«V}Êš5_ÍSO=Ñ£ïât:©««'77—70vì@ ûù裕L˜0žË/¿Œ>ø+>búôiÜzëÍ,_þÿøÇ‹hµZn»í¬V+Ë—¿ƒÑhèòXYY{øøãUsÏ=¿`âÄ ^mRR’™>}:¾¾¾”—WP_߀¿¿?·ß~+z½ž³ÎZˆVëÏóÏ¿ÀÆ›°Ùl¨T*ý—]vi¾»àôÁn·SZZÊλ)))Ú¯Ã_ýêöîÝDze2aÂx^zé_üéO3oÞ222xúé¿R[[Çå—_ÊŒ3hllD£Ñàr¹¸ãŽ_pàÀARR’™4i"«W¯å©§þÑ£39ï¼¥§´«¹¹™¦¦&‚ƒƒ¹ùæijjfÞ¼ylݺßüæ÷Ìž=‹éÓ§yF¶zèQ¾ûî{æÏŸÇ9çœÍë¯ÿ›üãEfÏžÅ9çœ}Êc¶=_®½ö¬V+kÖ¬åùç_à /ðº_&MšHFFJ¥Š|˜uëÖÆ’%KX»ö >øàCxðÁ?P__^¯'**’k¯½†ººzf͚ɺuëq»Ý¤¥¥röÙg±bÅÇlß¾ƒ#Fpå•—óÉ'Ÿqøðþýïÿðè£Îm·ÝBBB555¼ÿþ‡|öÙç\}õUœ{n{53¦“œœÜíˆvv6÷ÜóœN'ç·”êêjöï?ÀM7ÝJVÖ´Z-z½t.®¹æjìv«W¯åÿx‘ /¼€I“&žò\ìN+… ‡pˆ2§ýB &Œ„àŒµ-kÎEoeNû¥\MrØxT QšûLeX„6~ûÛ{1bÓ¦Íäøñ|öï?@rr«W¯àå—_䢋–±~ý·^aݺõèt:Î9çl®¿^*ǵmÛrrrÈÏÏ'33“7Þx•Y³æñÈ#ár¹¸þúë¸ä’‹»´ãå—_áÀƒÈd2l6sçÎ&%%™Gy µZå.t"+V|ÄŠy>§¥¥zâ†?üPZþÀ÷1jÔ(BCC¸çžß¶&ß̪UŸð‡?ÜÏo{/ß¿‘åË»Ÿ´§¦¦:þÉÉI\tѲNÕtæÌ™ÓeBXXO>ù¸×²ŒŒ rssùá‡ÍDEER_ßÀøñãHIIîÖÁéɽ÷þÎëó’%‹˜;w.ååålݺ€€yäOÈå *++yë­·ùî»dddxªâÄÆÆ²lÙž°š£GsØ·o?J¥’܈V«%!!_|‰÷Þû G¡ØØXžxâ/žÏ·Ür;7Üp=¯¼ò’g¹ÝnçÓO?C&“ñç??BPPV«•gžy–o¿ÝÐ#ÐÆ]wÝÉĉX¼¸Œ¬¬=ìÝ»ÏK \vÙ¥Ü{ïÝžã~üñJV¬ø€iÓ¦rÎ9gsûíwòþûòàƒðl—ššÊã?ÖéxãÆãoû+†—^ú&Œç¹çž%<<œgžyÖãÑxæ™ÿãÀètzJJJY·n=¹¹¹^ᆮóÊA8‘+>ÆáppñÅñÎ;’W&3sUUUlܸ‰‹.Zæu.&MšHYY9»wg±wï¾3C tJRî[B^]ïíy›Óܧûx¡Mà·óÞè¶sjwZyiË/©3– °eà µÂ—§>FzÄ´Á6Eð!F'’šš H£‡ÃC?vl×5‘óò¤Yþ¾ÿ~#Ó§ÏbúôYäääP^.U&‰åƯÇår!“ÉxàûºÝ×ã?IZZ*ûöe1qâ>úh%¿üå¯ù÷¿ÿÃλNj{LL Ó¦Mõ|~óÍÿœœ„Åb¡´TɽñÆ[˜>}÷ÜóÛVË<³Q£Fâ I´Å”WUUqÛmw2~üd~üqs¶íŠ+®¸ €µk¿`ݺõ€ðœ©Œ=šÔÔ@Ê=y÷Ý·Ñh|<ñöƒ™3ç2}ú,Þzëm ý^yæ™§Q«Õüë_¯2~üdzèQÜn7………€”,ß–t?uêϺ6z;jÛöóçÏóZ^TTŒÍfÃív³xñR¦OŸÅ3Ï< @YYy¯ŽÑFZZ U<ꎲ²rìv; …Â#"¦M›Òº® ›­ç£ËiiÒóÌd2”$Mxi0HžÁíÛw‘1†¥K/àꫯõÜ{ma]=¥°°hÿM:þ_PPØå6m碹¹¹WǪl:¾Bˆƒ ÎXÆÞò Ý®ß[¾AˆƒÀæ4³éøÐ¨€6–¡còžR©$<\*I¹wï¾.Û·­ÏÌÌäË/×xýµ½ 
sssyóÍ·ðóóÃívó»ß݇Ëåê´¯¬¬,Ün7çœsII#Y½úS&NœÀǯÂáppå•WœÔösÎ9›o¾ùÚSiè¾ûÀét¢Ñh<ª¿üåÏ^6¾òÊË$'K#õŸþ9‹•––“WÛQ©T|òÉÇ|õÕZ.\@ee%wÝ%…ùøH#3:î¤ûèÈM7݈FãÃÚµ_°rå*”J%×\sU·œ>üæ7w³~ý×DD„SWWÏSOI1÷ááá¨Õj>üð=¯ëð®»îàšk®âСý<ðÀ}(r^ý 6løÞ“P_XX€ÅbÚó¢£¥JN1ÜS¢¢¢Z÷wÄkyÛ½ ðê«/{ÙûØcôêmt|¾tGll, …§ÓéUmß5<<¬SâooާPx?ÖŸ}ö9š››y䑇ÈËËá—¿ôαR«ÛîeýI“ßjgû9lûÿÄäêmë­ <¼íìë ƒ­{Ñ(è[ŒÖîóÿN¶NзˆkþÌeX „YºT ]xøáGxî¹çÙ²e«×úóÎ;…BANNï¾û…9’MvöQBBB°ZmÜrËíX­V>ýt%K—.áÇ7ó ÿìt¬¶œ„U«Vñá‡+غuñññžõ{öì=¥½r¹œ—_þ'{÷îóŒÐ^xáù¼ýö»lݺ’’¾ÿ~#€T·àãW1rd27ÝtëI³{w¯¼ò---\zé%€ôÂw¹\žÄÔ7ñâ‹/ñúëoP[[wÒýEFFpÍ5W£×ë),,â‚ Î÷tügááa<󌔀úúëoMffÉÉIØl6^zé_:tˆüü¾üòkÏ|O<ñ»wïfÖ¬™$'§xö7vì˜Vo˜•«®º†gŸ}Ž×^{€eˤDúqãÆ°nÝ7üã/zòXNE[xÒ[o½Íï?~¸‚W^yÐÐPfÍš À«¯¾AVÖŠŠŠøò˯½î˾F£ñañâEÜyç/yá…òàƒíßµ¯h¨¨¬¬¤°°ÀãYh£í^þßÿÞâÍ7ßâ•W^ër?m!D«W¯áÑGã¶Û¨­VËÂ… úÔf@  .Ã*¡;ž|ò/³eËVOxAG233yî¹gyä‘GY¹òV®”bùÇŒÃ]wÝÉ?ÿù¹¹Ç¸æš«™5k& ñüðÃ<ÿü?¸ì²KINNòìkáÂÜqÇmüïË=}är9·ß~+›7oáÃW°xñ9žyw$%äž{îæùç_ào{Žk¯½š§žz’ââRvîÜɳÏ>çi;nÜX’““X²dkÖ|ÆG­¤¶¶¥RÁ7ßlèT†µuëÖóâ‹íñÚr¹œ‡~¹\ÎÒ¥ç2aÂx<ÄO<@rr ¡¡!]î«{î¹›wÞy·Û}ÚÎS!è9W^y9o½µœ;vòØcOðé§+ùÏÞà–[ng×®ÝìÚµÛÓö÷¿ÿ-f³Ù뚘9s&K–Hå×_•›o¾Í›·°yó@Jx½õÖ›¸øâe¼óÎ<6oÞÒ«ê87ß|#{öìåÃW°|ù;,_þ …‚;î¸ýë%n¼ñŽ=ÊOõlsÅ—y…Óô5ÿû³”——säH6O>ù4 %PŸ,é§ðË_þ‚;wy¾÷‰ÜvÛ­¬\¹Š¢¢bxà\wÝ5ÚÍ™3›?ýéüýïÿðˆ­VËk¯ý‹èè¨>µy°8Sü@ÐßÈ6oþÑ=~ü˜Á¶£ÏÐëõ:t¥RÁÌ™ÒÈàŽ;q:L:ƇÜÜ\êëHMMñÁÞ·o¿'· 22’ &áYßVµ£¼¼‚ˆˆp¦M›J\\{÷îÃl6{Õl?r$Û«Fù‰”––rôh …‚±cÇCUU…„††0zôh¯ö……ETVVå‰96›Íž°¨¶Zó.—‹½{÷‘——‡Ýî ==©S§ V«Ñëõ˜L&bbbhiiá¾ûà£Vríµ×ðú믠Óé<õͧL™ŒN§cçÎÝTWWͬY3<Þ-·oßAEEaaá,X0»ÝÎÁƒ‡Ðh4^+«Õ†š5kÖróÍ·‘™™ÉŽ[~Öo}:b0ilÔ1~|ßL„µÿ>bc£O:Ëö@²ÿL&éééž{£ººšüüd23gÎ@¡P`2™Ø³g/…øúj;v,ãÆõ\Ÿ……E8f̘Ñ)IÝ`0°}ûNêëë?~<ãÆyç¹\.öìÙKaa!*•š¨¨HÆG`` ÍÍÍ9’¿¿—I±ÙÙÙdgç`6›IKKeæÌÈårl6{öìåøñ|är™™™Lž<©ÓDhï¹9sf#“ÉØ½; ›ÍƤIñ÷÷çøñ|jjj9rñññäå§¶¶–¤¤‘ªšÙívvîÜEqq1ii©LŸ>ÝsÌúúrss ò:mωððp22FQ[[G^^!!ÁŒ3¦Ëíª«kصkF£‘bccÉÈå5oËλill`äȑ̜9ƒêêj ‹ˆˆˆ`Ô¨tÏñ ‹ÈÊÚƒŸŸ/3fÌðzFfeíÁjµ2qâ´Z-ùùTWWwû 8Ì‚ »\Wxp9ÛÚlòÄëÈœý›n÷µeËâãcN90ÒÆó?Ü*b߈Åé7±(ý¦.×}—÷.òÞ`‹†'ÚX¸|°Íô’ 6= èžO?ýœÛo¿ÓS©Åb± P(øî»oú½ÂÈ£>Æ›oþ‹ÅŠ\.gåÊžIµ†C] g:'>$gûËžÏB œ¹ P_l¦x¯cƒ ÿP©3C‰×œr»Ê#5ÇMø©ÈX0x“ pf²aÃFb4œˆŽŽâüóÏ£¸¸™LFjj ÷Þ{÷€” &==îºëÎ!)ÁPcˆO”æS³¥ZŽÿÉ“ë-F­è2 $Ç67òã›e¸]íÁoÁ1š „}ŸWSqÔHúÜÐA‚3q·#æÌ™Íœ9³åس¾‰`(Ó+àt‹ÉR§ÛÙí:—«ûu‚¾Åu]÷VGÏF¡?»³ûsm;É:AßcsöMXˉUŒ†j™ÓÞÐV=ÇPoã«g 8÷¾$ÆNÙA=†:üî(q™Z”>rË,̼.–ØL1ÛïÉPúÈYø‹DæÞ±Þ†ÊWÁÊs±µ8ñ•JŪ4 bZÏ£ÒÇ[€w2{WWSyÔˆÙà@­‘£ P§AÝê9»$·ËMñ>¡qÂ{ è="IY A—¤Ï áè÷õ読Ô· TËIÈìâØ½² ‹ÞAÁ®fOûæ ‹§ *ÇHEŽ‘°_Ü.7Ç·7yr8"’üŽñá¢GR»ÜÞ7HÉÜS”3•+dL¸ Ò+ ] è B @ÐC4Iyéï“p:ÜFzWÑ ‰×°¬µSÙVw_ÉUË &¿‡¥Ýs=ni©³C¨+lAWmE®”§!*Ý{’/AgJè9øUm§å ÈçOpš ‚@ 0l’”£Òºî„ª}]ŽþË2bFuÞÆ7PIâÄÀ>·o¨3rJn—}­ §ÃM@„š„ñŒœ4ئ „@@  ¢Óý…§@pÚ3$KØZœ=®)|*Ü.·§¤›@  lf'µ-TåÛ@  §­¡0«™}Ÿ×t»þ‡Rд›_™cdßêê‹[°šœÈ2Â}95ˆÉGptcG¿«àŠgF¿½‰_J±€?–†J#i¦¢=:ö¯©¡¡ÌŒËáÆG« *ÕŸéWÅ–èÛ/ßùt%ë“jJöéº\çªâ¼’ÈßÑÄ/jAçüz!'TNh®²òÝ+Åà†Y×Å76€}«k(ÜÝŒ&PÉ…JàØæFS‡©ÑŽÃæÂ7@‰˜ŠÄ d, ÃWL%?(ìälª§¾ØŒ©ÑŽo’ˆ$?’§>Rº/ v5S“gBí§`êåÑ]î'kUv‹‹è ’§{–×µp|k“/‰B ¤2ÇHñž®¯?€×Ä¢Pɰäll &ßDK“ƒÀH5c‡“qz$Lºn6r¥¬SYH—ÓÃ* D¨|]†¾›šì4–šA&#(JM`ÔO›…µ'l~³Œ‚]Í$LôTR– Ñ@ 8§m/ËjpÒPÚýL°ëï\QÙ)áÇåtSWÔ‚B%ós³½Ó>͇g™»uÆÁâ}:¾ý§÷|V£“Òún;¡:ÏÄ–åe¸Ý  T’¹0ŒÀ(ZšíT5’:;d@ì^r¥÷¨®B)“êû)Mðeì’pœv7[ß.Çbtpx]Ýi!Ú°[\¬û{!—=Žˆê¤mmnä‡ÿ–zB&Õ~ *fS“€H!’û“¾ž!@Be}ºOA×h}BÒ:Aß ˜wƒ ï9#Bìh­×èrGv¯ª¤j ?–ÖÞa‚È”ŸÖ™oÑI£>þ Â}Qª¥IH†ûH][ýëîhÑÙ=ÿ»Ýpx]so‘j5ZWç58gé0Ò{2d2)T,,Ñ—„ ”’&ç©/éÞ»$è{¶¾].‰ƒ%Wý5ß öGG›‡n ˜zyŒ' °#rd,{$Õ«Ã]z@OÞÖF/¯ÂéB‹ÎΆ—йèÑÔN‚§ S“íïW€[ª‰~ö¯Gx(,Õy&ü‚ºÆy[š05ÚPj„ÄiHì5éRU®‘ê<Æ;Ú0É3‚=¢¾m»>ªD&—>—¤iAÈäÞöêlînÆPo#8Ú‡”Y!^!€õÅfj [Ð×XqÚÝ„«HšŒ\)#{-:ÁÑ>$Oö*Ù§§&ß„Yï@í+'(FCúÜ~µ÷ĉÒúš³Ò®¥|ÏqlNñüêO"´ L‰_Üíú)ñ‹ÙZô)uF!ÖúµÂ—³Ò®l3?‘3B d}RªÃK-nL#&âv¹iªÜî)3ûîÅ7ZKC‰]µ•îÏ!ã¬02†yf8®XN¶¿Wáµ,ã¬0Bã¥\s«°1)’ýzŽmndÚ•1à†¼-Èä2âÇPvPÙÐûN[c©cƒ$BB†WÈ`b·¸h(“:4 üÄÁ@S²_çuŸ‡ÄiŠ–:´ÅÝâ¢â¨ 
ËòŒƒ\)£&ßÄÎ*™}C\—mò;Lž´àÎD/ï¥&@ÉÈ)Ý{Eê‹Í¬yò8[{8˜LÉÓ¤mÜnØòV9›¼¶Sû)¼BS¹…¦òöЦØÑZ.|(Õš_´GǦ7J¼ÂÎö|^ÍÒû’=UZ¾y±Ðsß¶±{U5N{ûvûÖÔpÙ“éžëëÀ—5T發¶;¼¾Ž+þ/…ê̬¯‘1Ç–|BaÃ!®žyQ½#PFBpÆIÛ¨><°p9e͹è- 'm+øi(åj’ÃÆ£Rô_®” 9#BîÞ7°B%cÄä@ õvœviħcÌ.@C©Ù*4éâh´a=ïÜO»"†¦ e‡ ˜šììý¬š}«k}N³®‹=c_N?›ÙÙ)O»"†É—x{06ß¼Pˆ©ÑŽÚOÁäKN¿¼¡Yׯ²í½ ¯¯#nL@—mÚ?Ô~Š^—DÌÙXÃæ’¾ÿÅQø)=çàè÷õq>Ò—”!˜õöN€p5g…Q™m â¨‘Ê£RÂxÒ´ Ì:?þ·»ÅÅÈ)AŒ˜ȱÍR8Ú[e\ù× :FèÄ 2Åœ¤ü™\Ƹs#0äïhÂØ`#gSƒç÷5?”¤iÁøø+¨É7‘³±æJ ù;š5¿¿BDNô ôýõ¬Rø0*rZŸïWÐ{N%$‚áÌ!ÆŸéVÓú²ÔtpG[MN¯m õ6Žn”^€™g‡÷J (}äœÿÇ*ŽÉþ¶Žâ½:Ü.7Ùê‘Éd̹©ë¿¡ŽÚOÁ¸¥^Ë‚c%qàv·',«}Œ]Aõ1Ù­" `Ì’jó¥Á¯îp»ñt@T9ËN%"yøæ‚ 8ûH'ôŸ*Øòv97½€¯ª)Ñ£ü½B\´áÞ÷µÕädý? ©Î3¡TËYúû¤ÓÒó—67”Ær 9›øñ¿¥L¹´³ˆ1¶æéøuá±YóÔq,z)³B˜zYçmÛΑB%#8FCâ¤@¯‚<‡×KB?6Së™9·+‚ã4L¾8ЉFòÖ‡qÚ]Ô—´4-ˆ’ý:¬&'>þ æßž€B)#"ÉO9Fc¹c£Ýë¹›éÏäK¢ññW°sE%n—›©—G£öSÐPb– ”µ‡Þd, ÃÖâÄPo#|„/5y&Ë-’µ@Ð÷œaÂù]æ ¨ýø)1ëT倾)Œ­%n´}­5O§Eg§pwó°>þŠ.;#à=÷„ÚWÁˆÉø©<‰Ë¡ bFù£«’^î= •[ž³±c›±[\èj¬B Á1íåjëK̤ÏûùûT¶zìVïŠH“×;ÆÊ·qþRºÌApÚݬû{!5ù&”>rÎ{ ù´.Ñ9ç¦8j [h(1³çÓêNëµ­UºLMöNëôÕ6ZtvÌÍ׌^ÎñmM˜uÖ¿PH`¤šéWÇ’2#—CšÁ8iNQGä Á1>4”š±¥{·±¬ý^~÷×G:mc¨·u90Óæq)Lí§ 4^CS…kë¾]7?¾YFþŽ&¯ŠuÐùšéSN¬b$Êœ ‚aÊ+“ÔZ"±ê˜É3*ös1ÔÛ¼bj#ÕD·Æ1wŒé´Ó+  ô‘!WÈÈ<;̳lÌ¢ðÖuÒ%g3÷l2;™LFTª?ón÷ÄFo{§¢ÇI΂ŸJ#÷Ìý‘ûCºjëÏÞg[u±Ú|“§¼0H‰Å ]'þÝ&莽ŸWS“oB®”±ô¾$bOcq PÉYtÏHÏ'Òæ³[\”Ò÷jß¡ñ®|vcÏ@¥‘£¯µñÝ¿Š©8b@®¡hMŒîXXàÔözw–Û’«*9£ÏëôçÜõø“¼Cˆf[ܶٺàð7uämmD®1íÊÜ‘Ði^@ ôg„aÍSùÈO2“.Š"}^(3®Š¡(«³ÎÁö÷+8òm¡ ¾˜{þâ;‘VRz@OHœÿP£ƒêcRhLTêé—ð8P˜í|ü‡œNËÝ;Y‡‘¶¶$Ò1‹Â)ÜÝŒB%#}^¨×:Ü`³8Qû*:í¯+*9snŽçëç °ìþ¤Šù·%üÌo$è)soŽgÍÓDZ[\|öç¨ÀÖâô8›‹‚Í^ËÂGú2ãêF/ §ô ²Cz*sŒTæ´ Ðx 3®ŽérŸ¹?6vZ‘쇿ÂãÝ«/6S_ì]F2~lÀi)@ìÈßÞ„î„çœ_ŠY×DZùÍRZší|õ·*9 •ÌËc×ÛÞ© <Û@p´Ôao}^¶Í¾<õòh¾~®}­O=F@¸»ÅEæÙaL¿ªës")3ƒÙÿE Må¶¾SÎΕø‡ª05Ú5?ÔSâø§Ð&>JÍ|þ—<üCUÔ´œb+@ ô§­@19e1Ý'Ïu,Å¡æü?¦ «¶ÒXfÁØ`C Ä/HID²Ÿ§rGúüÐN1ÉÉÓƒ=/#¥dpõß3©/n¡±Ì‚ÕäD£Uí3lçA˜´,ò¤UCBâ4¸]nO²c[hDWD¥úyÚµÕ<Ÿp~$©³BÒ­â¤m´a§ï„b •Œ¹·ÆóÕ³Öe,%$·ïWRWÔ‚ÓîÂÙêõ Rv[î×jr`Ñ;¨n Å“+edÌeÔ<©úWÂø@–ޗ̶÷Ê1ÔÙ<¹½™3B®±ìáT¶¿WAaV3›K ?“Ñ)o ·Œ^NS¥•c›$aÐzjT9ÚþL:9@€lóæÝãÇéQãJ}/m¾«ŸM´qÿÂåDj»öX¼“õGk¶°EÓ ±gqÝäGzÔÖ`0ÒØ¨cüøñ}rìýû÷FÓ3O‰¸.Ž“]ýñ¬t9Ý説ȕ2|•žî°šœ´4ÙA& ¤î’»-F¦;¾AÊn'¤ì‘m5V\v7êSÚÖSœv¦F;n·4  ñWv÷s²gå‰8p˜ v¹.o÷9¾çžÏéÓï$mêíÝîkëÖ-ÄÅÅ*Ê. ‚¡Ã† ›N_‚@ Ú‘+d„Ä÷0ƒ©W¡”w_rpFâ2 –f²k¶b±›ºm'øéhTZÆÅÌcZÂÒÁ6¥ÇÌIºŒòæ<òë÷âê0y ïˤGLaöÈ‹»mã«Òrá˜_ò}Þûè,uhÝð"Ø7’%£nF­èz>ˆF©T9ØfÁϦ×~ДðI¤„Oê[½@¥ðá¼Ì;8/óŽÁ6Ep¡UqûŒ¿bu´`stžõZðóñQizÔ!‘xÓÏÇhmƒ)ý ´>Á§L$Aï’ÁÄGé‡ÒïÔ ýŠ >¢F¾@ Î,D#@ à'ç ÁPC@ @àA@ èýDi@0T9@ t©¹”ºÒƒm†@àEHÌx”*‘c&è_„@:å ”å~IYî—ƒdŒ@Ð5ó®z—ÀðôÁ6C0Äéµ@8V—Å+©ÔÇ}bB—àg#“ɈÅ”kH=I9Y›ÓÂ7¹oq¤Z̃Ð_øª´Œ™ÇÒŒÛPÊÕƒmNp¹]ì+ßÀ‘ê­Xâºè4*-ã¢ç1)n²“$±ÖËØUúºãˆ:§ýŒ„àQLO¼€pÿ¸Á6F †½v§••þ&ÕõôÇëöRk(áÁ³ßG!ïú'ÚUò%[‹>`ˆ‡‰-…Ÿæˬ‘ ¶9=â`å&Vüû`›1ä9Z½•Bø˜y]®w»Ý¼õgêMålÙð¢°á Çj³øÝ‚ÿˆ|@ èCz%êLåB :K=-UDhº\_Øxh€-¾5:c‘ª-ƒm°áHõ–nB±Xˆƒ¢ÚPDƒ©²_¼þÁ‰øÆöù~‚ÞÐTu‡½e°Í 3D‚@0„pál† nw÷çÚu’u‚¾çd¿E¯ösB(XBæ2R&ÝØ'û~*[VÞˆ¾þø`›!fˆ2§@ t[Ã!@ €Î3) Á0E@ ºâ$Uª`(3 9ú+å–“¶Ñ(‰N÷ï7 u6Ë,´4Û±[œ¨|hCUÄdhQúxë¤Ú‚tÕV¬F.ø‡ªŒT–à‹\éýÂpØ\”6>Òm˜—ÓMé=¡ñ£|úí{  ¯@ €Åûôìø â¤mbGkYöpj¿ÙõIÇ·5uZ®TË™tq“/Žò,ûöŸE˜šìÚú‡ª˜|Qg…!WHBÁjtòÍ‹E,¸#Œ…a8ínϲY×Ç1þ¼ˆþøJg$v‹ •F8®ÎxÜ`3;Qû*aÚ‚¡‹¸¸Áðd@‚o ’°D_\.7M­ÞÿP­dBвk•¤Î ÁfrR“oBWm%kU‘)~Ä ðj« S‘쇩ÑNc™S£-o—ch°1ãjQú®7ÙPϾϫQúȹîÅуmŽà'R¼WÇÁ¯ki(1c·¸P¨d„ô#mvc‡°ou5…»uø*¹àO)]îç‹gò±¤Ì fÒEíâ¼ì ž]W°ôþ$´ajò¶6rèëºnmºäñ4”êvÑér¸Ùþ~Õy&¦\MÒÔ ¾øêCš¶sÖ\iaÜÒHFLl“‘ƒ À „´9!¤Í  ¥ÙÎ{÷d0å’h2Ïój[WÔÂáõu4–YÐ*IÀØs#<#öÆG¾­§¾ÄŒEï@¡’¯!óìp"SüNiK@˜š97Jõ²­F'ïÿ&‡ÍEU®©“@ˆÈ‚;¤yZtv6¾^Jž¬eÄÄ ¢Gõ_HÔPÃÔ`ìwqfÌH,èÌÆ×K:yáœv75ÇMhCU`l°ÓPjÆ?DÕí¾Ë-XôŽNa…V““†R³g߽ó¬+:V¸¬É7±ey9 %R{«ÑÑó/8Œq:ÜdWÀ¨a§h=ŒèE‚Ãá ¡¡††¬Vk?%nXj>¡žÏÇòòQ–™Ñ"ÁPB¡PBXX~~íýèÓj„¬f¾¥—³}§âˆ’ýz.|8™ Ë,üªÖk»Ú‚ŽmndîÍñŒ^Þ£cÙZœ”Ôã°I½‹°š“¶÷ R±àŽVÜ—ƒÛå¦ì~H —ÃÍÁ¯k©Ì1bl°£ U1ni‰9ðE õ%f"“ý~$;>¨ÀÔd'qb és¥WmA ¾¨¡¹ÊŠ\!# BÍ”K£ éë9ŽYïà»WŠáÇÄeÒþl-Nö­©¡æ¸ §ÝMD²“/‰òêdüª–ºÂôµ6œ>ŒZŠÝââø¶F,'Á±>L¸ Òã±ô ¹?4xÄAìh-“–EåCK³Ê£Æ»–ü. 
¥Ú»ÓÖöyÓ¿KÉÛÒ8 v V““â½:JÌÈä‘ìGòô`,FÅ{t¤Ì ÁÇ_®ÚJE¶”•±°=$²*×HéAV“VIÌ(&x{ *² Ø-N’§£ ^¦&;EYÍ説D¨19ˆ èvϯÅà 2ÇHs¥³ÎÚOAdª?‰(ÌÒQWØ‚¿‚ØÑZ¢R‡Î³ ¬¬”APP Cëû —À¶ ‚!ŒÛí¦¡¡–£G³ eòä)(ŠÓG 8¬.¶½]Ëé&,Ñ—©—G 7W IDATS•käк:*sŒälj`ô Þ†óÿ˜ŒÅàdÿ54•[ر¢’‘Sƒð î~ä²®¸…7o=èHŸJò´àSÚ®F¦ÂPg£¡¬ûÍ3 ‡ÍÅÚ§ò©+jŸ©±¹ÒBÊ,ÉëS™c¤ì§ÝÍøó¥õ¥ô4WYñ V‘>tÕV¾ø¿|àh(5{…€ô;ì”fã¶™]L\‰Åàà³?ça¨·yÚÕµP°³‰‹ÿœFH¼$Þö~^ÝÒ¾ÿÆ2 Åût^û¯+j¡p·Ž«ŸËÞŠ>d÷*)ì'$VÃ…¥zV#ÕýZ\àDâÇt›Ãâ¢B®”1úœp²¿­RÑ"uE-|ûÏ"Œ í¹Q>Z)3CÐ×ØØ²\šµ9z”µ-žeisB‘+düª–+*½ö[¡í$ŽmnäØfIhE$û¡ PRvPÏw¯–`kqzÚí^UÍüÛâIŸêÙîÄý¨ý^Û!ƒù·&tòŸœxÉôÄŸŸOii1³gOG©A_׳žÔÞ1Hò£©Â‚¾¶û˜Î¨T?äJ.‡]ÍÉc?ÃGørÙSé¸ÝRùÕõ/Ñ\iaû»§÷:„BXŠvK#úñc<ù½Å?TòÚ8í.ò·5á¨ì”ÓRçaäï¤Ñ¶’°ISƒ™prdj´‘õI5U¹F6—Wj`¤š„ 4”šÙõq›‹ø±È•do¨§¹Êоֆ oh®j¿§´áÞÞ¹ª\#ùۥУ™×Åõ{…ª¶8ù64J@h+x0Ô(Ù¯GßúÜ9÷¾¤.﫞 kýi4JÂFøßuXeÇœ1€ãÛš°è%xÁŸRKôeÂ…‘¼ó«#8l. v6yy •>r&.‹dì’pÞºãn7Œ˜Ȥ‹¢Ð†©Ø²¼œ³Áá$8}è¡¢¢‚¸¸X!N3V®ü„‚‚î½÷n´Zm§õûöí§ªªŠ .8¬;}ÉË;ΦM?ÅÅ_ÔïÇ+**fÆï áÊ+/ï´Þb±°ví—̘1#Fô»=‰ôôTöî=xú„€°ö¨Æ23qc¤—`[rb@x÷á"Æ;.‡ô`?YxQGd2Šö!~Œ–æJ ¦&{{ÙÆnØ¿¶‡Uò:ôftý´Æ ú:©3}ÊP‘“ ÈŽœDê¬òw4Q°«™‚]ÍÄ `ÑÝ#º¥lÃiw{JÊF$µ‹®ð‘Òùu»ÁPkó„uÄ3¿„[J²”+eFùÐ\eõÄP ~>mû=á*4•[8º±€©WÄülpªAÿIEyÍE“1ôc½›+¥ªoJµœØŒÎž2ñÂ(¶¾SNeŽ‘UÊ%v´–¹·Ä{òü«¦ éø>þ ¤{T£U¯¡±ÌâY"J9‘>èk¬žë&4¾ý·OGÐ;ˆ:uC‡uëÖsôh:___âããX¸p }²·ÛÍ/~ñKî¸ãöNÁf³±lÙ%˜L&Ö¯ÿŠ™3gôÉqO¤¹¹™õë¿aûö¤§§qÇ·¡Ñt¯Y,V–/_À7ÞÐÉî•+?¡¡¡ž .8ŸÄÄÄÙP]]ƒÙl&)idÚþùjþú׿qË-7 ˆ@X·n?üg®¸â².ÂûïÈü‘É“'±qã†~·G ¡P(p:§@OòÃG«Àjt²ç3©fõ1“'Ö¿M0t¤ê˜‰ÀH5ûÖÔH d’7ádØ­.š*,ØZ¤j)9?H±¶Ú0u'q`j²S‘mÀÔh§d¿žÂÖ‘ö¨T¯¶3ø)15Ù=•_N¤í¼tÌèj?çÜ=‚±KÂ9üM…»š©8b ëÓjæÝïéÔÙÍ.¯Í*~A*ZtvÊÚ;mÿË@Û8”u_!y}NP”2¹ ·ËMeŽ‘Ñçô¬ÀÉðñS`Ñ;°š¼G}‡kÐGÛY¬Oº(jØÍ£¡ô‘΃ÃîÂÚæÓ…ªý>héïŠ1‹Ã MÐpðëZJ÷ë©ûý~|µZÍìÙ³((( 99¹ß޳hѹäçλï¾Çλxÿýwºm_TTÈC= €Á`ä|À³îСÃÜuׯp»Ý„„„ôH ¼ðÂ?yòɧ¹é¦yùåþÆŽCttóçÏlS†.—ûô*œ™×ÆñãK©>fbõãÇ=ëBâ4Œé¢:Ñ·/y}N›BHÜÉGÚ+-¬|0×k™\!cÞ­ñÚ–ÔSvPïµ,"Ésîá}¦“¡%G¥õüª–¤iÁ8l.d2éܵÆ#7WYÈÛÚHܘœï©®ÆŠYç |¤/gÿjƒ“Šlºj)4¢­‘Åè ÷‡'bÖ9Kô%n¬–ãÛš(Þ£#6S‹&@AÎ&)”$2ÙoØuO7äJ#&R¼OGá®fò§4‘:ëç ä htÕVÊ0륑d[‹“’Ö¤sß@å ê-a‰­Ï4·”G0qY$j_«K¥ïà}-=¨ïÖ«bÑ;ˆÉГ¡¥ô€žuÏbÑ;h,7•êJ#ÇnqQ™c$}nˆGF´zK]N7G¿¯güyìjöˆ‘ˆä¡nÙ‰ÁL¸âüêW¿dôè ~üq3Ÿ|òo¼ño–.]ÂÂ… 0›Í俣¾¾ž¨¨(F…Oûun³Ù(..¡¾¾ž  @RSÓ¼ÖŸˆ^¯Çl¶ T* ãõ×_ÅétÚþ«ªª¢²² »ÝFZZaa“èF#GæC||ç~Àöí;ÈÏ/àîãÑGfÊ”é|ÿýÆ“ž’’RÏÿ¯¿þoî¹çמ’“/¼ðOܭ⵬¬ÜÓ®¹¹™¼¼ãèõzâããHOO÷„»9Òà‹ÙÜBMTù1""ܳÞl6säH6V«•qãÆÔú«×ضm;¤¤$£P´ÖX­6ŠŠŠ¨¯¯'""‚´´T\.Gæàv»5jM{u³¦¦&ŽÏoµ1žôô´nCòÜn7µµR®T``'NäÇ7yý¶V«ââbêëë !%%¥K‘^ZZJiiAAA¤¤${•ïôŒË•2b2%×™ß uÒ3„â¤äà×µ4•Kó Ä `Ú1^am$N ¤©Â‚_°ŠÄ‰Lj-™Ù±™ZZtŒõ6O2­_°’ð~Œ]îU®oä” j [°¸]¡&(Ú‡“;ÅÏ+:~ŸÖð&™ϲ¶ØüÓ™iWÆPvXÕèdçŠJO%’̳Ø›4;ôÁ¯jqÚÝlz£´Ë}ìh&ë©ÒLÖ>ÛÚYIšÌŽ*qØ\üøf ×ë^ÍŒ«c)=¨Çbt°ñõÏ>åJ³oøi9‚¾eöMq”gpX]|ÿj û>¯!8·æÊîs~Zší¼{÷¯ea ¾\ð§Æ.§ô€ô›üÇÂ}©ÍoñTÁ»¤ëÙÇ?}ôX§>Û´+cHž~êg*qcˆåOÕ1û×Öph]ÚP†·¼1M ’¸1Td8øU-y[;λaÅý9ø*ñUal¼2¹ÌSí+nLÅ{uämi¤ü7næß–ÀÈ)AÄ  ü°+*Ù¿¶Æãù ŽÕül±x:á>-ÝC“É“'qå•—sà ׳sçnÊËË9r$›… póÍ·òí·ßyÚ&&&òñÇ™™É®]»Y¶ìl¶vvTT$o¿ý³fÍìtœÜÜ\/>£ÑÈ믿Â5×\ͬYs¨««gÛ¶3f óçŸÅ¡C‡=ÛÈd2î½÷nž|òq@êlÿùÏóŸÿü‡£ÝK·xñ"V­ú¨Û_@^ÞqÊÊÊY°`þIÏGuµ !—Ëijjâ½÷>஻¨˜µk¿@.—ãr¹¨­m/ó¾`Á9””´¿3'LϧŸ®"<¼]ܬZõ)«V} @NÎabbbxÿýøã¢¥Eª\8räØëÙæ³Ï>ç³Ï> 33“•+?$!!}ûö³dÉy^ç`̘1˜LFŠ‹%;¢££øøãL˜0€Ù³çSUUåi?yò$>ýt%!!Ÿ>ú^}õ5ÒÒRY¿þ+6nü;+ùóç±víçüðÃ\yå5ØííÕÜâââxï½·™­¢©Ü‚ÓîF $0RíI˜Ô†©ˆL‘f¶[]hÃT¤Î aâ…’hó R²ì‘T²VUÑ\eA&—1Ò·[QW>“ÁÎ*©-hÁis‘äÇÔ+¢½æ3ˆÉÐb·ºð‘ìÒ*=B¬-D"4Þ›ÙEHÜÕÂÕ\úD:›ß,£&ßDS¥…¦Êö0¹BÖ)¼ËíÆSý¨ Kˆô9aB 3®ŽeÏgUXN*¥2È<+Œ t-Úÿ| /¿ü ^xÓ§OãÕW_çµ×^'((ˆn¸žÆÆFV¬øƒÁØåþfÍšÉôéÓX½z _½Ž°°0ž~úÉ“ž¶ŽÿùçŸÇºuëùÏþË/~q¯½ö:.—‹K.¹˜Õ«×PW×^¨á¦›nÀåúöÎ3<ªjkÀïÔôÞ&½ $4)°QDÄŠHQ±¡ŸzÑkïz¯^ 6ì ½(öBï½BBz/“2½|?™dH`B€ì÷y|dæì³÷šsÎLÖÚ«Ù0|÷Ý"öìÙËûïÀã?æÇÀpsscãÆMÜsÏý9 ˆ¯¯óKbb£GfÙ²å:tˆÿüçuÞzë˜LF, >>>L›v K—.çÀx{{sdzøõ×ßÈÌÌâ¹ç^`ñâï˜>}2™ ƒÁÀwß-bçÎ],Xðÿú×ÃNk~üñ§¼ó넆†²té¢f=8z½³ÙLHH07Ü0•Ÿ~ú™Ã‡Óyøáñûï¿`0¸å–ide§ÿ~Lœ8>øÜÜ\ †æs¥-#|ø@ò~ÔwnŽ€h7Æ>Ør¼f·aþŽzè-ïΕóâ›=æá¯bԜ֫Œ{ÈyýÐîM ±SB[CpæøG¸2á©D* 
TäÐi%CÒÝOEPlC(Xï+ƒIÒü®rãp±>W“8Ô²,=5eFÜýTD¹áââtNü`?§Êf'S_’³1W?ð kzì|Dí®`Äì(†ÏŒtIžjGu/Ï5×ü;‹ÉFm™ •›7/¥Ãóêî+çÖw{¡×šÑk-¨Üx¨œ”ûz#Ф³¢«2ãî§rä©ÝŒº;šá³"©.1á¨nú×ûÊ`G²z¦¼ÒÃéuPœ;w|Õ§}/N{r”Æ=¸ë®{œ^;Æá¸ùæ›HO?BNN~~~<üð¿8|Ø94888ˆ§žz‚›o¾‰þýrøp::7·†Í¡[o½œœæÍ{„{ï½û”2͘q;    6²cÇÀ¢E‹xòÉÇ™1c:+V¬ä›oZö¼ûî{lß¾™L†ÉdbÈÁtë–ÈSO=ƒÍfã¹çžirNE…”™œœ„\.gåÊU,_¾‚… ¿%&&š)S&³|ù ÊÊ „œËÞ½û(**Æb±ðÆo5ÉïHKKsÊAøè£O¸á†©¼÷ÞüfåOKÂK/=OJJOæÌ¹—;w:ðçÙgŸ¦oß>LŸ>___^yå%FÅ”)SÙ¿ÿ€cìC=ÈÞ½û(..Áh42þ»Mdܺu;K—.'88ˆ•+—2a=""‚§Ÿ~’k¯Ä°a#ؽ{V«•íÛw•uÖ¬YZ­fÕªÕäææ¶:Ÿ yÎ;ÁÍ[Idª´kÝ\Ø‘@ è@dR³´Ö*ßøh\œBöZÃÃOåÔ-»Ù1þª6‡êÕ{–.4$¯@Ë×^©–·x\&“6NUéMí®@íÞ|57¥ZÞbyÔ ‘ƒÐaôìÙƒAϱc™h4!|öÙ'¨Õjt:S¦ÜÀúõÎÞƒ¡ùpƸ¸ØUW¬TUU9YYÇ‘Ëåôï߯M²%&&°aÃFªª¤œ¨¢¢"zôè~Ês³²ŽóÄOÃâÅß3sæl–,Y†\.gÅŠUŽÐ›“©÷bxyy1{öLV®\Å=÷ÜO]]O<ñ˜c—¿ºZWPPÀøñ“8z4ÃižSí”geI¹›§ yHH6àê¯ÃÉ$&& ÓÕ8ª%ÕÔH]Üsss?~™™Îù¢'˘“#….GDDvú›|‰‰’|V«•šš ¥û'Š´ç]ögPœ;W<ÏÄ·øGL Á¹Ëܹ÷±fÍPTTÌK/½ ÀêÕ?²~ýúõëË®]ÛOYÞR&“9’^í'y€î¿ÿ^l6·Þ:ƒž¶l“rGµ£Å‹—`2™±ûͱuë6l6#G^Jll Ë–-¦ÿ~,Z${íµ“š=O«•”pOOO†M;Û_‡››7Þxƒ£ìi½!ñÉ'Ÿqôh&ŒçàÁ½|ðÁ{N󹸜(.RUåô~p°¾Ù8Fÿt¯CÓãõ×½ùñ~ø1™™YLž<‰ƒ÷òÎ;o5;ϨQ#éÙ³';wîbæÌ;šÜÇ–89Ù9>^ºOdïÞ}Øl6Zô8ï @ ç?ÁÁA¼ð‚›/…žÆf“ hµZ²²²ù gÂ=÷ÜÍôéÓÐëõL›v;z}ó¥¼OÅwÎàÓO?'**®IxTc""¤ÂK–,㫯¾fݺõNÕŽNש§~×½Þ2{öLn¸áz¼½½ï×õUŠÊÊÊ8v,“²²R§ùbb¤Ý?ÿü‹×_ƒ÷Þ{Ÿ’’R®¸Bj÷ÙgŸ3wîƒ,\ø o¿ýÎé^Š6Q/ËÊÊÉÌÌrÊŸhŒŸŸ/ß¿€€Ö¬ù‰wß}¯Ùq§¢_¿¾ 8«ÕÊ%—\JXX{öì=cù»:ç]ˆ‘@ Áéî\ Ú©S¯ç“O>cëÖm<õÔ3|ðÁ{$$Ä“‘qŒI“®ûÇó¿ôÒ ¬_¿£G3xöÙxé¥çÛ<ÇĉðððdéÒ¥%÷÷ßÿÀݽi1Œ´´!Üyçl>øàCG2°\.ç¶Ûneóæ­|ÿýbFÍ”)“Ϋ­•ž=<¤|«[o½™ë¯Ÿ‚Z­rz¿ºº»ÝÎ7ÞÀgŸ}Áúõš„c”ÓÑ»w*{öìåÙg¥ÏÇ-·ÜĶmÛùꫯùì³/øì³/ËåÌœy{›¯Ë©¸ùæ›øò˯ù믿ù믿[Îo¼Æ-·ÜÆ3Ï<ÏØ±cÏhÍeËóöÛï°{÷nÜÜÜÙ´i3EEE­6©40@ h†æš1 þï¾;NG÷î ñüŸþ ÇŽe"“ÉðööfÓ¦õ¬_¿‚‚ÜÜÜvÄ»÷îÊêÕ+œêÚ¯X±›ÍF``2™ŒÕ«WàëëƒJ¥béÒEdgç TJ*ÏW_}Ùl&66€?^€Á` )IJªŸ3ç.&Nœ@T””,[QQAïÞ©\~ùhjkk„„„æ‹n¼üò‹Ü}÷]8p¹\N¯^É„……QTTLFFF“ªA¯¿þ_êêêHNN¤g¯q?   Çç²Ûí$&&pàÀÖ®]GYYÞÞÞ;ò$Ôj5¿ýö³£JT@@ ÇC.—3þ›Ì™sû÷D§Ó‘˜˜€‹‹ 7Þ8•´´!h4ºwïÆêÕ+áJÉÉɬ^½ÂáÍˆŽŽfõê¨T’ÊêÕ+P*¥P£=ºsðà^Ö­[ï$cýuž0a<©©©IaOW_}?ýô‹5#F gõêŽë5hÐ@V¯^——”‡ªR©פ>«°°ˆ‡z¥Rɾ}û9ò2 !_Bpú´É@ËDÌÿÙDÞJ[`…¸góé¹eÏ&-_kqÎW„¡£©¯WߘÐÐPBC’Sår9—^:¢Ùó}||:4Íé½!C.vz}òñÈÈH§Ê8ƒ t:~ÑEý^ÇÇÇ9âÙV­úûï???tº:ŒF©ƒöí·OoVÆæÖ©G€FÒìø¾}[¯î¥R©š|.wwwÆŽÓê9-%#'''“œœÜªÌõùõx{{;½vwwwzíêêÚDFe #,,Ìé½Áƒ9½ jh’ëïïï4¿L&k²ÞM7ÝBffÞÞÞ”——0vìåŽ+ÁéÓ¦„¯h4^1$Š 1>Ýðh¹IXï°KÏ¢4]›óéZw º¨³Eè2t êßâ±`Ïh|\›ïå h_üÝC ð;õÀ3B˜z)¯`ìØ1h4ºuëÆµ×Nä·ß~>­ªF‚³Ë¨Q£èׯ/~~¾¤¥ á±Çæñé§w¶Xç%mò È1}à‹ìÈû…|íQÑu²!#·;ý#.ku\¯ÐaÜzÑ3(ZÞRw–¤ëZ¸)=I FRHÓîœç*EŽÅ`®c_Ñ:Œ––+mÎ7¥'½B‡Ñ¯•ï¨B®ä拞dCÖ2òµGÏ¢t]‹HߤÅNlG/Ÿø›&hʨQ#5jdg‹!8 ^|ñ¹Îá‚¡Í9¾nÁŒJ¼¹#d´2zjÒè©I;õ`A—A)W1"a*#¦v¶(]ž(ß$¢ú&u¶@ ´QæT hÆ BŒA×D@ À0@ há@]a @¢ð†@ œ@@ 4ƒè+"º*¢“²@ öŽõ Ô™´ìÊÿƒ’Úì]§«"—ÉIèKÁ(åªÇÙìVÒK·r¤d;V»õ,JØuôˆ oØ(¼\ýZW\“Åî‚?©3UŸ%ɺ®JwRB‡éÛöžÂ@ 9díçA°Ú,¼µî.ªô%í6§ )›Ž¯ä¢È±\×û¡ÇüpðCÖg->‹RuMþÊø†y£¾F­pmöxnÕa毿ç,KÕõøûØ÷Ü6à9’B.>õàFÈ;xÃD èRX­V”Êö³»år9v»­Ýæ­Ñq3Êv ãà,±3ïW¬6K‹Ç·ç®9‹Òt]êLZonñø¶ÜŸÎ¢4]›­9mæåBùÚêêÚm>___´ZázÚ Y›¼íçA°ØLí6— ulv+¶Vv?Ž8{XlÆ–YÅ}8[Xlæ6Ÿ#÷ööA«Õv€8A×£¤¤´] „  `JKËÛm> +SSSƒ‡‡g‹ÇE#@ §¦¦’‘‘…É$,9àŸpôè1Û5ÄÈÝÝOO/ÊÊ*ÚmN +b³Ù8rä©©©§}ލb$º*r…BÁÀƒ8pà0¹¹ùèõúΖI 8o°X,”—W°wï~¼¼|ˆŽŽn÷5RSS)+« ##«UTÜÚJee;wî¡wï>¨T-W·@ Pxzz2bÄ¥äççSXXLmmâ—R 85J¥’ÀÀ@ú÷€»»{‡¬!—Ë4hEE…=š‰ÉdÂfßOàTÈd P(ðññeøðíêÝë*dn©âÈú *9—ÝÓÙâmÂb²Q™g@_mÁ/ܯ@µè~Ño‡ IDATš8ýZ†‡‡ÞY²‚VÐhBÑhB;[ àæ$ÃûV$üZFY¶c­›ÍŽ›· ωCüðqiÓ\£ªB#v› 8玂õdïª&(¶c6@Íc5Û©*4PSj"¦¿Og‹ã„Ù`£2߀Åh#,¹åœžÎÄl°±mQ!û-ÃÞhCÍ+HͰÛ"ˆìí݉Òˆí@ Î3v­*¦®¢ie’K‹è?QCÿIšÓžë‡WŽQt¤ŽÄ4?FÞÕþa’ç†Z †j f£ WO%î¾*ªÎ³üVƦ¯óqñTp[ÿ”N“£9Ö~’KÆÆJ"zy“‚ÍbgÙÓG¨Ì3 WÈpñP ¯¶PSjâÇÿd2|f$=F´_A‘ a @ÍtR>‡]'Žw'q¨?z­™£*©)5±}ia=½íîÑ®kYÍ6ìvPªå­Ž3ÔZpõP¶zù,FÈN=×Ù`ùÓG)ΨszO®”ÑË‹Á7†áÖ|£/Á¹ÉŽåÅã ÷•Á ˜¬A¡’Sv\ϯoeQ]bbÓÂ"{{ãá§¢2ß@ááZÉH) %Çt”סvS0¤¡´ÍbçèÆJʲõ¸y)ˆHñ&8¾Á»Vp°–ªBî¾*‡ççøN-ºJ3>Â{z9Æj,dl¬¤ªÈˆwšØ¾x©ÏÆ%:-„ ÁyФ½.  
Û0¾ý¿C¥×¢PÊøéõL.»/Öa0ìXZÄßËp÷U1ù…2·jÉÛ¿€¾×„2&Èq¬µe åÊí6ŠsG&“¡«2“½Së0²¶kÑk-Ä ðu:ßj¶¡×JʲÙàÜDÕfµ#WȰ[íh‹üúöq&¿Ð€(7þú0‡£*)¤Ãl°qd}yz&=Û ™\†¶ÈHy¶T)Q¡’cµØ¨)3±þ³<ܼ•NGg{‘#fGplK¿½}}µ…ÂÃuD÷mˆY¯)3‘¹¥Šš2¾â/öÃÍ»A*ÏÖS’©£®ÒŒ±ÎЇŸ M74Ýšzt*ó äí«¡ªÈˆ«§‚¨ÞÞ„$6Œ³š%ÃËb´áêBÜ _'c¥0½Žò=5%Fì6ðV“p±†:+YÛª0ÖYñ w%~°¯“·æè†J*rõj­¸z*ðt#áb_dré™·Û¡,KGÉ1µ&l;>!.$l>,Gz¦ÌÈä2º õï´ð,CV ½;ùùíቋ‡c•ŠÜ¶Uí\ÿyy¼Õô¹&„ª|û~.e׊"âøívZóXL6þü Cµ…ÐîtæÏñZ²wV³î³\"R¼P¹v¾gM@ œ§T—I_[Ae¾¬í MOCÜ‘É ~/û~.åø-ƒoC§5S~B1:Y!äË%3"IoL@´×>ß²ã:–?s›ÅNÆÆJ¢Ü(;®w)cƒ¸èZ *Y÷YžãX·aþ޹Ôn ¦˜‚¶ÈÈê—2¨-—£:Û@hŒº‘‚æâÑ gm×òçûÙNÔöeEŒ}0ÎaüôzµåM{Kõ½&„S MìÿUÊ3°Y ÓÚr³“`1ÚØ±´ÈñzçŠb&<ˆ» »ÍÎÊçŽ6Ygë¢B,&»SrJ™øL7‡â¾åÛê*sXÿ]ÎÕ%þw9”ëtÜÕ[IòèÀ&륯­à¯9 ½-¢Ss7jJ®»›Oó*®› c•êÒÓïÿe6Ø߯ASÈL•<…éµ”ד»¯æ´ „â#uT‘É`øì(ܼ”Dõñæë=Ñk-”eëÛ=<ðLÂ9NuÙŽlû¨³Å‚ žêÒ#N¯Ï‡Fik)8Xëô^ÏË ï%)0ñƒ%A[d¤"Ï@i¦ì’²ÚÃY ‘+eŽp “‘ËeÈdëŽ_¸+åÙz‡‚Y|´!~¿ï5!¨Ý$dDzbtZ3ÅGëœ „z|4.„$zP[^ÕDYí òöÕðã«Ç¨)5¡-2RØV½Â®×ZøûÃÌ1ý}ˆîëMúÚ ŠŽÔ±ö“\®{©‡“Ó)<Ù“°d/26VRY``÷ê’Gࠦ䘎_äa·ƒg€šî—øc·Óäž(T2ú^‚¶ØHƦ*jJMì[SÊ ©aNãâùâé¯âÐŸå˜ 6T®rz^LyŽžÜ=Õ”çèÉÜZEbš.“26…J†R-'w_ ™[ª¤géP-aI ‰Ç2ôŸ¤A&¹²é®vþþÖ~,ý'jèÙŒq6ñ hˆá×UYš£;ñ¬¹û¶Òå$*ó Ž"g¿Í?ÞäxMÙéõ!Jv;Ž@§¹JÂ@œ£®‚⬵-†@ ÎA\½•ø†ºàî­Â3HMžN%IC<ðVS]"…Å”çœð h'i+~*ʳõX͒Ƥ¯‘1•«W¯µÂ3P…Nkvoi.Ài½³¨«471T 5LuV\<dïÒb¬³ââ¡à’‘(”2‚bÝYüït*ó ÔU˜œÔÐ$OúM!²·KŸ8‚Ýf§"×€g€šý¿”b·Ky7¼ž„\Ñü½PºÈ©ªKL­£ìxÓИ˜~>$¦ùa1Û9ø[r¥ŒAׇb·Ã§³öb6ØN„ÔHBï+ƒ1ÔX¨«0ãNþŒµVª ΂\Fÿ‰ÍWÄÒùù,lV;½¯ æ¢kO¿rVGáæ£ÄÅS±ÖJÆÆJRÇ9ÏÙ]I/5õ’Ï{Ë Õ\½›ªÆïOTo<ý‹z…^¡–Æé«[~æåÊsÉ û0Jç{ïzn$Å A ‚æ8÷ÄôóaøÌÈ–Ƞט 6~™OúÚr‡â’tiC,y}¼sK;®§¢¾ÊÙ`£"GO@´“²ùçK ˜~>\t­£ÎJi¦Ž-ß³»šõŸç1êîhÇί±ÎÊsö79¿¦ÔÙ@¨§^©Â@Uä¡OölÑ8h2O¤+ÅGë0ÖY[sb-³^ ’ÉÀ7Ì•ÒL†Zé<“ÎÊïïf“³§ºIë‹Ñ9÷¤5êÃy”.òN÷4&u\0ÛRš¥ã¯9\t­W/%kùëC)Jí¦ y¤$³W`Ã=ËÞ]M÷Kš÷v)ÕrGBû€ëBÆp]¥ÙaèÖÏUSb¤2߀_xÓg? òD(’]òbô›‚R-Çn½ÖÜ&ÏFG" @ šA&»0þDöÀö%EÔ–K»ã! Æ4ÄK{»5©cÛâB|4.¸y)O»™TD//\=•j-üòÖqzŽ䨿Jl;2YÓ\‡sO…#Ž<,É“Ü}5äï¯q”@­ßéU¨dto&dª^I<E3¡9Êz£L{úFYý<ö&åxiH2v¼wRNÀÖï ÉÙ]«—’¾×„ r•³mq!ú6È’ÒlÖÛÐiͬ~ù“žé†‹gçW{JDöN-%Çt¤¯­ }m…d쟸&2¹ŒKfD:dÕt÷À3@Em¹™¿ä°}I!&³¡¤r•Óçª`¶/-"gw5ŸÏÙO° F½»ÕÎmH½*âù²mqv›ïçÆ'Ø…š“òQ4Ý=ˆLõ"wo »V³çǼÕèµ4Ý=÷P\‡_£ÓáÂøõëBxv#qÀŒÎC œ }ó»ÔVf;^w<O¿®ÙlêBÃ78¹³EhT®r’G²{U1 å(4¦çè@Ò×U`1ÚØ¹\Óßç´ OÃnà÷r¨.6²éë|é€LŠK?ÝäÍs›ÅNqFTá$þ'v~­f;þ‘n$]€\)Ãfµc¬³:U2:Qnª¥4SGîÞ"zy"“˰m(]:¶‚Meä Ñtóp„àìû¹´Í‚W šÁ7†±üé£Tùíã\ùH|§{Þ”j9ãŸHdë¢BŽn¨DWevšîôŸ !"¥¡\!ãò¹±üù^•‡!­PÉœ<ý&jP{(ع¼Cm±äòðS9ò>|B\5'šõŸç9Q»)œ:œ_v_,[¿+àðßXL6GÎKãÄòÎFç.îþhb‡w¶àY» ‚hoüC{wž@‚.Aê¸`Lz«“' 5\Oì–6WNÔ/•kŸïΑuÔ”šP»ÉÆAToo\½”N»ã‰CüŠsÇ¿‘ò7пpW©ñS¡Ï©QTh†xöàúOÒ l´£‘âÊM{ gÎ&G7T’³§c­›URÔd2H H ß»WS‘g`ýçylþ¦ufº_âÏÐÛ"N{­>WsøÏr,&?¾z 7o%r… ßpW®šß!Ÿ¯ž€(7 Ö’½«š•Ïgàê©p*‘ÛÖ¹†ÜÎÚsÉÛWÃþ_Jé5&èÔ'v0r¥ŒÁ7„1xjÛ—±cir…Œž£žÉz‚bÝ™òjL:+uUf\=¤\†Æá_2¤Œ "eLu•ftUf\½”N!J ='ñƒ}ÑU™1ém¸z)š4T¹ÊI›Á[#¨.1bÖÛðôW5›ÿÐYœ;’ÁyÉÉÛeçÎàÂåääËæÐU™ÙüMV“ìÝR‰Æ~5Í–¡ô uq*ÁYOTo¢ú8{w–mŒ_¸+®k:G=ÁñîN]g"S½%#; w?ylV»c]&—¡éæAß«ƒÆ’\!ãªÇØøe>™Ûªv~e8 ŠÓÅÃ_Å„§Yûq.%™:GnHã’ªŀɡÔU˜ÉÚ®uxIê×vói{ü{Ò¥dm«"wo [?دÅ£g¤Ž âø-åÙz~'¹"¿pW©IàI_µ»¢ÅJ^ñðSµRV»¯ ÷SD×ÉdàÓȳp.qŽÜA@ 8O9¹™V+ñÁÁÙ¤8CçèOÒ.ÿ¹”Lz®pÅÃqØ,vô5,&)TÄÍ[ÕlŸ<7o%£îŽæRkÚb#6³¯ µ“RyÓ›'…¦ÉàŽ¯ú4™+ ʉÏtÃb´¡-6¢vS8v£SÇ51Ón 'íÖð†iå²&óö@ÎÍÌÆ?‘èôZå*ç²ûb°mÔUš‘ÉÀÅSédœ47O=£æD3jŽsåt¬×㟠vW0þ‰DÒ×–s|»–꾡. u®# @ øÈNÒ"ZK Î&¡Ý<yW4ÆZ ~®„÷ìÜús¹RvÊa§ñ Y»UgRºÈ©Ï&J9>šss÷º½Q¹Êéuy½.ïüð§óa Á?B„ ÎM\½•ŽÆX@Ð:6U^ .pd2çŸQ»ýôëˆ @p." 
@ @à@@ð@ \h8rL&‡¢¼¼›­å6Þ‚³‹Ý\"àRÇëJS¿ýök'J$8âââ éÐu ÈÊÊB¯×uè:‚¶a©¶ S7T†Ùµ{òôêN”HÐ…BA``III(•—vê‡\&Ç& È'Ä3¥¢å{áÛã΢D]—pŸÄVŽucGÞ/gQš®K„oË÷¡%”ÅÅÅlß¾ÄÄ8úöMA.Ž…s‹Ñ-€ ôz=éé‡9~ü8lRÕæŸb³ÙØ´iv»•„„xÜÜÚ§r† ½è×ÙZÁjµQZZÊ/¿ü Aƒh¾tcGãïÊUÉw².s)•ú¢N‘¡+å—ĸ3‘µRÃòʤ;ùñÐŽWìÇ.Š tÞ®\}5¯¸Ç о‚<íamÄh_J¡")xi±×¶ù\™^¯·ÿñÇ Ô¿SwW‚ó¬¬lÔj’“{¶ë¼»wïF.‡èèÈvW èJ˜Lf¶mÛÉe—]†JÕöfPͱnÝ:ºu‹ÇÕµm¥"ÍVs»¬/pF&¥üôï­ÅfmK:¥BÙª‘֛݆UD®t ¹¹¬íÍ÷6lØ‚r×®$'wÆA#ŒF..êSÍ–-ÛIHHD­nŸçG§ÓQZZÂÀýÛe> «¢V«èÞ=½{÷Ò¿ç~ŸTŠö1PÿŒ¶‚ŽC.“#WˆÈ•s yuu5~~§èÝEøúë…ÄÅucîÜÛtžÙlF¯×wTmçÇ×лw¾ýö»<×c=ÁÀ“‘q¬$»ð  ¬¬¬Ýæ+--E£ n·ùÎv»üü|JKÛï:œm>ýôsz÷îÏï¿ÿÑ¡ë˜Ífrss©®>;9 ÇŽe2pàÅ<öØge½s€***:[ @ 8ç‘_¨ù%%¥DEÅGYYùiSZZÖ¦? |DLLAA¡„†F“À¥—ŽæË/¿Âb±œ©èÿ˜µkבÍÏ?Ÿ^2³Ñhâ™gžã†nÆlvv}¯^ýGŽeÇŽ!ꇷ·'••í§€TUUáåu~t?-((àæ›§EÏž½ILìArr*_½°³Ek‘ùóßå¶Ûfž~Äéý_ýììlþþ{m‡¬«×ë¹çžû $%¥/QQqôìÙ»ÃÖ«g÷î=9r”åËWtè:ç*2™LT™‚Óà‚+²ÛíŽ]9›­cþ TTTPUU…§§'QQQäçç³k×nî½w.ß¿˜+–vJÂ÷ƒ>@bb"cÇ^~Zã =ÿûß›ØlΙ_|ñ)û÷ïg„ñí.ç…ˆ\.Çjm¿çÍf³EòòòHKŽV«ÅÕÕ…>}zSUUEvvŠVª‰t6ï½÷ùùùÜwß=Nï¿ôÒóŒ=Š ®éuŸxâ)¾úêk<<<4h uuu8p°°°Y¯ž«®º’ùóß$%%¥C×9—±‹ s@ 8%çî_îâ÷ßÿ`áÂo(,,"%¥ü¾þú Ö¬ù‰éÓg²nÝzÞ{ï}î¾{ƒ‘7ß|‹õë7àééÉ5×\Å 7Lm´æ~–-[ÎÆ›ðööbüøk¸é¦©íûï/`ãÆhµÕ¤¦¦ðÒK/PTTÄ·ß~Ï¡C‡(++ÇËË‹‡zÂÂ"öïß··7×]w-äË/’••EMM ÑÑÑ\sÍÕÍ<ò/är9£GâÊ+¯à?þ$77—~ýú’œœ @zúÞyç=Ž9Jll wÜ1‹>}z’çæ“O>åÈ‘£”––âííÍСC˜={–ãæää°dÉ2þøãO ƒbÞ¼GÎì† Î æÎý?´Z-©©)|ÿý·h4RÉ×ììl‚‚‚ã–.]Æòå+©¨¨ ÿ~<øà\|||Ø´i3Ë–-'''‡ÚÚ:ºwïÆu×Mæ—_~eçÎ]„„3mÚ­ r1;vìä»ï¾';;›ÚÚ:âãã¹îºk6l(eeå¼ð‹¸¸¸ðòË/ðâ‹/SZZʃÎ%2²!éûÿ{“ÀÀ’““™5k›6maß¾}DEE2zô(¶lÙÊòå+ÉÉÉ¡ªªŠÈÈH¦NˆÃs˜L&>ùäS6nÜLEEAAA¤¤ôâÞ{ïn’ûÇðä“sdz¨¬¬ÄÏÏÏ1¦²²’ÿþ÷ìܹ“àà`nºéF.¿|´ã³}óÍ·ìß¿ŸÒÒ2ÜÝÝ™3çNöíÛÏáǹâŠq\v™4ö›o¾eëÖmŒs9qqqìØ±“‚‚Bz÷Nu¬µpá7üüó¯Ï3Ïüðc§ãü©ÓZ‹/eÆ¿éÑ£?üð# |ä8¶aÃF¾øâK^yå%î¸c555|úé縻»; „Å‹—™™Å­·Þâd ¬Zµ€Ë/ͬY3X¿~_}õ5Œ=ŠeË–óþû œdùî»ïY´è[F…Ùlf̘+صk7>>>hµZ–-[ÎÌ™·71BCCÉÌÌbÁ‚ cìØ1NÆAMM Ç"''WWW +V¬ä£0yò$6mÚÄO<å4çôéÓ°Z-|úéçäåå; „ÿüç523³¸þúë(((àÓO?'))‰Gy€{¯¾úÚ1ÏŽ;ùï_¡¸¸„´´K(//ÇÍÍÍ›7³lÙ –,ùžK/ÁÞ½û˜<ùzüüüعs×_@ 8ÿé2‚N§ã©§žà¹çžáª«®äþû`íÚu¼øâ+¼ýöޱcÆ\ÆG-`ÍšŸ™=ûN-Z¼yÿ"..ö´Öêß_ª‹~àÀA@Rj¶nÝÆ AY±b)yyù :œ×_ƒ¹sïã×_£¢¢‚nÝùóÏßpuuE§“jþù—lݺ &„7Þø#F §´´Äi½‘#/eá¯(.."<<œŸn¾ñÈĉxë­ÿ±`ÁG<÷Ü ü÷¿¯sË-7áééé³}ûÔj•cW÷düI #S§^Ï¿ÿ=7ß|›>ú„G}œÍ›×;ÆÅÇDZqãz¾ùæ[æÎ}¯¿þ†×^ûÙÙ9lÛ¶6o^Odd$UUU§u]ç&ééGa|w¥“——Ç›o¾ À‚šÂÌ™³Ù¿ÿóç¿ëd¨Î{³gÏdâÄɤ§áÎ;ï`îÜû¸á†›Øµk7Ë—¯dÞ¼Žñ3fLçñÇã7ÞâÍ7ßæé§ŸeêÔ)mú ~I¯^=qssouܽ÷ÞÍÿýßÜÿƒ¬X±’/¿üšÑ£G±pá·ìÚµ›ˆˆ-ú†ØØ84šðçyòÉÇ™<ùzŽËäæ›§¡Ñ„pÿý÷rçw “ɘ?ÿ]rrr¸öÚ‰,Xð>7nâê«'ðÊ+¯2yò$Ç<}úôæ§Ÿ~ ¼¼œ€€@’““xüñ§X·nz½žüü23³gРAMr6oÞÌW_}R©dþü7¹úê«(+“<?þ$åååÌž=“W^y‰eË–sûí³xõÕÿré¥#øî»E\{íD>üðŒF#ryÛËéu%¶ç®a}Ö2Jks;[” ™LN\@*£o%Ê/©Åq…Õ™üœþ)ÇÊva³‹òšA€Gƒ£®fHì„ÇXlf~<ôû ×¢3ÕœEéº.JwzjÒ¸"i6n*ÏSŸÐˆ.c :t˜ÚÚZ¼¼¼¸ë®;P*•LŸ>µk×±uëV§±þþxyy1eÊdžþErrrHOO?m¡®NRî]]¥†VÛ¶I»ÿZm5sæÜ €Z­¦ººšÜÜ}›6m`Ò¤IŽ Æ;Ÿ...¸ºº8íÖ6‡J¥ÄËË‹œË‚R\\ÂÞ½ûœB"##[,ñj6›;¤÷Ýw7‘‘‘Ì{}ô ‡vªÄ"—ËqqQ3eÊdxàÿ0›Í”””Fhh(………Œ=†éÓocΜ;O㪠ÎU‡ßµß½sç., ñL™2€©S¯çñÇŸtòލT*ÂÂÂ:4Í‘<¬Ñ„–6„]»vSRRÒd¼ŸŸ=ö(~ø1:ŽC‡·©³µF£!**ê”ãT*•û¶bÅJòóó©WÀ•WŽ#))Éáak‰Aƒ²cÇVÞÿ>ÿüKŠŠŠyôÑÇ)++ç‰'þͶmÛÈÉÉeÖ,éû¡P(ÈÌÌr*€ }÷] —Œ‘ÐÐP.¾x06lä÷ßÿàØ±L&MšÐ¬§nãÆÍ šÆÔ©’7ÀÃÃÀ!ÃÁƒ‡˜1c¶cÝ#GŽ0`€T*tÉ’eqß}÷0vì˜S^îJ•¾„¥ûÞÄjë¼]ô’mTÊ™{É‚ǬØÿ6Yû΢T]âšlVx‡øÀ¾„x5¯¹â¬$ IDAT›lËý‰ YËϲd] ‹IËÖœñv à²nÓÚtýx†ÔÖÖ:þ­R)1›M€¤˜×+4õ ¼ÑØòó3©FôãkHMíHáõÿ/..¦¸¸˜””^¤¥ Án·ÍÊ•Ë>üŽÍàá‡ÿÅ´i·8 /¯¶Y~­!“Éðóó¤ðÆŠCk>l6;V«´Ûââ"];µº¡AÉÔ´ù»»»Ãà0›Í¸ºº°fÍj&MšHyy/¿ü*£F]ŽÉdúçLÐ)ÄÇ7tÊ\»v]³cêŸú羃¡ÙsêCnl'èÔ{µ,–æwü\\ÔŽï‰Á`@q¢®¶ÉdjR«žúg¿­… êKC×?·õž‡¶”¸ âÉ'çÀ½\wÔåò›o¤ÒÄõ¿UUUŽßŒÁƒ1xð S~WêÃ|–.]ÎÒ¥ËN¼×¼G¥þ·¯±±žzÊË+(..¦¼¼œ´´!¤¤H¿k&ŒgÁ‚÷éÑ£;7nbêÔ›xûíwNûów5òµG…qp–(¬Îlµ]nÕá³(M×ÅŽ>ŽüüBB‚¹ûî9ÜqÇL@êÌ»ví<ýô³ìÙ³×QJ²ªª FCZÚ&Þ¨¨HÒÒ†˜˜àô~LL4ÞÞÞDD„3xð žy¦¡ Ê»ï¾Í“O>Í®]»±Û턇KõØûô采þþR¸ÇWŒcÉ’ïyóÍ·ÉÈ8FïÞ©ÜqÇ,‡bàããCZÚ"##s2£Ñˆ‹‹+6›¾}û’——››+·Ýv+·Ürs«edç>ÑÑÑlÙ²‘—_þ;vì ;;???bcc9òRæÏ‹ÄÄDV­ZMee}ûöá©§žpT‹#-mÑÑQ'æŒ"-mqqRSddóÏubbjµ ÑÑQ 6”'žxÜqìwÞB£ 
aÓ¦ÍhµÕôêÕ“îÝ»9*-Í›÷f³™¿ÿ^‡N§sä÷$&&œxŽ¥×±±±¤¥ !&FÚ(ðóó#-m ñ޵^{í?<úè<²²²0\uÕx<<<š4º³Ùl\ýÖ¬ù‰ììld29II=˜0a¼£äiLL4ë×ÿÍ /¼Ä¾}û¨¨¨$)©QQ’<Í~÷ë™5k[·nÃÛÛ›+®h0z|}}O|Ž@ÊkøñÇU<÷Ü lܸ‰ââbââb±X, püñ+/¿ü*¤¶¶–ÔÔTǵ‹ŒŒÀßߟììlºuëÆ´i·0kÖŒS<)@ 8ýòËÏöûw¶‚dþüwyüñ'™2eòi—kœeeåÔÕHMm¾šO[Ùµk¾øøx·Ë|Ï>û<¯¿þwÞ9ÛQÆ´³0™LÌšu'ýû÷C.—³hÑböìÙËe—fÑ¢o;U63›7oc̘±í2׺uëèÖ-WW—SŽ=P´/¶?uÊq‚öáùqkP)TÍû÷ã°ØZÎQ´Sú}z;’ÿ‚ÎÆl°a·Ù‘+e(Õ­§ZL6*ó ÔU˜ñðSí†\!åÒXÍ6¬f©œ±Ú]ÕlÇj–ª€<·Ýf½µÙc‚FÁÁZöÿRŠNkaÂS‰-N—Âl°¡r½ðŸeñ׫ ˜˜Ð$n[ 8ßéÙ³g³È:‹˜˜hGŽ‚@p.a¬³òõý0lx¨¹áõ$‡ÂßC­…Í 8²¾»­¡§‰ÊUÎàÃIÀÖï Ù»¦•«œÛ?JEWefÑ£‡s_ÿj”.’ò´syÛ—pÙý1Ä ð=;¸ƒ9ð[eÇõj,`w>!.ÄöÅïù°¦ö¦,[OÖv-.ž"ïlqøï ¶}_ˆÍngÚ»ÿ¬úÞù€0@ ¸€9ø{fƒ´Ë_[n"sk û91T[Xôh::­›/WÊP»+0ÔX0l¸û4¯.x©<5ŒuŸåQ[nbÛâB.¾)m±‘]+¥¦†qƒ|/ã`÷ªjË›–ßòmƒ¦†‘:.¨¤t4ºJ3:­W¯®¡:wO)AÄfµsàW©Ÿg€šÚr{×”61Ö‘'2rS8=/ D®a1Ù8¾CKTï– %$ $s›–ü5ìû¹ŒÄ4¶|[€ÕlÃÕKɰi-ž{>çNâ?*rõdï®F¯µ°ia>aIžƸIƒì ?áipóVBSÇ4ÌfǤ·áâ®hq p"œ«•'0lÈ Pµ c:þ¥vkðBجvlVûyf³ÚÙ÷S)ùk©)5á᫤çeAÄðaßO¥gÔáéF¿ñR%¶­ßR]b$<Ù‹¤‘R§²ãzv­*¦2߀L&À}¯ !$ÁñŽÙ`å·ùÇð så¢I,F;WSt¤³ÁJ`Œ;ýƇà¤vœ»ÿ—RŠêбšmxªIêLék+Ðk-øh\HDp|ë=o:a @pr|»–ºJ3jw£æD³â¹£”fê(ÍÔ') Æ:+Ç6WÐ-ÍŸ”± ;àJµ¼‰1Ñ ŸÉ¢yR¨Ñ¯C_-5:-Wï SÕðpu\«ã;µüüzØ¡(½–À7ÿUΆ/ò±˜$ïB%'qˆ/Cn‰pİÿþN6‡jÑkÍØí t‘ÚÝ“¡·EàÜ XgÔ±þ³<Ês¤†§-ytŽïÔ²å›ªŠŒÈå2cÜ6=Òa°ë¬üüz&å¹L:É@póVÒst eÙzrvWc·Ùñq!í–p"[1 Ï%l;«^Ì èH㽪ˆéï@Ñ‘:2·Va¨±: „¼}5”féP¹*H@m¹™U/d8 '€Š\)cƒÖ²šíŽï‹¦»MÒ`Ò[YúÄ´EFǸ²ãzŽm®äê'+}×v¯*¡®Òì4ö®j§ùK³tdn«bò Ýñ wmËsFœ&¢@ ‚ÓâÀo’÷ qˆšîE±þ}€Ê|ƒãßQ}ÎL!ô Tsñáã ö"â_8¡E­a1Úÿvñ””w•››ÍŽg€¥ZŽÕlãðßì\^ä›w ]•¤0*T2,F¹{«ùéµL8‘RSjbÕ ”×c·Ù‘ÉpR2ëÉÚ&)U…Fd26«’c:–=}Äq-F…éu˜tVä 2¹ }µ…íK‹8¾C HÉåÚ"#?¿‘帗ç:»(¡èH2¹ŒäÑLx*‘+‰#vàé?‡j0é­Èä2Fßôw{qõc „$8ïä«ÝŒy –1Ä2hŠÔ3jÇÒ"´EF”j9® eä]Ñx¨0l¬û$¯ÉZI#¸ò_ñŽï£\!c̱ »=¹R†Íb'}mÅ?¸"ÿœ6›õ‡K¶òûÑ/É©<Ôò€Pï8.M¸‘Þa#ZSc,gÕ÷9P´‹­i,¤àŸã©ö¡wø¥\•<¹ìü°¥ –:Vî‡ýEë1Zt-Ή«Òƒ^¡—0¾×ݨ-ïî¬ÏZÆÆ¬¥”ë Ï¢t]™LN|@oÆt¿(¿æÆuu´EF ÕÐm˜?݇ùSv<Ÿc›«rK8j7µe C<üÏ<É6"Å ¹BRL¡a÷öB¥4Kdž/ò©,0PzLú½•+eDö–š#F÷õæöSP¨$ãàÏr8¶¹ŠœÝÕ šæ4× ©a¤Œ b×Êb¶/)¢2ß@éqA±îìþ¡«ÙŽÊUΘb Oöbó·ìù¡ÄiŽM_çïΨ9Ñj­üòFu•f¶~WȘcÆ_ñH¾a®üðÒ1* D¹1éÙnäì©æçÿea5ÛÉÞUMáþu ÛÌ­'vô»y0ì¶3 ióô—<6v›Œ•¸û( Köl2N®”5y¶söÔÙÛËá¡0ÔXØøU>¥Y: Õ'OšW šˆ/ªŠ(;ž‡Íj'´‡'. 
ÿYNi–Žê#I›´‹ÍÌ’½¯ ã ƒ)¬ÎdÉÞ×Ñ›k[ógÆ7ì)øSH­Iˆ¬åì)ø«³E9m6f­`GÞ/Â8è@ –:¶ç®aKöªÇ”Õå³úà{Â8è@ìve»Xy`~g‹rÎR¿©t‘S™o }m…#ÜÅb²‘±IRªWÞ©ORn+v;ü¹ ÇalZX€^{~ì@Ÿ ¹öÿRJþ~içÙ;XÍ„'q=áA°Yíü£œßßÉæ—7S[.][]Uó×D®‘0¤!œ«ÞKPž­$å7¼§ÈÀÝ×ÙÓiÍÔœ0ôºÀ;Ä…àxw¢ëCl2êh?¡'”`“ÞŠ\)#ºŸ#J׌§â\¤¦Dúìšn­´Û[<–ìIR.ÂñZV>ŸÁÊçŽ:<<­Q¯Ìׇ6úwuióºšOHC“Åúï¦FzϬ·5{ÎÙ¢MB±œjCÙ© þ1F‹ŽÒÚÜçV¥ŸEiº6¹U‡;[„Óæ|’õ|'·êH‹ÇòµG±Û;÷ǽ« ~ ›Çn³sdd XŒ6þZÃ_ rØò]ƒÑšþw9Þš%¥>¶º­ì[SBá oEê¸ d2iõïrÎô#œóź3`r¨#¡Wí® J ±Yì¬|>ƒ_æslKå9z*r%EßÞŠ’ÚØX³è7a¬• ŠÖªç46ļÕMþmª³:•®uZó„±QoÜÉdàæ#½gµœ¿cî¾Òµ)ÏÑ7{\í&Ý£šòÖ•ýá3#™ô\7Óü+d¦×±ù›@ò€ô}:ùÖ_çÆëW4úwã{Ò˜fƒNƒ~V8?â&@ œ6ùj;Ðñƒ}Iàø/,IÚ1.9¦C[dÄÃOEt_)÷ sK›¾Î§ºÄv0ÖZÉÜREuqËáÚ"#[I†Gì.¾)œ^c¤äÝì]ÕKÝQøGºÒoBCn‘r/ÊŽëÙ±¼Âêwþ'<•ÈÍoõä’gÖ¬ÔûÄ.sÞþG^ÂÉø†º “KšeãDÝâÿö i8~!ÚCz¦ Ö²ke1Úb#•‡ÂÂAkJŒþ»‚ºJ³£Á_=5¥& ÕâæÊˆÙQŽÊ]õ‰ÇõÆ›Åd“šÔU™÷8¼—V–»§†Ãý?{÷ßVy/~üs´å)ïÏ8‰3½I $„]fX—r;øQÊåÂí`”–² ½.´ÐrYRFØ£Œ ÙƒéÄŽãxÅ{Èò’µ¥ß'>¶"Ù±aÇyÞ¯—_‰ÎÐyt$ï3¾™êíì?š=,*Å€±—AåÃÙ™WbAAúT¼ÙÈ]Qο#©Ç½a{£“·î.àÐ& 3¯NdîM)ÊàÕ}_6²ïËF$©»GÆ´+˜yuRÐcmþG5—œsþÍrÿïY×&Qº½«ÅŶwjÈœ‰.ddNê•»(†C›š©+²²ç_õŒY¥Ô6ÙÛ†Z+ÑÞK7“ãÉšeâÈÑ4ªþá–ÄÒXêßT­U‘9=’Ò-ìû²¯Ç‹½ÍMÅ9CNÖ,>ýÊDÊw¶bksóý»µ|ÿ®°ŽžÅù¿HgìÂhv~\‡ÛáeýKÁ[µÊw¶²åŸò8ŽžŸý®€:}j:£§ÍÖ7ªÙòF5†p ·ü}"3¯L¤lG ¶V7ë_îîý¡RKJy¦-‚ ‚0‚¸^Ê~» eÎŒô @ÎíÞÕWúð¹v?2AÏuGÖL“Òe¦ë)4J«ô«?VùÎV*÷É7¡S.ŽW9kt*f\%燷·¹••G$INç*©äì3߯ª%&ͨäÎßùaïß_¤Ü´ÔØs£É™/Mh,ídÝ‹G8¼Õ°ÝÜ›RˆLÐãvxÙóY×7ãóúHÈ %ï’ø€íG’Ð(-×>1ŽñçÇ“fD¢&"AOÄÑ9Œ®|d ™3"‰LÐbÒ=Ê@ÆôH¥¥ Ĥ%!'”Ð(-Z£š¨TÓ¯Ldæ5r`¬ QsÙƒ£5%‚°áq:sBåù>"4\óø8Æ,ˆ&2QOh”–´¼®x(‡Ô£­ £CIÊ #,F.—>TMRnI¹a¨µò5*Ù@Rn1éÆÓy ˆAAA4:ÿþòä>·¹ò‘1ËBLZ.¸+|ÐÑìÄió­õ«ùŸ{c soì®͘ÉmÿÌ zŒq‹b”AŸ#Éäåq8:=Äö¸‹I3²èç£hkpʵÏ^—ÜŸMñf ¥8:=èCÔ#5Äft^Í»8—ë ®U©%¦x+*µ;KÚy·§“3?ŠªüvÚœh *B£u~)8Ãb´\õØX ¿3ÓTÞ‰Z«"atcÏVºi*åù#âå®KIãB™~e¢ß„i“–Æb·zH˜Åg¸2DhXpk’ ,ýÏÌ^×gÏ17-oLš‘‹~t1RÃâÛÒúÜÿ‚»2üÇe†pÙ£ý–M»"iW$ôù<§ƒAAè&¡Ôp zN$×Ó˜sÓæ.Ž!wqïAÒ”‹ýköUjI™™÷X£&G0jrßóTh *&/^>søûüIã”>ü]ºÆg¯ xÝ>Ú›œ¸ì^Ô:‰°h’¢+«ÅEK¯ÛGxœŽÈ$ƒÒëóúpÙå+½ •zäê,Ë‹çhv…cû”v˜´ÔÈzÂãu~)¼@nú¶6»”÷*"N×ëTð}G8}ܯ_êž$•¤|×<.Ÿ<èK¯&JáC™¡RkP)5Zn§¯Ûç÷\ÇrÙ½½fàP©%4úîý<./–jf'!&-¦$ýYùùñz|X-.œ$I"$RtFÛžß³`üÎ,µvÚêè *"Ž6¥ ‚ #Ë 4”tòñ‹nLIzÆ/‰eÒÒ8%e”ËîeÝKGä 5zl£ã’û²‰LÔS_ÜÉ'pÉ}ÙrÎaÁÏÖ±ç³ÔZ?])7cÛÚÜ|÷G¨Üë?exd¢žËÌÁ©¡bW_=]êÿd’œþkⱌ_«ô}Øñ~{?o@£Sñ“Wûn.NoþZΑ=mA×Åe†(]äÏ…œÁãʇÇ—å?ûäá­Öü­€‹ïÍVúe®ý[e?´“fäêÇÇ=ÎçO–P_¬WÞ·c©µ?]9³²¬©2 `êÄp.¾7ûä¿AaÈœñƒ”}^Ÿò#¨Ñ©”Úè–Z[Þ¨fÛ;5ʶ›^«¢t»èÃÔD%Pi$ÜN/áq¢9õDô ŒLIz¹õŇ’ÞËÛãfE£WɵÅ>9£ÆÖ·jxÿ¾"¥†Y8óôœuçGþ}>ü)6›G_šÊm|÷G°µº‘TQÉ á¼ß Í{&ò¸»¯‹‘‰z¢Räk]{““ï×ñÇ0êvzYý¿eJp§“ózKœ‘éûA„¾ Û+»×ã£hƒ<?&ÍHBŽ<€§¥ÆNMa’$)SÇwYvO&ɹaXªì|ùT)Öf¾ibÖµI¨Tå»ZȘ©L9îvz±TÛEW¢àvz©Ê—§·(†…?•s=»ìÞ^sg_ñ‡bÒŒX›]ä¯ndï ´Ö;Øøjçß‘~ÚÊ.ô_l†‘+þã·Lê‘¥½G€P±§ K•]dW¶£…ÖŸ…óà„”‰á,ÿ/ÿAf]]•ŽìiSºB]ûä8LIr÷6s…ðø³·àÂ{21%ðz||úÈaê[)ý¾% EE­•¸õÅIÇì-ŸÛÆÒN¬ÍrNýs2JéSmµ¸ð8ÏŒ‰”A„þ¶-*µœ3xãÊ*¾|ª[›¯ÇÇ7-gãÊ*,Õv%%Ô±ûŤÉž#§s;½ØÚäî]÷2­ eÊsNå75¶08]çÖRmWη֠:nš®Ðh-s®OfôÑ÷ëð6‹2D^$IB­UùýõÌõmk•¿Sj­Ür´ïËeÝÞϺ×!§=\,CWpßãrPWÔ¡t#ŒI7q–Q©%åüëCƒŸcÏ­rí6”t* …Fi•Iœ„¾©¤a[7âH’* µkOâ½8}ú:×j•xNµjà¿ÃúÝ™qU"Uùí4–u²ùõ*¢Gh®´“fdöŠä€í»+[ªìo–s;G$t¢Ëžm¢`­K•·ï) -/‚ñçÇ2j²gp"4:éÓ"(ÛÑJ}±•7ï* sF$ãÏ ÈŒÐ›Ì™‘r^g4WÚ”#aø°TÛyÿþ"¿e‹oKS‚ÀΣýþsŰÿ›&Š·X˜u]2m J: Ò’0:”Ò-ØÛו¬¾ØP†ewg§#sz$»>®Ããò±þåJö|Öp4Íb4†ða}©;¥Ö¿R‰$I4WÚpX=èŒjæ\xýô¸}çvÚ dÍ2Ÿe$"AO[½ƒƒëÌ”ýИùÑŒ_«´Ô};C4mö‘9«ðp2%y!UïÝ §§.akÅg§±Dg'£6Œñ s{]?-õ¾?òÅi,ÑÙkZÊ’ï3¬5Uj‰óïHçýû‹(ÙÞBé¹óü_¤m=øòüÀªµÓÔKvîM)Hj‰Âµf¼å;[)ßÙʨ),½+Ão€¬00‹~–†ÖPMñ¦f<./‡·Z8¼ÕÂè¹Q,þiÇíÂÕ3J{£SÃÛé  êrx•uî£ÿO†¥ÆAõv ¿3c©¶0á‚X:ŽvS±[ׂಖÁ}´6;*ÕÀ…÷d±qe%m NZël_UÞÏë9ÿgmE@]‘ÿàn¯×G[Csko—ß'µVÅE¿Îbý+•Ôvàè𿺑ýß41ëš$ò.Ù“0 Zµ–;æ?Çîêµ4vŸÉU81’¤&+f“’ô¹Ý%n'#z2‡›váõ‰qo§BtH2SSÏG¯é½‡Ffô$nŸ÷,ùµ±¹‚'ÂNŒ^ÂÄ„ùdÅŸ«¤/Ã:@y€ÝÔËØñ~->¯É—ÄûMÒS×MhW?äÅÿ/ìÙÝ“^ht*Ýa± IDATι%•i—'P°ÆÌ5MØÛÜTîmcçGu̾.°VMè]ˆšÅ·¥1ãÊD|ÛDáwfœoµ—idòE}ß@ôì¿™(j$‡#S’žÿ>ÊoYô(ù»èììþ‘ÕUL\Kõv|Ý„ÝêF­•È]þ¯pX÷£—œü¿§á±Ýã R'…³âr©ØÝFþêFj ä›Ù5Ï—sÃ3ãÏÊ®FKÿ3“ðXmõ ¾3S½¿µ¯ 
:Õà×P¥–²õü.F&ê¹ìÑ4–u’ÿU#%ÛZðz|l_UCâ˜PÇŠ þxLÆx^1ÔÅ8ëiT:òR“—²x¨‹rÖˈž@Fô„¡.†ݯ2wvz(Ú`VmhVú¸kù¯³¸æ‰qJ.õÝŸÖÍßbÒ2ãªD®ÿŸ\¥ëÁ±µlBï:-]µŠ­áq:æ\ŸÌµOŒSZdêÿܬ•ßcI%)7Âð¢5¨IÎ óûëºávô´F5éS#‹ÑÒÙêÂëö‘5Ë„!B£ô}ïP „>,° ǶüI*‰Œé‘\zÿh¥fÛaõ(-g›¨d=±F²f›˜Ó£kfýaÿ磻"àÜ›ã .3„ónOçÂ{º‹÷ç;.‚ œ9†}€°áU¹»@ê¤p’ƆÒÙâbíß*üæ1èÉ”¤gÖµI€œ½¤kp$@éŽË:•I¼nꣃ,ÕAºu˜å ÕzþyݽO(4RUïo§©Ü>9‹TÕ~9cQÏÔ°Å›-˜Ø”óãóù”¬A'BóÉÝEšÊm|ýLµ…€H Qã´uÏΚ9=2àyÖ½ØOôÚ'ÆõÚÅiDòÁ¦×«h©u Ñ©p÷Hi˜»H>ÿ.»— ¯Tâvz‘$Ð…ªqZ=ÊDÆŒÀsûþEËÇ„2ûhpדÇåeÕoú-‹ˆ×qýÓãOä• d>bã;,¿þ©\Ü=2OuÝ,Ž?/–³ C¸š„Ñr÷“®@|°™ªj ;‚–áÇÿ7‰Ã[šÙ¾ªöhTHªîãD&êÏ®ïm=TŒ$ùe±F’Çû'ð¸|AÏí²»31FhØüz›_—»"i *åù4:©“ÎÎñ‚ #Õ°­Rs;¼ly£€©—%­%)ÇÞ½{711&"##úµýë;~OAý–“rì3Ug« [‹µNEXŒö”e'›’¼˜¦=tÝÞšu¼µëÑSrÜpX=8m|^y†ó®ñYƒå´y°š]r@´¶×ùN·'/ù¶ßÛnÛ¶ƒeË.<)Çݸq#cÆdc0ˆÄ ‚ Œ›7oÞ]ŒN•ZYrA­•Ž[K¨ÖJýʉnŒÐ`ìßý® XH¤–ÈÞs ŸMô¡ê“z¯3ªÑ¥ @A8u†m#AAAN? ‚  hµ\.‘QM„‘E¥’D€ ‚ ƒƒÅÒ2ÔÅA8i¼^jµZ‚ ‚0ÉÉÉTW×âó}óã‚02•••“’’:°A£ÿN'µª÷1ä*é¬_>d4}¼ÃøŽž>jUïƒuûZ'œ\}]'Oµ°°0RSS)(8(‚AÎxMÔ×7’››;°,FáúFÇNåpÓîSU6ᨄðL’#G÷º~zê”7çŸÆ$$¦¥.êbôÛ´Ô ØW»~¨‹qV˜–º´×ucãf¦‹¤ÃÙzKtvš–rÁâÄI°eË÷¤¥¥F#DAÎ >´··S_߈ÇãeñâÅH’4ð4§7Mÿ=ûjÖSa)DÉÉ'‘™ÍääÅHÁ&8jæ¨ ‰4ÄQX¿§ÇqËwöÓ›˜’¼˜Äð¬¡.J¿å&Ìá¶¹O±¿n#vWçPgD2hØœ´€ŒèI½n£Uë¹ãœçØ]½³µæ4–îì!Ij²c¦01霡. ãÇ'==ºº:jjêq:ÅÀeAÎ *•Ѝ¨(rsǧ,ÐDi‚ ôm¨'J¡o's¢4A„‘J RAAA!AAA"@AAA!AAA"@AAA1¨f;*9ÒR(&†9$I"9"›¤ˆìãnkw[9ÔðNý4”ìì¦7‘;}H'b §ÛÆ¡¦Ø]Ö¡.ʈdÔ†1&nZµ¾Ïí|ø¨h>@“µú4•ì좒TdÅäa2ÆcAa@|çóñþ¿²µü“SQ¡‡II ¸iúz]¸i7+¿¿·×uKuö ÑEpÏ— ×GuQú¥¶­„¿m¾K§˜NcäÎsž'>,-èz>žÛôKªZŠNsÉÎ.ËsÊÂì놺(‚ #Ê€ºu:ÛØ^ñ¯SU¡‡üÚ4Y«z]¿¡ô=œògþó¡.F¿m*ûH§ÓmcsÙG½®/nÜ)‚ƒÓÀ‡ï¿3ÔÅAq 8Üx}žSUáÎö^×ÙúX'œ\6×™s®;]mC]„³F_Ÿ‹3é3s¦çZá䃔AAAPˆAAA…AAAPˆAAA…ARN›¯GÌ©"‚ ÃÅi›jß—t49ƒ®‹J1{^ e;Z©=ØZ+1ëÚ$$•ä·mK‚5f¦]ž€!B~ û¿n¤­ÞId’ž Kb[žÂïÌXªì¨u*ù8þ‡¡µÎAáwf¬Í.\­Šðx=Q)z²f™Ðèüc«ò­Ôt ÖI̾.¹_çd$ÙùQŽñ£C=7 €]ŸÔÓÞàÀaõàóBXŒ–ðx=Ó"ˆHðŸdÊÚìbïç ä.Ž!*ÕpÚ_ƒœ³ÓƒÖ¨øŽôeÝ‹G¨Úß΢Ÿ§‘:1<è6æ#6¾üïR¬+þ'—Èľ';Û9:<¬m¢©ÂFkC¨š¸ìPFÏ1“n¤µÎÁÁõÍt49qÙ=h jÂb´$Œ %=/ÂïZzhS3Me¶ Ç1Fj˜zYBÀò½Ÿ7`mvõº`Çûµ¸l^â²BÈ™°þðV ‡; Ö2åâøAž AáT;mBÉ6 %Aך¡‡·X(ÝÑ€)ÙÀØsý'¨Úöv »åTŽ3"IŽ t{ µEVRƇ7@°µ¹Ùüj<./ £CȘé·ùˆM¹a=Ö¶·k˜uMãÅ(Ëj‹¬ä¯nDkP•BÑúfÚ›œŒµE+ÂÎêðºk†·¿SCÎü(æÿ[*ZƒhÙÚÜä¯n yB˜†Xm‘•]ÕÑXÞ‰£ÃƒJ-=Ê@Æ´H¦ÿ(úÜ/‡6Yðy}”noé5@p;¼X-þsyo¶ôú½ÓèT\ñP_ü¹„ÎwŸåŸÿo)D§ùú/e´Õ;qÚäôÌ!&-¦$=ãÏ‹aÔ”ˆ>Ÿc8)ÛÑʆ••ØÛü_wuAÍ•6–ÿ*‹¦r{>«ºD‚žËMh´€Ê½íÞj º­)I4WÙÙövò8uR8q™!~Ûx=>vRϽŠQ“• ¿ã^©Äe÷bŒÐˆAa;mB—ˆxY³M~ËLIÝ7ƒí=Zö|VÏØÑÊ͈ùˆM Ú;ð2ìÿºI ö~Ñ ô”wI<†p -µJ¶Y°µºYÿJ%¡QÚ3ê&c¨¤Œ#cz$íMN*÷µc©¶S´¡Ÿÿ¿à3Ñ CçûwkÙýY=ôˆí¼Må6$•Äô+ûÜ_£W1gE5…L\7 cÛÚܘ¯Ùîjµ³TÛé0÷=I ³Ó‹Û奦 ã˜åZjì”ïleÚå ̼&i@å -µÖü­ˋ֠"÷¼X"tt¶¸©Úß´¦>ïÒÔZ‰úCVªö·ÓVï`Ý‹G¸øÞl¿íôajrÇø-3Fhžoß ÇkØ­ÍÝ?ü-µÊwµ*7ïÇÖ,v¶ |&a¯ÛGáÚ&&.‹cÿêFꊬ˜+lĤƒî3æœh¥F{æÕ‰|ð»Ct¶¸X÷R%7?7aÀe8ÛÄeâ2ùFqÎõ>Ö½XÉ¡MÍÚÔ̘s£I6Ä%ºThg÷§r-tLš‘W%•jÀÑá¡îU©îâvxi©uÐÑìD­–ˆN3¥%cz$iy~5Èn§s… {‡Ÿ¯ïqKÿ3®»©¢«‹ÌŸ¦ávÊ7šEš)ßÙŠZ+±ä—ʶqÙF|ÝuL»"ìÙQ´58øáƒ:ÌGlìú¤žñKb ¼!N¶üSníTi$~ôпֵWÖÆ.ˆÂ”,o÷É#ÅÔY©;dÅçõùu52†kŽÛâiow+­ ]×ÌÒï[˜{c2!¦îswlkÐoš˜zi½ ·ÃËo›üÖw¶¸‰ˆ×õã â±âmÍÇÛºo[’! 
nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/Makefile.in0000644000000000000000000000013115067751347017665 xustar0030 mtime=1759498983.808730573 29 atime=1759499020.73329698 30 ctime=1759499031.588116511 nordugrid-arc-7.1.1/src/doxygen/Makefile.in0000644000175000002070000005265515067751347021575 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/doxygen ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4
$(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ 
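# Illustrative usage of the SDK documentation target defined near the end of
# this Makefile (a sketch; it assumes a configured build tree with doxygen
# 1.8.0 or newer on the PATH):
#
#   cd src/doxygen && make SDK
#
# This regenerates Doxyfile.SDK.build from Doxyfile.SDK, runs doxygen on it,
# and then runs add-bindings-deviations-to-dox.py over the SWIG interfaces to
# annotate the generated HTML under SDK/html with Python-binding deviations.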
DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ 
PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SWIG_DEPENDS = \ $(top_srcdir)/swig/common.i \ $(top_srcdir)/swig/credential.i \ $(top_srcdir)/swig/data.i \ $(top_srcdir)/swig/compute.i INPUT = $(top_srcdir)/src/hed/libs/common \ $(top_srcdir)/src/hed/libs/credential \ $(top_srcdir)/src/hed/libs/data \ $(top_srcdir)/src/libs/data-staging \ $(top_srcdir)/src/hed/libs/compute CPP_EXAMPLES = $(top_srcdir)/src/hed/libs/common/examples \ $(top_srcdir)/src/hed/libs/credential/examples \ $(top_srcdir)/src/hed/libs/data/examples \ 
$(top_srcdir)/src/libs/data-staging/examples \ $(top_srcdir)/src/hed/libs/compute/examples PYTHON_EXAMPLES = $(top_srcdir)/python/examples SPECIALISATION_MAPPINGS = JobDescription SPECIALISATION_MAPPINGS_JobDescription = \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/XRSLParser.cpp \ $(top_srcdir)/src/hed/acc/JobDescriptionParser/ADLParser.cpp SDKDEPENDENCIES = Doxyfile.SDK.build Doxyfile.SDK.build.layout.xml \ $(srcdir)/add-bindings-deviations-to-dox.py \ $(srcdir)/images/arcsdk.png \ $(srcdir)/adapt-and-filter-mapping-attributes.sed \ $(SWIG_DEPENDS) \ $(wildcard $(addsuffix /*.h, $(INPUT))) \ $(wildcard $(addsuffix /*.cpp, $(CPP_EXAMPLES))) \ $(wildcard $(addsuffix /*.py, $(PYTHON_EXAMPLES))) \ $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)) EXTRA_DIST = Doxyfile.SDK Doxyfile.SDK.layout.xml images/arcsdk.png \ add-bindings-deviations-to-dox.py \ adapt-and-filter-mapping-attributes.sed \ create-mapping-documentation.py CLEANFILES = SDK all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/doxygen/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/doxygen/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am .PRECIOUS: Makefile .SECONDEXPANSION: $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)): %_Mapping.dox: $(srcdir)/create-mapping-documentation.py $(top_srcdir)/src/hed/libs/compute/%.h $$(SPECIALISATION_MAPPINGS_%) $(PYTHON) $^ $*_Mapping.dox Doxyfile.SDK.build: $(top_srcdir)/src/doxygen/Doxyfile.SDK cp $(srcdir)/Doxyfile.SDK Doxyfile.SDK.build sed "s/@TOP_SRCDIR@/$(subst /,\/,$(top_srcdir))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@INPUT@/$(subst /,\/,$(INPUT) $(addsuffix _Mapping.dox, $(SPECIALISATION_MAPPINGS)))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed "s/@EXAMPLES@/$(subst /,\/,$(CPP_EXAMPLES) $(PYTHON_EXAMPLES))/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build sed 
"s/Doxyfile.SDK.layout.xml/Doxyfile.SDK.build.layout.xml/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build for mapping in $(SPECIALISATION_MAPPINGS); do \ sed "s/^FILTER_PATTERNS[[:space:]]*=/& *\/$${mapping}.h=$(subst /,\/,$(srcdir))\/adapt-and-filter-mapping-attributes.sed/g" Doxyfile.SDK.build > Doxyfile.SDK.build.tmp;\ mv Doxyfile.SDK.build.tmp Doxyfile.SDK.build;\ done Doxyfile.SDK.build.layout.xml: $(top_srcdir)/src/doxygen/Doxyfile.SDK.layout.xml cp $(srcdir)/Doxyfile.SDK.layout.xml Doxyfile.SDK.build.layout.xml SDK: $(SDKDEPENDENCIES) doxygen -v | awk -F . '{ exit !($$1 >= 2 || $$1 == 1 && $$2 >= 8) }' || (echo "doxygen version 1.8.0 or greater required (version $$(doxygen -v) found)" && exit 1) doxygen Doxyfile.SDK.build # Postprocessing: Add deviations from SDK API for language bindings (Python). for file in $(SWIG_DEPENDS); do $(PYTHON) $(srcdir)/add-bindings-deviations-to-dox.py $${file} SDK/html; done # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/adapt-and-filter-mapping-attributes.sed0000644000000000000000000000013215067751327025245 xustar0030 mtime=1759498967.684692305 30 atime=1759498967.827493073 30 ctime=1759499031.594791523 nordugrid-arc-7.1.1/src/doxygen/adapt-and-filter-mapping-attributes.sed0000755000175000002070000000225315067751327027154 0ustar00mockbuildmock00000000000000#!/bin/sed -f # Copy mapdef ID to buffer /\\mapdef / { # Copy current line to buffer h # Remove every thing but mapdef ID. s/.*\\mapdef \([^[:space:]]\+\)[[:space:]]\+.*/\1/ # Swap buffer with pattern space. x } # Remove \mapdef attribute plus associated description. End at first empty line, # line with asterisks (*) or line with asterisks followed by slash (/) modulo # spaces. /\\mapdef /,/^[[:space:]]*\**\/\?[[:space:]]*$/ { /^[[:space:]]*\**\/\?[[:space:]]*$/ ! d } # Replace mapdefattr command with link to attribute mapping. /\\mapdefattr/ { # Append buffer (prefixed with new line) to pattern space. This should be the # mapdef ID copied above. Thus the assumption is that the mapdef command must # come before the mapdefattr command. G # Replace \mapdefattr line with a link pointing to mapping of specific # attribute. # mapdefattr name mapdef ID s/\\mapdefattr[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+\n\(.*\)$/\2.html#attr_\1/ s/[^[:space:]]\+$/Attribute mapping specific to this field\/value.<\/a>/ # :: should be transformed to _ in URLs. s/::/_/g } nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/add-bindings-deviations-to-dox.py0000644000000000000000000000013215067751327024067 xustar0030 mtime=1759498967.684692305 30 atime=1759498967.827493073 30 ctime=1759499031.593501115 nordugrid-arc-7.1.1/src/doxygen/add-bindings-deviations-to-dox.py0000644000175000002070000001654015067751327025777 0ustar00mockbuildmock00000000000000#!/usr/bin/env python # ''' Script for parsing Swig interface files (.i) and extracting renames (%rename) and ignores (%s), and adding that information to the doxygen generated HTML. Usage: add-bindings-deviations-to-dox.py E.g.: add-bindings-deviations-to-dox.py swig-interface.i dox/html Limitations: * Unable to handle #else or #elif statements. * Unable to handle templates. 
''' from __future__ import print_function import sys, re from os.path import isfile # Location of swig file filename = sys.argv[1] # Location of generated doxygen HTML documentation sdkDocumentationLocation = sys.argv[2] # Use list to deal with scoping of #if and #ifdef statements. inIfdef = [] # Use dictionary below to group %rename and %ignore statements per HTML file. expressionsFound = {} f = open(filename, "r") for line in f: line = line.strip() regMatch = re.match('\A#if(n?def)?\s+(\w+)', line) if regMatch: inIfdef.append(regMatch.group(2)) #print " #ifdef %s" % inIfdef continue regMatch = re.search('\A#endif', line) if regMatch: #print " #endif // %s" % inIfdef inIfdef.pop() continue regMatch = re.match('%ignore\s+([^;]+)', line) if regMatch: ignoredName = regMatch.group(1) #print "Expression ignored: %s" % ignoredName regMatch = re.match('\A(Arc|ArcCredential|AuthN|DataStaging)::([^:<]+)(<([^:>]+(::[^:>]+)+)>)?::(.*)', ignoredName) if regMatch: namespaceName, className, _, templateParameters, _, methodName = regMatch.groups() if templateParameters: #print "Found template: %s::%s<%s>::%s" % (namespaceName, className, templateParameters, methodName) print("Error: Unable to handle template signatures %s" % ignoredName) continue #print " Ignoring method '%s' in class '%s' in Arc namespace." % (methodName, className) sdkFNOfIgnoredInstance = sdkDocumentationLocation + '/class' + namespaceName + '_1_1' + className + '.html' if sdkFNOfIgnoredInstance not in expressionsFound: expressionsFound[sdkFNOfIgnoredInstance] = [] ignoreScope = ["Python"] if "SWIGPYTHON" in inIfdef else ["Python"] expressionsFound[sdkFNOfIgnoredInstance].append({"text" : "Method is unavailable", "scope" : ignoreScope, "name" : methodName}) continue print("Error: Couldn't parse ignore signature %s" % ignoredName) continue regMatch = re.match('%rename\(([^)]+)\)\s+([^;]+)', line) if regMatch: #print "Expression '%s' renamed to '%s'" % (regMatch.group(2), regMatch.group(1)) toName, renameFullName = regMatch.groups() regMatch = re.match('\A(Arc|ArcCredential|AuthN|DataStaging)::([^:<]+)(<([^:>]+(::[^:>]+)+)>)?::(.*)', renameFullName) if regMatch: namespaceName, className, _, templateParameters, _, methodName = regMatch.groups() if templateParameters: #print "Found template: %s::%s<%s>::%s" % (namespaceName, className, templateParameters, methodName) print("Error: Unable to handle template signatures %s" % renameFullName) continue #print " Ignoring method '%s' in class '%s' in Arc namespace." 
% (methodName, className) sdkFNOfRenamedInstance = sdkDocumentationLocation + '/class' + namespaceName + '_1_1' + className + '.html' if sdkFNOfRenamedInstance not in expressionsFound: expressionsFound[sdkFNOfRenamedInstance] = [] renameScope = ["Python"] if "SWIGPYTHON" in inIfdef else ["Python"] expressionsFound[sdkFNOfRenamedInstance].append({"text" : "Renamed to <b>" + toName + "</b>", "scope" : renameScope, "name" : methodName}) continue print("Error: Couldn't parse rename signature %s" % renameFullName) continue f.close() #print expressionsFound for filename, v in expressionsFound.items(): if not isfile(filename): print("Error: No such file %s" % filename) continue doxHTMLFile = open(filename, "r") doxHTMLFileLines = doxHTMLFile.readlines() doxHTMLFile.close() doxHTMLFile = open(filename, "w") i = 0 while i < len(doxHTMLFileLines): doxHTMLFile.write(doxHTMLFileLines[i]) regMatch = re.match('\s+<td class="memname">(.+)', doxHTMLFileLines[i]) if not regMatch: i += 1 continue doxMethodName = regMatch.group(1).strip() #print doxMethodName for entry in v: regMatch = re.match("(operator\(\)|[^(]+)" "(\(([^(]*)\))?" "\s*(const)?", entry["name"]) if regMatch: methodName, _, methodParameters, isConst = regMatch.groups() #print "Method name: '%s'; Parameters: '%s'; isConst: %s" % (methodName, methodParameters, str(bool(isConst))) #print "'%s\Z', %s" % (methodName.strip(), doxMethodName) doxMethodName = doxMethodName.replace("&gt;", ">") if doxMethodName.endswith(methodName.strip()): #print "Method '%s' found in file '%s' as '%s'" % (methodName, filename, doxMethodName) isInsideMemdocDiv = False methodParameters = methodParameters.split(",") if methodParameters else [] while True: i += 1 regMatch = re.match('\s+<td class="paramtype">(.+)', doxHTMLFileLines[i]) if regMatch: doxParam = regMatch.group(1).replace("&#160;", "").replace(" &", "\s*&").strip() doxParam = re.sub('<a[^>]*>', '', doxParam) # Remove anchor tags if len(methodParameters) == 0: if doxParam != "void": # Doesn't match that in HTML document doxHTMLFile.write(doxHTMLFileLines[i]) break elif re.match(doxParam, methodParameters[0]): methodParameters.pop(0) elif isInsideMemdocDiv and re.match('</div>', doxHTMLFileLines[i]): if len(methodParameters) > 0: # Doesn't match that in HTML document doxHTMLFile.write(doxHTMLFileLines[i]) break for scope in entry["scope"]: doxHTMLFile.write('
<dl class="section attention"><dt>' + scope + ' interface deviation</dt><dd>' + entry["text"] + ' in ' + scope + ' interface</dd></dl>\n') v.remove(entry) doxHTMLFile.write(doxHTMLFileLines[i]) break elif re.search('<div class="memdoc">
', doxHTMLFileLines[i]): isInsideMemdocDiv = True doxHTMLFile.write(doxHTMLFileLines[i]) break else: print("Error: Unable to parse method signature %s" % entry["name"]) i += 1 doxHTMLFile.close() if v: print("Error: The following methods were not found in the HTML file '%s':" % filename) for entry in v: print(" %s" % entry["name"]) print("??? => Is there an API description in the corresponding header file for these?") nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/Doxyfile.SDK0000644000000000000000000000013215067751327017745 xustar0030 mtime=1759498967.683490885 30 atime=1759498967.827493073 30 ctime=1759499031.589473837 nordugrid-arc-7.1.1/src/doxygen/Doxyfile.SDK0000644000175000002070000023414015067751327021653 0ustar00mockbuildmock00000000000000# Doxyfile 1.8.3.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "ARC SDK" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give the viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify a logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = SDK # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system.
CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Tradditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = YES # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. Note that you specify absolute paths here, but also # relative paths, which will be relative from the directory where doxygen is # started. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. 
Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. 
Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, # and language is one of the parsers supported by doxygen: IDL, Java, # Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, # C++. For instance to make doxygen treat .inc files as Fortran files (default # is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note # that for custom extensions you also need to set FILE_PATTERNS otherwise the # files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented classes, # or namespaces to their corresponding documentation. Such a link can be # prevented in individual cases by by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES (the # default) will make doxygen replace the get and set methods by a property in # the documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. 
Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. 
# Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. 
HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. 
GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if section-label ... \endif # and \cond section-label ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = Doxyfile.SDK.layout.xml # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. Do not use # file names with spaces, bibtex cannot handle them. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used.
WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @INPUT@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.h *.dox # The RECURSIVE tag can be used to specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input.
EXCLUDE_SYMLINKS = YES # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */test* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = @EXAMPLES@ # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). IMAGE_PATH = @TOP_SRCDIR@/src/doxygen/images/ # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # none of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERNS (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page (index.html). # This can be useful if you have a project on for instance GitHub and want to reuse # the introduction page also for the doxygen output.
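# Illustrative sketch only (this configuration leaves the tag empty): if a
# README.md were added to INPUT, setting "USE_MDFILE_AS_MAINPAGE = README.md"
# would place that file's contents on the generated main page.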
USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If left blank doxygen will # generate a default style sheet. Note that it is recommended to use # HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this # tag will in the future become obsolete. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional # user-defined cascading style sheet that is included after the standard # style sheets created by doxygen. Using this option one can overrule # certain style aspects. This is preferred over using HTML_STYLESHEET # since it does not replace the standard style sheet and is therefore more # robust against future updates. Doxygen will copy the style sheet file to # the output directory. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100
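# Purely illustrative values (this file keeps the defaults above): following
# the hue/saturation rules just described, a vivid red colour scheme would be
# obtained with
#   HTML_COLORSTYLE_HUE = 0
#   HTML_COLORSTYLE_SAT = 255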
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries to 1 will produce a fully collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a fully expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When the GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When the GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely # identify the documentation publisher. This should be a reverse domain-name # style string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory.
CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters # (Qt Help Project / Custom Filters). QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. For more information please see # http://doc.trolltech.com/qthelpproject.html#filter-attributes # (Qt Help Project / Filter Attributes). QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears.
ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to YES if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider setting DISABLE_INDEX to YES when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want the formulas to look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and # SVG. The default value is HTML-CSS, which is slower, but has the best # compatibility. MATHJAX_FORMAT = HTML-CSS
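# Illustrative sketch, not used by this configuration: to render formulas
# client-side with a locally installed MathJax copy one might set
#   USE_MATHJAX     = YES
#   MATHJAX_RELPATH = ../mathjax
# (the relative path assumes a mathjax directory placed next to the HTML
# output directory, as described below).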
# When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better # solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. # There are two flavours of web server based search depending on the # EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for # searching and an index file used by the script. When EXTERNAL_SEARCH is # enabled the indexing and searching needs to be provided by external tools. # See the manual for details. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain # the search results. Doxygen ships with an example indexer (doxyindexer) and # search engine (doxysearch.cgi) which are based on the open source search engine # library Xapian. See the manual for configuration details. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # Doxygen ships with an example search engine (doxysearch) which is based on # the open source search engine library Xapian. See the manual for configuration # details. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project. EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id # of each project to a relative location where the documentation can be found. # The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
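# Hypothetical example (ids and locations are invented for illustration): two
# sibling projects with EXTERNAL_SEARCH_ID values "arcsdk" and "arcclients"
# installed next to each other could be searched together via
#   EXTRA_SEARCH_MAPPINGS = arcsdk=../sdk/html arcclients=../clients/html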
EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER.
LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output. # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES Doxygen will # generate man pages. GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path.
XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the include files # pointed to by INCLUDE_PATH will be searched when a #include is found.
SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes.
Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default). HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # manageable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced.
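# Worked example of the threshold (illustrative numbers): with the value 10
# set below, a class with 25 fields shows at most 10 of them; since the limit
# may be overshot by 50%, up to 15 items of a type can still appear before
# truncation takes effect.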
UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. 
Note that if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lie further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES nordugrid-arc-7.1.1/src/doxygen/PaxHeaders/create-mapping-documentation.py0000644000000000000000000000013215067751327023734 xustar0030 mtime=1759498967.684692305 30 atime=1759498967.827493073 30 ctime=1759499031.596092917 nordugrid-arc-7.1.1/src/doxygen/create-mapping-documentation.py0000644000175000002070000002261515067751327025644 0ustar00mockbuildmock00000000000000
#!/usr/bin/env python
# TODO: Document how to use.
# TODO: Add list of the plugins which provide the mappings.
# TODO: Deal with multiple values.
# TODO: Deal with fixed values.
# TODO: Deal with conditional values.
# TODO: Deal with units
# TODO: Deal with expressions
# TODO: Deal with attributes in specialisation not mapped to library
# TODO: Deal with attributes in library which is not mapped to specialisation
#
#
# Usable commands and syntax:
# Use in library files:
#   \mapdef <id> <name>\n<description>
#   \mapdefattr <attribute> <prefix>
# Use in specialisation files:
#   \mapname <id> <name>\n<description>
#   \mapattr <value> {->|<-} <attribute> ["<note>"]
#   \mapnote <note>

from __future__ import print_function
import sys, re

# File to write documentation to
outfilename = sys.argv[-1]
sourcefilename = sys.argv[1]
# Find files which contain documentation on mappings, i.e. specify the \mapfile attribute.
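# Illustrative invocation (file names below are hypothetical; the build
# system supplies the real arguments):
#   create-mapping-documentation.py JobDescription.h \
#       EMIESClient.cpp RESTClient.cpp attrmapping.dox
# i.e. sys.argv[1] is the library source containing the \mapdef block,
# sys.argv[2:-1] are the specialisation sources, and sys.argv[-1] is the
# generated .dox output file.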
mapfiles = sys.argv[2:-1]

mapdef = {"id" : "", "name" : "", "description" : "", "attributes" : [], "attributeprefixes" : []}

inMapdef = False
justAfterMapdef = False

# Go through library file
sourcefile = open(sourcefilename, "r")
i = 0
for line in sourcefile:
    i += 1
    line = line.strip().lstrip("*").lstrip()
    if line[0:3] == "///":
        line = line.lstrip("/").lstrip()
        if justAfterMapdef:
            if line == "" or line == "/":
                justAfterMapdef = False
                continue
            mapdef["description"] += line + " "
            continue
        elif line[0:12] == "\mapdefattr ":
            regMatch = re.match("([^\s]+)\s+([^\s]+)", line[12:].lstrip())
            if not regMatch:
                print("ERROR: Wrong format of the \mapdefattr attribute in '%s' file on line %d" % (sourcefilename, i))
                sys.exit(1)
            mapdef["attributes"].append(regMatch.group(1))
            mapdef["attributeprefixes"].append(regMatch.group(2))
        elif line[0:8] == "\mapdef ":
            regMatch = re.match("(\w+)\s+(.+)", line[8:].lstrip())
            if not regMatch:
                print("ERROR: Wrong format of the \mapdef attribute in '%s' file on line %d" % (sourcefilename, i))
                sys.exit(1)
            mapdef["id"] = regMatch.group(1)
            mapdef["name"] = regMatch.group(2)
            inMapdef = True
            justAfterMapdef = True
            continue
sourcefile.close()

# Go through specialisation files
mappings = []
for filename in mapfiles:
    m = {"id" : "", "name" : "", "description" : [], "notes" : [], "attributes" : {}}
    for attr in mapdef["attributes"]:
        m["attributes"][attr] = {}
        m["attributes"][attr]["in"] = []
        m["attributes"][attr]["out"] = []
        m["attributes"][attr]["in-note"] = []
        m["attributes"][attr]["out-note"] = []
    f = open(filename, "r")
    justAfterMapName = False
    i = 0
    for line in f:
        i += 1
        line = line.strip()
        if line[0:3] != "///":
            justAfterMapName = False
            continue
        line = line[3:].lstrip()
        if line[0:9] == "\mapname ":
            regMatch = re.match("(\w+)\s+(.+)", line[9:].lstrip())
            if not regMatch:
                print("ERROR: Wrong format of the \mapname command in '%s' file on line %d" % (filename, i))
                sys.exit(1)
            m["id"] = regMatch.group(1)
            m["name"] = regMatch.group(2)
            justAfterMapName = True
        elif line[0:9] == "\mapnote ":
            justAfterMapName = False
            m["notes"].append(line[9:].lstrip())
        elif line[0:9] == "\mapattr ":
            justAfterMapName = False
            # <value> -> <attribute> ["<note>"]
            regMatch = re.match("(.+)\s+->\s+([^\s]+)(?:\s+\"([^\"]+)\")?", line[9:])
            if regMatch:
                if regMatch.group(2) not in m["attributes"]:
                    print("ERROR: The '%s' attribute present in file '%s' on line %d is not defined in file '%s'" % (regMatch.group(2), filename, i, sourcefilename))
                    sys.exit(1)
                m["attributes"][regMatch.group(2)]["in"].append(regMatch.group(1))
                if regMatch.group(3):
                    m["attributes"][regMatch.group(2)]["in-note"].append(regMatch.group(3))
                continue
            regMatch = re.match("(.+)\s+<-\s+([^\s]+)(?:\s+\"([^\"]+)\")?", line[9:])
            if regMatch:
                if regMatch.group(2) not in m["attributes"]:
                    print("ERROR: The '%s' attribute present in file '%s' on line %d is not defined in file '%s'" % (regMatch.group(2), filename, i, sourcefilename))
                    sys.exit(1)
                m["attributes"][regMatch.group(2)]["out"].append(regMatch.group(1))
                if regMatch.group(3):
                    m["attributes"][regMatch.group(2)]["out-note"].append(regMatch.group(3))
                continue
        elif justAfterMapName:
            m["description"].append(line)
    mappings.append(m)
    f.close()

# Write mapping to doxygen formatted file.
outfile = open(outfilename, "w")
outfile.write("/** \n")
outfile.write("\\page {id} {name}\n{description}\n".format(**mapdef))
outfile.write("\\tableofcontents\n")
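# At this point the output file contains the page preamble built from the
# \mapdef data; with invented identifiers it would look roughly like:
#   /**
#   \page attributemapping Attribute mapping
#   <description collected from the comment block>
#   \tableofcontents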
# Write mapping to doxygen formatted file.
outfile = open(outfilename, "w")
outfile.write("/** \n")
outfile.write("\\page {id} {name}\n{description}\n".format(**mapdef))
outfile.write("\\tableofcontents\n")

# Create mapping per lib. attribute
outfile.write("\\section attr Grouped by libarccompute attributes\n")
for i in range(len(mapdef["attributes"])):
    outfile.write("\n\\subsection attr_{formatted_attr} {attr}\n".format(
        formatted_attr=re.sub('::', "_", mapdef["attributes"][i]),
        attr=mapdef["attributes"][i]))
    outfile.write("\\ref {prefix}::{attr} \"Attribute description\"\n\n".format(
        attr=mapdef["attributes"][i], prefix=mapdef["attributeprefixes"][i]))
    has_input = has_output = False
    attributes_to_write_to_table = ""
    for m in mappings:
        has_input = has_input or m["attributes"][mapdef["attributes"][i]]["in"]
        has_output = has_output or m["attributes"][mapdef["attributes"][i]]["out"]
    notes = []
    for m in mappings:
        attr = m["attributes"][mapdef["attributes"][i]]
        if attr["in"] or attr["out"]:
            attributes_to_write_to_table += "| %s |" % (m["name"])
            if has_input:
                attributes_to_write_to_table += " %s" % (",<br/>".join(attr["in"]))
                if attr["in-note"]:
                    attributes_to_write_to_table += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(attr["in-note"]))))
                    notes += attr["in-note"]
                attributes_to_write_to_table += " |" if has_output else ""
            if has_output:
                attributes_to_write_to_table += " %s" % (",<br/>".join(attr["out"]))
                if attr["out-note"]:
                    attributes_to_write_to_table += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(attr["out-note"]))))
                    notes += attr["out-note"]
            attributes_to_write_to_table += " |\n"
    if attributes_to_write_to_table and (has_input or has_output):
        table_header = "| Specialisation"
        table_header += " | Input" if has_input else ""
        table_header += " | Output" if has_output else ""
        outfile.write(table_header + " |\n")
        outfile.write(re.sub(r'[ \w]', '-', table_header) + " |\n")
        outfile.write(attributes_to_write_to_table)
        if notes:
            # Emit the collected notes as a numbered HTML list.
            outfile.write("Notes:<ol><li>%s</li></ol>\n" % ("</li><li>".join(notes)))
    else:
        outfile.write("No specialisation maps attributes to this field/value.\n")
  • ".join(notes))) else: outfile.write("No specialisations maps attributes to this field/value.\n") # Create mapping per specialisation outfile.write("\\section specialisation Grouped by plugin\n") for m in mappings: outfile.write("\n\\subsection specialisation_{id} {name}\n".format(**m)) if m["description"]: outfile.write(" ".join(m["description"]) + "\n") if len(m["notes"]) > 0: outfile.write('
    \n
    Note
    \n') for note in m["notes"]: outfile.write('
    ' + note+ '
    \n') outfile.write('
    \n') has_input = has_output = False for attr, m_attrs in m["attributes"].items(): has_input = has_input or bool(m_attrs["in"]) has_output = has_output or bool(m_attrs["out"]) table_header = "| Input " if has_input else "" table_header += "| Lib. attr. |" table_header += " Output |" if has_output else "" outfile.write(table_header + "\n") outfile.write(re.sub(r'[. \w]', '-', table_header) + "\n") notes = [] for attr, m_attrs in m["attributes"].items(): if not m_attrs["in"] and not m_attrs["out"]: continue line = "" if has_input: line += "| %s" % (", ".join(m_attrs["in"])) if m_attrs["in-note"]: line += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(m_attrs["in-note"])))) notes += m_attrs["in-note"] line += " " line += "| \\ref Arc::" + attr + ' "' + attr + '" |' if has_output: line += " %s" % (", ".join(m_attrs["out"])) if m_attrs["out-note"]: line += "[%s]" % ("][".join(str(x) for x in range(len(notes)+1, len(notes)+1+len(m_attrs["out-note"])))) notes += m_attrs["out-note"] line += " |" outfile.write(line + '\n') if notes: outfile.write("Notes:
    1. %s
    " % ("
  • ".join(notes))) outfile.write("**/\n") nordugrid-arc-7.1.1/src/PaxHeaders/libs0000644000000000000000000000013115067751424015013 xustar0030 mtime=1759499028.777419211 29 atime=1759499034.76351017 30 ctime=1759499028.777419211 nordugrid-arc-7.1.1/src/libs/0000755000175000002070000000000015067751424016773 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/libs/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327017126 xustar0030 mtime=1759498967.747240432 29 atime=1759498967.85949356 30 ctime=1759499028.772424539 nordugrid-arc-7.1.1/src/libs/Makefile.am0000644000175000002070000000006315067751327021030 0ustar00mockbuildmock00000000000000SUBDIRS = data-staging DIST_SUBDIRS = data-staging nordugrid-arc-7.1.1/src/libs/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355017141 xustar0030 mtime=1759498989.272493019 30 atime=1759499017.467247352 30 ctime=1759499028.773432992 nordugrid-arc-7.1.1/src/libs/Makefile.in0000644000175000002070000006064715067751355021060 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/libs ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive 
am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = 
@BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = 
@PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ 
top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SUBDIRS = data-staging DIST_SUBDIRS = data-staging all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/libs/PaxHeaders/data-staging0000644000000000000000000000013115067751424017356 xustar0030 mtime=1759499028.862420502 29 atime=1759499034.76351017 30 ctime=1759499028.862420502 nordugrid-arc-7.1.1/src/libs/data-staging/0000755000175000002070000000000015067751424021336 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327021472 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.803419606 nordugrid-arc-7.1.1/src/libs/data-staging/Makefile.am0000644000175000002070000000273015067751327023376 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test examples
SUBDIRS = . $(TEST_DIR) examples
lib_LTLIBRARIES = libarcdatastaging.la

libarcdatastaging_ladir = $(pkgincludedir)/data-staging
libarcdatastaging_la_HEADERS = DataDelivery.h DataDeliveryComm.h \
	DataDeliveryLocalComm.h DataDeliveryRemoteComm.h DTR.h DTRList.h \
	DTRStatus.h Processor.h Scheduler.h TransferShares.h
libarcdatastaging_la_SOURCES = DataDelivery.cpp DataDeliveryComm.cpp \
	DataDeliveryLocalComm.cpp DataDeliveryRemoteComm.cpp DTR.cpp DTRList.cpp \
	DTRStatus.cpp Processor.cpp Scheduler.cpp TransferShares.cpp
libarcdatastaging_la_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
libarcdatastaging_la_LIBADD = \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/data/libarcdata.la \
	$(top_builddir)/src/hed/libs/message/libarcmessage.la \
	$(top_builddir)/src/hed/libs/loader/libarcloader.la \
	$(top_builddir)/src/hed/libs/communication/libarccommunication.la \
	$(GLIBMM_LIBS) -lpthread
libarcdatastaging_la_LDFLAGS = -version-info 4:0:0

pgmpkglibdir = $(pkglibdir)
pgmpkglib_PROGRAMS = DataStagingDelivery
DataStagingDelivery_SOURCES = DataStagingDelivery.cpp
DataStagingDelivery_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
DataStagingDelivery_LDADD = \
	$(top_builddir)/src/hed/libs/data/libarcdata.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(GLIBMM_LIBS)
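# A note on the -version-info flag used above: by standard libtool
# current:revision:age semantics (generic libtool behaviour, not anything
# specific to this package), -version-info 4:0:0 yields a library version of
# (current-age).(age).(revision), so an ELF build is expected to install
# roughly:
#
#   libarcdatastaging.so.4.0.0   real shared object
#   libarcdatastaging.so.4       soname symlink used at run time
#   libarcdatastaging.so         symlink used when linking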
nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DTRList.h0000644000000000000000000000013115067751327021073 xustar0030 mtime=1759498967.747527268 29 atime=1759498967.85949356 30 ctime=1759499028.810326256 nordugrid-arc-7.1.1/src/libs/data-staging/DTRList.h0000644000175000002070000001172015067751327022777 0ustar00mockbuildmock00000000000000#ifndef DTRLIST_H_
#define DTRLIST_H_

#include <arc/Thread.h>

#include "DTR.h"

namespace DataStaging {

  /// Global list of all active DTRs in the system.
  /**
   * This class contains several methods for filtering the list by owner,
   * state etc.
   * \ingroup datastaging
   * \headerfile DTRList.h arc/data-staging/DTRList.h
   */
  class DTRList {

    private:

      /// Internal list of DTRs
      std::list<DTR_ptr> DTRs;

      /// Lock to protect list during modification
      Arc::SimpleCondition Lock;

      /// Internal set of sources that are currently being cached.
      /**
       * The source is mapped to the highest priority among all the DTRs with
       * that source.
       */
      std::map<std::string, int> CachingSources;

      /// Lock to protect caching sources set during modification
      Arc::SimpleCondition CachingLock;

    public:

      /// Put a new DTR into the list.
      bool add_dtr(DTR_ptr DTRToAdd);

      /// Remove a DTR from the list.
      bool delete_dtr(DTR_ptr DTRToDelete);

      /// Filter the queue to select DTRs owned by a specified process.
      /**
       * @param OwnerToFilter The owner to filter on
       * @param FilteredList This list is filled with filtered DTRs
       */
      bool filter_dtrs_by_owner(StagingProcesses OwnerToFilter,
                                std::list<DTR_ptr>& FilteredList);

      /// Returns the number of DTRs owned by a particular process
      int number_of_dtrs_by_owner(StagingProcesses OwnerToFilter);

      /// Filter the queue to select DTRs with particular status.
      /**
       * If we have only one common queue for all DTRs, this method is
       * necessary to make virtual queues for the DTRs about to go into the
       * pre-, post-processor or delivery stages.
       * @param StatusToFilter DTR status to filter on
       * @param FilteredList This list is filled with filtered DTRs
       */
      bool filter_dtrs_by_status(DTRStatus::DTRStatusType StatusToFilter,
                                 std::list<DTR_ptr>& FilteredList);

      /// Filter the queue to select DTRs with particular statuses.
      /**
       * @param StatusesToFilter Vector of DTR statuses to filter on
       * @param FilteredList This list is filled with filtered DTRs
       */
      bool filter_dtrs_by_statuses(const std::vector<DTRStatus::DTRStatusType>& StatusesToFilter,
                                   std::list<DTR_ptr>& FilteredList);

      /// Filter the queue to select DTRs with particular statuses.
      /**
       * @param StatusesToFilter Vector of DTR statuses to filter on
       * @param FilteredList This map is filled with filtered DTRs,
       * one list per state.
       */
      bool filter_dtrs_by_statuses(const std::vector<DTRStatus::DTRStatusType>& StatusesToFilter,
                                   std::map<DTRStatus::DTRStatusType, std::list<DTR_ptr> >& FilteredList);

      /// Select DTRs that are about to go to the specified process.
      /**
       * This selection is actually a virtual queue for pre-, post-processor
       * and delivery.
       * @param NextReceiver The process to filter on
       * @param FilteredList This list is filled with filtered DTRs
       */
      bool filter_dtrs_by_next_receiver(StagingProcesses NextReceiver,
                                        std::list<DTR_ptr>& FilteredList);

      /// Select DTRs that have just arrived from pre-, post-processor, delivery or generator.
      /**
       * These DTRs need some reaction from the scheduler. This selection is
       * actually a virtual queue of DTRs that need to be processed.
       * @param FilteredList This list is filled with filtered DTRs
       */
      bool filter_pending_dtrs(std::list<DTR_ptr>& FilteredList);

      /// Get the list of DTRs corresponding to the given job ID.
      /**
       * @param jobid Job id to filter on
       * @param FilteredList This list is filled with filtered DTRs
       */
      bool filter_dtrs_by_job(const std::string& jobid,
                              std::list<DTR_ptr>& FilteredList);

      /// Check the given file for requested changes in priority.
      /**
       * @param filename File which is checked for priority changes
       */
      void check_priority_changes(const std::string& filename);

      /// Update the caching set, adding a DTR (only if it is CACHEABLE).
      void caching_started(DTR_ptr request);

      /// Update the caching set, removing a DTR.
      void caching_finished(DTR_ptr request);

      /// Returns true if the DTR's source is currently in the caching set.
      bool is_being_cached(DTR_ptr DTRToCheck);

      /// Returns true if there are no DTRs in the list
      bool empty();

      /// Get the list of all job IDs
      std::list<std::string> all_jobs();

      /// Return the size of the DTR list
      unsigned int size();

      /// Dump state of all current DTRs to a destination, e.g. file, database, url...
      /**
       * Currently only file is supported.
       * @param path Path to the file in which to dump state.
       */
      void dumpState(const std::string& path);

  };

} // namespace DataStaging

#endif /*DTRLIST_H_*/
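// A minimal usage sketch for the DTRList API above and the Processor class
// declared in Processor.h, which follows below. This is an illustration
// under stated assumptions rather than code from this archive: the function
// and variable names are hypothetical, and the StagingProcesses enumerators
// SCHEDULER and PRE_PROCESSOR are assumed to be declared in DTR.h.
#include <list>
#include <arc/data-staging/DTRList.h>
#include <arc/data-staging/Processor.h>

// Inspect the global list: count scheduler-owned DTRs and fetch the
// virtual queue of DTRs waiting for the pre-processor.
void inspect_queue(DataStaging::DTRList& dtr_list) {
  int owned = dtr_list.number_of_dtrs_by_owner(DataStaging::SCHEDULER);
  std::list<DataStaging::DTR_ptr> to_preprocess;
  dtr_list.filter_dtrs_by_next_receiver(DataStaging::PRE_PROCESSOR, to_preprocess);
  (void)owned; // illustration only
}

// Hand one DTR to the Processor for a long-latency pre-processing step.
void process_one(DataStaging::DTR_ptr dtr) {
  DataStaging::Processor processor;
  processor.start();          // documented as a no-op today, called for forward compatibility
  processor.receiveDTR(dtr);  // spawns a worker thread and returns immediately
  processor.stop();           // waits for all spawned worker threads to finish
}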
nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/Processor.h0000644000000000000000000000013215067751327021566 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.812416586 nordugrid-arc-7.1.1/src/libs/data-staging/Processor.h0000644000175000002070000000746515067751327023474 0ustar00mockbuildmock00000000000000#ifndef PROCESSOR_H_
#define PROCESSOR_H_

#include <arc/Thread.h>

#include "DTR.h"

namespace DataStaging {

  /// The Processor performs pre- and post-transfer operations.
  /**
   * The Processor takes care of everything that should happen before
   * and after a transfer takes place. Calling receiveDTR() spawns a
   * thread to perform the required operation depending on the DTR state.
   * \ingroup datastaging
   * \headerfile Processor.h arc/data-staging/Processor.h
   */
  class Processor: public DTRCallback {

    private:

      /// Private copy constructor because Processor should not be copied
      Processor(const Processor&);
      /// Private assignment operator because Processor should not be copied
      Processor& operator=(const Processor&);

      /// Class used to pass information to spawned thread
      class ThreadArgument {
       public:
        Processor* proc;
        DTR_ptr dtr;
        ThreadArgument(Processor* proc_, DTR_ptr dtr_):proc(proc_),dtr(dtr_) {};
      };

      /// Class used to pass information to spawned thread (for bulk operations)
      class BulkThreadArgument {
       public:
        Processor* proc;
        std::list<DTR_ptr> dtrs;
        BulkThreadArgument(Processor* proc_, const std::list<DTR_ptr>& dtrs_):proc(proc_),dtrs(dtrs_) {};
      };

      /// Counter of active threads
      Arc::SimpleCounter thread_count;

      /// List of DTRs to be processed in bulk. Filled between receiveDTR
      /// receiving a DTR with bulk_start on and receiving one with bulk_end on.
      /// It is up to the caller to make sure that all the requests are suitable
      /// for bulk handling. The list is cleared after the DTR with bulk_end set.
      std::list<DTR_ptr> bulk_list;

      /* Thread methods which deal with each state */

      /// Check the cache to see if the file already exists
      static void DTRCheckCache(void* arg);
      /// Resolve replicas of source and destination
      static void DTRResolve(void* arg);
      /// Bulk resolve replicas of source and destination
      static void DTRBulkResolve(void* arg);
      /// Check if source exists
      static void DTRQueryReplica(void* arg);
      /// Bulk check if source exists
      static void DTRBulkQueryReplica(void* arg);
      /// Remove destination file before creating a new version
      static void DTRPreClean(void *arg);
      /// Call external services to prepare physical files for reading/writing
      static void DTRStagePrepare(void* arg);
      /// Release requests made during DTRStagePrepare
      static void DTRReleaseRequest(void* arg);
      /// Finalise replica
      static void DTRFinaliseReplica(void* arg);
      /// Register destination file in catalog
      static void DTRRegisterReplica(void* arg);
      /// Link cached file to final destination
      static void DTRProcessCache(void* arg);

    public:

      /// Constructor
      Processor() {};
      /// Destructor waits for all active threads to stop.
      ~Processor() { stop(); };

      /// Start Processor.
      /**
       * This method actually does nothing. It is here only to make all classes
       * of data staging look alike. But it is better to call it before
       * starting to use the object because it may do something in the future.
       */
      void start(void);

      /// Stop Processor.
      /**
       * This method waits for all started threads to end and exits. Since
       * threads are short-lived it is better to wait rather than interrupt them.
       */
      void stop(void);

      /// Send a DTR to the Processor.
      /**
       * The DTR is sent to the Processor through this method when some
       * long-latency processing is to be performed, eg contacting a
       * remote service. The Processor spawns a thread to do the processing,
       * and then returns. The thread pushes the DTR back to the scheduler when
       * it is finished.
       */
      virtual void receiveDTR(DTR_ptr dtr);

  };

} // namespace DataStaging

#endif /* PROCESSOR_H_ */
nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355021504 xustar0030 mtime=1759498989.341819985 30 atime=1759499017.486247641 30 ctime=1759499028.815511809 nordugrid-arc-7.1.1/src/libs/data-staging/Makefile.in0000644000175000002070000015525715067751355023415 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am.
# @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pgmpkglib_PROGRAMS = DataStagingDelivery$(EXEEXT) subdir = src/libs/data-staging ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(libarcdatastaging_la_HEADERS) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(pgmpkglibdir)" "$(DESTDIR)$(libdir)" \ "$(DESTDIR)$(libarcdatastaging_ladir)" PROGRAMS = 
$(pgmpkglib_PROGRAMS) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libarcdatastaging_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(am__DEPENDENCIES_1) am_libarcdatastaging_la_OBJECTS = \ libarcdatastaging_la-DataDelivery.lo \ libarcdatastaging_la-DataDeliveryComm.lo \ libarcdatastaging_la-DataDeliveryLocalComm.lo \ libarcdatastaging_la-DataDeliveryRemoteComm.lo \ libarcdatastaging_la-DTR.lo libarcdatastaging_la-DTRList.lo \ libarcdatastaging_la-DTRStatus.lo \ libarcdatastaging_la-Processor.lo \ libarcdatastaging_la-Scheduler.lo \ libarcdatastaging_la-TransferShares.lo libarcdatastaging_la_OBJECTS = $(am_libarcdatastaging_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libarcdatastaging_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) \ $(libarcdatastaging_la_LDFLAGS) $(LDFLAGS) -o $@ am_DataStagingDelivery_OBJECTS = \ DataStagingDelivery-DataStagingDelivery.$(OBJEXT) DataStagingDelivery_OBJECTS = $(am_DataStagingDelivery_OBJECTS) DataStagingDelivery_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) DataStagingDelivery_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ ./$(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po \ ./$(DEPDIR)/libarcdatastaging_la-DTR.Plo \ ./$(DEPDIR)/libarcdatastaging_la-DTRList.Plo \ ./$(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo \ 
./$(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo \ ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo \ ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo \ ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo \ ./$(DEPDIR)/libarcdatastaging_la-Processor.Plo \ ./$(DEPDIR)/libarcdatastaging_la-Scheduler.Plo \ ./$(DEPDIR)/libarcdatastaging_la-TransferShares.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libarcdatastaging_la_SOURCES) \ $(DataStagingDelivery_SOURCES) DIST_SOURCES = $(libarcdatastaging_la_SOURCES) \ $(DataStagingDelivery_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac HEADERS = $(libarcdatastaging_la_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = 
@GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ 
SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ DIST_SUBDIRS = test examples SUBDIRS = . 
$(TEST_DIR) examples lib_LTLIBRARIES = libarcdatastaging.la libarcdatastaging_ladir = $(pkgincludedir)/data-staging libarcdatastaging_la_HEADERS = DataDelivery.h DataDeliveryComm.h \ DataDeliveryLocalComm.h DataDeliveryRemoteComm.h DTR.h DTRList.h \ DTRStatus.h Processor.h Scheduler.h TransferShares.h libarcdatastaging_la_SOURCES = DataDelivery.cpp DataDeliveryComm.cpp \ DataDeliveryLocalComm.cpp DataDeliveryRemoteComm.cpp DTR.cpp DTRList.cpp \ DTRStatus.cpp Processor.cpp Scheduler.cpp TransferShares.cpp libarcdatastaging_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarcdatastaging_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(GLIBMM_LIBS) -lpthread libarcdatastaging_la_LDFLAGS = -version-info 4:0:0 pgmpkglibdir = $(pkglibdir) DataStagingDelivery_SOURCES = DataStagingDelivery.cpp DataStagingDelivery_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) DataStagingDelivery_LDADD = \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/data-staging/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/data-staging/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pgmpkglibPROGRAMS: $(pgmpkglib_PROGRAMS) @$(NORMAL_INSTALL) @list='$(pgmpkglib_PROGRAMS)'; test -n "$(pgmpkglibdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pgmpkglibdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pgmpkglibdir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pgmpkglibdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pgmpkglibdir)$$dir" || exit $$?; \ } \ ; done uninstall-pgmpkglibPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pgmpkglib_PROGRAMS)'; test -n "$(pgmpkglibdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pgmpkglibdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pgmpkglibdir)" && rm -f $$files clean-pgmpkglibPROGRAMS: @list='$(pgmpkglib_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done 
clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libarcdatastaging.la: $(libarcdatastaging_la_OBJECTS) $(libarcdatastaging_la_DEPENDENCIES) $(EXTRA_libarcdatastaging_la_DEPENDENCIES) $(AM_V_CXXLD)$(libarcdatastaging_la_LINK) -rpath $(libdir) $(libarcdatastaging_la_OBJECTS) $(libarcdatastaging_la_LIBADD) $(LIBS) DataStagingDelivery$(EXEEXT): $(DataStagingDelivery_OBJECTS) $(DataStagingDelivery_DEPENDENCIES) $(EXTRA_DataStagingDelivery_DEPENDENCIES) @rm -f DataStagingDelivery$(EXEEXT) $(AM_V_CXXLD)$(DataStagingDelivery_LINK) $(DataStagingDelivery_OBJECTS) $(DataStagingDelivery_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DTR.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DTRList.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-Processor.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-Scheduler.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarcdatastaging_la-TransferShares.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libarcdatastaging_la-DataDelivery.lo: DataDelivery.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDelivery.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDelivery.Tpo -c -o libarcdatastaging_la-DataDelivery.lo `test -f 'DataDelivery.cpp' || echo '$(srcdir)/'`DataDelivery.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDelivery.Tpo $(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataDelivery.cpp' object='libarcdatastaging_la-DataDelivery.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDelivery.lo `test -f 'DataDelivery.cpp' || echo '$(srcdir)/'`DataDelivery.cpp libarcdatastaging_la-DataDeliveryComm.lo: DataDeliveryComm.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDeliveryComm.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Tpo -c -o libarcdatastaging_la-DataDeliveryComm.lo `test -f 'DataDeliveryComm.cpp' || echo '$(srcdir)/'`DataDeliveryComm.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Tpo $(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataDeliveryComm.cpp' object='libarcdatastaging_la-DataDeliveryComm.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDeliveryComm.lo `test -f 'DataDeliveryComm.cpp' || echo '$(srcdir)/'`DataDeliveryComm.cpp libarcdatastaging_la-DataDeliveryLocalComm.lo: DataDeliveryLocalComm.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDeliveryLocalComm.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Tpo -c -o libarcdatastaging_la-DataDeliveryLocalComm.lo `test -f 'DataDeliveryLocalComm.cpp' || echo '$(srcdir)/'`DataDeliveryLocalComm.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Tpo $(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataDeliveryLocalComm.cpp' object='libarcdatastaging_la-DataDeliveryLocalComm.lo' libtool=yes 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDeliveryLocalComm.lo `test -f 'DataDeliveryLocalComm.cpp' || echo '$(srcdir)/'`DataDeliveryLocalComm.cpp libarcdatastaging_la-DataDeliveryRemoteComm.lo: DataDeliveryRemoteComm.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DataDeliveryRemoteComm.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Tpo -c -o libarcdatastaging_la-DataDeliveryRemoteComm.lo `test -f 'DataDeliveryRemoteComm.cpp' || echo '$(srcdir)/'`DataDeliveryRemoteComm.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Tpo $(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataDeliveryRemoteComm.cpp' object='libarcdatastaging_la-DataDeliveryRemoteComm.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DataDeliveryRemoteComm.lo `test -f 'DataDeliveryRemoteComm.cpp' || echo '$(srcdir)/'`DataDeliveryRemoteComm.cpp libarcdatastaging_la-DTR.lo: DTR.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DTR.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DTR.Tpo -c -o libarcdatastaging_la-DTR.lo `test -f 'DTR.cpp' || echo '$(srcdir)/'`DTR.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DTR.Tpo $(DEPDIR)/libarcdatastaging_la-DTR.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DTR.cpp' object='libarcdatastaging_la-DTR.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DTR.lo `test -f 'DTR.cpp' || echo '$(srcdir)/'`DTR.cpp libarcdatastaging_la-DTRList.lo: DTRList.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DTRList.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DTRList.Tpo -c -o libarcdatastaging_la-DTRList.lo `test -f 'DTRList.cpp' || echo '$(srcdir)/'`DTRList.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DTRList.Tpo 
$(DEPDIR)/libarcdatastaging_la-DTRList.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DTRList.cpp' object='libarcdatastaging_la-DTRList.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DTRList.lo `test -f 'DTRList.cpp' || echo '$(srcdir)/'`DTRList.cpp libarcdatastaging_la-DTRStatus.lo: DTRStatus.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-DTRStatus.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-DTRStatus.Tpo -c -o libarcdatastaging_la-DTRStatus.lo `test -f 'DTRStatus.cpp' || echo '$(srcdir)/'`DTRStatus.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-DTRStatus.Tpo $(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DTRStatus.cpp' object='libarcdatastaging_la-DTRStatus.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-DTRStatus.lo `test -f 'DTRStatus.cpp' || echo '$(srcdir)/'`DTRStatus.cpp libarcdatastaging_la-Processor.lo: Processor.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-Processor.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-Processor.Tpo -c -o libarcdatastaging_la-Processor.lo `test -f 'Processor.cpp' || echo '$(srcdir)/'`Processor.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-Processor.Tpo $(DEPDIR)/libarcdatastaging_la-Processor.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='Processor.cpp' object='libarcdatastaging_la-Processor.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-Processor.lo `test -f 'Processor.cpp' || echo '$(srcdir)/'`Processor.cpp libarcdatastaging_la-Scheduler.lo: Scheduler.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-Scheduler.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-Scheduler.Tpo -c -o libarcdatastaging_la-Scheduler.lo `test -f 'Scheduler.cpp' || echo '$(srcdir)/'`Scheduler.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) 
$(DEPDIR)/libarcdatastaging_la-Scheduler.Tpo $(DEPDIR)/libarcdatastaging_la-Scheduler.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='Scheduler.cpp' object='libarcdatastaging_la-Scheduler.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-Scheduler.lo `test -f 'Scheduler.cpp' || echo '$(srcdir)/'`Scheduler.cpp libarcdatastaging_la-TransferShares.lo: TransferShares.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -MT libarcdatastaging_la-TransferShares.lo -MD -MP -MF $(DEPDIR)/libarcdatastaging_la-TransferShares.Tpo -c -o libarcdatastaging_la-TransferShares.lo `test -f 'TransferShares.cpp' || echo '$(srcdir)/'`TransferShares.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarcdatastaging_la-TransferShares.Tpo $(DEPDIR)/libarcdatastaging_la-TransferShares.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='TransferShares.cpp' object='libarcdatastaging_la-TransferShares.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarcdatastaging_la_CXXFLAGS) $(CXXFLAGS) -c -o libarcdatastaging_la-TransferShares.lo `test -f 'TransferShares.cpp' || echo '$(srcdir)/'`TransferShares.cpp DataStagingDelivery-DataStagingDelivery.o: DataStagingDelivery.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -MT DataStagingDelivery-DataStagingDelivery.o -MD -MP -MF $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo -c -o DataStagingDelivery-DataStagingDelivery.o `test -f 'DataStagingDelivery.cpp' || echo '$(srcdir)/'`DataStagingDelivery.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataStagingDelivery.cpp' object='DataStagingDelivery-DataStagingDelivery.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -c -o DataStagingDelivery-DataStagingDelivery.o `test -f 'DataStagingDelivery.cpp' || echo '$(srcdir)/'`DataStagingDelivery.cpp DataStagingDelivery-DataStagingDelivery.obj: DataStagingDelivery.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -MT DataStagingDelivery-DataStagingDelivery.obj -MD -MP -MF $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo -c -o DataStagingDelivery-DataStagingDelivery.obj `if test -f 'DataStagingDelivery.cpp'; then $(CYGPATH_W) 'DataStagingDelivery.cpp'; else 
$(CYGPATH_W) '$(srcdir)/DataStagingDelivery.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Tpo $(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataStagingDelivery.cpp' object='DataStagingDelivery-DataStagingDelivery.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DataStagingDelivery_CXXFLAGS) $(CXXFLAGS) -c -o DataStagingDelivery-DataStagingDelivery.obj `if test -f 'DataStagingDelivery.cpp'; then $(CYGPATH_W) 'DataStagingDelivery.cpp'; else $(CYGPATH_W) '$(srcdir)/DataStagingDelivery.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-libarcdatastaging_laHEADERS: $(libarcdatastaging_la_HEADERS) @$(NORMAL_INSTALL) @list='$(libarcdatastaging_la_HEADERS)'; test -n "$(libarcdatastaging_ladir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(libarcdatastaging_ladir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libarcdatastaging_ladir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libarcdatastaging_ladir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(libarcdatastaging_ladir)" || exit $$?; \ done uninstall-libarcdatastaging_laHEADERS: @$(NORMAL_UNINSTALL) @list='$(libarcdatastaging_la_HEADERS)'; test -n "$(libarcdatastaging_ladir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(libarcdatastaging_ladir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pgmpkglibdir)" "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libarcdatastaging_ladir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ clean-pgmpkglibPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -f ./$(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po -rm -f ./$(DEPDIR)/libarcdatastaging_la-DTR.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DTRList.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-Processor.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-Scheduler.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-TransferShares.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-libarcdatastaging_laHEADERS \ install-pgmpkglibPROGRAMS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/DataStagingDelivery-DataStagingDelivery.Po -rm -f ./$(DEPDIR)/libarcdatastaging_la-DTR.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DTRList.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DTRStatus.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDelivery.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryComm.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryLocalComm.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-DataDeliveryRemoteComm.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-Processor.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-Scheduler.Plo -rm -f ./$(DEPDIR)/libarcdatastaging_la-TransferShares.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES \ uninstall-libarcdatastaging_laHEADERS \ uninstall-pgmpkglibPROGRAMS .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--depfiles check check-am clean clean-generic \ clean-libLTLIBRARIES clean-libtool clean-pgmpkglibPROGRAMS \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-libLTLIBRARIES \ install-libarcdatastaging_laHEADERS install-man install-pdf \ install-pdf-am install-pgmpkglibPROGRAMS install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES \ uninstall-libarcdatastaging_laHEADERS \ uninstall-pgmpkglibPROGRAMS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

nordugrid-arc-7.1.1/src/libs/data-staging/DataDeliveryLocalComm.h

#ifndef DATADELIVERYLOCALCOMM_H_
#define DATADELIVERYLOCALCOMM_H_

// Angle-bracket include target lost in extraction; <arc/Run.h> assumed, since Arc::Run is used below.
#include <arc/Run.h>

#include "DataDeliveryComm.h"

namespace DataStaging {

  /// This class starts, monitors and controls a local Delivery process.
  /**
   * \ingroup datastaging
   * \headerfile DataDeliveryLocalComm.h arc/data-staging/DataDeliveryLocalComm.h
   */
  class DataDeliveryLocalComm : public DataDeliveryComm {
  public:
    /// Starts child process
    DataDeliveryLocalComm(DTR_ptr dtr, const TransferParameters& params);
    /// This stops the child process
    virtual ~DataDeliveryLocalComm();
    /// Read from stdout of child to get status
    virtual void PullStatus();
    /// Returns identifier of delivery handler - localhost.
    virtual std::string DeliveryId() const;
    /// Returns "/" since local Delivery can access everywhere
    static bool CheckComm(DTR_ptr dtr, std::vector<std::string>& allowed_dirs,
                          std::string& load_avg);
    /// Returns true if child process exists
    virtual operator bool() const { return (child_ != NULL); };
    /// Returns true if child process does not exist
    virtual bool operator!() const { return (child_ == NULL); };
  private:
    /// Child process
    Arc::Run* child_;
    /// Stdin of child, used to pass credentials
    std::string stdin_;
    /// Temporary credentials location
    std::string tmp_proxy_;
    /// Time last communication was received from child
    Arc::Time last_comm;
  };

} // namespace DataStaging

#endif /* DATADELIVERYLOCALCOMM_H_ */

nordugrid-arc-7.1.1/src/libs/data-staging/DTRStatus.cpp

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "DTRStatus.h"

namespace DataStaging {

  // to do states
  static const DTRStatus::DTRStatusType to_process_states[] = {
    DTRStatus::CHECK_CACHE,
    DTRStatus::RESOLVE,
    DTRStatus::QUERY_REPLICA,
    DTRStatus::PRE_CLEAN,
    DTRStatus::STAGE_PREPARE,
    DTRStatus::TRANSFER,
    DTRStatus::RELEASE_REQUEST,
    DTRStatus::FINALISE_REPLICA,
    DTRStatus::REGISTER_REPLICA,
    DTRStatus::PROCESS_CACHE
  };

  // doing states
  static const DTRStatus::DTRStatusType processing_states[] = {
    DTRStatus::CHECKING_CACHE,
    DTRStatus::RESOLVING,
    DTRStatus::QUERYING_REPLICA,
    DTRStatus::PRE_CLEANING,
    DTRStatus::STAGING_PREPARING,
    DTRStatus::TRANSFERRING,
    DTRStatus::RELEASING_REQUEST,
    DTRStatus::FINALISING_REPLICA,
    DTRStatus::REGISTERING_REPLICA,
    DTRStatus::PROCESSING_CACHE
  };

  static const DTRStatus::DTRStatusType staged_states[] = {
    DTRStatus::STAGING_PREPARING,
    DTRStatus::STAGING_PREPARING_WAIT,
    DTRStatus::STAGED_PREPARED,
    DTRStatus::TRANSFER,
    DTRStatus::TRANSFERRING,
    DTRStatus::TRANSFERRING_CANCEL,
  };

  // The vector element type was lost in extraction; DTRStatusType is assumed
  // from the initializer arrays above.
  const std::vector<DTRStatus::DTRStatusType> DTRStatus::ToProcessStates(
      to_process_states,
      to_process_states + sizeof to_process_states / sizeof to_process_states[0]);

  const std::vector<DTRStatus::DTRStatusType> DTRStatus::ProcessingStates(
      processing_states,
      processing_states + sizeof processing_states / sizeof processing_states[0]);

  const std::vector<DTRStatus::DTRStatusType> DTRStatus::StagedStates(
      staged_states,
      staged_states + sizeof staged_states / sizeof staged_states[0]);

  static const std::string status_string[DTRStatus::NULL_STATE + 1] = {
    "NEW",
    "CHECK_CACHE",
    "CHECKING_CACHE",
    "CACHE_WAIT",
    "CACHE_CHECKED",
    "RESOLVE",
    "RESOLVING",
    "RESOLVED",
    "QUERY_REPLICA",
    "QUERYING_REPLICA",
    "REPLICA_QUERIED",
    "PRE_CLEAN",
    "PRE_CLEANING",
    "PRE_CLEANED",
    "STAGE_PREPARE",
    "STAGING_PREPARING",
    "STAGING_PREPARING_WAIT",
    "STAGED_PREPARED",
    "TRANSFER",
    "TRANSFERRING",
    "TRANSFERRING_CANCEL",
    "TRANSFERRED",
    "RELEASE_REQUEST",
    "RELEASING_REQUEST",
    "REQUEST_RELEASED",
    "FINALISE_REPLICA",
    "FINALISING_REPLICA",
    "REPLICA_FINALISED",
    "REGISTER_REPLICA",
    "REGISTERING_REPLICA",
    "REPLICA_REGISTERED",
    "PROCESS_CACHE",
    "PROCESSING_CACHE",
    "CACHE_PROCESSED",
    "DONE",
    "CANCELLED",
    "CANCELLED_FINISHED",
    "ERROR",
    "NULL_STATE"
  };

  std::string DTRStatus::str() const {
    return status_string[status];
  }

} // namespace DataStaging

nordugrid-arc-7.1.1/src/libs/data-staging/DataDeliveryComm.h

#ifndef DATA_DELIVERY_COMM_H_
#define DATA_DELIVERY_COMM_H_

#include "DTR.h"

namespace DataStaging {

  class DataDeliveryCommHandler;

  /// This class provides an abstract interface for the Delivery layer.
  /**
   * Different implementations provide different ways of providing Delivery
   * functionality. DataDeliveryLocalComm launches a local process to perform
   * the transfer and DataDeliveryRemoteComm contacts a remote service which
   * performs the transfer. The implementation is chosen depending on what is
   * set in the DTR, which the Scheduler should set based on various factors.
   *
   * CreateInstance() should be used to get a pointer to the instantiated
   * object. This also starts the transfer. Deleting this object stops the
   * transfer and cleans up any used resources. A singleton instance of
   * DataDeliveryCommHandler regularly polls all active transfers using
   * PullStatus() and fills the Status object with current information,
   * which can be obtained through GetStatus().
   * \ingroup datastaging
   * \headerfile DataDeliveryComm.h arc/data-staging/DataDeliveryComm.h
   */
  class DataDeliveryComm {
    friend class DataDeliveryCommHandler;
  public:
    /// Communication status with transfer
    enum CommStatusType {
      CommInit,    ///< Initializing/starting transfer, rest of information not valid
      CommNoError, ///< Communication going on smoothly
      CommTimeout, ///< Communication experienced timeout
      CommClosed,  ///< Communication channel was closed
      CommExited,  ///< Transfer exited. Mostly same as CommClosed but exit detected before pipe closed
      CommFailed   ///< Transfer failed. If we have CommFailed and no error code
                   ///< reported that normally means segfault or external kill.
    };

    #pragma pack(4)
    /// Plain C struct to pass information from executing process back to main thread
    /** \ingroup datastaging */
    struct Status {
      CommStatusType commstatus;          ///< Communication state (filled by main thread)
      time_t timestamp;                   ///< Time when information was generated (filled externally)
      DTRStatus::DTRStatusType status;    ///< Generic status
      DTRErrorStatus::DTRErrorStatusType error; ///< Error type
      DTRErrorStatus::DTRErrorLocation error_location; ///< Where error happened
      char error_desc[1024];              ///< Error description
      unsigned int streams;               ///< Number of transfer streams active
      unsigned long long int transferred; ///< Number of bytes transferred
      unsigned long long int offset;      ///< Last position to which file has no missing pieces
      unsigned long long int size;        ///< File size as obtained by protocol
      unsigned int speed;                 ///< Current transfer speed in bytes/sec during last ~minute
      char checksum[128];                 ///< Calculated checksum
      unsigned long long int transfer_time; ///< Time in ns to complete transfer (0 if not completed)
    };
    #pragma pack()

  protected:
    /// Current status of transfer
    Status status_;
    /// Latest status of transfer is read into this buffer
    Status status_buf_;
    /// Reading position of Status buffer
    unsigned int status_pos_;
    /// Lock to protect access to status
    std::mutex lock_;
    /// Transfer limits
    TransferParameters transfer_params;
    /// Time transfer was started
    Arc::Time start_;
    /// Logger object. Pointer to DTR's Logger.
    DTRLogger logger_;

    /// Check for new state and fill state accordingly.
    /**
     * This method is periodically called by the comm handler to obtain status
     * info. It detects communication and delivery failures and delivery
     * termination.
     */
    virtual void PullStatus() = 0;

    /// Returns identifier of the handler/delivery service this object uses to perform transfers.
    virtual std::string DeliveryId() const = 0;

    /// Start transfer with parameters taken from DTR and supplied transfer limits.
    /**
     * Constructor should not be used directly, CreateInstance() should be used
     * instead.
     */
    DataDeliveryComm(DTR_ptr dtr, const TransferParameters& params);

    /// Access handler used for this DataDeliveryComm object
    DataDeliveryCommHandler& GetHandler();

  private:
    /// Pointer to handler used for this DataDeliveryComm object
    DataDeliveryCommHandler* handler_;

  public:
    /// Factory method to get DataDeliveryComm instance.
    static DataDeliveryComm* CreateInstance(DTR_ptr dtr, const TransferParameters& params);

    /// Destroy object. This stops any ongoing transfer and cleans up resources.
    virtual ~DataDeliveryComm() {};

    /// Obtain status of transfer
    Status GetStatus() const;

    /// Check the delivery method is available. Calls CheckComm of the appropriate subclass.
    /**
     * \param dtr DTR from which credentials are used
     * \param allowed_dirs filled with list of dirs that this comm is allowed
     * to read/write
     * \param load_avg filled with the load average reported by the service
     * \return true if selected delivery method is available
     */
    static bool CheckComm(DTR_ptr dtr, std::vector<std::string>& allowed_dirs,
                          std::string& load_avg);

    /// Get explanation of error
    std::string GetError() const { return status_.error_desc; };

    /// Returns true if transfer is currently active
    virtual operator bool() const = 0;
    /// Returns true if transfer is currently not active
    virtual bool operator!() const = 0;
  };

  /// Singleton class handling all active DataDeliveryComm objects
  /**
   * \ingroup datastaging
   * \headerfile DataDeliveryComm.h arc/data-staging/DataDeliveryComm.h
   */
  class DataDeliveryCommHandler {
  private:
    std::mutex lock_;
    static void func(void* arg);
    // Container element types below were lost in extraction and are assumed
    // from the Add()/Remove() and getInstance() signatures in this class.
    std::list<DataDeliveryComm*> items_;
    static std::mutex comm_lock;
    static std::map<std::string, DataDeliveryCommHandler*> comm_handler;
    /// Constructor is private - getInstance() should be used instead
    DataDeliveryCommHandler();
    DataDeliveryCommHandler(const DataDeliveryCommHandler&);
    DataDeliveryCommHandler& operator=(const DataDeliveryCommHandler&);
  public:
    ~DataDeliveryCommHandler() {};
    /// Add a new DataDeliveryComm instance to the handler
    void Add(DataDeliveryComm* item);
    /// Remove a DataDeliveryComm instance from the handler
    void Remove(DataDeliveryComm* item);
    /// Get the instance of the handler for specified delivery id
    static DataDeliveryCommHandler* getInstance(std::string const & id);
  };

} // namespace DataStaging

#endif // DATA_DELIVERY_COMM_H_

nordugrid-arc-7.1.1/src/libs/data-staging/DataDelivery.h

#ifndef DATA_DELIVERY_H_
#define DATA_DELIVERY_H_

// Three angle-bracket include targets were lost in extraction and are left
// unreconstructed here.
#include
#include
#include
#include "DTR.h"
#include "DTRList.h"
#include "DTRStatus.h"

namespace DataStaging {

  /// DataDelivery transfers data between specified physical locations.
  /**
   * start() must be called to start the delivery thread for processing DTRs
   * and stop() should be called to stop it (this waits for all data transfers
   * to exit). stop() is also called in the destructor.
   *
   * All meta-operations for a DTR such as resolving replicas must be done
   * before sending to DataDelivery. Calling receiveDTR() starts a new process
   * which performs data transfer as specified in DTR.
   * \ingroup datastaging
   * \headerfile DataDelivery.h arc/data-staging/DataDelivery.h
   */
  class DataDelivery: public DTRCallback {
  private:
    /// lock for DTRs list
    Arc::SimpleCondition dtr_list_lock;
    /// Wrapper class around delivery process handler
    class delivery_pair_t;
    /// DTRs which delivery process has in its queue
    /// (list element type assumed to be delivery_pair_t*; lost in extraction)
    std::list<delivery_pair_t*> dtr_list;
    /// Transfer limits
    TransferParameters transfer_params;
    /// Logger object
    static Arc::Logger logger;
    /// Flag describing delivery state. Used to decide whether to keep running main loop
    ProcessState delivery_state;
    /// Condition to signal end of running
    Arc::SimpleCondition run_signal;
    /// Condition on which main thread waits, so it can wake up immediately
    /// when a new transfer arrives
    Arc::SimpleCondition cond;
    /// Thread to start new Delivery process
    static void start_delivery(void* arg);
    /// Thread to stop Delivery process
    static void stop_delivery(void* arg);
    /// Delete delivery_pair_t object. Starts a new thread which calls stop_delivery()
    bool delete_delivery_pair(delivery_pair_t* dp);
    /// Static version of main_thread, used when thread is created
    static void main_thread(void* arg);
    /// Main thread, which runs until stopped
    void main_thread(void);
    /// Copy constructor is private because DataDelivery should not be copied
    DataDelivery(const DataDelivery&);
    /// Assignment constructor is private because DataDelivery should not be copied
    DataDelivery& operator=(const DataDelivery&);
  public:
    /// Constructor.
    DataDelivery();
    /// Destructor calls stop() and waits for cancelled processes to exit.
    ~DataDelivery() { stop(); };
    /// Pass a DTR to Delivery.
    /**
     * This method is called by the scheduler to pass a DTR to the delivery.
     * The DataDelivery starts the data transfer either using a local process
     * or by sending a request to a remote delivery service, and then returns.
     * DataDelivery's own thread then monitors the transfer.
     */
    virtual void receiveDTR(DTR_ptr request);
    /// Stop the transfer corresponding to the given DTR.
    bool cancelDTR(DTR_ptr request);
    /// Start the Delivery thread, which runs until stop() is called.
    bool start();
    /// Tell the delivery to stop all transfers and threads and exit.
    bool stop();
    /// Set transfer limits.
    void SetTransferParameters(const TransferParameters& params);
  };

} // namespace DataStaging

#endif /*DATA_DELIVERY_H_*/

nordugrid-arc-7.1.1/src/libs/data-staging/test/

nordugrid-arc-7.1.1/src/libs/data-staging/test/Makefile.am

# Tests require mock DMC which can be enabled via configure --enable-mock-dmc
if MOCK_DMC_ENABLED
TESTS = DTRTest ProcessorTest DeliveryTest
else
TESTS =
endif

check_PROGRAMS = $(TESTS)

TESTS_ENVIRONMENT = env ARC_PLUGIN_PATH=$(top_builddir)/src/hed/dmc/mock/.libs:$(top_builddir)/src/hed/dmc/file/.libs

DTRTest_SOURCES = $(top_srcdir)/src/Test.cpp DTRTest.cpp
DTRTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
DTRTest_LDADD = ../libarcdatastaging.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/credential/libarccredential.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS)

ProcessorTest_SOURCES = $(top_srcdir)/src/Test.cpp ProcessorTest.cpp
ProcessorTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
ProcessorTest_LDADD = ../libarcdatastaging.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/credential/libarccredential.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS)

DeliveryTest_SOURCES = $(top_srcdir)/src/Test.cpp DeliveryTest.cpp
DeliveryTest_CXXFLAGS = -I$(top_srcdir)/include \
	$(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
DeliveryTest_LDADD = ../libarcdatastaging.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(top_builddir)/src/hed/libs/credential/libarccredential.la \
	$(CPPUNIT_LIBS) $(GLIBMM_LIBS)
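
The headers above define the public surface of the data-staging Delivery layer. What follows is a minimal illustrative sketch, not part of the package, of how the DataDelivery interface declared in DataDelivery.h is typically driven: set transfer limits, start the delivery thread, hand over prepared DTRs, and stop. The make_dtr() helper is hypothetical (DTR construction is declared in DTR.h, outside this section), and TransferParameters is assumed to be default-constructible with usable defaults.

// Hedged usage sketch for the DataDelivery API declared above.
#include <arc/data-staging/DataDelivery.h>  // install path per the \headerfile tags above

using namespace DataStaging;

// Hypothetical helper: builds a DTR whose meta-operations (replica
// resolution etc.) are already complete, as receiveDTR() requires.
DTR_ptr make_dtr();

int main() {
  DataDelivery delivery;

  TransferParameters params;               // assumed default-constructible
  delivery.SetTransferParameters(params);  // limits apply to every transfer this instance starts

  if (!delivery.start()) return 1;         // spawns the delivery thread
  delivery.receiveDTR(make_dtr());         // returns immediately; DataDelivery's own thread monitors the transfer

  delivery.stop();                         // waits for all transfers to exit; also called by the destructor
  return 0;
}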
nordugrid-arc-7.1.1/src/libs/data-staging/test/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355022463 xustar0030 mtime=1759498989.454039615 30 atime=1759499017.508247975 30 ctime=1759499028.857431331 nordugrid-arc-7.1.1/src/libs/data-staging/test/Makefile.in0000644000175000002070000012754415067751355024402 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @MOCK_DMC_ENABLED_TRUE@TESTS = DTRTest$(EXEEXT) ProcessorTest$(EXEEXT) \ @MOCK_DMC_ENABLED_TRUE@ DeliveryTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/libs/data-staging/test ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ 
$(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = @MOCK_DMC_ENABLED_TRUE@am__EXEEXT_1 = DTRTest$(EXEEXT) \ @MOCK_DMC_ENABLED_TRUE@ ProcessorTest$(EXEEXT) \ @MOCK_DMC_ENABLED_TRUE@ DeliveryTest$(EXEEXT) am_DTRTest_OBJECTS = DTRTest-Test.$(OBJEXT) DTRTest-DTRTest.$(OBJEXT) DTRTest_OBJECTS = $(am_DTRTest_OBJECTS) am__DEPENDENCIES_1 = DTRTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = DTRTest_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(DTRTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_DeliveryTest_OBJECTS = DeliveryTest-Test.$(OBJEXT) \ DeliveryTest-DeliveryTest.$(OBJEXT) DeliveryTest_OBJECTS = $(am_DeliveryTest_OBJECTS) DeliveryTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) DeliveryTest_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(DeliveryTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_ProcessorTest_OBJECTS = ProcessorTest-Test.$(OBJEXT) \ ProcessorTest-ProcessorTest.$(OBJEXT) ProcessorTest_OBJECTS = $(am_ProcessorTest_OBJECTS) ProcessorTest_DEPENDENCIES = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) ProcessorTest_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \ -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/DTRTest-DTRTest.Po \ ./$(DEPDIR)/DTRTest-Test.Po \ ./$(DEPDIR)/DeliveryTest-DeliveryTest.Po \ ./$(DEPDIR)/DeliveryTest-Test.Po \ ./$(DEPDIR)/ProcessorTest-ProcessorTest.Po \ ./$(DEPDIR)/ProcessorTest-Test.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = 
$(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(DTRTest_SOURCES) $(DeliveryTest_SOURCES) \ $(ProcessorTest_SOURCES) DIST_SOURCES = $(DTRTest_SOURCES) $(DeliveryTest_SOURCES) \ $(ProcessorTest_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ 
BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ 
PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = 
@top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ TESTS_ENVIRONMENT = env ARC_PLUGIN_PATH=$(top_builddir)/src/hed/dmc/mock/.libs:$(top_builddir)/src/hed/dmc/file/.libs DTRTest_SOURCES = $(top_srcdir)/src/Test.cpp DTRTest.cpp DTRTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DTRTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) ProcessorTest_SOURCES = $(top_srcdir)/src/Test.cpp ProcessorTest.cpp ProcessorTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) ProcessorTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) DeliveryTest_SOURCES = $(top_srcdir)/src/Test.cpp DeliveryTest.cpp DeliveryTest_CXXFLAGS = -I$(top_srcdir)/include \ $(CPPUNIT_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) DeliveryTest_LDADD = ../libarcdatastaging.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/data-staging/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/data-staging/test/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list DTRTest$(EXEEXT): $(DTRTest_OBJECTS) $(DTRTest_DEPENDENCIES) $(EXTRA_DTRTest_DEPENDENCIES) @rm -f DTRTest$(EXEEXT) $(AM_V_CXXLD)$(DTRTest_LINK) $(DTRTest_OBJECTS) $(DTRTest_LDADD) $(LIBS) DeliveryTest$(EXEEXT): $(DeliveryTest_OBJECTS) $(DeliveryTest_DEPENDENCIES) $(EXTRA_DeliveryTest_DEPENDENCIES) @rm -f DeliveryTest$(EXEEXT) $(AM_V_CXXLD)$(DeliveryTest_LINK) $(DeliveryTest_OBJECTS) $(DeliveryTest_LDADD) $(LIBS) ProcessorTest$(EXEEXT): $(ProcessorTest_OBJECTS) $(ProcessorTest_DEPENDENCIES) $(EXTRA_ProcessorTest_DEPENDENCIES) @rm -f ProcessorTest$(EXEEXT) $(AM_V_CXXLD)$(ProcessorTest_LINK) $(ProcessorTest_OBJECTS) $(ProcessorTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DTRTest-DTRTest.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DTRTest-Test.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DeliveryTest-DeliveryTest.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DeliveryTest-Test.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ProcessorTest-ProcessorTest.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ProcessorTest-Test.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
$(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< DTRTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-Test.o -MD -MP -MF $(DEPDIR)/DTRTest-Test.Tpo -c -o DTRTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DTRTest-Test.Tpo $(DEPDIR)/DTRTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='DTRTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp DTRTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-Test.obj -MD -MP -MF $(DEPDIR)/DTRTest-Test.Tpo -c -o DTRTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DTRTest-Test.Tpo $(DEPDIR)/DTRTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='DTRTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` DTRTest-DTRTest.o: DTRTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-DTRTest.o -MD -MP -MF $(DEPDIR)/DTRTest-DTRTest.Tpo -c -o DTRTest-DTRTest.o `test -f 'DTRTest.cpp' || echo '$(srcdir)/'`DTRTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DTRTest-DTRTest.Tpo $(DEPDIR)/DTRTest-DTRTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DTRTest.cpp' object='DTRTest-DTRTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-DTRTest.o `test -f 'DTRTest.cpp' || echo '$(srcdir)/'`DTRTest.cpp DTRTest-DTRTest.obj: DTRTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -MT DTRTest-DTRTest.obj -MD -MP -MF $(DEPDIR)/DTRTest-DTRTest.Tpo -c -o DTRTest-DTRTest.obj `if test -f 'DTRTest.cpp'; then $(CYGPATH_W) 'DTRTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DTRTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DTRTest-DTRTest.Tpo 
$(DEPDIR)/DTRTest-DTRTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DTRTest.cpp' object='DTRTest-DTRTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DTRTest_CXXFLAGS) $(CXXFLAGS) -c -o DTRTest-DTRTest.obj `if test -f 'DTRTest.cpp'; then $(CYGPATH_W) 'DTRTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DTRTest.cpp'; fi` DeliveryTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-Test.o -MD -MP -MF $(DEPDIR)/DeliveryTest-Test.Tpo -c -o DeliveryTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DeliveryTest-Test.Tpo $(DEPDIR)/DeliveryTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='DeliveryTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp DeliveryTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-Test.obj -MD -MP -MF $(DEPDIR)/DeliveryTest-Test.Tpo -c -o DeliveryTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DeliveryTest-Test.Tpo $(DEPDIR)/DeliveryTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='DeliveryTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` DeliveryTest-DeliveryTest.o: DeliveryTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-DeliveryTest.o -MD -MP -MF $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo -c -o DeliveryTest-DeliveryTest.o `test -f 'DeliveryTest.cpp' || echo '$(srcdir)/'`DeliveryTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo $(DEPDIR)/DeliveryTest-DeliveryTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DeliveryTest.cpp' object='DeliveryTest-DeliveryTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-DeliveryTest.o `test -f 
'DeliveryTest.cpp' || echo '$(srcdir)/'`DeliveryTest.cpp DeliveryTest-DeliveryTest.obj: DeliveryTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -MT DeliveryTest-DeliveryTest.obj -MD -MP -MF $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo -c -o DeliveryTest-DeliveryTest.obj `if test -f 'DeliveryTest.cpp'; then $(CYGPATH_W) 'DeliveryTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DeliveryTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DeliveryTest-DeliveryTest.Tpo $(DEPDIR)/DeliveryTest-DeliveryTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DeliveryTest.cpp' object='DeliveryTest-DeliveryTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(DeliveryTest_CXXFLAGS) $(CXXFLAGS) -c -o DeliveryTest-DeliveryTest.obj `if test -f 'DeliveryTest.cpp'; then $(CYGPATH_W) 'DeliveryTest.cpp'; else $(CYGPATH_W) '$(srcdir)/DeliveryTest.cpp'; fi` ProcessorTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-Test.o -MD -MP -MF $(DEPDIR)/ProcessorTest-Test.Tpo -c -o ProcessorTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ProcessorTest-Test.Tpo $(DEPDIR)/ProcessorTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='ProcessorTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp ProcessorTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-Test.obj -MD -MP -MF $(DEPDIR)/ProcessorTest-Test.Tpo -c -o ProcessorTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ProcessorTest-Test.Tpo $(DEPDIR)/ProcessorTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='ProcessorTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` ProcessorTest-ProcessorTest.o: ProcessorTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-ProcessorTest.o -MD -MP -MF 
$(DEPDIR)/ProcessorTest-ProcessorTest.Tpo -c -o ProcessorTest-ProcessorTest.o `test -f 'ProcessorTest.cpp' || echo '$(srcdir)/'`ProcessorTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo $(DEPDIR)/ProcessorTest-ProcessorTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ProcessorTest.cpp' object='ProcessorTest-ProcessorTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-ProcessorTest.o `test -f 'ProcessorTest.cpp' || echo '$(srcdir)/'`ProcessorTest.cpp ProcessorTest-ProcessorTest.obj: ProcessorTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -MT ProcessorTest-ProcessorTest.obj -MD -MP -MF $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo -c -o ProcessorTest-ProcessorTest.obj `if test -f 'ProcessorTest.cpp'; then $(CYGPATH_W) 'ProcessorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ProcessorTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ProcessorTest-ProcessorTest.Tpo $(DEPDIR)/ProcessorTest-ProcessorTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ProcessorTest.cpp' object='ProcessorTest-ProcessorTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ProcessorTest_CXXFLAGS) $(CXXFLAGS) -c -o ProcessorTest-ProcessorTest.obj `if test -f 'ProcessorTest.cpp'; then $(CYGPATH_W) 'ProcessorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/ProcessorTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ 
all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ col="$$grn"; \ else \ col="$$red"; \ fi; \ echo "$${col}$$dashes$${std}"; \ echo "$${col}$$banner$${std}"; \ test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ test -z "$$report" || echo "$${col}$$report$${std}"; \ echo "$${col}$$dashes$${std}"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/DTRTest-DTRTest.Po -rm -f ./$(DEPDIR)/DTRTest-Test.Po -rm -f ./$(DEPDIR)/DeliveryTest-DeliveryTest.Po -rm -f ./$(DEPDIR)/DeliveryTest-Test.Po -rm -f ./$(DEPDIR)/ProcessorTest-ProcessorTest.Po -rm -f ./$(DEPDIR)/ProcessorTest-Test.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/DTRTest-DTRTest.Po -rm -f ./$(DEPDIR)/DTRTest-Test.Po -rm -f ./$(DEPDIR)/DeliveryTest-DeliveryTest.Po -rm -f ./$(DEPDIR)/DeliveryTest-Test.Po -rm -f ./$(DEPDIR)/ProcessorTest-ProcessorTest.Po -rm -f ./$(DEPDIR)/ProcessorTest-Test.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-TESTS \ check-am clean clean-checkPROGRAMS clean-generic clean-libtool \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. 
# Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/libs/data-staging/test/PaxHeaders/DeliveryTest.cpp0000644000000000000000000000013115067751327023543 xustar0030 mtime=1759498967.749812815 29 atime=1759498967.86149359 30 ctime=1759499028.859734043 nordugrid-arc-7.1.1/src/libs/data-staging/test/DeliveryTest.cpp0000644000175000002070000001173515067751327025455 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "../DTRStatus.h" #include "../DTR.h" #include "../DataDelivery.h" using namespace DataStaging; class DeliveryTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(DeliveryTest); CPPUNIT_TEST(TestDeliverySimple); CPPUNIT_TEST(TestDeliveryFailure); CPPUNIT_TEST(TestDeliveryUnsupported); CPPUNIT_TEST_SUITE_END(); public: void TestDeliverySimple(); void TestDeliveryFailure(); void TestDeliveryUnsupported(); void setUp(); void tearDown(); private: std::list logs; char const * log_name; Arc::UserConfig cfg; }; void DeliveryTest::setUp() { // Hack to make sure DataStagingDelivery executable in the parent dir is used // A fake ARC location is used and a symlink is created in the libexec subdir // to the DataStagingDelivery in the parent dir. TODO: maybe put a test flag // in DTR code which tells it to use this local executable. Arc::DirCreate(std::string("../tmp/")+std::string(PKGLIBSUBDIR), S_IRWXU, true); Arc::ArcLocation::Init("../tmp/x/x"); Arc::FileLink("../../../DataStagingDelivery", std::string("../tmp/")+std::string(PKGLIBSUBDIR)+std::string("/DataStagingDelivery"), true); logs.clear(); const std::list& destinations = Arc::Logger::getRootLogger().getDestinations(); for(std::list::const_iterator dest = destinations.begin(); dest != destinations.end(); ++dest) { logs.push_back(*dest); } log_name = "DataStagingTest"; } void DeliveryTest::tearDown() { Arc::DirDelete("../tmp"); } void DeliveryTest::TestDeliverySimple() { std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); std::string jobid("1234"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source,destination,cfg,jobid,Arc::User().get_uid(),logs,log_name)); CPPUNIT_ASSERT(*dtr); // Pass DTR to Delivery DataStaging::DataDelivery delivery; delivery.start(); delivery.receiveDTR(dtr); DataStaging::DTRStatus status = dtr->get_status(); // Wait for result. It must be either ERROR or TRANSFERRED at end. 
// During transfer state may be NULL or TRANSFERRING for(int cnt=0;;++cnt) { status = dtr->get_status(); if(status == DataStaging::DTRStatus::ERROR) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRED) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRING) { } else if(status == DataStaging::DTRStatus::NULL_STATE) { } else { break; } CPPUNIT_ASSERT(cnt < 300); // 30s limit on transfer time usleep(100000); } CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::TRANSFERRED, status.GetStatus()); CPPUNIT_ASSERT_EQUAL_MESSAGE(dtr->get_error_status().GetDesc(), DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); } void DeliveryTest::TestDeliveryFailure() { std::string source("fail://mocksrc/1"); std::string destination("fail://mockdest/1"); std::string jobid("1234"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source,destination,cfg,jobid,Arc::User().get_uid(),logs,log_name)); CPPUNIT_ASSERT(*dtr); // Pass DTR to Delivery DataStaging::DataDelivery delivery; delivery.start(); delivery.receiveDTR(dtr); DataStaging::DTRStatus status = dtr->get_status(); // Wait for result. It must be either ERROR or TRANSFERRED at end. // During transfer state may be NULL or TRANSFERRING for(int cnt=0;;++cnt) { status = dtr->get_status(); if(status == DataStaging::DTRStatus::ERROR) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRED) { break; } else if(status == DataStaging::DTRStatus::TRANSFERRING) { } else if(status == DataStaging::DTRStatus::NULL_STATE) { } else { break; } CPPUNIT_ASSERT(cnt < 200); // 20s limit on transfer time usleep(100000); } CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::TRANSFERRED, status.GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); } void DeliveryTest::TestDeliveryUnsupported() { std::string source("proto://host/file"); std::string destination("mock://mockdest/1"); std::string jobid("1234"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source,destination,cfg,jobid,Arc::User().get_uid(),logs,log_name)); CPPUNIT_ASSERT(!(*dtr)); // Pass DTR to Delivery DataStaging::DataDelivery delivery; delivery.start(); delivery.receiveDTR(dtr); // DTR should be checked by delivery and immediately set to TRANSFERRED // with error status set to LOGIC error CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::TRANSFERRED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::INTERNAL_LOGIC_ERROR, dtr->get_error_status().GetErrorStatus()); } CPPUNIT_TEST_SUITE_REGISTRATION(DeliveryTest); nordugrid-arc-7.1.1/src/libs/data-staging/test/PaxHeaders/ProcessorTest.cpp0000644000000000000000000000013115067751327023737 xustar0030 mtime=1759498967.749812815 29 atime=1759498967.86149359 30 ctime=1759499028.860899889 nordugrid-arc-7.1.1/src/libs/data-staging/test/ProcessorTest.cpp0000644000175000002070000004456715067751327025662 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "../DTRStatus.h" #include "../Processor.h" using namespace DataStaging; class ProcessorTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(ProcessorTest); CPPUNIT_TEST(TestPreClean); CPPUNIT_TEST(TestCacheCheck); CPPUNIT_TEST(TestResolve); CPPUNIT_TEST(TestQueryReplica); CPPUNIT_TEST(TestReplicaRegister); CPPUNIT_TEST(TestCacheProcess); CPPUNIT_TEST_SUITE_END(); public: void TestPreClean(); void TestCacheCheck(); void TestResolve(); void TestQueryReplica(); void TestReplicaRegister(); void 
TestCacheProcess(); void setUp(); void tearDown(); private: std::list logs; char const * log_name; Arc::UserConfig cfg; std::string tmpdir; }; void ProcessorTest::setUp() { logs.clear(); const std::list& destinations = Arc::Logger::getRootLogger().getDestinations(); for(std::list::const_iterator dest = destinations.begin(); dest != destinations.end(); ++dest) { logs.push_back(*dest); } log_name = "DataStagingTest"; } void ProcessorTest::tearDown() { if (!tmpdir.empty()) Arc::DirDelete(tmpdir); } void ProcessorTest::TestPreClean() { // Note: mock doesn't really delete, but reports success std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest;overwrite=yes/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::PRE_CLEAN); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); DataStaging::Processor processor; processor.start(); processor.receiveDTR(dtr); // sleep while thread deletes while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::PRE_CLEANED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::PRE_CLEANED, dtr->get_status().GetStatus()); // use a non-existent file destination = "fail://badhost;overwrite=yes/file1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::PRE_CLEAN); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread deletes while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::PRE_CLEANED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); // PRE_CLEANED is the correct status even after an error CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::PRE_CLEANED, dtr->get_status().GetStatus()); } void ProcessorTest::TestCacheCheck() { // create tmp cache dir for test CPPUNIT_ASSERT(Arc::TmpDirCreate(tmpdir)); std::string session(tmpdir); session += "/session"; std::string cache_dir(tmpdir); cache_dir += "/cache"; DataStaging::DTRCacheParameters cache_param; cache_param.cache_dirs.push_back(cache_dir); // use non-cacheable input and check it cannot be cached std::string jobid("123456789"); std::string source("mock://mocksrc;cache=no/1"); std::string destination(std::string(session+"/file1")); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::NON_CACHEABLE); dtr->set_cache_parameters(cache_param); // use cacheable input - set invariant since mock does not set a modification // time and so cache file will appear outdated source = "mock://mocksrc;cache=invariant/1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::CACHEABLE); dtr->set_cache_parameters(cache_param); dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); std::string cache_file(cache_dir + "/data/58/32ec5285b5990e13fd6628af93ea2b751dac7b");
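// The expected cache path above follows the layout visible throughout these tests: // a 40-character hex digest (SHA-1 length) of the source URL, split into a // two-character subdirectory under <cache_dir>/data plus the remaining 38 // characters, so the file name can be precomputed before the cache is populated.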
DataStaging::Processor processor; processor.start(); processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_CHECKED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_CHECKED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHEABLE, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(cache_file, dtr->get_cache_file()); // locked file std::string lock_file(cache_file + ".lock"); int fd = ::open(lock_file.c_str(), O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); CPPUNIT_ASSERT(fd); char lock_contents[] = "1@localhost"; CPPUNIT_ASSERT(write(fd, lock_contents, sizeof(lock_contents)) > 0); CPPUNIT_ASSERT_EQUAL(0, close(fd)); dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_WAIT) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_WAIT, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHE_LOCKED, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(0, remove(lock_file.c_str())); // write cache file fd = ::open(cache_file.c_str(), O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); CPPUNIT_ASSERT(fd); char cache_file_contents[] = "abcde"; CPPUNIT_ASSERT(write(fd, cache_file_contents, sizeof(cache_file_contents)) > 0); CPPUNIT_ASSERT_EQUAL(0, close(fd)); // check again, should return already present dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_CHECKED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_CHECKED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHE_ALREADY_PRESENT, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(0, remove(cache_file.c_str())); // test files using guids are handled properly source = "mock://mocksrc/1:guid=4a2b61aa-1e57-4d32-9f23-873a9c9b9aed"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::CACHEABLE); dtr->set_cache_parameters(cache_param); dtr->set_status(DataStaging::DTRStatus::CHECK_CACHE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); cache_file = cache_dir + "/data/ba/bb0555ddfccde73069558aacfe512ea42c8c79"; processor.receiveDTR(dtr); // sleep while thread checks cache while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_CHECKED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_CHECKED, dtr->get_status().GetStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::CACHEABLE, dtr->get_cache_state()); CPPUNIT_ASSERT_EQUAL(cache_file, dtr->get_cache_file()); } void ProcessorTest::TestResolve() { // Note: using mock in resolve doesn't really test resolving since mock is // not a DataPointIndex DataStaging::Processor processor; processor.start(); std::string jobid("123456789"); // resolve a good source std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); 
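// receiveDTR() only queues the request: the Processor runs in its own thread // (started above), so the test polls the DTR status in a usleep loop until the // pre-processor reports a final state - the same pattern used by every test in // this file.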
// sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it found replicas CPPUNIT_ASSERT(dtr->get_source()->HaveLocations()); /* This part can be uncommented if a mock index DataPoint exists // pre-register a good destination source = "mock://mocksrc/1"; destination = "mockindex://mock://mockdest/1@mockindexdest/1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it added the destination replica CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), dtr->get_destination()->CurrentLocation().str()); std::list files; CPPUNIT_ASSERT(dtr->get_destination()->List(files)); CPPUNIT_ASSERT_EQUAL(1, (int)files.size()); CPPUNIT_ASSERT_EQUAL(std::string("mockindex://mockindexdest/1"), files.front().GetName()); // test replication source = "mockindex://mockdestindex/ABCDE"; destination = "mockindex://mock://mockdest/ABCDE@mockindexdest/ABCDE"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); dtr->set_replication(true); // usually set automatically by scheduler processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it found replicas CPPUNIT_ASSERT(dtr->get_source()->HaveLocations()); // check that it added the destination replica CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/ABCDE"), dtr->get_destination()->CurrentLocation().str()); // copy to an existing LFN from a different LFN source = "mock://mocksrc/2"; destination = "mockindex://mock://mockdest/2@mockindexdest/ABCDE"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) usleep(100); // will fail since force_registration is not set CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // set force registration and try again dtr->set_force_registration(true); 
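// force_registration allows re-registering an LFN that already points at a // different replica; without it the resolve above ends with // PERMANENT_REMOTE_ERROR, with it the retry below should succeed.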
dtr->reset_error_status(); dtr->set_status(DataStaging::DTRStatus::RESOLVE); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread resolves while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::RESOLVED) usleep(100); // should be successful now CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::RESOLVED, dtr->get_status().GetStatus()); // check that it added the destination replica CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/2"), dtr->get_destination()->CurrentLocation().str()); */ } void ProcessorTest::TestQueryReplica() { // query a valid file std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); DataStaging::Processor processor; processor.start(); dtr->set_status(DataStaging::DTRStatus::QUERY_REPLICA); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while replica is queried while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::REPLICA_QUERIED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::REPLICA_QUERIED, dtr->get_status().GetStatus()); // invalid file source = "fail://mocksrc/1"; dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); dtr->set_status(DataStaging::DTRStatus::QUERY_REPLICA); DataStaging::DTR::push(dtr, DataStaging::PRE_PROCESSOR); processor.receiveDTR(dtr); // sleep while replica is queried while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::REPLICA_QUERIED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::REPLICA_QUERIED, dtr->get_status().GetStatus()); } void ProcessorTest::TestReplicaRegister() { /* Needs mock index DMC DataStaging::Processor processor; processor.start(); std::string jobid("123456789"); // register a file std::string source("mock://mocksrc/1"); std::string destination("mockindex://mock://mockdest/1@mockindexdest/1"); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); // have to resolve first CPPUNIT_ASSERT(dtr->get_destination()->Resolve(false).Passed()); CPPUNIT_ASSERT(dtr->get_destination()->HaveLocations()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), dtr->get_destination()->CurrentLocation().str()); // pre-register CPPUNIT_ASSERT(dtr->get_destination()->PreRegister(false, false).Passed()); // post-register dtr->set_status(DataStaging::DTRStatus::REGISTER_REPLICA); DataStaging::DTR::push(dtr, DataStaging::POST_PROCESSOR); processor.receiveDTR(dtr); // sleep while thread registers while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::REPLICA_REGISTERED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::REPLICA_REGISTERED, dtr->get_status().GetStatus()); // check
registration is ok Arc::FileInfo file; Arc::DataPoint::DataPointInfoType verb = (Arc::DataPoint::DataPointInfoType)(Arc::DataPoint::INFO_TYPE_CONTENT | Arc::DataPoint::INFO_TYPE_STRUCT); CPPUNIT_ASSERT(dtr->get_destination()->Stat(file, verb).Passed()); std::list replicas = file.GetURLs(); CPPUNIT_ASSERT_EQUAL(1, (int)replicas.size()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), replicas.front().str()); // clean up CPPUNIT_ASSERT(dtr->get_destination()->Unregister(true).Passed()); */ } void ProcessorTest::TestCacheProcess() { CPPUNIT_ASSERT(Arc::TmpDirCreate(tmpdir)); std::string session(tmpdir); session += "/session"; std::string cache_dir(tmpdir); cache_dir += "/cache"; DataStaging::DTRCacheParameters cache_param; cache_param.cache_dirs.push_back(cache_dir); std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination(std::string(session+"/file1")); DataStaging::DTR_ptr dtr = new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name); CPPUNIT_ASSERT(dtr); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(dtr->get_cache_state() == DataStaging::CACHEABLE); dtr->set_cache_parameters(cache_param); // process with no cache file present std::string cache_file(cache_dir + "/data/58/32ec5285b5990e13fd6628af93ea2b751dac7b"); remove(cache_file.c_str()); DataStaging::Processor processor; processor.start(); dtr->set_status(DataStaging::DTRStatus::PROCESS_CACHE); DataStaging::DTR::push(dtr, DataStaging::POST_PROCESSOR); processor.receiveDTR(dtr); // sleep while cache is processed while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_PROCESSED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::CACHE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_PROCESSED, dtr->get_status().GetStatus()); // create cache file and try again CPPUNIT_ASSERT(Arc::DirCreate(std::string(cache_dir+"/data/58"), 0700, true)); int fd = ::open(cache_file.c_str(), O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); CPPUNIT_ASSERT(fd); char cache_file_contents[] = "abcde"; CPPUNIT_ASSERT_EQUAL_MESSAGE(Arc::StrError(errno), (int)sizeof(cache_file_contents), (int)write(fd, cache_file_contents, sizeof(cache_file_contents))); CPPUNIT_ASSERT_EQUAL(0, close(fd)); dtr->reset_error_status(); dtr->set_status(DataStaging::DTRStatus::PROCESS_CACHE); DataStaging::DTR::push(dtr, DataStaging::POST_PROCESSOR); processor.receiveDTR(dtr); // sleep while cache is processed while (dtr->get_status().GetStatus() != DataStaging::DTRStatus::CACHE_PROCESSED) usleep(100); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRErrorStatus::NONE_ERROR, dtr->get_error_status().GetErrorStatus()); CPPUNIT_ASSERT_EQUAL(DataStaging::DTRStatus::CACHE_PROCESSED, dtr->get_status().GetStatus()); // check correct links exist struct stat st; CPPUNIT_ASSERT_EQUAL(0, stat(std::string(cache_dir + "/joblinks/123456789/file1").c_str(), &st)); CPPUNIT_ASSERT_EQUAL(0, stat(std::string(session + "/file1").c_str(), &st)); } CPPUNIT_TEST_SUITE_REGISTRATION(ProcessorTest); nordugrid-arc-7.1.1/src/libs/data-staging/test/PaxHeaders/DTRTest.cpp0000644000000000000000000000013115067751327022411 xustar0030 mtime=1759498967.749501515 29 atime=1759498967.86149359 30 ctime=1759499028.858647116 nordugrid-arc-7.1.1/src/libs/data-staging/test/DTRTest.cpp0000644000175000002070000000602615067751327024320 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "../DTR.h" using namespace DataStaging; class DTRTest : public CppUnit::TestFixture { 
CPPUNIT_TEST_SUITE(DTRTest); CPPUNIT_TEST(TestDTRConstructor); CPPUNIT_TEST(TestDTREndpoints); CPPUNIT_TEST_SUITE_END(); public: void TestDTRConstructor(); void TestDTREndpoints(); void setUp(); void tearDown(); private: std::list logs; char const * log_name; Arc::UserConfig cfg; }; void DTRTest::setUp() { logs.clear(); const std::list& destinations = Arc::Logger::getRootLogger().getDestinations(); for(std::list::const_iterator dest = destinations.begin(); dest != destinations.end(); ++dest) { logs.push_back(*dest); } log_name = "DataStagingTest"; } void DTRTest::tearDown() { } void DTRTest::TestDTRConstructor() { std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name)); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT(!dtr->get_id().empty()); // Copy constructor DataStaging::DTR_ptr dtr2(dtr); CPPUNIT_ASSERT(*dtr2); CPPUNIT_ASSERT_EQUAL(dtr->get_id(), dtr2->get_id()); // a new DataHandle object is created for the new DTR so they should // not be equal. Why does this test pass???? CPPUNIT_ASSERT_EQUAL(dtr->get_source(), dtr2->get_source()); CPPUNIT_ASSERT_EQUAL(dtr->get_owner(), dtr2->get_owner()); CPPUNIT_ASSERT_EQUAL(dtr->get_status().GetStatus(), dtr2->get_status().GetStatus()); // check that creating and destroying a copy doesn't affect the original { DataStaging::DTR_ptr dtr3(dtr); CPPUNIT_ASSERT(*dtr3); } CPPUNIT_ASSERT_EQUAL(std::string("mock://mocksrc/1"), dtr->get_source()->str()); // make a bad DTR source = "myprocotol://blabla/file1"; DataStaging::DTR_ptr dtr4(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name)); CPPUNIT_ASSERT(!(*dtr4)); // bad DTR copying to itself DataStaging::DTR_ptr dtr5(new DataStaging::DTR(source, source, cfg, jobid, Arc::User().get_uid(), logs, log_name)); CPPUNIT_ASSERT(!(*dtr5)); } void DTRTest::TestDTREndpoints() { std::string jobid("123456789"); std::string source("mock://mocksrc/1"); std::string destination("mock://mockdest/1"); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name)); CPPUNIT_ASSERT(*dtr); CPPUNIT_ASSERT_EQUAL(std::string("mock://mocksrc/1"), dtr->get_source()->str()); CPPUNIT_ASSERT_EQUAL(std::string("mock://mockdest/1"), dtr->get_destination()->str()); // create a bad url source = "mock:/file1"; DataStaging::DTR_ptr dtrbad(new DataStaging::DTR(source, destination, cfg, jobid, Arc::User().get_uid(), logs, log_name)); CPPUNIT_ASSERT(!dtrbad->get_source()->GetURL()); // TODO DTR validity } CPPUNIT_TEST_SUITE_REGISTRATION(DTRTest); nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DTR.cpp0000644000000000000000000000012715067751327020577 xustar0030 mtime=1759498967.747355202 29 atime=1759498967.85949356 28 ctime=1759499028.8216315 nordugrid-arc-7.1.1/src/libs/data-staging/DTR.cpp0000644000175000002070000003522515067751327022504 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "Processor.h" #include "DataDelivery.h" #include "Scheduler.h" #include "DTR.h" namespace DataStaging { static const char* const owner_name[] = { "GENERATOR", "SCHEDULER", "PRE-PROCESSOR", "DELIVERY", "POST-PROCESSOR" }; static const char* get_owner_name(StagingProcesses proc) { if(((int)proc) < 0) return ""; if(((int)proc) >= sizeof(owner_name)/sizeof(const char*)) return ""; return owner_name[proc]; } const Arc::URL 
DTR::LOCAL_DELIVERY("file:/local"); Arc::LogLevel DTR::LOG_LEVEL(Arc::WARNING); DTR::DTR(const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, const uid_t& uid, const std::list& logs, const std::string& logname) : DTR_ID(""), source_url(source), destination_url(destination), cfg(usercfg), source_endpoint(source_url, cfg), destination_endpoint(destination_url, cfg), source_url_str(source_url.str()), destination_url_str(destination_url.str()), user(uid), parent_job_id(jobid), priority(50), transfershare("_default"), sub_share(""), tries_left(1), initial_tries(1), replication(false), force_registration(false), status(DTRStatus::NEW,"Created by the generator"), bytes_transferred(0), transfer_time(0), created(time(NULL)), cancel_request(false), bulk_start(false), bulk_end(false), source_supports_bulk(false), mandatory(true), delivery_endpoint(LOCAL_DELIVERY), use_host_cert_for_remote_delivery(false), current_owner(GENERATOR), log_destinations(logs), perf_record(perf_log) { logger = new Arc::Logger(Arc::Logger::getRootLogger(), logname.c_str()); logger->addDestinations(get_log_destinations()); // check that endpoints can be handled if (!source_endpoint || !(*source_endpoint)) { logger->msg(Arc::ERROR, "Could not handle endpoint %s", source); return; } if (!destination_endpoint || !(*destination_endpoint)) { logger->msg(Arc::ERROR, "Could not handle endpoint %s", destination); return; } // Some validation checks if (source_url == destination_url) { // It is possible to replicate inside an index service // The physical replicas will be checked in RESOLVING if (source_endpoint->IsIndex() && destination_endpoint->IsIndex()) { replication = true; } else { logger->msg(Arc::ERROR, "Source is the same as destination"); set_error_status(DTRErrorStatus::SELF_REPLICATION_ERROR, DTRErrorStatus::NO_ERROR_LOCATION, "Cannot replicate a file to itself"); return; } } // set insecure by default. Real value will come from configuration source_endpoint->SetSecure(false); destination_endpoint->SetSecure(false); // check for bulk support - call bulk methods with empty list std::list datapoints; if (source_endpoint->IsIndex()) { if (source_endpoint->Resolve(true, datapoints) == Arc::DataStatus::Success) source_supports_bulk = true; } else { std::list files; if (source_endpoint->Stat(files, datapoints) == Arc::DataStatus::Success) source_supports_bulk = true; } cache_state = (source_endpoint->Cache() && destination_endpoint->Local()) ? 
CACHEABLE : NON_CACHEABLE; if (source_url.Option("failureallowed") == "yes" || destination_url.Option("failureallowed") == "yes") { mandatory = false; } /* Think how to populate transfer parameters */ mark_modification(); set_timeout(60); // setting ID last means all the previous steps have to pass for the DTR to be valid DTR_ID = Arc::UUID(); // Prefix all log messages for this DTR with the short ID for (std::list::iterator dest = log_destinations.begin(); dest != log_destinations.end(); ++dest) { (*dest)->setPrefix("DTR " + get_short_id() + ": "); } } std::list DTR::get_log_destinations() const { std::list log_dest; for (std::list::const_iterator dest = log_destinations.begin(); dest != log_destinations.end(); ++dest) { log_dest.push_back(dest->Ptr()); } return log_dest; } void DTR::registerCallback(DTRCallback* cb, StagingProcesses owner) { lock.lock(); proc_callback[owner].push_back(cb); lock.unlock(); } void DTR::reset() { // remove resolved locations if (source_endpoint->IsIndex()) { source_endpoint->ClearLocations(); } // clear any transfer locations source_endpoint->ClearTransferLocations(); // reset retry count to 1 source_endpoint->SetTries(1); if (destination_endpoint->IsIndex()) { destination_endpoint->ClearLocations(); } destination_endpoint->ClearTransferLocations(); destination_endpoint->SetTries(1); // empty cache and map info cache_file.clear(); mapped_source.clear(); mapped_destination.clear(); bytes_transferred = 0; transfer_time = 0; reset_error_status(); } void DTR::set_id(const std::string& id) { // sanity check - regular expressions would be useful here if (id.length() != DTR_ID.length()) { logger->msg(Arc::WARNING, "Invalid ID: %s", id); } else { DTR_ID = id; // Change logging prefix to new ID for (std::list::iterator dest = log_destinations.begin(); dest != log_destinations.end(); ++dest) { (*dest)->setPrefix("DTR " + get_short_id() + ": "); } } } std::string DTR::get_short_id() const { if(DTR_ID.length() < 8) return DTR_ID; std::string short_id(DTR_ID.substr(0,4)+"..."+DTR_ID.substr(DTR_ID.length()-4)); return short_id; } void DTR::set_priority(int pri) { // limit priority between 1 and 100 if (pri <= 0) pri = 1; if (pri > 100) pri = 100; priority = pri; mark_modification(); } void DTR::set_tries_left(unsigned int tries) { initial_tries = tries; tries_left = initial_tries; } void DTR::decrease_tries_left() { if (tries_left > 0) tries_left--; } void DTR::set_status(DTRStatus stat) { logger->msg(Arc::VERBOSE, "%s->%s", status.str(), stat.str()); lock.lock(); status = stat; lock.unlock(); mark_modification(); } DTRStatus DTR::get_status() { lock.lock(); DTRStatus s = status; lock.unlock(); return s; } void DTR::set_error_status(DTRErrorStatus::DTRErrorStatusType error_stat, DTRErrorStatus::DTRErrorLocation error_loc, const std::string& desc) { lock.lock(); error_status = DTRErrorStatus(error_stat, status.GetStatus(), error_loc, desc); lock.unlock(); mark_modification(); } void DTR::reset_error_status() { lock.lock(); error_status = DTRErrorStatus(); lock.unlock(); mark_modification(); } DTRErrorStatus DTR::get_error_status() { lock.lock(); DTRErrorStatus s = error_status; lock.unlock(); return s; } void DTR::set_bytes_transferred(unsigned long long int bytes) { bytes_transferred = bytes; } void DTR::set_transfer_time(unsigned long long int t) { transfer_time = t; } void DTR::set_cache_file(const std::string& filename) { cache_file = filename; mark_modification(); } void DTR::set_cache_state(CacheState state) { cache_state = state; mark_modification(); } void 
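/*
 * registerCallback() above and DTR::push() below are how DTRs travel
 * between components: a component implements DTRCallback, registers itself
 * for one of the StagingProcesses, and push() hands the DTR over by
 * invoking receiveDTR() on every callback registered for the new owner.
 * A minimal generator sketch, assuming only the interfaces visible in this
 * file (illustrative only - not part of the library):
 *
 *   class SimpleGenerator : public DataStaging::DTRCallback {
 *    public:
 *     // invoked when the scheduler hands a finished DTR back to us
 *     virtual void receiveDTR(DataStaging::DTR_ptr dtr) {
 *       if (dtr->error()) std::cerr << "transfer failed" << std::endl;
 *     }
 *   };
 *
 *   SimpleGenerator generator;
 *   dtr->registerCallback(&generator, DataStaging::GENERATOR);
 *   DataStaging::DTR::push(dtr, DataStaging::SCHEDULER);
 */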
DTR::set_cancel_request() { cancel_request = true; // set process time to now so it is picked up straight away set_process_time(0); mark_modification(); } void DTR::set_process_time(const Arc::Period& process_time) { Arc::Time t; t = t + process_time; next_process_time.SetTime(t.GetTime(), t.GetTimeNanoseconds()); } bool DTR::bulk_possible() { if (status == DTRStatus::RESOLVE && source_supports_bulk) return true; if (status == DTRStatus::QUERY_REPLICA) { std::list files; std::list datapoints; if (source_endpoint->CurrentLocationHandle()->Stat(files, datapoints) == Arc::DataStatus::Success) return true; } return false; } std::list DTR::get_callbacks(const std::map >& proc_callback, StagingProcesses owner) { std::list l; lock.lock(); std::map >::const_iterator c = proc_callback.find(owner); if(c == proc_callback.end()) { lock.unlock(); return l; } l = c->second; lock.unlock(); return l; } void DTR::push(DTR_ptr dtr, StagingProcesses new_owner) { /* This function contains necessary operations * to pass the pointer to this DTR to another * process and make sure that the process accepted it */ dtr->lock.lock(); dtr->current_owner = new_owner; dtr->lock.unlock(); std::list callbacks = dtr->get_callbacks(dtr->proc_callback,dtr->current_owner); if (callbacks.empty()) dtr->logger->msg(Arc::INFO, "No callback for %s defined", get_owner_name(dtr->current_owner)); for (std::list::iterator callback = callbacks.begin(); callback != callbacks.end(); ++callback) { switch(dtr->current_owner) { case GENERATOR: case SCHEDULER: case PRE_PROCESSOR: case DELIVERY: case POST_PROCESSOR: { // call registered callback if (*callback) (*callback)->receiveDTR(dtr); else dtr->logger->msg(Arc::WARNING, "NULL callback for %s", get_owner_name(dtr->current_owner)); } break; default: // impossible dtr->logger->msg(Arc::INFO, "Request to push to unknown owner - %u", (unsigned int)dtr->current_owner); break; } } dtr->mark_modification(); } bool DTR::suspend() { /* This function will contain necessary operations * to stop the transfer in the DTR */ mark_modification(); return true; } bool DTR::is_destined_for_pre_processor() const { return (status == DTRStatus::PRE_CLEAN || status == DTRStatus::CHECK_CACHE || status == DTRStatus::RESOLVE || status == DTRStatus::QUERY_REPLICA || status == DTRStatus::STAGE_PREPARE); } bool DTR::is_destined_for_post_processor() const { return (status == DTRStatus::RELEASE_REQUEST || status == DTRStatus::FINALISE_REPLICA || status == DTRStatus::REGISTER_REPLICA || status == DTRStatus::PROCESS_CACHE); } bool DTR::is_destined_for_delivery() const { return (status == DTRStatus::TRANSFER); } bool DTR::came_from_pre_processor() const { return (status == DTRStatus::PRE_CLEANED || status == DTRStatus::CACHE_WAIT || status == DTRStatus::CACHE_CHECKED || status == DTRStatus::RESOLVED || status == DTRStatus::REPLICA_QUERIED || status == DTRStatus::STAGING_PREPARING_WAIT || status == DTRStatus::STAGED_PREPARED); } bool DTR::came_from_post_processor() const { return (status == DTRStatus::REQUEST_RELEASED || status == DTRStatus::REPLICA_FINALISED || status == DTRStatus::REPLICA_REGISTERED || status == DTRStatus::CACHE_PROCESSED); } bool DTR::came_from_delivery() const { return (status == DTRStatus::TRANSFERRED); } bool DTR::came_from_generator() const { return (status == DTRStatus::NEW); } bool DTR::is_in_final_state() const { return (status == DTRStatus::DONE || status == DTRStatus::CANCELLED || status == DTRStatus::ERROR); } void DTR::set_transfer_share(const std::string& share_name) { lock.lock(); 
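/*
 * The is_destined_for_*() and came_from_*() predicates above encode the DTR
 * state machine for the Scheduler, which routes each DTR after every state
 * change along these lines (a rough sketch, not the actual Scheduler code):
 *
 *   if (dtr->is_destined_for_pre_processor())       DTR::push(dtr, PRE_PROCESSOR);
 *   else if (dtr->is_destined_for_delivery())       DTR::push(dtr, DELIVERY);
 *   else if (dtr->is_destined_for_post_processor()) DTR::push(dtr, POST_PROCESSOR);
 *   else if (dtr->is_in_final_state())              DTR::push(dtr, GENERATOR);
 */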
transfershare = share_name; if (!sub_share.empty()) transfershare += "-" + sub_share; lock.unlock(); } DTRCacheParameters::DTRCacheParameters(std::vector caches, std::vector drain_caches, std::vector readonly_caches): cache_dirs(caches), drain_cache_dirs(drain_caches), readonly_cache_dirs(readonly_caches) { } DTRCredentialInfo::DTRCredentialInfo(const std::string& DN, const Arc::Time& expirytime, const std::list vomsfqans): DN(DN), expirytime(expirytime), vomsfqans(vomsfqans) { } std::string DTRCredentialInfo::extractVOMSVO() const { if (vomsfqans.empty()) return ""; std::vector parts; Arc::tokenize(*(vomsfqans.begin()), parts, "/"); return parts.at(0); } std::string DTRCredentialInfo::extractVOMSGroup() const { if (vomsfqans.empty()) return ""; std::string vomsvo; for (std::list::const_iterator i = vomsfqans.begin(); i != vomsfqans.end(); ++i) { std::vector parts; Arc::tokenize(*i, parts, "/"); if (vomsvo.empty()) vomsvo = parts.at(0); if (parts.size() > 1 && parts.at(1).find("Role=") != 0) { return std::string(vomsvo+":"+parts.at(1)); } } return std::string(vomsvo + ":null"); } std::string DTRCredentialInfo::extractVOMSRole() const { if (vomsfqans.empty()) return ""; std::string vomsvo; for (std::list::const_iterator i = vomsfqans.begin(); i != vomsfqans.end(); ++i) { std::vector parts; Arc::tokenize(*i, parts, "/"); if (vomsvo.empty()) vomsvo = parts.at(0); if (parts.size() > 1 && parts.at(1).find("Role=") == 0) { return std::string(parts.at(0)+":"+parts.at(1).substr(5)); } } return std::string(vomsvo + ":null"); } DTR_ptr createDTRPtr(const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, const uid_t& uid, const std::list& logs, const std::string& logname) { return DTR_ptr(new DTR(source, destination, usercfg, jobid, uid, logs, logname)); } DTRLogger createDTRLogger(Arc::Logger& parent, const std::string& subdomain) { return DTRLogger(new Arc::Logger(parent, subdomain)); } } // namespace DataStaging nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DTRList.cpp0000644000000000000000000000013115067751327021426 xustar0030 mtime=1759498967.747527268 29 atime=1759498967.85949356 30 ctime=1759499028.822703741 nordugrid-arc-7.1.1/src/libs/data-staging/DTRList.cpp0000644000175000002070000002050115067751327023327 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "DTRList.h" namespace DataStaging { bool DTRList::add_dtr(DTR_ptr DTRToAdd) { Lock.lock(); DTRs.push_back(DTRToAdd); Lock.unlock(); // Added successfully return true; } bool DTRList::delete_dtr(DTR_ptr DTRToDelete) { Lock.lock(); DTRs.remove(DTRToDelete); Lock.unlock(); // Deleted successfully return true; } bool DTRList::filter_dtrs_by_owner(StagingProcesses OwnerToFilter, std::list& FilteredList){ std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->get_owner() == OwnerToFilter) FilteredList.push_back(*it); Lock.unlock(); // Filtered successfully return true; } int DTRList::number_of_dtrs_by_owner(StagingProcesses OwnerToFilter){ std::list::iterator it; int counter = 0; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->get_owner() == OwnerToFilter) counter++; Lock.unlock(); // Filtered successfully return counter; } bool DTRList::filter_dtrs_by_status(DTRStatus::DTRStatusType StatusToFilter, std::list& FilteredList){ std::vector StatusesToFilter(1, StatusToFilter); return filter_dtrs_by_statuses(StatusesToFilter, FilteredList); } bool 
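/*
 * A small usage sketch for the filtering API in this file (illustrative;
 * the chosen status is arbitrary):
 *
 *   DTRList dtr_list;
 *   dtr_list.add_dtr(dtr);
 *   std::vector<DTRStatus::DTRStatusType> states(1, DTRStatus::ERROR);
 *   std::list<DTR_ptr> failed;
 *   dtr_list.filter_dtrs_by_statuses(states, failed);
 */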
DTRList::filter_dtrs_by_statuses(const std::vector& StatusesToFilter, std::list& FilteredList){ std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { for (std::vector::const_iterator i = StatusesToFilter.begin(); i != StatusesToFilter.end(); ++i) { if((*it)->get_status().GetStatus() == *i) { FilteredList.push_back(*it); break; } } } Lock.unlock(); // Filtered successfully return true; } bool DTRList::filter_dtrs_by_statuses(const std::vector& StatusesToFilter, std::map >& FilteredList) { std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { for (std::vector::const_iterator i = StatusesToFilter.begin(); i != StatusesToFilter.end(); ++i) { if((*it)->get_status().GetStatus() == *i) { FilteredList[*i].push_back(*it); break; } } } Lock.unlock(); // Filtered successfully return true; } bool DTRList::filter_dtrs_by_next_receiver(StagingProcesses NextReceiver, std::list& FilteredList) { std::list::iterator it; switch(NextReceiver){ case PRE_PROCESSOR: { Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->is_destined_for_pre_processor()) FilteredList.push_back(*it); Lock.unlock(); return true; } case POST_PROCESSOR: { Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->is_destined_for_post_processor()) FilteredList.push_back(*it); Lock.unlock(); return true; } case DELIVERY: { Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->is_destined_for_delivery()) FilteredList.push_back(*it); Lock.unlock(); return true; } default: // A strange receiver requested return false; } } bool DTRList::filter_pending_dtrs(std::list& FilteredList){ std::list::iterator it; Arc::Time now; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it){ if( ((*it)->came_from_pre_processor() || (*it)->came_from_post_processor() || (*it)->came_from_delivery() || (*it)->came_from_generator()) && ((*it)->get_process_time() <= now) ) FilteredList.push_back(*it); } Lock.unlock(); // Filtered successfully return true; } bool DTRList::filter_dtrs_by_job(const std::string& jobid, std::list& FilteredList) { std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) if((*it)->get_parent_job_id() == jobid) FilteredList.push_back(*it); Lock.unlock(); // Filtered successfully return true; } void DTRList::check_priority_changes(const std::string& filename) { // Check for file with requested changes std::list prio_info; if (!Arc::FileRead(filename, prio_info)) return; Arc::FileCopy(filename, std::string(filename + ".read")); Arc::FileDelete(filename); std::map new_prio; for (std::list::const_iterator i = prio_info.begin(); i != prio_info.end(); ++i) { std::list tokens; Arc::tokenize(*i, tokens); unsigned int prio; if (tokens.size() == 2 && Arc::stringto(tokens.back(), prio)) { new_prio[tokens.front()] = prio; } } std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { if(new_prio.find((*it)->get_id()) != new_prio.end()) { (*it)->set_priority(new_prio[(*it)->get_id()]); } } Lock.unlock(); } void DTRList::caching_started(DTR_ptr request) { CachingLock.lock(); CachingSources[request->get_source_str()] = request->get_priority(); CachingLock.unlock(); } void DTRList::caching_finished(DTR_ptr request) { CachingLock.lock(); CachingSources.erase(request->get_source_str()); CachingLock.unlock(); } bool DTRList::is_being_cached(DTR_ptr DTRToCheck) { CachingLock.lock(); std::map::iterator i = CachingSources.find(DTRToCheck->get_source_str()); bool caching = (i != CachingSources.end()); // 
If already caching, find the DTR and increase its priority if necessary if (caching && i->second < DTRToCheck->get_priority()) { Lock.lock(); for(std::list::iterator it = DTRs.begin();it != DTRs.end(); ++it) { if ((*it)->get_source_str() == DTRToCheck->get_source_str() && ((*it)->get_status() != DTRStatus::CACHE_WAIT && (*it)->get_status() != DTRStatus::CHECK_CACHE)) { (*it)->get_logger()->msg(Arc::INFO, "Boosting priority from %i to %i due to incoming higher priority DTR", (*it)->get_priority(), DTRToCheck->get_priority()); (*it)->set_priority(DTRToCheck->get_priority()); CachingSources[DTRToCheck->get_source_str()] = DTRToCheck->get_priority(); } } Lock.unlock(); } CachingLock.unlock(); return caching; } bool DTRList::empty() { Lock.lock(); bool empty = DTRs.empty(); Lock.unlock(); return empty; } unsigned int DTRList::size() { Lock.lock(); unsigned int size = DTRs.size(); Lock.unlock(); return size; } std::list DTRList::all_jobs() { std::list alljobs; std::list::iterator it; Lock.lock(); for(it = DTRs.begin();it != DTRs.end(); ++it) { std::list::iterator i = alljobs.begin(); for (; i != alljobs.end(); ++i) { if (*i == (*it)->get_parent_job_id()) break; } if (i == alljobs.end()) alljobs.push_back((*it)->get_parent_job_id()); } Lock.unlock(); return alljobs; } void DTRList::dumpState(const std::string& path) { // only files supported for now - simply overwrite path std::string data; Lock.lock(); for(std::list::iterator it = DTRs.begin();it != DTRs.end(); ++it) { data += (*it)->get_id() + " " + (*it)->get_status().str() + " " + Arc::tostring((*it)->get_priority()) + " " + (*it)->get_transfer_share(); // add destination for recovery after crash if ((*it)->get_status() == DTRStatus::TRANSFERRING || (*it)->get_status() == DTRStatus::TRANSFER) { data += " " + (*it)->get_destination()->CurrentLocation().fullstr(); data += " " + (*it)->get_delivery_endpoint().Host(); } data += "\n"; } Lock.unlock(); Arc::FileCreate(path, data); } } // namespace DataStaging nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/TransferShares.h0000644000000000000000000000013115067751327022540 xustar0030 mtime=1759498967.748491873 29 atime=1759498967.86149359 30 ctime=1759499028.814437584 nordugrid-arc-7.1.1/src/libs/data-staging/TransferShares.h0000644000175000002070000001171015067751327024443 0ustar00mockbuildmock00000000000000#ifndef TRANSFERSHARES_H_ #define TRANSFERSHARES_H_ #include #include "DTR.h" namespace DataStaging { /// TransferSharesConf describes the configuration of TransferShares. /** * It allows reference shares to be defined with certain priorities. An * instance of this class is used when creating a TransferShares object. * \ingroup datastaging * \headerfile TransferShares.h arc/data-staging/TransferShares.h */ class TransferSharesConf { public: /// The criterion for assigning a share to a DTR enum ShareType { /// Shares are defined per DN of the user's proxy USER, /// Shares are defined per VOMS VO of the user's proxy VO, /// Shares are defined per VOMS group of the user's proxy GROUP, /// Shares are defined per VOMS role of the user's proxy ROLE, /// No share criterion - all DTRs will be assigned to a single share NONE }; private: /// ReferenceShares are special shares defined in the configuration with /// specific priorities. The "_default" share must always be defined. 
std::map ReferenceShares; /// Configured share type ShareType shareType; public: /// Construct a new TransferSharesConf with given share type and reference shares TransferSharesConf(const std::string& type, const std::map& ref_shares); /// Construct a new TransferSharesConf with no defined shares or policy TransferSharesConf(); /// Set the share type void set_share_type(const std::string& type); /// Add a reference share void set_reference_share(const std::string& RefShare, int Priority); /// Set reference shares void set_reference_shares(const std::map& shares); /// Returns true if the given share is a reference share bool is_configured(const std::string& ShareToCheck); /// Get the priority of this share int get_basic_priority(const std::string& ShareToCheck); /// Return human-readable configuration of shares std::string conf() const; /// Get the name of the share the DTR should be assigned to and the proxy type std::string extract_share_info(DTR_ptr DTRToExtract); }; /// TransferShares is used to implement fair-sharing and priorities. /** * TransferShares defines the algorithm used to prioritise and share * transfers among different users or groups. Configuration information on * the share type and reference shares is held in a TransferSharesConf * instance. The Scheduler uses TransferShares to determine which DTRs in the * queue for each process go first. The calculation is based on the * configuration and the currently active shares (the DTRs already in the * process). can_start() is the method called by the Scheduler to * determine whether a particular share has an available slot in the process. * \ingroup datastaging * \headerfile TransferShares.h arc/data-staging/TransferShares.h */ class TransferShares { private: /// Configuration of share type and reference shares TransferSharesConf conf; /// Shares which are active, ie running or in the queue, and number of DTRs std::map ActiveShares; /// How many transfer slots each active share can grab std::map ActiveSharesSlots; public: /// Create a new TransferShares with default configuration TransferShares() {}; /// Create a new TransferShares with given configuration TransferShares(const TransferSharesConf& shares_conf); /// Empty destructor ~TransferShares(){}; /// Set a new configuration, if a new reference share gets added for example void set_shares_conf(const TransferSharesConf& share_conf); /// Calculate how many slots to assign to each active share. /** * This method is called each time the Scheduler loops to calculate the * number of slots to assign to each share, based on the current number * of active shares and the shares' relative priorities. */ void calculate_shares(int TotalNumberOfSlots); /// Increase by one the active count for the given share. Called when a new DTR enters the queue. void increase_transfer_share(const std::string& ShareToIncrease); /// Decrease by one the active count for the given share. Called when a completed DTR leaves the queue. void decrease_transfer_share(const std::string& ShareToDecrease); /// Decrease by one the number of slots available to the given share. /** * Called when there is a slot already used by this share to reduce the * number available. 
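     *
     * A minimal sketch of the calling sequence, mirroring how the Scheduler
     * is described to use this class (illustrative only; the slot count is
     * made up and the built-in "_default" share is used):
     * \code
     * TransferSharesConf conf;                    // default configuration
     * TransferShares shares(conf);
     * shares.increase_transfer_share("_default"); // DTR enters the queue
     * shares.calculate_shares(10);                // 10 transfer slots in total
     * if (shares.can_start("_default")) {
     *   // hand one DTR from this share to delivery and account for the slot
     *   shares.decrease_number_of_slots("_default");
     * }
     * shares.decrease_transfer_share("_default"); // DTR leaves the queue
     * \endcode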
*/ void decrease_number_of_slots(const std::string& ShareToDecrease); /// Returns true if there is a slot available for the given share bool can_start(const std::string& ShareToStart); /// Returns the map of active shares std::map active_shares() const; }; // class TransferShares } // namespace DataStaging #endif /* TRANSFERSHARES_H_ */ nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/Processor.cpp0000644000000000000000000000013215067751327022121 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.824868737 nordugrid-arc-7.1.1/src/libs/data-staging/Processor.cpp0000644000175000002070000011616315067751327024033 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "DTRStatus.h" #include "Processor.h" namespace DataStaging { /** Set up logging. Should be called at the start of each thread method. */ void setUpLogger(DTR_ptr request) { // Move DTR destinations from DTR logger to Root logger to catch all messages. // disconnect this thread's root logger Arc::Logger::getRootLogger().setThreadContext(); request->get_logger()->setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().addDestinations(request->get_logger()->getDestinations()); request->get_logger()->removeDestinations(); } /* Thread methods for each state of the DTR */ void Processor::DTRCheckCache(void* arg) { ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // IMPORTANT: This method creates a lock on the cached file for // this DTR. It must be released at some point using ProcessCache // Create cache using configuration Arc::FileCache cache(request->get_cache_parameters().cache_dirs, request->get_cache_parameters().drain_cache_dirs, request->get_cache_parameters().readonly_cache_dirs, request->get_parent_job_id(), request->get_local_user().get_uid(), request->get_local_user().get_gid()); if (!cache) { request->get_logger()->msg(Arc::ERROR, "Error creating cache"); request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to create cache"); request->set_status(DTRStatus::CACHE_CHECKED); DTR::push(request, SCHEDULER); return; } // DN is used for checking cache permissions std::string dn = request->get_credential_info().getDN(); Arc::Time exp_time = request->get_credential_info().getExpiryTime(); std::string canonic_url(request->get_source()->GetURL().plainstr()); std::string cacheoption(request->get_source()->GetURL().Option("cache")); // add guid if present // TODO handle guids better in URL class so we don't need to care here if (!request->get_source()->GetURL().MetaDataOption("guid").empty()) canonic_url += ":guid=" + request->get_source()->GetURL().MetaDataOption("guid"); bool is_in_cache = false; bool is_locked = false; // check for forced re-download option bool renew = (cacheoption == "renew"); if (renew) request->get_logger()->msg(Arc::VERBOSE, "Forcing re-download of file %s", canonic_url); for (;;) { if (!cache.Start(canonic_url, is_in_cache, is_locked, renew)) { if (is_locked) { request->get_logger()->msg(Arc::WARNING, "Cached file is locked - should retry"); request->set_cache_state(CACHE_LOCKED); request->set_status(DTRStatus::CACHE_WAIT); // set a flat wait time with some randomness, fine-grained to minimise lock clashes // this may change in future eg be taken from configuration or increase over time time_t 
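/*
 * Worked example of the computation below: cache_wait_time starts at 10s,
 * (rand() % 10) - 5 gives an offset in [-5,4], so the final wait is
 * 5-14 seconds, plus 0-999ms from the nanosecond term
 * ((rand() % 1000) * 1000000 ns).
 */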
cache_wait_time = 10; time_t randomness = (rand() % cache_wait_time) - (cache_wait_time/2); cache_wait_time += randomness; // add random number of milliseconds uint32_t nano_randomness = (rand() % 1000) * 1000000; Arc::Period cache_wait_period(cache_wait_time, nano_randomness); request->get_logger()->msg(Arc::INFO, "Will wait around %is", cache_wait_time); request->set_process_time(cache_wait_period); DTR::push(request, SCHEDULER); return; } request->get_logger()->msg(Arc::ERROR, "Failed to initiate cache"); request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to initiate cache"); break; } request->set_cache_file(cache.File(canonic_url)); if (is_in_cache) { // Whether cache file is outdated bool outdated = (cacheoption != "invariant"); // Check source if requested if (cacheoption == "check") { request->get_logger()->msg(Arc::INFO, "Force-checking source of cache file %s", cache.File(canonic_url)); Arc::DataStatus cres = request->get_source()->Check(true); if (!cres.Passed()) { request->get_logger()->msg(Arc::ERROR, "Source check requested but failed: %s", std::string(cres)); // Try again skipping cache, maybe this is not worth it request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to check source for " + canonic_url + ": " + std::string(cres)); break; } } else { // just need to check permissions request->get_logger()->msg(Arc::INFO, "File %s is cached (%s) - checking permissions", canonic_url, cache.File(canonic_url)); // check the list of cached DNs if (cache.CheckDN(canonic_url, dn)) { outdated = false; // If DN is cached then don't check creation date } else { Arc::DataStatus cres = request->get_source()->Check(cacheoption != "invariant"); if (!cres.Passed()) { request->get_logger()->msg(Arc::ERROR, "Permission checking failed, will try downloading without using cache"); request->set_cache_state(CACHE_SKIP); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to check cache permissions for " + canonic_url + ": " + std::string(cres)); break; } cache.AddDN(canonic_url, dn, exp_time); } } request->get_logger()->msg(Arc::INFO, "Permission checking passed"); // check if file is fresh enough if (request->get_source()->CheckModified() && cache.CheckCreated(canonic_url)) { Arc::Time sourcetime = request->get_source()->GetModified(); Arc::Time cachetime = cache.GetCreated(canonic_url); request->get_logger()->msg(Arc::VERBOSE, "Source modification date: %s", sourcetime.str()); request->get_logger()->msg(Arc::VERBOSE, "Cache creation date: %s", cachetime.str()); if (sourcetime <= cachetime) outdated = false; } if (outdated) { request->get_logger()->msg(Arc::INFO, "Cached file is outdated, will re-download"); renew = true; continue; } // cached file is present and valid request->get_logger()->msg(Arc::VERBOSE, "Cached copy is still valid"); request->set_cache_state(CACHE_ALREADY_PRESENT); } else { // file is not there but we are ready to download it request->get_logger()->msg(Arc::VERBOSE, "Will download to cache file %s", request->get_cache_file()); request->set_cache_state(CACHEABLE); } break; } request->set_status(DTRStatus::CACHE_CHECKED); DTR::push(request, SCHEDULER); } void Processor::DTRResolve(void* arg) { // call request->source.Resolve() to get replicas // call request->destination.Resolve() to check supplied replicas // call request->destination.PreRegister() to lock 
destination LFN ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // check for source replicas if (request->get_source()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Looking up source replicas"); Arc::DataStatus res = request->get_source()->Resolve(true); if (!res.Passed() || !request->get_source()->HaveLocations() || !request->get_source()->LocationValid()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Could not resolve any source replicas for " + request->get_source()->str() + ": " + std::string(res)); request->set_status(DTRStatus::RESOLVED); DTR::push(request, SCHEDULER); return; } } // If overwrite is requested, the resolving and pre-registering of the // destination will be done in the pre-clean stage after deleting. if (!request->is_replication() && request->get_destination()->GetURL().Option("overwrite") == "yes") { request->set_status(DTRStatus::RESOLVED); DTR::push(request, SCHEDULER); return; } // Check replicas supplied for destination if (request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Resolving destination replicas"); Arc::DataStatus res = request->get_destination()->Resolve(false); if (!res.Passed() || !request->get_destination()->HaveLocations() || !request->get_destination()->LocationValid()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Could not resolve any destination replicas for " + request->get_destination()->str() + ": " + std::string(res)); request->set_status(DTRStatus::RESOLVED); DTR::push(request, SCHEDULER); return; } } // check that replication is possible if (request->is_replication()) { // we do not want to replicate to same physical file request->get_destination()->RemoveLocations(*(request->get_source())); if (!request->get_destination()->HaveLocations()) { request->get_logger()->msg(Arc::ERROR, "No locations for destination different from source found"); request->set_error_status(DTRErrorStatus::SELF_REPLICATION_ERROR, DTRErrorStatus::NO_ERROR_LOCATION, "No locations for destination different from source found for " + request->get_destination()->str()); request->set_status(DTRStatus::RESOLVED); DTR::push(request, SCHEDULER); return; } } // pre-register destination if (request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Pre-registering destination in index service"); Arc::DataStatus res = request->get_destination()->PreRegister(request->is_replication(), request->is_force_registration()); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Could not pre-register destination " + request->get_destination()->str() + ": " + std::string(res)); } } // finished with resolving - send back to scheduler request->set_status(DTRStatus::RESOLVED); DTR::push(request, SCHEDULER); } void Processor::DTRBulkResolve(void* arg) { // call request->source.BulkResolve() to get replicas // NOTE only source resolution can be done in bulk BulkThreadArgument* targ = (BulkThreadArgument*)arg; std::list requests = targ->dtrs; delete targ; if (requests.empty()) return; std::list sources; for (std::list::iterator i = requests.begin(); i != requests.end(); ++i) { setUpLogger(*i); (*i)->get_logger()->msg(Arc::VERBOSE, "Resolving source replicas in bulk"); sources.push_back(&(*((*i)->get_source()))); // nasty... } // check for source replicas Arc::DataStatus res = requests.front()->get_source()->Resolve(true, sources); for (std::list::iterator i = requests.begin(); i != requests.end(); ++i) { DTR_ptr request = *i; if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Could not resolve any source replicas for " + request->get_source()->str() + ": " + std::string(res)); } else if (!request->get_source()->HaveLocations() || !request->get_source()->LocationValid()) { request->get_logger()->msg(Arc::ERROR, "No replicas found for %s", request->get_source()->str()); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "No replicas found for " + request->get_source()->str()); } request->set_status(DTRStatus::RESOLVED); DTR::push(request, SCHEDULER); } } void Processor::DTRQueryReplica(void* arg) { // check source is ok and obtain metadata ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::DataStatus res; request->get_logger()->msg(Arc::INFO, "Checking %s", request->get_source()->CurrentLocation().str()); if (request->get_source()->IsIndex()) { res = request->get_source()->CompareLocationMetadata(); } else { Arc::FileInfo file; res = request->get_source()->Stat(file, Arc::DataPoint::INFO_TYPE_CONTENT); } if (res == Arc::DataStatus::InconsistentMetadataError) { request->get_logger()->msg(Arc::ERROR, "Metadata of replica and index service differ"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Metadata of replica and index service differ for " + request->get_source()->CurrentLocation().str() + " and " + request->get_source()->str()); } else if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed checking source replica %s: %s", request->get_source()->CurrentLocation().str(), std::string(res) ); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed checking source replica " + request->get_source()->CurrentLocation().str() + ": " + std::string(res)); } else { // assign metadata to destination request->get_destination()->SetMeta(*request->get_source()); } // finished querying - send back to scheduler request->set_status(DTRStatus::REPLICA_QUERIED); DTR::push(request, SCHEDULER); } void Processor::DTRBulkQueryReplica(void* arg) { BulkThreadArgument* targ = (BulkThreadArgument*)arg; std::list requests = targ->dtrs; delete targ; if (requests.empty()) return; std::list sources; for (std::list::iterator i = requests.begin(); i != requests.end(); ++i) { setUpLogger(*i); (*i)->get_logger()->msg(Arc::VERBOSE, "Querying source replicas in bulk"); sources.push_back((*i)->get_source()->CurrentLocationHandle()); } // Query source std::list files; Arc::DataStatus res = sources.front()->Stat(files, sources, Arc::DataPoint::INFO_TYPE_CONTENT); std::list::const_iterator file = files.begin(); for (std::list::iterator i = requests.begin(); i != requests.end(); ++i, ++file) { DTR_ptr request = *i; if (!res.Passed() || files.size() != requests.size()) { request->get_logger()->msg(Arc::ERROR, "Failed checking source replica: %s", std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed checking source replica " + request->get_source()->CurrentLocation().str() + ": " + std::string(res)); } else if (!*file) { request->get_logger()->msg(Arc::ERROR, "Failed checking source replica"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed checking source replica " + request->get_source()->CurrentLocation().str()); } else if (request->get_source()->IsIndex() && !request->get_source()->CompareMeta(*(request->get_source()->CurrentLocationHandle()))) { request->get_logger()->msg(Arc::ERROR, "Metadata of replica and index service differ"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Metadata of replica and index service differ for " + request->get_source()->CurrentLocation().str() + " and " + request->get_source()->str()); } else { // assign metadata to destination request->get_destination()->SetMeta(*request->get_source()); } request->set_status(DTRStatus::REPLICA_QUERIED); DTR::push(request, SCHEDULER); } } void Processor::DTRPreClean(void *arg) { // If overwrite is requested, for physical files call Remove() // for index services delete entry and all existing replicas // only if the entry already exists. 
Otherwise check if a remote // destination exists and fail if it does ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::DataStatus res = Arc::DataStatus::Success; if (!request->is_replication() && (request->get_destination()->GetURL().Option("overwrite") == "yes" || request->get_destination()->CurrentLocation().Option("overwrite") == "yes")) { request->get_logger()->msg(Arc::VERBOSE, "Overwrite requested - will pre-clean destination"); if (!request->get_destination()->IsIndex()) { request->get_logger()->msg(Arc::INFO, "Removing %s", request->get_destination()->CurrentLocation().str()); res = request->get_destination()->Remove(); } else { // get existing locations Arc::DataHandle dest(request->get_destination()->GetURL(), request->get_destination()->GetUserConfig()); request->get_logger()->msg(Arc::VERBOSE, "Finding existing destination replicas"); res = dest->Resolve(true); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); } else { if (dest->HaveLocations()) { while (dest->LocationValid()) { request->get_logger()->msg(Arc::INFO, "Removing %s", dest->CurrentLocation().str()); res = dest->Remove(); if (!res.Passed()) { // if we fail to delete one replica then bail out request->get_logger()->msg(Arc::ERROR, "Failed to delete replica %s: %s", dest->CurrentLocation().str(), std::string(res)); break; } // unregister this replica from the index // not critical if this fails as will be removed in the next step dest->Unregister(false); // next replica dest->RemoveLocation(); } } if (!dest->HaveLocations()) { // all replicas were deleted successfully, now unregister the LFN request->get_logger()->msg(Arc::INFO, "Unregistering %s", dest->str()); res = dest->Unregister(true); } } // if deletion was successful resolve destination and pre-register if (!dest->HaveLocations()) { request->get_logger()->msg(Arc::VERBOSE, "Resolving destination replicas"); res = request->get_destination()->Resolve(false); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); } else { request->get_logger()->msg(Arc::VERBOSE, "Pre-registering destination"); res = request->get_destination()->PreRegister(false, request->is_force_registration()); } } } if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to pre-clean destination: %s", std::string(res)); request->set_error_status(DTRErrorStatus::TEMPORARY_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to pre-clean destination " + request->get_destination()->str() + ": " + std::string(res)); } } else if (!request->get_destination()->Local() && !request->get_destination()->IsIndex()) { Arc::FileInfo file; res = request->get_destination()->Stat(file, Arc::DataPoint::INFO_TYPE_MINIMAL); if (res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Destination already exists"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Destination " + request->get_destination()->str() + " already exists"); } // We should check the error was no such file but just report all errors as ok } request->set_status(DTRStatus::PRE_CLEANED); DTR::push(request, SCHEDULER); } void Processor::DTRStagePrepare(void* arg) { // Only valid for stageable (SRM-like) protocols. 
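// A rough sketch of the staging pattern implemented below for the source
// side (illustrative only; variable names are made up and error handling
// is omitted):
//
//   unsigned int wait = 10;
//   Arc::DataStatus res = source->PrepareReading(0, wait);
//   if (res == Arc::DataStatus::ReadPrepareWait) {
//     // not staged yet - re-enter this state after 'wait' seconds
//   } else if (res.Passed()) {
//     // staged: TransferLocations() now holds the physical TURL(s)
//   }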
// Call request->source.PrepareReading() to get TURL for reading or query status of request // and/or request->destination.PrepareWriting() to get TURL for writing or query status of request ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // first source - if stageable and not already staged yet if (request->get_source()->IsStageable() && request->get_source()->TransferLocations().empty()) { // give default wait time for cases where no wait time is given by the remote service unsigned int source_wait_time = 10; request->get_logger()->msg(Arc::VERBOSE, "Preparing to stage source"); Arc::DataStatus res = request->get_source()->PrepareReading(0, source_wait_time); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "Failed to prepare source " + request->get_source()->CurrentLocation().str() + ": " + std::string(res)); } else if (res == Arc::DataStatus::ReadPrepareWait) { // if timeout then don't wait - scheduler will deal with it immediately if (Arc::Time() < request->get_timeout()) { if (source_wait_time > 60) source_wait_time = 60; request->set_process_time(source_wait_time); request->get_logger()->msg(Arc::VERBOSE, "Source is not ready, will wait %u seconds", source_wait_time); } request->set_status(DTRStatus::STAGING_PREPARING_WAIT); } else { if (request->get_source()->TransferLocations().empty()) { request->get_logger()->msg(Arc::ERROR, "No physical files found for source"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_SOURCE, "No physical files found for source " + request->get_source()->CurrentLocation().str()); } else { // TODO order physical files according to eg preferred pattern } } } if (request->error()) { request->set_status(DTRStatus::STAGED_PREPARED); DTR::push(request, SCHEDULER); return; } // now destination - if stageable and not already staged yet if (request->get_destination()->IsStageable() && request->get_destination()->TransferLocations().empty()) { // give default wait time for cases where no wait time is given by the remote service unsigned int dest_wait_time = 10; request->get_logger()->msg(Arc::VERBOSE, "Preparing to stage destination"); Arc::DataStatus res = request->get_destination()->PrepareWriting(0, dest_wait_time); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to prepare destination " + request->get_destination()->CurrentLocation().str() + ": " + std::string(res)); } else if (res == Arc::DataStatus::WritePrepareWait) { // if timeout then don't wait - scheduler will deal with it immediately if (Arc::Time() < request->get_timeout()) { if (dest_wait_time > 60) dest_wait_time = 60; request->set_process_time(dest_wait_time); request->get_logger()->msg(Arc::VERBOSE, "Destination is not ready, will wait %u seconds", dest_wait_time); } request->set_status(DTRStatus::STAGING_PREPARING_WAIT); } else { if (request->get_destination()->TransferLocations().empty()) { request->get_logger()->msg(Arc::ERROR, "No physical files found for destination"); request->set_error_status(DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "No physical files found for destination " + request->get_destination()->CurrentLocation().str()); } else { // TODO choose best physical file } } } // set to staged prepared if we don't have to wait for source or destination if (request->get_status() != DTRStatus::STAGING_PREPARING_WAIT) request->set_status(DTRStatus::STAGED_PREPARED); DTR::push(request, SCHEDULER); } void Processor::DTRReleaseRequest(void* arg) { // only valid for stageable (SRM-like) protocols. call request->source.FinishReading() and/or // request->destination.FinishWriting() to release or abort requests ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::DataStatus res; if (request->get_source()->IsStageable()) { request->get_logger()->msg(Arc::VERBOSE, "Releasing source"); res = request->get_source()->FinishReading(request->error() || request->cancel_requested()); if (!res.Passed()) { // an error here is not critical to the transfer request->get_logger()->msg(Arc::WARNING, "There was a problem during post-transfer source handling: %s", std::string(res)); } } if (request->get_destination()->IsStageable()) { request->get_logger()->msg(Arc::VERBOSE, "Releasing destination"); res = request->get_destination()->FinishWriting(request->error() || request->cancel_requested()); if (!res.Passed()) { if (request->error()) { request->get_logger()->msg(Arc::WARNING, "There was a problem during post-transfer destination handling after error: %s", std::string(res)); } else { request->get_logger()->msg(Arc::ERROR, "Error with post-transfer destination handling: %s", std::string(res)); request->set_error_status(res.Retryable() ? 
DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Error with post-transfer destination handling of " + request->get_destination()->CurrentLocation().str() + ": " + std::string(res)); } } } request->set_status(DTRStatus::REQUEST_RELEASED); DTR::push(request, SCHEDULER); } void Processor::DTRFinaliseReplica(void* arg) { // Call the source index service to tidy up connections, send traces etc ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); if (request->get_source()->IsIndex()) { request->get_logger()->msg(Arc::VERBOSE, "Finalising current replica %s", request->get_source()->CurrentLocation().str()); request->get_source()->Finalise(request->get_error_status().GetDesc(), request->get_credential_info().getDN()); } request->set_status(DTRStatus::REPLICA_FINALISED); DTR::push(request, SCHEDULER); } void Processor::DTRRegisterReplica(void* arg) { // call request->destination.Register() to add new replica and metadata for normal workflow // call request->destination.PreUnregister() to delete LFN placed during // RESOLVE stage for error workflow ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); // TODO: If the copy completed before request was cancelled, unregistering // here will lead to dark data. Need to check for successful copy if (request->error() || request->cancel_requested()) { request->get_logger()->msg(Arc::VERBOSE, "Removing pre-registered destination in index service"); Arc::DataStatus res = request->get_destination()->PreUnregister(request->is_replication()); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to unregister pre-registered destination %s: %s." " You may need to unregister it manually", request->get_destination()->str(), std::string(res)); } } else { request->get_logger()->msg(Arc::VERBOSE, "Registering destination replica"); Arc::DataStatus res = request->get_destination()->PostRegister(request->is_replication()); if (!res.Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to register destination replica: %s", std::string(res)); if (!request->get_destination()->PreUnregister(request->is_replication()).Passed()) { request->get_logger()->msg(Arc::ERROR, "Failed to unregister pre-registered destination %s." " You may need to unregister it manually", request->get_destination()->str()); } request->set_error_status(res.Retryable() ? DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DTRErrorStatus::PERMANENT_REMOTE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Could not post-register destination " + request->get_destination()->str() + ": " + std::string(res)); } } // finished with registration - send back to scheduler request->set_status(DTRStatus::REPLICA_REGISTERED); DTR::push(request, SCHEDULER); } void Processor::DTRProcessCache(void* arg) { // link or copy cached file to session dir, or release locks in case // of error or deciding not to use cache (for example because of a mapped link) ThreadArgument* targ = (ThreadArgument*)arg; DTR_ptr request = targ->dtr; delete targ; setUpLogger(request); Arc::FileCache cache(request->get_cache_parameters().cache_dirs, request->get_cache_parameters().drain_cache_dirs, request->get_cache_parameters().readonly_cache_dirs, request->get_parent_job_id(), request->get_local_user().get_uid(), request->get_local_user().get_gid()); if (!cache) { request->get_logger()->msg(Arc::ERROR, "Error creating cache. 
Stale locks may remain."); request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to create cache for " + request->get_source()->str()); request->set_status(DTRStatus::CACHE_PROCESSED); DTR::push(request, SCHEDULER); return; } std::string canonic_url(request->get_source()->GetURL().plainstr()); // add guid if present if (!request->get_source()->GetURL().MetaDataOption("guid").empty()) canonic_url += ":guid=" + request->get_source()->GetURL().MetaDataOption("guid"); // don't link if error, cancellation or cache not being used if (request->error() || request->cancel_requested() || request->get_cache_state() == CACHE_NOT_USED) { // release locks if they were acquired if (request->get_cache_state() == CACHEABLE || request->get_cache_state() == CACHE_NOT_USED) { if (request->error() || request->cancel_requested()) { cache.StopAndDelete(canonic_url); } else { cache.Stop(canonic_url); } } request->set_status(DTRStatus::CACHE_PROCESSED); DTR::push(request, SCHEDULER); return; } // check options for whether to copy or link bool executable = (request->get_source()->GetURL().Option("exec") == "yes") ? true : false; bool cache_copy = (request->get_source()->GetURL().Option("cache") == "copy") ? true : false; request->get_logger()->msg(Arc::INFO, "Linking/copying cached file to %s", request->get_destination()->CurrentLocation().Path()); bool was_downloaded = (request->get_cache_state() == CACHE_DOWNLOADED) ? true : false; if (was_downloaded) { // Add DN to cached permissions std::string dn = request->get_credential_info().getDN(); Arc::Time exp_time = request->get_credential_info().getExpiryTime(); cache.AddDN(canonic_url, dn, exp_time); } bool try_again = false; if (!cache.Link(request->get_destination()->CurrentLocation().Path(), canonic_url, cache_copy, executable, was_downloaded, try_again)) { if (try_again) { // set cache status to CACHE_LOCKED, so that the Scheduler will try again request->set_cache_state(CACHE_LOCKED); request->get_logger()->msg(Arc::WARNING, "Failed linking cache file to %s", request->get_destination()->CurrentLocation().Path()); } else { request->get_logger()->msg(Arc::ERROR, "Error linking cache file to %s.", request->get_destination()->CurrentLocation().Path()); } request->set_error_status(DTRErrorStatus::CACHE_ERROR, DTRErrorStatus::ERROR_DESTINATION, "Failed to link/copy cache file to session dir"); } if (was_downloaded) cache.Stop(canonic_url); request->set_status(DTRStatus::CACHE_PROCESSED); DTR::push(request, SCHEDULER); } /* main process method called from DTR::push() */ void Processor::receiveDTR(DTR_ptr request) { BulkThreadArgument* bulk_arg = NULL; ThreadArgument* arg = NULL; // first deal with bulk if (request->get_bulk_end()) { // end of bulk request->get_logger()->msg(Arc::VERBOSE, "Adding to bulk request"); request->set_bulk_end(false); bulk_list.push_back(request); bulk_arg = new BulkThreadArgument(this, bulk_list); bulk_list.clear(); } else if (request->get_bulk_start() || !bulk_list.empty()) { // filling bulk list request->get_logger()->msg(Arc::VERBOSE, "Adding to bulk request"); bulk_list.push_back(request); if (request->get_bulk_start()) request->set_bulk_start(false); } else { // non-bulk request arg = new ThreadArgument(this, request); } // switch through the expected DTR states switch (request->get_status().GetStatus()) { // pre-processor states case DTRStatus::CHECK_CACHE: { request->set_status(DTRStatus::CHECKING_CACHE); Arc::CreateThreadFunction(&DTRCheckCache, (void*)arg, &thread_count); }; break; case 
DTRStatus::RESOLVE: { request->set_status(DTRStatus::RESOLVING); if (bulk_arg) Arc::CreateThreadFunction(&DTRBulkResolve, (void*)bulk_arg, &thread_count); else if (arg) Arc::CreateThreadFunction(&DTRResolve, (void*)arg, &thread_count); }; break; case DTRStatus::QUERY_REPLICA: { request->set_status(DTRStatus::QUERYING_REPLICA); if (bulk_arg) Arc::CreateThreadFunction(&DTRBulkQueryReplica, (void*)bulk_arg, &thread_count); else if (arg) Arc::CreateThreadFunction(&DTRQueryReplica, (void*)arg, &thread_count); }; break; case DTRStatus::PRE_CLEAN: { request->set_status(DTRStatus::PRE_CLEANING); Arc::CreateThreadFunction(&DTRPreClean, (void*)arg, &thread_count); }; break; case DTRStatus::STAGE_PREPARE: { request->set_status(DTRStatus::STAGING_PREPARING); Arc::CreateThreadFunction(&DTRStagePrepare, (void*)arg, &thread_count); }; break; // post-processor states case DTRStatus::RELEASE_REQUEST: { request->set_status(DTRStatus::RELEASING_REQUEST); Arc::CreateThreadFunction(&DTRReleaseRequest, (void*)arg, &thread_count); }; break; case DTRStatus::FINALISE_REPLICA: { request->set_status(DTRStatus::FINALISING_REPLICA); Arc::CreateThreadFunction(&DTRFinaliseReplica, (void*)arg, &thread_count); }; break; case DTRStatus::REGISTER_REPLICA: { request->set_status(DTRStatus::REGISTERING_REPLICA); Arc::CreateThreadFunction(&DTRRegisterReplica, (void*)arg, &thread_count); }; break; case DTRStatus::PROCESS_CACHE: { request->set_status(DTRStatus::PROCESSING_CACHE); Arc::CreateThreadFunction(&DTRProcessCache, (void*)arg, &thread_count); }; break; default: { // unexpected state - report error request->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR, DTRErrorStatus::ERROR_UNKNOWN, "Received a DTR in an unexpected state ("+request->get_status().str()+") in processor"); DTR::push(request, SCHEDULER); if (arg) delete arg; if (bulk_arg) delete bulk_arg; }; break; } } void Processor::start(void) { } void Processor::stop(void) { // threads are short lived so wait for them to complete rather than interrupting thread_count.wait(60*1000); } } // namespace DataStaging nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DTRStatus.h0000644000000000000000000000013215067751327021444 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.811379988 nordugrid-arc-7.1.1/src/libs/data-staging/DTRStatus.h0000644000175000002070000002215215067751327023350 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_DTRSTATUS_H__ #define __ARC_DTRSTATUS_H__ #include #include namespace DataStaging { /// Class representing the status of a DTR. /** * \ingroup datastaging * \headerfile DTRStatus.h arc/data-staging/DTRStatus.h */ class DTRStatus { public: /// Possible state values enum DTRStatusType { // ORDER IS IMPORTANT!! 
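// Note (editorial comment, inferred from this header itself): the
// *_ING "processing" values must stay aligned with their corresponding
// "to be processed" values, because the ToProcessStates and
// ProcessingStates vectors declared further down are required to match
// element-by-element.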
/// Just created NEW, /// Check the cache since the file may already be there CHECK_CACHE, /// Checking the cache CHECKING_CACHE, /// Cache file is locked, waiting for its release CACHE_WAIT, /// Cache check completed CACHE_CHECKED, /// Resolve a meta-protocol RESOLVE, /// Resolving replicas RESOLVING, /// Replica resolution completed RESOLVED, /// Query a replica QUERY_REPLICA, /// Replica is being queried QUERYING_REPLICA, /// Replica was queried REPLICA_QUERIED, /// The destination should be deleted PRE_CLEAN, /// Deleting the destination PRE_CLEANING, /// The destination file has been deleted PRE_CLEANED, /// Prepare or stage the source and/or destination STAGE_PREPARE, /// Making a staging or preparing request STAGING_PREPARING, /// Wait for the status of the staging/preparing request STAGING_PREPARING_WAIT, /// Staging/preparing request completed STAGED_PREPARED, /// Transfer ready and can be started TRANSFER, /// Transfer is ongoing TRANSFERRING, /// Transfer is ongoing but scheduled for cancellation TRANSFERRING_CANCEL, /// Transfer completed TRANSFERRED, /// Transfer finished, release requests on the storage RELEASE_REQUEST, /// Releasing staging/preparing request RELEASING_REQUEST, /// Release of staging/preparing request completed REQUEST_RELEASED, /// Finalise replica (close catalog, send traces, etc) FINALISE_REPLICA, /// Finalising replica FINALISING_REPLICA, /// Replica finalised REPLICA_FINALISED, /// Register a new replica of the destination REGISTER_REPLICA, /// Registering a replica in an index service REGISTERING_REPLICA, /// Replica registration completed REPLICA_REGISTERED, /// Destination is cacheable, process cache PROCESS_CACHE, /// Releasing locks and copying/linking cache files to the session dir PROCESSING_CACHE, /// Cache processing completed CACHE_PROCESSED, /// Everything completed successfully DONE, /// Cancellation request fulfilled successfully CANCELLED, /// Cancellation request fulfilled but DTR also completed transfer successfully CANCELLED_FINISHED, /// Error occurred ERROR, /// "Stateless" DTR NULL_STATE }; /// Make new DTRStatus with given status and optional description.
DTRStatus(const DTRStatusType& status, std::string desc="") : status(status), desc(desc) {} /// Make new DTRStatus with default NEW status DTRStatus() : status(NEW), desc ("") {} /// Returns true if this status is the same as the given DTRStatusType bool operator==(const DTRStatusType& s) const { return status == s; } /// Returns true if this status is the same as the given DTRStatus bool operator==(const DTRStatus& s) const { return status == s.status; } /// Returns true if this status is not the same as the given DTRStatusType bool operator!=(const DTRStatusType& s) const { return status != s; } /// Returns true if this status is not the same as the given DTRStatus bool operator!=(const DTRStatus& s) const { return status != s.status; } /// Make a new DTRStatus with the same status as the given DTRStatusType DTRStatus& operator=(const DTRStatusType& s) { status = s; return *this; } /// Returns a string representation of the current state std::string str() const; /// Set the detailed description of the current state void SetDesc(const std::string& d) { desc = d; } /// Get the detailed description of the current state std::string GetDesc() const { return desc; } /// Get the DTRStatusType of the current state DTRStatusType GetStatus() const { return status; } // The actions in the following two vectors must match /// Vector of states with a to be processed action, eg CHECK_CACHE static const std::vector ToProcessStates; /// Vector of states with a processing action, eg CHECKING_CACHE static const std::vector ProcessingStates; /// Vector of states where a DTR is staged - used to limit the number of staged files static const std::vector StagedStates; private: /// status code DTRStatusType status; /// description set by the owner process std::string desc; }; // DTRStatus /// A class to represent error states reported by various components. 
/** * \ingroup datastaging * \headerfile DTRStatus.h arc/data-staging/DTRStatus.h */ class DTRErrorStatus { public: /// A list of error types enum DTRErrorStatusType { /// No error NONE_ERROR, /// Internal error in Data Staging logic INTERNAL_LOGIC_ERROR, /// Internal processing error, like losing contact with external process INTERNAL_PROCESS_ERROR, /// Attempt to replicate a file to itself SELF_REPLICATION_ERROR, /// Permanent error with cache CACHE_ERROR, /// Temporary error with remote service TEMPORARY_REMOTE_ERROR, /// Permanent error with remote service PERMANENT_REMOTE_ERROR, /// Error with local file LOCAL_FILE_ERROR, /// Transfer rate was too slow TRANSFER_SPEED_ERROR, /// Waited for too long to become staging STAGING_TIMEOUT_ERROR }; /// Describes where the error occurred enum DTRErrorLocation { /// No error NO_ERROR_LOCATION, /// Error with source ERROR_SOURCE, /// Error with destination ERROR_DESTINATION, /// Error during transfer not directly related to source or destination ERROR_TRANSFER, /// Error occurred in an unknown location ERROR_UNKNOWN }; /// Create a new DTRErrorStatus with given error states /** * @param status Type of error * @param error_state DTR state in which the error occurred * @param location Location of error (at source, destination or during transfer) * @param desc Text description of error */ DTRErrorStatus(DTRErrorStatusType status, DTRStatus::DTRStatusType error_state, DTRErrorLocation location, const std::string& desc = ""): error_status(status), last_error_state(error_state), error_location(location), desc(desc) {}; /// Create a new DTRErrorStatus with default none/null error states DTRErrorStatus() : error_status(NONE_ERROR), last_error_state(DTRStatus::NULL_STATE), error_location(NO_ERROR_LOCATION), desc("") {}; /// Returns the error type DTRErrorStatusType GetErrorStatus() const { return error_status; } /// Returns the state in which the error occurred DTRStatus::DTRStatusType GetLastErrorState() const { return last_error_state.GetStatus(); } /// Returns the location at which the error occurred DTRErrorLocation GetErrorLocation() const { return error_location; } /// Returns the error description std::string GetDesc() const { return desc; } /// Returns true if this error status is the same as the given DTRErrorStatusType bool operator==(const DTRErrorStatusType& s) const { return error_status == s; } /// Returns true if this error status is the same as the given DTRErrorStatus bool operator==(const DTRErrorStatus& s) const { return error_status == s.error_status; } /// Returns true if this error status is not the same as the given DTRErrorStatusType bool operator!=(const DTRErrorStatusType& s) const { return error_status != s; } /// Returns true if this error status is not the same as the given DTRErrorStatus bool operator!=(const DTRErrorStatus& s) const { return error_status != s.error_status; } /// Make a new DTRErrorStatus with the same error status as the given DTRErrorStatusType DTRErrorStatus& operator=(const DTRErrorStatusType& s) { error_status = s; return *this; } private: /// error state DTRErrorStatusType error_status; /// state that error occurred in DTRStatus last_error_state; /// place where the error occurred DTRErrorLocation error_location; /// description of error std::string desc; }; } // namespace DataStaging #endif /*__ARC_DTRSTATUS_H_*/ nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DataDeliveryComm.cpp0000644000000000000000000000013215067751327023333 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 
30 ctime=1759499028.818584725 nordugrid-arc-7.1.1/src/libs/data-staging/DataDeliveryComm.cpp0000644000175000002070000000661015067751327025240 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "DataDeliveryComm.h" #include "DataDeliveryRemoteComm.h" #include "DataDeliveryLocalComm.h" namespace DataStaging { DataDeliveryComm* DataDeliveryComm::CreateInstance(DTR_ptr dtr, const TransferParameters& params) { if (!dtr->get_delivery_endpoint() || dtr->get_delivery_endpoint() == DTR::LOCAL_DELIVERY) return new DataDeliveryLocalComm(dtr, params); return new DataDeliveryRemoteComm(dtr, params); } DataDeliveryComm::DataDeliveryComm(DTR_ptr dtr, const TransferParameters& params) : status_pos_(0),transfer_params(params),logger_(dtr->get_logger()),handler_(NULL) { } DataDeliveryCommHandler& DataDeliveryComm::GetHandler() { if(handler_) return *handler_; return *(handler_ = DataDeliveryCommHandler::getInstance(DeliveryId())); } DataDeliveryComm::Status DataDeliveryComm::GetStatus(void) const { std::unique_lock lock(*(const_cast(&lock_))); DataDeliveryComm::Status tmp = status_; return tmp; } bool DataDeliveryComm::CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg) { if (!dtr->get_delivery_endpoint() || dtr->get_delivery_endpoint() == DTR::LOCAL_DELIVERY) return DataDeliveryLocalComm::CheckComm(dtr, allowed_dirs, load_avg); return DataDeliveryRemoteComm::CheckComm(dtr, allowed_dirs, load_avg); } DataDeliveryCommHandler::DataDeliveryCommHandler(void) { std::unique_lock lock(lock_); Arc::CreateThreadFunction(&func,this); } void DataDeliveryCommHandler::Add(DataDeliveryComm* item) { std::unique_lock lock(lock_); items_.push_back(item); } void DataDeliveryCommHandler::Remove(DataDeliveryComm* item) { std::unique_lock lock(lock_); for(std::list::iterator i = items_.begin(); i!=items_.end();) { if(*i == item) { i=items_.erase(i); } else { ++i; } } } std::mutex DataDeliveryCommHandler::comm_lock; std::map DataDeliveryCommHandler::comm_handler; DataDeliveryCommHandler* DataDeliveryCommHandler::getInstance(std::string const & id) { std::unique_lock lock(comm_lock); std::map::iterator it = comm_handler.find(id); if(it != comm_handler.end()) return it->second; return (comm_handler[id] = new DataDeliveryCommHandler); } // This is a dedicated thread which periodically checks for // new state reported by comm instances and modifies states accordingly void DataDeliveryCommHandler::func(void* arg) { if(!arg) return; // disconnect from root logger since messages are logged to per-DTR Logger Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); // We do not need extremely low latency, so this // thread simply polls for data 2 times per second. 
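// The loop below never returns: on each iteration it takes the handler
// lock, polls every registered DataDeliveryComm via PullStatus(),
// releases the lock and sleeps for 500 ms, giving the twice-per-second
// poll rate mentioned above.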
DataDeliveryCommHandler& it = *(DataDeliveryCommHandler*)arg; for(;;) { { std::unique_lock lock(it.lock_); for(std::list::iterator i = it.items_.begin(); i != it.items_.end();++i) { DataDeliveryComm* comm = *i; if(comm) comm->PullStatus(); } } usleep(500000); } } } // namespace DataStaging nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DataDeliveryRemoteComm.h0000644000000000000000000000013215067751327024154 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.808187441 nordugrid-arc-7.1.1/src/libs/data-staging/DataDeliveryRemoteComm.h0000644000175000002070000000511215067751327026055 0ustar00mockbuildmock00000000000000#ifndef DATADELIVERYREMOTECOMM_H_ #define DATADELIVERYREMOTECOMM_H_ #include #include #include #include "DataDeliveryComm.h" namespace DataStaging { /// This class contacts a remote service to make a Delivery request. /** * \ingroup datastaging * \headerfile DataDeliveryRemoteComm.h arc/data-staging/DataDeliveryRemoteComm.h */ class DataDeliveryRemoteComm : public DataDeliveryComm { public: /// Send the transfer request to the remote service. DataDeliveryRemoteComm(DTR_ptr dtr, const TransferParameters& params); /// If transfer is still ongoing, sends a cancellation message to the service. virtual ~DataDeliveryRemoteComm(); /// Read status from service virtual void PullStatus(); /// Returns identifier of delivery handler - URL of delivery service. virtual std::string DeliveryId() const; /// Pings service to find allowed dirs static bool CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg); /// Returns true if service is still processing request virtual operator bool() const { return valid; }; /// Returns true if service is not processing request or down virtual bool operator!() const { return !valid; }; private: /// Connection to service Arc::ClientSOAP* client; /// Full DTR ID std::string dtr_full_id; /// Retries allowed after failing to query transfer status, so that a /// transfer is not lost due to a temporary communication problem. If a /// transfer fails to start it is handled by the normal DTR retries. int query_retries; /// MCC configuration for connecting to service Arc::MCCConfig cfg; /// Endpoint of remote delivery service Arc::URL endpoint; /// Connection timeout int timeout; /// Flag to say whether transfer is running and service is still up bool valid; /// Logger object (main log, not DTR's log) static Arc::Logger logger; /// Cancel a DTR by sending a cancel request to the service void CancelDTR(); /// Fill Status object with data in node. If the node is empty, fields are initialised /// to default values. void FillStatus(const Arc::XMLNode& node = Arc::XMLNode()); /// Set up delegation so the credentials can be used by the service bool SetupDelegation(Arc::XMLNode& op, const Arc::UserConfig& usercfg); /// Handle a fault during query of service.
Attempts to reconnect void HandleQueryFault(const std::string& err=""); }; } // namespace DataStaging #endif /* DATADELIVERYREMOTECOMM_H_ */ nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DataStagingDelivery.cpp0000644000000000000000000000013215067751327024034 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.828258793 nordugrid-arc-7.1.1/src/libs/data-staging/DataStagingDelivery.cpp0000644000175000002070000005465315067751327025753 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "DataDeliveryComm.h" using namespace Arc; static Arc::Logger logger(Arc::Logger::getRootLogger(), "DataDelivery"); static bool delivery_shutdown = false; static Arc::Time start_time; static void sig_shutdown(int) { if(delivery_shutdown) _exit(0); delivery_shutdown = true; } static void ReportStatus(DataStaging::DTRStatus::DTRStatusType st, DataStaging::DTRErrorStatus::DTRErrorStatusType err, DataStaging::DTRErrorStatus::DTRErrorLocation err_loc, const std::string& err_desc, unsigned long long int transferred, unsigned long long int size, Arc::Time transfer_start_time, const std::string& checksum = "") { static DataStaging::DataDeliveryComm::Status status; static unsigned int status_pos = 0; static bool status_changed = true; unsigned long long int transfer_time = 0; if (transfer_start_time != Arc::Time(0)) { Arc::Period p = Arc::Time() - transfer_start_time; transfer_time = p.GetPeriod() * 1000000000 + p.GetPeriodNanoseconds(); } // Filling status.commstatus = DataStaging::DataDeliveryComm::CommNoError; status.timestamp = ::time(NULL); status.status = st; status.error = err; status.error_location = err_loc; strncpy(status.error_desc,err_desc.c_str(),sizeof(status.error_desc)); status.streams = 0; status.transferred = transferred; status.size = size; status.transfer_time = transfer_time; status.offset = 0; status.speed = 0; strncpy(status.checksum, checksum.c_str(), sizeof(status.checksum)); if(status_pos == 0) { status_changed=true; }; if(status_changed) { for(;;) { ssize_t l = ::write(STDOUT_FILENO,((char*)&status)+status_pos,sizeof(status)-status_pos); if(l == -1) { // error, parent exited? break; } else if(l == 0) { // will happen if stdout is non-blocking break; } else { status_pos+=l; }; if(status_pos >= sizeof(status)) { status_pos=0; status_changed=false; break; }; }; }; } static unsigned long long int transfer_bytes = 0; static void ReportOngoingStatus(unsigned long long int bytes) { transfer_bytes = bytes; // Send report on stdout ReportStatus(DataStaging::DTRStatus::TRANSFERRING, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", bytes, 0, 0); // Log progress in log time_t t = Arc::Period(Arc::Time() - start_time).GetPeriod(); logger.msg(INFO, "%5u s: %10.1f kB %8.1f kB/s", (unsigned int)t, ((double)bytes) / 1024, (t == 0) ? 
0 : ((double)bytes) / 1024 / t); } static unsigned long long int GetFileSize(const DataPoint& source, const DataPoint& dest) { if(source.CheckSize()) return source.GetSize(); if(dest.CheckSize()) return dest.GetSize(); return 0; } int main(int argc,char* argv[]) { // log to stderr Arc::Logger::getRootLogger().setThreshold(Arc::VERBOSE); //TODO: configurable Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::EmptyFormat); Arc::Logger::getRootLogger().addDestination(logcerr); // Collecting parameters // --surl: source URL // --durl: destination URL // --sopt: any URL option, credential - path to file storing credentials // --dopt: any URL option, credential - path to file storing credentials // --topt: minspeed, minspeedtime, minavgspeed, maxinacttime, avgtime // --size: total size of data to be transferred // --cstype: checksum type to calculate // --csvalue: checksum value of source file to validate against // surl, durl, cstype and csvalue may be given only once // sopt, dopt, topt may be given multiple times // type of credentials is detected automatically, so far only // X.509 proxies or key+certificate are accepted std::string source_str; std::string dest_str; std::list source_opts; std::list dest_opts; std::list transfer_opts; std::string size; std::string checksum_type; std::string checksum_value; std::string source_cred_path; std::string dest_cred_path; std::string source_ca_path; std::string dest_ca_path; bool source_ca_system = false; bool source_ca_grid = true; bool dest_ca_system = false; bool dest_ca_grid = true; OptionParser opt; opt.AddOption(0,"surl","","source URL",source_str); opt.AddOption(0,"durl","","destination URL",dest_str); opt.AddOption(0,"sopt","","source options",source_opts); opt.AddOption(0,"dopt","","destination options",dest_opts); opt.AddOption(0,"topt","","transfer options",transfer_opts); opt.AddOption(0,"size","","total size",size); opt.AddOption(0,"cstype","","checksum type",checksum_type); opt.AddOption(0,"csvalue","","checksum value",checksum_value); if(opt.Parse(argc,argv).size() != 0) { logger.msg(ERROR, "Unexpected arguments"); return -1; }; if(source_str.empty()) { logger.msg(ERROR, "Source URL missing"); return -1; }; if(dest_str.empty()) { logger.msg(ERROR, "Destination URL missing"); return -1; }; URL source_url(source_str); if(!source_url) { logger.msg(ERROR, "Source URL not valid: %s", source_str); return -1; }; URL dest_url(dest_str); if(!dest_url) { logger.msg(ERROR, "Destination URL not valid: %s", dest_str); return -1; }; for(std::list::iterator o = source_opts.begin(); o != source_opts.end();++o) { std::string::size_type p = o->find('='); if(p == std::string::npos) { source_url.AddOption(*o); } else { std::string name = o->substr(0,p); if(name == "credential") { source_cred_path = o->substr(p+1); } else if(name == "ca") { source_ca_path = o->substr(p+1); } else if(name == "casystem") { source_ca_system = (o->substr(p+1) == "1"); } else if(name == "cagrid") { source_ca_grid = (o->substr(p+1) == "1"); } else { source_url.AddOption(*o); }; }; }; for(std::list::iterator o = dest_opts.begin(); o != dest_opts.end();++o) { std::string::size_type p = o->find('='); if(p == std::string::npos) { dest_url.AddOption(*o); } else { std::string name = o->substr(0,p); if(name == "credential") { dest_cred_path = o->substr(p+1); } else if(name == "ca") { dest_ca_path = o->substr(p+1); } else if(name == "casystem") { dest_ca_system = (o->substr(p+1) == "1"); } else if(name == "cagrid") { dest_ca_grid = (o->substr(p+1) == "1"); } else { 
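// Any name=value pair not recognised above is passed through to the
// destination URL as an ordinary URL option, mirroring the handling of
// the source options earlier.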
dest_url.AddOption(*o); }; }; }; DataBuffer buffer; buffer.speed.verbose(true); unsigned long long int minspeed = 0; time_t minspeedtime = 0; for(std::list::iterator o = transfer_opts.begin(); o != transfer_opts.end();++o) { std::string::size_type p = o->find('='); if(p != std::string::npos) { std::string name = o->substr(0,p); unsigned long long int value; if(stringto(o->substr(p+1),value)) { if(name == "minspeed") { minspeed=value; } else if(name == "minspeedtime") { minspeedtime=value; } else if(name == "minavgspeed") { buffer.speed.set_min_average_speed(value); } else if(name == "maxinacttime") { buffer.speed.set_max_inactivity_time(value); } else if(name == "avgtime") { buffer.speed.set_base(value); } else { logger.msg(ERROR, "Unknown transfer option: %s", name); _exit(-1); } }; }; } buffer.speed.set_min_speed(minspeed,minspeedtime); // Checksum objects must be destroyed after DataHandles CheckSumAny crc; CheckSumAny crc_source; CheckSumAny crc_dest; // Read credential from stdin if available std::string proxy_cred; std::getline(std::cin, proxy_cred, '\0'); bool is_x509_cred = false; bool is_token_cred = false; if(strncmp(proxy_cred.c_str(), "x509 ", 5) == 0) { is_x509_cred = true; proxy_cred.erase(0,5); } else if(strncmp(proxy_cred.c_str(), "token ", 6) == 0) { is_token_cred = true; proxy_cred.erase(0,6); } initializeCredentialsType source_cred(initializeCredentialsType::SkipCredentials); UserConfig source_cfg(source_cred); if(!source_cred_path.empty()) { source_cfg.ProxyPath(source_cred_path); } else if(is_x509_cred) { source_cfg.CredentialString(proxy_cred); } else if(is_token_cred) { source_cfg.OToken(proxy_cred); } if(!source_ca_path.empty()) source_cfg.CACertificatesDirectory(source_ca_path); source_cfg.CAUseSystem(source_ca_system); source_cfg.CAUseGrid(source_ca_grid); //source_cfg.UtilsDirPath(...); - probably not needed DataHandle source(source_url, source_cfg); if(!source) { logger.msg(ERROR, "Source URL not supported: %s", source_url.str()); _exit(-1); //return -1; }; if (source->RequiresCredentialsInFile() && source_cred_path.empty()) { logger.msg(ERROR, "No credentials supplied"); _exit(-1); } source->SetSecure(false); source->Passive(true); initializeCredentialsType dest_cred(initializeCredentialsType::SkipCredentials); UserConfig dest_cfg(dest_cred); if(!dest_cred_path.empty()) dest_cfg.ProxyPath(dest_cred_path); else if(is_x509_cred) dest_cfg.CredentialString(proxy_cred); else if(is_token_cred) dest_cfg.OToken(proxy_cred); if(!dest_ca_path.empty()) dest_cfg.CACertificatesDirectory(dest_ca_path); dest_cfg.CAUseSystem(dest_ca_system); dest_cfg.CAUseGrid(dest_ca_grid); //dest_cfg.UtilsDirPath(...); - probably not needed DataHandle dest(dest_url,dest_cfg); if(!dest) { logger.msg(ERROR, "Destination URL not supported: %s", dest_url.str()); _exit(-1); //return -1; }; if (dest->RequiresCredentialsInFile() && dest_cred_path.empty()) { logger.msg(ERROR, "No credentials supplied"); _exit(-1); } dest->SetSecure(false); dest->Passive(true); // set X509* for 3rd party tools which need it (eg GFAL) if (!source_cfg.ProxyPath().empty()) { SetEnv("X509_USER_PROXY", source_cfg.ProxyPath()); if (!source_cfg.CACertificatesDirectory().empty()) SetEnv("X509_CERT_DIR", source_cfg.CACertificatesDirectory()); if (source_cfg.CAUseSystem() && source_cfg.CAUseGrid()) SetEnv("X509_CERT_POLICY", "any"); else if (source_cfg.CAUseSystem()) SetEnv("X509_CERT_POLICY", "system"); else if (source_cfg.CAUseGrid()) SetEnv("X509_CERT_POLICY", "grid"); else SetEnv("X509_CERT_POLICY", "none"); // those tools 
also use hostcert by default if the user is root... if (getuid() == 0) { SetEnv("X509_USER_CERT", source_cfg.ProxyPath()); SetEnv("X509_USER_KEY", source_cfg.ProxyPath()); } } // set signal handlers signal(SIGTERM, sig_shutdown); signal(SIGINT, sig_shutdown); // Filling initial report buffer ReportStatus(DataStaging::DTRStatus::NULL_STATE, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "",0,0,0,""); // if checksum type is supplied, use that type, otherwise use default for the // destination (if checksum is supported by the destination protocol) std::string crc_type(""); if (!checksum_type.empty()) { crc_type = checksum_type; if (!checksum_value.empty()) source->SetCheckSum(checksum_type+':'+checksum_value); } else if (dest->AcceptsMeta() || dest->ProvidesMeta()) { crc_type = dest->DefaultCheckSum(); } if (!crc_type.empty()) { crc = crc_type.c_str(); crc_source = crc_type.c_str(); crc_dest = crc_type.c_str(); if (crc.Type() != CheckSumAny::none) logger.msg(INFO, "Will calculate %s checksum", crc_type); source->AddCheckSumObject(&crc_source); dest->AddCheckSumObject(&crc_dest); } buffer.set(&crc); if (!size.empty()) { unsigned long long int total_size; if (stringto(size, total_size)) { dest->SetSize(total_size); } else { logger.msg(WARNING, "Cannot use supplied --size option"); } } bool reported = false; bool eof_reached = false; // checksum validation against supplied value std::string calc_csum; // These will stay positive if corresponding transfer type is not used DataStatus source_st; DataStatus dest_st; DataStatus transfer_st; // Check if datapoint handles transfer by itself bool try_another_transfer = true; if (try_another_transfer) { if (source->SupportsTransfer()) { logger.msg(INFO, "Using internal transfer method of %s", source->str()); transfer_st = source->Transfer(dest->GetURL(), true, ReportOngoingStatus); if (transfer_st.Passed()) { try_another_transfer = false; eof_reached = true; // so that full copy is reported back to scheduler buffer.speed.verbose(false); unsigned long long bytes = GetFileSize(*source, *dest); if(bytes < transfer_bytes) bytes = transfer_bytes; buffer.speed.transfer(bytes); } else { if (transfer_st != DataStatus::UnimplementedError) { if (dest->Local()) dest->Remove(); // to allow retries try_another_transfer = false; } else { logger.msg(INFO, "Internal transfer method is not supported for %s", source->str()); } } } } if (try_another_transfer) { if (dest->SupportsTransfer()) { logger.msg(INFO, "Using internal transfer method of %s", dest->str()); transfer_st = dest->Transfer(source->GetURL(), false, ReportOngoingStatus); if (transfer_st.Passed()) { try_another_transfer = false; eof_reached = true; // so that full copy is reported back to scheduler buffer.speed.verbose(false); unsigned long long bytes = GetFileSize(*source, *dest); if(bytes < transfer_bytes) bytes = transfer_bytes; buffer.speed.transfer(bytes); } else { if (transfer_st != DataStatus::UnimplementedError) { try_another_transfer = false; } else { logger.msg(INFO, "Internal transfer method is not supported for %s", dest->str()); } } } } if (try_another_transfer) { // Initiating transfer source_st = source->StartReading(buffer); if(!source_st) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (source_url.Protocol()!="file") ? (source_st.Retryable() ? 
DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_SOURCE, std::string("Failed reading from source: ")+source->CurrentLocation().str()+ " : "+std::string(source_st), 0,0,0); _exit(-1); //return -1; }; dest_st = dest->StartWriting(buffer); if(!dest_st) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (dest_url.Protocol() != "file") ? (dest_st.Retryable() ? DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_DESTINATION, std::string("Failed writing to destination: ")+dest->CurrentLocation().str()+ " : "+std::string(dest_st), 0,0,0); _exit(-1); //return -1; } // While transfer is running in another threads // here we periodically report status to parent for(;!buffer.error() && !delivery_shutdown;) { if(buffer.eof_read() && buffer.eof_write()) { eof_reached = true; break; }; ReportStatus(DataStaging::DTRStatus::TRANSFERRING, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", buffer.speed.transferred_size(), GetFileSize(*source,*dest),0); buffer.wait_any(); }; dest_st = dest->StopWriting(); source_st = source->StopReading(); } if (delivery_shutdown) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::INTERNAL_PROCESS_ERROR, DataStaging::DTRErrorStatus::ERROR_TRANSFER, "DataStagingProcess process killed", buffer.speed.transferred_size(), GetFileSize(*source,*dest),0); dest->StopWriting(); _exit(-1); } ReportStatus(DataStaging::DTRStatus::TRANSFERRING, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", buffer.speed.transferred_size(), GetFileSize(*source,*dest),0); // These will return false if buffer was not used bool source_failed = buffer.error_read(); bool dest_failed = buffer.error_write(); // Error at source or destination if(source_failed || !source_st) { std::string err("Failed reading from source: "+source->CurrentLocation().str()); // If error reported in read callback, use that instead if (source->GetFailureReason() != DataStatus::UnknownError) source_st = source->GetFailureReason(); if (!source_st) err += " : " + std::string(source_st); ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (source_url.Protocol() != "file") ? (((!source_st && source_st.Retryable()) || buffer.speed.transferred_size() > 0) ? DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_SOURCE, err, buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time); reported = true; }; if(dest_failed || !dest_st) { std::string err("Failed writing to destination: "+dest->CurrentLocation().str()); // If error reported in write callback, use that instead if (dest->GetFailureReason() != DataStatus::UnknownError) dest_st = dest->GetFailureReason(); if (!dest_st) err += " : " + std::string(dest_st); ReportStatus(DataStaging::DTRStatus::TRANSFERRED, (dest_url.Protocol() != "file") ? (((!dest_st && dest_st.Retryable()) || buffer.speed.transferred_size() > 0) ? 
DataStaging::DTRErrorStatus::TEMPORARY_REMOTE_ERROR : DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR) : DataStaging::DTRErrorStatus::LOCAL_FILE_ERROR, DataStaging::DTRErrorStatus::ERROR_DESTINATION, err, buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time); reported = true; }; if (!transfer_st) { // Usually it's not possible to know at which end the transfer failed ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::PERMANENT_REMOTE_ERROR, DataStaging::DTRErrorStatus::ERROR_UNKNOWN, transfer_st.GetDesc(), 0, GetFileSize(*source,*dest), start_time); reported = true; } // Transfer error, usually timeout if(!eof_reached) { if((!dest_failed) && (!source_failed)) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::TRANSFER_SPEED_ERROR, DataStaging::DTRErrorStatus::ERROR_UNKNOWN, "Transfer timed out", buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time); reported = true; }; }; if (crc && buffer.checksum_valid()) { char buf[100]; crc.print(buf,100); calc_csum = buf; } else if(crc_source) { char buf[100]; crc_source.print(buf,100); calc_csum = buf; } else if(crc_dest) { char buf[100]; crc_dest.print(buf,100); calc_csum = buf; } if (!reported && !calc_csum.empty() && crc.Type() != CheckSumAny::none) { // compare calculated to any checksum given as an option if (source->CheckCheckSum()) { // Check the checksum types match. Some buggy GridFTP servers return a // different checksum type than requested so also check that the checksum // length matches before comparing. if (calc_csum.substr(0, calc_csum.find(":")) != checksum_type || calc_csum.substr(calc_csum.find(":")+1).length() != checksum_value.length()) { logger.msg(INFO, "Checksum type of source and calculated checksum differ, cannot compare"); } else if (calc_csum.substr(calc_csum.find(":")+1) != Arc::lower(checksum_value)) { logger.msg(ERROR, "Checksum mismatch between calculated checksum %s and source checksum %s", calc_csum, source->GetCheckSum()); ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::TRANSFER_SPEED_ERROR, DataStaging::DTRErrorStatus::ERROR_UNKNOWN, "Checksum mismatch", 0,0,start_time); reported = true; eof_reached = false; // TODO general error flag is better than this // Delete destination if (!dest->Remove().Passed()) { logger.msg(WARNING, "Failed cleaning up destination %s", dest->GetURL().str()); } } else logger.msg(INFO, "Calculated transfer checksum %s matches source checksum", calc_csum); } } else { logger.msg(VERBOSE, "Checksum not computed"); } if(!reported) { ReportStatus(DataStaging::DTRStatus::TRANSFERRED, DataStaging::DTRErrorStatus::NONE_ERROR, DataStaging::DTRErrorStatus::NO_ERROR_LOCATION, "", buffer.speed.transferred_size(), GetFileSize(*source,*dest), start_time, calc_csum); }; _exit(eof_reached?0:1); //return eof_reached?0:1; } nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DataDeliveryLocalComm.cpp0000644000000000000000000000013215067751327024306 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.819600344 nordugrid-arc-7.1.1/src/libs/data-staging/DataDeliveryLocalComm.cpp0000644000175000002070000002570515067751327026221 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "DataDeliveryLocalComm.h" namespace DataStaging { // Check if needed and create copy of proxy with suitable ownership static std::string prepare_proxy(const std::string& proxy_path, int child_uid, int child_gid) { if 
(proxy_path.empty()) return ""; // No credentials int my_uid = (int)::getuid(); if (my_uid != 0) return ""; // Can't switch user id if (child_uid == 0) return ""; // Not switching if (child_uid == my_uid) return ""; // Not switching // Check ownership of credentials. struct ::stat st; if(!Arc::FileStat(proxy_path,&st,true)) return ""; // Can't stat - won't read if(st.st_uid == child_uid) return ""; // Owned by child // Ownership may prevent reading of file. std::string proxy_content; if(!Arc::FileRead(proxy_path, proxy_content)) return ""; // Creating temporary file // Probably not most effective solution. But makes sure // access permissions are set properly. std::string proxy_new_path; if(!Arc::TmpFileCreate(proxy_new_path, proxy_content, child_uid, child_gid, S_IRUSR|S_IWUSR)) { if (!proxy_new_path.empty()) Arc::FileDelete(proxy_new_path); return ""; } return proxy_new_path; } DataDeliveryLocalComm::DataDeliveryLocalComm(DTR_ptr dtr, const TransferParameters& params) : DataDeliveryComm(dtr, params),child_(NULL),last_comm(Arc::Time()) { // Initial empty status memset(&status_,0,sizeof(status_)); status_.commstatus = CommInit; status_pos_ = 0; if(!dtr->get_source()) { logger_->msg(Arc::ERROR, "No source defined"); return; } if(!dtr->get_destination()) { logger_->msg(Arc::ERROR, "No destination defined"); return; } { std::unique_lock lock(lock_); // Generate options for child std::list args; std::string execpath = Arc::ArcLocation::GetLibDir()+G_DIR_SEPARATOR_S+"DataStagingDelivery"; args.push_back(execpath); // check for alternative source or destination eg cache, mapped URL, TURL std::string surl; if (!dtr->get_mapped_source().empty()) { surl = dtr->get_mapped_source(); } else if (!dtr->get_source()->TransferLocations().empty()) { surl = dtr->get_source()->TransferLocations()[0].fullstr(); } else { logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_source()->str()); return; } if (dtr->get_destination()->TransferLocations().empty()) { logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_destination()->str()); return; } std::string durl = dtr->get_destination()->TransferLocations()[0].fullstr(); bool caching = false; if ((dtr->get_cache_state() == CACHEABLE) && !dtr->get_cache_file().empty()) { durl = dtr->get_cache_file(); caching = true; } else if (!dtr->get_mapped_destination().empty()) { durl = dtr->get_mapped_destination(); } int child_uid = 0; int child_gid = 0; if(!caching) { child_uid = dtr->get_local_user().get_uid(); child_gid = dtr->get_local_user().get_gid(); } args.push_back("--surl"); args.push_back(surl); args.push_back("--durl"); args.push_back(durl); // Check if credentials are needed for source/dest Arc::DataHandle surl_h(surl, dtr->get_usercfg()); Arc::DataHandle durl_h(durl, dtr->get_usercfg()); bool needCredentialsInFile = (surl_h && surl_h->RequiresCredentialsInFile()) || (durl_h && durl_h->RequiresCredentialsInFile()); if (!needCredentialsInFile) { // If file-based credentials are not required then send through stdin if (!dtr->get_usercfg().OToken().empty()) { stdin_ = "token "; stdin_ += dtr->get_usercfg().OToken(); } else { stdin_ = "x509 "; stdin_ += dtr->get_usercfg().CredentialString(); } } else { // If child is going to be run under different user ID // we must ensure it will be able to read credentials. 
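// prepare_proxy() (defined at the top of this file) returns the path of
// a temporary proxy copy readable by child_uid/child_gid, or an empty
// string when no copy is needed or possible; the temporary copy is
// deleted again in the destructor.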
tmp_proxy_ = prepare_proxy(dtr->get_usercfg().ProxyPath(), child_uid, child_gid); if (!tmp_proxy_.empty()) { args.push_back("--sopt"); args.push_back(std::string("credential=")+tmp_proxy_); args.push_back("--dopt"); args.push_back(std::string("credential=")+tmp_proxy_); } else if(!dtr->get_usercfg().ProxyPath().empty()) { args.push_back("--sopt"); args.push_back(std::string("credential=")+dtr->get_usercfg().ProxyPath()); args.push_back("--dopt"); args.push_back(std::string("credential=")+dtr->get_usercfg().ProxyPath()); } } if (!dtr->get_usercfg().CACertificatesDirectory().empty()) { args.push_back("--sopt"); args.push_back(std::string("ca=")+dtr->get_usercfg().CACertificatesDirectory()); args.push_back("--dopt"); args.push_back(std::string("ca=")+dtr->get_usercfg().CACertificatesDirectory()); } args.push_back("--sopt"); args.push_back(std::string("casystem=")+Arc::tostring((int)dtr->get_usercfg().CAUseSystem())); args.push_back("--dopt"); args.push_back(std::string("casystem=")+Arc::tostring((int)dtr->get_usercfg().CAUseSystem())); args.push_back("--sopt"); args.push_back(std::string("cagrid=")+Arc::tostring((int)dtr->get_usercfg().CAUseGrid())); args.push_back("--dopt"); args.push_back(std::string("cagrid=")+Arc::tostring((int)dtr->get_usercfg().CAUseGrid())); args.push_back("--topt"); args.push_back(std::string("minspeed=")+Arc::tostring(transfer_params.min_current_bandwidth)); args.push_back("--topt"); args.push_back(std::string("minspeedtime=")+Arc::tostring(transfer_params.averaging_time)); args.push_back("--topt"); args.push_back(std::string("minavgspeed=")+Arc::tostring(transfer_params.min_average_bandwidth)); args.push_back("--topt"); args.push_back(std::string("maxinacttime=")+Arc::tostring(transfer_params.max_inactivity_time)); if (dtr->get_source()->CheckSize()) { args.push_back("--size"); args.push_back(Arc::tostring(dtr->get_source()->GetSize())); } if (dtr->get_source()->CheckCheckSum()) { std::string csum(dtr->get_source()->GetCheckSum()); std::string::size_type pos(csum.find(':')); if (pos == std::string::npos || pos == csum.length()-1) { logger_->msg(Arc::WARNING, "Bad checksum format %s", csum); } else { args.push_back("--cstype"); args.push_back(csum.substr(0, pos)); args.push_back("--csvalue"); args.push_back(csum.substr(pos+1)); } } else if (!dtr->get_destination()->GetURL().MetaDataOption("checksumtype").empty()) { args.push_back("--cstype"); args.push_back(dtr->get_destination()->GetURL().MetaDataOption("checksumtype")); if (!dtr->get_destination()->GetURL().MetaDataOption("checksumvalue").empty()) { args.push_back("--csvalue"); args.push_back(dtr->get_destination()->GetURL().MetaDataOption("checksumvalue")); } } else if (!dtr->get_destination()->GetURL().Option("checksum").empty()) { args.push_back("--cstype"); args.push_back(dtr->get_destination()->GetURL().Option("checksum")); } else if (dtr->get_destination()->AcceptsMeta() || dtr->get_destination()->ProvidesMeta()) { args.push_back("--cstype"); args.push_back(dtr->get_destination()->DefaultCheckSum()); } child_ = new Arc::Run(args); // Set up pipes child_->KeepStdout(false); child_->KeepStderr(false); child_->KeepStdin(false); child_->AssignUserId(child_uid); child_->AssignGroupId(child_gid); child_->AssignStdin(stdin_); // Start child std::string cmd; for(std::list::iterator arg = args.begin();arg!=args.end();++arg) { cmd += *arg; cmd += " "; } logger_->msg(Arc::DEBUG, "Running command: %s", cmd); if(!child_->Start()) { delete child_; child_=NULL; logger_->msg(Arc::ERROR, "Failed to run command: %s", cmd); 
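// status_.commstatus is still CommInit here (set at the start of the
// constructor), which presumably lets callers polling GetStatus()
// distinguish a child that never started from one that exited.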
return; } } GetHandler().Add(this); } DataDeliveryLocalComm::~DataDeliveryLocalComm(void) { { std::unique_lock lock(lock_); if(child_) { child_->Kill(10); // Give it a chance delete child_; child_=NULL; // And then kill for sure } } if(!tmp_proxy_.empty()) Arc::FileDelete(tmp_proxy_); GetHandler().Remove(this); } std::string DataDeliveryLocalComm::DeliveryId() const { return "localhost"; } void DataDeliveryLocalComm::PullStatus(void) { std::unique_lock lock(lock_); if(!child_) return; for(;;) { if(status_pos_ < sizeof(status_buf_)) { int l; // TODO: direct redirect for(;;) { char buf[1024+1]; l = child_->ReadStderr(0,buf,sizeof(buf)-1); if(l <= 0) break; buf[l] = 0; char* start = buf; for(;*start;) { char* end = strchr(start,'\n'); if(end) *end = 0; logger_->msg(Arc::INFO, "DataDelivery: %s", start); if(!end) break; start = end + 1; } } l = child_->ReadStdout(0,((char*)&status_buf_)+status_pos_,sizeof(status_buf_)-status_pos_); if(l == -1) { // child error or closed comm if(child_->Running()) { status_.commstatus = CommClosed; } else { status_.commstatus = CommExited; if(child_->Result() != 0) { logger_->msg(Arc::ERROR, "DataStagingDelivery exited with code %i", child_->Result()); status_.commstatus = CommFailed; } } delete child_; child_=NULL; return; } if(l == 0) break; status_pos_+=l; last_comm = Arc::Time(); } if(status_pos_ >= sizeof(status_buf_)) { status_buf_.error_desc[sizeof(status_buf_.error_desc)-1] = 0; status_=status_buf_; status_pos_-=sizeof(status_buf_); } } // check for stuck child process (no report through comm channel) Arc::Period t = Arc::Time() - last_comm; if (transfer_params.max_inactivity_time > 0 && t >= transfer_params.max_inactivity_time*2) { logger_->msg(Arc::ERROR, "Transfer killed after %i seconds without communication", t.GetPeriod()); child_->Kill(1); delete child_; child_ = NULL; } } bool DataDeliveryLocalComm::CheckComm(DTR_ptr dtr, std::vector& allowed_dirs, std::string& load_avg) { allowed_dirs.push_back("/"); double avg[3]; if (getloadavg(avg, 3) != 3) { load_avg = "-1"; } else { load_avg = Arc::tostring(avg[1]); } return true; } } // namespace DataStaging nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/examples0000644000000000000000000000013115067751424021174 xustar0030 mtime=1759499028.894420988 29 atime=1759499034.76351017 30 ctime=1759499028.894420988 nordugrid-arc-7.1.1/src/libs/data-staging/examples/0000755000175000002070000000000015067751424023154 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/libs/data-staging/examples/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327023307 xustar0030 mtime=1759498967.749501515 29 atime=1759498967.86149359 30 ctime=1759499028.889056712 nordugrid-arc-7.1.1/src/libs/data-staging/examples/Makefile.am0000644000175000002070000000104715067751327025214 0ustar00mockbuildmock00000000000000check_PROGRAMS = generator generator_SOURCES = generator-main.cpp Generator.h Generator.cpp generator_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) generator_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ ../libarcdatastaging.la $(GLIBMM_LIBS) exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(generator_SOURCES) EXTRA_DIST = $(generator_SOURCES) nordugrid-arc-7.1.1/src/libs/data-staging/examples/PaxHeaders/Generator.h0000644000000000000000000000013115067751327023352 xustar0030 mtime=1759498967.749501515 29 
atime=1759498967.86149359 30 ctime=1759499028.894020338 nordugrid-arc-7.1.1/src/libs/data-staging/examples/Generator.h0000644000175000002070000000253015067751327025255 0ustar00mockbuildmock00000000000000#ifndef GENERATOR_H_ #define GENERATOR_H_ #include #include #include // This Generator basic implementation shows how a Generator can // be written. It has one method, run(), which creates a single DTR // and submits it to the Scheduler. class Generator: public DataStaging::DTRCallback { private: // Condition to wait on until DTR has finished static Arc::SimpleCondition cond; // DTR Scheduler DataStaging::Scheduler scheduler; // Logger object static Arc::Logger logger; // Root LogDestinations to be used in receiveDTR std::list root_destinations; public: // Counter for main to know how many DTRs are in the system Arc::SimpleCounter counter; // Create a new Generator. start() must be called to start DTR threads. Generator(); // Stop Generator and DTR threads ~Generator(); // Implementation of callback from DTRCallback - the callback method used // when DTR processing is complete to pass the DTR back to the generator. // It decrements counter. virtual void receiveDTR(DataStaging::DTR_ptr dtr); // Start Generator and DTR threads void start(); // Submit a DTR with given source and destination. Increments counter. void run(const std::string& source, const std::string& destination); }; #endif /* GENERATOR_H_ */ nordugrid-arc-7.1.1/src/libs/data-staging/examples/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355023322 xustar0030 mtime=1759498989.393040878 30 atime=1759499017.529248294 30 ctime=1759499028.890309464 nordugrid-arc-7.1.1/src/libs/data-staging/examples/Makefile.in0000644000175000002070000007764115067751355025243 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ check_PROGRAMS = generator$(EXEEXT) subdir = src/libs/data-staging/examples ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am_generator_OBJECTS = generator-generator-main.$(OBJEXT) \ generator-Generator.$(OBJEXT) generator_OBJECTS = $(am_generator_OBJECTS) am__DEPENDENCIES_1 = generator_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ ../libarcdatastaging.la $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = generator_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(generator_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) 
am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/generator-Generator.Po \ ./$(DEPDIR)/generator-generator-main.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(generator_SOURCES) DIST_SOURCES = $(generator_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. 
This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ 
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = 
@XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ generator_SOURCES = generator-main.cpp Generator.h Generator.cpp generator_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) generator_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ ../libarcdatastaging.la $(GLIBMM_LIBS) exampledir = $(pkgdatadir)/examples/sdk example_DATA = $(generator_SOURCES) EXTRA_DIST = $(generator_SOURCES) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libs/data-staging/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/libs/data-staging/examples/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list generator$(EXEEXT): $(generator_OBJECTS) $(generator_DEPENDENCIES) $(EXTRA_generator_DEPENDENCIES) @rm -f generator$(EXEEXT) $(AM_V_CXXLD)$(generator_LINK) $(generator_OBJECTS) $(generator_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/generator-Generator.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/generator-generator-main.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< generator-generator-main.o: generator-main.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-generator-main.o -MD -MP -MF $(DEPDIR)/generator-generator-main.Tpo -c -o generator-generator-main.o `test -f 'generator-main.cpp' || echo '$(srcdir)/'`generator-main.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/generator-generator-main.Tpo $(DEPDIR)/generator-generator-main.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='generator-main.cpp' object='generator-generator-main.o' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-generator-main.o `test -f 'generator-main.cpp' || echo '$(srcdir)/'`generator-main.cpp generator-generator-main.obj: generator-main.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-generator-main.obj -MD -MP -MF $(DEPDIR)/generator-generator-main.Tpo -c -o generator-generator-main.obj `if test -f 'generator-main.cpp'; then $(CYGPATH_W) 'generator-main.cpp'; else $(CYGPATH_W) '$(srcdir)/generator-main.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/generator-generator-main.Tpo $(DEPDIR)/generator-generator-main.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='generator-main.cpp' object='generator-generator-main.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-generator-main.obj `if test -f 'generator-main.cpp'; then $(CYGPATH_W) 'generator-main.cpp'; else $(CYGPATH_W) '$(srcdir)/generator-main.cpp'; fi` generator-Generator.o: Generator.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-Generator.o -MD -MP -MF $(DEPDIR)/generator-Generator.Tpo -c -o generator-Generator.o `test -f 'Generator.cpp' || echo '$(srcdir)/'`Generator.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/generator-Generator.Tpo $(DEPDIR)/generator-Generator.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='Generator.cpp' object='generator-Generator.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-Generator.o `test -f 'Generator.cpp' || echo '$(srcdir)/'`Generator.cpp generator-Generator.obj: Generator.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -MT generator-Generator.obj -MD -MP -MF $(DEPDIR)/generator-Generator.Tpo -c -o generator-Generator.obj `if test -f 'Generator.cpp'; then $(CYGPATH_W) 'Generator.cpp'; else $(CYGPATH_W) '$(srcdir)/Generator.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/generator-Generator.Tpo $(DEPDIR)/generator-Generator.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='Generator.cpp' object='generator-Generator.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(generator_CXXFLAGS) $(CXXFLAGS) -c -o generator-Generator.obj `if test -f 'Generator.cpp'; then $(CYGPATH_W) 'Generator.cpp'; else $(CYGPATH_W) '$(srcdir)/Generator.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) @list='$(example_DATA)'; test -n 
"$(exampledir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(exampledir)'"; \ $(MKDIR_P) "$(DESTDIR)$(exampledir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(exampledir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/generator-Generator.Po -rm -f ./$(DEPDIR)/generator-generator-main.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/generator-Generator.Po -rm -f ./$(DEPDIR)/generator-generator-main.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exampleDATA \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-exampleDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-7.1.1/src/libs/data-staging/examples/PaxHeaders/Generator.cpp0000644000000000000000000000013115067751327023705 xustar0030 mtime=1759498967.748491873 29 atime=1759498967.86149359 30 ctime=1759499028.895217637 nordugrid-arc-7.1.1/src/libs/data-staging/examples/Generator.cpp0000644000175000002070000000430315067751327025610 0ustar00mockbuildmock00000000000000
// NOTE (editorial): the targets of the two angle-bracket #includes below were
// lost in text extraction; they are reconstructed from the Arc symbols used
// in this file (Arc::UUID, Arc::Credential) and may not match upstream.
#include <arc/GUID.h>
#include <arc/credential/Credential.h>

#include "Generator.h"

Arc::Logger Generator::logger(Arc::Logger::getRootLogger(), "Generator");
Arc::SimpleCondition Generator::cond;

Generator::Generator() {
  // Set up logging
  root_destinations = Arc::Logger::getRootLogger().getDestinations();
  DataStaging::DTR::LOG_LEVEL = Arc::Logger::getRootLogger().getThreshold();
}

Generator::~Generator() {
  logger.msg(Arc::INFO, "Shutting down scheduler");
  scheduler.stop();
  logger.msg(Arc::INFO, "Scheduler stopped, exiting");
}

void Generator::receiveDTR(DataStaging::DTR_ptr dtr) {
  // root logger is disabled in Scheduler thread so need to add it here
  Arc::Logger::getRootLogger().addDestinations(root_destinations);
  logger.msg(Arc::INFO, "Received DTR %s back from scheduler in state %s",
             dtr->get_id(), dtr->get_status().str());
  Arc::Logger::getRootLogger().removeDestinations();
  counter.dec();
}

void Generator::start() {
  // Starting scheduler with default configuration
  logger.msg(Arc::INFO, "Generator started");
  logger.msg(Arc::INFO, "Starting DTR threads");
  scheduler.SetDumpLocation("/tmp/dtr.log");
  scheduler.start();
}

void Generator::run(const std::string& source, const std::string& destination) {

  std::string job_id = Arc::UUID();

  Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::TryCredentials);
  Arc::UserConfig cfg(cred_type);

  // check credentials
  if (!Arc::Credential::IsCredentialsValid(cfg)) {
    logger.msg(Arc::ERROR, "No valid credentials found, exiting");
    return;
  }

  cfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY());

  // Element type reconstructed (lost in extraction): DTRLogDestination is
  // what the DTR constructor declared in DTR.h expects.
  std::list<DataStaging::DTRLogDestination> logs;
  logs.push_back(new Arc::LogStream(std::cout));

  DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg,
                                                job_id, Arc::User().get_uid(),
                                                logs, "DataStaging"));
  if (!(*dtr)) {
    logger.msg(Arc::ERROR, "Problem creating dtr (source %s, destination %s)",
               source, destination);
    return;
  }
  // register callback with DTR
  dtr->registerCallback(this, DataStaging::GENERATOR);
  dtr->registerCallback(&scheduler, DataStaging::SCHEDULER);
  dtr->set_tries_left(5);
  DataStaging::DTR::push(dtr, DataStaging::SCHEDULER);
  counter.inc();
}
nordugrid-arc-7.1.1/src/libs/data-staging/examples/PaxHeaders/generator-main.cpp0000644000000000000000000000013115067751327024667 xustar0030 mtime=1759498967.749501515 29 atime=1759498967.86149359 30 ctime=1759499028.892840462 nordugrid-arc-7.1.1/src/libs/data-staging/examples/generator-main.cpp0000644000175000002070000000376715067751327026603 0ustar00mockbuildmock00000000000000
/*
// To compile this example requires that nordugrid-arc-devel be installed. It
// also requires including headers of external libraries used by ARC core code:
//
// g++ -o generator `pkg-config --cflags glibmm-2.4` -I/usr/include/libxml2 \
//    -larcdatastaging Generator.cpp Generator.h generator-main.cpp
//
// If ARC is installed in a non-standard location, the options
// -L ARC_LOCATION/lib and -I ARC_LOCATION/include should also be used
*/
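// Illustrative additions (editorial, not from upstream): with ARC under a
// hypothetical non-standard prefix such as /opt/arc, the command above would
// become something like:
//
// g++ -o generator -I/opt/arc/include `pkg-config --cflags glibmm-2.4` \
//    -I/usr/include/libxml2 -L/opt/arc/lib -larcdatastaging \
//    Generator.cpp Generator.h generator-main.cpp
//
// The resulting binary can then be run as described in usage() below, e.g.
//
//   ./generator 5                          (five mock transfers)
//   ./generator /tmp/srcfile /tmp/destfile (copy a single file)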
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

// NOTE (editorial): the two angle-bracket #include targets below were lost in
// text extraction; they are reconstructed from the symbols used in main()
// (signal(), Arc::stringto) and may not match upstream.
#include <signal.h>

#include <arc/StringConv.h>

#include "Generator.h"

static Arc::SimpleCounter counter;
static bool run = true;

static void do_shutdown(int) {
  run = false;
}

static void usage() {
  std::cout << "Usage: generator [num mock transfers]" << std::endl;
  std::cout << "       generator source destination" << std::endl;
  std::cout << "To use mock transfers ARC must be built with configure --enable-mock-dmc" << std::endl;
  std::cout << "The default number of mock transfers is 10" << std::endl;
}

int main(int argc, char** argv) {

  signal(SIGTTOU, SIG_IGN);
  signal(SIGTTIN, SIG_IGN);
  signal(SIGINT, do_shutdown);

  // Log to stderr
  Arc::LogStream logcerr(std::cerr);
  Arc::Logger::getRootLogger().addDestination(logcerr);
  Arc::Logger::getRootLogger().setThreshold(Arc::INFO);

  Generator generator;
  int num = 10;
  if (argc == 1 || argc == 2) { // run mock a number of times
    if (argc == 2 && (std::string(argv[1]) == "-h" || !Arc::stringto(argv[1], num))) {
      usage();
      return 1;
    }
    generator.start();
    for (int i = 0; i < num; ++i) {
      std::string source = "mock://mocksrc/mock." + Arc::tostring(i);
      std::string destination = "mock://mockdest/mock." + Arc::tostring(i);
      generator.run(source, destination);
    }
  } else if (argc == 3) { // run with given source and destination
    generator.start();
    generator.run(argv[1], argv[2]);
  } else {
    usage();
    return 1;
  }

  while (generator.counter.get() > 0 && run) {
    sleep(1);
  }
  return 0;
}
nordugrid-arc-7.1.1/src/libs/data-staging/examples/PaxHeaders/README0000644000000000000000000000013115067751327022133 xustar0030 mtime=1759498967.749501515 29 atime=1759498967.86149359 30 ctime=1759499028.891594506 nordugrid-arc-7.1.1/src/libs/data-staging/examples/README0000644000175000002070000000006015067751327024032 0ustar00mockbuildmock00000000000000Examples of how to use the data staging library.
nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DataDeliveryRemoteComm.cpp0000644000000000000000000000013215067751327024507 xustar0030 mtime=1759498967.747527268 30 atime=1759498967.860493575 30 ctime=1759499028.820625889 nordugrid-arc-7.1.1/src/libs/data-staging/DataDeliveryRemoteComm.cpp0000644000175000002070000004515415067751327026412 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

// NOTE (editorial): the two angle-bracket #include targets below were lost in
// text extraction; likely candidates, based on the classes used in this file
// (Arc::ClientSOAP, Arc::DelegationProviderSOAP), are given here and may not
// match upstream.
#include <arc/communication/ClientInterface.h>
#include <arc/delegation/DelegationInterface.h>

#include "DataDeliveryRemoteComm.h"

#define DELEGATION_NAMESPACE "http://www.nordugrid.org/schemas/delegation"

namespace DataStaging {

  Arc::Logger DataDeliveryRemoteComm::logger(Arc::Logger::getRootLogger(), "DataStaging.DataDeliveryRemoteComm");

  DataDeliveryRemoteComm::DataDeliveryRemoteComm(DTR_ptr dtr, const TransferParameters& params)
    : DataDeliveryComm(dtr, params),
      client(NULL),
      dtr_full_id(dtr->get_id()),
      query_retries(20),
      endpoint(dtr->get_delivery_endpoint()),
      timeout(dtr->get_usercfg().Timeout()),
      valid(false) {
    {
      std::unique_lock lock(lock_);
      // Initial empty status
      memset(&status_, 0, sizeof(status_));
      FillStatus();
    }

    if(!dtr->get_source()) return;
    if(!dtr->get_destination()) return;

    // check for alternative source or destination eg cache, mapped URL, TURL
    std::string surl;
    if (!dtr->get_mapped_source().empty()) {
      surl = dtr->get_mapped_source();
    } else if (!dtr->get_source()->TransferLocations().empty()) {
      surl = dtr->get_source()->TransferLocations()[0].fullstr();
    } else {
      logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_source()->str());
      return;
    }

    if (dtr->get_destination()->TransferLocations().empty()) {
      logger_->msg(Arc::ERROR, "No locations defined for %s", dtr->get_destination()->str());
      return;
    }
    std::string durl = dtr->get_destination()->TransferLocations()[0].fullstr();

    bool caching = false;
    if ((dtr->get_cache_state() == CACHEABLE) && !dtr->get_cache_file().empty()) {
      durl = dtr->get_cache_file();
      caching = true;
    } else if (!dtr->get_mapped_destination().empty()) {
      durl = dtr->get_mapped_destination();
    }

    if (dtr->host_cert_for_remote_delivery()) {
      Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::TryCredentials);
      Arc::UserConfig host_cfg(cred_type);
      host_cfg.ProxyPath(""); // to force using cert/key files instead of non-existent proxy
      host_cfg.ApplyToConfig(cfg);
    } else {
      dtr->get_usercfg().ApplyToConfig(cfg);
    }

    // connect to service and make a new transfer request
    logger_->msg(Arc::VERBOSE, "Connecting to Delivery service at %s", endpoint.str());
    // TODO: implement pool of ClientSOAP objects instead of having one for each Comm
    // object. That shall reduce number of TCP connections.
    client = new Arc::ClientSOAP(cfg, endpoint, timeout);

    Arc::NS ns;
    Arc::PayloadSOAP request(ns);
    Arc::XMLNode dtrnode = request.NewChild("DataDeliveryStart").NewChild("DTR");

    dtrnode.NewChild("ID") = dtr_full_id;
    dtrnode.NewChild("Source") = surl;
    dtrnode.NewChild("Destination") = durl;
    if (dtr->get_source()->CheckSize()) dtrnode.NewChild("Size") = Arc::tostring(dtr->get_source()->GetSize());
    if (dtr->get_source()->CheckCheckSum()) dtrnode.NewChild("CheckSum") = dtr->get_source()->GetCheckSum();
    dtrnode.NewChild("Uid") = Arc::tostring(dtr->get_local_user().get_uid());
    dtrnode.NewChild("Gid") = Arc::tostring(dtr->get_local_user().get_gid());
    // transfer parameters
    dtrnode.NewChild("MinAverageSpeed") = Arc::tostring(params.min_average_bandwidth);
    dtrnode.NewChild("AverageTime") = Arc::tostring(params.averaging_time);
    dtrnode.NewChild("MinCurrentSpeed") = Arc::tostring(params.min_current_bandwidth);
    dtrnode.NewChild("MaxInactivityTime") = Arc::tostring(params.max_inactivity_time);
    // caching
    if (caching) dtrnode.NewChild("Caching") = "true";
    else dtrnode.NewChild("Caching") = "false";

    // delegate credentials
    Arc::XMLNode op = request.Child(0);
    if (!SetupDelegation(op, dtr->get_usercfg())) {
      logger_->msg(Arc::ERROR, "Failed to set up credential delegation with %s", endpoint.str());
      return;
    }

    std::string xml;
    request.GetXML(xml, true);
    logger_->msg(Arc::DEBUG, "Request:\n%s", xml);

    Arc::PayloadSOAP *response = NULL;
    Arc::MCC_Status status = client->process(&request, &response);

    if (!status) {
      logger_->msg(Arc::ERROR, "Could not connect to service %s: %s", endpoint.str(), (std::string)status);
      if (response) delete response;
      return;
    }

    if (!response) {
      logger_->msg(Arc::ERROR, "No SOAP response from Delivery service %s", endpoint.str());
      return;
    }

    response->GetXML(xml, true);
    logger_->msg(Arc::DEBUG, "Response:\n%s", xml);

    if (response->IsFault()) {
      Arc::SOAPFault& fault = *response->Fault();
      std::string err("SOAP fault: %s", fault.Code());
      for (int n = 0;;++n) {
        if (fault.Reason(n).empty()) break;
        err += ": " + fault.Reason(n);
      }
      logger_->msg(Arc::ERROR, "Failed to start transfer request: %s", err);
      delete response;
      return;
    }

    Arc::XMLNode resultnode = (*response)["DataDeliveryStartResponse"]["DataDeliveryStartResult"]["Result"][0];
    if (!resultnode || !resultnode["ResultCode"]) {
      logger_->msg(Arc::ERROR, "Bad format in XML response from service at %s: %s", endpoint.str(), xml);
      delete response;
      return;
    }

    std::string resultcode = (std::string)(resultnode["ResultCode"]);
    if (resultcode != "OK") {
      logger_->msg(Arc::ERROR, "Could not make new transfer request: %s: %s",
                   resultcode, (std::string)(resultnode[0]["ErrorDescription"]));
      delete response;
      return;
    }
    logger_->msg(Arc::INFO, "Started remote Delivery at %s", endpoint.str());

    delete response;
    valid = true;
    GetHandler().Add(this);
  }
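  // For reference (editorial illustration, not upstream documentation): the
  // DataDeliveryStart request assembled by the constructor above has roughly
  // this shape, with made-up values; Size and CheckSum appear only when they
  // are known for the source:
  //
  //   <DataDeliveryStart>
  //     <DTR>
  //       <ID>...</ID>
  //       <Source>gsiftp://example.org/path/input</Source>
  //       <Destination>/var/spool/cache/data/input</Destination>
  //       <Size>1048576</Size>
  //       <CheckSum>adler32:01234567</CheckSum>
  //       <Uid>1000</Uid>
  //       <Gid>1000</Gid>
  //       <MinAverageSpeed>0</MinAverageSpeed>
  //       <AverageTime>0</AverageTime>
  //       <MinCurrentSpeed>0</MinCurrentSpeed>
  //       <MaxInactivityTime>0</MaxInactivityTime>
  //       <Caching>true</Caching>
  //     </DTR>
  //   </DataDeliveryStart>
  //
  // plus a DelegatedToken element added by SetupDelegation().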
  DataDeliveryRemoteComm::~DataDeliveryRemoteComm() {
    // If transfer is still going, send cancellation request to service
    if (valid) CancelDTR();
    GetHandler().Remove(this);
    std::unique_lock lock(lock_);
    delete client;
  }

  std::string DataDeliveryRemoteComm::DeliveryId() const {
    return endpoint.str();
  }

  void DataDeliveryRemoteComm::CancelDTR() {
    std::unique_lock lock(lock_);
    if (!client) return;
    Arc::NS ns;
    Arc::PayloadSOAP request(ns);
    Arc::XMLNode dtrnode = request.NewChild("DataDeliveryCancel").NewChild("DTR");

    dtrnode.NewChild("ID") = dtr_full_id;

    std::string xml;
    request.GetXML(xml, true);
    logger_->msg(Arc::DEBUG, "Request:\n%s", xml);

    Arc::PayloadSOAP *response = NULL;
    Arc::MCC_Status status = client->process(&request, &response);

    if (!status) {
      logger_->msg(Arc::ERROR, "Failed to send cancel request: %s", (std::string)status);
      if (response) delete response;
      return;
    }

    if (!response) {
      logger_->msg(Arc::ERROR, "Failed to cancel: No SOAP response");
      return;
    }

    response->GetXML(xml, true);
    logger_->msg(Arc::DEBUG, "Response:\n%s", xml);

    if (response->IsFault()) {
      Arc::SOAPFault& fault = *response->Fault();
      std::string err("SOAP fault: %s", fault.Code());
      for (int n = 0;;++n) {
        if (fault.Reason(n).empty()) break;
        err += ": " + fault.Reason(n);
      }
      logger_->msg(Arc::ERROR, "Failed to cancel transfer request: %s", err);
      delete response;
      return;
    }

    Arc::XMLNode resultnode = (*response)["DataDeliveryCancelResponse"]["DataDeliveryCancelResult"]["Result"][0];
    if (!resultnode || !resultnode["ResultCode"]) {
      logger_->msg(Arc::ERROR, "Bad format in XML response: %s", xml);
      delete response;
      return;
    }

    if ((std::string)resultnode["ResultCode"] != "OK") {
      Arc::XMLNode errnode = resultnode["ErrorDescription"];
      logger_->msg(Arc::ERROR, "Failed to cancel: %s", (std::string)errnode);
    }
    delete response;
  }

  void DataDeliveryRemoteComm::PullStatus() {
    // send query request to service and fill status_
    std::unique_lock lock(lock_);
    if (!client) return;

    // check time since last query - query every second for the first 20s of
    // the transfer and every 5s after that
    // TODO be more intelligent, using transfer rate and file size
    if (Arc::Time() - start_ < 20 && Arc::Time() - Arc::Time(status_.timestamp) < 1) return;
    if (Arc::Time() - start_ > 20 && Arc::Time() - Arc::Time(status_.timestamp) < 5) return;

    Arc::NS ns;
    Arc::PayloadSOAP request(ns);
    Arc::XMLNode dtrnode = request.NewChild("DataDeliveryQuery").NewChild("DTR");

    dtrnode.NewChild("ID") = dtr_full_id;

    std::string xml;
    request.GetXML(xml, true);
    logger_->msg(Arc::DEBUG, "Request:\n%s", xml);

    Arc::PayloadSOAP *response = NULL;
    Arc::MCC_Status status = client->process(&request, &response);

    if (!status) {
      logger_->msg(Arc::ERROR, "%s", (std::string)status);
      status_.commstatus = CommFailed;
      if (response) delete response;
      valid = false;
      return;
    }

    if (!response) {
      if (--query_retries > 0) {
        HandleQueryFault("No SOAP response from delivery service");
        return;
      }
      logger_->msg(Arc::ERROR, "No SOAP response from delivery service");
      status_.commstatus = CommFailed;
      valid = false;
      return;
    }

    response->GetXML(xml, true);
    logger_->msg(Arc::DEBUG, "Response:\n%s", xml);

    if (response->IsFault()) {
      Arc::SOAPFault& fault = *response->Fault();
      std::string err("SOAP fault: %s", fault.Code());
      for (int n = 0;;++n) {
        if (fault.Reason(n).empty()) break;
        err += ": " + fault.Reason(n);
      }
      delete response;
      if (--query_retries > 0) {
        HandleQueryFault("Failed to query state: " + err);
        return;
      }
      logger_->msg(Arc::ERROR, "Failed to query state: %s", err);
      status_.commstatus = CommFailed;
      strncpy(status_.error_desc, "SOAP error in connection with delivery service", sizeof(status_.error_desc));
      valid = false;
      return;
    }

    Arc::XMLNode resultnode = (*response)["DataDeliveryQueryResponse"]["DataDeliveryQueryResult"]["Result"][0];
    if (!resultnode || !resultnode["ResultCode"]) {
      logger_->msg(Arc::ERROR, "Bad format in XML response: %s", xml);
      delete response;
      status_.commstatus = CommFailed;
      valid = false;
      return;
    }

    // Fill status fields with results from service
    FillStatus(resultnode[0]);

    delete response;
  }

  // Element type of allowed_dirs reconstructed (lost in extraction) from the
  // push_back of (std::string)dir below.
  bool DataDeliveryRemoteComm::CheckComm(DTR_ptr dtr, std::vector<std::string>& allowed_dirs, std::string& load_avg) {
    // call Ping
    Arc::MCCConfig cfg;
    if (dtr->host_cert_for_remote_delivery()) {
      Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::TryCredentials);
      Arc::UserConfig host_cfg(cred_type);
      host_cfg.ProxyPath(""); // to force using cert/key files instead of non-existent proxy
      host_cfg.ApplyToConfig(cfg);
    } else {
      dtr->get_usercfg().ApplyToConfig(cfg);
    }

    dtr->get_logger()->msg(Arc::VERBOSE, "Connecting to Delivery service at %s",
                           dtr->get_delivery_endpoint().str());
    Arc::ClientSOAP client(cfg, dtr->get_delivery_endpoint(), dtr->get_usercfg().Timeout());

    Arc::NS ns;
    Arc::PayloadSOAP request(ns);
    Arc::XMLNode ping = request.NewChild("DataDeliveryPing");

    std::string xml;
    request.GetXML(xml, true);
    dtr->get_logger()->msg(Arc::DEBUG, "Request:\n%s", xml);

    Arc::PayloadSOAP *response = NULL;
    Arc::MCC_Status status = client.process(&request, &response);

    if (!status) {
      dtr->get_logger()->msg(Arc::ERROR, "Could not connect to service %s: %s",
                             dtr->get_delivery_endpoint().str(), (std::string)status);
      if (response) delete response;
      return false;
    }

    if (!response) {
      dtr->get_logger()->msg(Arc::ERROR, "No SOAP response from Delivery service %s",
                             dtr->get_delivery_endpoint().str());
      return false;
    }

    response->GetXML(xml, true);
    dtr->get_logger()->msg(Arc::DEBUG, "Response:\n%s", xml);

    if (response->IsFault()) {
      Arc::SOAPFault& fault = *response->Fault();
      std::string err("SOAP fault: %s", fault.Code());
      for (int n = 0;;++n) {
        if (fault.Reason(n).empty()) break;
        err += ": " + fault.Reason(n);
      }
      dtr->get_logger()->msg(Arc::ERROR, "SOAP fault from delivery service at %s: %s",
                             dtr->get_delivery_endpoint().str(), err);
      delete response;
      return false;
    }

    Arc::XMLNode resultnode = (*response)["DataDeliveryPingResponse"]["DataDeliveryPingResult"]["Result"][0];
    if (!resultnode || !resultnode["ResultCode"]) {
      dtr->get_logger()->msg(Arc::ERROR, "Bad format in XML response from delivery service at %s: %s",
                             dtr->get_delivery_endpoint().str(), xml);
      delete response;
      return false;
    }

    std::string resultcode = (std::string)(resultnode["ResultCode"]);
    if (resultcode != "OK") {
      dtr->get_logger()->msg(Arc::ERROR, "Error pinging delivery service at %s: %s: %s",
                             dtr->get_delivery_endpoint().str(), resultcode,
                             (std::string)(resultnode[0]["ErrorDescription"]));
      delete response;
      return false;
    }

    for (Arc::XMLNode dir = resultnode["AllowedDir"]; dir; ++dir) {
      allowed_dirs.push_back((std::string)dir);
      dtr->get_logger()->msg(Arc::DEBUG, "Dir %s allowed at service %s",
                             (std::string)dir, dtr->get_delivery_endpoint().str());
    }
    if (resultnode["LoadAvg"]) {
      load_avg = (std::string)(resultnode["LoadAvg"]);
    } else {
      load_avg = "-1";
    }
    delete response;
    return true;
  }

  void DataDeliveryRemoteComm::FillStatus(const Arc::XMLNode& node) {

    if (!node) {
      // initial state
      std::string empty("");
      status_.commstatus = DataDeliveryComm::CommInit;
      status_.timestamp = ::time(NULL);
      status_.status = DTRStatus::NULL_STATE;
      status_.error = DTRErrorStatus::NONE_ERROR;
      status_.error_location = DTRErrorStatus::NO_ERROR_LOCATION;
      strncpy(status_.error_desc, empty.c_str(), sizeof(status_.error_desc));
      status_.streams = 0;
      status_.transferred = 0;
      status_.size = 0;
      status_.transfer_time = 0;
      status_.offset = 0;
      status_.speed = 0;
      strncpy(status_.checksum, empty.c_str(), sizeof(status_.checksum));
      return;
    }

    Arc::XMLNode datanode = node["ResultCode"];
    if (std::string(datanode) == "TRANSFERRED") {
      status_.commstatus = CommExited;
      status_.status = DTRStatus::TRANSFERRED;
    }
    else if (std::string(datanode) == "TRANSFER_ERROR") {
      status_.commstatus = CommFailed;
      status_.status = DTRStatus::TRANSFERRED;
    }
    else if (std::string(datanode) == "SERVICE_ERROR") {
      status_.commstatus = CommFailed;
      status_.status = DTRStatus::TRANSFERRED;
    }
    else {
      status_.commstatus = CommNoError;
      status_.status = DTRStatus::TRANSFERRING;
    }
    status_.timestamp = time(NULL);

    datanode = node["ErrorStatus"];
    if (datanode) {
      int error_status;
      Arc::stringto(std::string(datanode), error_status);
      status_.error = (DTRErrorStatus::DTRErrorStatusType)error_status;
    }
    datanode = node["ErrorLocation"];
    if (datanode) {
      int error_location;
      Arc::stringto(std::string(datanode), error_location);
      status_.error_location = (DTRErrorStatus::DTRErrorLocation)error_location;
    }
    datanode = node["ErrorDescription"];
    if (datanode) {
      strncpy(status_.error_desc, ((std::string)datanode).c_str(), sizeof(status_.error_desc));
    }
    datanode = node["BytesTransferred"];
    if (datanode) {
      unsigned long long int bytes;
      Arc::stringto(std::string(datanode), bytes);
      status_.transferred = bytes;
    }
    datanode = node["TransferTime"];
    if (datanode) {
      unsigned long long int t;
      Arc::stringto(std::string(datanode), t);
      status_.transfer_time = t;
    }
    // TODO size, offset, speed (currently not used)
    datanode = node["CheckSum"];
    if (datanode) {
      strncpy(status_.checksum, ((std::string)datanode).c_str(), sizeof(status_.checksum));
    }
    // if terminal state, write log
    if (status_.commstatus != CommNoError) {
      // log message is limited to 2048 chars so just print last few lines
      std::string log = (std::string)node["Log"];
      if (!log.empty()) {
        if (log.size() > 2000) log = log.substr(log.find('\n', log.size()-2000));
        logger_->msg(Arc::INFO, "DataDelivery log tail:\n%s", log);
      }
      valid = false;
    }
  }

  bool DataDeliveryRemoteComm::SetupDelegation(Arc::XMLNode& op, const Arc::UserConfig& usercfg) {
    const std::string& cert = (!usercfg.ProxyPath().empty() ? usercfg.ProxyPath() : usercfg.CertificatePath());
    const std::string& key = (!usercfg.ProxyPath().empty() ? usercfg.ProxyPath() : usercfg.KeyPath());
    const std::string& credentials = usercfg.CredentialString();
    const std::string& token = usercfg.OToken();

    if (credentials.empty() && (key.empty() || cert.empty()) && token.empty()) {
      logger_->msg(Arc::VERBOSE, "Failed locating credentials");
      return false;
    }

    if(!client->Load()) {
      logger_->msg(Arc::VERBOSE, "Failed to initiate client connection");
      return false;
    }

    Arc::MCC* entry = client->GetEntry();
    if(!entry) {
      logger_->msg(Arc::VERBOSE, "Client connection has no entry point");
      return false;
    }

    if(token.empty()) {
      Arc::DelegationProviderSOAP * deleg = NULL;
      // Use in-memory credentials if set in UserConfig
      if (!credentials.empty()) deleg = new Arc::DelegationProviderSOAP(credentials);
      else deleg = new Arc::DelegationProviderSOAP(cert, key);

      logger_->msg(Arc::VERBOSE, "Initiating delegation procedure");
      if (!deleg->DelegateCredentialsInit(*entry, &(client->GetContext()))) {
        logger_->msg(Arc::VERBOSE, "Failed to initiate delegation credentials");
        delete deleg;
        return false;
      }
      deleg->DelegatedToken(op);
      delete deleg;
    } else {
      Arc::NS ns;
      ns["deleg"]=DELEGATION_NAMESPACE;
      Arc::XMLNode deleg_token = op.NewChild("deleg:DelegatedToken",ns);
      deleg_token.NewAttribute("deleg:Format")="token";
      deleg_token.NewChild("deleg:Id")="";
      deleg_token.NewChild("deleg:Value")=token;
    }
    return true;
  }

  void DataDeliveryRemoteComm::HandleQueryFault(const std::string& err) {
    // Just return without changing status
    logger_->msg(Arc::WARNING, err);
    status_.timestamp = time(NULL);
    // A reconnect may be needed after losing connection
    delete client;
    client = new Arc::ClientSOAP(cfg, endpoint, timeout);
  }

} // namespace DataStaging
nordugrid-arc-7.1.1/src/libs/data-staging/PaxHeaders/DTR.h0000644000000000000000000000013015067751327020236 xustar0030 mtime=1759498967.747527268 29 atime=1759498967.85949356 29 ctime=1759499028.80927355 nordugrid-arc-7.1.1/src/libs/data-staging/DTR.h0000644000175000002070000006532315067751327022143 0ustar00mockbuildmock00000000000000
// Summary page of data staging for doxygen

namespace DataStaging {
  /**
   * \defgroup datastaging ARC data staging (libarcdatastaging)
   *
   * ARC data staging components form a complete data transfer management system.
   * Whereas \ref data is a library for data access, enabling several types of
   * operation on data files on the Grid using a variety of access protocols,
   * \ref datastaging is a framework for managed data transfer to and from the
   * Grid. The data staging system is designed to run as a persistent process, to
   * execute data transfers on demand. Data transfers are defined and fed into
   * the system, and then notification is given when they complete. No knowledge
   * is required of the internal workings of the Grid; a user only needs to
   * specify URLs representing the source and destination of the transfer.
   *
   * The system is highly configurable and features an intelligent priority,
   * fair-share and error handling mechanism, as well as the ability to spread
   * data transfer across multiple hosts using ARC's DataDelivery service. It is
   * used by ARC's Computing Element (A-REX) for pre- and post-job data transfer
   * of input and output files. Note that this system is primarily for data
   * transfer to and from local files and that third-party transfer is not
   * supported. It is designed for the case of pulling or pushing data between
   * the Grid and a local file system, rather than a service for transfer between
   * two Grid storage elements. It is possible to transfer data between two
   * remote endpoints, but all data flows through the client.
   *
   * Simple examples of how to use libarcdatastaging are shown for several
   * languages in the \ref dtrgenerator "DTR examples page". In all the examples
   * a Generator class receives as input a source and destination, and creates
   * a DTR which describes the data transfer. It is then passed to the Scheduler,
   * and the Generator defines a receiveDTR() method which the Scheduler calls
   * to notify that the transfer has finished. The examples all allow using the
   * Generator as a basic copy tool from the command line to copy a single file.
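   *
   * As a minimal sketch (editorial illustration, not taken from the upstream
   * documentation), the pattern looks like this. MyGenerator stands for a
   * hypothetical DTRCallback subclass like the example Generator, and cfg,
   * jobid and logs are assumed to be prepared as described in the DTR
   * constructor documentation:
   * \code
   * MyGenerator generator;            // implements receiveDTR(DTR_ptr)
   * DataStaging::Scheduler scheduler; // the Scheduler of this framework
   * scheduler.start();
   *
   * DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg,
   *                                               jobid, Arc::User().get_uid(),
   *                                               logs));
   * dtr->registerCallback(&generator, DataStaging::GENERATOR);
   * dtr->registerCallback(&scheduler, DataStaging::SCHEDULER);
   * DataStaging::DTR::push(dtr, DataStaging::SCHEDULER);
   * // generator.receiveDTR() is called once the transfer has finished
   * \endcode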
   *
   * For more information see http://wiki.nordugrid.org/index.php/Data_Staging
   */
} // namespace DataStaging

#ifndef DTR_H_
#define DTR_H_

// NOTE (editorial): the angle-bracket #include targets in this header, and
// the template arguments of Arc::ThreadedPointer and of the std:: container
// members below, were lost in text extraction. They are reconstructed from
// the surrounding comments and usage; the exact set of includes (and their
// number) may differ from upstream.
#include <list>
#include <map>
#include <vector>

#include <arc/DateTime.h>
#include <arc/JobPerfLog.h>
#include <arc/Logger.h>
#include <arc/Thread.h>
#include <arc/URL.h>
#include <arc/User.h>
#include <arc/UserConfig.h>
#include <arc/data/DataHandle.h>

#include "DTRStatus.h"

/// DataStaging contains all components for data transfer scheduling and execution.
namespace DataStaging {

  class DTR;

  /// Provides automatic memory management of DTRs and thread-safe destruction.
  /** \ingroup datastaging */
  typedef Arc::ThreadedPointer<DTR> DTR_ptr;

  /// The DTR's Logger object can be used outside the DTR object with DTRLogger.
  /** \ingroup datastaging */
  typedef Arc::ThreadedPointer<Arc::Logger> DTRLogger;

  typedef Arc::ThreadedPointer<Arc::LogDestination> DTRLogDestination;

  /// Components of the data staging framework
  /** \ingroup datastaging */
  enum StagingProcesses {
    GENERATOR,      ///< Creator of new DTRs and receiver of completed DTRs
    SCHEDULER,      ///< Controls queues and moves DTRs between other components when necessary
    PRE_PROCESSOR,  ///< Performs all pre-transfer operations
    DELIVERY,       ///< Performs physical transfer
    POST_PROCESSOR  ///< Performs all post-transfer operations
  };

  /// Internal state of StagingProcesses
  /** \ingroup datastaging */
  enum ProcessState {
    INITIATED, ///< Process is ready to start
    RUNNING,   ///< Process is running
    TO_STOP,   ///< Process has been instructed to stop
    STOPPED    ///< Process has stopped
  };

  /// Represents limits and properties of a DTR transfer. These generally apply to all DTRs.
  /**
   * \ingroup datastaging
   * \headerfile DTR.h arc/data-staging/DTR.h
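   *
   * A filling-in sketch (illustrative values, editorial, not from upstream
   * documentation); the parameters are passed to a DataDeliveryComm
   * implementation such as DataDeliveryRemoteComm above:
   * \code
   * DataStaging::TransferParameters params;
   * params.min_average_bandwidth = 100 * 1024; // kill if whole-transfer average < 100 kiB/s
   * params.max_inactivity_time   = 300;        // kill after 5 minutes without data
   * params.min_current_bandwidth = 10 * 1024;  // kill if recent average < 10 kiB/s
   * params.averaging_time        = 60;         // "recent" means the last 60 seconds
   * \endcode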
   */
  class TransferParameters {
    public:
    /// Minimum average bandwidth in bytes/sec.
    /**
     * If the average bandwidth used over the whole transfer drops below this
     * level the transfer will be killed.
     */
    unsigned long long int min_average_bandwidth;
    /// Maximum inactivity time in sec.
    /**
     * If transfer stops for longer than this time it will be killed.
     */
    unsigned int max_inactivity_time;
    /// Minimum current bandwidth in bytes/sec.
    /**
     * If bandwidth averaged over the previous averaging_time seconds is less
     * than min_current_bandwidth the transfer will be killed (allows transfers
     * which slow down to be killed quicker).
     */
    unsigned long long int min_current_bandwidth;
    /// The time in seconds over which to average the calculation of min_current_bandwidth.
    unsigned int averaging_time;
    /// Constructor. Initialises all values to zero.
    TransferParameters() : min_average_bandwidth(0), max_inactivity_time(0),
                           min_current_bandwidth(0), averaging_time(0) {};
  };

  /// The configured cache directories
  /**
   * \ingroup datastaging
   * \headerfile DTR.h arc/data-staging/DTR.h
   */
  class DTRCacheParameters {
    public:
    /// List of (cache dir [link dir])
    std::vector<std::string> cache_dirs;
    /// List of draining caches
    std::vector<std::string> drain_cache_dirs;
    /// List of read-only caches
    std::vector<std::string> readonly_cache_dirs;
    /// Constructor with empty lists initialised
    DTRCacheParameters(void) {};
    /// Constructor with supplied cache lists
    DTRCacheParameters(std::vector<std::string> caches,
                       std::vector<std::string> drain_caches,
                       std::vector<std::string> readonly_caches);
  };

  /// Class for storing credential information
  /**
   * To avoid handling credentials directly this class is used to hold
   * information in simple string/time attributes. It should be filled before
   * the DTR is started.
   * \ingroup datastaging
   * \headerfile DTR.h arc/data-staging/DTR.h
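   *
   * For example (hypothetical values, editorial, not from upstream
   * documentation):
   * \code
   * std::list<std::string> fqans;
   * fqans.push_back("/myvo/mygroup/Role=production");
   * DataStaging::DTRCredentialInfo cred("/DC=org/DC=example/CN=user",
   *                                     Arc::Time(time(NULL) + 12*60*60),
   *                                     fqans);
   * dtr->set_credential_info(cred);
   * \endcode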
   */
  class DTRCredentialInfo {
    public:
    /// Default constructor
    DTRCredentialInfo() {};
    /// Constructor with supplied credential info
    DTRCredentialInfo(const std::string& DN,
                      const Arc::Time& expirytime,
                      const std::list<std::string> vomsfqans);
    /// Get the DN
    std::string getDN() const { return DN; };
    /// Get the expiry time
    Arc::Time getExpiryTime() const { return expirytime; };
    /// Get the VOMS VO
    std::string extractVOMSVO() const;
    /// Get the VOMS Group (first in the supplied list of fqans)
    std::string extractVOMSGroup() const;
    /// Get the VOMS Role (first in the supplied list of fqans)
    std::string extractVOMSRole() const;
    private:
    std::string DN;
    Arc::Time expirytime;
    std::list<std::string> vomsfqans;
  };

  /// Represents possible cache states of this DTR
  /** \ingroup datastaging */
  enum CacheState {
    CACHEABLE,             ///< Source should be cached
    NON_CACHEABLE,         ///< Source should not be cached
    CACHE_ALREADY_PRESENT, ///< Source is available in cache from before
    CACHE_DOWNLOADED,      ///< Source has just been downloaded and put in cache
    CACHE_LOCKED,          ///< Cache file is locked
    CACHE_SKIP,            ///< Source is cacheable but due to some problem should not be cached
    CACHE_NOT_USED         ///< Cache was started but was not used
  };

  /// The base class from which all callback-enabled classes should be derived.
  /**
   * This class is a container for a callback method which is called when a
   * DTR is to be passed to a component. Several components in data staging
   * (eg Scheduler, Generator) are subclasses of DTRCallback, which allows
   * them to receive DTRs through the callback system.
   * \ingroup datastaging
   * \headerfile DTR.h arc/data-staging/DTR.h
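   *
   * A minimal subclass sketch (editorial illustration, not from upstream
   * documentation):
   * \code
   * class MyGenerator : public DataStaging::DTRCallback {
   *  public:
   *   // called by the Scheduler when it has finished processing the DTR
   *   virtual void receiveDTR(DataStaging::DTR_ptr dtr) {
   *     std::cout << "DTR " << dtr->get_id() << " finished in state "
   *               << dtr->get_status().str() << std::endl;
   *   }
   * };
   * \endcode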
   */
  class DTRCallback {
    public:
    /// Empty virtual destructor
    virtual ~DTRCallback() {};
    /// Defines the callback method called when a DTR is pushed to this object.
    /**
     * The automatic memory management of DTR_ptr ensures that the DTR object
     * is only deleted when the last copy is deleted.
     */
    virtual void receiveDTR(DTR_ptr dtr) = 0;
    // TODO
    //virtual void suspendDTR(DTR& dtr) = 0;
    //virtual void cancelDTR(DTR& dtr) = 0;
  };

  /// Data Transfer Request.
  /**
   * DTR stands for Data Transfer Request and a DTR describes a data transfer
   * between two endpoints, a source and a destination. There are several
   * parameters and options relating to the transfer contained in a DTR.
   * The normal workflow is for a Generator to create a DTR and send it to the
   * Scheduler for processing using DTR::push(SCHEDULER). If the Generator is a
   * subclass of DTRCallback, when the Scheduler has finished with the DTR
   * the DTRCallback::receiveDTR() callback method is called.
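   *
   * For example (editorial illustration, not from upstream documentation;
   * source, destination, cfg, jobid and logs as in the DTR constructor):
   * \code
   * DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, cfg,
   *                                               jobid, Arc::User().get_uid(),
   *                                               logs));
   * DataStaging::DTR_ptr same = dtr;  // both point at one reference-counted DTR
   * // DataStaging::DTR copy(*dtr);   // does not compile: copying is private
   * // The DTR object is deleted only when the last DTR_ptr to it is destroyed.
   * \endcode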
* * DTRs should always be used through the Arc::ThreadedPointer DTR_ptr. This * ensures proper memory management when passing DTRs among various threads. * To enforce this policy the copy constructor and assignment operator are * private. * * A lock protects member variables that are likely to be accessed and * modified by multiple threads. * \ingroup datastaging * \headerfile DTR.h arc/data-staging/DTR.h */ class DTR { private: /// Identifier std::string DTR_ID; /// UserConfig and URL objects. Needed as DataHandle keeps a reference to them. Arc::URL source_url; Arc::URL destination_url; Arc::UserConfig cfg; /// Source file Arc::DataHandle source_endpoint; /// Destination file Arc::DataHandle destination_endpoint; /// Source file as a string std::string source_url_str; /// Destination file as a string std::string destination_url_str; /// Endpoint of cached file. /* Kept as string so we don't need to duplicate DataHandle properties * of destination. Delivery should check if this is set and if so use * it as destination. */ std::string cache_file; /// Cache configuration DTRCacheParameters cache_parameters; /// Cache state for this DTR CacheState cache_state; /// Local user information Arc::User user; /// Credential information DTRCredentialInfo credentials; /// Job that requested the transfer. Could be used as a generic way of grouping DTRs. std::string parent_job_id; /// A flattened number set by the scheduler int priority; /// Transfer share this DTR belongs to std::string transfershare; /// This string can be used to form sub-sets of transfer shares. /** It is appended to transfershare. It can be used by the Generator * for example to split uploads and downloads into separate shares or * make shares for different endpoints. */ std::string sub_share; /// Number of attempts left to complete this DTR unsigned int tries_left; /// Initial number of attempts unsigned int initial_tries; /// A flag to say whether the DTR is replicating inside the same LFN of an index service bool replication; /// A flag to say whether to forcibly register the destination in an index service. /** Even if the source is not the same file, the destination will be * registered to an existing LFN. It should be set to true in * the case where an output file is uploaded to several locations but * with the same index service LFN */ bool force_registration; /// The file that the current source is mapped to. /** Delivery should check if this is set and if so use this as source. */ std::string mapped_source; /// The file that the current destination is mapped to. /** Delivery should check if this is set and if so use this as destination. */ std::string mapped_destination; /// Status of the DTR DTRStatus status; /// Error status of the DTR DTRErrorStatus error_status; /// Number of bytes transferred so far unsigned long long int bytes_transferred; // TODO and/or offset? 
    /// Time taken in ns to complete transfer (0 if incomplete)
    unsigned long long int transfer_time;

    /** Timing variables **/
    /// When should we finish the current action
    Arc::Time timeout;
    /// Creation time
    Arc::Time created;
    /// Modification time
    Arc::Time last_modified;
    /// Wait until this time before doing more processing
    Arc::Time next_process_time;

    /// True if some process requested cancellation
    bool cancel_request;
    /// Bulk start flag
    bool bulk_start;
    /// Bulk end flag
    bool bulk_end;
    /// Whether bulk operations are supported for the source
    bool source_supports_bulk;
    /// Flag to say whether success of the DTR is mandatory
    bool mandatory;
    /// Endpoint of delivery service this DTR is scheduled for.
    /** By default it is LOCAL_DELIVERY so local Delivery is used. */
    Arc::URL delivery_endpoint;
    /// List of problematic endpoints - those which the DTR definitely cannot use
    std::vector<Arc::URL> problematic_delivery_endpoints;
    /// Whether to use host instead of user credentials for contacting remote delivery services.
    bool use_host_cert_for_remote_delivery;
    /// The process in charge of this DTR right now
    StagingProcesses current_owner;
    /// Logger object.
    /** Creation and deletion of this object should be managed
     * in the Generator and a pointer passed in the DTR constructor. */
    DTRLogger logger;
    /// Log Destinations.
    /** This list is kept here so that the Logger can be connected and
     * disconnected in threads which have their own root logger
     * to avoid duplicate messages */
    std::list<DTRLogDestination> log_destinations;
    /// Flag to say whether to delete LogDestinations.
    /** Set to true when a DTR thread is stuck or lost so it doesn't crash when
     * waking up after DTR has finished */
    //bool delete_log_destinations;
    /// Performance metric logger
    Arc::JobPerfLog perf_log;
    /// Performance record used for recording transfer time
    Arc::JobPerfRecord perf_record;
    /// List of callback methods called when DTR moves between processes
    std::map<StagingProcesses, std::list<DTRCallback*> > proc_callback;
    /// Lock to avoid collisions while changing DTR properties
    Arc::SimpleCondition lock;

    /** Possible fields (types, names and so on are subject to change) **

    /// DTRs that are grouped must have the same number here
    int affiliation;
    /// History of recent statuses
    DTRStatus::DTRStatusType *history_of_statuses;

    **/

    /* Methods */
    /// Change modification time
    void mark_modification () { last_modified.SetTime(time(NULL)); };
    /// Get the list of callbacks for this owner. Protected by lock.
    std::list<DTRCallback*> get_callbacks(const std::map<StagingProcesses, std::list<DTRCallback*> >& proc_callback,
                                          StagingProcesses owner);
    /// Private and not implemented because DTR_ptr should always be used.
    DTR& operator=(const DTR& dtr);
    DTR(const DTR& dtr);
    DTR();

   public:
    /// URL that is used to denote local Delivery should be used
    static const Arc::URL LOCAL_DELIVERY;

    /// Log level for all DTR activity
    static Arc::LogLevel LOG_LEVEL;

    /// Normal constructor.
    /** Construct a new DTR.
     * @param source Endpoint from which to read data
     * @param destination Endpoint to which to write data
     * @param usercfg Provides some user configuration information
     * @param jobid ID of the job associated with this data transfer
     * @param uid UID to use when accessing local file system if source
     * or destination is a local file. If this is different to the current
     * uid then the current uid must have sufficient privileges to change uid.
     * @param logs List of ThreadedPointers to Logger Destinations to
     * receive DTR processing messages.
     * @param logname Subdomain name to use for internal DTR logger.
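     *
     * A hedged construction sketch; the endpoints and job ID are illustrative,
     * usercfg is an existing Arc::UserConfig and generator a DTRCallback
     * subclass instance (both assumed to exist):
     * \code
     * std::list<DTRLogDestination> logs;
     * DTR_ptr dtr(new DTR("https://example.org/input/file1", "/scratch/job1/file1",
     *                     usercfg, "job1", Arc::User().get_uid(), logs));
     * dtr->registerCallback(&generator, GENERATOR);
     * DTR::push(dtr, SCHEDULER);
     * \endcode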
     */
    DTR(const std::string& source,
        const std::string& destination,
        const Arc::UserConfig& usercfg,
        const std::string& jobid,
        const uid_t& uid,
        std::list<DTRLogDestination> const& logs,
        const std::string& logname = std::string("DTR"));

    /// Empty destructor
    ~DTR() {};

    /// Is DTR valid?
    operator bool() const {
      return (!DTR_ID.empty());
    }
    /// Is DTR not valid?
    bool operator!() const {
      return (DTR_ID.empty());
    }

    /// Register callback objects to be used during DTR processing.
    /**
     * Objects deriving from DTRCallback can be registered with this method.
     * The callback method of these objects will then be called when the DTR
     * is passed to the specified owner. Protected by lock.
     */
    void registerCallback(DTRCallback* cb, StagingProcesses owner);

    /// Reset information held on this DTR, such as resolved replicas, error state etc.
    /**
     * Useful when a failed DTR is to be retried.
     */
    void reset();

    /// Set the ID of this DTR. Useful when passing DTR between processes.
    void set_id(const std::string& id);
    /// Get the ID of this DTR
    std::string get_id() const { return DTR_ID; };
    /// Get an abbreviated version of the DTR ID - useful to reduce logging verbosity
    std::string get_short_id() const;

    /// Get source handle. Return by reference since DataHandle cannot be copied
    Arc::DataHandle& get_source() { return source_endpoint; };
    /// Get destination handle. Return by reference since DataHandle cannot be copied
    Arc::DataHandle& get_destination() { return destination_endpoint; };

    /// Get source as a string
    std::string get_source_str() const { return source_url_str; };
    /// Get destination as a string
    std::string get_destination_str() const { return destination_url_str; };

    /// Get the UserConfig object associated with this DTR
    const Arc::UserConfig& get_usercfg() const { return cfg; };

    /// Set the timeout for processing this DTR
    void set_timeout(time_t value) { timeout.SetTime(Arc::Time().GetTime() + value); };
    /// Get the timeout for processing this DTR
    Arc::Time get_timeout() const { return timeout; };

    /// Set the next processing time to current time + given time
    void set_process_time(const Arc::Period& process_time);
    /// Get the next processing time for the DTR
    Arc::Time get_process_time() const { return next_process_time; };

    /// Get the creation time
    Arc::Time get_creation_time() const { return created; };

    /// Get the modification time
    Arc::Time get_modification_time() const { return last_modified; };

    /// Get the parent job ID
    std::string get_parent_job_id() const { return parent_job_id; };

    /// Set the priority
    void set_priority(int pri);
    /// Get the priority
    int get_priority() const { return priority; };

    /// Set credential info
    void set_credential_info(const DTRCredentialInfo& cred) { credentials = cred; };
    /// Get credential info
    const DTRCredentialInfo& get_credential_info() const { return credentials; };

    /// Set the transfer share. sub_share is automatically added to transfershare.
    void set_transfer_share(const std::string& share_name);
    /// Get the transfer share. sub_share is automatically added to transfershare.
    std::string get_transfer_share() const { return transfershare; };

    /// Set sub-share
    void set_sub_share(const std::string& share) { sub_share = share; };
    /// Get sub-share
    std::string get_sub_share() const { return sub_share; };

    /// Set the number of attempts remaining
    void set_tries_left(unsigned int tries);
    /// Get the number of attempts remaining
    unsigned int get_tries_left() const { return tries_left; };
    /// Get the initial number of attempts (set by set_tries_left())
    unsigned int get_initial_tries() const { return initial_tries; }
    /// Decrease attempt number
    void decrease_tries_left();

    /// Set the status. Protected by lock.
    void set_status(DTRStatus stat);
    /// Get the status. Protected by lock.
    DTRStatus get_status();

    /// Set the error status.
    /**
     * The DTRErrorStatus last error state field is set to the current status
     * of the DTR. Protected by lock.
     */
    void set_error_status(DTRErrorStatus::DTRErrorStatusType error_stat,
                          DTRErrorStatus::DTRErrorLocation error_loc,
                          const std::string& desc="");
    /// Set the error status back to NONE_ERROR and clear other fields
    void reset_error_status();
    /// Get the error status.
    DTRErrorStatus get_error_status();

    /// Set bytes transferred (should be set by whatever is controlling the transfer)
    void set_bytes_transferred(unsigned long long int bytes);
    /// Get current number of bytes transferred
    unsigned long long int get_bytes_transferred() const { return bytes_transferred; };
    /// Set transfer time (should be set by whatever is controlling the transfer)
    void set_transfer_time(unsigned long long int t);
    /// Get transfer time
    unsigned long long int get_transfer_time() const { return transfer_time; };

    /// Set the DTR to be cancelled
    void set_cancel_request();
    /// Returns true if cancellation has been requested
    bool cancel_requested() const { return cancel_request; };

    /// Set delivery endpoint
    void set_delivery_endpoint(const Arc::URL& endpoint) { delivery_endpoint = endpoint; };
    /// Returns delivery endpoint
    const Arc::URL& get_delivery_endpoint() const { return delivery_endpoint; };

    /// Add problematic endpoint.
    /**
     * Should only be those endpoints where there is a problem with the service
     * itself and not the transfer.
     */
    void add_problematic_delivery_service(const Arc::URL& endpoint) { problematic_delivery_endpoints.push_back(endpoint); };
    /// Get all problematic endpoints
    const std::vector<Arc::URL>& get_problematic_delivery_services() const { return problematic_delivery_endpoints; };

    /// Set the flag for using host certificate for contacting remote delivery services
    void host_cert_for_remote_delivery(bool host) { use_host_cert_for_remote_delivery = host; };
    /// Get the flag for using host certificate for contacting remote delivery services
    bool host_cert_for_remote_delivery() const { return use_host_cert_for_remote_delivery; };

    /// Set cache filename
    void set_cache_file(const std::string& filename);
    /// Get cache filename
    std::string get_cache_file() const { return cache_file; };

    /// Set cache parameters
    void set_cache_parameters(const DTRCacheParameters& param) { cache_parameters = param; };
    /// Get cache parameters
    const DTRCacheParameters& get_cache_parameters() const { return cache_parameters; };

    /// Set the cache state
    void set_cache_state(CacheState state);
    /// Get the cache state
    CacheState get_cache_state() const { return cache_state; };

    /// Set the mapped source file
    void set_mapped_source(const std::string& file = "") { mapped_source = file; };
    /// Get the mapped source file
    std::string get_mapped_source() const { return mapped_source; };

    /// Set the mapped destination file
    void set_mapped_destination(const std::string& file = "") { mapped_destination = file; };
    /// Get the mapped destination file
    std::string get_mapped_destination() const { return mapped_destination; };

    /// Find the DTR owner
    StagingProcesses get_owner() const { return current_owner; };

    /// Get the local user information
    Arc::User get_local_user() const { return user; };

    /// Set replication flag
    void set_replication(bool rep) { replication = rep; };
    /// Get replication flag
    bool is_replication() const { return replication; };
    /// Set force registration flag
    void set_force_registration(bool force) { force_registration = force; };
    /// Get force registration flag
    bool is_force_registration() const { return force_registration; };

    /// Set bulk start flag
    void set_bulk_start(bool value) { bulk_start = value; };
    /// Get bulk start flag
    bool get_bulk_start() const { return bulk_start; };
    /// Set bulk end flag
    void set_bulk_end(bool value) { bulk_end = value; };
    /// Get bulk end flag
    bool get_bulk_end() const { return bulk_end; };
    /// Whether bulk operation is possible according to current state and src/dest
    bool bulk_possible();

    /// Whether DTR success is mandatory
    bool is_mandatory() const { return mandatory; };

    /// Get Logger object, so that processes can log to this DTR's log
    const DTRLogger& get_logger() const { return logger; };

    /// Get log destinations assigned to this instance.
    std::list<DTRLogDestination> get_log_destinations() const;

    /// Pass the DTR from one process to another. Protected by lock.
    static void push(DTR_ptr dtr, StagingProcesses new_owner);

    /// Suspend a DTR which is currently doing a transfer in the delivery process
    bool suspend();

    /// Did an error happen?
    bool error() const { return (error_status != DTRErrorStatus::NONE_ERROR); }

    /// Returns true if this DTR is about to go into the pre-processor
    bool is_destined_for_pre_processor() const;
    /// Returns true if this DTR is about to go into the post-processor
    bool is_destined_for_post_processor() const;
    /// Returns true if this DTR is about to go into delivery
    bool is_destined_for_delivery() const;

    /// Returns true if this DTR just came from the pre-processor
    bool came_from_pre_processor() const;
    /// Returns true if this DTR just came from the post-processor
    bool came_from_post_processor() const;
    /// Returns true if this DTR just came from delivery
    bool came_from_delivery() const;
    /// Returns true if this DTR just came from the generator
    bool came_from_generator() const;
    /// Returns true if this DTR is in a final state (finished, failed or cancelled)
    bool is_in_final_state() const;

    /// Get the performance log
    Arc::JobPerfLog& get_job_perf_log() { return perf_log; };
    /// Get the performance log record
    Arc::JobPerfRecord& get_job_perf_record() { return perf_record; };
  };

  /// Helper method to create smart pointer, only for swig bindings
  DTR_ptr createDTRPtr(const std::string& source,
                       const std::string& destination,
                       const Arc::UserConfig& usercfg,
                       const std::string& jobid,
                       const uid_t& uid,
                       std::list<DTRLogDestination> const& logs,
                       const std::string& logname = std::string("DTR"));

  /// Helper method to create smart pointer, only for swig bindings
  DTRLogger createDTRLogger(Arc::Logger& parent, const std::string& subdomain);

} // namespace DataStaging

#endif /*DTR_H_*/

nordugrid-arc-7.1.1/src/libs/data-staging/DataDelivery.cpp

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "DataDeliveryComm.h"
#include "DataDelivery.h"

namespace DataStaging {

  Arc::Logger DataDelivery::logger(Arc::Logger::getRootLogger(), "DataStaging.DataDelivery");

  /// Wrapper class around DataDeliveryComm
  class DataDelivery::delivery_pair_t {
   public:
    DTR_ptr dtr;
    TransferParameters params;
    DataDeliveryComm* comm;
    bool cancelled;
    Arc::SimpleCounter thread_count;
    delivery_pair_t(DTR_ptr request, const TransferParameters& params);
    ~delivery_pair_t();
    void start();
  };

  DataDelivery::delivery_pair_t::delivery_pair_t(DTR_ptr request, const TransferParameters& params)
    :dtr(request),params(params),comm(NULL),cancelled(false) {}

  DataDelivery::delivery_pair_t::~delivery_pair_t() {
    if (comm) delete comm;
  }

  void DataDelivery::delivery_pair_t::start() {
    comm = DataDeliveryComm::CreateInstance(dtr, params);
  }

  DataDelivery::DataDelivery(): delivery_state(INITIATED) {
  }

  bool DataDelivery::start() {
    if(delivery_state == RUNNING || delivery_state == TO_STOP) return false;
    delivery_state = RUNNING;
    Arc::CreateThreadFunction(&main_thread,this);
    return true;
  }
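  // receiveDTR() below accepts a DTR pushed from the Scheduler and only queues
  // it; the DataDeliveryComm object that performs the actual transfer is
  // created later, inside main_thread(), so this call never blocks on a transfer.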
  void DataDelivery::receiveDTR(DTR_ptr dtr) {
    if(!(*dtr)) {
      logger.msg(Arc::ERROR, "Received invalid DTR");
      dtr->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR,
                            DTRErrorStatus::ERROR_UNKNOWN, "Invalid DTR");
      dtr->set_status(DTRStatus::TRANSFERRED);
      DTR::push(dtr, SCHEDULER);
      return;
    }
    dtr->get_logger()->msg(Arc::INFO, "Delivery received new DTR %s with source: %s, destination: %s",
                           dtr->get_id(), dtr->get_source()->CurrentLocation().str(),
                           dtr->get_destination()->CurrentLocation().str());
    dtr->set_status(DTRStatus::TRANSFERRING);

    delivery_pair_t* d = new delivery_pair_t(dtr, transfer_params);
    dtr_list_lock.lock();
    dtr_list.push_back(d);
    dtr_list_lock.unlock();
    cond.signal();
    return;
  }

  bool DataDelivery::cancelDTR(DTR_ptr request) {
    if(!request) {
      logger.msg(Arc::ERROR, "Received no DTR");
      return false;
    }
    if(!(*request)) {
      logger.msg(Arc::ERROR, "Received invalid DTR");
      request->set_status(DTRStatus::ERROR);
      return false;
    }
    dtr_list_lock.lock();
    for (std::list<delivery_pair_t*>::iterator i = dtr_list.begin(); i != dtr_list.end(); ++i) {
      delivery_pair_t* ip = *i;
      if (ip->dtr->get_id() == request->get_id()) {
        request->get_logger()->msg(Arc::INFO, "Cancelling DTR %s with source: %s, destination: %s",
                                   request->get_id(), request->get_source()->str(),
                                   request->get_destination()->str());
        ip->cancelled = true;
        ip->dtr->set_status(DTRStatus::TRANSFERRING_CANCEL);
        dtr_list_lock.unlock();
        cond.signal();
        return true;
      }
    }
    // DTR is not in the active transfer list, probably because it just finished
    dtr_list_lock.unlock();
    request->get_logger()->msg(Arc::WARNING, "DTR %s requested cancel but no active transfer",
                               request->get_id());
    // if request is already TRANSFERRED, no need to push to Scheduler again
    if (request->get_status() != DTRStatus::TRANSFERRED) {
      request->set_status(DTRStatus::TRANSFERRED);
      DTR::push(request, SCHEDULER);
    }
    return true;
  }

  bool DataDelivery::stop() {
    if(delivery_state != RUNNING) return false;
    delivery_state = TO_STOP;
    cond.signal();
    run_signal.wait();
    delivery_state = STOPPED;
    return true;
  }

  void DataDelivery::SetTransferParameters(const TransferParameters& params) {
    transfer_params = params;
  }

  void DataDelivery::start_delivery(void* arg) {
    delivery_pair_t* dp = (delivery_pair_t*)arg;
    dp->start();
  }

  void DataDelivery::stop_delivery(void* arg) {
    delivery_pair_t* dp = (delivery_pair_t*)arg;
    delete dp->comm;
    dp->comm = NULL;
    // In case transfer finished before getting cancel signal, delete destination
    if (dp->cancelled || dp->dtr->error()) dp->dtr->get_destination()->Remove();
  }

  bool DataDelivery::delete_delivery_pair(delivery_pair_t* dp) {
    bool res = Arc::CreateThreadFunction(&stop_delivery, dp, &dp->thread_count);
    if (res) {
      res = dp->thread_count.wait(300*1000);
    }
    if (res) delete dp;
    return res;
  }

  // Delete DTR destination, called after losing contact with delivery process
  static void delete_dtr_destination(DTR_ptr dtr) {
    Arc::URL dest(dtr->get_destination()->CurrentLocation());
    // Check for TURL
    if (!dtr->get_destination()->TransferLocations().empty()) {
      dest = dtr->get_destination()->TransferLocations().front();
    }
    // Check for cache file
    if ((dtr->get_cache_state() == CACHEABLE) && !dtr->get_cache_file().empty()) {
      dest = dtr->get_cache_file();
    }
    dtr->get_logger()->msg(Arc::VERBOSE, "Cleaning up after failure: deleting %s", dest.str());
    Arc::DataHandle h(dest, dtr->get_usercfg());
    if (h) h->Remove();
  }

  void DataDelivery::main_thread (void* arg) {
    DataDelivery* it = (DataDelivery*)arg;
    it->main_thread();
  }

  void DataDelivery::main_thread (void) {
    // disconnect from root logger so
    // messages are logged to per-DTR Logger
    Arc::Logger::getRootLogger().setThreadContext();
    Arc::Logger::getRootLogger().removeDestinations();
    Arc::Logger::getRootLogger().setThreshold(DTR::LOG_LEVEL);

    while(delivery_state != TO_STOP){
      dtr_list_lock.lock();
      std::list<delivery_pair_t*>::iterator d = dtr_list.begin();
      dtr_list_lock.unlock();
      for(;;) {
        dtr_list_lock.lock();
        if(d == dtr_list.end()) {
          dtr_list_lock.unlock();
          break;
        }
        dtr_list_lock.unlock();
        delivery_pair_t* dp = *d;
        // first check for cancellation
        if (dp->cancelled) {
          dtr_list_lock.lock();
          d = dtr_list.erase(d);
          dtr_list_lock.unlock();
          // deleting delivery_pair_t kills the spawned process
          // Do this before passing back to Scheduler to avoid race condition
          // of DTR being deleted before Comm object has finished with it.
          // With ThreadedPointer this may not be a problem any more.
          DTR_ptr tmp = dp->dtr;
          if (!delete_delivery_pair(dp)) {
            tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out");
          }
          tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host());
          tmp->set_status(DTRStatus::TRANSFERRED);
          DTR::push(tmp, SCHEDULER);
          continue;
        }
        // check for new transfer
        if (!dp->comm) {
          dp->dtr->get_job_perf_record().Start(dp->dtr->get_short_id());
          // Connecting to a remote delivery service can hang in rare cases,
          // so launch a separate thread with a timeout
          bool res = Arc::CreateThreadFunction(&start_delivery, dp, &dp->thread_count);
          if (res) {
            res = dp->thread_count.wait(300*1000);
          }
          if (!res) {
            // error or timeout - in this case do not delete dp since if the
            // thread timed out it may wake up at some point. Better to have a
            // small memory leak than seg fault.
            dtr_list_lock.lock();
            d = dtr_list.erase(d);
            dtr_list_lock.unlock();
            DTR_ptr tmp = dp->dtr;
            tmp->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR,
                                  DTRErrorStatus::NO_ERROR_LOCATION,
                                  "Failed to start thread to start delivery or thread timed out");
            tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host());
            tmp->set_status(DTRStatus::TRANSFERRED);
            DTR::push(tmp, SCHEDULER);
          } else {
            dtr_list_lock.lock();
            ++d;
            dtr_list_lock.unlock();
          }
          continue;
        }
        // ongoing transfer - get status
        DataDeliveryComm::Status status;
        status = dp->comm->GetStatus();
        dp->dtr->set_bytes_transferred(status.transferred);

        if((status.commstatus == DataDeliveryComm::CommExited) ||
           (status.commstatus == DataDeliveryComm::CommClosed) ||
           (status.commstatus == DataDeliveryComm::CommFailed)) {
          // Transfer finished - either successfully or with error
          dtr_list_lock.lock();
          d = dtr_list.erase(d);
          dtr_list_lock.unlock();
          if ((status.commstatus == DataDeliveryComm::CommFailed) ||
              (status.error != DTRErrorStatus::NONE_ERROR)) {
            if (status.error == DTRErrorStatus::NONE_ERROR) {
              // Lost track of process - delete destination so it can be tried again
              delete_dtr_destination(dp->dtr);
              status.error = DTRErrorStatus::INTERNAL_PROCESS_ERROR;
            }
            dp->dtr->set_error_status(status.error,status.error_location,
                                      status.error_desc[0]?status.error_desc:dp->comm->GetError().c_str());
          } else if (status.checksum[0]) {
            dp->dtr->get_destination()->SetCheckSum(status.checksum);
          }
          dp->dtr->get_logger()->msg(Arc::INFO, "Transfer finished: %llu bytes transferred %s",
                                     status.transferred,
                                     (status.checksum[0] ?
                                      ": checksum "+std::string(status.checksum) : " "));
          timespec dummy;
          dp->dtr->get_job_perf_log().Log("DeliveryTransferTime_"+dp->dtr->get_delivery_endpoint().Host(),
                                          dp->dtr->get_short_id()+"\t"+Arc::tostring(status.transfer_time),
                                          dummy, dummy);
          dp->dtr->set_transfer_time(status.transfer_time);
          DTR_ptr tmp = dp->dtr;
          if (!delete_delivery_pair(dp)) {
            tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out");
          }
          tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host());
          tmp->set_status(DTRStatus::TRANSFERRED);
          DTR::push(tmp, SCHEDULER);
          continue;
        }
        if(!(*(dp->comm))) {
          // Error happened - either delivery process is stuck or could not start
          dtr_list_lock.lock();
          d = dtr_list.erase(d);
          dtr_list_lock.unlock();
          std::string comm_err = dp->comm->GetError();
          if (status.commstatus == DataDeliveryComm::CommInit) {
            if (comm_err.empty()) comm_err = "Failed to start delivery process";
            if (dp->dtr->get_delivery_endpoint() == DTR::LOCAL_DELIVERY) {
              // Serious problem, so mark permanent error
              dp->dtr->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR,
                                        DTRErrorStatus::ERROR_TRANSFER,
                                        comm_err);
            } else {
              // Failing to start on remote service should be retried
              dp->dtr->add_problematic_delivery_service(dp->dtr->get_delivery_endpoint());
              dp->dtr->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR,
                                        DTRErrorStatus::ERROR_TRANSFER,
                                        comm_err);
            }
          } else {
            if (comm_err.empty()) comm_err = "Connection with delivery process lost";
            // delete destination so it can be tried again
            delete_dtr_destination(dp->dtr);
            dp->dtr->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR,
                                      DTRErrorStatus::ERROR_TRANSFER,
                                      comm_err);
          }
          DTR_ptr tmp = dp->dtr;
          if (!delete_delivery_pair(dp)) {
            tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out");
          }
          tmp->get_job_perf_record().End("SchedulerTransferTime_"+tmp->get_delivery_endpoint().Host());
          tmp->set_status(DTRStatus::TRANSFERRED);
          DTR::push(tmp, SCHEDULER);
          continue;
        }
        dtr_list_lock.lock();
        ++d;
        dtr_list_lock.unlock();
      }
      // Go through main loop when a new transfer arrives or after a short poll interval
      cond.wait(100);
    }
    // Kill any transfers still running
    dtr_list_lock.lock();
    for (std::list<delivery_pair_t*>::iterator d = dtr_list.begin(); d != dtr_list.end();) {
      DTR_ptr tmp = (*d)->dtr;
      if (!delete_delivery_pair(*d)) {
        tmp->get_logger()->msg(Arc::ERROR, "Failed to delete delivery object or deletion timed out");
      }
      d = dtr_list.erase(d);
    }
    dtr_list_lock.unlock();
    logger.msg(Arc::INFO, "Data delivery loop exited");
    run_signal.signal();
  }

} // namespace DataStaging

nordugrid-arc-7.1.1/src/libs/data-staging/Scheduler.h

#ifndef SCHEDULER_H_
#define SCHEDULER_H_

#include <mutex>

#include <arc/JobPerfLog.h>
#include <arc/Logger.h>
#include <arc/Thread.h>
#include <arc/URL.h>
#include <arc/data/URLMap.h>

#include "DTR.h"
#include "DTRList.h"
#include "Processor.h"
#include "DataDelivery.h"
#include "TransferShares.h"

namespace DataStaging {

  /// The Scheduler is the control centre of the data staging framework.
  /**
   * The Scheduler manages a global list of DTRs and schedules when they should
   * go into the next state or be sent to other processes. The DTR priority is
   * used to decide each DTR's position in a queue.
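   *
   * A hedged usage sketch (slot counts illustrative; see the Set* methods
   * declared below):
   * \code
   * Scheduler* scheduler = Scheduler::getInstance();
   * scheduler->SetSlots(20, 20, 10, 2, 200);
   * scheduler->start();
   * DTR::push(dtr, SCHEDULER);  // hand a DTR over for processing
   * \endcode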
   * \ingroup datastaging
   * \headerfile Scheduler.h arc/data-staging/Scheduler.h
   */
  class Scheduler: public DTRCallback {

   private:

    /// All the DTRs the scheduler is aware of.
    /** The DTR comes to this list once received from the generator
     * and leaves the list only when pushed back to the generator. */
    DTRList DtrList;

    /// A list of jobs that have been requested to be cancelled.
    /** External threads add items to this list, and the Scheduler
     * processes it during the main loop. */
    std::list<std::string> cancelled_jobs;

    /// A list of DTRs to process
    std::list<DTR_ptr> events;

    /// Map of transfer shares to staged DTRs. Filled each event processing loop
    std::map<std::string, std::set<DTR_ptr> > staged_queue;

    /// A lock for the cancelled jobs list
    Arc::SimpleCondition cancelled_jobs_lock;

    /// Configuration of transfer shares
    TransferSharesConf transferSharesConf;

    /// URLMap containing information on any local mappings defined in the configuration
    Arc::URLMap url_map;

    /// Preferred pattern to match replicas defined in configuration
    std::string preferred_pattern;

    /// Lock to protect multi-threaded access to start() and stop()
    Arc::SimpleCondition state_lock;

    /// Lock for events list
    Arc::SimpleCondition event_lock;

    /// Condition to signal end of running
    Arc::SimpleCondition run_signal;

    /// Condition to signal end of dump thread
    Arc::SimpleCondition dump_signal;

    /// Limit on number of DTRs in pre-processor
    unsigned int PreProcessorSlots;
    /// Limit on number of DTRs in delivery
    unsigned int DeliverySlots;
    /// Limit on number of DTRs in post-processor
    unsigned int PostProcessorSlots;
    /// Limit on number of emergency DTRs in each state
    unsigned int EmergencySlots;
    /// Limit on number of staged-prepared files, per share
    unsigned int StagedPreparedSlots;

    /// Where to dump DTR state. Currently only a path to a file is supported.
    std::string dumplocation;

    /// Performance metrics logger
    Arc::JobPerfLog job_perf_log;

    /// Endpoints of delivery services from configuration
    std::vector<Arc::URL> configured_delivery_services;

    /// Map of delivery services and directories they can access, filled after
    /// querying all services when the first DTR is processed
    std::map<Arc::URL, std::vector<std::string> > usable_delivery_services;

    /// Timestamp of last check of delivery services
    Arc::Time delivery_last_checked;

    /// File size limit (in bytes) under which local transfer is used
    unsigned long long int remote_size_limit;

    /// Counter of transfers per delivery service
    std::map<std::string, int> delivery_hosts;

    /// Logger object
    static Arc::Logger logger;

    /// Root logger destinations, to use when logging non-DTR specific messages
    std::list<Arc::LogDestination*> root_destinations;

    /// Flag describing scheduler state. Used to decide whether to keep running main loop.
    ProcessState scheduler_state;

    /// Processor object
    Processor processor;

    /// Delivery object
    DataDelivery delivery;

    /// Static instance of Scheduler
    static Scheduler* scheduler_instance;

    /// Lock for multiple threads getting static Scheduler instance
    static std::mutex instance_lock;

    /// Copy constructor is private because Scheduler should not be copied
    Scheduler(const Scheduler&); // should not happen
    /// Assignment operator is private because Scheduler should not be copied
    Scheduler& operator=(const Scheduler&); // should not happen

    /* Functions to process every state of the DTR during normal workflow */

    /// Process a DTR in the NEW state
    void ProcessDTRNEW(DTR_ptr request);
    /// Process a DTR in the CACHE_WAIT state
    void ProcessDTRCACHE_WAIT(DTR_ptr request);
    /// Process a DTR in the CACHE_CHECKED state
    void ProcessDTRCACHE_CHECKED(DTR_ptr request);
    /// Process a DTR in the RESOLVED state
    void ProcessDTRRESOLVED(DTR_ptr request);
    /// Process a DTR in the REPLICA_QUERIED state
    void ProcessDTRREPLICA_QUERIED(DTR_ptr request);
    /// Process a DTR in the PRE_CLEANED state
    void ProcessDTRPRE_CLEANED(DTR_ptr request);
    /// Process a DTR in the STAGING_PREPARING_WAIT state
    void ProcessDTRSTAGING_PREPARING_WAIT(DTR_ptr request);
    /// Process a DTR in the STAGED_PREPARED state
    void ProcessDTRSTAGED_PREPARED(DTR_ptr request);
    /// Process a DTR in the TRANSFERRED state
    void ProcessDTRTRANSFERRED(DTR_ptr request);
    /// Process a DTR in the REQUEST_RELEASED state
    void ProcessDTRREQUEST_RELEASED(DTR_ptr request);
    /// Process a DTR in the REPLICA_FINALISED state
    void ProcessDTRREPLICA_FINALISED(DTR_ptr request);
    /// Process a DTR in the REPLICA_REGISTERED state
    void ProcessDTRREPLICA_REGISTERED(DTR_ptr request);
    /// Process a DTR in the CACHE_PROCESSED state
    void ProcessDTRCACHE_PROCESSED(DTR_ptr request);
    /// Process a DTR in a final state
    /* This is a special function to deal with states after which
     * the DTR is returned to the generator, i.e. DONE, ERROR, CANCELLED */
    void ProcessDTRFINAL_STATE(DTR_ptr request);

    /// Log a message to the root logger. This sends the message to the log
    /// destinations attached to the root logger at the point the Scheduler
    /// was started.
    void log_to_root_logger(Arc::LogLevel level, const std::string& message);

    /// Call the appropriate Process method depending on the DTR state
    void map_state_and_process(DTR_ptr request);

    /// Maps the DTR to the appropriate state when it is cancelled.
    /** This is a separate function, since cancellation request
     * can arrive at any time, breaking the normal workflow. */
    void map_cancel_state(DTR_ptr request);

    /// Map a DTR stuck in a processing state to new state from which it can
    /// recover and retry.
    void map_stuck_state(DTR_ptr request);

    /// Choose a delivery service for the DTR, based on the file system paths
    /// each service can access. These paths are determined by calling all the
    /// configured services when the first DTR is received.
    void choose_delivery_service(DTR_ptr request);

    /// Go through all DTRs waiting to go into a processing state and decide
    /// whether to push them into that state, depending on shares and limits.
    void revise_queues();

    /// Add a new event for the Scheduler to process. Used in receiveDTR().
    void add_event(DTR_ptr event);

    /// Process the pool of DTRs which have arrived from other processes
    void process_events(void);

    /// Move to the next replica in the DTR.
    /** Utility function which should be called in the case of error
     * if the next replica should be tried.
     * It takes care of sending the DTR to the appropriate state, depending
     * on whether or not there are more replicas to try. */
    void next_replica(DTR_ptr request);

    /// Handle a DTR whose source is mapped to another URL.
    /** If a file is mapped, this method should be called to deal
     * with the mapping. It sets the mapped_file attribute of
     * request to mapped_url. Returns true if the processing was
     * successful. */
    bool handle_mapped_source(DTR_ptr request, Arc::URL& mapped_url);

    /// Thread method for dumping state
    static void dump_thread(void* arg);

    /// Static version of main_thread, used when thread is created
    static void main_thread(void* arg);
    /// Main thread, which runs until stopped
    void main_thread(void);

   public:

    /// Get static instance of Scheduler, to use one DTR instance with multiple generators.
    /**
     * Configuration of Scheduler by Set* methods can only be done before
     * start() is called, so undetermined behaviour can result from multiple
     * threads simultaneously calling Set* then start(). It is safer to make
     * sure that all threads use the same configuration (calling start() twice
     * is harmless). It is also better to make sure that threads call stop() in
     * a roughly coordinated way, i.e. all generators stop at the same time.
     */
    static Scheduler* getInstance();

    /// Constructor, to be used when only one Generator uses this Scheduler.
    Scheduler();

    /// Destructor calls stop(), which cancels all DTRs and waits for them to complete
    ~Scheduler() { stop(); };

    /* The following Set/Add methods are only effective when called before start() */

    /// Set number of slots for processor and delivery stages
    void SetSlots(int pre_processor = 0, int post_processor = 0,
                  int delivery = 0, int emergency = 0, int staged_prepared = 0);

    /// Add URL mapping entry. See Arc::URLMap.
    void AddURLMapping(const Arc::URL& template_url, const Arc::URL& replacement_url,
                       const Arc::URL& access_url = Arc::URL());

    /// Replace all URL mapping entries
    void SetURLMapping(const Arc::URLMap& mapping = Arc::URLMap());

    /// Set the preferred pattern for ordering replicas.
    /**
     * This pattern will be used in the case of an index service URL with
     * multiple physical replicas and allows sorting of those replicas in order
     * of preference. It consists of one or more patterns separated by a pipe
     * character (|) listed in order of preference. If the dollar character ($)
     * is used at the end of a pattern, the pattern will be matched to the end
     * of the hostname of the replica. Example: "srm://myhost.org|.uk$|.ch$"
     */
    void SetPreferredPattern(const std::string& pattern);

    /// Set TransferShares configuration
    void SetTransferSharesConf(const TransferSharesConf& share_conf);

    /// Set transfer limits
    void SetTransferParameters(const TransferParameters& params);

    /// Set the list of delivery services. DTR::LOCAL_DELIVERY means local Delivery.
    void SetDeliveryServices(const std::vector<Arc::URL>& endpoints);

    /// Set the remote transfer size limit
    void SetRemoteSizeLimit(unsigned long long int limit);

    /// Set location for periodic dump of DTR state (only file paths currently supported)
    void SetDumpLocation(const std::string& location);

    /// Set JobPerfLog object for performance metrics logging
    void SetJobPerfLog(const Arc::JobPerfLog& perf_log);

    /// Start scheduling activity.
    /**
     * This method must be called after all configuration parameters are set
     * properly. Scheduler can be stopped either by calling stop() method or
     * by destroying its instance.
     */
    bool start(void);
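    // A hedged pre-start configuration sketch (share names, priorities and
    // pattern are illustrative only):
    //
    //   std::map<std::string, int> shares;
    //   shares["_default"] = 50;
    //   shares["atlas"] = 80;
    //   scheduler->SetTransferSharesConf(TransferSharesConf("voms:vo", shares));
    //   scheduler->SetPreferredPattern(".no$|.se$");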
    /// Callback method implemented from DTRCallback.
    /**
     * This method is called by the generator when it wants to pass a DTR
     * to the scheduler and when other processes send a DTR back to the
     * scheduler after processing.
     */
    virtual void receiveDTR(DTR_ptr dtr);

    /// Tell the Scheduler to cancel all the DTRs in the given job description
    bool cancelDTRs(const std::string& jobid);

    /// Tell the Scheduler to shut down all threads and exit.
    /**
     * All active DTRs are cancelled and this method waits until they finish
     * (all DTRs go to CANCELLED state)
     */
    bool stop();
  };

} // namespace DataStaging

#endif /*SCHEDULER_H_*/

nordugrid-arc-7.1.1/src/libs/data-staging/TransferShares.cpp

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <math.h>

#include <arc/StringConv.h>

#include "TransferShares.h"

namespace DataStaging {

  TransferSharesConf::TransferSharesConf(const std::string& type,
                                         const std::map<std::string, int>& ref_shares) {
    set_share_type(type);
    set_reference_shares(ref_shares);
  }

  TransferSharesConf::TransferSharesConf() : shareType(NONE) {
    ReferenceShares["_default"] = 50;
  }

  void TransferSharesConf::set_share_type(const std::string& type) {
    if (Arc::lower(type) == "dn")
      shareType = USER;
    else if (Arc::lower(type) == "voms:vo")
      shareType = VO;
    else if (Arc::lower(type) == "voms:role")
      shareType = ROLE;
    else if (Arc::lower(type) == "voms:group")
      shareType = GROUP;
    else
      shareType = NONE;
  }

  bool TransferSharesConf::is_configured(const std::string& ShareToCheck) {
    return (ReferenceShares.find(ShareToCheck) != ReferenceShares.end());
  }

  int TransferSharesConf::get_basic_priority(const std::string& ShareToCheck) {
    if (!is_configured(ShareToCheck))
      return ReferenceShares["_default"];
    return ReferenceShares[ShareToCheck];
  }

  void TransferSharesConf::set_reference_share(const std::string& RefShare, int Priority) {
    ReferenceShares[RefShare] = Priority;
  }

  void TransferSharesConf::set_reference_shares(const std::map<std::string, int>& shares) {
    ReferenceShares = shares;
    // there should always be a _default share defined
    if (ReferenceShares.find("_default") == ReferenceShares.end())
      ReferenceShares["_default"] = 50;
  }

  std::string TransferSharesConf::conf() const {
    std::string conf;
    conf += " Share type: ";
    switch (shareType){
      case USER:  conf += "DN"; break;
      case VO:    conf += "VOMS VO"; break;
      case GROUP: conf += "VOMS group"; break;
      case ROLE:  conf += "VOMS role"; break;
      case NONE:  conf += "None"; break;
      default: // Something really strange
        conf += "unknown"; break;
    }
    if (!ReferenceShares.empty()) {
      for (std::map<std::string, int>::const_iterator i = ReferenceShares.begin();
           i != ReferenceShares.end(); ++i) {
        conf += "\n Reference share " + i->first + ", priority " + Arc::tostring(i->second);
      }
    }
    return conf;
  }

  std::string TransferSharesConf::extract_share_info(DTR_ptr DTRToExtract) {
    DTRCredentialInfo cred = DTRToExtract->get_credential_info();
    switch (shareType){
      case USER:  return cred.getDN();
      case VO:    return cred.extractVOMSVO();
      case GROUP: return cred.extractVOMSGroup();
      case ROLE:  return cred.extractVOMSRole();
      case NONE:  return "_default";
      default: // Something really strange
        return "";
    }
  }

  TransferShares::TransferShares(const TransferSharesConf& shares_conf) : conf(shares_conf) {
    ActiveShares.clear();
    ActiveSharesSlots.clear();
  }

  void TransferShares::set_shares_conf(const TransferSharesConf& shares_conf) {
    conf = shares_conf;
  }
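  // Hedged worked example of the slot allocation below (numbers illustrative):
  // with 10 total slots and active shares A (priority 50, 8 DTRs queued) and
  // B (priority 100, 2 queued), the first pass gives A floor(50/150 * 10) = 3
  // slots and caps B at its 2 queued DTRs (floor(100/150 * 10) = 6 is more
  // than it needs); the remaining slots are then handed out one by one to
  // shares with more queued DTRs than assigned slots, here ending with A = 8.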
  void
  TransferShares::calculate_shares(int TotalNumberOfSlots) {
    ActiveSharesSlots.clear();
    // clear active shares with 0 count
    // and compute the summarized priority of other active shares
    std::map<std::string, int>::iterator i;
    int SummarizedPriority = 0;
    int TotalQueued = 0;
    for (i = ActiveShares.begin(); i != ActiveShares.end(); ){
      if (i->second == 0) {
        ActiveShares.erase(i++);
      } else {
        SummarizedPriority += conf.get_basic_priority(i->first);
        TotalQueued += i->second;
        ++i;
      }
    }
    int slots_used = 0;

    // first calculate shares based on the share priority
    for (i = ActiveShares.begin(); i != ActiveShares.end(); i++){
      // Number of slots for this share is its priority divided by total
      // priorities of all active shares multiplied by the total number of slots
      int slots = int(::floor(float(conf.get_basic_priority(i->first)) /
                              float(SummarizedPriority) * float(TotalNumberOfSlots)));
      if (slots > i->second) {
        // Don't assign more slots than the share needs
        ActiveSharesSlots[i->first] = i->second;
      } else if (slots == 0) {
        // Some shares can receive 0 slots.
        // It can happen when there are lots of shares active
        // or one share has enormously big priority.
        // There should be no 0 in the number of slots, so every
        // share has at least theoretical possibility to start
        ActiveSharesSlots[i->first] = 1;
      } else {
        ActiveSharesSlots[i->first] = slots;
      }
      slots_used += ActiveSharesSlots[i->first];
    }
    // now assign unused slots among shares with more DTRs than slots
    while (slots_used < TotalQueued && slots_used < TotalNumberOfSlots) {
      // TODO share slots using priorities
      for (i = ActiveShares.begin(); i != ActiveShares.end(); i++){
        if (ActiveSharesSlots[i->first] < ActiveShares[i->first]) {
          ActiveSharesSlots[i->first]++;
          slots_used++;
          if (slots_used >= TotalQueued || slots_used >= TotalNumberOfSlots) break;
        }
      }
    }
  }

  void TransferShares::increase_transfer_share(const std::string& ShareToIncrease) {
    ActiveShares[ShareToIncrease]++;
  }

  void TransferShares::decrease_transfer_share(const std::string& ShareToDecrease) {
    ActiveShares[ShareToDecrease]--;
  }

  void TransferShares::decrease_number_of_slots(const std::string& ShareToDecrease) {
    ActiveSharesSlots[ShareToDecrease]--;
  }

  bool TransferShares::can_start(const std::string& ShareToStart) {
    return (ActiveSharesSlots[ShareToStart] > 0);
  }

  std::map<std::string, int> TransferShares::active_shares() const {
    return ActiveShares;
  }
}

nordugrid-arc-7.1.1/src/libs/data-staging/Scheduler.cpp

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <arc/FileUtils.h>
#include <arc/IString.h>
#include <arc/StringConv.h>
#include <arc/Thread.h>
#include <arc/Utils.h>

#include "Scheduler.h"
#include "DataDeliveryRemoteComm.h"

namespace DataStaging {

  Arc::Logger Scheduler::logger(Arc::Logger::getRootLogger(), "DataStaging.Scheduler");

  Scheduler* Scheduler::scheduler_instance = NULL;
  std::mutex Scheduler::instance_lock;

  Scheduler* Scheduler::getInstance() {
    std::unique_lock<std::mutex> lock(instance_lock);
    if (!scheduler_instance) {
      scheduler_instance = new Scheduler();
    }
    return scheduler_instance;
  }

  Scheduler::Scheduler(): remote_size_limit(0), scheduler_state(INITIATED) {
    // Conservative defaults
    PreProcessorSlots = 20;
    DeliverySlots = 10;
    PostProcessorSlots = 20;
    EmergencySlots = 2;
    StagedPreparedSlots = 200;
  }

  void Scheduler::SetSlots(int pre_processor, int post_processor, int delivery,
                           int emergency, int staged_prepared) {
    if (scheduler_state == INITIATED) {
      if(pre_processor > 0) PreProcessorSlots = pre_processor;
      if(post_processor > 0) PostProcessorSlots = post_processor;
      if(delivery > 0) DeliverySlots = delivery;
      if(emergency > 0) EmergencySlots = emergency;
      if(staged_prepared > 0) StagedPreparedSlots = staged_prepared;
    }
  }

  void Scheduler::AddURLMapping(const Arc::URL& template_url, const Arc::URL& replacement_url,
                                const Arc::URL& access_url) {
    if (scheduler_state == INITIATED)
      url_map.add(template_url,replacement_url,access_url);
    // else should log warning, but logger is disconnected
  }

  void Scheduler::SetURLMapping(const Arc::URLMap& mapping) {
    if (scheduler_state == INITIATED)
      url_map = mapping;
  }

  void Scheduler::SetPreferredPattern(const std::string& pattern) {
    if (scheduler_state == INITIATED)
      preferred_pattern = pattern;
  }

  void Scheduler::SetTransferSharesConf(const TransferSharesConf& share_conf) {
    if (scheduler_state == INITIATED)
      transferSharesConf = share_conf;
  }

  void Scheduler::SetTransferParameters(const TransferParameters& params) {
    delivery.SetTransferParameters(params);
  }

  void Scheduler::SetDeliveryServices(const std::vector<Arc::URL>& endpoints) {
    if (scheduler_state == INITIATED)
      configured_delivery_services = endpoints;
  }

  void Scheduler::SetRemoteSizeLimit(unsigned long long int limit) {
    if (scheduler_state == INITIATED)
      remote_size_limit = limit;
  }

  void Scheduler::SetDumpLocation(const std::string& location) {
    dumplocation = location;
  }

  void Scheduler::SetJobPerfLog(const Arc::JobPerfLog& perf_log) {
    job_perf_log = perf_log;
  }

  bool Scheduler::start(void) {
    state_lock.lock();
    if(scheduler_state == RUNNING || scheduler_state == TO_STOP) {
      state_lock.unlock();
      return false;
    }
    scheduler_state = RUNNING;
    state_lock.unlock();

    processor.start();
    delivery.start();
    // if no delivery services set, then use local
    if (configured_delivery_services.empty()) {
      std::vector<Arc::URL> services;
      services.push_back(DTR::LOCAL_DELIVERY);
      configured_delivery_services = services;
    }
    Arc::CreateThreadFunction(&main_thread, this);
    return true;
  }

  void Scheduler::log_to_root_logger(Arc::LogLevel level, const std::string& message) {
    Arc::Logger::getRootLogger().addDestinations(root_destinations);
    logger.msg(level, "%s", message);
    Arc::Logger::getRootLogger().removeDestinations();
  }

  /* Function to sort the list of the pointers to DTRs
   * according to the priorities the DTRs have.
   * DTRs with higher priority go first to the beginning,
   * with lower -- to the end */
  bool dtr_sort_predicate(DTR_ptr dtr1, DTR_ptr dtr2) {
    return dtr1->get_priority() > dtr2->get_priority();
  }

  void Scheduler::next_replica(DTR_ptr request) {
    if (!request->error()) {
      // bad logic
      request->set_error_status(DTRErrorStatus::INTERNAL_LOGIC_ERROR,
                                DTRErrorStatus::ERROR_UNKNOWN,
                                "Bad logic: next_replica called when there is no error");
      // TODO: how to deal with these internal errors?
      return;
    }

    // Logic of whether to go for next source or destination
    bool source_error(false);
    if (request->get_error_status().GetErrorLocation() == DTRErrorStatus::ERROR_SOURCE)
      source_error = true;
    else if (request->get_error_status().GetErrorLocation() == DTRErrorStatus::ERROR_DESTINATION)
      source_error = false;
    else if (request->get_source()->IsIndex() && !request->get_destination()->IsIndex())
      source_error = true;
    else if (!request->get_source()->IsIndex() && request->get_destination()->IsIndex())
      source_error = false;
    else if (!request->get_source()->LastLocation() && request->get_destination()->LastLocation())
      source_error = true;
    else if (request->get_source()->LastLocation() && !request->get_destination()->LastLocation())
      source_error = false;
    else
      // Unknown error location, and either both are index services with remaining
      // replicas or neither are index services. Choose source in this case.
      source_error = true;

    bool replica_exists;
    if (source_error) {
      // reset mapped file
      request->set_mapped_source();
      replica_exists = request->get_source()->NextLocation();
    } else {
      replica_exists = request->get_destination()->NextLocation();
    }

    if (replica_exists) {
      // Use next replica
      // Clear the error flag to resume normal workflow
      request->reset_error_status();
      request->get_logger()->msg(Arc::INFO, "Using next %s replica",
                                 source_error ? istring("source") : istring("destination"));
      // Perhaps not necessary to query replica again if the error was in the destination
      // but the error could have been caused by a source problem during transfer
      request->set_status(DTRStatus::QUERY_REPLICA);
    } else {
      // No replicas - move to appropriate state for the post-processor to do cleanup
      request->get_logger()->msg(Arc::ERROR, "No more %s replicas",
                                 source_error ? istring("source") : istring("destination"));
      if (request->get_destination()->IsIndex()) {
        request->get_logger()->msg(Arc::VERBOSE, "Will clean up pre-registered destination");
        request->set_status(DTRStatus::REGISTER_REPLICA);
      } else if (!(request->get_cache_parameters().cache_dirs.empty() &&
                   request->get_cache_parameters().readonly_cache_dirs.empty()) &&
                 (request->get_cache_state() == CACHE_ALREADY_PRESENT ||
                  request->get_cache_state() == CACHEABLE)) {
        request->get_logger()->msg(Arc::VERBOSE, "Will release cache locks");
        request->set_status(DTRStatus::PROCESS_CACHE);
      } else {
        // nothing to clean up - set to end state
        request->get_logger()->msg(Arc::VERBOSE, "Moving to end of data staging");
        request->set_status(DTRStatus::CACHE_PROCESSED);
      }
    }
  }

  bool Scheduler::handle_mapped_source(DTR_ptr request, Arc::URL& mapped_url) {
    // The DTR source is mapped to another place so set the mapped location in request.
    // If mapped_url is set delivery will use it as source
    request->get_logger()->msg(Arc::INFO, "Source is mapped to %s", mapped_url.str());

    if (!request->get_source()->ReadOnly() && mapped_url.Protocol() == "link") {
      // read-write access means user can potentially modify source, so copy instead
      request->get_logger()->msg(Arc::WARNING, "Cannot link to source which can be modified, will copy instead");
      mapped_url.ChangeProtocol("file");
    }

    if (mapped_url.Protocol() == "link") {
      // If the map is a link then do the link here and set to TRANSFERRED. Local file
      // copies should still have to wait in the queue. For links we should also
      // turn off caching, remembering that we still need to release any cache
      // locks later if necessary.
      if (!request->get_destination()->Local()) {
        request->get_logger()->msg(Arc::ERROR, "Cannot link to a remote destination. Will not use mapped URL");
      }
      else {
        request->get_logger()->msg(Arc::INFO, "Linking mapped file");
        // Access session dir under mapped user
        if (!Arc::FileLink(mapped_url.Path(),
                           request->get_destination()->CurrentLocation().Path(),
                           request->get_local_user().get_uid(),
                           request->get_local_user().get_gid(),
                           true)) {
          request->get_logger()->msg(Arc::ERROR, "Failed to create link: %s. Will not use mapped URL",
                                     Arc::StrError(errno));
        }
        else {
          // successful link, so turn off caching, set to TRANSFERRED and return
          request->set_mapped_source(mapped_url.str());
          if (request->get_cache_state() == CACHEABLE)
            request->set_cache_state(CACHE_NOT_USED);
          request->set_status(DTRStatus::TRANSFERRED);
          return true;
        }
      }
    }
    else {
      // Ready to copy mapped file
      // Assume that mapped urls are not index services or stageable
      // TODO: handle case when mapped url is index
      request->set_mapped_source(mapped_url.str());
      request->set_status(DTRStatus::STAGED_PREPARED);
      return true;
    }
    return false;
  }
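  /* Hedged summary (derived from the Process* methods below) of the normal
   * state flow for one DTR; error paths divert to replica/cache cleanup:
   *   NEW -> CHECK_CACHE -> CACHE_CHECKED -> RESOLVE -> QUERY_REPLICA ->
   *   PRE_CLEAN -> STAGE_PREPARE -> STAGED_PREPARED -> TRANSFER ->
   *   RELEASE_REQUEST -> FINALISE_REPLICA -> REGISTER_REPLICA ->
   *   PROCESS_CACHE -> CACHE_PROCESSED */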
  void Scheduler::ProcessDTRNEW(DTR_ptr request){

    request->get_logger()->msg(Arc::INFO, "Scheduler received new DTR %s with source: %s,"
                               " destination: %s, assigned to transfer share %s with priority %d",
                               request->get_id(), request->get_source()->str(),
                               request->get_destination()->str(), request->get_transfer_share(),
                               request->get_priority());

    // Normal workflow is CHECK_CACHE
    if (request->get_cache_state() == NON_CACHEABLE ||
        (request->get_cache_parameters().cache_dirs.empty() &&
         request->get_cache_parameters().readonly_cache_dirs.empty())) {
      request->get_logger()->msg(Arc::VERBOSE, "File is not cacheable, was requested not to be cached or no cache available, skipping cache check");
      request->set_status(DTRStatus::CACHE_CHECKED);
    } else {
      // Cache checking should have quite a long timeout as it may
      // take a long time to download a big file or there is a long delivery queue
      request->set_timeout(86400);
      request->get_logger()->msg(Arc::VERBOSE, "File is cacheable, will check cache");
      if (DtrList.is_being_cached(request)) {
        Arc::Period cache_wait_period(10);
        request->get_logger()->msg(Arc::VERBOSE, "File is currently being cached, will wait %is",
                                   cache_wait_period.GetPeriod());
        request->set_process_time(cache_wait_period);
        request->set_status(DTRStatus::CACHE_WAIT);
      } else {
        request->set_status(DTRStatus::CHECK_CACHE);
      }
    }
  }

  void Scheduler::ProcessDTRCACHE_WAIT(DTR_ptr request){
    // The waiting time should be calculated within DTRList so
    // by the time we are here we know to query the cache again
    // If we timed out on it send to CACHE_PROCESSED where it
    // may be retried without caching
    if(request->get_timeout() < time(NULL)) {
      request->set_error_status(DTRErrorStatus::CACHE_ERROR,
                                DTRErrorStatus::ERROR_DESTINATION,
                                "Timed out while waiting for cache for " + request->get_source()->str());
      request->get_logger()->msg(Arc::ERROR, "Timed out while waiting for cache lock");
      request->set_status(DTRStatus::CACHE_PROCESSED);
    } else if (DtrList.is_being_cached(request)) {
      // If the source is already being cached the priority of that DTR
      // will be raised by is_being_cached() if this DTR's priority is higher
      Arc::Period cache_wait_period(10);
      request->get_logger()->msg(Arc::VERBOSE, "File is currently being cached, will wait %is",
                                 cache_wait_period.GetPeriod());
      request->set_process_time(cache_wait_period);
    } else {
      // Try to check cache again
      request->get_logger()->msg(Arc::VERBOSE, "Checking cache again");
      request->set_status(DTRStatus::CHECK_CACHE);
    }
  }

  void Scheduler::ProcessDTRCACHE_CHECKED(DTR_ptr request){
    // There's no need to check
    // additionally for cache error. If the error has occurred -- we just
    // proceed the normal workflow as if it was not cached at all.
    // But we should clear error flag if it was set by the pre-processor

    // setting timeout back to 1 hour, was set to 1 day in ProcessDTRNEW()
    request->set_timeout(3600);
    request->reset_error_status();

    if (request->get_cache_state() == CACHEABLE) DtrList.caching_started(request);

    if(request->get_cache_state() == CACHE_ALREADY_PRESENT){
      // File is on place already. After the post-processor
      // the DTR is DONE.
      request->get_logger()->msg(Arc::VERBOSE, "Destination file is in cache");
      request->set_status(DTRStatus::PROCESS_CACHE);
    } else if (request->get_source()->IsIndex() || request->get_destination()->IsIndex()) {
      // The Normal workflow -- RESOLVE
      request->get_logger()->msg(Arc::VERBOSE, "Source and/or destination is index service, will resolve replicas");
      request->set_status(DTRStatus::RESOLVE);
    } else {
      request->get_logger()->msg(Arc::VERBOSE, "Neither source nor destination are index services, will skip resolving replicas");
      request->set_status(DTRStatus::RESOLVED);
    }
  }

  void Scheduler::ProcessDTRRESOLVED(DTR_ptr request){
    if(request->error()){
      // It's impossible to download anything, since no replica location is resolved
      // if cacheable, move to PROCESS_CACHE, the post-processor will do the cleanup
      if (request->get_cache_state() == CACHEABLE &&
          !(request->get_cache_parameters().cache_dirs.empty() &&
            request->get_cache_parameters().readonly_cache_dirs.empty())) {
        request->get_logger()->msg(Arc::ERROR, "Problem with index service, will release cache lock");
        request->set_status(DTRStatus::PROCESS_CACHE);
      // else go to end state
      } else {
        request->get_logger()->msg(Arc::ERROR, "Problem with index service, will proceed to end of data staging");
        request->set_status(DTRStatus::CACHE_PROCESSED);
      }
    } else {
      // Normal workflow is QUERY_REPLICA
      // Should we always do this?
      // logic to choose best replica - sort according to configured preference
      request->get_source()->SortLocations(preferred_pattern, url_map);
      // Access latency is not known until replica is queried
      request->get_logger()->msg(Arc::VERBOSE, "Checking source file is present");
      request->set_status(DTRStatus::QUERY_REPLICA);
    }
  }

  void Scheduler::ProcessDTRREPLICA_QUERIED(DTR_ptr request){
    if(request->error()){
      // go to finalising replica
      request->get_logger()->msg(Arc::ERROR, "Error with source file, moving to next replica");
      request->set_status(DTRStatus::FINALISE_REPLICA);
      return;
    }
    if (request->get_source()->CheckSize()) {
      // Log performance metric with size of DTR
      timespec dummy;
      job_perf_log.Log("DTRSize",
                       request->get_short_id()+"\t"+Arc::tostring(request->get_source()->GetSize()),
                       dummy, dummy);
    }
    // Check if the replica is mapped
    if (url_map) {
      Arc::URL mapped_url(request->get_source()->CurrentLocation());
      if (url_map.map(mapped_url)) {
        if (handle_mapped_source(request, mapped_url))
          return;
      }
    }
    if (request->get_mapped_source().empty() &&
        request->get_source()->GetAccessLatency() == Arc::DataPoint::ACCESS_LATENCY_LARGE) {
      // If the current source location is long latency, try the next replica
      // TODO add this replica to the end of location list, so that if there
      // are problems with other replicas, we eventually come back to this one
      request->get_logger()->msg(Arc::INFO, "Replica %s has long latency, trying next replica",
                                 request->get_source()->CurrentLocation().str());
      if (request->get_source()->LastLocation()) {
        request->get_logger()->msg(Arc::INFO, "No more replicas, will use %s",
                                   request->get_source()->CurrentLocation().str());
      } else {
        request->get_source()->NextLocation();
        request->get_logger()->msg(Arc::VERBOSE, "Checking replica %s",
                                   request->get_source()->CurrentLocation().str());
        request->set_status(DTRStatus::QUERY_REPLICA);
        return;
      }
    }
    // Normal workflow is PRE_CLEAN state
    request->set_status(DTRStatus::PRE_CLEAN);
  }

  void Scheduler::ProcessDTRPRE_CLEANED(DTR_ptr request){
    if (request->error()) {
      if (request->get_error_status() == DTRErrorStatus::PERMANENT_REMOTE_ERROR) {
        request->get_logger()->msg(Arc::INFO, "Pre-clean failed");
        request->set_status(DTRStatus::CACHE_PROCESSED); // Remote destinations can't be cached
        return;
      }
      // If an error occurred cleaning a local file, try to copy anyway
      request->get_logger()->msg(Arc::INFO, "Pre-clean failed, will still try to copy");
    }
    request->reset_error_status();
    if (request->get_source()->IsStageable() || request->get_destination()->IsStageable()) {
      // Normal workflow is STAGE_PREPARE
      // Need to set the timeout to prevent from waiting for too long
      request->set_timeout(3600);
      // processor will take care of staging source or destination or both
      request->get_logger()->msg(Arc::VERBOSE, "Source or destination requires staging");
      request->set_status(DTRStatus::STAGE_PREPARE);
    } else {
      request->get_logger()->msg(Arc::VERBOSE, "No need to stage source or destination, skipping staging");
      request->set_status(DTRStatus::STAGED_PREPARED);
    }
  }

  void Scheduler::ProcessDTRSTAGING_PREPARING_WAIT(DTR_ptr request){
    // The waiting time should be calculated within DTRList so
    // by the time we are here we know to query the request again
    // If there's timeout -- it's error case
    if(request->get_timeout() < time(NULL)){
      // With a special error status we signal to the post-processor
      // that after releasing request this DTR should go into
      // QUERY_REPLICA again if necessary

      // Here we can't tell at which end the timeout was, so make an educated guess
    if (request->get_source()->IsStageable() && !request->get_destination()->IsStageable())
      request->set_error_status(DTRErrorStatus::STAGING_TIMEOUT_ERROR, DTRErrorStatus::ERROR_SOURCE,
                                "Stage request for source file timed out");
    else if (!request->get_source()->IsStageable() && request->get_destination()->IsStageable())
      request->set_error_status(DTRErrorStatus::STAGING_TIMEOUT_ERROR, DTRErrorStatus::ERROR_DESTINATION,
                                "Stage request for destination file timed out");
    else // both endpoints are stageable - don't know the error location
      request->set_error_status(DTRErrorStatus::STAGING_TIMEOUT_ERROR, DTRErrorStatus::ERROR_UNKNOWN,
                                "Stage request for source or destination file timed out");

    // Let the post-processor do the job
    request->get_logger()->msg(Arc::ERROR, "Staging request timed out, will release request");
    request->set_status(DTRStatus::RELEASE_REQUEST);
  } else {
    // Normal workflow is STAGE_PREPARE again
    request->get_logger()->msg(Arc::VERBOSE, "Querying status of staging request");
    request->set_status(DTRStatus::STAGE_PREPARE);
  }
}

void Scheduler::ProcessDTRSTAGED_PREPARED(DTR_ptr request) {
  if (request->error()) {
    // We have to try another replica if the source failed to stage,
    // but first we have to release any requests
    request->get_logger()->msg(Arc::VERBOSE, "Releasing requests");
    request->set_status(DTRStatus::RELEASE_REQUEST);
    return;
  }
  if (url_map && request->get_mapped_source().empty() && request->get_source()->IsStageable()) {
    // check if any TURLs are mapped
    std::vector<Arc::URL> turls = request->get_source()->TransferLocations();
    for (std::vector<Arc::URL>::iterator i = turls.begin(); i != turls.end(); ++i) {
      Arc::URL mapped_url(i->fullstr());
      if (url_map.map(mapped_url)) {
        if (handle_mapped_source(request, mapped_url)) return;
      }
    }
  }
  // Check for destination mapping
  if (url_map) {
    Arc::URL mapped_url(request->get_destination()->CurrentLocation());
    if (url_map.map(mapped_url, false)) {
      request->set_mapped_destination(mapped_url.str());
    }
  }
  // After normal workflow the DTR is ready for delivery
  request->get_logger()->msg(Arc::VERBOSE, "DTR is ready for transfer, moving to delivery queue");
  // set long timeout for waiting for transfer slot
  // (setting timeouts for active transfers is done in Delivery)
  request->set_timeout(7200);
  request->set_status(DTRStatus::TRANSFER);
}

void Scheduler::ProcessDTRTRANSFERRED(DTR_ptr request) {
  // We don't check if an error has happened - if it has, the post-processor
  // will take the needed steps in RELEASE_REQUEST in any case. The error flag
  // will now work as a sign to return the DTR to QUERY_REPLICA again.
  // Delivery will clean up the destination physical file on error
  if (request->error())
    request->get_logger()->msg(Arc::ERROR, "Transfer failed: %s", request->get_error_status().GetDesc());

  // Resuming normal workflow after the DTR has finished transferring.
  // The next state is RELEASE_REQUEST.
  // If cacheable and no cancellation or error, mark the DTR as CACHE_DOWNLOADED
  // Might be better to do this in delivery instead
  if (!request->cancel_requested() && !request->error() && request->get_cache_state() == CACHEABLE)
    request->set_cache_state(CACHE_DOWNLOADED);

  if (request->get_source()->IsStageable() || request->get_destination()->IsStageable()) {
    request->get_logger()->msg(Arc::VERBOSE, "Releasing request(s) made during staging");
    request->set_status(DTRStatus::RELEASE_REQUEST);
  } else {
    request->get_logger()->msg(Arc::VERBOSE, "Neither source nor destination were staged, skipping releasing requests");
    request->set_status(DTRStatus::REQUEST_RELEASED);
  }
}

void Scheduler::ProcessDTRREQUEST_RELEASED(DTR_ptr request) {
  // Source index post-processing
  if (request->get_source()->IsIndex()) {
    request->set_status(DTRStatus::FINALISE_REPLICA);
  } else {
    request->set_status(DTRStatus::REPLICA_FINALISED);
  }
}

void Scheduler::ProcessDTRREPLICA_FINALISED(DTR_ptr request) {
  // If the post-processor had trouble releasing the request, continue the
  // normal workflow and the DTR will be cleaned up. If the error
  // originates from before (like transfer errors or staging errors)
  // and is not from the destination, we need to query another replica
  if (request->error() &&
      request->get_error_status().GetLastErrorState() != DTRStatus::RELEASING_REQUEST) {
    request->get_logger()->msg(Arc::ERROR, "Trying next replica");
    next_replica(request);
  } else if (request->get_destination()->IsIndex()) {
    // Normal workflow is REGISTER_REPLICA
    request->get_logger()->msg(Arc::VERBOSE, "Will %s in destination index service",
                               ((request->error() || request->cancel_requested()) ? istring("unregister") : istring("register")));
    request->set_status(DTRStatus::REGISTER_REPLICA);
  } else {
    request->get_logger()->msg(Arc::VERBOSE, "Destination is not index service, skipping replica registration");
    request->set_status(DTRStatus::REPLICA_REGISTERED);
  }
}

void Scheduler::ProcessDTRREPLICA_REGISTERED(DTR_ptr request) {
  // If there was a problem registering the destination file,
  // using a different source replica won't help, so pass to the final step
  // (remote destinations can't be cached). The post-processor should have
  // taken care of deleting the physical file.
  // If the error originates from before, follow the normal workflow and
  // the processor will clean up
  if (request->error() &&
      request->get_error_status().GetLastErrorState() == DTRStatus::REGISTERING_REPLICA) {
    request->get_logger()->msg(Arc::ERROR, "Error registering replica, moving to end of data staging");
    request->set_status(DTRStatus::CACHE_PROCESSED);
  } else if (!(request->get_cache_parameters().cache_dirs.empty() &&
               request->get_cache_parameters().readonly_cache_dirs.empty()) &&
             (request->get_cache_state() == CACHE_ALREADY_PRESENT ||
              request->get_cache_state() == CACHE_DOWNLOADED ||
              request->get_cache_state() == CACHEABLE ||
              request->get_cache_state() == CACHE_NOT_USED)) {
    // Normal workflow is PROCESS_CACHE
    request->get_logger()->msg(Arc::VERBOSE, "Will process cache");
    request->set_status(DTRStatus::PROCESS_CACHE);
  } else {
    // not a cacheable file
    request->get_logger()->msg(Arc::VERBOSE, "File is not cacheable, skipping cache processing");
    request->set_status(DTRStatus::CACHE_PROCESSED);
  }
}

void Scheduler::ProcessDTRCACHE_PROCESSED(DTR_ptr request) {
  // Final stage within the scheduler. Retries are initiated from here if
  // necessary, otherwise success or failure is reported to the generator.
  // First remove from the caching list
  DtrList.caching_finished(request);

  if (request->cancel_requested()) {
    // Cancellation steps finished
    request->get_logger()->msg(Arc::VERBOSE, "Cancellation complete");
    request->set_status(DTRStatus::CANCELLED);
  } else if (request->error()) {
    // If the error occurred in cache processing we send the DTR back
    // to REPLICA_QUERIED to try the same replica again without cache,
    // or to CACHE_CHECKED if the file was already in cache, or to NEW
    // to try again if there was a locking problem during link. If there
    // was a cache timeout we also go back to CACHE_CHECKED.
    // If the error occurred in another place we are finished and report
    // the error to the generator
    if (request->get_error_status().GetLastErrorState() == DTRStatus::PROCESSING_CACHE) {
      if (request->get_cache_state() == CACHE_LOCKED) {
        // set a flat wait time of 10s
        Arc::Period cache_wait_period(10);
        request->get_logger()->msg(Arc::INFO, "Will wait 10s");
        request->set_process_time(cache_wait_period);
        request->set_cache_state(CACHEABLE);
        request->set_status(DTRStatus::NEW);
      } else {
        request->get_logger()->msg(Arc::ERROR, "Error in cache processing, will retry without caching");
        if (request->get_cache_state() == CACHE_ALREADY_PRESENT) request->set_status(DTRStatus::CACHE_CHECKED);
        else request->set_status(DTRStatus::REPLICA_QUERIED);
        request->set_cache_state(CACHE_SKIP);
      }
      request->reset_error_status();
      return;
    } else if (request->get_error_status().GetLastErrorState() == DTRStatus::CACHE_WAIT) {
      request->get_logger()->msg(Arc::ERROR, "Will retry without caching");
      request->set_cache_state(CACHE_SKIP);
      request->reset_error_status();
      request->set_status(DTRStatus::CACHE_CHECKED);
      return;
    } else {
      request->decrease_tries_left();
      // Here we decide to retry based on whether the error is
      // temporary or not and the configured retry strategy
      if (request->get_error_status().GetErrorStatus() == DTRErrorStatus::TEMPORARY_REMOTE_ERROR ||
          request->get_error_status().GetErrorStatus() == DTRErrorStatus::TRANSFER_SPEED_ERROR ||
          request->get_error_status().GetErrorStatus() == DTRErrorStatus::INTERNAL_PROCESS_ERROR) {
        if (request->get_tries_left() > 0) {
          // Check if credentials are ok
          if (request->get_source()->RequiresCredentials() || request->get_destination()->RequiresCredentials()) {
            Arc::Time exp_time = request->get_credential_info().getExpiryTime();
            if (exp_time < Arc::Time()) {
              request->get_logger()->msg(Arc::WARNING, "Proxy has expired");
              // Append this information to the error string
              DTRErrorStatus status = request->get_error_status();
              request->set_error_status(status.GetErrorStatus(), status.GetErrorLocation(), status.GetDesc()+" (Proxy expired)");
              request->set_status(DTRStatus::ERROR);
              return;
            }
          }
          // exponential back off - 10s, 40s, 90s, ...
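          // (Worked example added for clarity: the wait computed below is
          //   10 * (initial_tries - tries_left)^2 seconds,
          // so with get_initial_tries() == 4 the first retry waits
          // 10*1*1 = 10s, the second 10*2*2 = 40s and the third
          // 10*3*3 = 90s, matching the comment above.)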
          request->set_process_time(10*(request->get_initial_tries()-request->get_tries_left())*
                                       (request->get_initial_tries()-request->get_tries_left()));
          request->get_logger()->msg(Arc::INFO, "%i retries left, will wait until %s before next attempt",
                                     request->get_tries_left(), request->get_process_time().str());
          // set state depending on where the error occurred
          if (request->get_error_status().GetLastErrorState() == DTRStatus::REGISTERING_REPLICA) {
            request->set_status(DTRStatus::REGISTER_REPLICA);
          } else if (request->get_error_status().GetLastErrorState() == DTRStatus::RELEASING_REQUEST) {
            request->set_status(DTRStatus::RELEASE_REQUEST);
          } else {
            // If the error happened before or during transfer set back to NEW
            // Reset DTR information set during this transfer
            request->reset();
            request->set_status(DTRStatus::NEW);
          }
          return;
        } else request->get_logger()->msg(Arc::ERROR, "Out of retries");
      }
      request->get_logger()->msg(Arc::ERROR, "Permanent failure");
      request->set_status(DTRStatus::ERROR);
    }
  } else {
    // Normal workflow completed successfully for this DTR
    request->get_logger()->msg(Arc::INFO, "Finished successfully");
    request->set_status(DTRStatus::DONE);
  }
}

void Scheduler::ProcessDTRFINAL_STATE(DTR_ptr request) {
  // This is the only place where the DTR is returned to the generator
  // and deleted from the global list

  // Return to the generator
  request->get_logger()->msg(Arc::INFO, "Returning to generator");
  DTR::push(request, GENERATOR);
  // Delete from the global list
  DtrList.delete_dtr(request);
}

void Scheduler::map_state_and_process(DTR_ptr request) {
  // For cancelled DTRs set the appropriate post-processor state
  if (request->cancel_requested()) map_cancel_state(request);
  // Loop until the DTR is sent somewhere for some action to be done.
  // This is more efficient because many DTRs will skip some states and
  // we don't want to have to wait for the full list to be processed before
  // advancing to the next state
  Arc::Time now;
  while ((request->came_from_pre_processor() ||
          request->came_from_delivery() ||
          request->came_from_post_processor() ||
          request->came_from_generator()) &&
         request->get_process_time() <= now) {
    switch (request->get_status().GetStatus()) {
      case DTRStatus::NEW: ProcessDTRNEW(request); continue;
      case DTRStatus::CACHE_WAIT: ProcessDTRCACHE_WAIT(request); continue;
      case DTRStatus::CACHE_CHECKED: ProcessDTRCACHE_CHECKED(request); continue;
      case DTRStatus::RESOLVED: ProcessDTRRESOLVED(request); continue;
      case DTRStatus::REPLICA_QUERIED: ProcessDTRREPLICA_QUERIED(request); continue;
      case DTRStatus::PRE_CLEANED: ProcessDTRPRE_CLEANED(request); continue;
      case DTRStatus::STAGING_PREPARING_WAIT: ProcessDTRSTAGING_PREPARING_WAIT(request); continue;
      case DTRStatus::STAGED_PREPARED: ProcessDTRSTAGED_PREPARED(request); continue;
      case DTRStatus::TRANSFERRED: ProcessDTRTRANSFERRED(request); continue;
      case DTRStatus::REQUEST_RELEASED: ProcessDTRREQUEST_RELEASED(request); continue;
      case DTRStatus::REPLICA_FINALISED: ProcessDTRREPLICA_FINALISED(request); continue;
      case DTRStatus::REPLICA_REGISTERED: ProcessDTRREPLICA_REGISTERED(request); continue;
      case DTRStatus::CACHE_PROCESSED: ProcessDTRCACHE_PROCESSED(request); continue;
      default: break; // Do nothing
    }
  }
}

void Scheduler::map_cancel_state(DTR_ptr request) {
  switch (request->get_status().GetStatus()) {
    case DTRStatus::NEW:
    case DTRStatus::CHECK_CACHE:
    case DTRStatus::CACHE_WAIT: {
      // Nothing has yet been done to require cleanup or additional
      // activities. Return to the generator via CACHE_PROCESSED.
      request->set_status(DTRStatus::CACHE_PROCESSED);
    } break;

    case DTRStatus::CACHE_CHECKED:
    case DTRStatus::RESOLVE: {
      // The cache may have been started, so set to
      // REPLICA_REGISTERED to allow the post-processor to clean up the cache
      request->set_status(DTRStatus::REPLICA_REGISTERED);
    } break;

    case DTRStatus::RESOLVED:
    case DTRStatus::QUERY_REPLICA:
    case DTRStatus::REPLICA_QUERIED:
    case DTRStatus::PRE_CLEAN:
    case DTRStatus::PRE_CLEANED:
    case DTRStatus::STAGE_PREPARE: {
      // At this stage we may have registered a file in an
      // index service so set to REQUEST_RELEASED to allow
      // the post-processor to clean it up
      request->set_status(DTRStatus::REQUEST_RELEASED);
    } break;

    case DTRStatus::STAGING_PREPARING_WAIT:
    case DTRStatus::STAGED_PREPARED:
    case DTRStatus::TRANSFER: {
      // At this stage, in addition to cache work, we
      // may already have pending requests.
      // The post-processor should take care of those too
      request->set_status(DTRStatus::TRANSFERRED);
    } break;

    case DTRStatus::TRANSFERRED:
    case DTRStatus::RELEASE_REQUEST:
    case DTRStatus::REQUEST_RELEASED:
    case DTRStatus::FINALISE_REPLICA:
    case DTRStatus::REPLICA_FINALISED:
    case DTRStatus::REGISTER_REPLICA:
    case DTRStatus::REPLICA_REGISTERED:
    case DTRStatus::PROCESS_CACHE:
    case DTRStatus::CACHE_PROCESSED: {
      // post-processing states
      // If the request was cancelled during the transfer, the delivery
      // should have cleaned up the destination file. If it was cancelled
      // after the transfer, we have to decide whether to clean up or not.
      /* delete_destination_file() */
      // No other action is required here; just let the normal workflow
      // resume and the post-processor will take care of the cleanup
    } break;

    default: break; // Do nothing
  }
}

void Scheduler::map_stuck_state(DTR_ptr request) {
  switch (request->get_status().GetStatus()) {
    case DTRStatus::CHECKING_CACHE: {
      // The cache may have been started, so set to
      // REPLICA_REGISTERED to allow the post-processor to clean up the cache
      request->set_status(DTRStatus::REPLICA_REGISTERED);
    } break;

    case DTRStatus::RESOLVING:
    case DTRStatus::QUERYING_REPLICA:
    case DTRStatus::PRE_CLEANING: {
      // At this stage we may have registered a file in an
      // index service so set to REQUEST_RELEASED to allow
      // the post-processor to clean it up
      request->set_status(DTRStatus::REQUEST_RELEASED);
    } break;

    case DTRStatus::STAGING_PREPARING: {
      // At this stage, in addition to cache work, we
      // may already have pending requests.
      // The post-processor should take care of those too
      request->set_status(DTRStatus::TRANSFERRED);
    } break;

    // For post-processor states simply move on to the next state
    case DTRStatus::RELEASING_REQUEST: {
      request->set_status(DTRStatus::REQUEST_RELEASED);
    } break;

    case DTRStatus::FINALISING_REPLICA: {
      request->set_status(DTRStatus::REPLICA_FINALISED);
    } break;

    case DTRStatus::REGISTERING_REPLICA: {
      request->set_status(DTRStatus::REPLICA_REGISTERED);
    } break;

    case DTRStatus::PROCESSING_CACHE: {
      request->set_status(DTRStatus::CACHE_PROCESSED);
    } break;

    default: break; // Unexpected state - do nothing
  }
}

void Scheduler::add_event(DTR_ptr event) {
  event_lock.lock();
  events.push_back(event);
  event_lock.unlock();
}

void Scheduler::choose_delivery_service(DTR_ptr request) {
  if (configured_delivery_services.empty()) return;

  // Only local is configured
  if (configured_delivery_services.size() == 1 &&
      configured_delivery_services.front() == DTR::LOCAL_DELIVERY) return;

  // Check for the size limit under which local should be used
  if (remote_size_limit > 0 &&
      request->get_source()->CheckSize() &&
      request->get_source()->GetSize() < remote_size_limit) {
    request->get_logger()->msg(Arc::INFO, "File is smaller than %llu bytes, will use local delivery", remote_size_limit);
    request->set_delivery_endpoint(DTR::LOCAL_DELIVERY);
    return;
  }

  // Remember current endpoint
  Arc::URL delivery_endpoint(request->get_delivery_endpoint());

  // Check delivery services when the first DTR is processed, and every 5
  // minutes after that. The ones that work are the only ones that will be
  // used until the next check.
  // This method assumes that the DTR has permission on all services,
  // which may not be true if DN filtering is used on those services.
  if (usable_delivery_services.empty() || Arc::Time() - delivery_last_checked > 300) {
    delivery_last_checked = Arc::Time();
    usable_delivery_services.clear();
    for (std::vector<Arc::URL>::iterator service = configured_delivery_services.begin();
         service != configured_delivery_services.end(); ++service) {
      request->set_delivery_endpoint(*service);
      std::vector<std::string> allowed_dirs;
      std::string load_avg;
      if (!DataDeliveryComm::CheckComm(request, allowed_dirs, load_avg)) {
        log_to_root_logger(Arc::WARNING, "Error with delivery service at " +
                           request->get_delivery_endpoint().str() + " - This service will not be used");
      } else {
        usable_delivery_services[*service] = allowed_dirs;
        // This is not a timing measurement so use dummy timestamps
        timespec dummy;
        job_perf_log.Log("DTR_load_" + service->Host(), load_avg, dummy, dummy);
      }
    }
    request->set_delivery_endpoint(delivery_endpoint);
    if (usable_delivery_services.empty()) {
      log_to_root_logger(Arc::ERROR, "No usable delivery services found, will use local delivery");
      return;
    }
  }

  // Make a list of the delivery services that this DTR can use
  std::vector<Arc::URL> possible_delivery_services;
  bool can_use_local = false;
  for (std::map<Arc::URL, std::vector<std::string> >::iterator service = usable_delivery_services.begin();
       service != usable_delivery_services.end(); ++service) {
    if (service->first == DTR::LOCAL_DELIVERY) can_use_local = true;

    for (std::vector<std::string>::iterator dir = service->second.begin();
         dir != service->second.end(); ++dir) {
      if (request->get_destination()->Local()) {
        // check for caching
        std::string dest = request->get_destination()->TransferLocations()[0].Path();
        if ((request->get_cache_state() == CACHEABLE) && !request->get_cache_file().empty()) dest = request->get_cache_file();
        if (dest.find(*dir) == 0) {
          request->get_logger()->msg(Arc::DEBUG, "Delivery service at %s can copy to %s", service->first.str(), *dir);
          possible_delivery_services.push_back(service->first);
          break;
        }
      } else if (request->get_source()->Local()) {
        if (request->get_source()->TransferLocations()[0].Path().find(*dir) == 0) {
          request->get_logger()->msg(Arc::DEBUG, "Delivery service at %s can copy from %s", service->first.str(), *dir);
          possible_delivery_services.push_back(service->first);
          break;
        }
      } else {
        // copy between two remote endpoints so any service is ok
        possible_delivery_services.push_back(service->first);
        break;
      }
    }
  }
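  // (Illustration added for clarity, with hypothetical paths: the candidate
  // selection above is a plain string prefix test via find() == 0, so a
  // service advertising the allowed dir /var/spool/arc/cache can copy to a
  // destination such as /var/spool/arc/cache/69/abc123, while a destination
  // under /tmp/jobs would not match and that service would not become a
  // candidate for this DTR.)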
  if (possible_delivery_services.empty()) {
    request->get_logger()->msg(Arc::WARNING, "Could not find any useable delivery service,"
                               " forcing local transfer");
    request->set_delivery_endpoint(DTR::LOCAL_DELIVERY);
    return;
  }

  // only local
  if (possible_delivery_services.size() == 1 && can_use_local) {
    request->set_delivery_endpoint(DTR::LOCAL_DELIVERY);
    return;
  }

  // Exclude full services, i.e. those with more transfers than their share
  // of the slots (DeliverySlots divided by the number of services)
  for (std::vector<Arc::URL>::iterator possible = possible_delivery_services.begin();
       possible != possible_delivery_services.end();) {
    if (delivery_hosts[possible->Host()] > (int)(DeliverySlots/configured_delivery_services.size())) {
      request->get_logger()->msg(Arc::DEBUG, "Not using delivery service at %s because it is full", possible->str());
      possible = possible_delivery_services.erase(possible);
    } else {
      ++possible;
    }
  }

  // If none are left then we should not use local but wait
  if (possible_delivery_services.empty()) {
    request->set_delivery_endpoint(Arc::URL());
    return;
  }

  // First try, use any service
  if (request->get_tries_left() == request->get_initial_tries()) {
    delivery_endpoint = possible_delivery_services.at(rand() % possible_delivery_services.size());
    request->set_delivery_endpoint(delivery_endpoint);
    return;
  }

  // Retry: try not to use a previously problematic service.
  // If all are problematic then default to local (even if not configured)
  for (std::vector<Arc::URL>::iterator possible = possible_delivery_services.begin();
       possible != possible_delivery_services.end();) {
    std::vector<Arc::URL>::const_iterator problem = request->get_problematic_delivery_services().begin();
    while (problem != request->get_problematic_delivery_services().end()) {
      if (*possible == *problem) {
        request->get_logger()->msg(Arc::VERBOSE, "Not using delivery service %s due to previous failure", problem->str());
        possible = possible_delivery_services.erase(possible);
        break;
      }
      ++problem;
    }
    if (problem == request->get_problematic_delivery_services().end()) ++possible;
  }

  if (possible_delivery_services.empty()) {
    // force local
    if (!can_use_local) request->get_logger()->msg(Arc::WARNING, "No remote delivery services "
                                                   "are useable, forcing local delivery");
    request->set_delivery_endpoint(DTR::LOCAL_DELIVERY);
  } else {
    // Find a random service different from the previous one, looping a
    // limited number of times in case all delivery services are the same url
    Arc::URL ep(possible_delivery_services.at(rand() % possible_delivery_services.size()));
    for (unsigned int i = 0; ep == delivery_endpoint && i < possible_delivery_services.size() * 10; ++i) {
      ep = possible_delivery_services.at(rand() % possible_delivery_services.size());
    }
    request->set_delivery_endpoint(ep);
  }
}

void Scheduler::process_events(void) {
  Arc::Time now;
  event_lock.lock();

  for (std::list<DTR_ptr>::iterator event = events.begin(); event != events.end();) {
    DTR_ptr tmp = *event;
    event_lock.unlock();

    if (tmp->get_process_time() <= now) {
      map_state_and_process(tmp);
      // If in a final state, the DTR is returned to the generator and deleted
      if (tmp->is_in_final_state()) {
        ProcessDTRFINAL_STATE(tmp);
        event_lock.lock();
        event = events.erase(event);
        continue;
      }
      // If the event was sent on to a queue, erase it from the list
      if (tmp->is_destined_for_pre_processor() ||
          tmp->is_destined_for_delivery() ||
          tmp->is_destined_for_post_processor()) {
        event_lock.lock();
        event = events.erase(event);
        continue;
      }
    }
    event_lock.lock();
    ++event;
  }
  event_lock.unlock();
}

void Scheduler::revise_queues() {
  // The DTRs ready to go into a processing state
  std::map<DTRStatus::DTRStatusType, std::list<DTR_ptr> > DTRQueueStates;
  DtrList.filter_dtrs_by_statuses(DTRStatus::ToProcessStates, DTRQueueStates);

  // The active DTRs currently in processing states
  std::map<DTRStatus::DTRStatusType, std::list<DTR_ptr> > DTRRunningStates;
  DtrList.filter_dtrs_by_statuses(DTRStatus::ProcessingStates, DTRRunningStates);

  // Get the number of current transfers for each delivery service for
  // enforcing limits per server
  delivery_hosts.clear();
  for (std::list<DTR_ptr>::const_iterator i = DTRRunningStates[DTRStatus::TRANSFERRING].begin();
       i != DTRRunningStates[DTRStatus::TRANSFERRING].end(); ++i) {
    delivery_hosts[(*i)->get_delivery_endpoint().Host()]++;
  }

  // Check for any requested changes in priority
  DtrList.check_priority_changes(std::string(dumplocation + ".prio"));

  // Get all the DTRs in a staged state
  staged_queue.clear();
  std::list<DTR_ptr> staged_queue_list;
  DtrList.filter_dtrs_by_statuses(DTRStatus::StagedStates, staged_queue_list);

  // filter out stageable DTRs per transfer share, putting the highest
  // priority at the front
  for (std::list<DTR_ptr>::iterator i = staged_queue_list.begin(); i != staged_queue_list.end(); ++i) {
    if ((*i)->get_source()->IsStageable() || (*i)->get_destination()->IsStageable()) {
      std::list<DTR_ptr>& queue = staged_queue[(*i)->get_transfer_share()];
      if (!queue.empty() && (*i)->get_priority() > queue.front()->get_priority()) {
        queue.push_front(*i);
      } else {
        queue.push_back(*i);
      }
    }
  }

  Arc::Time now;
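  // (Note added for clarity: ToProcessStates and ProcessingStates are
  // parallel lists, so index i below pairs each queue state with its
  // in-progress counterpart, e.g. RESOLVE with RESOLVING or PROCESS_CACHE
  // with PROCESSING_CACHE; the exact ordering is defined in DTRStatus and
  // is assumed here, not shown in this file.)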
  // Go through the "to process" states, work out shares and push DTRs
  for (unsigned int i = 0; i < DTRStatus::ToProcessStates.size(); ++i) {
    std::list<DTR_ptr> DTRQueue = DTRQueueStates[DTRStatus::ToProcessStates.at(i)];
    std::list<DTR_ptr> ActiveDTRs = DTRRunningStates[DTRStatus::ProcessingStates.at(i)];

    if (DTRQueue.empty() && ActiveDTRs.empty()) continue;

    // Map of job id to list of DTRs, used for grouping bulk requests
    std::map<std::string, std::set<DTR_ptr> > bulk_requests;

    // Transfer shares for this queue
    TransferShares transferShares(transferSharesConf);

    // Sort the DTR queue according to the priorities the DTRs have.
    // Highest priority will be at the beginning of the list.
    DTRQueue.sort(dtr_sort_predicate);

    int highest_priority = 0;

    // First go over the queue and check for cancellation and timeout
    for (std::list<DTR_ptr>::iterator dtr = DTRQueue.begin(); dtr != DTRQueue.end();) {
      DTR_ptr tmp = *dtr;
      if (dtr == DTRQueue.begin()) highest_priority = tmp->get_priority();

      // There's no check for cancellation requests for the post-processor.
      // Most DTRs with cancellation requests will go to the post-processor
      // for cleanups, hold releases, etc., so the cancellation requests
      // don't break the normal workflow in the post-processor (as opposed
      // to any other process), but instead act just as a sign that the
      // post-processor should do additional cleanup activities.
      if (tmp->is_destined_for_pre_processor() || tmp->is_destined_for_delivery()) {
        // The cancellation requests break the normal workflow. A cancelled
        // request will either go back to the generator or be put into a
        // post-processor state for clean up.
        if (tmp->cancel_requested()) {
          map_cancel_state(tmp);
          add_event(tmp);
          dtr = DTRQueue.erase(dtr);
          continue;
        }
      }
      // To avoid the situation where DTRs get blocked behind higher
      // priority DTRs, DTRs that have passed their timeout should have their
      // priority boosted. But this should only happen if there are higher
      // priority DTRs, since there could be a large queue of low priority DTRs
      // which, after having their priority boosted, would then block new
      // high priority requests.
      // The simple solution here is to increase priority by 1 every 5 minutes.
      // There is plenty of scope for more intelligent solutions.
      // TODO reset priority back to original value once past this stage.
      if (tmp->get_timeout() < now && tmp->get_priority() < highest_priority) {
        tmp->set_priority(tmp->get_priority() + 1);
        tmp->set_timeout(300);
      }
      // STAGE_PREPARE is a special case where we have to apply a limit to
      // avoid preparing too many files and then having pins expire while
      // they wait in the transfer queue. In future it may be better to
      // limit per remote host. For now count DTRs staging and transferring
      // in this share and apply the limit. In order not to block the
      // highest priority DTRs we allow them to bypass the limit.
      if (DTRStatus::ToProcessStates.at(i) == DTRStatus::STAGE_PREPARE) {
        if (staged_queue[tmp->get_transfer_share()].size() < StagedPreparedSlots ||
            staged_queue[tmp->get_transfer_share()].front()->get_priority() < tmp->get_priority()) {
          // Reset timeout
          tmp->set_timeout(3600);
          // add to the staging queue and sort to put highest priority first
          staged_queue[tmp->get_transfer_share()].push_front(tmp);
          staged_queue[tmp->get_transfer_share()].sort(dtr_sort_predicate);
        } else {
          // Past the limit - this DTR cannot be processed this time so erase from the queue
          dtr = DTRQueue.erase(dtr);
          continue;
        }
      }
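      // (Hypothetical example added for clarity: within one job, DTRs with
      // sources srm://se.example.org/f1 and srm://se.example.org/f2 share
      // protocol and host and so can be grouped into one bulk query, while
      // a source on srm://other.example.org cannot join that group; at most
      // 100 DTRs are bulked and sources identified by GUID are not mixed
      // with plain LFNs, as the checks below enforce.)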
      // Check if a bulk operation is possible for this DTR. To keep it
      // simple there is only one bulk request per job per revise_queues loop
      if (tmp->bulk_possible()) {
        std::string jobid(tmp->get_parent_job_id());
        if (bulk_requests.find(jobid) == bulk_requests.end()) {
          std::set<DTR_ptr> bulk_list;
          bulk_list.insert(tmp);
          bulk_requests[jobid] = bulk_list;
        } else {
          DTR_ptr first_bulk = *bulk_requests[jobid].begin();
          // Only source bulk operations are supported at the moment, limited to 100
          if (bulk_requests[jobid].size() < 100 &&
              first_bulk->get_source()->GetURL().Protocol() == tmp->get_source()->GetURL().Protocol() &&
              first_bulk->get_source()->GetURL().Host() == tmp->get_source()->GetURL().Host() &&
              first_bulk->get_source()->CurrentLocation().Protocol() == tmp->get_source()->CurrentLocation().Protocol() &&
              first_bulk->get_source()->CurrentLocation().Host() == tmp->get_source()->CurrentLocation().Host() &&
              // This is because we cannot have a mix of LFNs and GUIDs when querying a catalog like LFC
              first_bulk->get_source()->GetURL().MetaDataOption("guid").length() == tmp->get_source()->GetURL().MetaDataOption("guid").length()) {
            bulk_requests[jobid].insert(tmp);
          }
        }
      }
      transferShares.increase_transfer_share(tmp->get_transfer_share());
      ++dtr;
    }

    // Go over the active DTRs and add them to the transfer shares
    for (std::list<DTR_ptr>::iterator dtr = ActiveDTRs.begin(); dtr != ActiveDTRs.end();) {
      DTR_ptr tmp = *dtr;
      if (tmp->get_status() == DTRStatus::TRANSFERRING) {
        // If the DTR is in Delivery, check for cancellation. The pre- and
        // post-processor DTRs don't get cancelled here but are allowed to
        // continue processing.
        if (tmp->cancel_requested()) {
          tmp->get_logger()->msg(Arc::INFO, "Cancelling active transfer");
          delivery.cancelDTR(tmp);
          dtr = ActiveDTRs.erase(dtr);
          continue;
        }
      } else if (tmp->get_modification_time() + 3600 < now) {
        // Stuck in a processing thread for more than one hour - assume a hang
        // and try to recover and retry. It is potentially dangerous if a
        // stuck thread wakes up.
        tmp->get_logger()->msg(Arc::WARNING, "Processing thread timed out. Restarting DTR");
        tmp->set_error_status(DTRErrorStatus::INTERNAL_PROCESS_ERROR,
                              DTRErrorStatus::NO_ERROR_LOCATION,
                              "Processor thread timed out");
        map_stuck_state(tmp);
        add_event(tmp);
        ++dtr;
        continue;
      }
      transferShares.increase_transfer_share((*dtr)->get_transfer_share());
      ++dtr;
    }

    // If the queue is empty we can go straight to the next state
    if (DTRQueue.empty()) continue;

    // Slot limit for this state
    unsigned int slot_limit = DeliverySlots;
    if (DTRQueue.front()->is_destined_for_pre_processor()) slot_limit = PreProcessorSlots;
    else if (DTRQueue.front()->is_destined_for_post_processor()) slot_limit = PostProcessorSlots;

    // Calculate the slots available for each active share
    transferShares.calculate_shares(slot_limit);

    // Shares which have at least one DTR active and running.
    // Shares can only use emergency slots if they are not in this list.
    std::set<std::string> active_shares;
    unsigned int running = ActiveDTRs.size();

    // Go over the active DTRs again and decrease slots in the corresponding shares
    for (std::list<DTR_ptr>::iterator dtr = ActiveDTRs.begin(); dtr != ActiveDTRs.end(); ++dtr) {
      transferShares.decrease_number_of_slots((*dtr)->get_transfer_share());
      active_shares.insert((*dtr)->get_transfer_share());
    }

    // Now at the beginning of the queue we have the DTRs that should be
    // launched first. Launch them, but with respect to the transfer shares.
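    // (Illustration added for clarity, with hypothetical numbers and the
    // assumption that calculate_shares() splits the slots between active
    // shares according to their configured priorities: with slot_limit = 10
    // and two active shares of equal priority, each share could start
    // roughly 5 DTRs; once "running" reaches the slot limit, only shares
    // with no running DTR may still start one, using the EmergencySlots
    // checked at the bottom of the loop below.)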
    for (std::list<DTR_ptr>::iterator dtr = DTRQueue.begin(); dtr != DTRQueue.end(); ++dtr) {
      DTR_ptr tmp = *dtr;

      // Check if there are any shares left in the queue which might need
      // an emergency share - if not we are done
      if (running >= slot_limit &&
          transferShares.active_shares().size() == active_shares.size()) break;

      // Check if this DTR is still in a queue state (was not already sent
      // in a bulk operation)
      if (tmp->get_status() != DTRStatus::ToProcessStates.at(i)) continue;

      // Are there slots left for this share?
      bool can_start = transferShares.can_start(tmp->get_transfer_share());

      // Check if it is possible to use an emergency share
      if (running >= slot_limit &&
          active_shares.find(tmp->get_transfer_share()) != active_shares.end()) {
        can_start = false;
      }

      if (can_start) {
        transferShares.decrease_number_of_slots(tmp->get_transfer_share());

        // Send to processor/delivery
        if (tmp->is_destined_for_pre_processor()) {
          // Check for bulk
          if (tmp->bulk_possible()) {
            std::set<DTR_ptr> bulk_set(bulk_requests[tmp->get_parent_job_id()]);
            if (bulk_set.size() > 1 && bulk_set.find(tmp) != bulk_set.end()) {
              tmp->get_logger()->msg(Arc::INFO, "Will use bulk request");
              unsigned int dtr_no = 0;
              for (std::set<DTR_ptr>::iterator i = bulk_set.begin(); i != bulk_set.end(); ++i) {
                if (dtr_no == 0) (*i)->set_bulk_start(true);
                if (dtr_no == bulk_set.size() - 1) (*i)->set_bulk_end(true);
                DTR::push(*i, PRE_PROCESSOR);
                ++dtr_no;
              }
            } else {
              DTR::push(tmp, PRE_PROCESSOR);
            }
          } else {
            DTR::push(tmp, PRE_PROCESSOR);
          }
        } else if (tmp->is_destined_for_post_processor()) DTR::push(tmp, POST_PROCESSOR);
        else if (tmp->is_destined_for_delivery()) {
          choose_delivery_service(tmp);
          if (!tmp->get_delivery_endpoint()) {
            // With a large queue waiting for delivery and different dirs per
            // delivery service this could slow things down as it could go
            // through every DTR in the queue
            tmp->get_logger()->msg(Arc::DEBUG, "No delivery endpoints available, will try later");
            continue;
          }
          DTR::push(tmp, DELIVERY);
          delivery_hosts[tmp->get_delivery_endpoint().Host()]++;
        }
        ++running;
        active_shares.insert(tmp->get_transfer_share());
      }
      // Hard limit with all emergency slots used
      if (running == slot_limit + EmergencySlots) break;
    }
  }
}

void Scheduler::receiveDTR(DTR_ptr request) {
  if (!request) {
    logger.msg(Arc::ERROR, "Scheduler received NULL DTR");
    return;
  }
  if (request->get_status() != DTRStatus::NEW) {
    add_event(request);
    return;
  }
  // New DTR - first check it is valid
  if (!(*request)) {
    logger.msg(Arc::ERROR, "Scheduler received invalid DTR");
    request->set_status(DTRStatus::ERROR);
    DTR::push(request, GENERATOR);
    return;
  }
  request->registerCallback(&processor, PRE_PROCESSOR);
  request->registerCallback(&processor, POST_PROCESSOR);
  request->registerCallback(&delivery, DELIVERY);

  /* Shares part */
  // First, get the transfer share this DTR should belong to
  std::string DtrTransferShare = transferSharesConf.extract_share_info(request);

  // If no share information could be obtained, use the default share
  if (DtrTransferShare.empty()) DtrTransferShare = "_default";

  // If this share is a reference share, we have to add the sub-share
  // to the reference list
  bool in_reference = transferSharesConf.is_configured(DtrTransferShare);
  int priority = transferSharesConf.get_basic_priority(DtrTransferShare);

  request->set_transfer_share(DtrTransferShare);
  DtrTransferShare = request->get_transfer_share();

  // Now the sub-share is added to DtrTransferShare, add it to reference
  // shares if appropriate and update each TransferShare
  if (in_reference && !transferSharesConf.is_configured(DtrTransferShare)) {
    transferSharesConf.set_reference_share(DtrTransferShare, priority);
  }
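  // (Worked example added for clarity, with hypothetical numbers: the
  // computation below scales the share's basic priority by the parent
  // job's priority treated as a percentage, so a share priority of 50 and
  // a job priority of 80 give the DTR int(50 * 80 * 0.01) = 40.)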
  // Compute the priority this DTR receives - this is the priority of the
  // share adjusted by the priority of the parent job
  request->set_priority(int(transferSharesConf.get_basic_priority(DtrTransferShare) * request->get_priority() * 0.01));
  /* Shares part ends */

  DtrList.add_dtr(request);
  add_event(request);
}

bool Scheduler::cancelDTRs(const std::string& jobid) {
  cancelled_jobs_lock.lock();
  cancelled_jobs.push_back(jobid);
  cancelled_jobs_lock.unlock();
  return true;
}

void Scheduler::dump_thread(void* arg) {
  Scheduler* sched = (Scheduler*)arg;
  while (sched->scheduler_state == RUNNING && !sched->dumplocation.empty()) {
    // every second, dump state
    sched->DtrList.dumpState(sched->dumplocation);
    // Performance metric - total number of DTRs in the system
    timespec dummy;
    sched->job_perf_log.Log("DTR_total", Arc::tostring(sched->DtrList.size()), dummy, dummy);
    if (sched->dump_signal.wait(1000)) break; // notified by signal()
  }
}

bool Scheduler::stop() {
  state_lock.lock();
  if (scheduler_state != RUNNING) {
    state_lock.unlock();
    return false;
  }
  // cancel all jobs
  std::list<std::string> alljobs = DtrList.all_jobs();
  cancelled_jobs_lock.lock();
  for (std::list<std::string>::iterator job = alljobs.begin(); job != alljobs.end(); ++job)
    cancelled_jobs.push_back(*job);
  cancelled_jobs_lock.unlock();

  // signal the main loop to stop and wait for completion of all DTRs
  scheduler_state = TO_STOP;
  run_signal.wait();
  scheduler_state = STOPPED;
  state_lock.unlock();
  return true;
}

void Scheduler::main_thread (void* arg) {
  Scheduler* it = (Scheduler*)arg;
  it->main_thread();
}

void Scheduler::main_thread (void) {
  logger.msg(Arc::INFO, "Scheduler starting up");
  logger.msg(Arc::INFO, "Scheduler configuration:");
  logger.msg(Arc::INFO, " Pre-processor slots: %u", PreProcessorSlots);
  logger.msg(Arc::INFO, " Delivery slots: %u", DeliverySlots);
  logger.msg(Arc::INFO, " Post-processor slots: %u", PostProcessorSlots);
  logger.msg(Arc::INFO, " Emergency slots: %u", EmergencySlots);
  logger.msg(Arc::INFO, " Prepared slots: %u", StagedPreparedSlots);
  logger.msg(Arc::INFO, " Shares configuration:\n%s", transferSharesConf.conf());
  for (std::vector<Arc::URL>::iterator i = configured_delivery_services.begin();
       i != configured_delivery_services.end(); ++i) {
    if (*i == DTR::LOCAL_DELIVERY) logger.msg(Arc::INFO, " Delivery service: LOCAL");
    else logger.msg(Arc::INFO, " Delivery service: %s", i->str());
  }

  // Start the thread dumping DTR state
  if (!Arc::CreateThreadFunction(&dump_thread, this))
    logger.msg(Arc::ERROR, "Failed to create DTR dump thread");

  // Disconnect from the root logger so that messages are logged to the per-DTR Logger
  Arc::Logger::getRootLogger().setThreadContext();
  root_destinations = Arc::Logger::getRootLogger().getDestinations();
  Arc::Logger::getRootLogger().removeDestinations();
  Arc::Logger::getRootLogger().setThreshold(DTR::LOG_LEVEL);

  while (scheduler_state != TO_STOP || !DtrList.empty()) {
    // first check for cancelled jobs
    cancelled_jobs_lock.lock();
    std::list<std::string>::iterator jobid = cancelled_jobs.begin();
    for (; jobid != cancelled_jobs.end();) {
      std::list<DTR_ptr> requests;
      DtrList.filter_dtrs_by_job(*jobid, requests);
      for (std::list<DTR_ptr>::iterator dtr = requests.begin(); dtr != requests.end(); ++dtr) {
        (*dtr)->set_cancel_request();
        (*dtr)->get_logger()->msg(Arc::INFO, "DTR %s cancelled", (*dtr)->get_id());
      }
      jobid = cancelled_jobs.erase(jobid);
    }
    cancelled_jobs_lock.unlock();
    // Deal with pending events, i.e. DTRs from other processes
    process_events();
    // Revise all the internal queues and take actions
    revise_queues();

    usleep(50000);
  }

  // make sure the final state is dumped before exit
  dump_signal.signal();
  if (!dumplocation.empty()) DtrList.dumpState(dumplocation);

  log_to_root_logger(Arc::INFO, "Scheduler loop exited");
  run_signal.signal();
}

} // namespace DataStaging

nordugrid-arc-7.1.1/src/libs/data-staging/README
ARC data staging implementation. This code provides an advanced mechanism for data transfer and scheduling.

nordugrid-arc-7.1.1/src/libs/README
ARC libraries. Libraries related to HED can be found in src/hed/libs.

nordugrid-arc-7.1.1/src/services/Makefile.am
if A_REX_SERVICE_ENABLED
AREX_SERVICE = a-rex
else
AREX_SERVICE =
endif
if LDAP_SERVICE_ENABLED
LDAP_SERVICE = ldap-infosys
else
LDAP_SERVICE =
endif
if MONITOR_ENABLED
MONITOR = monitor
else
MONITOR =
endif
if CANDYPOND_ENABLED
CANDYPOND_SERVICE = candypond
else
CANDYPOND_SERVICE =
endif
if DATADELIVERY_SERVICE_ENABLED
DATADELIVERY_SERVICE = data-staging
else
DATADELIVERY_SERVICE =
endif

SUBDIRS = $(AREX_SERVICE) $(LDAP_SERVICE) \
	$(MONITOR) \
	$(CANDYPOND_SERVICE) \
	$(DATADELIVERY_SERVICE) \
	wrappers examples
DIST_SUBDIRS = a-rex ldap-infosys \
	monitor \
	candypond \
	data-staging \
	wrappers examples

nordugrid-arc-7.1.1/src/services/Makefile.in
# Makefile.in generated by automake 1.16.2 from Makefile.am.
# @configure_input@

# Copyright (C) 1994-2020 Free Software Foundation, Inc.

# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive 
ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ 
ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ 
OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ 
runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @A_REX_SERVICE_ENABLED_FALSE@AREX_SERVICE = @A_REX_SERVICE_ENABLED_TRUE@AREX_SERVICE = a-rex @LDAP_SERVICE_ENABLED_FALSE@LDAP_SERVICE = @LDAP_SERVICE_ENABLED_TRUE@LDAP_SERVICE = ldap-infosys @MONITOR_ENABLED_FALSE@MONITOR = @MONITOR_ENABLED_TRUE@MONITOR = monitor @CANDYPOND_ENABLED_FALSE@CANDYPOND_SERVICE = @CANDYPOND_ENABLED_TRUE@CANDYPOND_SERVICE = candypond @DATADELIVERY_SERVICE_ENABLED_FALSE@DATADELIVERY_SERVICE = @DATADELIVERY_SERVICE_ENABLED_TRUE@DATADELIVERY_SERVICE = data-staging SUBDIRS = $(AREX_SERVICE) $(LDAP_SERVICE) \ $(MONITOR) \ $(CANDYPOND_SERVICE) \ $(DATADELIVERY_SERVICE) \ wrappers examples DIST_SUBDIRS = a-rex ldap-infosys \ monitor \ candypond \ data-staging \ wrappers examples all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/PaxHeaders/data-staging0000644000000000000000000000013115067751426020252 xustar0030 mtime=1759499030.926451865 29 atime=1759499034.76351017 30 ctime=1759499030.926451865 nordugrid-arc-7.1.1/src/services/data-staging/0000755000175000002070000000000015067751426022232 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327022363 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.873493772 30 ctime=1759499030.919932371 nordugrid-arc-7.1.1/src/services/data-staging/Makefile.am0000644000175000002070000000214315067751327024266 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libdatadeliveryservice.la if SYSV_SCRIPTS_ENABLED DATA_DELIVERY_SCRIPT = arc-datadelivery-service else DATA_DELIVERY_SCRIPT = endif initd_SCRIPTS = $(DATA_DELIVERY_SCRIPT) if SYSTEMD_UNITS_ENABLED DATA_DELIVERY_UNIT = arc-datadelivery-service.service else DATA_DELIVERY_UNIT = endif units_DATA = $(DATA_DELIVERY_UNIT) pkgdata_SCRIPTS = arc-datadelivery-service-start libdatadeliveryservice_la_SOURCES = DataDeliveryService.h DataDeliveryService.cpp libdatadeliveryservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdatadeliveryservice_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(GLIBMM_LIBS) libdatadeliveryservice_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356022377 xustar0030 mtime=1759498990.977615493 30 atime=1759499019.144272835 30 ctime=1759499030.920935128 nordugrid-arc-7.1.1/src/services/data-staging/Makefile.in0000644000175000002070000010650415067751356024307 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/data-staging ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-datadelivery-service \ arc-datadelivery-service.service \ arc-datadelivery-service-start CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed 
'$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libdatadeliveryservice_la_DEPENDENCIES = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(am__DEPENDENCIES_1) am_libdatadeliveryservice_la_OBJECTS = \ libdatadeliveryservice_la-DataDeliveryService.lo libdatadeliveryservice_la_OBJECTS = \ $(am_libdatadeliveryservice_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libdatadeliveryservice_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdatadeliveryservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libdatadeliveryservice_la_LDFLAGS) $(LDFLAGS) -o $@ SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ ./$(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libdatadeliveryservice_la_SOURCES) DIST_SOURCES = $(libdatadeliveryservice_la_SOURCES) am__can_run_installinfo = \ case 
$$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(units_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in \ $(srcdir)/arc-datadelivery-service-start.in \ $(srcdir)/arc-datadelivery-service.in \ $(srcdir)/arc-datadelivery-service.service.in \ $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = 
@ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ 
RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ pkglib_LTLIBRARIES = libdatadeliveryservice.la @SYSV_SCRIPTS_ENABLED_FALSE@DATA_DELIVERY_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@DATA_DELIVERY_SCRIPT = arc-datadelivery-service initd_SCRIPTS = $(DATA_DELIVERY_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@DATA_DELIVERY_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@DATA_DELIVERY_UNIT = arc-datadelivery-service.service units_DATA = $(DATA_DELIVERY_UNIT) pkgdata_SCRIPTS = arc-datadelivery-service-start libdatadeliveryservice_la_SOURCES = DataDeliveryService.h DataDeliveryService.cpp libdatadeliveryservice_la_CXXFLAGS = 
-I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libdatadeliveryservice_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(GLIBMM_LIBS) libdatadeliveryservice_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/data-staging/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/data-staging/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-datadelivery-service: $(top_builddir)/config.status $(srcdir)/arc-datadelivery-service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-datadelivery-service.service: $(top_builddir)/config.status $(srcdir)/arc-datadelivery-service.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-datadelivery-service-start: $(top_builddir)/config.status $(srcdir)/arc-datadelivery-service-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ 
test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libdatadeliveryservice.la: $(libdatadeliveryservice_la_OBJECTS) $(libdatadeliveryservice_la_DEPENDENCIES) $(EXTRA_libdatadeliveryservice_la_DEPENDENCIES) $(AM_V_CXXLD)$(libdatadeliveryservice_la_LINK) -rpath $(pkglibdir) $(libdatadeliveryservice_la_OBJECTS) $(libdatadeliveryservice_la_LIBADD) $(LIBS) install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(initddir)'"; \ $(MKDIR_P) "$(DESTDIR)$(initddir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(initddir)'; $(am__uninstall_files_from_dir) install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libdatadeliveryservice_la-DataDeliveryService.lo: DataDeliveryService.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdatadeliveryservice_la_CXXFLAGS) $(CXXFLAGS) -MT libdatadeliveryservice_la-DataDeliveryService.lo -MD -MP -MF $(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Tpo -c -o libdatadeliveryservice_la-DataDeliveryService.lo `test -f 'DataDeliveryService.cpp' || echo '$(srcdir)/'`DataDeliveryService.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Tpo $(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DataDeliveryService.cpp' object='libdatadeliveryservice_la-DataDeliveryService.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdatadeliveryservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libdatadeliveryservice_la-DataDeliveryService.lo `test -f 'DataDeliveryService.cpp' || echo '$(srcdir)/'`DataDeliveryService.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(unitsdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(unitsdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(unitsdir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) 
$(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libdatadeliveryservice_la-DataDeliveryService.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-pkglibLTLIBRARIES uninstall-unitsDATA .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-pkglibLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-initdSCRIPTS install-man \ install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-strip install-unitsDATA installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-pkglibLTLIBRARIES uninstall-unitsDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/DataDeliveryService.h0000644000000000000000000000013115067751327024376 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.873493772 30 ctime=1759499030.926119913 nordugrid-arc-7.1.1/src/services/data-staging/DataDeliveryService.h0000644000175000002070000001117415067751327026305 0ustar00mockbuildmock00000000000000#ifndef DATADELIVERYSERVICE_H_ #define DATADELIVERYSERVICE_H_ #include #include #include #include #include #include #include namespace DataStaging { /// Service for the Delivery layer of data staging. /** * This service starts and controls data transfers. It assumes that the * files in any request submitted are ready for immediate transfer and * so do not need to be resolved or prepared in any way. * * It implements DTRCallback to get callbacks when a DTR has finished * transfer. * * Status codes in results returned: * - OK - successful submission/cancellation * - TRANSFERRING - transfer still ongoing * - TRANSFERRED - transfer finished successfully * - TRANSFER_ERROR - transfer failed * - SERVICE_ERROR - something went wrong in the service itself * * An internal list of active transfers is held in memory. 
After the first * query of a finished transfer (successful or not) the DTR is moved to an * archived list where only summary information is kept about the transfer * (DTR ID, state and short error description). The DTR object is then * deleted. This archived list is also kept in memory. In case a transfer is * never queried, a separate thread moves any transfers which completed more * than one hour ago to the archived list. */ class DataDeliveryService: public Arc::Service, DTRCallback { /// Managed pointer to stringstream used to hold log output typedef Arc::ThreadedPointer<std::stringstream> sstream_ptr; private: /// Construct a SOAP error message with optional extra reason string Arc::MCC_Status make_soap_fault(Arc::Message& outmsg, const std::string& reason = ""); /// DataDeliveryService namespace Arc::NS ns; /// Directories the service is allowed to copy files from or to std::list<std::string> allowed_dirs; /// Process limit read from cache service configuration unsigned int max_processes; /// Current processes - using gint to guarantee atomic thread-safe operations gint current_processes; /// Internal set of active DTRs; each DTR carries the stream with its transfer log std::set<DTR_ptr> active_dtrs; /// Lock for active DTRs list Arc::SimpleCondition active_dtrs_lock; /// Archived list of finished DTRs, just ID and final state and short explanation /// TODO: save to file, DB? std::map<std::string, std::pair<std::string, std::string> > archived_dtrs; /// Lock for archive DTRs list Arc::SimpleCondition archived_dtrs_lock; /// Object to manage Delivery processes DataDelivery delivery; /// Container for delegated credentials Arc::DelegationContainerSOAP delegation; /// Directory in which to store temporary delegated proxies std::string tmp_proxy_dir; /// Root logger destinations, to use when logging messages in methods /// called from Delivery layer where root logger is disabled std::list<Arc::LogDestination*> root_destinations; /// Logger object static Arc::Logger logger; /// Log a message to root destinations void LogToRootLogger(Arc::LogLevel level, const std::string& message); /// Static version of ArchivalThread, used when thread is created static void ArchivalThread(void* arg); /// Archival thread void ArchivalThread(void); /// Sanity check on file sources and destinations bool CheckInput(const std::string& url, const Arc::UserConfig& usercfg, Arc::XMLNode& resultelement, bool& require_credential_file); /* individual operations */ /// Start a new transfer Arc::MCC_Status Start(Arc::XMLNode in, Arc::XMLNode out); /// Query status of transfer Arc::MCC_Status Query(Arc::XMLNode in, Arc::XMLNode out); /// Cancel a transfer Arc::MCC_Status Cancel(Arc::XMLNode in, Arc::XMLNode out); /// Check service is ok and return service information Arc::MCC_Status Ping(Arc::XMLNode in, Arc::XMLNode out); public: /// Make a new DataDeliveryService. Sets up the process handler. DataDeliveryService(Arc::Config *cfg, Arc::PluginArgument* parg); /// Destroy the DataDeliveryService virtual ~DataDeliveryService(); /// Main method called by HED when service is invoked. Directs call to appropriate internal method.
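///
/// A minimal sketch of how a client might read the per-DTR results these
/// operations return, using the element names from Start() below and the
/// result codes listed above (check_results itself is a hypothetical helper):
/// \code
/// void check_results(Arc::XMLNode response) {
///   Arc::XMLNode results = response["DataDeliveryStartResponse"]["DataDeliveryStartResult"];
///   for (int n = 0;; ++n) {
///     Arc::XMLNode result = results["Result"][n];
///     if (!result) break;
///     std::string code = (std::string)result["ResultCode"];
///     if (code == "SERVICE_ERROR" || code == "TRANSFER_ERROR")
///       std::cerr << (std::string)result["ID"] << ": "
///                 << (std::string)result["ErrorDescription"] << std::endl;
///   }
/// }
/// \endcode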
virtual Arc::MCC_Status process(Arc::Message &inmsg, Arc::Message &outmsg); /// Implementation of callback method from DTRCallback virtual void receiveDTR(DTR_ptr dtr); }; } // namespace DataStaging #endif /* DATADELIVERYSERVICE_H_ */ nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/arc-datadelivery-service.in0000644000000000000000000000013115067751327025535 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.874493787 30 ctime=1759499030.922935796 nordugrid-arc-7.1.1/src/services/data-staging/arc-datadelivery-service.in0000644000175000002070000001076215067751327027446 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the DataDelivery service # # chkconfig: 2345 87 13 # description: ARC DataDelivery service # processname: arched ### BEGIN INIT INFO # Provides: arc-datadelivery-service # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC DataDelivery service # Description: ARC DataDelivery service ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Can source neither init.d nor lsb functions" exit 1 fi prog=arched # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-datadelivery-service ]; then . /etc/sysconfig/arc-datadelivery-service elif [ -r /etc/default/arc-datadelivery-service ]; then . /etc/default/arc-datadelivery-service fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # PID and lock file PID_FILE=`${ARC_LOCATION}/@pkgdatasubdir@/arc-datadelivery-service-start --getpidfile` if [ $? -ne 0 ]; then # When --getpidfile fails it returns the error on stdout log_failure_msg "$PID_FILE" exit 1 fi if [ `id -u` = 0 ] ; then # Debian does not have /run/lock/subsys if [ -d /run/lock/subsys ]; then LOCKFILE=/run/lock/subsys/$prog-datadelivery-service else LOCKFILE=/run/lock/$prog-datadelivery-service fi else LOCKFILE=$HOME/$prog-datadelivery-service.lock fi start() { echo -n "Starting $prog: " # Check if we are already running if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi ${ARC_LOCATION}/@pkgdatasubdir@/arc-datadelivery-service-start RETVAL=$? if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then kill "$pid" RETVAL=$?
if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=10; # enough time to kill any active processes while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart}" exit 1 ;; esac exit $? nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/arc-datadelivery-service.service.in0000644000000000000000000000013115067751327027174 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.874493787 30 ctime=1759499030.923997054 nordugrid-arc-7.1.1/src/services/data-staging/arc-datadelivery-service.service.in0000644000175000002070000000034615067751327031102 0ustar00mockbuildmock00000000000000[Unit] Description=A-REX datadelivery service After=local-fs.target remote-fs.target [Service] ExecStart=@prefix@/@pkgdatasubdir@/arc-datadelivery-service-start NotifyAccess=all Type=forking [Install] WantedBy=multi-user.target nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/DataDeliveryService.cpp0000644000000000000000000000013215067751327024732 xustar0030 mtime=1759498967.773910274 30 atime=1759498967.873493772 30 ctime=1759499030.927139449 nordugrid-arc-7.1.1/src/services/data-staging/DataDeliveryService.cpp0000644000175000002070000006607415067751327026645 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "DataDeliveryService.h" namespace DataStaging { static Arc::Plugin *get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast<Arc::ServicePluginArgument*>(arg):NULL; if(!srvarg) return NULL; DataDeliveryService* s = new DataDeliveryService((Arc::Config*)(*srvarg),arg); if (*s) return s; delete s; return NULL; } Arc::Logger DataDeliveryService::logger(Arc::Logger::rootLogger, "DataDeliveryService"); class DTRLogStream: public Arc::LogStream { public: DTRLogStream(): Arc::LogStream(stream) {} auto str() { return stream.str(); } private: std::stringstream stream; }; void DataDeliveryService::ArchivalThread(void* arg) { DataDeliveryService* service = (DataDeliveryService*)arg; service->ArchivalThread(); } void DataDeliveryService::ArchivalThread() { // every 10 mins, archive DTRs older than 1 hour // TODO: configurable, save to disk?
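// A DTR is archived only once it is both older than the one-hour limit and no
// longer TRANSFERRING; the full object is dropped and just its ID, final state
// and any error text are kept in archived_dtrs.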
int frequency = 600; while (true) { sleep(frequency); Arc::Time timelimit(Arc::Time()-Arc::Period(3600)); active_dtrs_lock.lock(); for (std::set<DTR_ptr>::iterator i = active_dtrs.begin(); i != active_dtrs.end();) { DTR_ptr dtr = *i; if (dtr->get_modification_time() < timelimit && dtr->get_status() != DTRStatus::TRANSFERRING) { archived_dtrs_lock.lock(); if (dtr->error()) { logger.msg(Arc::VERBOSE, "Archiving DTR %s, state ERROR", dtr->get_id()); archived_dtrs[dtr->get_id()] = std::pair<std::string, std::string>("TRANSFER_ERROR", dtr->get_error_status().GetDesc()); } else { logger.msg(Arc::VERBOSE, "Archiving DTR %s, state %s", dtr->get_id(), dtr->get_status().str()); archived_dtrs[dtr->get_id()] = std::pair<std::string, std::string>("TRANSFERRED", ""); } archived_dtrs_lock.unlock(); active_dtrs.erase(i++); } else ++i; } active_dtrs_lock.unlock(); } } bool DataDeliveryService::CheckInput(const std::string& url, const Arc::UserConfig& usercfg, Arc::XMLNode& resultelement, bool& require_credential_file) { Arc::DataHandle h(url, usercfg); if (!h || !(*h)) { resultelement.NewChild("ErrorDescription") = "Can't handle URL " + url; return false; } if (h->Local()) { std::string path(h->GetURL().Path()); if (path.find("../") != std::string::npos) { resultelement.NewChild("ErrorDescription") = "'../' is not allowed in filename"; return false; } bool allowed = false; for (std::list<std::string>::iterator i = allowed_dirs.begin(); i != allowed_dirs.end(); ++i) { if (path.find(*i) == 0) allowed = true; } if (!allowed) { resultelement.NewChild("ErrorDescription") = "Access denied to path " + path; return false; } } if (h->RequiresCredentialsInFile()) require_credential_file = true; return true; } void DataDeliveryService::LogToRootLogger(Arc::LogLevel level, const std::string& message) { Arc::Logger::getRootLogger().addDestinations(root_destinations); logger.msg(level, message); Arc::Logger::getRootLogger().removeDestinations(); } void DataDeliveryService::receiveDTR(DTR_ptr dtr) { LogToRootLogger(Arc::INFO, "Received DTR "+dtr->get_id()+" from Delivery in state "+dtr->get_status().str()); // delete temp proxy file if it was created if (dtr->get_source()->RequiresCredentialsInFile() || dtr->get_destination()->RequiresCredentialsInFile()) { std::string proxy_file(tmp_proxy_dir+"/DTR."+dtr->get_id()+".proxy"); LogToRootLogger(Arc::DEBUG, "Removing temp proxy "+proxy_file); if (unlink(proxy_file.c_str()) != 0 && errno != ENOENT) { LogToRootLogger(Arc::WARNING, "Failed to remove temporary proxy "+proxy_file+": "+Arc::StrError(errno)); } } if (current_processes > 0) --current_processes; } /* Accepts: <DataDeliveryStart> <DTR> <ID>id</ID> <Source>url</Source> <Destination>url</Destination> <Uid>1000</Uid> <Gid>1000</Gid> <Caching>true</Caching> <Size>12345</Size> <CheckSum>adler32:12345678</CheckSum> <MinAverageSpeed>100</MinAverageSpeed> <AverageTime>60</AverageTime> <MinCurrentSpeed>100</MinCurrentSpeed> <MaxInactivityTime>120</MaxInactivityTime> </DTR> ... </DataDeliveryStart> Returns <DataDeliveryStartResponse> <DataDeliveryStartResult> <Result> <ID>id</ID> <ResultCode>SERVICE_ERROR</ResultCode> <ErrorDescription>...</ErrorDescription> </Result> ... </DataDeliveryStartResult> </DataDeliveryStartResponse>
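The deleg:DelegatedToken element accompanies the DTR list: when its Format attribute is "token" its Value is used directly as a bearer token, otherwise it must refer to credentials already delegated to the service (see the checks at the top of Start() below).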
*/ Arc::MCC_Status DataDeliveryService::Start(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resp = out.NewChild("DataDeliveryStartResponse"); Arc::XMLNode results = resp.NewChild("DataDeliveryStartResult"); // Save credentials to temp file and set in UserConfig std::string x509_credential; std::string token_credential; Arc::XMLNode delegated_token = in["DataDeliveryStart"]["deleg:DelegatedToken"]; if (!delegated_token) { logger.msg(Arc::ERROR, "No delegation token in request"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "DataDeliveryService", "No delegation token received"); } if ((std::string)delegated_token.Attribute("Format") == "token") { token_credential = (std::string)delegated_token["Value"]; } else { // For X.509 delegation check credentials were already delegated if (!delegation.DelegatedToken(x509_credential, delegated_token)) { // Failed to accept delegation logger.msg(Arc::ERROR, "Failed to accept delegation"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "DataDeliveryService", "Failed to accept delegation"); } } for(int n = 0;;++n) { Arc::XMLNode dtrnode = in["DataDeliveryStart"]["DTR"][n]; if (!dtrnode) break; std::string dtrid((std::string)dtrnode["ID"]); std::string src((std::string)dtrnode["Source"]); std::string dest((std::string)dtrnode["Destination"]); int uid = Arc::stringtoi((std::string)dtrnode["Uid"]); int gid = Arc::stringtoi((std::string)dtrnode["Gid"]); if (dtrnode["Caching"] == "true") { uid = Arc::User().get_uid(); gid = Arc::User().get_gid(); } // proxy path will be set later Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); bool require_credential_file = false; Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("ID") = dtrid; if (!CheckInput(src, usercfg, resultelement, require_credential_file)) { resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement["ErrorDescription"] = (std::string)resultelement["ErrorDescription"] + ": Cannot use source"; logger.msg(Arc::ERROR, (std::string)resultelement["ErrorDescription"]); continue; } if (!CheckInput(dest, usercfg, resultelement, require_credential_file)) { resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement["ErrorDescription"] = (std::string)resultelement["ErrorDescription"] + ": Cannot use destination"; logger.msg(Arc::ERROR, (std::string)resultelement["ErrorDescription"]); continue; } if (current_processes >= max_processes) { logger.msg(Arc::WARNING, "All %u process slots used", max_processes); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "No free process slot available"; continue; } // check if dtrid is in the active list - if so it is probably a retry active_dtrs_lock.lock(); std::set::iterator i = active_dtrs.begin(); for (; i != active_dtrs.end(); ++i) { if ((*i)->get_id() == dtrid) break; } if (i != active_dtrs.end()) { if ((*i)->get_status() == DTRStatus::TRANSFERRING) { logger.msg(Arc::ERROR, "Received retry for DTR %s still in transfer", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "DTR is still in transfer"; active_dtrs_lock.unlock(); continue; } // Erase this DTR from active list logger.msg(Arc::VERBOSE, "Replacing DTR %s in state %s with new request", dtrid, (*i)->get_status().str()); active_dtrs.erase(i); } active_dtrs_lock.unlock(); std::string proxy_file(tmp_proxy_dir+"/DTR."+dtrid+".proxy"); if (require_credential_file) { // Store proxy, only 
readable by user. Use DTR job id as proxy name. // TODO: it is inefficient to create a file for every DTR, better to // use some kind of proxy store logger.msg(Arc::VERBOSE, "Storing temp proxy at %s", proxy_file); bool proxy_result = Arc::FileCreate(proxy_file, x509_credential, 0, 0, S_IRUSR | S_IWUSR); if (!proxy_result && errno == ENOENT) { Arc::DirCreate(tmp_proxy_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH, true); proxy_result = Arc::FileCreate(proxy_file, x509_credential, 0, 0, S_IRUSR | S_IWUSR); } if (!proxy_result) { logger.msg(Arc::ERROR, "Failed to create temp proxy at %s: %s", proxy_file, Arc::StrError(errno)); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "Failed to store temporary proxy"; continue; } if (chown(proxy_file.c_str(), uid, gid) != 0) { logger.msg(Arc::ERROR, "Failed to change owner of temp proxy at %s to %i:%i: %s", proxy_file, uid, gid, Arc::StrError(errno)); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "Failed to store temporary proxy"; continue; } usercfg.ProxyPath(proxy_file); } if(!x509_credential.empty()) { usercfg.CredentialString(x509_credential); } if(!token_credential.empty()) { usercfg.OToken(token_credential); } // Logger destinations for this DTR. Uses a string stream so log can easily be sent // back to the client. LogStream keeps a reference to the stream so we // cannot delete it until deleting LogStream. These pointers are // deleted when the DTR is archived. std::list logs; Arc::LogDestination * output = new DTRLogStream(); output->setFormat(Arc::MediumFormat); logs.push_back(output); std::string groupid(Arc::UUID()); DTR_ptr dtr(new DTR(src, dest, usercfg, groupid, uid, logs, "DataStaging")); if (!(*dtr)) { logger.msg(Arc::ERROR, "Invalid DTR"); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "Could not create DTR"; if (unlink(proxy_file.c_str()) != 0 && errno != ENOENT) { logger.msg(Arc::WARNING, "Failed to remove temporary proxy %s: %s", proxy_file, Arc::StrError(errno)); } continue; } ++current_processes; // Set source checksum to validate against if (dtrnode["CheckSum"]) dtr->get_source()->SetCheckSum((std::string)dtrnode["CheckSum"]); // Set filesize for protocols which need it if (dtrnode["Size"]) dtr->get_source()->SetSize(Arc::stringtoull((std::string)dtrnode["Size"])); // Get the callbacks sent to Scheduler and connect Delivery dtr->registerCallback(this, SCHEDULER); dtr->registerCallback(&delivery, DELIVERY); // Set transfer limits TransferParameters transfer_params; if (dtrnode["MinAverageSpeed"]) transfer_params.min_average_bandwidth = Arc::stringtoull((std::string)dtrnode["MinAverageSpeed"]); if (dtrnode["AverageTime"]) transfer_params.averaging_time = Arc::stringtoui((std::string)dtrnode["AverageTime"]); if (dtrnode["MinCurrentSpeed"]) transfer_params.min_current_bandwidth = Arc::stringtoull((std::string)dtrnode["MinCurrentSpeed"]); if (dtrnode["MaxInactivityTime"]) transfer_params.max_inactivity_time = Arc::stringtoui((std::string)dtrnode["MaxInactivityTime"]); delivery.SetTransferParameters(transfer_params); dtr->set_id(dtrid); dtr->set_status(DTRStatus::TRANSFER); DTR::push(dtr, DELIVERY); // Add to active list active_dtrs_lock.lock(); active_dtrs.emplace(dtr); active_dtrs_lock.unlock(); resultelement.NewChild("ResultCode") = "OK"; } return Arc::MCC_Status(Arc::STATUS_OK); } /* Accepts: id ... Returns: id ERROR ... 2 1 ... 1234 123456789 adler32:a123a45 ... 
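   A sketch of the expected message layout, using the element names handled
   by the implementation below (values are illustrative):
   <DataDeliveryQuery>
     <DTR><ID>id</ID></DTR>
     ...
   </DataDeliveryQuery>
   and the corresponding response:
   <DataDeliveryQueryResponse>
     <DataDeliveryQueryResult>
       <Result>
         <ID>id</ID>
         <ResultCode>TRANSFERRED, TRANSFERRING, TRANSFER_ERROR or SERVICE_ERROR</ResultCode>
         <ErrorDescription>...</ErrorDescription>
         <ErrorStatus>2</ErrorStatus>
         <ErrorLocation>1</ErrorLocation>
         <Log>...</Log>
         <BytesTransferred>1234</BytesTransferred>
         <TransferTime>123456789</TransferTime>
         <CheckSum>adler32:a123a45</CheckSum>
       </Result>
       ...
     </DataDeliveryQueryResult>
   </DataDeliveryQueryResponse>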
*/ Arc::MCC_Status DataDeliveryService::Query(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resp = out.NewChild("DataDeliveryQueryResponse"); Arc::XMLNode results = resp.NewChild("DataDeliveryQueryResult"); for(int n = 0;;++n) { Arc::XMLNode dtrnode = in["DataDeliveryQuery"]["DTR"][n]; if (!dtrnode) break; std::string dtrid((std::string)dtrnode["ID"]); Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("ID") = dtrid; active_dtrs_lock.lock(); std::set::iterator dtr_it = active_dtrs.begin(); for (; dtr_it != active_dtrs.end(); ++dtr_it) { if ((*dtr_it)->get_id() == dtrid) break; } if (dtr_it == active_dtrs.end()) { active_dtrs_lock.unlock(); // if not in active list, look in archived list archived_dtrs_lock.lock(); std::map >::const_iterator arc_it = archived_dtrs.find(dtrid); if (arc_it != archived_dtrs.end()) { resultelement.NewChild("ResultCode") = archived_dtrs[dtrid].first; resultelement.NewChild("ErrorDescription") = archived_dtrs[dtrid].second; archived_dtrs_lock.unlock(); continue; } archived_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "No such DTR %s", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "No such DTR"; continue; } DTR_ptr dtr = *dtr_it; std::list logdest = dtr->get_log_destinations(); for(auto log: logdest) { auto logstream = dynamic_cast(log); if (logstream) { resultelement.NewChild("Log") = logstream->str(); break; } } resultelement.NewChild("BytesTransferred") = Arc::tostring(dtr->get_bytes_transferred()); if (dtr->error()) { logger.msg(Arc::INFO, "DTR %s failed: %s", dtrid, dtr->get_error_status().GetDesc()); resultelement.NewChild("ResultCode") = "TRANSFER_ERROR"; resultelement.NewChild("ErrorDescription") = dtr->get_error_status().GetDesc(); resultelement.NewChild("ErrorStatus") = Arc::tostring(dtr->get_error_status().GetErrorStatus()); resultelement.NewChild("ErrorLocation") = Arc::tostring(dtr->get_error_status().GetErrorLocation()); resultelement.NewChild("TransferTime") = Arc::tostring(dtr->get_transfer_time()); archived_dtrs_lock.lock(); archived_dtrs[dtrid] = std::pair("TRANSFER_ERROR", dtr->get_error_status().GetDesc()); archived_dtrs_lock.unlock(); } else if (dtr->get_status() == DTRStatus::TRANSFERRED) { logger.msg(Arc::INFO, "DTR %s finished successfully", dtrid); resultelement.NewChild("ResultCode") = "TRANSFERRED"; resultelement.NewChild("TransferTime") = Arc::tostring(dtr->get_transfer_time()); // pass calculated checksum back to Scheduler (eg to insert in catalog) if (dtr->get_destination()->CheckCheckSum()) resultelement.NewChild("CheckSum") = dtr->get_destination()->GetCheckSum(); archived_dtrs_lock.lock(); archived_dtrs[dtrid] = std::pair("TRANSFERRED", ""); archived_dtrs_lock.unlock(); } else { logger.msg(Arc::VERBOSE, "DTR %s still in progress (%lluB transferred)", dtrid, dtr->get_bytes_transferred()); resultelement.NewChild("ResultCode") = "TRANSFERRING"; active_dtrs_lock.unlock(); return Arc::MCC_Status(Arc::STATUS_OK); } // Terminal state //delete dtr_it->second; active_dtrs.erase(dtr_it); active_dtrs_lock.unlock(); } return Arc::MCC_Status(Arc::STATUS_OK); } /* Accepts: id ... Returns: id ERROR ... ... 
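   A sketch of the expected message layout, using the element names handled
   by the implementation below:
   <DataDeliveryCancel>
     <DTR><ID>id</ID></DTR>
     ...
   </DataDeliveryCancel>
   and the corresponding response:
   <DataDeliveryCancelResponse>
     <DataDeliveryCancelResult>
       <Result>
         <ID>id</ID>
         <ResultCode>OK or SERVICE_ERROR</ResultCode>
         <ErrorDescription>...</ErrorDescription>
       </Result>
       ...
     </DataDeliveryCancelResult>
   </DataDeliveryCancelResponse>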
*/ Arc::MCC_Status DataDeliveryService::Cancel(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resp = out.NewChild("DataDeliveryCancelResponse"); Arc::XMLNode results = resp.NewChild("DataDeliveryCancelResult"); for (int n = 0;;++n) { Arc::XMLNode dtrnode = in["DataDeliveryCancel"]["DTR"][n]; if (!dtrnode) break; std::string dtrid((std::string)dtrnode["ID"]); Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("ID") = dtrid; // Check if DTR is still in active list active_dtrs_lock.lock(); std::set::iterator dtr_it = active_dtrs.begin(); for (; dtr_it != active_dtrs.end(); ++dtr_it) { if ((*dtr_it)->get_id() == dtrid) break; } if (dtr_it == active_dtrs.end()) { active_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "No active DTR %s", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "No such active DTR"; continue; } // DTR could be already finished, but report successful cancel anyway DTR_ptr dtr = *dtr_it; if (dtr->get_status() == DTRStatus::TRANSFERRING_CANCEL) { active_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "DTR %s was already cancelled", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "DTR already cancelled"; continue; } // Delivery will automatically kill running process if (!delivery.cancelDTR(dtr)) { active_dtrs_lock.unlock(); logger.msg(Arc::ERROR, "DTR %s could not be cancelled", dtrid); resultelement.NewChild("ResultCode") = "SERVICE_ERROR"; resultelement.NewChild("ErrorDescription") = "DTR could not be cancelled"; continue; } logger.msg(Arc::INFO, "DTR %s cancelled", dtr->get_id()); resultelement.NewChild("ResultCode") = "OK"; active_dtrs_lock.unlock(); } return Arc::MCC_Status(Arc::STATUS_OK); } /* Accepts: Returns: ERROR ... /var/arc/cache 6.5 ... ... 
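   A sketch of the expected message layout, using the element names handled
   by the implementation below (values are illustrative):
   <DataDeliveryPing/>
   and the corresponding response:
   <DataDeliveryPingResponse>
     <DataDeliveryPingResult>
       <Result>
         <ResultCode>OK</ResultCode>
         <AllowedDir>/var/arc/cache</AllowedDir>
         ...
         <LoadAvg>6.5</LoadAvg>
       </Result>
     </DataDeliveryPingResult>
   </DataDeliveryPingResponse>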
*/ Arc::MCC_Status DataDeliveryService::Ping(Arc::XMLNode in, Arc::XMLNode out) { Arc::XMLNode resultelement = out.NewChild("DataDeliveryPingResponse").NewChild("DataDeliveryPingResult").NewChild("Result"); resultelement.NewChild("ResultCode") = "OK"; for (std::list::iterator dir = allowed_dirs.begin(); dir != allowed_dirs.end(); ++dir) { resultelement.NewChild("AllowedDir") = *dir; } // Send the 5 min load average double avg[3]; if (getloadavg(avg, 3) != 3) { logger.msg(Arc::WARNING, "Failed to get load average: %s", Arc::StrError()); resultelement.NewChild("LoadAvg") = "-1"; } else { resultelement.NewChild("LoadAvg") = Arc::tostring(avg[1]); } return Arc::MCC_Status(Arc::STATUS_OK); } DataDeliveryService::DataDeliveryService(Arc::Config *cfg, Arc::PluginArgument* parg) : Service(cfg,parg), max_processes(100), current_processes(0) { valid = false; // Set medium format for logging root_destinations = Arc::Logger::getRootLogger().getDestinations(); for (std::list::iterator i = root_destinations.begin(); i != root_destinations.end(); ++i) { (*i)->setFormat(Arc::MediumFormat); } // Check configuration - at least one allowed IP address and dir must be specified if (!(*cfg)["SecHandler"]["PDP"]["Policy"]["Rule"]["Subjects"]["Subject"]) { logger.msg(Arc::ERROR, "Invalid configuration - no allowed IP address specified"); return; } if (!(*cfg)["AllowedDir"]) { logger.msg(Arc::ERROR, "Invalid configuration - no transfer dirs specified"); return; } for (int n = 0;;++n) { Arc::XMLNode allowed_dir = (*cfg)["AllowedDir"][n]; if (!allowed_dir) break; allowed_dirs.push_back((std::string)allowed_dir); } // Start archival thread if (!Arc::CreateThreadFunction(ArchivalThread, this)) { logger.msg(Arc::ERROR, "Failed to start archival thread"); return; } // Create tmp dir for proxies // TODO get from configuration tmp_proxy_dir = "/tmp/arc"; // clear any proxies left behind from previous bad shutdown Arc::DirDelete(tmp_proxy_dir); // Set restrictive umask umask(0077); // Set log level for DTR DataStaging::DTR::LOG_LEVEL = Arc::Logger::getRootLogger().getThreshold(); // Start new DataDelivery delivery.start(); valid = true; } DataDeliveryService::~DataDeliveryService() { // Stop accepting new requests and cancel all active transfers // DataDelivery destructor automatically calls stop() valid = false; // clear any proxies left behind Arc::DirDelete(tmp_proxy_dir); logger.msg(Arc::INFO, "Shutting down data delivery service"); } Arc::MCC_Status DataDeliveryService::process(Arc::Message &inmsg, Arc::Message &outmsg) { if (!valid) return make_soap_fault(outmsg, "Service is not valid"); // Check authorization if(!ProcessSecHandlers(inmsg, "incoming")) { logger.msg(Arc::ERROR, "Unauthorized"); return make_soap_fault(outmsg, "Authorization failed"); } std::string method = inmsg.Attributes()->get("HTTP:METHOD"); if(method == "POST") { logger.msg(Arc::VERBOSE, "process: POST"); logger.msg(Arc::VERBOSE, "Identity is %s", inmsg.Attributes()->get("TLS:PEERDN")); // Both input and output are supposed to be SOAP // Extracting payload Arc::PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_soap_fault(outmsg); } // Applying known namespaces inpayload->Namespaces(ns); if(logger.getThreshold() <= Arc::DEBUG) { std::string str; inpayload->GetDoc(str, true); logger.msg(Arc::DEBUG, "process: request=%s",str); } // Analyzing request Arc::XMLNode op = inpayload->Child(0); if(!op) { 
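      // The SOAP body carries no operation element at all: answer with a
      // generic fault instead of trying to dispatch.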
logger.msg(Arc::ERROR, "input does not define operation"); return make_soap_fault(outmsg); } logger.msg(Arc::VERBOSE, "process: operation: %s",op.Name()); Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns); outpayload->Namespaces(ns); Arc::MCC_Status result(Arc::STATUS_OK); // choose operation // Make a new request if (MatchXMLName(op,"DataDeliveryStart")) { result = Start(*inpayload, *outpayload); } // Query a request else if (MatchXMLName(op,"DataDeliveryQuery")) { result = Query(*inpayload, *outpayload); } // Cancel a request else if (MatchXMLName(op,"DataDeliveryCancel")) { result = Cancel(*inpayload, *outpayload); } // ping service else if (MatchXMLName(op,"DataDeliveryPing")) { result = Ping(*inpayload, *outpayload); } // Delegate credentials. Should be called before making a new request else if (delegation.MatchNamespace(*inpayload)) { if (!delegation.Process(*inpayload, *outpayload)) { delete outpayload; return make_soap_fault(outmsg); } } // Unknown operation else { logger.msg(Arc::ERROR, "SOAP operation is not supported: %s", op.Name()); delete outpayload; return make_soap_fault(outmsg); } if (!result) return make_soap_fault(outmsg, result.getExplanation()); if (logger.getThreshold() <= Arc::DEBUG) { std::string str; outpayload->GetDoc(str, true); logger.msg(Arc::DEBUG, "process: response=%s", str); } outmsg.Payload(outpayload); if (!ProcessSecHandlers(outmsg,"outgoing")) { logger.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); } } else { // only POST supported logger.msg(Arc::ERROR, "Only POST is supported in DataDeliveryService"); return Arc::MCC_Status(); } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status DataDeliveryService::make_soap_fault(Arc::Message& outmsg, const std::string& reason) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns,true); Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL; if(fault) { fault->Code(Arc::SOAPFault::Sender); if (reason.empty()) fault->Reason("Failed processing request"); else fault->Reason("Failed processing request: "+reason); } outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace DataStaging extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "datadeliveryservice", "HED:SERVICE", NULL, 0, &DataStaging::get_service }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/arc-datadelivery-service-start.in0000644000000000000000000000013115067751327026670 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.874493787 30 ctime=1759499030.921451789 nordugrid-arc-7.1.1/src/services/data-staging/arc-datadelivery-service-start.in0000644000175000002070000001005315067751327030572 0ustar00mockbuildmock00000000000000#!/bin/bash add_library_path() { location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes send_systemd_notify() { # return if no systemd-notify found type systemd-notify >/dev/null 2>&1 || return systemd-notify "$@" } log_failure_msg() { send_systemd_notify --status "Error: $@" echo $@ } # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . 
/etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-datadelivery-service ]; then . /etc/sysconfig/arc-datadelivery-service elif [ -r /etc/default/arc-datadelivery-service ]; then . /etc/default/arc-datadelivery-service fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ -d "$GLOBUS_LOCATION" ]; then export GLOBUS_LOCATION else GLOBUS_LOCATION= fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION readorigconfigvar() { value=`$ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --config "$1" -b "$2" -o "$3" 2>/dev/null` if [ $? -eq 0 ] ; then echo "$value" exit 0 else exit 1 fi } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi PID_FILE=`readorigconfigvar "$ARC_CONFIG" datadelivery-service pidfile` if [ "x$PID_FILE" = "x" ]; then # Missing default value for pidfile means no service block is present log_failure_msg "ARC configuration is missing [datadelivery-service] block" exit 1 fi if [ "$1" = "--getpidfile" ] ; then echo $PID_FILE exit 0 fi LOG_FILE=`readorigconfigvar "$ARC_CONFIG" datadelivery-service logfile` if [ "x$LOG_FILE" = "x" ]; then log_failure_msg "Log file could not be found in [datadelivery-service] block" exit 1 fi if [ ! -d `dirname "$LOG_FILE"` ]; then mkdir -p `dirname "$LOG_FILE"` fi prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing $CMD executable" exit 1 fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # check that if service is insecure no allowed_dns are defined SECURE=`readorigconfigvar "$ARC_CONFIG" datadelivery-service secure` ALLOWEDDN=`readorigconfigvar "$ARC_CONFIG" datadelivery-service allowed_dn` if [ "$SECURE" = "no" ]; then if [ "x$ALLOWEDDN" != "x" ]; then log_failure_msg "allowed_dn cannot be used with secure=no" exit 1 fi fi # Assuming ini style config CMD="$CMD -i $ARC_CONFIG -p $PID_FILE -l $LOG_FILE" if [ "x$GLOBUS_LOCATION" != "x" ]; then add_library_path "$GLOBUS_LOCATION" fi if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@ else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH cd / } if [ "$RUN" != "yes" ] ; then echo "arc-datadelivery-service disabled, please adjust the configuration to your" echo "needs and then set RUN to 'yes' in /etc/default/arc-datadelivery-service to" echo "enable it."
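    # A minimal sketch of the override file checked here (one of the files
    # sourced near the top of this script, if present):
    #
    #   # /etc/default/arc-datadelivery-service
    #   RUN=yes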
exit 0 fi prepare exec $CMD "$@" nordugrid-arc-7.1.1/src/services/data-staging/PaxHeaders/README0000644000000000000000000000013115067751327021207 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.873493772 30 ctime=1759499030.925059624 nordugrid-arc-7.1.1/src/services/data-staging/README0000644000175000002070000000011215067751327023104 0ustar00mockbuildmock00000000000000DataDeliveryService is a HED service for executing data transfer requests.nordugrid-arc-7.1.1/src/services/PaxHeaders/monitor0000644000000000000000000000013115067751426017376 xustar0030 mtime=1759499030.808450072 29 atime=1759499034.76351017 30 ctime=1759499030.808450072 nordugrid-arc-7.1.1/src/services/monitor/0000755000175000002070000000000015067751426021356 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327021510 xustar0030 mtime=1759498967.775071557 30 atime=1759498967.874493787 30 ctime=1759499030.602832919 nordugrid-arc-7.1.1/src/services/monitor/Makefile.am0000644000175000002070000000032015067751327023405 0ustar00mockbuildmock00000000000000SUBDIRS = man mon-icons lang includes monitordir = @monitor_prefix@ dist_monitor_DATA = $(srcdir)/*.php $(srcdir)/*.js monitor_DATA = README install-data-local: $(MKDIR_P) $(DESTDIR)$(monitordir)/cache nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/userlist.php0000644000000000000000000000013215067751327022037 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.615620807 nordugrid-arc-7.1.1/src/services/monitor/userlist.php0000644000175000002070000002056015067751327023744 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; // Header table $toppage->tabletop("",$toptitle." $family"); // Array defining the attributes to be returned $lim = array( "dn", USR_USSN, USR_CPUS, USR_QUEU, USR_DISK ); $ulim = array( "dn", JOB_NAME, JOB_EQUE, JOB_ECLU, JOB_GOWN, JOB_SUBM, JOB_STAT, JOB_USET, JOB_ERRS, JOB_CPUS ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 20; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // ldapsearch filter string for jobs $filter = "(&(objectclass=".OBJ_USER.")(".USR_USSN."=$uname))"; $ufilter = "(&(objectclass=".OBJ_AJOB.")(".JOB_GOWN."=$uname))"; $gentries = recursive_giis_info($giislist,"cluster",$errors,$debug); $nc = count($gentries); if ( !$nc ) { $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $ldapuri = "ldap://".$clhost.":".$clport; $clconn = ldap_connect($ldapuri); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters for (a) allowed queues and (b) for user jobs $uiarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); // Loop on results: first go queues // HTML table initialisation $utable = new LmTable("userres",$strings["userres"]); $urowcont = array(); $dnmsg = "".$errors["420"].": ".$uname; $utable->adderror($dnmsg, "#cccccc"); $nauclu = 0; $goodds = array(); $goodhn = array(); $goodpn = array(); for ( $ids = 0; $ids < $nhosts; $ids++ ) { $ui = $uiarray[$ids]; $hn = $hnarray[$ids]; $pn = $pnarray[$ids]; $dst = $dsarray[$ids]; $curl = popup("clusdes.php?host=$hn&port=$pn",700,620,1,$lang,$debug); if ($dst && $ui) { $nqueues = @ldap_count_entries($dst,$ui); if ($nqueues > 0) { $nauclu++; array_push($goodds,$dst); array_push($goodhn,$hn); array_push($goodpn,$pn); // If there are valid entries, tabulate results $allres = ldap_get_entries($dst,$ui); $results = ldap_purge($allres); $nqueues = $allres["count"]; // define("CMPKEY",USR_CPUS); // usort($allres,"ldap_entry_comp"); // loop on queues for ($j=0; $j<$nqueues; $j++) { $parts = ldap_explode_dn($allres[$j]["dn"],0); foreach ($parts as $part) { $pair = explode("=",$part); switch ( $pair[0] ) { case CLU_NAME: $ucluster = $pair[1]; break; case QUE_NAME: $uqueue = $pair[1]; break; } } if ( $debug == 2 ) dbgmsg("$hn -- $ucluster
    "); $qurl = popup("quelist.php?host=$ucluster&port=$pn&qname=$uqueue",750,430,6,$lang,$debug); $curl = popup("clusdes.php?host=$ucluster&port=$pn",700,620,1,$lang,$debug); $fcpu = $allres[$j][USR_CPUS][0]; $fproc = freeproc($fcpu); $fdisk = $allres[$j][USR_DISK][0]; $exque = $allres[$j][USR_QUEU][0]; $urowcont[] = "$ucluster:$uqueue"; $urowcont[] = $fcpu; $urowcont[] = $exque; $urowcont[] = $fdisk; $utable->addrow($urowcont); $urowcont = array(); } } else { $utable->adderror("".$errors["11"]." $hn"); } } else { $utable->adderror("$hn ".$errors["12"].""); } @ldap_free_result($ui); } $utable->adderror("".$errors["421"].$nauclu.$errors["422"]."", "#0099FF"); $utable->close(); echo "
    \n"; $srarray = @ldap_search($goodds,DN_LOCAL,$ufilter,$ulim,0,0,$tlim,LDAP_DEREF_NEVER); // HTML table initialisation $jtable = new LmTable($module,$toppage->$module); $rowcont = array(); $jcount = 0; $nghosts = count($goodds); for ( $ids = 0; $ids < $nghosts; $ids++ ) { $sr = $srarray[$ids]; $dst = $goodds[$ids]; $gpn = $goodpn[$ids]; $ghn = $goodhn[$ids]; if ($dst && $sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($dst,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $allentries = ldap_get_entries($dst,$sr); $entries = ldap_purge($allentries); $njobs = $entries["count"]; define("CMPKEY",JOB_SUBM); usort($entries,"ldap_entry_comp"); // loop on jobs for ($i=1; $i<$njobs+1; $i++) { $jobdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][JOB_STAT][0]; $stahead = substr($curstat,0,12); if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][JOB_NAME][0]) ? $jname : "N/A"; $queue = ($entries[$i][JOB_EQUE][0]) ? $entries[$i][JOB_EQUE][0] : "N/A"; $cluster = ($entries[$i][JOB_ECLU][0]) ? $entries[$i][JOB_ECLU][0] : "N/A"; $time = ($entries[$i][JOB_USET][0]) ? $entries[$i][JOB_USET][0] : "N/A"; $ncpus = ($entries[$i][JOB_CPUS][0]) ? $entries[$i][JOB_CPUS][0] : ""; $error = ($entries[$i][JOB_ERRS][0]); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; if ( $debug == 2 ) dbgmsg("$ghn --- $cluster
    "); $newwin = popup("jobstat.php?host=$cluster&port=$gpn&status=$status&jobdn=$jobdn",750,430,4,$lang,$debug); $quewin = popup("quelist.php?host=$cluster&port=$gpn&qname=$queue",750,430,6,$lang,$debug); $clstring = popup("clusdes.php?host=$cluster&port=$gpn",700,620,1,$lang,$debug); $jcount++; // filling the table $rowcont[] = "$jcount $error"; $rowcont[] = "$jobname"; $rowcont[] = "$curstat"; $rowcont[] = "$time"; $rowcont[] = "$cluster"; $rowcont[] = "$queue"; $rowcont[] = "$ncpus"; $jtable->addrow($rowcont); $rowcont = array(); } } } @ldap_free_result($sr); } if ( !$jcount ) $jtable->adderror("".$errors["13"].$family.""); $jtable->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/lang0000644000000000000000000000013115067751426020317 xustar0030 mtime=1759499030.806450042 29 atime=1759499034.76351017 30 ctime=1759499030.806450042 nordugrid-arc-7.1.1/src/services/monitor/lang/0000755000175000002070000000000015067751426022277 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022431 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.788230586 nordugrid-arc-7.1.1/src/services/monitor/lang/Makefile.am0000644000175000002070000000015615067751327024335 0ustar00mockbuildmock00000000000000monitorlangdir = @monitor_prefix@/lang monitorlang_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorlang_DATA) nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/de.inc0000644000000000000000000000013215067751327021460 xustar0030 mtime=1759498967.777617767 30 atime=1759498967.875493803 30 ctime=1759499030.793132426 nordugrid-arc-7.1.1/src/services/monitor/lang/de.inc0000644000175000002070000014167215067751327023375 0ustar00mockbuildmock00000000000000 N/A bezeichnet einen Job ohne Namen.
    X bezeichnet einen Job, der durch den Nutzer abgebrochen wurde.
    ! bezeichnet einen Job, der nicht erfolgreich ausgeführt wurde.
Klicken Sie auf den jeweiligen Jobnamen für eine detaillierte Beschreibung."; $str_nam = "Name des Nutzers wie im user certificate spezifiziert. Klicken Sie auf den Namen, um eine Liste aller Ressourcen zu erhalten, die für diesen Nutzer zur Verfügung stehen, sowie eine Liste aller Jobs dieses Users im System."; $str_sta = "Jobstatus wie angegeben durch den Grid Manager (GM) und LRMS; Jobs durchlaufen die Zustände in der folgenden Reihenfolge:
    ACCEPTED – job wurde submitted aber er wird noch nicht ausgeführt.
    PREPARING – Eingabedateien werden übertragen
    SUBMITTING – Interaktion mit dem LRMS
    INLRMS – der Job ist unter der Kontrolle des LRMS; sein interner Zustand wird durch das Infosystem bestimmt. Mögliche solche Zustände sind:
    : Q – Job ist in der Queue (queued)
    : U – Job wurde unterbrochen (suspended) wegen eines anderen Prozesses (PBSPro)
    : S – Job wurde unterbrochen (suspended) (Condor)
    : R, run – Job wird ausgeführt
    : E – Job wird beendet (PBS)
    FINISHING – Ausgabedateien werden durch den GM transferiert
    FINISHED – Job wurde beendet, eine Zeitmarke (time stamp) wird durch das Infosystem hinzugefügt
    CANCELING – Job wurde abgebrochen
    DELETED – Job wurde nicht nach dem Download des Anwenders, sondern durch den GM wegen Überschreitung der Ablauffrist (expiration date) gelöscht.
Jeder der Zustände kann durch den Prefix PENDING gekennzeichnet sein. Der GM versucht dann, diesen Zustand für diesen Job zu erreichen."; $str_tim = "CPU-Zeit des Jobs, gemessen in Minuten."; $str_mem = "Speicherbedarf des Jobs, gemessen in KB."; $str_cpu = "Die Anzahl genutzter Prozessoren des Jobs."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
Diese Übersicht zeigt alle Teilnehmer, die an der Spitze des ARC registriert sind. Sie sind primär nach ihrem Land sortiert und dann anhand deren Namen. Ausgewählte Parameter werden überwacht: Cluster alias, die Anzahl aller CPUs und solcher reserviert für lokale Jobs, die Anzahl laufender und wartender Aufträge. Nutzen Sie die "Search" Funktion, um andere Charakteristika von Clustern, Queues, Aufträgen, etc. zu vergleichen
    Land
    ".$clickable.". Landesflagge und -name wie abgeleitet von der resource-Beschreibung. Anklicken, um Informationen zur Gridnutzung dieses Landes zu sehen.
    Cluster
    ".$clickable.". Alternativer Name des Cluster wie durch dessen owner festgelegt. Es werden maximal 22 Zeichen dargestellt. Durch Anlicken werden detaillierte Informationen zum Cluster dargestellt.
    CPUs
    Gesamtanzahl der CPUs im Cluster. NB! Nur ein Teil dieser mag tatsächlich auch für Grid Nutzer verfügbar sein.
    Last (Prozesse:Grid+lokal)
    ".$clickable.". Relative Auslastung des Clusters, abgeleitet von der Anzahl belegter CPUs. Graue Balken stellen die mit lokalen Jobs belegten CPUs dar, rote Balken solche, die von über das Grid submitteten Jobs beansprucht sind. Klicke auf die Balken, um eine detaillierte Liste aller Jobs zu erhalten, inklusive der Anzahl genutzter Prozessoren je Job.
    Wartend
    ".$clickable.". Anzahl aller wartenden Jobs auf dem Cluster, angezeigt als die Anzahl solcher durch das Grid submitteter Jobs plus die Anzahl derjenigen, die lokal submitted wurden. Klicke auf die erste Nummer, um die Liste der wartenden Grid-Jobs zu erhalten.
    ", "Land" => 30, "Site" => 160, "CPUs" => 10, "Last (Prozesse: Grid+lokal)" => 210, "In einer Queue" => 10 ), "clusdes" => array("0" => "Details einer Resource zu", "help" => "
    Attribut
    ".$clickable.". Cluster Attributename".$str_att."
    Wert
    ".$str_val."
    Queue
    ".$clickable.". Namen von batch queues verfügbar fü ARC Nutzer, wie festgelegt durch die owner des Clusters." .$str_que."
    Status
    Queue status. Eine operationelle Queue hat typischerweise den Status active.
    CPU (min)
    Zeitbegrenzung für einen Job. Der erste Wert ist untere Grenze, der zweite die obere. Wenn keine Begrenzungen gesetzt sind, es wird dann jede Lauflänge akzeptiert, wird N/A angezeigt.
    Running
    Die Anzahl von Jobs, die in der Queue aktiv sind. Die Gesamtanzahl der Jobs wird angezeigt, mit der Anzahl belegter Prozessoren in Klammern. NB! Für Jobs mit Parallelverarbeitung kann diese Anzahl deutlich höher sein als die Anzahl der Jobs.
Queueing
    Anzahl von Jobs, die auf deren Ausführung warten. Die Gesamtanzahl wird angezeigt mit der Anzahl durch das Grid submitteter Jobs in Klammern.
    ", "Queue" => 0, "Mapping Queue" => 0, "Status" => 0, "Limiten (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name des Jobs wie durch den owner festgelegt. Wenn kein Name zugewiesen wurde, so wird "N/A" angezeigt. Bei Klick auf dem Namen wird eine detaillierte Beschreibung des Jobs angezeigt.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name der Queue in der der Job ausgeführt wird. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribut
    ".$clickable.". Job Attributname".$str_att."
    Wert
    ".$str_val."
    ", "Jobname" => 0, "Eigner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtuelle Organisationen", "help" => "
    Virtuelle Organisation
    ".$clickable.". Gruppe von Anwendern, üblicherweise gemeinsame Aktivit&aul;ten und Ressourcen teilend. Wenigstens ein Cluster des ARC akzeptiert diese. Klicken Sie auf den Namen, um eine Liste der Mitglieder zu erhalten.
    Mitglieder
    Anzahl der Mitglieder.
    Verwaltet durch
    LDAP Server der die Mitglieder-Datenbank hält.
    ", "Virtuelle Organisation" => 0, "Mitglieder" => 0, "Verwaltet durch" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    Des Nutzers Arbeitgeber wie durch den VO manager angegeben. Kann freigelassen werden.
    E-mail
    ".$clickable.". Des Nutzers eMail-Adresse wie angegeben durch den VO Manager. Darf freigelassen werden. Durch Anlicken der Adresse kann eine eMail an den Nutzer gesendet werden.
    ", "#" => 0, "Name" => 0, "Zugehörigkeit" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information für", "help" => "
    Cluster:queue
    ".$clickable.". Namen der Cluster und deren Queues (getrennt durch einen Doppelpunkt ":") auf welche ein Nutzer Zugriff hat. Ist ein Nutzer nicht autorisiert, wird die Nachricht "Not authorised at host ..." angezeigt. Bei Anlicken der Cluster Namens wird die Beschreibung des Clusters gegeben, genauso wie bei einer Auswahl der Queue.
    Freie CPUs
Die Anzahl von freien CPUs, die für eine bestimmte Queue für einen bestimmten Nutzer zu einem bestimmten Moment, ggf. eingeschränkt durch die Angabe der maximalen Laufzeit (Angabe in Minuten), verfügbar sind. Zum Beispiel bedeutet "3", daß 3 CPUs für einen Job unbeschränkter Laufzeit verfügbar sind. "4:360" beschreibt die Verfügbarkeit von vier Jobs für nicht länger als 6 Stunden. "10:180 30" bedeutet, daß 10 CPUs verfügbar sind für Jobs, die nicht länger rechnen als 3 Stunden, sowie weitere 30 für Jobs mit unbeschränkter Laufzeit. "0" bedeutet, daß keine CPUs verfügbar sind und neue Jobs entsprechend warten müssen.
    Wartenden Jobs
Anzahl von Jobs des Anwenders, die in der Warteschlange vor einem neuen Job sind. Die Zahl "0" bedeutet, dass der Job sofort ausgeführt wird. NB! Dies ist nur eine Abschätzung, durch den Einfluss lokaler Administratoren ist eine sichere Angabe nicht möglich.
    Freier Diskplatz (MB)
    Für einen Nutzer verfügbarer Diskplatz (in Megabytes). NB! Dies ist nur eine Abschätzung, die meisten Cluster haben keine solchen Quotas festgelegt.
    Jobname
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name des Clusters bei dem der Job ausgeführt wird. Bei Klick auf den Namen werden detaillierte Informationen zu dem Cluster präsentiert.
    Queue
    ".$clickable.". Name der Queue, in which der Job ausgeführt wird oder wurde. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Jobname" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attributwerte", "help" => "
    Objekt
    ".$clickable.". Names des Objektes dessen Attribute angezeigt werden. Es kann ein Cluster sein, dessen Queue, ein Job, ein Anwender etc. Nach einer Auswahl durch Anklicken der Zeichenkette werden detaillierte Information angezeigt.
    Attribute
Für jedes Objekt werden ein oder mehrere Attribute angezeigt. Der Spaltentitel ist der Klarname des Attributes, von einigen MDS-spezifischen Attributen abgesehen; der Inhalt entspricht den Werten, wie sie im Informationssystem abgelegt sind.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Queue", "help" => "
    Attribut
    ".$clickable.". Name des Queue Attributs".$str_att."
    Wert
    ".$str_val."
    Jobname
    ".$clickable.". ".$str_job."
    Eigner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Speicher (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Jobname" => 0, "Eigner" => 0, "Status" => 0, "CPU (min)" => 0, "Speicher (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage Element Zweitname wie festgelegt im Informationssystem. Maximal 15 Zeichen werden angezeigt.
    Tot. space
    Totaler Plattenplatz (GB).
    Freier Plattenplatz
    Verfügbarer Plattenplatz in GB.
    Name
Name des Storage Elements, bestehend aus einem logischen Namen und dem Namen des Hosts (getrennt durch einen Doppelpunkt ":"). Der logische Name wird nur für die interne Verwaltung genutzt, um verschiedene Einheiten auf demselben System zu unterscheiden.
    Basis URL
URL des Storage Elements, üblich ist die Verwendung des gsiftp:// Protokolls. Diese URL dient als Basis für den Zugriff auf Dateien.
    Typ
Storage Element Typ. "gridftp-based" beschreibt Plattenplatz, der über die GridFTP-Schnittstelle verfügbar ist.
    ", "#" => 0, "Alias" => 0, // "Tot. Platz" => 0, "Freier/Tot. Platz, GB" => 0, "Name" => 0, "Basis URL" => 0, "Typ" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Zugehörigkeit:
    Das Institut oder die Firma, die den Anwender beschäftigt. Der Eintrag ist abgeleitet vom personal certificate
    Jobs
    Zählt alle Jobs des Anwenders im System (running, pending, finished oder deleted)
    Sites
    Gibt an, wieviele teilnehmende Cluster Aufträge dieses Nutzers annehmen.
    ", "#" => 0, "Name" => 0, "Zugehörigkeit" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Freie CPUs" => 0, "Wartenden Jobs" => 0, "Freier Plattenplatz (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Wert" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info gültig von (GMT)", "Mds-validto" => "Info gültig bis (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain Name", "nordugrid-cluster-aliasname" => "Cluster Alias", "nordugrid-cluster-contactstring" => "Kontakt", "nordugrid-cluster-interactive-contactstring" => "Interactiver Kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-mail Kontakt", "nordugrid-cluster-acl" => "Autorisierte VOs", "nordugrid-cluster-lrms-type" => "LRMS Typ", "nordugrid-cluster-lrms-version" => "LRMS Version", "nordugrid-cluster-lrms-config" => "LRMS Details", "nordugrid-cluster-architecture" => "Architektur", "nordugrid-cluster-opsys" => "Operating System", "nordugrid-cluster-homogeneity" => "Homogener Cluster", "nordugrid-cluster-nodecpu" => "CPU Typ (langsamster)", "nordugrid-cluster-nodememory" => "Memory (MB, kleinster)", "nordugrid-cluster-totalcpus" => "CPUs, gesamt", "nordugrid-cluster-cpudistribution" => "CPU:Hosts", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Plattenplatz, verfügbar (MB)", "nordugrid-cluster-sessiondir-total" => "Plattenplatz, gesamt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Lebensdauer der Grid Session (min)", "nordugrid-cluster-cache-free" => "Cache size, verfügbar (MB)", "nordugrid-cluster-cache-total" => "Cache size, gesamt (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, lokal", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, totale Anzahl", "nordugrid-cluster-usedcpus" => "CPUs, belegt", "nordugrid-cluster-queuedjobs" => "Jobs, in Queue wartend", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, noch nicht submitted", "nordugrid-cluster-location" => "Postleitzahl", "nordugrid-cluster-owner" => "Eigner", "nordugrid-cluster-issuerca" => "Zertifikat-Aussteller", "nordugrid-cluster-issuerca-hash" => "Zertifikat-Aussteller's kash", "nordugrid-cluster-trustedca" => "Akzeptierte Zertificat-Aussteller", "nordugrid-cluster-nodeaccess" => "IP Konnektivität der Hosts", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid Plattenplatz (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS Distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Queue Name", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Queue Status", "nordugrid-queue-running" => "CPUs, belegt", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, noch nicht submitted", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs pro Unix User (max)", "nordugrid-queue-maxcputime" => "CPU Zeit, max. (min)", "nordugrid-queue-mincputime" => "CPU Zeit, min. 
(min)", "nordugrid-queue-defaultcputime" => "CPU Zeit, default (min)", "nordugrid-queue-maxwalltime" => "Zeit auf Küchenuhr, max. (min)", "nordugrid-queue-minwalltime" => "Zeit auf Küchenuhr, min. (min)", "nordugrid-queue-defaultwalltime" => "Zeit auf Küchenuhr, default (min)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, gesamt", "nordugrid-queue-nodecpu" => "CPU Typ", "nordugrid-queue-nodememory" => "Speicher (MB)", "nordugrid-queue-architecture" => "Architektur", "nordugrid-queue-opsys" => "Betriebssystem", "nordugrid-queue-homogeneity" => "Homogene Queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs je Queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU typ (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Eigner", "nordugrid-job-execcluster" => "Execution Cluster", "nordugrid-job-execqueue" => "Execution Queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Verlangte CPU Zeit", "nordugrid-job-reqwalltime" => "Verlangte Zeit auf Küchenuhr", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in der Queue", "nordugrid-job-comment" => "LRMS Kommentar", "nordugrid-job-submissionui" => "Submitted von", "nordugrid-job-submissiontime" => "Submitted um (GMT)", "nordugrid-job-usedcputime" => "Benötigte CPU Zeit", "nordugrid-job-usedwalltime" => "Benötigte Zeit auf Küchenuhr", "nordugrid-job-completiontime" => "Job beendet um (GMT)", "nordugrid-job-sessiondirerasetime" => "Job gelöscht um (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy Verfallzeit (GMT)", "nordugrid-job-usedmem" => "Verwendeter Speicher (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit Code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Angeforderte CPUs", "nordugrid-job-executionnodes" => "Aufrührende Rechner", "nordugrid-job-gmlog" => "GM Logdatei", "nordugrid-job-clientsoftware" => "Version des Clients", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Freie CPUs", "nordugrid-authuser-diskspace" => "Freier Plattenplatz (MB)", "nordugrid-authuser-queuelength" => "Wartende Jobs des Users", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage Element Alias", "nordugrid-se-type" => "Storage Element Typ", "nordugrid-se-acl" => "Autorisierte VOs", "nordugrid-se-freespace" => "Freier Plattenplatz (MB)", "nordugrid-se-totalspace" => "Gesamter Plattenplatz (MB)", "nordugrid-se-url" => "Kontakt URL", "nordugrid-se-baseurl" => "Kontakt URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Zugangskontrolle", "nordugrid-se-authuser" => "Zugelassene User (DN)", "nordugrid-se-location" => "Postleitzahl", "nordugrid-se-owner" => "Eigner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Zertifikat-Aussteller", "nordugrid-se-issuerca-hash" => "Zertifikat-Aussteller ID", "nordugrid-se-trustedca" => "Vertrauten 
Zertifikat-Ausstellern", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domainname", "nordugrid-rc-aliasname" => "Replica Catalog Alias", "nordugrid-rc-baseurl" => "Kontakt URL", "nordugrid-rc-authuser" => "Zugelassene User (DN)", "nordugrid-rc-location" => "Postleitzahl", "nordugrid-rc-owner" => "Eigner", "nordugrid-rc-issuerca" => "Zertifikat-Aussteller" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Die top-level resource Indizes konnten nicht gelesen werden", "2" => "Keiner der lokalen Indizes konnte erreicht werden", // ? "3" => " schlechte Konfiguration oder Zeitüberschreitung bei der Anfrage", "4" => "Keine Grid Jobs gefunden", "5" => "Keine Information gefunden", "6" => "Server nicht verfügbar", "7" => " - später neu laden", "8" => "Keine Informationen zur Queue gefunden", "9" => "Keine Ei nträge gefunden.", "10" => "Keine Nutzer gefunden.", "11" => "Bei diesem Host nicht autorisiert", "12" => "antwortet nicht", "13" => "Keine jüngst submitteten Jobs gefunden für ", // debug messages "101" => " Monitor timeouts für GRIS: ", "102" => " sek für Verbindung und ", "103" => " sek beim Suchen", "104" => " sek verbracht beim Suchen", "105" => "Zeige Ressourcen nur in ", "106" => "Polled top-level Indizes: ", "107" => "Erhielt geographische Ortsangaben, gescante Sites: ", // ? "108" => " sites geographisch geordnet", "109" => "Suche nach Cluster Attributen", "110" => "Suche for Queue Attributen", "111" => "Keine Daten von ", "112" => " funktioniert in ", // ? "113" => " hat keine Resourcen anzubieten", "114" => " Monitor timeouts für GIIS: ", "115" => "Überspringe GRIS: ", "116" => "nicht ein ", "117" => "Teste Verbindung: ", "118" => "OK", "119" => "Entdeckte bislang Ressourcen der folgenden Art ", "120" => "LDAP Fehler beim Suchen ", "121" => " Status bei ", "122" => "Blacklisted: ", "123" => "Registrant gefunden für ", "124" => "Suche nach SE Attributen", "125" => "Suche nach Nutzern", "126" => "Suche nach jobs", "127" => " hat Job ", "128" => " obwohl nicht autorisiert", "129" => "Kann die Objektdaten nicht erhalten: Fehler ", "130" => " Monitor timeouts für EMIR: ", // icon titles "301" => "Update", "302" => "Drucken", "303" => "Hilfe", "304" => "Schließen", "305" => "Rot", "306" => "Grün", "307" => "Alle Nutzer", "308" => "Aktive Nutzer", "309" => "Suchen", "310" => "Storage", "311" => "VOs", "312" => "Flagge von ", "313" => " Grid Prozesse und ", "314" => " lokale Prozesse", // auxilliary strings "401" => "Prozesse", "402" => "Grid", "403" => "Lokal", "404" => "Globus", "405" => "TOTAL", "406" => " sites", "407" => "eine Menge", "408" => " GB", "409" => " ALLE", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "Nutzer", "414" => "Storage", "415" => "Replica Cat.", "416" => "Definiere Attribute, die für das Objekt anzuzeigen sind: ", "417" => "AND von allen Ausdrücken wird gesucht", // ? "418" => "Feld ganz rechts freilassen, um alles anzuzeigen", "419" => "Personalisierte Anzeige von Ressourcen", "420" => "Eindeutiger Name", "421" => "Kann insgesamt nutzen ", "422" => " sites", "423" => "Resource / Object:", "424" => "Nr. von Attributen (def. 
6):", "425" => "Objekt", "426" => "Nächstes", "427" => "Auswahl", "428" => "Reset", "429" => "ANZEIGEN" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australien", "Austria" => "Österreich", "Armenia" => "Armenien", "Algeria" => "Algerien", "Belgium" => "Belgien", "Bulgaria" => "Bulgarien", "Canada" => "Canada", "China" => "China", "Czechia" => "Tschechien", "Denmark" => "Dänemark", "Estonia" => "Estland", "Finland" => "Finnland", "France" => "Frankreich", "Georgia" => "Georgien", "Germany" => "Deutschland", "Greece" => "Griechenland", "Hungary" => "Ungarn", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italien", "Japan" => "Japan", "Latvia" => "Lettland", "Lithuania" => "Litauen", "Morocco" => "Marokko", "Netherlands" => "Niederlande", "Norway" => "Norwegen", "Poland" => "Polen", "Portugal" => "Portugal", "Romania" => "Rumänien", "Russia" => "Russland", "SriLanka" => "Sri Lanka", "Sweden" => "Schweden", "Slovakia" => "Slowakei", "Slovenia" => "Slowenien", "Switzerland" => "Schweiz", "Turkey" => "Türkei", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/sk.inc0000644000000000000000000000013215067751327021505 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.877493833 30 ctime=1759499030.802678034 nordugrid-arc-7.1.1/src/services/monitor/lang/sk.inc0000644000175000002070000015155515067751327023423 0ustar00mockbuildmock00000000000000ARC
    ."; $str_val = "Hodnota atribútu v InformaÄnom Systéme."; $str_que = "ObyÄajne sa jednotlivé rady navzájom líšia prípustným trvaním úloh, prípadne rôznymi skupinami užívateľov. Po kliknutí na prísluÅ¡nú radu získate podrobné informácie vrátane zoznamu bežiacich, Äakajúcich a ukonÄených úloh."; $str_job = "Názov úlohy zvolený užívateľom.
    N/A znamená, že vlastník nepriradil úlohe žiadne meno.
X znamená, že úloha bola ukončená vlastníkom.
! znamená, že pri plnení úlohy došlo k chybe.
Kliknutím zobrazíte podrobné informácie o úlohe."; $str_nam = "Meno užívateľa, podľa jeho osobného certifikátu. Kliknutím získate zoznam všetkých jemu dostupných zdrojov, ako aj zoznam všetkých úloh spustených týmto užívateľom momentálne v systéme."; $str_sta = "Stav úlohy: podľa Gridového Manažéra (GM) a systému správy lokálnych zdrojov (LRMS). Poradie možných stavov je nasledujúce:
ACCEPTED – úloha je prijatá, ale jej vykonávanie ešte nezačalo
PREPARING – sťahujú sa vstupné súbory
SUBMITTING – informácie sa posielajú do LRMS
INLRMS – správa úlohy predaná LRMS; informačný systém zabezpečuje informácie o vnútornom stave úlohy. Možné sú nasledujúce stavy:
: Q – úloha čaká v rade
: U – úloha je pozastavená na preťaženom pracovnom uzle (PBSPro)
    : S – úloha je pozastavená (Condor)
    : R, run – úloha sa vykonáva
: E – úloha sa ukončuje (PBS)
FINISHING – výstupné súbory sú prenášané na miesto určenia
FINISHED – úloha je ukončená; čas ukončenia je stanovený informačným systémom
CANCELING – úloha sa ruší
DELETED – výstupy úlohy nezmazané užívateľom, ale zmazané GM po expiračnej dobe
Ku každému stavu môže byť pridaná predpona \"PENDING:\", ktorá značí, že GM práve nemôže prejsť k nasledujúcemu kroku v dôsledku vnútorných obmedzení."; $str_tim = "CPU čas spotrebovaný úlohou, v minútach."; $str_mem = "Rozsah operačnej pamäte využívanej úlohou, v KB."; $str_cpu = "Počet CPU využívaných úlohou."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
V tomto okne sa zobrazujú všetky výpočtové zdroje, ktoré sa registrujú do najvyššieho indexovacieho servisu ARC. Tabuľka je usporiadaná podľa anglického názvu príslušnej krajiny a v sekcii pre danú krajinu podľa názvu hlavného stroja daného zdroja (výpočtového klástra). Pre každý kláster sa uvádzajú nasledujúce parametre: názov, celkové množstvo procesorov, počet obsadených procesorov a tiež počet bežiacich a čakajúcich úloh tak spustených cez Grid ako aj lokálne. Použite utilitu "Vyhľadávanie" pre prezeranie a porovnanie s parametrami ostatných klástrov, rád, úloh atď.
    Krajina
    ".$clickable.". Vlajka a názov krajiny. Názov krajiny urÄený z dostupného popisu výpoÄtového zdroja. Kliknite pre zobrazenie zdrojov výluÄne z danej krajiny.
    Zdroj
    ".$clickable.". Názov zdroja (obyÄajne výpoÄtového klástra) daný jeho majiteľom. Maximálna dĺžka zobrazeného reÅ¥azca je 22 znakov. Kliknite na názov pre podrobné informácie o zdroji.
    CPU
Celkový počet CPU zdroja. Pozor! Je možné, že iba časť je využiteľná cez Grid.
Záťaž (procesy)
".$clickable.". Relatívne využitie zdroja, zodpovedajúce počtu zaťažených CPU. Sivá úsečka zodpovedá počtu procesorov obsadených lokálnymi úlohami, zelená úsečka zodpovedá procesorom vykonávajúcim Gridové úlohy. Kliknite pre podrobné informácie o všetkých Gridových úlohách bežiacich na zdroji, vrátane informácií o počte procesorov na každú úlohu.
Čakajúce
".$clickable.". Počet všetkých úloh čakajúcich v rade na danom zdroji, uvádzaný ako súčet Gridových a lokálnych úloh. Kliknite na prvé z čísel pre podrobné informácie o všetkých Gridových úlohách čakajúcich v rade.
", "Krajina" => 30, "Zdroj" => 160, "Počet CPU" => 10, "Záťaž (procesy: Grid+lokálne)" => 210, "Čakajúce" => 10 ), "clusdes" => array( "0" => "Opis zdroja", "help" => "
    Atribút
    ".$clickable.". Názov atribútov zdroja".$str_att."
    Hodnota
    ".$str_val."
    Rada
    ".$clickable.". Názvy (dané majiteľom zdroja) rád, dostupných Gridovým užívateľom. ".$str_que."
    Stav
Stav rady. Fungujúca rada obyčajne udáva stav active.
Trvanie (min)
Limit trvania úlohy v danej rade - ak je limit stanovený - v minútach procesorového času. Prvá zobrazená hodnota je dolný limit, druhá predstavuje horné ohraničenie tohto parametra. Ak limity nie sú stanovené (úlohy ľubovoľného času trvania sú akceptované), zobrazí sa reťazec N/A.
Bežiace
Počet úloh spracovávaných v rade. Zobrazuje sa celkový počet úloh, pričom počet procesorov obsadených Gridovými úlohami je uvedený v zátvorkách, napr. (Grid: 12). Pozor! Pri paralelných mnohoprocesorových úlohách môže byť číslo v zátvorkách väčšie ako počet úloh.
Čakajúce
Počet úloh čakajúcich na spustenie v rade. Zobrazuje sa celkový počet úloh, pričom množstvo úloh spustených cez Grid je uvedené v zátvorkách, napr. (Grid: 235)
", "Rada" => 0, "Mapping Queue" => 0, "Stav" => 0, "Trvanie (min)" => 0, "Počet CPU" => 0, "Bežiace" => 0, "Čakajúce" => 0 ), "jobstat" => array( "0" => "Úlohy na:identifikátor úlohy", "help" => "
    ZOZNAM ÚLOH:
    Názov úlohy
    ".$clickable.". Názov úlohy daný užívateľom. "N/A" znamená, že užívateľ úlohe názov nepriradil. Po kliknutí sa zobrazí detailný popis úlohy.
    Užívateľ
    ".$clickable.". ".$str_nam."
    Stav
    ".$str_sta."
CPU čas (min)
    ".$str_tim."
    Rada
    ".$clickable.". Názov rady, v ktorej sa úloha vykonáva. ".$str_que."
    Procesory
    ".$str_cpu."
    OPIS ÚLOHY:
    Atribút
    ".$clickable.". Názvy atribútov úlohy".$str_att."
    Hodnota
    ".$str_val."
    ", "Názov úlohy" => 0, "Užívateľ" => 0, "Stav" => 0, "CPU Äas (min)" => 0, "Rada" => 0, "PoÄet CPU" => 0 ), "volist" => array( "0" => "Virtuálne organizácie", "help" => "
    Virtuálne organizácie
    ".$clickable.". Skupina užívateľov, obyÄajne zdieľajúca spoloÄné aktivity a zdroje, autorizovaná na aspoň jednom zdroji zapojenom v ARC. Po kliknutí sa zobrazí zoznam Älenov skupiny.
    ÄŒlenovia
    PoÄet Älenov skupiny.
    Obsluhuje sa
    Adresa servera spravujúceho databázu s údajmi o Älenoch skupiny.
    ", "Virtuálna organizácia" => 0, "Členovia" => 0, "Obsluhuje sa" => 0 ), "vousers" => array( "0" => "Užívatelia", "help" => "
    Meno
    ".$clickable.". ".$str_nam."
    Pracovisko
    Pracovisko užívateľa, podľa zápisu v databáze. Nepovinný údaj.
    Elektronická pošta
    ".$clickable.". Adresa elektronickej poštovej schránky užívateľa, podľa zápisu v databáze. Nepovinný údaj. Kliknite na adresu pre poslanie správy užívateľovi.
    ", "#" => 0, "Meno" => 0, "Pracovisko" => 0, "Elektronická pošta" => 0 ), "userlist" => array( "0" => "Informácia pre", "help" => "
    Zdroj:rada
    ".$clickable.". Názvy zdrojov (klástrov) a zodpovedajúcich rád lokálnych systémov správy úloh (LRMS) (oddelené dvojbodkou,":"), na ktorých je užívateľ oprávnený posielať úlohy. Ak užívateľ nie je oprávnený, objaví sa správa: "Not authorised at host ...". Po kliknutí na názov klástra sa zobrazí jeho podrobný opis. Pre získanie podrobností o rade kliknite na jej názov.
    Voľné CPU
Počet voľných CPU v danej rade, pre daného užívateľa, v danom čase. V prípade, že rada využíva časové ohraničenia na prípustné trvanie behu úlohy, je tento údaj zobrazený za číslom reprezentujúcim počet procesorov (v minútach, oddelený dvojbodkou). Napríklad "3" znamená dostupnosť troch voľných CPU pre úlohy s akoukoľvek dĺžkou trvania; "4:360" označuje dostupnosť štyroch voľných CPU, pre úlohy s dobou trvania behu nie dlhšou ako šesť hodín; "10:180:30" znamená, že je dostupných desať CPU pre úlohy s trvaním nepresahujúcim 3 hodiny a ďalších tridsať procesorov, ktoré môžu prijať úlohy s neobmedzeným časom behu; "0" znamená, že v danom momente nie sú žiadne voľné CPU a úlohy budú zaradené do príslušnej rady ako čakajúce.
Úlohy v rade
Počet úloh užívateľa v zozname čakajúcich úloh pred novou úlohou, zaslanou daným užívateľom. Počet "0" znamená, že úloha by sa mala začať vykonávať okamžite. POZOR! Je to odhad, ktorý nemusí zohľadňovať všetky lokálne nastavenia správcu zdroja.
Voľný diskový priestor (MB)
Diskový priestor dostupný pre užívateľa v danej rade (v megabajtoch). POZOR! Ide len o odhad, väčšina zdrojov nepodporuje resp. nevyužíva kvóty na diskový priestor.
    Názov úlohy
    ".$clickable.". ".$str_job."
    Stav
    ".$str_sta."
Čas (min)
    ".$str_tim."
    Zdroj
    ".$clickable.". Názov zdroja (obyÄajne klástra), na ktorom sa úloha vykonáva. Po kliknutí sa zobrazia podrobné informácie o zdroji.
    Rada
    ".$clickable.". Názov rady v LRMS, v ktorej sa vykonávajúcej úlohu. ".$str_que."
    PoÄet CPU
    ".$str_cpu."
    ", "" => 0, "Názov úlohy" => 0, "Stav" => 0, "ÄŒas (min)" => 0, "Zdroj" => 0, "Rada" => 0, "PoÄet CPU" => 0 ), "attlist" => array( "0" => "Attribute values", "help" => "
    Objekt
    ".$clickable.". Názov objektu, atribúty ktorého sú zobrazené. Môže ísÅ¥ o názov rady klástra, názov úlohy, meno užívateľa atÄ. Po kliknutí sa zobrazia podrobné informácie o objekte.
    Atribút
    Pre každý objekt v tabuľke je možné uviesÅ¥ jeden alebo viacero atribútov. V hlaviÄke stĺpca je uvedený názov atribútu, upravený do jednoducho Äitateľnej formy (s výnimkou niekoľkých atribútov Å¡pecifických pre systém MDS), obsah jednotlivých stĺpcov predstavujú význam týchto atribútov podľa toho, ako sú popísané v InformaÄnom Systéme.
    ", "Objekt" => 0, "Atribút" => 0 ), "quelist" => array( "0" => "Popis rady", "help" => "
    Atribút
    ".$clickable.". Názvy atribútov rady".$str_att."
    Hodnota
    ".$str_val."
    Názov úlohy
    ".$clickable.". ".$str_job."
    Majiteľ
    ".$clickable.". ".$str_nam."
    Stav
    ".$str_sta."
CPU čas (min)
".$str_tim."
Pamäť (KB)
".$str_mem."
Počet CPU
".$str_cpu."
", "" => 0, "Názov úlohy" => 0, "Majiteľ" => 0, "Stav" => 0, "CPU čas (min)" => 0, "Operačná pamäť (KB)" => 0, "Počet CPU" => 0 ), "sestat" => array( "0" => "Úložné zariadenia", "help" => "
    Názov
Názov úložného zariadenia zaregistrovaný v Informačnom Systéme. Zobrazených maximálne 15 znakov.
Celková kapacita
Celkový diskový priestor v GB.
Voľná kapacita
Momentálne dostupný diskový priestor v GB.
Názov
Názov úložného zariadenia skladajúci sa z názvu logickej jednotky a názvu servera (rozdelených dvojbodkou). Logický názov sa využíva len pre účely Informačného Systému pre zjednodušenie rozoznávania rôznych úložných zariadení, nachádzajúcich sa na jednom a tom istom servere.
Bázová URL
URL úložného zariadenia, obyčajne využívajúc protokol gsiftp. Použite túto URL ako bázu pre prístup k súborom.
Typ
Typ úložného zariadenia. Typ "gridftp-based" označuje úložnú jednotku dostupnú cez GridFTP rozhranie.
", "#" => 0, "Názov" => 0, // "Celková kapacita" => 0, "Voľná/celková kapacita v GB" => 0, "Názov" => 0, "Bázová URL" => 0, "Typ" => 0 ), "allusers" => array( "0" => "Autorizovaní užívatelia:Aktívni užívatelia", "help" => "
    Meno
    ".$clickable.". ".$str_nam."
    Pracovisko
    Pracovisko užívateľa, podľa informácií v jeho osobnom certifikáte
    Úlohy
Počet všetkých užívateľových úloh v systéme (bežiacich, čakajúcich, ukončených a vymazaných)
Zdroje
Počet klástrov, na ktoré má daný užívateľ prístup
    ", "#" => 0, "Meno" => 0, "pracovisko" => 0, "Úlohy" => 0, "Zdroje" => 0 ), "userres" => array( "0" => "", "Zdroj:rada" => 0, "Voľné CPU" => 0, "Úlohy v rade" => 0, "Voľný diskový priestor (MB)" => 0 ), "ldapdump" => array( "0" => "", "Atribút" => 0, "Hodnota" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Údaje platné od (GMT)", "Mds-validto" => "Údaje platné do (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Meno hlavného stroja", "nordugrid-cluster-aliasname" => "Názov", "nordugrid-cluster-contactstring" => "Kontaktná adresa", "nordugrid-cluster-interactive-contactstring" => "Interaktívna adresa", "nordugrid-cluster-comment" => "Komentár", "nordugrid-cluster-support" => "Elektronická adresa zodpovednej osoby", "nordugrid-cluster-acl" => "Autorizované VO", "nordugrid-cluster-lrms-type" => "typ LRMS", "nordugrid-cluster-lrms-version" => "verzia LRMS", "nordugrid-cluster-lrms-config" => "podrobnosti o LRMS", "nordugrid-cluster-architecture" => "Architektúra", "nordugrid-cluster-opsys" => "OperaÄný systém", "nordugrid-cluster-homogeneity" => "Homogenita klástra", "nordugrid-cluster-nodecpu" => "Typ procesoru (najslabÅ¡ieho)", "nordugrid-cluster-nodememory" => "Pamäť (MB, najmenÅ¡ia)", "nordugrid-cluster-totalcpus" => "PoÄet CPU celkovo", "nordugrid-cluster-cpudistribution" => "PoÄet CPU na jeden uzol", "nordugrid-cluster-benchmark" => "Etalónový test (Benchmark)", "nordugrid-cluster-sessiondir-free" => "Diskový priestor, dostupný (MB)", "nordugrid-cluster-sessiondir-total" => "Diskový priestor, celkový (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Doba života gridovej úlohy (min)", "nordugrid-cluster-cache-free" => "KeÅ¡ová pamäť, dostupná (MB)", "nordugrid-cluster-cache-total" => "KeÅ¡ová pamäť, celková (MB)", "nordugrid-cluster-runtimeenvironment" => "Pracovné prostredie", "nordugrid-cluster-localse" => "Lokálne úložné zariadenie", "nordugrid-cluster-middleware" => "Gridové rozhranie (middleware)", "nordugrid-clAliasuster-totaljobs" => "Úlohy, celkovo", "nordugrid-cluster-usedcpus" => "CPU, obsadené", "nordugrid-cluster-queuedjobs" => "Úlohy v rade (ZASTARANÉ)", "nordugrid-cluster-prelrmsqueued" => "Gridové úlohy Äakajúce na zaslanie", "nordugrid-cluster-location" => "PoÅ¡tové smerovacie Äíslo", "nordugrid-cluster-owner" => "Majiteľ", "nordugrid-cluster-issuerca" => "CertifikaÄná autorita", "nordugrid-cluster-issuerca-hash" => "Hash-kód certifikaÄnej autority", "nordugrid-cluster-trustedca" => "Akceptované certifikaÄné autority", "nordugrid-cluster-nodeaccess" => "IP-konektivita uzlov", "nordugrid-cluster-gridarea" => "Priestor gridovej úlohy (ZASTARANÉ)", "nordugrid-cluster-gridspace" => "Gridový diskový priestor (ZASTARANÉ)", "nordugrid-cluster-opsysdistribution" => "Distribúcia OS (ZASTARANÉ)", "nordugrid-cluster-runningjobs" => "Bežiace úlohy (ZASTARANÉ)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Názov rady", "nordugrid-queue-comment" => "Komentár", "nordugrid-queue-status" => "Stav rady", "nordugrid-queue-running" => "VÅ¡etky obsadené CPU", "nordugrid-queue-localqueued" => "Lokálne úlohy v rade", "nordugrid-queue-prelrmsqueued" => "Gridové úlohy Äakajúce na zaslanie do rady", "nordugrid-queue-queued" => "Úlohy v rade (ZASTARANÉ)", "nordugrid-queue-maxrunning" => "Bežiace úlohy (max)", "nordugrid-queue-maxqueuable" => "PoÄet úloh v rade (max)", "nordugrid-queue-maxuserrun" => "PoÄet úloh na užívateľa (max)", "nordugrid-queue-maxcputime" => "CPU Äas, 
maximum (min.)", "nordugrid-queue-mincputime" => "CPU Äas, minimum (min.)", "nordugrid-queue-defaultcputime" => "CPU Äas, bez udania (min.)", "nordugrid-queue-maxwalltime" => "Trvanie, maximum (min.)", "nordugrid-queue-minwalltime" => "Trvanie, minimum (min.)", "nordugrid-queue-defaultwalltime" => "Trvanie, bez udania (min.)", "nordugrid-queue-schedulingpolicy" => "Pravidlá rozvrhu úloh", "nordugrid-queue-totalcpus" => "Celkový poÄet CPU", "nordugrid-queue-nodecpu" => "Typ CPU", "nordugrid-queue-nodememory" => "OperaÄná pamäť (MB)", "nordugrid-queue-architecture" => "Architektúra", "nordugrid-queue-opsys" => "OperaÄný systém", "nordugrid-queue-homogeneity" => "Homogenita rady", "nordugrid-queue-gridrunning" => "CPU obsadené Gridovými úlohami", "nordugrid-queue-gridqueued" => "Gridové úlohy v rade", "nordugrid-queue-benchmark" => "Etalónový test - Benchmark", "nordugrid-queue-assignedcpunumber" => "PoÄet CPU v rade (ZASTARANÉ)", "nordugrid-queue-assignedcputype" => "Typ CPU v rade (ZASTARANÉ)", "nordugrid-job-globalid" => "Identifikátor", "nordugrid-job-globalowner" => "Majiteľ", "nordugrid-job-execcluster" => "Vykonávajúci kláster", "nordugrid-job-execqueue" => "Vykonávajúca rada", "nordugrid-job-stdout" => "Å tandardný výstup", "nordugrid-job-stderr" => "Å tandardný chybový výstup", "nordugrid-job-stdin" => "Å tandardný vstup", "nordugrid-job-reqcputime" => "Požadovaná CPU Äas", "nordugrid-job-reqwalltime" => "Požadovaný Äas trvania", "nordugrid-job-status" => "Stav úlohy", "nordugrid-job-queuerank" => "Pozícia úlohy v rade", "nordugrid-job-comment" => "LRMS komentár", "nordugrid-job-submissionui" => "Stroj, z ktorého bola úloha zaslaná", "nordugrid-job-submissiontime" => "ÄŒas zaslania (GMT)", "nordugrid-job-usedcputime" => "Použitý CPU Äas", "nordugrid-job-usedwalltime" => "DoterajÅ¡ie trvanie úlohy", "nordugrid-job-completiontime" => "ÄŒas ukonÄenia (GMT)", "nordugrid-job-sessiondirerasetime" => "ÄŒas vymazania (GMT)", "nordugrid-job-proxyexpirationtime" => "ÄŒas vyprÅ¡ania proxy certifikátu (GMT)", "nordugrid-job-usedmem" => "Použitá pamäť (KB)", "nordugrid-job-errors" => "Chyby", "nordugrid-job-exitcode" => "Návratová hodnota", "nordugrid-job-jobname" => "Názov", "nordugrid-job-runtimeenvironment" => "Pracovné prostredie", "nordugrid-job-cpucount" => "Požadovaný poÄet CPU", "nordugrid-job-executionnodes" => "Vykonávajúce uzly", "nordugrid-job-gmlog" => "GM log súbor", "nordugrid-job-clientsoftware" => "Verzia klienta", "nordugrid-job-rerunable" => "ZnovuspustiteľnosÅ¥", "nordugrid-job-reqcput" => "Požadovaný Äas (ZASTARANÉ)", "nordugrid-job-gridlog" => "Gridlog súbor (ZASTARANÉ)", "nordugrid-job-lrmscomment" => "LRMS komentár (ZASTARANÉ)", "nordugrid-authuser-name" => "Meno", "nordugrid-authuser-sn" => "Subjekt", "nordugrid-authuser-freecpus" => "Voľné CPU", "nordugrid-authuser-diskspace" => "Voľný úložný priestor (MB)", "nordugrid-authuser-queuelength" => "Úlohy užívateľa v rade", "nordugrid-se-name" => "Celý názov", "nordugrid-se-aliasname" => "Názov", "nordugrid-se-type" => "Typ", "nordugrid-se-acl" => "Autorizované VO", "nordugrid-se-freespace" => "Voľný priestor (MB)", "nordugrid-se-totalspace" => "Celkový priestor (MB)", "nordugrid-se-url" => "Kontaktná URL adresa", "nordugrid-se-baseurl" => "Kontaktná základná URL adresa (ZASTARANÉ)", "nordugrid-se-accesscontrol" => "Kontrola prístupu", "nordugrid-se-authuser" => "Autorizovaný užívateľ (DN)", "nordugrid-se-location" => "PoÅ¡tové smerovacie Äíslo", "nordugrid-se-owner" => "Majiteľ", "nordugrid-se-middleware" => "Gridové 
rozhranie", "nordugrid-se-issuerca" => "CertifikaÄná autorita", "nordugrid-se-issuerca-hash" => "Hash-kód certifikaÄnej autority", "nordugrid-se-trustedca" => "Akceptované certifikaÄné autority", "nordugrid-se-comment" => "Komentár", "nordugrid-rc-name" => "Názov domény", "nordugrid-rc-aliasname" => "Názov", "nordugrid-rc-baseurl" => "Kontaktná URL adresa", "nordugrid-rc-authuser" => "Autorizovaní užívatelia (DN)", "nordugrid-rc-location" => "PoÅ¡tové smerovacie Äíslo", "nordugrid-rc-owner" => "Majiteľ", "nordugrid-rc-issuerca" => "CertifikaÄná autorita" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Nemožno preÄítaÅ¥ údaje z indexu vyššej úrovne", "2" => "Žiaden z lokálnych indexov neodpovedá", "3" => " nesprávna konfigurácia alebo uplynul Äas požiadavky", "4" => "Žiadna gridová úloha", "5" => "Žiadna informácia", "6" => "Služba je nedostupná", "7" => " - pokúste sa obnoviÅ¥ neskôr", "8" => "Informácie o rade nedostupné", "9" => "Žiadne údaje", "11" => "Žiadny užívatelia", "11" => "Neautorizovaný na danom zdroji", "12" => "neodpovedá", "13" => "Momentálne niet úloh od daného užívateľa ", // debug messages "101" => " ÄŒasový limit pre spojenie s lokálnym indexom: ", "102" => " s pre spojenie a ", "103" => " s pre hľadanie", "104" => " s strávených hľadaním", "105" => "Zobrazenie zdrojov výluÄne v ", "106" => "Dopytované indexy vyššej úrovne: ", "107" => "Prijaté geografické koordináty zdrojov, preskenované zdroje: ", "108" => " zdrojov usporiadaných podľa geografickej polohy", "109" => "Vyhľadávanie atribútov klástra", "110" => "Vyhľadávanie atribútov rady", "111" => "Niet údajov od ", "112" => " fungujúcich v krajine: ", "113" => " žiadne ponúkané zdroje", "114" => " ÄŒasový limit pre spojenie s globálnym indexom: ", "115" => "Ignoruje sa zdroj: ", "116" => "nezodpovedá typu ", "117" => "Preverovanie spojenia: ", "118" => "V poriadku", "119" => "Doteraz objavených zdrojov typu ", "120" => "Chyba LDAP pri hľadaní ", "121" => " stav v ", "122" => "Zablokované: ", "123" => "Objavený registrant ", "124" => "Vyhľadávanie atribútov úložných zariadení", "125" => "Vyhľadávanie užívateľov", "126" => "Vyhľadávanie úloh", "127" => " spustil úlohu ", "128" => " nemajúc autorizáciu", "129" => "Niet údajov o objekte: chyba ", "130" => " ÄŒasový limit pre spojenie s EMIR: ", // icon titles "301" => "ObnoviÅ¥", "302" => "TlaÄ", "303" => "Pomoc", "304" => "ZatvoriÅ¥", "305" => "Zelená", "306" => "Sivá", "307" => "VÅ¡etci užívatelia", "308" => "Aktívny užívatelia", "309" => "Vyhľadávanie", "310" => "Úložiská", "311" => "Virtuálne organizácie", "312" => "Vlajka krajiny: ", "313" => " (gridové procesy), ", "314" => " (lokálne procesy)", // auxilliary strings "401" => "Procesy", "402" => "Grid", "403" => "Lokálne", "404" => "Svet", "405" => "CELKOVO", "406" => " zdrojov", "407" => "mnoho", "408" => " GB", "409" => " VÅ ETKY", "410" => "Kláster", "411" => "Rada", "412" => "Úloha", "413" => "Užívateľ", "414" => "Úložisko", "415" => "Katalóg replík", "416" => "Zadajte atribúty, ktoré sa majú zobraziÅ¥ pre vybraný objekt: ", "417" => "Vyhľadávanie sa vykonáva pre logické A vÅ¡etkých zadaných výrazov", "418" => "Ponechajte pravé pole prázdne ak filter nie je potrebný", "419" => "Prezeranie zdrojov alebo objektov podľa výberu", "420" => "Plný názov (DN)", "421" => "Môže použiÅ¥ celkovo ", "422" => " zdrojov", "423" => "Zdroj / objekt:", "424" => "PoÄet atribútov (6 automaticky):", "425" => "Objekt", "426" => "ÄŽalší", "427" => "Vyberte", "428" => "ZnovunaÄítaÅ¥", "429" => "UKÃZAŤ" ), // 
Post code conversion: only for [en]! "tlconvert" => array ( "AU" => "Austrália", "AT" => "Rakúsko", "AM" => "Arménsko", "DZ" => "Alžírsko", "BE" => "Belgicko", "BG" => "Bulharsko", "CA" => "Kanada", "CN" => "Čína", "CZ" => "Česko", "DK" => "Dánsko", "EE" => "Estónsko", "FI" => "Fínsko", "FIN" => "Fínsko", "SF" => "Fínsko", "FR" => "Francúzsko", "GE" => "Gruzínsko", "DE" => "Nemecko", "D" => "Nemecko", "GR" => "Grécko", "HU" => "Maďarsko", "IS" => "Island", "IR" => "Írsko", "IE" => "Írsko", "IT" => "Taliansko", "JP" => "Japonsko", "KEK" => "Japonsko", "TOKYO" => "Japonsko", "LV" => "Lotyšsko", "LT" => "Litva", "MA" => "Maroko", "NL" => "Holandsko", "NO" => "Nórsko", "N" => "Nórsko", "PL" => "Poľsko", "PT" => "Portugalsko", "RO" => "Rumunsko", "RU" => "Rusko", "LK" => "Srí Lanka", "SE" => "Švédsko", "SK" => "Slovensko", "SI" => "Slovinsko", "CH" => "Švajčiarsko", "TR" => "Turecko", "UK" => "UK", "UA" => "Ukrajina", "COM" => "USA", "GOV" => "USA", "USA" => "USA", "US" => "USA", "Australia" => "Austrália", "Austria" => "Rakúsko", "Armenia" => "Arménsko", "Algeria" => "Alžírsko", "Belgium" => "Belgicko", "Bulgaria" => "Bulharsko", "Canada" => "Kanada", "China" => "Čína", "Czechia" => "Česko", "Denmark" => "Dánsko", "Estonia" => "Estónsko", "Finland" => "Fínsko", "France" => "Francúzsko", "Georgia" => "Gruzínsko", "Germany" => "Nemecko", "Greece" => "Grécko", "Hungary" => "Maďarsko", "Iceland" => "Island", "Ireland" => "Írsko", "Italy" => "Taliansko", "Japan" => "Japonsko", "Latvia" => "Lotyšsko", "Lithuania" => "Litva", "Morocco" => "Maroko", "Netherlands" => "Holandsko", "Norway" => "Nórsko", "Poland" => "Poľsko", "Portugal" => "Portugalsko", "Romania" => "Rumunsko", "Russia" => "Rusko", "SriLanka" => "Srí Lanka", "Sweden" => "Švédsko", "Slovakia" => "Slovensko", "Slovenia" => "Slovinsko", "Switzerland" => "Švajčiarsko", "Turkey" => "Turecko", "UK" => "Veľká Británia", "Ukraine" => "Ukrajina", "USA" => "USA", "World" => "Svet" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/fi.inc0000644000000000000000000000013215067751327021466 xustar0030 mtime=1759498967.777617767 30 atime=1759498967.875493803 30 ctime=1759499030.796014003 nordugrid-arc-7.1.1/src/services/monitor/lang/fi.inc0000644000175000002070000013717015067751327023401 0ustar00mockbuildmock00000000000000 N/A tarkoittaa: käyttäjä ei antanut työlle nimeä.
X tarkoittaa: käyttäjä tappoi työnsä.
! tarkoittaa: työn suoritus epäonnistui.
Valitse työn nimi jos haluat työn tarkemmat tiedot."; $str_nam = "Käyttäjän nimi, siten kuin se on käyttäjän varmenteessa. Valitse käyttäjän nimi jos haluat tietoa resursseista jotka ovat hänen käytettävissään ja käyttäjän ajossa olevista töistä."; $str_sta = "Työn tila, siten kuin Grid Manager (GM) ja jonosuoritusohjelma (LRMS) sen kertoivat. Tilat ovat:
ACCEPTED – työ lähetetty
PREPARING – haetaan syötetiedostoja
SUBMITTING – lähetys jononsuoritusohjelmaan (LRMS) menossa
INLRMS – työ on jononsuoritusohjelman armoilla; Tietojärjestelmä lisää seuraavat LRMSn sisäiset tilat:
: Q – jonossa,
: U – jono on jäädytetty väliaikaisesti koska tietokone on kuormitettu (PBSPro)
: S – jono on jäädytetty (Condor)
: R, run – työtä suoritetaan
: E – työ on loppuvaiheessa (PBS)
FINISHING – GM siirtää työn tulostiedostoja
FINISHED – työ suoritettu loppuun; tietojärjestelmä lisää aikaleimaa
CANCELING – työ peruutetaan
DELETED – käyttäjä ei siirtänyt tulosteita, GM poisti ne koska maksimiaika ylittyi
Kaikkiin tiloihin voi liittyä PENDING: -etuliite, joka tarkoittaa että GM yrittää siirtää työtä seuraavaan tilaan"; $str_tim = "Työn käyttämä prosessoriaika minuutteina."; $str_mem = "Työn käyttämä muisti, KB"; $str_cpu = "Työn käyttämien prosessorien lukumäärä."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
Kohteet jotka rekisteröityvät ARCin luettelopalveluun lajiteltuna maan ja tietokoneen nimen mukaisesti. Kohteista rekisteröidään seuraavat ominaisuudet: klusterin alias-nimi, prosessorikapasiteetti, ajossa olevat ja jonottavat työt (sekä Grid-toiminnoilla lähetetyt että paikalliset). Käytä "Search" toimintoa jos haluat vertailla muita klusterin, jonon tai työn ominaisuuksia
Maa
".$clickable.". Maa (lippu ja nimi) kuten annettu resurssien kuvauksessa. Valitse maa jos haluat näyttää vain tämän maan tiedot.
Klusteri
".$clickable.". Klusterin alias-nimi kuten omistaja on sen antanut. Näytetään max 22 merkkiä. Valitse alias jos haluat tarkemman kuvauksen klusterista.
Prosessoreita
Klusterin prosessorien kokonaismäärä. Huom! Grid-käyttäjien saatavilla voi olla näistä vain osa.
Kuorma (prosesseja:Grid+paikallinen)
".$clickable.". Klusterin suhteellinen kuorma, eli kuormitettujen prosessorien määrä. Harmaat palkit vastaavat prosessoreita jotka suorittavat paikallisesti lähetettyjä töitä, punaiset palkit Grid-töitä. Valitse palkki jos haluat tarkempaa tietoa Grid-töistä joita suoritetaan klusterissa.
Jonottamassa
".$clickable.". Klusterissa jonottavien töiden lukumäärä, Grid työt sekä paikallisesti lähetetyt työt. Valitse ensimmäinen numero jos haluat tarkempaa tietoa jonottavista Grid-töistä.
    ", "Maa" => 30, "Kohde" => 160, "Prosesseja" => 10, "Kuorma (prosesseja: Grid+paikall.)" => 210, "Jonottamassa" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Ominaisuus
    ".$clickable." Klusterin ominaisuuden nimi".$str_att."
    Arvo
    ".$str_val."
    Jono
    ".$clickable.". Jonon nimi siten kuin jonon omistaja on sen maarittanyt. ".$str_que."
    Tila
    Jonon tila. Toiminnassa oleva jono ilmoittaa yleensa tilan active.
    Prosessorit (min)
    Jonon toiden aikarajoitus (jos annettu) prosessoriminuutteina. Naytetään ala- ja yläraja. N/A näytetään ios rajoituksia ei ole (kaikenkestoiset työt sallitaan).
    Ajossa
    Ajossa olevat jonon työt. Toiden kokonaismäärä, suluissa Grid-töitä suorittavien prosessorien kokonaismäärä. Huom: rinnakkaisille multiprosessoritoille suluissa oleva numero voi olla suurempi kuin toiden määrä.
    Jonottamassa
    TyöT jotka odottavat suoritukseen paäsyä jonossa. Toiden kokonaismäärä ja Grid-toiminnoilla lähetetyt suluissa esim. (Grid: 235)
    ", "Jono" => 0, "Mapping Queue" => 0, "Tila" => 0, "Rajoitukset (min)" => 0, "Prosessoreita" => 0, "Ajossa" => 0, "Jonottamassa" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    TYÖT:
    Tyon nimi
    ".$clickable.". Tyon nimi (omistajan antama). Jos omistaja ei antanut tyolle nimea, näytetään " style={color:red;}>N/A" . Valitse nimi jos haluat kuvauksen tyostä.
    Omistaja
    ".$clickable.". ".$str_nam."
    Tila
    ".$str_sta."
    Prosessoreita (min)
    ".$str_tim."
    Jono
    ".$clickable.". Eräajojono, jossa työtä suoritetaan. ".$str_que."
    Prosessoreita
    ".$str_cpu."
    TYÖT (YKSITYISKOHTAISESTI):
    Ominaisuus
    ".$clickable.". Tyon ominaisuus".$str_att."
    Arvo
    ".$str_val."
    ", "Tyon nimi" => 0, "Omistaja" => 0, "Tila" => 0, "Prosessoreita (min)" => 0, "Jono" => 0, "Prosessoreita" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtuaaliorganisaatio (VO)
    ".$clickable.". Ryhmä käyttäjiä jotka käyttävät samanlaisia resursseja ARC-tietkoneissa. Valitse ryhmän nimi jos haluta listan ryhmän jäsenistä.
    Jäseniä
    Ryhmän jäsenten määrä.
    Served by
    LDAP palvelin johon ryhmä/jäsenyystiedot talletetaan.
    ", "Virtuaaliorganisaatio (VO)" => 0, "Jäsenet" => 0, "Palvelin" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Nimi
    ".$clickable.". ".$str_nam."
    Organisaatio
    ".$clickable.". Käyttäjän organisaatio siinä muodossa kuin VO'n hallinnoija on sen antanut (voi olla myos tyhjä).
    Sähkopostiosoite
    ".$clickable.". Käyttäjän sähkopostiosoite siinä muodossa kuin VO'n hallinnoija on sen antanut (voi olla myos tyhjä). Valitsemalla sähkopostiosoitteen voit lähettää käyttäjälle sähkopostia.
    ", "#" => 0, "Nimi" => 0, "Organisaatio" => 0, "Sähkopostiosoite" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Klusteri:jono
    ".$clickable.". Klusterit ja niiden jonot (kaksoispisteella erotettuina, ":") joihin käyttäja voi lähettää töitä. Jos käyttäjällä ei ole oikeutta lähettää työtä, tuloste on "Not authorised at host ..." Valitse klusterin nimi jos haluat yksityiskohtaisen kuvauksen klusterista. Valitse jonon nimi jos haluat yksityiskohtaisen kuvauksen jonosta.
    Vapaita prosessoreita
    Tälle käyttäjälle, annetussa jonossa saatavilla olevien prosessorien maara. Tämän jälkeen saattaa ilmetä myos maksimiarvo joka kertoo kuinka monta minuuttia prosessori on käytettävissä. "3" tarkoittaa: 3 prosessoria käytettävissä ilman aikarjaa. "4:360" tarkoittaa: 4 prosessoria korkeintaan kuudeksi tunniksi. "0" tarkoittaa: ei prosessoreita saatavilla ja työt jonottavat kunnes niita vapautuu.
    Jonossa olevia töitä
    TyöT jotka todennäkoisesti suoritetaan ennen uutta jonoon tulevaa. "0" tarkoittaa: tyo suoritetaan heti. Huom! Tämä on arvio, jonon paikalliskaytanto saattaa muuttaa prioriteetteja.
    Vapaa tila (MB)
    Käyttäjälle tarjolla oleva levytila tässä jonossa (megatavuina). Huom! Tämä on arvio, koska klusterit eivät tarjoa levykiintioitä.
    Tyon nimi
    ".$clickable.". ".$str_job."
    Tila
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klusteri
    ".$clickable.". Klusteri kossa tyo suoritetaan/suoritettiin.
    Queue
    ".$clickable.". Jono jossa tyo suoritetaan/suoritettiin. ".$str_que."
    Prosessoreita
    ".$str_cpu."
    ", "" => 0, "Tyon nimi" => 0, "Tila" => 0, "Prosessoreita (min)" => 0, "Klusteri" => 0, "Jono" => 0, "Prosessoreita" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Objekti jonka ominaisuuksia tarkastellaan. Objekti voi olla klusteri, jono, tyo, käyttäjä jne. Valitsemalla objektin sen kuvauksen.
    Ominaisuus
    Ominaisuuksia ja niiden arvoja tulostetaan yksi tai usempia per kohde. Sarakkeen otsikko on ihmiselle ymmarrettävässä muodossa (poislukien jotkin MDS-spesifit ominaisuudet). Sarake sisältää vastaavan ominaisuuden arvot tälle kohteelle (arvot saadaan tietojärjestelmästä).
    ", "Objekti" => 0, "Ominaisuus" => 0 ), "quelist" => array("0" => "Jono", "help" => "
    Ominaisuus
    ".$clickable.". Name of a queue attribute".$str_att."
    Arvo
    ".$str_val."
Työn nimi
    ".$clickable.". ".$str_job."
    Omistaja
    ".$clickable.". ".$str_nam."
    Tila
    ".$str_sta."
    Prosessoreita (min)
    ".$str_tim."
    Muisti (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Tyon nimi" => 0, "Omistaja" => 0, "Tila" => 0, "Prosessoreita (min)" => 0, "Muisti (KB)" => 0, "Prosessoreita" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias-nimi
    Talletuselementin nimi siinä muodossa kuin se on tietojärjestelmässä (IS), max. 15 merkkiä näytetään.
    Tilaa kaikkiaan
    Kokonaislevytila, GB.
    Vapaa tila
Tällä hetkellä vapaana oleva levytila, GB.
Name
Talletuselementin nimi, looginen nimi ja tietokoneen nimi kaksoispisteellä eroteltuna. Loogista nimeä käyttää vain tietojärjestelmä (IS), jotta voidaan erottaa eri talletuselementit samalla koneella.
URLin alku
Talletuselementin URL, usein gsiftp://.. Tämä URL edeltää yksittäisiä tiedostoja tai hakemistoja.
Type
Talletuselementin tyyppi. "gridftp-based" tarkoittaa tietovarantoa jossa GridFTP liittymä.
", "#" => 0, "Alias-nimi" => 0, // "Tilaa kaikkiaan" => 0, "Vapaa/kaikkiaan tila, GB" => 0, "Nimi" => 0, "URLin alku" => 0, "Tyyppi" => 0 ), "allusers" => array("0" => "Grid käyttäjät joille käyttö sallittu:Aktiiviset Grid käyttäjät", "help" => "
    Nimi
    ".$clickable.". ".$str_nam."
    Organisaatio
    Käyttäjän organisaatio, tieto saatu varmenteesta
Työt
    Kaikki käyttäjien työt (ajossa, odottamassa, suoritettu tai poistettu)
    Kohteet
    Kuinka monta kohdetta tämä käyttäjä voi käyttää
    ", "#" => 0, "Nimi" => 0, "Organisaatio" => 0, "TöIta" => 0, "Kohteita" => 0 ), "userres" => array("0" => "", "Klusteri:jono" => 0, "Vapaita prosessoreita" => 0, "Jonossa olevia töitä" => 0, "Vapaata levytilaa (MB)" => 0 ), "ldapdump" => array("0" => "", "Ominaisuus" => 0, "Arvo" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Edustakoneen domain", "nordugrid-cluster-aliasname" => "Klusterin alias-nimi", "nordugrid-cluster-contactstring" => "Kontakti", "nordugrid-cluster-interactive-contactstring" => "Interaktiivisten toiden kontakti", "nordugrid-cluster-comment" => "Kommentti", "nordugrid-cluster-support" => "Tukipalvelun sähkoposti", "nordugrid-cluster-acl" => "Sallitut VO't", "nordugrid-cluster-lrms-type" => "Jononhallintaohjelmiston tyyppi", "nordugrid-cluster-lrms-version" => "Jononhallintaohjelmiston versio", "nordugrid-cluster-lrms-config" => "Jononhallintaohjelmisto, tarkemmin", "nordugrid-cluster-architecture" => "Arkkitehtuuri", "nordugrid-cluster-opsys" => "Käyttojärjestelmä", "nordugrid-cluster-homogeneity" => "Klusterin arkkitehtuuri yhtenainen ", "nordugrid-cluster-nodecpu" => "Prosessorin tyyppi (hitain)", "nordugrid-cluster-nodememory" => "Muisti (MB, pienin määrä)", "nordugrid-cluster-totalcpus" => "Prosessoreita kaikkiaan", "nordugrid-cluster-cpudistribution" => "Prosessoreita tietokonetta kohti", "nordugrid-cluster-benchmark" => "Suoritustesti", "nordugrid-cluster-sessiondir-free" => "Levytila, saatavilla (MB)", "nordugrid-cluster-sessiondir-total" => "Levytila kaikkiaan (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Sessiohakemiston elinaika (min)", "nordugrid-cluster-cache-free" => "Valimuistin koko, saatavilla (MB)", "nordugrid-cluster-cache-total" => "Valimuistin koko kaikkiaan (MB)", "nordugrid-cluster-runtimeenvironment" => "Ajoaikainen ymparisto", "nordugrid-cluster-localse" => "Paikallinen talletuselementti (SE)", "nordugrid-cluster-middleware" => "Väliohjelmisto", "nordugrid-cluster-totaljobs" => "Töiden kokonaismäärä", "nordugrid-cluster-usedcpus" => "Prosessoreita varattu", "nordugrid-cluster-queuedjobs" => "TöItä jonossa", "nordugrid-cluster-prelrmsqueued" => "Grid–töitä odottamassa", "nordugrid-cluster-location" => "Postinumero", "nordugrid-cluster-owner" => "Omistaja", "nordugrid-cluster-issuerca" => "Varmenteen myontäjä", "nordugrid-cluster-issuerca-hash" => "Varmenteen myontäjän hajakoodi", "nordugrid-cluster-trustedca" => "Luotetut varmenteen myontäjä", "nordugrid-cluster-nodeaccess" => "Laskentasolmun internet-yhteys", "nordugrid-cluster-gridarea" => "Sessiotila (VANHENTUNUT)", "nordugrid-cluster-gridspace" => "Grid levytila (VANHENTUNUT)", "nordugrid-cluster-opsysdistribution" => "Käyttojärjestelmän jakelunimi (VANHENTUNUT)", "nordugrid-cluster-runningjobs" => "TöItä, ajossa (VANHENTUNUT)", "nordugrid-cluster-credentialexpirationtime" => "Varmenne voimassa", "nordugrid-queue-name" => "Jonon nimi", "nordugrid-queue-comment" => "Kommentti", "nordugrid-queue-status" => "Jonon tila", "nordugrid-queue-running" => "Prosessoreita varattu", "nordugrid-queue-localqueued" => "Paikallisia töitä jonossa", "nordugrid-queue-prelrmsqueued" => "Grid–töitä odottamassa", "nordugrid-queue-queued" => "Jonossa olevia töitä (VANHENTUNUT)", "nordugrid-queue-maxrunning" => "Ajossa olevia töitä (max)", "nordugrid-queue-maxqueuable" => "TöItä jotka voivat jonottaa (max)", "nordugrid-queue-maxuserrun" => "TöItä 
käyttäjää kohti (max)", "nordugrid-queue-maxcputime" => "Prosessoriaika, max. (minuutteja)", "nordugrid-queue-mincputime" => "Prosessoriaika, min. (minuutteja)", "nordugrid-queue-defaultcputime" => "Prosessoriaika, oletusarvo (minuutteja)", "nordugrid-queue-maxwalltime" => "Kokonaisaika, max. (minuutteja)", "nordugrid-queue-minwalltime" => "Kokonaisaika, min. (minuutteja)", "nordugrid-queue-defaultwalltime" => "Kokonaisaika, oletusarvo (minuutteja)", "nordugrid-queue-schedulingpolicy" => "Schedulointipolitiikka", "nordugrid-queue-totalcpus" => "Prosessoreita kaikkiaan", "nordugrid-queue-nodecpu" => "Prosessrin tyyppi", "nordugrid-queue-nodememory" => "Muistia (MB)", "nordugrid-queue-architecture" => "Arkkitehtuuri", "nordugrid-queue-opsys" => "Käyttojärjestelmä", "nordugrid-queue-homogeneity" => "Jonon arkkitehtuuri yhtenäinen", "nordugrid-queue-gridrunning" => "Grid–töiden käyttämät prosessorit", "nordugrid-queue-gridqueued" => "Grid työt, jonossa", "nordugrid-queue-benchmark" => "Mitattu suorituskyky", "nordugrid-queue-assignedcpunumber" => "Prosessoreita jonoa kohti (VANHENTUNUT)", "nordugrid-queue-assignedcputype" => "Prosessorin tyyppi (VANHENTUNUT)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Omistaja", "nordugrid-job-execcluster" => "Suoritusklusteri", "nordugrid-job-execqueue" => "Suoritusjono", "nordugrid-job-stdout" => "Standardi tulostiedosto", "nordugrid-job-stderr" => "Standardi virhetiedosto ", "nordugrid-job-stdin" => "Standardi syotetiedosto", "nordugrid-job-reqcputime" => "Pyydetty prosessoriaika", "nordugrid-job-reqwalltime" => "Pyydetty kokonaisaika", "nordugrid-job-status" => "Tyon tila", "nordugrid-job-queuerank" => "Paikka jonossa", "nordugrid-job-comment" => "Jonosuoritusohjelman kommentti", "nordugrid-job-submissionui" => "Lähetetty koneesta", "nordugrid-job-submissiontime" => "Lähetysaika (GMT)", "nordugrid-job-usedcputime" => "Käytetty prosessoriaika", "nordugrid-job-usedwalltime" => "Käytetty kokonaisaika", "nordugrid-job-completiontime" => "Saatu suoritettua (GMT)", "nordugrid-job-sessiondirerasetime" => "Poistamisaika (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxyn käyttoaika loppuu (GMT)", "nordugrid-job-usedmem" => "Käytetty muisti (KB)", "nordugrid-job-errors" => "Virheet", "nordugrid-job-exitcode" => "Poistumiskoodi", "nordugrid-job-jobname" => "Nimi", "nordugrid-job-runtimeenvironment" => "Ajoaikainen ympäristo", "nordugrid-job-cpucount" => "Pyydetyt prosessorit", "nordugrid-job-executionnodes" => "Suoritusnoodi", "nordugrid-job-gmlog" => "GM log -tiedosto", "nordugrid-job-clientsoftware" => "Asiakasohjelmiston nimi", "nordugrid-job-rerunable" => "Uudelleen ajettavissa", "nordugrid-job-reqcput" => "Pyydetty suoritusaika (VANHENTUNUT)", "nordugrid-job-lrmscomment" => "Jonosuoritusohjelman kommentti (VANHENTUNUT)", "nordugrid-job-gridlog" => "Gridlog tiedosto (VANHENTUNUT)", "nordugrid-authuser-name" => "Nimi", "nordugrid-authuser-sn" => "Subject-nimi", "nordugrid-authuser-freecpus" => "Vapaita prosessoreita", "nordugrid-authuser-diskspace" => "Vapaa levytila (MB)", "nordugrid-authuser-queuelength" => "KДyttДjДn tЖitД jonossa", "nordugrid-se-name" => "Nimi", "nordugrid-se-aliasname" => "Talletuselementin alias-nimi", "nordugrid-se-type" => "Talletuselementin tyyppi", "nordugrid-se-acl" => "Autorisoidut VOt", "nordugrid-se-freespace" => "Vapaa tila (MB)", "nordugrid-se-totalspace" => "Kokonaistila (MB)", "nordugrid-se-url" => "Yhteys-URL", "nordugrid-se-baseurl" => "Yhteys-URL (VANHENTUNUT)", "nordugrid-se-accesscontrol" => 
"Kayttokontrolli", "nordugrid-se-authuser" => "Auktorisoitu käyttäjä (DN)", "nordugrid-se-location" => "Postinumero", "nordugrid-se-owner" => "Omistaja", "nordugrid-se-middleware" => "VДliohjelmisto", "nordugrid-se-issuerca" => "Varmenteen myontäjä", "nordugrid-se-issuerca-hash" => "Varmenteen myontäjän hajakoodi", "nordugrid-se-trustedca" => "Luotetut varmenteen myontäjä", "nordugrid-se-comment" => "Kommentteja", "nordugrid-rc-name" => "Domainin nimi", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Yhteys-URL", "nordugrid-rc-authuser" => "Auktorisoitu käyttäjä (DN)", "nordugrid-rc-location" => "Postinumero", "nordugrid-rc-owner" => "Omistaja", "nordugrid-rc-issuerca" => "Varmenteen myontäjä (CA)" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Ei voitu lukea ylätason indeksejä", "2" => "Ei saatu yhteyttä paikallisiin indeksipalveluihin", "3" => " viallinen konfiguraatio tai pyyynnolle annettu aika ylittyi", "4" => "Ei Grid-töitä", "5" => "Ei loytynyt tietoa", "6" => "Tietokone ei saavutettavissa", "7" => " - hae uudestaan myohemmin", "8" => "Ei jonotietoa", "9" => "Ei kohderiveja", "10" => "Ei käyttäjiä", "11" => "Ei oikeutta käyttää tietokonetta", "12" => "ei vastaa", "13" => "Ei töitä ", // debug messages "101" => " Monitoriprosesille annettu aika: GRIS: ", "102" => " sekuntia yhteyksien luomiseen ", "103" => " sekuntia käytetty etsimisprosessissa", "104" => " sekuntia käytetty etsimiseen", "105" => "Näytetään vain resurssit: ", "106" => "Tutkittu ylimman tason indeksit: ", "107" => "Maantieteelliset kohteet haettu, lisataan tietoa: ", "108" => " kohteet jarjestetty maantieteellisesti", "109" => "Etsi klusterin ominaisuuksilla", "110" => "Etsi jonon ominaisuuksilla", "111" => "Ei dataa kohteesta ", "112" => " on toiminnassa: ", "113" => " ei resursseja tarjolla", "114" => " Monitoriprosessille annettu aika ylittyi, GIIS: ", "115" => "Jätetään valiin GRIS: ", "116" => "ei ole ", "117" => "Tarkintan yhteyttä: ", "118" => "OK", "119" => "Siihen mennessä loytynyt seuraavanlaisia resursseja ", "120" => "LDAP etsinnässä virhe ", "121" => " status ", "122" => "Mustalla listalla: ", "123" => "Rekisteroitynyt ", "124" => "Etsi tallennuselementin (SE) ominaisuuskai", "125" => "Etsi käyttäjiä", "126" => "Etsi töitä", "127" => " tyo ", "128" => " ei käyttooikeutta", "129" => "Virhe: ei tietoa kohteesta ", "130" => " Monitoriprosessille annettu aika ylittyi, EMIR: ", // icon titles "301" => "Lataa uudestaan", "302" => "Tulosta", "303" => "Ohjeet", "304" => "Sulje", "305" => "Punainen", "306" => "Harmaa", "307" => "Kaikki käyttäjät", "308" => "Aktiiviset käyttäjät", "309" => "Hae", "310" => "Tietovarannot", "311" => "Virtuaaliorganisaatiot", "312" => "Lippu: ", "313" => " Grid prosessit ja ", "314" => " paikalliset prosessit", // auxilliary strings "401" => "Prosessit", "402" => "Grid", "403" => "Paikallinen", "404" => "Maailma", "405" => "TOTAL", "406" => " kohdetta ", "407" => "paljon", "408" => " GB", "409" => " KAIKKI", "410" => "Klusteri", "411" => "Jono", "412" => "Tyo", "413" => "Kayttäjä", "414" => "Tietovaranto", "415" => "Replica Cat.", "416" => "Valitse ominaisuudet jotka näytetään: ", "417" => "Kaikkien valintojen kombinaation näytetään", "418" => "Jata oikeanpuoleinen kenttä tyhjäksi jos haluat kaikki tulokset näyttoon", "419" => "Näyta valitut resurssit tai kohteet", "420" => "Distinguished name", "421" => "käytettävissä ", "422" => " kohdetta", "423" => "Resurssi / objekti:", "424" => "Ominaisuuksia (def. 
6):", "425" => "Objekti", "426" => "Seuraava", "427" => "Valise yksi", "428" => "Tyhjennä valinnat", "429" => "NÄYTÄ" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Itävalta", "Armenia" => "Armenia", "Algeria" => "Algeria", "Belgium" => "Belgia", "Bulgaria" => "Bulgaria", "Canada" => "Kanada", "Czechia" => "Tsekki", "China" => "Kiina", "Denmark" => "Tanska", "Estonia" => "Eesti", "Finland" => "Suomi", "France" => "Ranska", "Georgia" => "Georgia", "Germany" => "Saksa", "Greece" => "Kreikka", "Hungary" => "Unkari", "Iceland" => "Islanti", "Ireland" => "Irlanti", "Italy" => "Italia", "Japan" => "Japani", "Latvia" => "Latvia", "Lithuania" => "Liettua", "Morocco" => "Marokko", "Netherlands" => "Alankomaat", "Norway" => "Norja", "Poland" => "Puola", "Portugal" => "Portugali", "Romania" => "Romania", "Russia" => "Venäjä", "SriLanka" => "Sri Lanka", "Sweden" => "Ruotsi", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Sveitsi", "Turkey" => "Turkki", "UK" => "Iso-Britannia", "Ukraine" => "Ukraina", "USA" => "USA" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/ru.inc0000644000000000000000000000013215067751327021516 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.876493818 30 ctime=1759499030.801361639 nordugrid-arc-7.1.1/src/services/monitor/lang/ru.inc0000644000175000002070000020053415067751327023424 0ustar00mockbuildmock00000000000000ARC."; $str_val = "Значение атрибута, запиÑанное в Информационной СиÑтеме."; $str_que = "Обычно очереди различаютÑÑ Ð»Ð¸Ð±Ð¾ по допуÑтимой продолжительноÑти Ñчёта, либо по допущенной группе пользователей. По щелчку выводитÑÑ Ð¿Ð¾Ð»Ð½Ð¾Ðµ опиÑание очереди, включающее ÑпиÑок вÑех извеÑтных задач: в Ñчёте, в очереди и закончившихÑÑ."; $str_job = " Ð˜Ð¼Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸, приÑвоенное хозÑином.
N/A означает, что хозяин не присвоил никакого имени.
X означает, что хозяин отменил исполнение задачи.
! означает, что при исполнении задачи произошла ошибка.
По щелчку выводится подробное описание задачи."; $str_nam = "Имя пользователя, в соответствии с его личным сертификатом. По щелчку выводится сводная таблица всех Грид-ресурсов, доступных данному пользователю, и список всех его задач, зарегистрированных на данный момент в системе."; $str_sta = "Состояние задачи: стадия прогресса в ГМ или статус в СУПО. Последовательность возможных состояний такова:
ACCEPTED – задача принята, но исполнение ещё не началось
PREPARING – подгружаются необходимые входные данные
SUBMITTING – посылается задание в СУПО
INLRMS – управление задачей передано в СУПО; информационная система обеспечивает сведения о внутреннем состоянии задачи. Возможны следующие состояния:
: Q – задача ожидает в очереди
: U – задача приостановлена на перегруженном узле (PBSPro)
: S – задача приостановлена (Condor)
: R, run – задача исполняется
: E – задача заканчивается (PBS)
FINISHING – выходные данные пересылаются по назначению
FINISHED – задача завершена; информационная система добавляет метку времени окончания
CANCELING – задача отменяется
DELETED – результаты задачи не затребованы хозяином, но уничтожены сервером по истечении времени хранения (обычно 24 часа).
К каждому состоянию может быть добавлена приставка \"PENDING:\", что означает, что ГМ не может в данный момент перейти к следующему этапу исполнения из-за соответствующих внутренних ограничений."; $str_tim = "Процессорное время, затраченное задачей, в минутах."; $str_mem = "Объём оперативной памяти, используемый задачей на текущий момент, в килобайтах"; $str_cpu = "Число процессоров, занимаемых задачей."; // Actual messages $message = array ( // Table headers and help "loadmon" => array( "0" => "Грид-монитор", "help" => "
В этом окне приведена сводная таблица всех вычислительных ресурсов, регистрирующихся в списки высшего уровня ARC. Таблица упорядочена по английскому названию страны, и в каждой стране – по имени головной машины. Для каждого ресурса выведены следующие параметры: название, общее число процессоров, число занятых процессоров, а также количество заданий в очереди, как засланных через Грид, так и местных. Используйте утилиту \"Поиск\" для просмотра и сравнения других параметров кластеров, очередей, задач и т.д.
Страна
".$clickable.". Флаг и название страны, как следует из доступного описания ресурса. По щелчку выводится сводная таблица только для этой страны.
Ресурс
".$clickable.". Название ресурса (обычно, кластера), присвоенное владельцем. Длина строки не должна превышать 22 символа. По щелчку выводится полное описание ресурса (кластера).
ЦП
Общее число центральных процессоров в кластере. Внимание! Лишь часть из них может быть доступна Грид-пользователям.
Загрузка (процессы)
".$clickable.". Относительная загрузка кластера, исходя из числа занятых процессоров. Серая полоса соответствует количеству процессоров, занятых под местные задачи, тогда как красная полоса указывает на количество процессоров, исполняющих Грид-задачи. По щелчку выводится сводка всех активных Грид-задач на кластере, включающая информацию о числе процессоров на каждую задачу.
Ожидают
".$clickable.". Число всех задач, ожидающих в очереди на данном кластере, представленное в виде суммы Грид- и локальных задач. По щелчку на первой цифре выводится сводка всех задач в очереди, засланных через Грид.
", "Страна" => 30, "Ресурс" => 160, "ЦП" => 10, "Загрузка (процессы)" => 210, "Ожидают" => 10 ), "clusdes" => array( "0" => "Описание ресурса", "help" => "
Атрибут
".$clickable.". Названия атрибутов кластера".$str_att."
Значение
".$str_val."
Очередь
".$clickable.". Названия очередей (присвоенные владельцами), доступных для Грид-пользователей. ".$str_que."
Состояние
Состояние очереди. Работающая очередь обычно выдаёт состояние active.
Длительность (мин)
Пределы по времени на продолжительность обработки задания в очереди, если таковые установлены, в минутах процессорного времени. Первое значение соответствует нижнему пределу, второе – верхнему. Если пределы не установлены (т.е., очередь принимает задачи любой продолжительности), выводится метка N/A.
Считаются
Число задач, считающихся в очереди. Показано общее число задач, причём число процессоров, занятых под Грид-задачи, указано в скобках, например: (Грид: 12). Внимание! При наличии параллельных многопроцессорных задач, число в скобках может превышать общее число задач.
Ожидают
Число заданий, ожидающих исполнения в очереди. Показано общее число задач, причём количество заданий, засланных через Грид, указано в скобках, например: (Грид: 235).
", "Очередь" => 0, "Mapping Queue" => 0, "Состояние" => 0, "Длительность (мин)" => 0, "ЦП" => 0, "Считаются" => 0, "Ожидают" => 0 ), "jobstat" => array( "0" => "Задачи на:Ярлык задачи", "help" => "
СПИСОК ЗАДАЧ:
Имя задачи
".$clickable.". Имя задачи, присвоенное хозяином. N/A означает, что хозяин не присвоил никакого имени. По щелчку выводится подробное описание задачи.
Хозяин
".$clickable.". ".$str_nam."
Состояние
".$str_sta."
Время (мин)
".$str_tim."
Очередь
".$clickable.". Название очереди СУПО, в которой происходит исполнение задачи.".$str_que."
ЦП
".$str_cpu."
ОПИСАНИЕ ЗАДАЧИ:
Атрибут
".$clickable.". Названия атрибутов задачи.".$str_att."
Значение
".$str_val."
", "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "Очередь" => 0, "ЦП" => 0 ), "volist" => array( "0" => "Виртуальные организации", "help" => "
Виртуальные организации
".$clickable.". Группа пользователей – обычно объединяемых совместной целью или ресурсами, – допущенная к работе по крайней мере на одном из ресурсов ARC. По щелчку выводится список членов группы.
Члены
Количество членов группы.
Обслуживается
Адрес сервера, поддерживающего базу данных членов группы.
", "Виртуальная организация" => 0, "Члены" => 0, "Обслуживается" => 0 ), "vousers" => array( "0" => "Пользователи", "help" => "
Имя
".$clickable.". ".$str_nam."
Место работы
Место работы пользователя, в соответствии с записью в базе данных. Необязательно.
Электронная почта
".$clickable.". Адрес электронной почты пользователя, в соответствии с записью в базе данных. Необязательно. По щелчку создается сообщение для пользователя.
", "#" => 0, "Имя" => 0, "Место работы" => 0, "Электронная почта" => 0 ), "userlist" => array( "0" => "Информация для", "help" => "
Ресурс:очередь
".$clickable.". Названия ресурсов (кластеров) и соответствующих очередей СУПО (разделённые двоеточием), доступных данному пользователю. Если доступ закрыт, выводится сообщение "Нет доступа к ресурсу". По щелчку на названии кластера выводится полное описание ресурса (кластера). По щелчку на названии очереди выводится полное описание очереди.
Свободные ЦП.
Число свободных центральных процессоров, доступных в данной очереди для данного пользователя на данный момент времени. Если очередь имеет ограничения по времени на исполнение задач, этот предел указан после числа процессоров (в минутах, разделяется двоеточием). Например, "3" означает, что 3 процессора доступно для сколь угодно продолжительных задач; "4:360" означает, что 4 процессора доступно для задач, не превышающих 6 часов; "10:180 30" означает, что 10 процессоров доступно для задач, не превышающих 3 часов, и 30 процессоров доступно для сколь угодно продолжительных задач; "0" означает, что свободных ресурсов нет, и задачи будут направлены на ожидание в очереди.
Задачи в очереди
Количество задач пользователя, расположенных в списке ожидания перед новой задачей, засланной от имени данного пользователя. Число "0" означает, что задача предположительно будет запущена на счёт немедленно. Внимание! Это лишь предположительные значения, которые могут быть изменены локальными операторами.
Диск, доступно (Мб)
Пространство на локальном жёстком диске, доступное данному пользователю в данной очереди (в мегабайтах). Внимание! Это лишь предположительные значения, т.к. большинство кластеров не поддерживают дисковые квоты.
Имя задачи
".$clickable.". ".$str_job."
Состояние
".$str_sta."
Время (мин)
".$str_tim."
Ресурс
".$clickable.". Имя ресурса (обычно, кластера), на котором происходит исполнение задачи. По щелчку выводится полное описание ресурса (кластера).
Очередь
".$clickable.". Название очереди СУПО, в которой происходит исполнение задачи. ".$str_que."
ЦП
".$str_cpu."
", "" => 0, "Имя задачи" => 0, "Состояние" => 0, "Время (мин)" => 0, "Ресурс" => 0, "Очередь" => 0, "ЦП" => 0 ), "attlist" => array( "0" => "Значения атрибутов", "help" => "
Объект
".$clickable." Название объекта, атрибуты которого перечислены в строке. Это может быть имя кластера, имя очереди, имя задачи, имя пользователя и т.д. По щелчку выводится подробное описание объекта.
Атрибут
Для каждого объекта в таблице приведены значения одного или нескольких его атрибутов. В заголовке столбца указано название атрибута, интерпретированное для простоты чтения (за исключением нескольких атрибутов, специфичных для системы MDS), а содержимым каждого столбца являются значения соответствующих атрибутов, записанные в Информационной Системе.
", "Объект" => 0, "Атрибут" => 0 ), "quelist" => array( "0" => "Очередь", "help" => "
Атрибут
".$clickable.". Названия атрибутов очереди".$str_att."
Значение
".$str_val."
Имя задачи
".$clickable.". ".$str_job."
Хозяин
".$clickable.". ".$str_nam."
Состояние
".$str_sta."
Время (мин)
".$str_tim."
ОЗУ (Кб)
".$str_mem."
ЦП
".$str_cpu."
", "" => 0, "Имя задачи" => 0, "Хозяин" => 0, "Состояние" => 0, "Время (мин)" => 0, "ОЗУ (Кб)" => 0, "ЦП" => 0 ), "sestat" => array( "0" => "Накопительные устройства", "help" => "
Название
Название накопительного устройства, зарегистрированное в Информационной Системе. Максимально допустимая длина: 15 символов.
Весь объём
Полный объём диска, Гб.
Свободно
Доступное пространство на диске в настоящий момент, Гб.
Имя
Имя накопительного устройства, состоящее из логического имени и имени сервера (разделённое двоеточием). Логическое имя используется только Информационной Системой, для простоты распознавания разных накопительных устройств, находящихся на одном и том же сервере.
URL базы
URL накопительного устройства, обычно по протоколу gsiftp://. Используйте этот адрес как базовый для доступа к файлам.
Тип
Тип накопительного устройства. Тип "gridftp-based" означает что это дисковый накопитель с интерфейсом GridFTP.
", "#" => 0, "Название" => 0, // "Весь объём" => 0, "Свободно/весь объём, Гб"=> 0, "Имя" => 0, "URL базы" => 0, "Тип" => 0 ), "allusers" => array( "0" => "Допущенные пользователи:Активные пользователи", "help" => "
Имя
".$clickable.". ".$str_nam."
Место работы
Место работы пользователя, в соответствии с записью в его сертификате.
Задачи
Число всех задач пользователя, находящихся в системе (в счёте, в очереди и закончившихся).
Ресурсы
Число кластеров, на которых данный пользователь имеет допуск.
    ", "#" => 0, "ИмÑ" => 0, "МеÑто работы" => 0, "Задачи" => 0, "РеÑурÑÑ‹" => 0 ), "userres" => array( "0" => "", "РеÑурÑ:очередь" => 0, "Свободные ЦП" => 0, "Задачи в очереди" => 0, "ДиÑк, доÑтупно (Мб)" => 0 ), "ldapdump" => array( "0" => "", "Ðтрибут" => 0, "Значение" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Данные дейÑтвительны Ñ (GMT)", "Mds-validto" => "Данные дейÑтвительны по (GMT)" ), "isattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Данные дейÑтвительны Ñ (GMT)", "Mds-validto" => "Данные дейÑтвительны по (GMT)", "nordugrid-cluster-name" => "Ð˜Ð¼Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ð¾Ð¹ машины", "nordugrid-cluster-aliasname" => "Ðазвание", "nordugrid-cluster-contactstring" => "Контактный адреÑ", "nordugrid-cluster-interactive-contactstring" => "Интерактивный адреÑ", "nordugrid-cluster-comment" => "Комментарий", "nordugrid-cluster-support" => "ÐÐ´Ñ€ÐµÑ Ð¾Ñ‚Ð²ÐµÑ‚Ñтвенного", "nordugrid-cluster-acl" => "Допущенные ВО", "nordugrid-cluster-lrms-type" => "СУПО, тип", "nordugrid-cluster-lrms-version" => "СУПО, верÑиÑ", "nordugrid-cluster-lrms-config" => "СУПО, подробноÑти", "nordugrid-cluster-architecture" => "Ðрхитектура", "nordugrid-cluster-opsys" => "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема", "nordugrid-cluster-homogeneity" => "ОднородноÑть реÑурÑа", "nordugrid-cluster-nodecpu" => "ПроцеÑÑор, тип (худший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, наименьшее)", "nordugrid-cluster-totalcpus" => "ПроцеÑÑоры, вÑего", "nordugrid-cluster-cpudistribution" => "ПроцеÑÑоры:узлы", "nordugrid-cluster-benchmark" => "Эталонный теÑÑ‚", "nordugrid-cluster-sessiondir-free" => "ДиÑк, доÑтупно (Мб)", "nordugrid-cluster-sessiondir-total" => "ДиÑк, веÑÑŒ объём (Мб)", "nordugrid-cluster-sessiondir-lifetime"=> "Ð’Ñ€ÐµÐ¼Ñ Ð¶Ð¸Ð·Ð½Ð¸ Грид-ÑеÑÑии (мин)", "nordugrid-cluster-cache-free" => "ДиÑковый кÑш, Ñвободно (Мб)", "nordugrid-cluster-cache-total" => "ДиÑковый кÑш, вÑего (Мб)", "nordugrid-cluster-runtimeenvironment" => "Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда", "nordugrid-cluster-localse" => "Локальный накопитель", "nordugrid-cluster-middleware" => "Грид-ПО", "nordugrid-cluster-totaljobs" => "Задачи, вÑего", "nordugrid-cluster-usedcpus" => "ПроцеÑÑоры, занÑтые", "nordugrid-cluster-queuedjobs" => "Задачи в очереди (УСТÐРЕВШИЙ)", "nordugrid-cluster-prelrmsqueued" => "Грид-задачи, ждущие заÑылки", "nordugrid-cluster-location" => "Почтовый индекÑ", "nordugrid-cluster-owner" => "Владелец", "nordugrid-cluster-issuerca" => "Центр Ñертификации", "nordugrid-cluster-issuerca-hash" => "Хеш-код центра Ñертификации", "nordugrid-cluster-trustedca" => "ДоверÑемые центры Ñертификации", "nordugrid-cluster-nodeaccess" => "IP-Ñоединение узлов", "nordugrid-cluster-gridarea" => "ÐÐ´Ñ€ÐµÑ ÑеÑÑий (УСТÐРЕВШИЙ)", "nordugrid-cluster-gridspace" => "Грид-диÑк (УСТÐРЕВШИЙ)", "nordugrid-cluster-opsysdistribution" => "ДиÑтрибутив ОС (УСТÐРЕВШИЙ)", "nordugrid-cluster-runningjobs" => "Задачи в Ñчёте (УСТÐРЕВШИЙ)", "nordugrid-cluster-credentialexpirationtime" => "Срок дейÑÑ‚Ð²Ð¸Ñ Ñертификата", "nordugrid-queue-name" => "Ð˜Ð¼Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´Ð¸", "nordugrid-queue-comment" => "Комментарий", "nordugrid-queue-status" => "СоÑтоÑние очереди", "nordugrid-queue-running" => "Ð’Ñе занÑтые процеÑÑоры", "nordugrid-queue-localqueued" => "Локальные задачи в очереди", "nordugrid-queue-prelrmsqueued" => "Грид-задачи, ждущие заÑылки", "nordugrid-queue-queued" => "Задачи в очереди (УСТÐРЕВШИЙ)", "nordugrid-queue-maxrunning" => "Задачи в Ñчёте (предел)", "nordugrid-queue-maxqueuable" => "Задачи в очереди (предел)", "nordugrid-queue-maxuserrun" => 
"Задачи на Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ (предел)", "nordugrid-queue-maxcputime" => "Ð’Ñ€ÐµÐ¼Ñ Ð¿Ñ€Ð¾Ñ†ÐµÑÑора, наибольшее (мин)", "nordugrid-queue-mincputime" => "Ð’Ñ€ÐµÐ¼Ñ Ð¿Ñ€Ð¾Ñ†ÐµÑÑора, наименьшее (мин)", "nordugrid-queue-defaultcputime" => "Ð’Ñ€ÐµÐ¼Ñ Ð¿Ñ€Ð¾Ñ†ÐµÑÑора, по умолчанию (мин)", "nordugrid-queue-maxwalltime" => "ПродолжительноÑть, Ð½Ð°Ð¸Ð±Ð¾Ð»ÑŒÑˆÐ°Ñ (мин)", "nordugrid-queue-minwalltime" => "ПродолжительноÑть, Ð½Ð°Ð¸Ð¼ÐµÐ½ÑŒÑˆÐ°Ñ (мин)", "nordugrid-queue-defaultwalltime" => "ПродолжительноÑть, по умолчанию (мин)", "nordugrid-queue-schedulingpolicy" => "Правила планировки", "nordugrid-queue-totalcpus" => "ПроцеÑÑоры, вÑего", "nordugrid-queue-nodecpu" => "ПроцеÑÑор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Ðрхитектура", "nordugrid-queue-opsys" => "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема", "nordugrid-queue-homogeneity" => "ОднородноÑть очереди", "nordugrid-queue-gridrunning" => "ПроцеÑÑоры под грид-задачами", "nordugrid-queue-gridqueued" => "Грид-задачи в очереди", "nordugrid-queue-benchmark" => "Эталонный теÑÑ‚", "nordugrid-queue-assignedcpunumber" => "ПроцеÑÑоры (УСТÐРЕВШИЙ)", "nordugrid-queue-assignedcputype" => "Тип процеÑÑора (УСТÐРЕВШИЙ)", "nordugrid-job-globalid" => "Ярлык", "nordugrid-job-globalowner" => "ХозÑин", "nordugrid-job-execcluster" => "ВыполнÑющий клаÑтер", "nordugrid-job-execqueue" => "ВыполнÑÑŽÑ‰Ð°Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´ÑŒ", "nordugrid-job-stdout" => "Стандартный выход", "nordugrid-job-stderr" => "Ð¡Ñ‚Ð°Ð½Ð´Ð°Ñ€Ñ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°", "nordugrid-job-stdin" => "Стандартный вход", "nordugrid-job-reqcputime" => "Запрошенное процеÑÑорное времÑ", "nordugrid-job-reqwalltime" => "Запрошенное времÑ", "nordugrid-job-status" => "СоÑтоÑние", "nordugrid-job-queuerank" => "Положение в очереди", "nordugrid-job-comment" => "Комментарий СУПО", "nordugrid-job-submissionui" => "ЗаÑылающий клиент", "nordugrid-job-submissiontime" => "Ð’Ñ€ÐµÐ¼Ñ Ð·Ð°Ñылки (GMT)", "nordugrid-job-usedcputime" => "ИÑпользованное процеÑÑорное времÑ", "nordugrid-job-usedwalltime" => "ИÑпользованное времÑ", "nordugrid-job-completiontime" => "Ð’Ñ€ÐµÐ¼Ñ Ð¾ÐºÐ¾Ð½Ñ‡Ð°Ð½Ð¸Ñ (GMT)", "nordugrid-job-sessiondirerasetime" => "Срок ÑƒÐ½Ð¸Ñ‡Ñ‚Ð¾Ð¶ÐµÐ½Ð¸Ñ (GMT)", "nordugrid-job-proxyexpirationtime" => "Окончание доверенноÑти (GMT)", "nordugrid-job-usedmem" => "ИÑпользование ОЗУ (Кб)", "nordugrid-job-errors" => "Ошибки", "nordugrid-job-exitcode" => "Код возврата", "nordugrid-job-jobname" => "ИмÑ", "nordugrid-job-runtimeenvironment" => "Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда", "nordugrid-job-cpucount" => "Запрошено процеÑÑоров", "nordugrid-job-executionnodes" => "ВыполнÑющие узлы", "nordugrid-job-gmlog" => "Ð–ÑƒÑ€Ð½Ð°Ð»ÑŒÐ½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ ГМ", "nordugrid-job-clientsoftware" => "ВерÑÐ¸Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð°", "nordugrid-job-rerunable" => "ПерезапуÑкаемоÑть", "nordugrid-job-reqcput" => "Запрошенное Ð²Ñ€ÐµÐ¼Ñ (УСТÐРЕВШИЙ)", "nordugrid-job-gridlog" => "Грид-запиÑÑŒ (УСТÐРЕВШИЙ)", "nordugrid-job-lrmscomment" => "Комментарий СУПО (УСТÐРЕВШИЙ)", "nordugrid-authuser-name" => "ИмÑ", "nordugrid-authuser-sn" => "Субъект", "nordugrid-authuser-freecpus" => "Свободные ЦП", "nordugrid-authuser-diskspace" => "ДиÑк, доÑтупно (Мб)", "nordugrid-authuser-queuelength" => "Задачи Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð² очереди", "nordugrid-se-name" => "УÑловное имÑ", "nordugrid-se-aliasname" => "Ðазвание", "nordugrid-se-type" => "Тип", "nordugrid-se-acl" => "Допущенные ВО", "nordugrid-se-freespace" => "Свободный объём (Мб)", "nordugrid-se-totalspace" => "ВеÑÑŒ объём (Мб)", "nordugrid-se-url" => "Контактный адреÑ", "nordugrid-se-baseurl" => "Контактный 
адрес (УСТАРЕВШИЙ)", "nordugrid-se-accesscontrol" => "Контроль доступа", "nordugrid-se-authuser" => "Допущенные пользователи (DN)", "nordugrid-se-location" => "Почтовый индекс", "nordugrid-se-owner" => "Владелец", "nordugrid-se-middleware" => "Грид-ПО", "nordugrid-se-issuerca" => "Центр сертификации", "nordugrid-se-issuerca-hash" => "Хеш-код центра сертификации", "nordugrid-se-trustedca" => "Доверяемые центры сертификации", "nordugrid-se-comment" => "Комментарий", "nordugrid-rc-name" => "Доменное имя", "nordugrid-rc-aliasname" => "Название", "nordugrid-rc-baseurl" => "Контактный адрес", "nordugrid-rc-authuser" => "Допущенные пользователи (DN)", "nordugrid-rc-location" => "Почтовый индекс", "nordugrid-rc-owner" => "Владелец", "nordugrid-rc-issuerca" => "Сертификат выдан" ), "errors" => array( "1" => "Невозможно прочесть списки высшего уровня", "2" => "Ни один из местных списков не отзывается", "3" => " неверная конфигурация или истекло время запроса", "4" => "Нет Грид-задач", "5" => "Нет информации", "6" => "Служба недоступна", "7" => " - попробуйте обновить позже", "8" => "Нет информации об очереди", "9" => "Нет данных", "10" => "Нет пользователей", "11" => "Нет доступа к ресурсу", "12" => "не отзывается", "13" => "На настоящий момент нет задач пользователя ", "101" => " Время на связь с локальным списком: ", "102" => " с на соединение и ", "103" => " с на поиск", "104" => " с затрачено на поиск", "105" => "Перечисление ресурсов: ", "106" => "Опрошено списков верхнего уровня: ", "107" => "Получены географические координаты, просканировано ресурсов: ", "108" => " ресурсов упорядочено по геополитическому признаку", "109" => "Поиск атрибутов кластера", "110" => "Поиск атрибутов очереди", "111" => "Нет данных с ", "112" => " функционирует в стране: ", "113" => " не располагает ресурсами", "114" => " Время на связь с глобальным списком: ", "115" => "Игнорируется ресурс: ", "116" => "не соответствует типу ", "117" => "Проверка связи: ", "118" => "есть", "119" => "На данный момент обнаружено ресурсов типа ", "120" => "Ошибка LDAP при поиске на ", "121" => "-состояние на ", "122" => "Заблокирован: ", "123" => "Обнаружен регистрант ", "124" => "Поиск атрибутов накопителей", "125" => "Поиск пользователей", "126" => "Поиск задач", "127" => " запустил(а) задачу ", "128" => " не будучи допущенным(ой)", "129" => "Нет информации об объекте: ошибка ", "130" => " Время на связь с глобальным списком: ", "301" => "Перезагрузить", "302" => "Печать", "303" => "Помощь", "304" => "Закрыть", "305" => "Красный", "306" => "Серый", "307" => "Все пользователи", "308" => "Активные пользователи", "309" => "Поиск", "310" => "Накопители", "311" => "Виртуальные организации", "312" => "Флаг страны: ", "313" => " Грид-процессов и ", "314" => " местных процессов", "401" => "Процессы", "402" => "Грид", "403" => "местные", "404" => "Мир", "405" => "ВСЕГО", "406" => " объектов", "407" => "куча", "408" => " Гб", "409" => " ВСЕ", "410" => "Кластер", "411" => "Очередь", "412" => "Задача", "413" => "Пользователь", "414" => "Накопитель", "415" => "Каталог реплик", "416" => "Задайте атрибуты для просмотра; выбранный объект: ", "417" => "Поиск проводится для логического И всех выражений", "418" => "Не заполняйте правое поле, если фильтр не нужен", "419" => "Просмотр ресурсов или объектов по выбору", "420" => "Выделенное имя", "421" => "Может использовать ", "422" => " кластеров", "423" => "Ресурс / объект:",
"424" => "Кол.-во атрибутов (6 по ум.):", "425" => "Объект", "426" => "Дальше", "427" => "Выберите", "428" => "ОчиÑтить", "429" => "ПОКÐЗÐТЬ" ), // Country name conversion, no postcode! "tlconvert" => array ( "Australia" => "ÐвÑтралиÑ", "Austria" => "ÐвÑтриÑ", "Armenia" => "ÐрмениÑ", "Algeria" => "Ðлжир", "Belgium" => "БельгиÑ", "Bulgaria" => "БолгариÑ", "Canada" => "Канада", "Chile" => "Чили", "China" => "Китай", "Czechia" => "ЧехиÑ", "Denmark" => "ДаниÑ", "Estonia" => "ЭÑтониÑ", "Finland" => "ФинлÑндиÑ", "France" => "ФранциÑ", "Georgia" => "ГрузиÑ", "Germany" => "ГерманиÑ", "Greece" => "ГрециÑ", "HongKong" => "Гонконг", "Hungary" => "ВенгриÑ", "Iceland" => "ИÑландиÑ", "Ireland" => "ИрландиÑ", "Italy" => "ИталиÑ", "Japan" => "ЯпониÑ", "Latvia" => "ЛатвиÑ", "Lithuania" => "Литва", "Morocco" => "Марокко", "Netherlands" => "Ðидерланды", "Norway" => "ÐорвегиÑ", "Poland" => "Польша", "Portugal" => "ПортугалиÑ", "Romania" => "РумыниÑ", "Russia" => "РоÑÑиÑ", "SriLanka" => "Шри-Ланка", "Sweden" => "ШвециÑ", "Slovakia" => "СловакиÑ", "Slovenia" => "СловениÑ", "Spain" => "ИÑпаниÑ", "Switzerland" => "ШвейцариÑ", "Taiwan" => "Тайвань", "Turkey" => "ТурциÑ", "UK" => "ВеликобританиÑ", "Ukraine" => "Украина", "USA" => "СШÐ", "World" => "Мир" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/Makefile.in0000644000000000000000000000013115067751357022444 xustar0030 mtime=1759498991.172163944 30 atime=1759499019.474277849 29 ctime=1759499030.78983629 nordugrid-arc-7.1.1/src/services/monitor/lang/Makefile.in0000644000175000002070000005107315067751357024355 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/monitor/lang ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| 
$$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(monitorlangdir)" DATA = $(monitorlang_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ 
GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ 
SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ monitorlangdir = @monitor_prefix@/lang monitorlang_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorlang_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/monitor/lang/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/monitor/lang/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitorlangDATA: $(monitorlang_DATA) @$(NORMAL_INSTALL) @list='$(monitorlang_DATA)'; test -n "$(monitorlangdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(monitorlangdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(monitorlangdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitorlangdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitorlangdir)" || exit $$?; \ done uninstall-monitorlangDATA: @$(NORMAL_UNINSTALL) @list='$(monitorlang_DATA)'; test -n "$(monitorlangdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(monitorlangdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitorlangdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitorlangDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitorlangDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-monitorlangDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am \ uninstall-monitorlangDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/no.inc0000644000000000000000000000013215067751327021504 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.876493818 30 ctime=1759499030.799745476 nordugrid-arc-7.1.1/src/services/monitor/lang/no.inc0000644000175000002070000013711015067751327023411 0ustar00mockbuildmock00000000000000 N/A indikerer at eier ikke har tildelt jobben et navn.
    X indikerer at eier har avbrutt jobben.
    ! indikerer at jobben ikke ble fullført.
    Klikk på et navn for å få en detaljert beskrivelse av jobben."; $str_nam = "Brukernavn som spesifisert i det personlige sertifikatet. Klikk på et navn for resurser tilgjengelige for denne brukeren og dennes jobber i systemet."; $str_sta = "Jobbstatus som returnert av gridmanageren (GM) og lokalt resursmanagementsystem LRMS. Kronologisk er tilstandene:
    ACCEPTED – jobben er sendt, men ikke behandlet.
    PREPARING – inputfiler hentes.
    SUBMITTING – forhandlinger med LRMS pågår.
    INLRMS – jobben er overført til LRMS. Informasjonssystemet lagrer lokal status. Mulige tilstander er:
    : Q – jobben er i køen
    : U – jobben er satt på vent på en opptatt maskin (PBSPro)
    : S – jobben er satt på vent (Condor)
    : R, run – jobben kjøres.
    : E – jobben avsluttes (PBS)
    FINISHING – outputfiler overføres av GM.
    FINISHED – jobben er avsluttet; tidsstempel legges til av informasjonssystemet.
    CANCELING – jobben avbrytes.
    DELETED – jobben er ikke ryddet opp av eier, men slettet av GM på grunn av overgått lagringstid.
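The state list above arrives from the information system as strings such as "INLRMS: Q" or, with the prefix described just below, "PENDING:INLRMS: Q". A hedged PHP sketch of splitting such a value — parse_status() is a hypothetical helper, not a function from this package:

<?php
// Illustrative only: decompose a job status string into the optional
// PENDING prefix, the GM state and the LRMS sub-state.
function parse_status($status) {
    $pending = (strpos($status, "PENDING:") === 0);
    if ($pending) {
        $status = substr($status, strlen("PENDING:"));
    }
    $parts = explode(":", $status, 2);  // e.g. "INLRMS" + " Q"
    return array(
        "pending"    => $pending,
        "gm_state"   => trim($parts[0]),                            // ACCEPTED .. DELETED
        "lrms_state" => isset($parts[1]) ? trim($parts[1]) : null,  // Q, U, S, R, E
    );
}
?>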
    Alle disse tilstandene kan meldes med prefikset PENDING: som betyr at GM prøver å flytte jobben over i neste tilstand."; $str_tim = "CPU-tid i minutter brukt av jobben."; $str_mem = "Minne i KB brukt av jobben."; $str_cpu = "Antall prosessorer brukt av jobben."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Gridmonitor", "help" => "
    Denne siden viser alle klynger (sites) som har registrert seg i indekstjenesten til ARC, sortert etter land og deretter maskinnavn. Følgende klyngeparametere listes: klyngealias, total CPU-kapasitet og antall kjørende og ventende jobber, både lokale jobber og gridjobber. Bruk søkefunksjonen hvis annen informasjon om klynger, køer, jobber eller lignende er ønsket.
    Land
    ".$clickable.". Flagg og navn på land hentet fra tilgjengellige resursbeskrivelser. Klikk for å få opp infomasjon om et land.
    Klynger
    ".$clickable.". Klyngealias tildelt av eier. Maksimalt vises 22 tegn. Klikk på aliaset for en detaljert klyngebeskrivelse.
    CPU-er
    Totalt antall CPU-er i en klynge. OBS! Muligens er bare noen av disse tilgjengelige for gridbrukere.
    Belastning (prosesser: grid + lokalt)
    ".$clickable.". Relativ klyngebelastning som tilsvarer antall opptatte CPU-er. Grå felt viser antall prosessorer som kjører lokale jobbber, røde felt viser antall CPU-er som kjører gridjobber. Klikk på feltet for en detaljert liste over alle gridjobber som kjøres på klyngen, inklusive antall prosessorer per jobb.
    Ventende
    ".$clickable.". Totalt antall jobber som venter på klyngen, vises som antall ventende gridjobber pluss antall ventende lokale jobber. Klikk på det første sifferet for å liste ventende gridjobber på klyngen.
    ", "Land" => 30, "Klynge" => 160, "CPU-er" => 10, "Belastning (prosesser: grid + lokalt)" => 210, "Ventende" => 10 ), "clusdes" => array("0" => "Resursinformasjon for", "help" => "
    Attributt
    ".$clickable.". Klyngeattributtnavn".$str_att."
    Verdi
    ".$str_val."
    Kø
    ".$clickable.". Klyngeeiers navn på batchkøene som er tilgjengelige for ARC brukere.".$str_que."
    Status
    Køstatus. Fungerende køer viser normalt status active.
    Tidsgrenser (min)
    Tidsgrense for jobblengde per kø, hvis definert, i CPU-minutter. Den første verdien er den nedre grensen, den andre den øvre. Hvis ingen grenser er definert, dvs. alle jobber er tillatt, vises N/A
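A small illustrative sketch of how the two values in this column can be rendered from a queue's minimum and maximum CPU-time attributes, with N/A when neither limit is defined; format_time_limits() is a hypothetical helper, and the exact attribute pairing is an assumption:

<?php
// Illustrative only: format the "Tidsgrenser (min)" column from
// nordugrid-queue-mincputime / nordugrid-queue-maxcputime values;
// null means the corresponding limit is not defined.
function format_time_limits($min, $max) {
    if ($min === null && $max === null) {
        return "N/A";               // all job lengths accepted
    }
    $lo = ($min === null) ? "0" : (string) $min;
    $hi = ($max === null) ? "inf" : (string) $max;
    return $lo . " - " . $hi;       // lower - upper bound, in minutes
}
?>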
    Kjøres
    Antall kjørende jobber. Totalt antall jobber vises med antall prosessorer med gridjobber i parentes, f.eks. (Grid: 12). OBS! For parallelle multiprosessorjobber kan nummeret i parentes være større enn antall jobber.
    Køer
    Antall jobber i køen. Totalt antall jobber vises med gridjobber i parentes, f.eks. (Grid: 235)
    ", "Kø" => 0, "Mapping Queue" => 0, "Status" => 0, "Tidsgrenser (min)" => 0, "CPU-er" => 0, "Kjøres" => 0, "Køer" => 0 ), "jobstat" => array("0" => "Jobber på:Jobb-ID", "help" => "
    JOBBLISTE:
    Jobbnavn
    ".$clickable.". ".$str_job."
    Eier
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Kø
    ".$clickable.". Navn på batchkø hvor jobben kjøres. ".$str_que."
    CPU-er
    ".$str_cpu."
    JOBBINFORMASJON:
    Attributt
    ".$clickable.". Jobbattributtnavn".$str_att."
    Verdi
    ".$str_val."
    ", "Jobbnavn" => 0, "Eier" => 0, "Status" => 0, "CPU (min)" => 0, "Kø" => 0, "CPU-er" => 0 ), "volist" => array("0" => "Virtuelle organisasjoner", "help" => "
    Virtuell organisasjon VO
    ".$clickable.". En gruppe brukere som ofte arbeider med det samme og bruker de samme resursene. En VO er autorisert på minst en ARC klynge. Klikk på navnet for å få en liste over medlemmene.
    Medlemmer
    Antall medlemmer.
    Tjener
    LDAP-tjener som huser databasen med medlemskapene.
    ", "Virtuell organisasjon" => 0, "Medlemmer" => 0, "Tjener" => 0 ), "vousers" => array("0" => "Gridbrukerbase", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brukerens hjemmeinstitutt registrert av en VO manager. Kan være tomt.
    E-post
    ".$clickable.". Brukerens e-post registrert av en VO-manager. Kan være tomt. Klikk på adressen for å sende en e-post til brukeren.
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "E-post" => 0 ), "userlist" => array("0" => "Informasjon om", "help" => "
    Klynge:kø
    ".$clickable.". Navn på klynge og dens respektive køer (separert med et kolon, ":") som brukerern er autorisert til å sende jobber til. Hvis brukeren ikke er autorisert vises meldingen "Not authorised at host ...". Klikk på klyngenavn for å få detaljert klyngebeskrivelse. Klikk på kønavn for å få detaljert købeskrivelse.
    Ledige CPU-er
    Antall ledige CPU-er i køen for denne brukeren i øyeblikket, iblant med en øvre tidsgrense i minutter. F.eks. "3" betyr tre CPU-er tilgjengelige for en jobb med ubegrenset kjøringstid; "4:360" indikerer at det finnes fire CPU-er tilgjengelige for jobber kortere enn seks timer; "10:180 30" betyr at det finnes ti CPU-er tilgjengelige for jobber som ikke overgår tre timer, pluss 30 CPU-er tilgjengelige for jobber av valgfri lengde; "0" betyr at det ikke finnes noen CPU-er tilgjengelige for øyeblikket og at jobben kommer til å bli satt i kø.
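The format just described ("3", "4:360", "10:180 30") is a space-separated list of cpucount[:minutes] groups. An illustrative PHP sketch of decoding it — parse_freecpus() is a hypothetical name, not from the monitor code:

<?php
// Illustrative only: turn a freecpus string such as "10:180 30" into
// a list of (cpus, max minutes) pairs; 0 minutes = no time limit.
function parse_freecpus($value) {
    $slots = array();
    foreach (preg_split('/\s+/', trim($value)) as $group) {
        if ($group === "") continue;
        $pair = explode(":", $group, 2);
        $slots[] = array(
            "cpus"    => (int) $pair[0],
            "minutes" => isset($pair[1]) ? (int) $pair[1] : 0,
        );
    }
    return $slots;
}
?>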
    Ventende jobber
    Antall jobber som forventes å stå foran denne brukerens nye jobb i køen. "0" betyr at jobben forventes å kjøres umiddelbart. OBS! Dette er kun et estimat som kan overkjøres av lokale regler.
    Ledig disk (MB)
    Diskplass tilgjengelig for brukeren i en gitt kø (i megabyte). OBS! Dette er kun et estimat da de fleste klynger ikke tilbyr faste diskkvoter.
    Jobbnavn
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klynge
    ".$clickable.". Navn på klynge hvor jobben kjøres / ble kjørt. Klikk på klyngenavn for detaljert informasjon om klyngen.
    Kø
    ".$clickable.". Navn på batchkøen hvor jobben kjøres / ble kjørt. ".$str_que."
    CPU-er
    ".$str_cpu."
    ", "" => 0, "Jobbnavn" => 0, "Status" => 0, "CPU (min)" => 0, "Klynger" => 0, "Kø" => 0, "CPU-er" => 0 ), "attlist" => array("0" => "Attributtverdi", "help" => "
    Objekt
    ".$clickable.". Namn på det objekt vars attribut visas. Det kan vara ett klusternamn, ett klusters könamn, ett jobbnamn, ett användarnamn etc. Klicka på namnet för att få en detaljerad beskrivning av objektet.
    Attributt
    For hvert objekt kan én eller flere attributtverdier listes. Kolonnetittelen er det menneskelesbare attributtnavnet (bortsett fra noen MDS-spesifikke attributter), og kolonnens innhold er attributtverdiene per objekt slik de er registrert i informasjonssystemet.
    ", "Objekt" => 0, "Attributt" => 0 ), "quelist" => array("0" => "Kø", "help" => "
    Attributt
    ".$clickable.". Køattributtnavn".$str_att."
    Verdi
    ".$str_val."
    Jobbnavn
    ".$clickable.". ".$str_job."
    Eier
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Minne (KB)
    ".$str_mem."
    CPU-er
    ".$str_cpu."
    ", "" => 0, "Jobbnavn" => 0, "Eier" => 0, "Status" => 0, "CPU (min)" => 0, "Minne (KB)" => 0, "CPU-er" => 0 ), "sestat" => array("0" => "Lagringselementer", "help" => "
    Alias
    Lagringselementets alias som angitt i informasjonssystemet. Det vises maksimalt 15 tegn.
    Total plass
    Total diskplass (GB).
    Ledig plass
    Diskplass tilgjengelig for øyeblikket (GB).
    Navn
    Lagringselementets navn. Både logisk navn og maskinnavn (separert med et kolon, ":") angis. Det logiske navnet brukes av informasjonssystemet for å skille mellom ulike lagringselementer på samme maskin.
    Base-URL
    Lagringselementets URL, oftest en gsiftp:// protokoll. Bruk URL som basis for tilgang til filer.
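As the text says, the Base-URL is the prefix for reaching individual files. A one-line illustration with invented host and path values:

<?php
// Illustrative only; hostname and path are made-up example values.
$base = "gsiftp://se.example.org:2811/data";   // Base-URL from the info system
$file = "run42/output.root";                   // file inside the storage area
$url  = rtrim($base, "/") . "/" . $file;       // full address for gridftp access
?>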
    Type
    Lagringselementets type. "gridftp-based" indikerer disklagring med gridftp-grensesnitt.
    ", "#" => 0, "Alias" => 0, // "Total plass" => 0, "Ledig/total plass" => 0, "Navn" => 0, "Base-URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Autoriserte gridbrukere:Aktive gridbrukere", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brukerens tilknytning som spesifisert i det personlige sertifikatet.
    Jobber
    Totalt antall jobber som denne brukeren har i systemet (kjørende, ventende, ferdige eller slettede).
    Klynger
    Viser antall klynger som denne brukeren er autorisert på.
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "Jobber" => 0, "Klynger" => 0 ), "userres" => array("0" => "", "Klynge:kø" => 0, "Ledige CPU-er" => 0, "Ventende jobber" => 0, "Ledig disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attributt" => 0, "Verdi" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info gyldig f.o.m. (GMT)", "Mds-validto" => "Info gyldig t.o.m. (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domenenavn", "nordugrid-cluster-aliasname" => "Klyngealias", "nordugrid-cluster-contactstring" => "Kontaktstreng", "nordugrid-cluster-interactive-contactstring" => "Interaktiv kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-postkontakt", "nordugrid-cluster-acl" => "Autoriserte VO-er", "nordugrid-cluster-lrms-type" => "LRMS-type", "nordugrid-cluster-lrms-version" => "LRMS-versjon", "nordugrid-cluster-lrms-config" => "LRMS-detaljer", "nordugrid-cluster-architecture" => "Arkitektur", "nordugrid-cluster-opsys" => "Operativsystem", "nordugrid-cluster-homogeneity" => "Homogen klynge", "nordugrid-cluster-nodecpu" => "CPU-type (langsomste)", "nordugrid-cluster-nodememory" => "Minne (MB, minste)", "nordugrid-cluster-totalcpus" => "CPU-er, totalt", "nordugrid-cluster-cpudistribution" => "CPU:maskiner", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Diskplass tilgjengelig (MB)", "nordugrid-cluster-sessiondir-total" => "Diskplass totalt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Gridsesjonens levetid (min)", "nordugrid-cluster-cache-free" => "Cachestørrelse tilgjengelig (MB)", "nordugrid-cluster-cache-total" => "Cachestørrelse totalt (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtimemiljø", "nordugrid-cluster-localse" => "Lagringselement, lokalt", "nordugrid-cluster-middleware" => "Grid-middleware", "nordugrid-cluster-totaljobs" => "Jobber, totalt antall", "nordugrid-cluster-usedcpus" => "CPU-er, opptatte", "nordugrid-cluster-queuedjobs" => "Jobber, ventende", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postnummer", "nordugrid-cluster-owner" => "Eier", "nordugrid-cluster-issuerca" => "Sertifikatutstedere", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node-IP-Oppkobling", "nordugrid-cluster-gridarea" => "SesjonsomrÃ¥de (UtgÃ¥tt)", "nordugrid-cluster-gridspace" => "Griddiskplass (UtgÃ¥tt)", "nordugrid-cluster-opsysdistribution" => "OS-distribusjon (UtgÃ¥tt)", "nordugrid-cluster-runningjobs" => "Kjørende jobber (UtgÃ¥tt)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Kønavn", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Køstatus", "nordugrid-queue-running" => "CPU-er, opptatte", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobber, ventende (UtgÃ¥tt)", "nordugrid-queue-maxrunning" => "Jobber, kjørende (max)", "nordugrid-queue-maxqueuable" => "Jobber, ventende (max)", "nordugrid-queue-maxuserrun" => "Jobber per unixbruker (max)", "nordugrid-queue-maxcputime" => "CPU-tid, max. (min)", "nordugrid-queue-mincputime" => "CPU-tid, min. (min)", "nordugrid-queue-defaultcputime" => "CPU-tid, spesifisert (min)", "nordugrid-queue-maxwalltime" => "Klokketid, max. 
(min)", "nordugrid-queue-minwalltime" => "Klokketid, min. (min)", "nordugrid-queue-defaultwalltime" => "Klokketid, spesifisert (min)", "nordugrid-queue-schedulingpolicy" => "Scheduleringspolicy", "nordugrid-queue-totalcpus" => "CPU-er, totalt", "nordugrid-queue-nodecpu" => "CPU-type", "nordugrid-queue-nodememory" => "Minne (MB)", "nordugrid-queue-architecture" => "Arkitektur", "nordugrid-queue-opsys" => "Operativsystem", "nordugrid-queue-homogeneity" => "Homogen kø", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Gridjobber, ventende", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU-er per kø (FÖRLEGAD)", "nordugrid-queue-assignedcputype" => "CPU-type (UtgÃ¥tt)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Eier", "nordugrid-job-execcluster" => "Eksekveringsklynge", "nordugrid-job-execqueue" => "Eksekveringskø", "nordugrid-job-stdout" => "Standard outputfil", "nordugrid-job-stderr" => "Standard errorfil", "nordugrid-job-stdin" => "Standard inputfil", "nordugrid-job-reqcputime" => "Forlangt CPU-tid", "nordugrid-job-reqwalltime" => "Forlangt klokketid", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Plass i køen", "nordugrid-job-comment" => "LRMS-kommentar", "nordugrid-job-submissionui" => "Innsendingsmaskin", "nordugrid-job-submissiontime" => "Innsendingstid (GMT)", "nordugrid-job-usedcputime" => "Brukt CPU-tid", "nordugrid-job-usedwalltime" => "Brukt klokketid", "nordugrid-job-completiontime" => "Avslutningstid (GMT)", "nordugrid-job-sessiondirerasetime" => "Slettetid (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy forfallstid (GMT)", "nordugrid-job-usedmem" => "Brukt minne (KB)", "nordugrid-job-errors" => "Feil", "nordugrid-job-exitcode" => "Returkode", "nordugrid-job-jobname" => "Navn", "nordugrid-job-runtimeenvironment" => "Runtimemiljø", "nordugrid-job-cpucount" => "Forlangte CPU-er", "nordugrid-job-executionnodes" => "Ekseekveringsnoder", "nordugrid-job-gmlog" => "GM loggfil", "nordugrid-job-clientsoftware" => "Klientversjon", "nordugrid-job-rerunable" => "Omkjørbar", "nordugrid-job-reqcput" => "Forlangt tid (UtgÃ¥tt)", "nordugrid-job-gridlog" => "Gridloggfil (UtgÃ¥tt)", "nordugrid-job-lrmscomment" => "LRMS-kommentar (UtgÃ¥tt)", "nordugrid-authuser-name" => "Navn", "nordugrid-authuser-sn" => "Subjektnavn", "nordugrid-authuser-freecpus" => "Ledige CPU-er", "nordugrid-authuser-diskspace" => "Ledig diskplass (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Navn", "nordugrid-se-aliasname" => "Lagringselementalias", "nordugrid-se-type" => "Lagringselementtype", "nordugrid-se-acl" => "Autoriserte VO-er", "nordugrid-se-freespace" => "Ledig plass (MB)", "nordugrid-se-totalspace" => "Totalt utrymme (MB)", "nordugrid-se-url" => "Kontakt-URL", "nordugrid-se-baseurl" => "Kontakt-URL (UtgÃ¥tt)", "nordugrid-se-accesscontrol" => "Tilgangskontroll", "nordugrid-se-authuser" => "Autorisert bruker", "nordugrid-se-location" => "Postnummer", "nordugrid-se-owner" => "Eier", "nordugrid-se-middleware" => "Grid-middleware", "nordugrid-se-issuerca" => "Sertifikatutsteder", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domenenavn", "nordugrid-rc-aliasname" => "Replikkakatalog-Alias", "nordugrid-rc-baseurl" => "Kontakt-URL", "nordugrid-rc-authuser" => "Autorisert bruker (DN)", "nordugrid-rc-location" => 
"Postnummer", "nordugrid-rc-owner" => "Eier", "nordugrid-rc-issuerca" => "Sertifikatutsteder" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Kan ikke lese toppnivÃ¥ indekstjenere", "2" => "Ingen av de lokale indekstjenerne returnerte oppkoblingen", "3" => " dÃ¥lig konfigurering eller begäran drog över tiden", "4" => "Ingen gridjobber funnet", "5" => "Ingen informasjon funnet", "6" => "Tjener utilgjengelig", "7" => " - reload senere", "8" => "Ingen køinformasjon funnet", "9" => "Ingen poster funnet", "10" => "Ingen brukere funnet", "11" => "Ikke autorisert pÃ¥ ", "12" => "svarer ikke", "13" => "Ingen nye jobber funnet for ", // debug messages "101" => " Monitor timeout for GRIS: ", "102" => " sek for oppkobling og ", "103" => " sek for søk", "104" => " sek brukere for søk", "105" => "Viser resurser ogsÃ¥ i ", "106" => "Spurte toppnivÃ¥ indekstjenere: ", "107" => "Fikk geografiske data, skannede klynger: ", "108" => " klynger sortert etter geografiske data", "109" => "Søk etter klyngeattributter", "110" => "Søk etter køattributter", "111" => "Ingen data fra ", "112" => " Er oppe i ", "113" => " har ingen resurser Ã¥ tilby", "114" => " Monitor timeout for GIIS: ", "115" => "Hopper over GRIS: ", "116" => "ikke en ", "117" => "Verifiserer oppkobling: ", "118" => "OK", "119" => "Hittil, detekterte resurser av slag ", "120" => "LDAP-feil ved søk etter ", "121" => " status ved ", "122" => "Svartelistede: ", "123" => "Registrert funnet for ", "124" => "Søk etter lagringselementattributter", "125" => "Søk etter brukere", "126" => "Søk etter jobb", "127" => " har jobb ", "128" => " uten være autorisert", "129" => "Kan ikke lade objektdata: feil ", "130" => " Monitor timeout for EMIR: ", // icon titles "301" => "Reload", "302" => "Skriv ut", "303" => "Hjelp", "304" => "Lukk", "305" => "Rød", "306" => "GrÃ¥", "307" => "Alle brukere", "308" => "Aktive brukere", "309" => "Søk", "310" => "Lagringsenheter", "311" => "VO-er", "312" => "Flagg for ", "313" => " gridprosesser og ", "314" => " lokale prosesser", // auxilliary strings "401" => "Prosesser", "402" => "Grid", "403" => "Lokalt", "404" => "Verden", "405" => "TOTALT", "406" => " klynger", "407" => "en masse", "408" => " GB", "409" => " ALLE", "410" => "Klynge", "411" => "Kø", "412" => "Jobb", "413" => "Bruker", "414" => "Lagringsenhet", "415" => "Replikkakatalog", "416" => "Definer søkeattributter for objekt : ", "417" => "Det søkes logisk OG av alle uttrykkene.", "418" => "La det høyre feltet stÃ¥ tomt for Ã¥ vise alt.", "419" => "Vis resurser eller objekt som samsvarer med ditt valg", "420" => "Særskilt navn", "421" => "Kan bruke totalt ", "422" => " klynger", "423" => "Resurs / objekt:", "424" => "Antall attributter (standard er 6):", "425" => "Objekt", "426" => "Neste", "427" => "Velg", "428" => "Gjenopprett", "429" => "VIS" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Österrike", "Armenia" => "Armenia", "Algeria" => "Algerie", "Belgium" => "Belgia", "Bulgaria" => "Bulgaria", "Canada" => "Canada", "China" => "Kina", "Czechia" => "Tsjekkia", "Denmark" => "Danmark", "Estonia" => "Estland", "Finland" => "Finland", "France" => "Frankrike", "Georgia" => "Georgia", "Germany" => "Tyskland", "Greece" => "Hellas", "Hungary" => "Ungarn", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italia", "Japan" => "Japan", "Latvia" => "Lettland", "Lithuania" => "Litauen", "Morocco" => "Marokko", "Netherlands" => "Nederland", "Norway" => "Norge", "Poland" => "Polen", "Portugal" => 
"Portugal", "Romania" => "Romania", "Russia" => "Russland", "SriLanka" => "Sri Lanka", "Sweden" => "Sverige", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Sveits", "Turkey" => "Tyrkia", "UK" => "Storbritannia", "Ukraine" => "Ukraina", "USA" => "USA", "World" => "Verden" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/da.inc0000644000000000000000000000013215067751327021454 xustar0030 mtime=1759498967.777617767 30 atime=1759498967.875493803 30 ctime=1759499030.791504005 nordugrid-arc-7.1.1/src/services/monitor/lang/da.inc0000644000175000002070000014253015067751327023363 0ustar00mockbuildmock00000000000000 N/A viser at brugeren ikke tildelte et navn.
    X viser at jobbet er slået ihjel af ejeren.
    ! viser at jobbet fejlede i systemet.
    Tryk på et navn for at få en detaljeret beskrivelse af jobbet."; $str_nam = "Navn på brugeren som angivet i det personlige certifikat. Tryk på navnet for at få en liste af alle ressourcer tilgængelige for denne bruger og alle brugerens job i systemet."; $str_sta = "Jobstatus som returneret af Gridmanageren (GM) og LRMS. Tilstandene er i sekventiel rækkefølge:
    ACCEPTED – jobbet er overført til systemet men endnu ikke behandlet
    PREPARING – inputfilerne hentes
    SUBMITTING – der udveksles data med LRMS
    INLRMS – jobbet overføres til LRMS; intern status tilføjes af informationssystemet. Mulige tilstande er:
    : Q – jobbet venter i kø
    : U – jobbets udførelse er udskudt på en travl knude (PBSPro)
    : S – jobbets udførelse er udskudt (Condor)
    : R, run – jobbet kører
    : E – jobbet afsluttes (PBS)
    FINISHING – uddatafilerne overføres af GM
    FINISHED – jobbet er færdigt; tidsstemplet tilføjes af informationssystemet
    CANCELING – jobbet afbrydes
    DELETED – jobbet er ikke afryddet på anmodning af brugeren men fjernet af GM fordi udløbstiden er passeret
    Hver tilstand kan rapporteres med et PENDING præfiks som betyder at GM forsøger at rykke jobbet op i næste tilstand"; $str_tim = "CPU-tid brugt af jobbet, minutter."; $str_mem = "Lager (RAM) brugt af jobbet, KB"; $str_cpu = "Antal processorer brugt af jobbet."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    Denne skærmside viser alle steder, der registrerer sig hos den øverste ARC indekseringstjeneste sorteret først efter land så efter maskinnavn. Udvalgte lokale parametre overvåges: klyngealias, kø, job, o.s.v. Brug "Search" tjenesten hvis du vil sammenligne klynger, køer, job, osv.
    Land
    ".$clickable.". Landets flag og navn fra tilgængelig resource. Tryk for vise landeinformation.
    Klynge
    ".$clickable.". Klyngealias som tildelt af ejeren. Højst 22 tegn vises. Tryk på aliases for deltaljerede klyngebeskrivelse.
    CPU-er
    Totalt antal CPU-er i en klynge. NB! Kun en del af disse kan bruges af Grid-brugere.
    Belastning (processer:Grid + lokale)
    ".$clickable.". Relativ klyngebelastning, svarende til antallet af optagede CPU-er . Grå felter viser processorer optaget af lokale job, røde felter viser CPU-er optaget af Grid-job. Tryk på feltet for at få en detaljeret liste med alle kørende Grid-job på klyngen , inklusiv antallet af processorer per job.
    I kø
    ".$clickable.". Antal job i køen på klyngen, vist som antallet Grid-job plus antal lokale job i køen. Tryk på det første antal for at få en liste af Grid-job i køen på klyngen
    ", "Land" => 30, "Sted" => 160, "CPU-er" => 10, "Belastning (processer: Grid+lokale)" => 210, "I kø" => 10 ), "clusdes" => array("0" => "Ressourcedetaljer for", "help" => "
    Attribut
    ".$clickable.". Klyngeattributnavn".$str_att."
    Værdi
    ".$str_val."
    ".$clickable.". Navn på batchkøer til rådighed for ARC brugere, som angivet af klyngeejere. ".$str_que."
    Status
    Køstatus. Fungerende køer viser som regel active status.
    CPU (min)
    Tidsgrænse for varigheden af job per kø, hvis sat, i CPU-minutter. Den første værdi er den nedre grænse, den anden er den øvre grænse. Hvis der ikke er en grænse (job med enhver varighed accepteres), vises N/A.
    Kørende
    Antal job der udføres i køen. Det totale antal job vises, med antallet af processorer optaget af Grid-job vist i parentes, fx (Grid: 12). NB! For parallelle multiprocessorjob kan tallet i parentes være større end antallet af job.
    I kø
    Antallet af job, der venter i køen på at komme til at køre. Det totale antal vises, med Grid-job i parentes, fx (Grid: 235)
    ", "Kø" => 0, "Mapping Queue" => 0, "Status" => 0, "Grænse (min)" => 0, "CPU-er" => 0, "kørende" => 0, "I Kø" => 0 ), "jobstat" => array("0" => "Job på:Job ID", "help" => "
    JOBLISTE:
    Job navn
    ".$clickable.". Navn på et job som tildelt af ejeren. Hvis der ikke er tildelt et navn, vises "N/A". Tryk på et navn for at få en detaljeret beskrivelse af jobbet.
    Ejer
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    ".$clickable.". Navn på batchkøen, hvor jobbet udføres. ".$str_que."
    CPU'er
    ".$str_cpu."
    JOBDETALJER:
    Attribut
    ".$clickable.". Jobattributens navn".$str_att."
    Værdi
    ".$str_val."
    ", "Job navn" => 0, "Ejer" => 0, "Status" => 0, "CPU (min)" => 0, "Kø" => 0, "CPU'er" => 0 ), "volist" => array("0" => "Virtuel Organisations", "help" => "
    Virtuel Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the navn to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtuel Organisation" => 0, "Medlemmer" => 0, "Server" => 0 ), "vousers" => array("0" => "Gridbrugerbase", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brugerens hjemmeinstitut som anmeldt af VO bestyreren. Kan være tom.
    E-mail
    ".$clickable.". Brugerens e-mail som anmeldt af VO bestyreren. Kan være tom. Tryk på adressen for sende en email til brugeren.
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information om", "help" => "
    Klynge:kø
    ".$clickable.". Navne på klynger of respektive køer (adskilt af kolon, ":") hvor en bruger er autoriseret til at indlevere job. Hvis brugeren ikke er autoriseret, vises beskeden: "Ikke authoriset på vært ...". Tryk på et klyngenavn for at få en detaljeret beskrivelse af klyngen. Tryk på et kønavn for at få en detaljeret beskrivelse af køen
    Ledige CPU'er
    Det aktuelle antal ledige CPU'er i en given kø til rådighed for brugeren, evt. tilføjet den øvre grænse for varigheden af jobs (i minutter). Fx betyder "3" at der er 3 CPU'er til rådighed for et job med ubegrænset køretid; "4:360" angiver at der er 4 CPU'er til rådighed for job, der kører mindre end 6 timer; "10:180 30" betyder at der er 10 CPU'er til rådighed for job, der kører mindre end 3 timer, samt 30 CPU'er til rådighed for jobs, der kan køre en vilkårlig tid; "0" betyder at der ikke er nogen CPU'er til rådighed for tiden, og nye job vil havne i en ventekø.
    Job i ventekø
Antal brugerjob, der forventes at være foran en brugers nye job i en ventekø. Antallet "0" betyder at jobbet forventes udført med det samme. NB! Det er kun et estimat, som kan tilsidesættes af lokale regler.
    Ledig diskplads (MB)
Diskplads til rådighed for brugeren i en given kø (i MegaBytes). NB! Det er kun et estimat, da de færreste klynger kan tilbyde faste diskpladskvoter.
    Jobnavn
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klynge
    ".$clickable.". Navn på klyngen hvor jobbet udføres. Tryk på et klyngenavn for at få detaljerede oplysninger om klyngen.
    ".$clickable.". Navn på batchkøen hvor jobbet udføres eller blev udført. ".$str_que."
CPU'er
    ".$str_cpu."
    ", "" => 0, "Jobnavn" => 0, "Status" => 0, "CPU (min)" => 0, "Klynge" => 0, "Kø" => 0, "CPU'er" => 0 ), "attlist" => array("0" => "Attributværdier", "help" => "
    Objekt
    ".$clickable.". Navn objektet hvis attributter vises. Det kan være et klyngenavn, et klyngekønavn, et jobnavn, et brugernavn osv. Tryk på navneteksten for at få en detaljeret beskrivelse af objektet.
    Attribut
For hvert objekt kan en eller flere værdier vises. Kolonnetitel er det menneskelæselige navn (bortset fra visse MDS-specifikke attributter) og indholdet i kolonnen er attributværdier for objektet, som de blev indtastet i informationssystemet.
    ", "Objelt" => 0, "Attribut" => 0 ), "quelist" => array("0" => "Kø", "help" => "
    Attribut
    ".$clickable.". Navn på en køattribut".$str_att."
    Værdi
    ".$str_val."
    Jobnavn
    ".$clickable.". ".$str_job."
    Ejer
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Lager (RAM) (KB)
    ".$str_mem."
CPU'er
    ".$str_cpu."
    ", "" => 0, "Jobnavn" => 0, "Ejer" => 0, "Status" => 0, "CPU (min)" => 0, "Lager (RAM) (KB)" => 0, "CPU'er" => 0 ), "sestat" => array("0" => "Lagerenhed", "help" => "
    Alias
Lagerenhedens alias, som angivet i informationssystemet. Højst 15 tegn vises.
    Total lagerplads
Total lagerplads på harddisken, i GigaByte.
    Ledig Plads
Aktuel ledig diskplads, i GigaByte.
    Navn
Lagerenhedens navn, bestående af et logisk navn og et værtsnavn (adskilt af kolon, ":"). Det logiske navn bruges kun af hensyn til informationssystemet, for at skelne mellem forskellige lagringsenheder på den samme maskine.
    Grund-URL
Lagringsenhedens URL, som regel med gsiftp://-protokollen. Brug denne URL som udgangspunkt for at tilgå filer.
    Type
Lagringselementtype. "gridftp-based" angiver en harddisk med GridFTP-grænseflade.
    ", "#" => 0, "Alias" => 0, // "Total lagerplads" => 0, "Ledig/Total Diskplads, GB" => 0, "Navn" => 0, "Grund-URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authoriserede Gridbrugere:Aktive Gridbrugere", "help" => "
    Navn
    ".$clickable.". ".$str_nam."
    Tilknytning
    Brugertilknytning, uddraget af det personlige certifikat
    Job
    Antal brugerjob i systemet (kørende, ventende, afsluttede eller slettede)
    Steder
    Viser hvor mange steder brugeren er autoriseret
    ", "#" => 0, "Navn" => 0, "Tilknytning" => 0, "Job" => 0, "Steder" => 0 ), "userres" => array("0" => "", "Klynge:kø" => 0, "Ledige CPU'er" => 0, "Job, i ventekø" => 0, "Ledig diskplads (MegaByte)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Værdi" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objektklasse", "Mds-validfrom" => "Info gyldig fra (GMT)", "Mds-validto" => "Info gyldig til (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Frontend domænenavn", "nordugrid-cluster-aliasname" => "Klyngealias", "nordugrid-cluster-contactstring" => "Kontakttekst", "nordugrid-cluster-interactive-contactstring" => "Interaktiv kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-mail kontact", "nordugrid-cluster-acl" => "Authoriserede VO'er", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS detaljer", "nordugrid-cluster-architecture" => "Arkitektur", "nordugrid-cluster-opsys" => "Styresystem", "nordugrid-cluster-homogeneity" => "Homogen klynge", "nordugrid-cluster-nodecpu" => "CPU type (langsomste)", "nordugrid-cluster-nodememory" => "Lager (MB, mindst)", "nordugrid-cluster-totalcpus" => "CPU's, i alt", "nordugrid-cluster-cpudistribution" => "CPU:maskiner", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Harddiskplads, til rådighed (MB)", "nordugrid-cluster-sessiondir-total" => "Harddiskplads, i alt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Gridsession levetid (min)", "nordugrid-cluster-cache-free" => "Cachestørrelse, til rådighed (MB)", "nordugrid-cluster-cache-total" => "Cachestørrelse, i alt (MB)", "nordugrid-cluster-runtimeenvironment" => "Køretidsomgivelser", "nordugrid-cluster-localse" => "Lagringsenhed, lokal", "nordugrid-cluster-middleware" => "Gridmiddleware", "nordugrid-cluster-totaljobs" => "Jobs, samlet antal", "nordugrid-cluster-usedcpus" => "CPU'er, optagede", "nordugrid-cluster-queuedjobs" => "Jobs, i kø", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postnummer", "nordugrid-cluster-owner" => "Ejer", "nordugrid-cluster-issuerca" => "Certifikatudsteder", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP sammenhæng", "nordugrid-cluster-gridarea" => "Session area (FORÑ„LDET)", "nordugrid-cluster-gridspace" => "Griddiskplads (FORÑ„LDET)", "nordugrid-cluster-opsysdistribution" => "OS fordeling (FORÑ„LDET)", "nordugrid-cluster-runningjobs" => "Job, kørende (FORÑ„LDET)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Kønavn", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Køstatus", "nordugrid-queue-running" => "CPU'er, optagede", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Job, i ventekø (FORÑ„LDET)", "nordugrid-queue-maxrunning" => "Jobs, kørende (max)", "nordugrid-queue-maxqueuable" => "Jobs, kan udskydes (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unixbruger (max)", "nordugrid-queue-maxcputime" => "CPU-tid, max. (minutter)", "nordugrid-queue-mincputime" => "CPU-tid, min. (minutter)", "nordugrid-queue-defaultcputime" => "CPU-tid, default (minutter)", "nordugrid-queue-maxwalltime" => "Vægurstid, max. 
(minutter)", "nordugrid-queue-minwalltime" => "Vægurstid, min. (minutter)", "nordugrid-queue-defaultwalltime" => "Vægurstid, default (minutter)", "nordugrid-queue-schedulingpolicy" => "Skeduleringspolitik", "nordugrid-queue-totalcpus" => "CPU'er, i alt", "nordugrid-queue-nodecpu" => "CPUtype", "nordugrid-queue-nodememory" => "Lager (RAM) (MB)", "nordugrid-queue-architecture" => "Arkitektur", "nordugrid-queue-opsys" => "Styresystem", "nordugrid-queue-homogeneity" => "Homogen kø", "nordugrid-queue-gridrunning" => "CPU'er, optagede af Gridjobs", "nordugrid-queue-gridqueued" => "Gridjobs, i ventekø", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU'er per kø (FORÑ„LDET)", "nordugrid-queue-assignedcputype" => "CPUtype (FORÑ„LDET)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Ejer", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Aktivkø", "nordugrid-job-stdout" => "Standard uddatafil", "nordugrid-job-stderr" => "Standard fejlfil", "nordugrid-job-stdin" => "Standard inddatafile", "nordugrid-job-reqcputime" => "Anmodet CPU-tid", "nordugrid-job-reqwalltime" => "Anmodet vægurstid", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position i køen", "nordugrid-job-comment" => "LRMS kommentar", "nordugrid-job-submissionui" => "Indleveringsmaskine", "nordugrid-job-submissiontime" => "Indleveringstid (GMT)", "nordugrid-job-usedcputime" => "Forbrugt CPU-tid", "nordugrid-job-usedwalltime" => "Forbrugt vægurstid", "nordugrid-job-completiontime" => "Job afslutningstidspunkt (GMT)", "nordugrid-job-sessiondirerasetime" => "Sletningstidspunkt (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy udløbstidspunkt (GMT)", "nordugrid-job-usedmem" => "Benyttet lager (RAM) (KB)", "nordugrid-job-errors" => "Fejl", "nordugrid-job-exitcode" => "Afslutningskode", "nordugrid-job-jobname" => "Navn", "nordugrid-job-runtimeenvironment" => "Køretidsomgivelser", "nordugrid-job-cpucount" => "Anmodede CPU'er", "nordugrid-job-executionnodes" => "Udførelsesknuder", "nordugrid-job-gmlog" => "GM logfil", "nordugrid-job-clientsoftware" => "klientversion", "nordugrid-job-rerunable" => "Genkørbare", "nordugrid-job-reqcput" => "Anmodet tid (Forældet)", "nordugrid-job-gridlog" => "Gridlogfil (Forældet)", "nordugrid-job-lrmscomment" => "LRMS kommentar (Forældet)", "nordugrid-authuser-name" => "Navn", "nordugrid-authuser-sn" => "Subjektnavn", "nordugrid-authuser-freecpus" => "Ledige CPU'er", "nordugrid-authuser-diskspace" => "Ledig harddiskplads (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Nabn", "nordugrid-se-aliasname" => "Lagerelement alias", "nordugrid-se-type" => "Lagerelement type", "nordugrid-se-acl" => "Authoriserede VO'er", "nordugrid-se-freespace" => "Legid plads (MB)", "nordugrid-se-totalspace" => "Total plads (MB)", "nordugrid-se-url" => "Kontakt URL", "nordugrid-se-baseurl" => "Kontact URL (Forældet)", "nordugrid-se-accesscontrol" => "Adgangskontrol", "nordugrid-se-authuser" => "Autoriseret bruger (DN)", "nordugrid-se-location" => "Postnummer", "nordugrid-se-owner" => "Ejer", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certifikatudsteder", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domænenavn", "nordugrid-rc-aliasname" => "Replikeringskatalogalias", "nordugrid-rc-baseurl" => "Kontact URL", 
"nordugrid-rc-authuser" => "Autoriseret bruger (DN)", "nordugrid-rc-location" => "Postnummer", "nordugrid-rc-owner" => "Ejer", "nordugrid-rc-issuerca" => "Certifikatudsteder" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Kan ikke topniveau ressourceindekser", "2" => "ingen af de lokale indekser returnerede en forbindelse", "3" => " dårlig konfigurations eller anmodning udløb", "4" => "Ingen Gridjobs fundet", "5" => "ingen information fundet", "6" => "Server ikke tilgængelig", "7" => " - genlæs senere", "8" => "ingen køinformationer fundet", "9" => "Ingen indgange fundet", "10" => "Ingen brugere fundet", "11" => "Ingen autoriserede på værten", "12" => "svarer ikke", "13" => "Fandt ingen nylige jobs for ", // debug messages "101" => " Monitor timeout for GRIS: ", "102" => " sek on forbindelse og ", "103" => " sek on søgning", "104" => " sek brugt på at søge", "105" => "Viser kun ressourcer i ", "106" => "Spurgte topniveau indeksservere: ", "107" => "Fik geokrafiske placeringer, skanned steder: ", "108" => " steder sorteret efter geografisk placering", "109" => "Leder efter klyngeattributter", "110" => "Leder efter køattributter", "111" => "Ingen daya fra ", "112" => " er oppe i ", "113" => " tilbyder ingen ressourcer", "114" => " Monitor timeouts for GIIS: ", "115" => "springer over GRIS: ", "116" => "ikke en ", "117" => "Checker forbindelse: ", "118" => "OK", "119" => "Så vidt, opdagede ressource at typen ", "120" => "LDAP fejl ved søgning ", "121" => " status ved ", "122" => "Sortlistet: ", "123" => "Registrant fundet for ", "124" => "Led efter SE attributter", "125" => "Led efter brugere", "126" => "Led efter jobs", "127" => " har job ", "128" => " men ikke autoriseret", "129" => "Kan ikke få objektdata: fejl ", "130" => " Monitor timeouts for EMIR: ", // icon titles "301" => "Genlæs", "302" => "Udskriv", "303" => "Hjælp", "304" => "Luk", "305" => "Rød", "306" => "Grå", "307" => "Alle brugere", "308" => "Aktive brugere", "309" => "Søg", "310" => "Lager", "311" => "VO-er", "312" => "Flaget for ", "313" => " Grid processer og ", "314" => " locale processer", // auxilliary strings "401" => "Processer", "402" => "Grid", "403" => "Lokal", "404" => "Verden", "405" => "TOTAL", "406" => " steder", "407" => "en masse", "408" => " GB", "409" => " ALLE", "410" => "Klynge", "411" => "Kø", "412" => "Job", "413" => "Bruger", "414" => "Lager", "415" => "Replikerings Kat.", "416" => "Definer attributter for at vise objektet: ", "417" => "logisk OG af alle udtrykkene findes", "418" => "Efterlad feltet længst til højre tomt for vise alt", "419" => "Vis de ressourcer eller objekter, du vil", "420" => "Distinguished name", "421" => "Kan bruge i alt ", "422" => " steder", "423" => "Ressource / objekt:", "424" => "Ant. attributter (def. 6):", "425" => "Objekt", "426" => "Næste", "427" => "Vælg een", "428" => "Nulstil", "429" => "VIS" ), // Post code conversion: only for [en]! 
"tlconvert" => array ( "Australia" => "Australien", "Austria" => "ÑŒstrig", "Armenia" => "Armenien", "Algeria" => "Algeriet", "Belgium" => "Belgien", "Bulgaria" => "Bulgarien", "Canada" => "Canada", "China" => "Kina", "Czechia" => "Tjekkiet", "Denmark" => "Danmark", "Estonia" => "Estland", "Finland" => "Finland", "France" => "Frankrig", "Georgia" => "Georgien", "Germany" => "Tyskland", "Greece" => "Grækenland", "Hungary" => "Ungarn", "Iceland" => "Island", "Ireland" => "Irland", "Italy" => "Italien", "Japan" => "Japan", "Latvia" => "Letland", "Lithuania" => "Lithauen", "Morocco" => "Marocco", "Netherlands" => "Nederlandene", "Norway" => "Norge", "Poland" => "Polen", "Portugal" => "Portugal", "Romania" => "Rumænien", "Russia" => "Rusland", "SriLanka" => "Sri Lanka", "Sweden" => "Sverige", "Slovakia" => "Slovakiet", "Slovenia" => "Slovenien", "Switzerland" => "Schweiz", "Turkey" => "Tyrkiet", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA", "World" => "Verden" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/uk.inc0000644000000000000000000000013215067751327021507 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.877493833 30 ctime=1759499030.805904778 nordugrid-arc-7.1.1/src/services/monitor/lang/uk.inc0000644000175000002070000017333515067751327023425 0ustar00mockbuildmock00000000000000 // -- Author: oxana.smirnova@hep.lu.se // Some common strings: $clickable = "ПОСИЛÐÐÐЯ"; $str_att = ", інтерпретировані Ð´Ð»Ñ Ð¿Ñ€Ð¾Ñтоти Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ (за виключеннÑм декількох атрибутів, Ñпецифічних Ð´Ð»Ñ ÑиÑтеми MDS). За кліком выводÑтьÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ñ†ÑŒÐ¾Ð³Ð¾ атрибута на вÑÑ–Ñ… відомих реÑурÑах ARC."; $str_val = "Ð—Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð°, запиÑане в інформаційній ÑиÑтемі."; $str_que = "Зазвичай черги розрізнÑютьÑÑ Ð°Ð±Ð¾ за допуÑтимою триваліÑтю обрахунку, або за допущеною групою кориÑтувачів. За кліком виводитьÑÑ Ð¿Ð¾Ð²Ð½Ð¸Ð¹ Ð¾Ð¿Ð¸Ñ Ñ‡ÐµÑ€Ð³Ð¸, що міÑтить ÑпиÑок вÑÑ–Ñ… відомих завдань: в обрахунку, в черзі та завершених."; $str_job = " Ім'Ñ Ð·Ð°Ð²Ð´Ð°Ð½Ð½Ñ, приÑвоєне хазÑїном.
N/A означає, що хазяїн не присвоїв ніякого імені.
X означає, що хазяїн відмінив виконання завдання.
! означає, що при виконанні завдання виникла помилка.
За кліком виводиться детальний опис завдання."; $str_nam = "Ім'я користувача, у відповідності до його особистого сертифікату. За кліком виводиться зведена таблиця всіх Грід-ресурсів, доступних даному користувачу, і список всіх його завдань, зареєстрованих на разі в системі."; $str_sta = "Стан завдання: стадія прогресу в ГМ або стан в ЛСКР. Послідовність можливих станів така:
ACCEPTED – завдання прийнято, але виконання ще не почалось
PREPARING – підвантажуються необхідні вхідні дані
SUBMITTING – завдання направляється до ЛСКР
INLRMS – управління завданням передано в ЛСКР; інформаційна система висвітлює відомості про внутрішній стан завдання. Можливі наступні стани:
: Q – завдання очікує у черзі
: U – завдання призупинено на перевантаженому вузлі (PBSPro)
: S – завдання призупинено (Condor)
: R, run – завдання виконується
: E – завдання завершується (PBS)
FINISHING – вихідні дані пересилаються за призначенням
FINISHED – завдання завершене; інформаційна система додає мітку часу завершення
CANCELING – завдання відміняється
DELETED – результати завдання не були вивантажені його хазяїном і були знищені сервером після того як сплив час зберігання (зазвичай 24 години).
До кожного стану може бути додана приставка \"PENDING:\", що означає, що ГМ не може в даний момент перейти до наступного етапу виконання через відповідні внутрішні обмеження."; $str_tim = "Процесорний час, витрачений завданням, у хвилинах."; $str_mem = "Об'єм оперативної пам'яті, що використовує завдання на разі, в кілобайтах"; $str_cpu = "Число процесорів, що займає завдання."; // Actual messages $message = array ( // Table headers and help "loadmon" => array( "0" => "Грід-монітор", "help" => "
У цьому вікні приведена таблиця всіх обчислювальних ресурсів, що реєструються у списки вищого рівня ARC. Таблиця впорядкована по англійській назві країни, і в кожній країні – за іменем керуючої машини. Для кожного ресурса виведені наступні параметри: назва, загальна кількість процесорів, число зайнятих процесорів, а також кількість завдань у черзі, як засланих через Грід, так і місцевих. Використовуйте утиліту \"Пошук\" для огляду та порівняння інших параметрів кластерів, черг, завдань і т.д.
Країна
".$clickable.". Прапор та назва країни, як слідує із доступного опису ресурсу. За кліком виводиться зведена таблиця тільки для цієї країни.
Ресурс
".$clickable.". Назва ресурса (зазвичай кластера), присвоєна власником. Довжина рядка не повинна перевищувати 22 символа. За кліком виводиться повний опис ресурсу (кластера).
ЦП
Загальна кількість процесорів (ядер) у кластері. Увага! Тільки частина з них може бути доступна користувачам грід.
Завантаженість (процесори)
".$clickable.". Відносна завантаженість кластера, виходячи із числа зайнятих процесорів. Сіра смуга відповідає кількості процесорів, зайнятих під місцеві завдання, а червона смуга вказує кількість процесорів, що виконують грід-завдання. За кліком виводиться список всіх активних грід-завдань на кластері, включаючи інформацію про число процесорів на кожне завдання.
Очікують
".$clickable.". Число всіх завдань, що стоять у черзі на даному кластері, представлене у вигляді суми грід- і локальних завдань. За кліком на першій цифрі виводиться список всіх завдань у черзі, засланих через грід.
", "Країна" => 30, "Ресурс" => 160, "ЦП" => 10, "Завантаженість (процесори)" => 210, "Очікують" => 10 ), "clusdes" => array( "0" => "Опис ресурсу", "help" => "
Атрибут
".$clickable.". Назви атрибутів кластера".$str_att."
Значення
".$str_val."
Черга
".$clickable.". Назви черг (присвоєні власниками), що є доступними для грід-користувачів. ".$str_que."
Стан
Стан черги. Активна черга зазвичай видає стан active.
Тривалість (хв)
Межі по часу на тривалість перебування завдання в черзі, якщо такі встановлені, у хвилинах процесорного часу. Перше значення відповідає нижній межі, друге – верхній. Якщо межі не встановлені (тобто черга приймає завдання будь-якої тривалості), виводиться мітка N/A.
Рахуються
Число завдань, що обраховуються в черзі. Показано загальне число завдань, причому число процесорів, зайнятих під грід-завдання, вказано в дужках, наприклад: (Грід: 12). Увага! За наявності паралельних багатопроцесорних завдань, число в дужках може перевищувати загальне число завдань.
Очікують
Число завдань, що чекають на виконання в черзі. Показано загальне число завдань, причому кількість завдань, засланих через Грід, вказано в дужках, наприклад: (Грід: 235).
", "Черга" => 0, "Mapping Queue" => 0, "Стан" => 0, "Тривалість (хв)" => 0, "ЦП" => 0, "Рахуються" => 0, "Очікують" => 0 ), "jobstat" => array( "0" => "завдання на:Ярлик завдання", "help" => "
СПИСОК завдань:
Ім'я завдання
".$clickable.". Ім'я завдання, присвоєне хазяїном. N/A означає, що хазяїн не присвоїв ніякого імені. За кліком виводиться детальний опис завдання.
Хазяїн
".$clickable.". ".$str_nam."
Стан
".$str_sta."
Час (хв)
".$str_tim."
Черга
".$clickable.". Назва черги ЛСКР, у котрій проходить виконання завдання.".$str_que."
ЦП
".$str_cpu."
ОПИС завдання:
Атрибут
".$clickable.". Назви атрибутів завдання.".$str_att."
Значення
".$str_val."
", "Ім'я завдання" => 0, "Хазяїн" => 0, "Стан" => 0, "Час (хв)" => 0, "Черга" => 0, "ЦП" => 0 ), "volist" => array( "0" => "Віртуальні організації", "help" => "
Віртуальні організації
".$clickable.". Група користувачів – зазвичай об'єднаних спільною ціллю чи ресурсами, – допущена до роботи хоча б на одному із ресурсів ARC. За кліком виводиться список членів групи.
Члени
Кількість членів групи.
Обслуговується
Адреса сервера, що підтримує базу даних членів групи.
", "Віртуальна організація" => 0, "Члени" => 0, "Обслуговується" => 0 ), "vousers" => array( "0" => "Користувачі", "help" => "
Ім'я
".$clickable.". ".$str_nam."
Місце роботи
Місце роботи користувача, у відповідності до запису у базі даних. Необов'язково.
Електронна пошта
".$clickable.". Адреса електронної пошти користувача, у відповідності до запису у базі даних. Необов'язково. За кліком створюється лист для користувача.
", "№" => 0, "Ім'я" => 0, "Місце роботи" => 0, "Електронна пошта" => 0 ), "userlist" => array( "0" => "Інформація про", "help" => "
Ресурс:черга
".$clickable.". Назви ресурсів (кластерів) та відповідних черг ЛСКР (розділені двокрапкою), доступних даному користувачу. Якщо доступ закритий, виводиться повідомлення "Немає доступу до ресурсу". За кліком на назві кластера виводиться повний опис ресурса (кластера). За кліком на назві черги виводиться повний опис черги.
вільних процесорів
Число вільних центральних процесорів, доступних у даній черзі для даного користувача на даний момент часу. Якщо черга має обмеження за часом на виконання завдань, ця межа вказана після числа процесорів (у хвилинах, розділяється двокрапкою). Наприклад, "3" означає, що 3 процесори доступно для завдань будь-якої тривалості; "4:360" означає, що 4 процесори доступно для завдань, час виконання яких не перевищує 6 годин; "10:180 30" означає, що 10 процесорів доступно для завдань, час виконання яких не перевищує 3 годин, і 30 процесорів доступно для завдань будь-якої тривалості; "0" означає, що вільних ресурсів немає, і завдання будуть направлені на очікування в черзі.
завдань у черзі
Кількість завдань користувача, що знаходяться у списку очікування перед новим завданням, засланим від імені даного користувача. Число "0" означає, що завдання можливо буде запущене на обрахунок негайно. Увага! Це лише приблизні значення, які можуть бути змінені локальними операторами.
доступний простір на сховищі (Мб)
Простір на локальному жорсткому диску, доступний даному користувачу у даній черзі (в мегабайтах). Увага! Це лише приблизні значення, оскільки більшість кластерів не підтримують дискові квоти.
Ім'я завдання
".$clickable.". ".$str_job."
Стан
".$str_sta."
Час (хв)
".$str_tim."
Ресурс
".$clickable.". Ім'я ресурсу (зазвичай кластера), на котрому проходить виконання завдання. За кліком виводиться повний опис ресурсу (кластера).
Черга
".$clickable.". Назва черги ЛСКР, у якій проходить виконання завдання. ".$str_que."
ЦП
".$str_cpu."
", "" => 0, "Ім'я завдання" => 0, "Стан" => 0, "Час (хв)" => 0, "Ресурс" => 0, "Черга" => 0, "ЦП" => 0 ), "attlist" => array( "0" => "Значення атрибутів", "help" => "
Об'єкт
".$clickable." Назва об'єкта, атрибути якого перераховані у рядку. Це може бути ім'я кластера, черги, завдання, користувача і т.д. За кліком виводиться змістовний опис об'єкту.
Атрибут
Для кожного об'єкта в таблиці приведені значення одного чи декількох його атрибутів. У заголовку стовпця вказана назва атрибута, інтерпретована для простоти читання (за виключенням декількох атрибутів, специфічних для системи MDS), а вмістом кожного стовпця являються значення відповідних атрибутів, що записані в інформаційній системі.
", "Об'єкт" => 0, "Атрибут" => 0 ), "quelist" => array( "0" => "Черга", "help" => "
Атрибут
".$clickable.". Назва атрибута черги".$str_att."
Значення
".$str_val."
Ім'я завдання
".$clickable.". ".$str_job."
Хазяїн
".$clickable.". ".$str_nam."
Стан
".$str_sta."
Час (хв)
".$str_tim."
ОЗУ (Кб)
".$str_mem."
ЦП
".$str_cpu."
", "" => 0, "Ім'я завдання" => 0, "Хазяїн" => 0, "Стан" => 0, "Час (хв)" => 0, "ОЗУ (Кб)" => 0, "ЦП" => 0 ), "sestat" => array( "0" => "Зберігальні пристрої", "help" => "
Назва
Назва зберігального пристрою, зареєстрована в інформаційній системі. Максимально допустима довжина: 15 символів.
Весь об'єм
Повний об'єм диску, Гб.
Вільно
Доступний простір на диску на разі, Гб.
Ім'я
Ім'я зберігального пристрою, складене із логічного імені та імені сервера (розділених двокрапкою). Логічне ім'я використовується тільки інформаційною системою для розрізнення зберігальних пристроїв на одному й тому ж сервері.
URL бази
URL зберігального пристрою, зазвичай протоколу gsiftp://. Використовуйте цю адресу як базову для доступу до файлів.
Тип
Тип зберігального пристрою. Тип "gridftp-based" означає, що це дисковий накопичувач з інтерфейсом GridFTP.
", "№" => 0, "Назва" => 0, // "Весь об'єм" => 0, "Вільний/весь об'єм, Гб" => 0, "Ім'я" => 0, "URL бази" => 0, "Тип" => 0 ), "allusers" => array( "0" => "Допущені користувачі:Активні користувачі", "help" => "
Ім'я
".$clickable.". ".$str_nam."
Місце роботи
Місце роботи користувача, у відповідності із записом у його сертифікаті.
Завдання
Число всіх завдань користувача, що знаходяться в системі (на обрахунку, в черзі та завершених).
Ресурси
Число кластерів, до яких даний користувач має допуск.
    ", "â„–" => 0, "Ім'Ñ" => 0, "МіÑце роботи" => 0, "ЗавданнÑ" => 0, "РеÑурÑи" => 0 ), "userres" => array( "0" => "", "РеÑурÑ:черга" => 0, "вільних процеÑорів" => 0, "завдань у черзі" => 0, "вільний проÑтір на Ñховищі (Мб)" => 0 ), "ldapdump" => array( "0" => "", "Ðтрибут" => 0, "ЗначеннÑ" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "ВідомоÑті дійÑні з (GMT)", "Mds-validto" => "ВідомоÑті дійÑні до (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Ім'Ñ ÐºÐµÑ€ÑƒÑŽÑ‡Ð¾Ñ— машини", "nordugrid-cluster-aliasname" => "Ðазва", "nordugrid-cluster-contactstring" => "Точка входу", "nordugrid-cluster-interactive-contactstring" => "Інтерактивна точка входу", "nordugrid-cluster-comment" => "Коментар", "nordugrid-cluster-support" => "Технічна підтримка", "nordugrid-cluster-acl" => "Допущені ВО", "nordugrid-cluster-lrms-type" => "ЛСКР, тип", "nordugrid-cluster-lrms-version" => "ЛСКР, верÑÑ–Ñ", "nordugrid-cluster-lrms-config" => "ЛСКР, подробиці", "nordugrid-cluster-architecture" => "Ðрхітектура", "nordugrid-cluster-opsys" => "Операційна ÑиÑтема", "nordugrid-cluster-homogeneity" => "ОднорідніÑть реÑурÑу", "nordugrid-cluster-nodecpu" => "ПроцеÑор, тип (найгірший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, найменьше)", "nordugrid-cluster-totalcpus" => "ПроцеÑори, уÑього", "nordugrid-cluster-cpudistribution" => "ПроцеÑори: вузли", "nordugrid-cluster-benchmark" => "Еталонний теÑÑ‚", "nordugrid-cluster-sessiondir-free" => "Сховище, доÑтупно (Мб)", "nordugrid-cluster-sessiondir-total" => "Сховище, веÑÑŒ об'єм (Мб)", "nordugrid-cluster-sessiondir-lifetime"=> "Ð§Ð°Ñ Ð¶Ð¸Ñ‚Ñ‚Ñ Ð³Ñ€Ñ–Ð´-ÑеанÑу (хв)", "nordugrid-cluster-cache-free" => "ДиÑковий кеш, вільно (Мб)", "nordugrid-cluster-cache-total" => "ДиÑковий кеш, уÑього (Мб)", "nordugrid-cluster-runtimeenvironment" => "Робоче Ñередовище", "nordugrid-cluster-localse" => "Локальний накопичувач", "nordugrid-cluster-middleware" => "Грід-ПЗ", "nordugrid-cluster-totaljobs" => "завдань, вÑього", "nordugrid-cluster-usedcpus" => "ПроцеÑори, зайнÑті", "nordugrid-cluster-queuedjobs" => "завдань у черзі (ЗÐСТÐРІЛИЙ)", "nordugrid-cluster-prelrmsqueued" => "Грід-завдань, що очікують на заÑилку", "nordugrid-cluster-location" => "Поштовий індекÑ", "nordugrid-cluster-owner" => "ВлаÑник", "nordugrid-cluster-issuerca" => "Центр Ñертифікації", "nordugrid-cluster-issuerca-hash" => "Хеш-код центра Ñертификації", "nordugrid-cluster-trustedca" => "Довірені центри Ñертификації", "nordugrid-cluster-nodeaccess" => "IP-з'Ñ”Ð´Ð½Ð°Ð½Ð½Ñ Ð²ÑƒÐ·Ð»Ñ–Ð²", "nordugrid-cluster-gridarea" => "ÐдреÑа ÑеанÑів (ЗÐСТÐРІЛИЙ)", "nordugrid-cluster-gridspace" => "Грід-диÑк (ЗÐСТÐРІЛИЙ)", "nordugrid-cluster-opsysdistribution" => "ДиÑтрибутив ОС (ЗÐСТÐРІЛИЙ)", "nordugrid-cluster-runningjobs" => "завдань в обрахунку (ЗÐСТÐРІЛИЙ)", "nordugrid-cluster-credentialexpirationtime" => "Термін дії Ñертифікату", "nordugrid-queue-name" => "І'Ð¼Ñ Ñ‡ÐµÑ€Ð³Ð¸", "nordugrid-queue-comment" => "Коментар", "nordugrid-queue-status" => "Стан черги", "nordugrid-queue-running" => "завдань в обрахунку", "nordugrid-queue-localqueued" => "Локальні Ð·Ð°Ð²Ð´Ð°Ð½Ð½Ñ Ñƒ черзі", "nordugrid-queue-prelrmsqueued" => "Грід-завданнÑ, що очікують на заÑилку", "nordugrid-queue-queued" => "завдань в черзі (ЗÐСТÐРІЛИЙ)", "nordugrid-queue-maxrunning" => "завдань в обрахунку (межа)", "nordugrid-queue-maxqueuable" => "завдань в черзі (межа)", "nordugrid-queue-maxuserrun" => "завдань на кориÑтувача (межа)", "nordugrid-queue-maxcputime" => "ТриваліÑть, найбільша (хв)", "nordugrid-queue-mincputime" => "ТриваліÑть, 
найменьша (ха)", "nordugrid-queue-defaultcputime" => "ТриваліÑть, за замовчуваннÑм (хв)", "nordugrid-queue-schedulingpolicy" => "Правила плануваннÑ", "nordugrid-queue-totalcpus" => "ПроцеÑори, вÑього", "nordugrid-queue-nodecpu" => "ПроцеÑор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Ðрхітектура", "nordugrid-queue-opsys" => "Операційна ÑиÑтема", "nordugrid-queue-homogeneity" => "ОднорідніÑть черги", "nordugrid-queue-gridrunning" => "Грід-завдань в обрахунку", "nordugrid-queue-gridqueued" => "Грід-завдань в черзі", "nordugrid-queue-benchmark" => "Еталонний теÑÑ‚", "nordugrid-queue-assignedcpunumber" => "ПроцеÑори (ЗÐСТÐРІЛИЙ)", "nordugrid-queue-assignedcputype" => "Тип процеÑора (ЗÐСТÐРІЛИЙ)", "nordugrid-job-globalid" => "Ярлик", "nordugrid-job-globalowner" => "ХазÑїн", "nordugrid-job-execcluster" => "Виконуючий клаÑтер", "nordugrid-job-execqueue" => "Виконуюча черга", "nordugrid-job-stdout" => "Стандартний потік виведеннÑ", "nordugrid-job-stderr" => "Стандартний потік видачі помилок", "nordugrid-job-stdin" => "Стандартний потік введеннÑ", "nordugrid-job-reqcputime" => "Запитаний процеÑÑорний чаÑ", "nordugrid-job-reqwalltime" => "Запитаний реальний чаÑ", "nordugrid-job-status" => "Стан", "nordugrid-job-queuerank" => "ÐŸÐ¾Ð»Ð¾Ð¶ÐµÐ½Ð½Ñ Ð² черзі", "nordugrid-job-comment" => "Коментар ЛСКР", "nordugrid-job-submissionui" => "ЗаÑилаючий клієнт", "nordugrid-job-submissiontime" => "Ð§Ð°Ñ Ð·Ð°Ñилки (GMT)", "nordugrid-job-usedcputime" => "ВикориÑтаний процеÑорний чаÑ", "nordugrid-job-usedwalltime" => "ВикориÑтаний реальний чаÑ", "nordugrid-job-completiontime" => "Ð§Ð°Ñ Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð½Ñ (GMT)", "nordugrid-job-sessiondirerasetime" => "Срок Ð·Ð½Ð¸Ñ‰ÐµÐ½Ð½Ñ (GMT)", "nordugrid-job-proxyexpirationtime" => "Ð—Ð°ÐºÑ–Ð½Ñ‡ÐµÐ½Ð½Ñ Ð´Ð¾Ð²Ñ–Ñ€ÐµÐ½Ð½Ñ (GMT)", "nordugrid-job-usedmem" => "ВикориÑÑ‚Ð°Ð½Ð½Ñ ÐžÐ—Ð£ (Кб)", "nordugrid-job-errors" => "Помилки", "nordugrid-job-exitcode" => "Код поверненнÑ", "nordugrid-job-jobname" => "Ім'Ñ", "nordugrid-job-runtimeenvironment" => "Рабоче Ñередовище", "nordugrid-job-cpucount" => "Запитано процеÑорів", "nordugrid-job-executionnodes" => "Виконуючі вузли", "nordugrid-job-gmlog" => "Журнальний Ð·Ð°Ð¿Ð¸Ñ Ð“Ðœ", "nordugrid-job-clientsoftware" => "ВерÑÑ–Ñ ÐºÐ»Ñ–Ñ”Ð½Ñ‚Ð°", "nordugrid-job-rerunable" => "МожливіÑть перезапуÑку", "nordugrid-job-reqcput" => "Запитаний Ñ‡Ð°Ñ (ЗÐСТÐРІЛИЙ)", "nordugrid-job-gridlog" => "Грід-Ð·Ð°Ð¿Ð¸Ñ (ЗÐСТÐРІЛИЙ)", "nordugrid-job-lrmscomment" => "Коментар ЛСКР (ЗÐСТÐРІЛИЙ)", "nordugrid-authuser-name" => "Ім'Ñ", "nordugrid-authuser-sn" => "Суб'єкт", "nordugrid-authuser-freecpus" => "Вільні ЦП", "nordugrid-authuser-diskspace" => "ДиÑк, доÑтупно (Мб)", "nordugrid-authuser-queuelength" => "завдань кориÑтувача в черзі", "nordugrid-se-name" => "Умовне ім'Ñ", "nordugrid-se-aliasname" => "Ðазва", "nordugrid-se-type" => "Тип", "nordugrid-se-acl" => "Допущені ВО", "nordugrid-se-freespace" => "Вільний об'єм (Мб)", "nordugrid-se-totalspace" => "ВеÑÑŒ об'єм (Мб)", "nordugrid-se-url" => "ÐдреÑа доÑтупу", "nordugrid-se-baseurl" => "ÐдреÑа доÑтупу (ЗÐСТÐРІЛИЙ)", "nordugrid-se-accesscontrol" => "Контроль доÑтупу", "nordugrid-se-authuser" => "Допущені кориÑтувачі (DN)", "nordugrid-se-location" => "Поштовий індекÑ", "nordugrid-se-owner" => "ВлаÑник", "nordugrid-se-middleware" => "Грід-ПЗ", "nordugrid-se-issuerca" => "Центр Ñертифікації", "nordugrid-se-issuerca-hash" => "Хеш-код центра Ñертификації", "nordugrid-se-trustedca" => "Довірені центи Ñертификації", "nordugrid-se-comment" => "Коментар", "nordugrid-rc-name" => "Доменне 
ім'Ñ", "nordugrid-rc-aliasname" => "Ðазва", "nordugrid-rc-baseurl" => "Контактна адреÑа", "nordugrid-rc-authuser" => "Допущені кориÑтувачі (DN)", "nordugrid-rc-location" => "Поштовий індекÑ", "nordugrid-rc-owner" => "ВлаÑник", "nordugrid-rc-issuerca" => "Сертифікат виданий" ), "errors" => array( "1" => "Ðеможливо опитати каталоги вищого рівнÑ", "2" => "Жоден із міÑцевих каталогів не відзиваєтьÑÑ", "3" => " невірна ÐºÐ¾Ð½Ñ„Ñ–Ð³ÑƒÑ€Ð°Ñ†Ñ–Ñ Ð°Ð±Ð¾ Ñплив Ñ‡Ð°Ñ Ð·Ð°Ð¿Ð¸Ñ‚Ñƒ", "4" => "Ðемає грід-завдань", "5" => "Ðемає інформації", "6" => "Служба недоÑтупна", "7" => " - Ñпробуйте поновити пізніше", "8" => "Ðемає інформації про чергу", "9" => "Ðемає даних", "10" => "Ðемає кориÑтувачів", "11" => "Ðемає доÑтупу до реÑурÑу", "12" => "не відзиваєтÑÑ", "13" => "Ðа разі немає завдань кориÑтувача ", "101" => " Ð§Ð°Ñ Ð½Ð° зв'Ñзок із локальним каталогом: ", "102" => " Ñ Ð½Ð° з'Ñ”Ð´Ð½Ð°Ð½Ð½Ñ Ñ‚Ð° ", "103" => " Ñ Ð½Ð° пошук", "104" => " Ñ Ð·Ð°Ñ‚Ñ€Ð°Ñ‡ÐµÐ½Ð¾ на пошук", "105" => "ÐŸÐµÑ€ÐµÑ€Ð°Ñ…ÑƒÐ²Ð°Ð½Ð½Ñ Ñ€ÐµÑурÑів: ", "106" => "Опитано каталогів верхнього рівнÑ: ", "107" => "Отримані географічні координати, проÑкановано реÑурÑів: ", "108" => " реÑурÑів впорÑдковано за геополітичною ознакою", "109" => "Пошук атрибутів клаÑтера", "110" => "Пошук атрибутів черги", "111" => "Ðемає даних з ", "112" => " функціонує в країні: ", "113" => " не має реÑурÑів", "114" => " Ð§Ð°Ñ Ð½Ð° з'вÑзок із глобальним каталогом: ", "115" => "ІгноруєтьÑÑ Ñ€ÐµÑурÑ: ", "116" => "не відповідає типу ", "117" => "Перевірка зв'Ñзку: ", "118" => "так!", "119" => "Ðа разі виÑвлено реÑурÑів типу ", "120" => "Помилка LDAP при пошуку на ", "121" => "-Ñтан на ", "122" => "Заблокований: ", "123" => "ВиÑвлено реєÑтранта ", "124" => "Пошук атрибутів накопичувачів", "125" => "Пошук кориÑтувачів", "126" => "Пошук завдань", "127" => " запуÑтив(ла) Ð·Ð°Ð²Ð´Ð°Ð½Ð½Ñ ", "128" => " не будучи допущеним(ою)", "129" => "Ðемає інформації про об'єкт: помилка ", "130" => " Ð§Ð°Ñ Ð½Ð° з'вÑзок із глобальним каталогом: ", "301" => "Перезавантажити", "302" => "Друк", "303" => "Допомога", "304" => "Закрити", "305" => "Червоний", "306" => "Сірий", "307" => "Ð’ÑÑ– кориÑтувачі", "308" => "Ðктивні кориÑтувачі", "309" => "Пошук", "310" => "Ðакопичувачі", "311" => "Віртуальні организації", "312" => "Прапор країни: ", "313" => " процеÑорів під грід та ", "314" => " процеÑорів під міÑцеві", "401" => "ПроцеÑи", "402" => "Грід", "403" => "міÑцеві", "404" => "Світ", "405" => "ЗÐГÐЛОМ", "406" => " об'єктів", "407" => "купа", "408" => " Гб", "409" => " ВСІ", "410" => "КлаÑтер", "411" => "Черга", "412" => "завданнÑ", "413" => "КориÑтувач", "414" => "Ðакопичувач", "415" => "Каталог реплік", "416" => "Задайте атрибути Ð´Ð»Ñ Ð¾Ð³Ð»Ñду; вибраний об'єкт: ", "417" => "Пошук проводитьÑÑ Ð´Ð»Ñ Ð»Ð¾Ð³Ñ–Ñ‡Ð½Ð¾Ð³Ð¾ І вÑÑ–Ñ… виразів", "418" => "Ðе заповнюйте праве поле, Ñкщо фільтр непотрібен", "419" => "ОглÑд реÑурÑів чи об'єктів за вибором", "420" => "Виділене ім'Ñ", "421" => "Може викориÑтовувати ", "422" => " клаÑтерів", "423" => "РеÑÑƒÑ€Ñ / об'єкт:", "424" => "КількіÑть атрибутів (6 за зам.):", "425" => "Об'єкт", "426" => "Далі", "427" => "Виберіть", "428" => "ОчиÑтити", "429" => "ПОКÐЗÐТИ" ), // Country name conversion, no postcode! 
"tlconvert" => array ( "Australia" => "ÐвÑтраліÑ", "Austria" => "ÐвÑтріÑ", "Armenia" => "ÐрменіÑ", "Algeria" => "Ðлжир", "Belgium" => "БельгіÑ", "Bulgaria" => "БолгаріÑ", "Canada" => "Канада", "China" => "Китай", "Czechia" => "ЧехіÑ", "Denmark" => "ДаніÑ", "Estonia" => "ЕÑтоніÑ", "Finland" => "ФінлÑндіÑ", "France" => "ФранціÑ", "Georgia" => "ГрузіÑ", "Germany" => "Ðімеччина", "Greece" => "ГреціÑ", "Hungary" => "Угорщина", "Iceland" => "ІÑландіÑ", "Ireland" => "ІрландіÑ", "Italy" => "ІталіÑ", "Japan" => "ЯпоніÑ", "Latvia" => "ЛатвіÑ", "Lithuania" => "Литва", "Morocco" => "Марокко", "Netherlands" => "Ðідерланди", "Norway" => "ÐорвегіÑ", "Poland" => "Польща", "Portugal" => "ПортугаліÑ", "Romania" => "РумуніÑ", "Russia" => "РоÑÑ–Ñ", "SriLanka" => "Шрі-Ланка", "Sweden" => "ШвеціÑ", "Slovakia" => "Словаччина", "Slovenia" => "СловеніÑ", "Switzerland" => "ШвейцаріÑ", "Turkey" => "Туреччина", "UK" => "ВеликобританіÑ", "Ukraine" => "Україна", "USA" => "СШÐ", "World" => "Світ" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/en.inc0000644000000000000000000000013215067751327021472 xustar0030 mtime=1759498967.777617767 30 atime=1759498967.875493803 30 ctime=1759499030.794736436 nordugrid-arc-7.1.1/src/services/monitor/lang/en.inc0000644000175000002070000014702215067751327023402 0ustar00mockbuildmock00000000000000 N/A indicates that user did not assign any name.
    X indicates that the job has been killed by the owner
    ! indicates that the job failed in the system
    Click on a name to get a detailed description of the job."; $str_nam = "Name of the user, as specified in the personal certificate. Click on a name to get the list of all the resources available for this user and all the jobs by this user which are currently in the system."; $str_sta = "Job status as returned by the Grid Manager (GM) and LRMS. In sequential order, the states are:
    ACCEPTED – job submitted but not yet processed
PREPARING – input files are being retrieved
    SUBMITTING – interaction with LRMS ongoing
    INLRMS – the job is transferred to the LRMS; internal status is added by the infosystem. Possible states are:
    : Q – job is queued
    : U – job is in a suspended state on a busy node (PBSPro)
    : S – job is in a suspended state (Condor)
    : R, run – job is running
    : E – job is finishing (PBS)
    FINISHING – output files are being transferred by the GM
    FINISHED – job is finished; time stamp is added by the infosystem
    CANCELING – job is being cancelled
    DELETED – job not cleaned upon user request but removed by the GM due to expiration time
    Each of the states can be reported with the PENDING: prefix, meaning the GM is attempting to move the job to the next state"; $str_tim = "CPU time used by the job, minutes."; $str_mem = "Memory consumed by the job, KB"; $str_cpu = "Number of processors used by the job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
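// A minimal sketch (an assumption, not the monitor's actual code) of the GM/LRMS
// state sequence documented above; isPending() illustrates the "PENDING:" prefix.
$gmStates = array("ACCEPTED", "PREPARING", "SUBMITTING", "INLRMS",
                  "FINISHING", "FINISHED", "CANCELING", "DELETED");
function isPending($status) {
    // e.g. "PENDING:INLRMS" means the GM is waiting to move the job onwards
    return strpos($status, "PENDING:") === 0;
}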
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
This screen displays all the sites registering to the top ARC indexing service, sorted by country and then by host name. Selected site parameters are monitored: cluster alias, total CPU capacity, and number of running and queued jobs, both Grid and local ones. Use the "Search" utility if you want to compare other cluster, queue, or job characteristics.
    Country
    ".$clickable.". Country flag and name as deduced from available resource descriptions. Click to show only this country info.
    Cluster
    ".$clickable.". Cluster alias as assigned by the owner. Maximal displayed length is 22 characters. Click on the alias to get a detailed cluster description.
    CPUs
Total number of CPUs in a cluster. NB! Only a fraction of those can be actually available for the Grid users.
Load (processes: Grid+local)
    ".$clickable.". Relative cluster load, corresponding to the occupied CPUs count. Grey bars indicate processors occupied by the localy submitted jobs, while red bars show CPUs occupied by jobs submitted via Grid. Click on the bar to get the detailed list of all the running Grid jobs on the cluster, including amount of processors per job.
    Queueing
    ".$clickable.". Number of all queued jobs on the cluster, shown as number of queueing grid jobs plus number of locally submitted queueing jobs. Click the first number to get the list of queued Grid jobs on the cluster.
    ", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Attribute
    ".$clickable.". Cluster attribute name".$str_att."
    Value
    ".$str_val."
    Queue
    ".$clickable.". Names of batch queues available for the ARC users, as set by cluster owners. ".$str_que."
    Status
    Queue status. Operating queue typically reports active status.
    CPU (min)
Time limit for job duration per queue, if set, in CPU-minutes. The first displayed value is the lower limit, the second the upper one. If limits are not set (jobs of any duration are accepted), the N/A tag is shown.
    Running
    Number of jobs running in the queue. Total number of jobs is shown, with number of processors occupied by Grid-submitted jobs displayed in parentheses, e.g. (Grid: 12). NB! For parallel multiprocessor jobs, number in parentheses can be larger than number of jobs.
Queueing
    Number of jobs awaiting execution in the queue. Total number of jobs is shown, with Grid-submitted jobs displayed in parentheses, e.g. (Grid: 235)
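// A minimal sketch (assumption) of the "CPU (min)" rendering described above:
// lower and upper limits when set, otherwise the N/A tag.
function formatCpuLimits($lower, $upper) {
    if ($lower === null && $upper === null) return "N/A"; // no limits: any duration
    return trim(($lower === null ? "" : $lower) . " " . ($upper === null ? "" : $upper));
}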
    ", "Queue" => 0, "Mapping Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name of a job as assigned by the owner. If no name has been assigned, "N/A" is displayed. Click on a name to get a detailed description of the job.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name of the batch queue in which the job is being executed. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribute
    ".$clickable.". Job attribute name".$str_att."
    Value
    ".$str_val."
    ", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the name to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
User's home institute as entered by a VO manager. Can be empty.
    E-mail
    ".$clickable.". Users e-mail as entered by a VO manager. Can be empty. Click the address to send an e-mail to the user.
    ", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Cluster:queue
    ".$clickable.". Names of clusters and respective queues (separated by a column, ":") where a user is authorized to submit jobs. If a user is not authorized, message "Not authorised at host ..." is displayed. Click a cluster name to get a detailed cluster description. Click on a queue name to get a detailed queue description.
    Free CPUs
Number of free CPUs available in a given queue for the user at this moment, optionally appended with the upper time limit value (in minutes). For example, "3" means 3 CPUs available for a job of unlimited running time; "4:360" indicates there are 4 CPUs available for jobs not longer than 6 hours; "10:180 30" means there are 10 CPUs available for jobs not exceeding 3 hours, plus 30 CPUs available for jobs of any length; "0" means there are no CPUs available at the moment, and the jobs will be placed in a waiting queue. (See the sketch after this list.)
    Queued jobs
Number of this user's jobs expected to sit ahead of a newly submitted job in a waiting queue. A value of "0" means the job is expected to be executed immediately. NB! This is only an estimate, which can be overridden by local policies.
    Free disk (MB)
    Disk space available for the user in a given queue (in Megabytes). NB! This is only an estimation, as most clusters do not provide fixed disk quotas.
    Job name
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name of the cluster at which the job is being executed. Click on a cluster name to get detailed information about the cluster.
    Queue
    ".$clickable.". Name of the batch queue in which the job is/was executed. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Name of the object which attributes are displayed. It can be a cluster name, a clusters queue name, a job name, a user name etc. Click on the string to get a detailed decscription of the object.
    Attribute
For each object, one or more attribute values can be listed. Column title is the human-readable attribute name (except for some MDS-specific attributes), and the column contents are attribute values per object as entered in the Information System.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Queue", "help" => "
    Attribute
    ".$clickable.". Name of a queue attribute".$str_att."
    Value
    ".$str_val."
    Job name
    ".$clickable.". ".$str_job."
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memory (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage element alias as specified in the Information System. Maximal displayed length is 15 characters.
    Tot. space
    Total disk space, GB.
    Free space
    Disk space available at the moment, GB.
    Name
Storage element name, consisting of a logical name and host name (separated by a colon, ":"). Logical name is used only for information system purposes, to distinguish between different storage elements hosted by the same machine.
    Base URL
    URL for the storage element, typically a gsiftp:// protocol. Use this URL as the base to access files.
    Type
    Storage element type. "gridftp-based" indicates a disk storage with GridFTP interface.
    ", "#" => 0, "Alias" => 0, // "Tot. space" => 0, "Free/total space, Gb" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's affiliation, derived from the personal certificate
    Jobs
    Count of all user jobs in the system (running, pending, finished or deleted)
    Sites
    Shows how many sites authorise this user
    ", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Queued jobs" => 0, "Free disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)", "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session lifetime (min)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-comment" => "Comment", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Total occupied CPUs", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. 
(minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-maxwalltime" => "Walltime, max. (minutes)", "nordugrid-queue-minwalltime" => "Walltime, min. (minutes)", "nordugrid-queue-defaultwalltime" => "Walltime, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-homogeneity" => "Homogeneous queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Requested CPU time", "nordugrid-job-reqwalltime" => "Requested wall clock time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-comment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall clock time", "nordugrid-job-completiontime" => "Job completion time (GMT)", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-acl" => "Authorised VOs", "nordugrid-se-freespace" => "Free space (MB)", "nordugrid-se-totalspace" => "Total space (MB)", "nordugrid-se-url" => "Contact URL", "nordugrid-se-baseurl" => "Contact URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Access control", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain 
name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => "Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "8" => "No queue information found", "9" => "No entries found", "10" => "No users found", "11" => "Not authorised at host", "12" => "does not answer", "13" => "No recent jobs found for ", // debug messages "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", "130" => " Monitor timeouts for EMIR: ", "131" => " Monitor timeouts for ARCHERY depends on OS DNS resolver settings (In DNS cache we trust!)", "132" => "Failed to query the following ARCHERY endpoint: ", "133" => "Reached the recursive loop limit while querying ARCHERY endpoint: ", // icon titles "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", // auxilliary strings "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "User", "414" => "Storage", "415" => "Replica Cat.", "416" => "Define attributes to display for the object: ", "417" => "AND of all the expressions will be matched", "418" => "Leave the righmost field empty to show everything", "419" => "Display resources or objects of your choice", "420" => "Distinguished name", "421" => "Can use a total of ", "422" => " sites", "423" => "Resource / object:", "424" => "Nr.of attributes (def. 6):", "425" => "Object", "426" => "Next", "427" => "Select one", "428" => "Reset", "429" => "SHOW" ), // Post code conversion: only for [en]! 
"tlconvert" => array ( "AU" => "Australia", "AT" => "Austria", "AM" => "Armenia", "DZ" => "Algeria", "BE" => "Belgium", "BG" => "Bulgaria", "CA" => "Canada", "CL" => "Chile", "CN" => "China", "CZ" => "Czechia", "DK" => "Denmark", "EE" => "Estonia", "FI" => "Finland", "FIN" => "Finland", "SF" => "Finland", "FR" => "France", "GE" => "Georgia", "DE" => "Germany", "D" => "Germany", "GR" => "Greece", "HK" => "HongKong", "HU" => "Hungary", "IL" => "Israel", "IS" => "Iceland", "IR" => "Ireland", "IE" => "Ireland", "IT" => "Italy", "JP" => "Japan", "KEK" => "Japan", "TOKYO" => "Japan", "LV" => "Latvia", "LT" => "Lithuania", "MA" => "Morocco", "NL" => "Netherlands", "NO" => "Norway", "N" => "Norway", "PL" => "Poland", "PT" => "Portugal", "RO" => "Romania", "RU" => "Russia", "SU" => "Russia", "LK" => "SriLanka", "SE" => "Sweden", "SK" => "Slovakia", "SI" => "Slovenia", "ES" => "Spain", "CH" => "Switzerland", "TW" => "Taiwan", "TR" => "Turkey", "UK" => "UK", "UA" => "Ukraine", "COM" => "USA", "GOV" => "USA", "OV" => "USA", "USA" => "USA", "US" => "USA", "EDU" => "USA", "DU" => "USA", "RG" => "World" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/us.inc0000644000000000000000000000013215067751327021517 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.877493833 30 ctime=1759499030.807052575 nordugrid-arc-7.1.1/src/services/monitor/lang/us.inc0000644000175000002070000014277515067751327023441 0ustar00mockbuildmock00000000000000 [<2-letter code>] // -- Translation: // -- Author: oxana.smirnova@hep.lu.se // Some common strings: $clickable = "CLICKABLE"; $str_att = ", human-readable except of some MDS-specific attributes. Click on the attribute name to get the list of the attribute values across the ARC universe."; $str_val = "Attribute value as entered in the Information System."; $str_que = "Typically, different queues correspond to different allowed task duration, or to different groups of users. Click on a queue name to get detailed information about the queue, including running, queued, and finished tasks."; $str_job = "Name of a job as assigned by the job owner.
    N/A indicates that the user did not assign any name.
    X indicates that the job has been killed by the owner
    ! indicates that the job failed in the system
    Click on a name to get a detailed description of the job."; $str_nam = "Name of the user, as specified in the personal certificate. Click on a name to get the list of all the resources available for this user and all the jobs by this user which are currently in the system."; $str_sta = "Job status as returned by the Grid Manager (GM) and LRMS. In sequential order, the states are:
    ACCEPTED – job submitted but not yet processed
    PREPARING – input files are being retrieved
    SUBMITTING – interaction with LRMS ongoing
    INLRMS – the job is transferred to the LRMS; internal status is added by the infosystem. Possible states are:
    : Q – job is queued
    : U – job is in a suspended state on a busy node (PBSPro)
    : S – job is in a suspended state (Condor)
    : R, run – job is running
    : E – job is finishing (PBS)
    FINISHING – output files are being transferred by the GM
    FINISHED – job is finished; time stamp is added by the infosystem
    CANCELING – job is being cancelled
    DELETED – job not cleaned upon user request but removed by the GM due to expiration time
    Each of the states can be reported with the PENDING: prefix, meaning the GM is attempting to move the job to the next state"; $str_tim = "CPU time used by the job, minutes."; $str_mem = "Memory consumed by the job, KB"; $str_cpu = "Number of processors used by the job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    This screen displays all the sites registering to the top ARC indexing service, sorted by country then by host name. Selected site parameters are monitored: cluster alias, total CPU capacity and number of running and queued jobs, both Grid and local ones. Use the "Search" utility if you want to compare other cluster, queue, job etc. characteristics
    Country
    ".$clickable.". Country flag and name as deduced from available resource descriptions. Click to show only this country info.
    Cluster
    ".$clickable.". Cluster alias as assigned by the owner. Maximal displayed length is 22 characters. Click on the alias to get a detailed cluster description.
    CPUs
    Total number of CPUs in a cluster. NB! Only a fraction of those may actually be available for the Grid users.
    Load (processes:Grid+local)
    ".$clickable.". Relative cluster load, corresponding to the occupied CPUs count. Grey bars indicate processors occupied by the localy submitted jobs, while red bars show CPUs occupied by jobs submitted via Grid. Click on the bar to get the detailed list of all the running Grid jobs on the cluster, including amount of processors per job.
    Queueing
    ".$clickable.". Number of all queued jobs on the cluster, shown as number of queueing grid jobs plus number of locally submitted queueing jobs. Click the first number to get the list of queued Grid jobs on the cluster.
    ", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Resource Details for", "help" => "
    Attribute
    ".$clickable.". Cluster attribute name".$str_att."
    Value
    ".$str_val."
    Queue
    ".$clickable.". Names of batch queues available for the ARC users, as set by cluster owners. ".$str_que."
    Status
    Queue status. Operating queue typically reports active status.
    CPU (min)
    Time limit for job duration per queue, if set, in CPU-minutes. First displayed value is the lower limit, second - the upper one. If limits are not set (jobs of any duration are accepted), N/A tag is shown.
    Running
    Number of jobs running in the queue. Total number of jobs is shown, with number of processors occupied by Grid-submitted jobs displayed in parentheses, e.g. (Grid: 12). NB! For parallel multiprocessor jobs, number in parentheses can be larger than number of jobs.
    Queueing
    Number of jobs awaiting execution in the queue. Total number of jobs is shown, with Grid-submitted jobs displayed in parentheses, e.g. (Grid: 235)
    ", "Queue" => 0, "Mapping Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "help" => "
    JOB LIST:
    Job name
    ".$clickable.". Name of a job as assigned by the owner. If no name has been assigned, "N/A" is displayed. Click on a name to get a detailed description of the job.
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Queue
    ".$clickable.". Name of the batch queue in which the job is being executed. ".$str_que."
    CPUs
    ".$str_cpu."
    JOB DETAILS:
    Attribute
    ".$clickable.". Job attribute name".$str_att."
    Value
    ".$str_val."
    ", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Group of users, typically sharing common activities and resources, authorised at at least one ARC-enabled site. Click on the name to get the list of group members.
    Members
    Number of group members.
    Served by
    LDAP server that supports the group membership database.
    ", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's home institute as entered by a VO manager. Can be empty.
    E-mail
    ".$clickable.". Users e-mail as entered by a VO manager. Can be empty. Click the address to send an e-mail to the user.
    ", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "help" => "
    Cluster:queue
    ".$clickable.". Names of clusters and respective queues (separated by a column, ":") where a user is authorized to submit jobs. If a user is not authorized, message "Not authorised at host ..." is displayed. Click a cluster name to get a detailed cluster description. Click on a queue name to get a detailed queue description.
    Free CPUs
    Number of free CPUs available in a given queue for the user at this moment, optionally appended with the upper time limit value (in minutes). For example, "3" means 3 CPUs are available for a job of unlimited running time; "4:360" indicates there are 4 CPUs available for jobs not longer than 6 hours; "10:180 30" means there are 10 CPUs available for jobs not exceeding 3 hours, plus 30 CPUs available for jobs of any length; "0" means there are no CPUs available at the moment, and the jobs will be placed in a waiting queue.
    Queued jobs
    Number of the user's jobs expected to sit ahead of a newly submitted job (for this user) in a waiting queue. A value of "0" means the job is expected to be executed immediately. NB! This is only an estimate, which can be overridden by local policies.
    Free disk (MB)
    Disk space available for the user in a given queue (in megabytes). NB! This is only an estimate, as most clusters do not provide fixed disk quotas.
    Job name
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Name of the cluster at which the job is being executed. Click on a cluster name to get detailed information about the cluster.
    Queue
    ".$clickable.". Name of the batch queue in which the job is/was executed. ".$str_que."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "attlist" => array("0" => "Attribute values", "help" => "
    Object
    ".$clickable.". Name of the object which attributes are displayed. It can be a cluster name, a clusters queue name, a job name, a user name etc. Click on the string to get a detailed decscription of the object.
    Attribute
    For each object, one or more attribute values can be listed. The column title is the human-readable attribute name (except for some MDS-specific attributes), and the column contents are the attribute values per object as entered in the Information System.
    ", "Object" => 0, "Attribute" => 0 ), "quelist" => array("0" => "Queue", "help" => "
    Attribute
    ".$clickable.". Name of a queue attribute".$str_att."
    Value
    ".$str_val."
    Job name
    ".$clickable.". ".$str_job."
    Owner
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memory (KB)
    ".$str_mem."
    CPUs
    ".$str_cpu."
    ", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "help" => "
    Alias
    Storage element alias as specified in the Information System. Maximal displayed length is 15 characters.
    Tot. space
    Total disk space, GB.
    Free space
    Disk space available at the moment, GB.
    Name
    Storage element name, consisting of a logical name and a host name (separated by a colon, ":"). The logical name is used only for information system purposes, to distinguish between different storage elements hosted by the same machine.
    Base URL
    URL for the storage element, typically using the gsiftp:// protocol. Use this URL as the base to access files.
    Type
    Storage element type. "gridftp-based" indicates disk storage with a GridFTP interface.
    ", "#" => 0, "Alias" => 0, "Free/tot. space, GB" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "help" => "
    Name
    ".$clickable.". ".$str_nam."
    Affiliation
    User's affiliation, derived from the personal certificate
    Jobs
    Count of all user jobs in the system (running, pending, finished or deleted)
    Sites
    Shows how many sites authorise this user
    ", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Queued jobs" => 0, "Free disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-acl" => "Authorised VOs", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session lifetime (min)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-issuerca-hash" => "Certificate issuer's hash", "nordugrid-cluster-trustedca" => "Trusted certificate issuers", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-comment" => "Comment", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Total occupied CPUs", "nordugrid-queue-localqueued" => "Local jobs, queued", "nordugrid-queue-prelrmsqueued" => "Grid jobs, awaiting submission", "nordugrid-queue-queued" => "Jobs, queued (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. (minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-maxwalltime" => "Walltime, max. 
(minutes)", "nordugrid-queue-minwalltime" => "Walltime, min. (minutes)", "nordugrid-queue-defaultwalltime" => "Walltime, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-homogeneity" => "Homogeneous queue", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcputime" => "Requested CPU time", "nordugrid-job-reqwalltime" => "Requested wall clock time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-comment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall clock time", "nordugrid-job-completiontime" => "Completion time (GMT)", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-exitcode" => "Exit code", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-job-rerunable" => "Rerunnable", "nordugrid-job-reqcput" => "Requested time (OBSOLETE)", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-lrmscomment" => "LRMS comment (OBSOLETE)", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "User's queued jobs", "nordugrid-se-name" => "Name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-acl" => "Authorised VOs", "nordugrid-se-freespace" => "Free space (MB)", "nordugrid-se-totalspace" => "Total space (MB)", "nordugrid-se-url" => "Contact URL", "nordugrid-se-baseurl" => "Contact URL (OBSOLETE)", "nordugrid-se-accesscontrol" => "Access control", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-middleware" => "Middleware", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-issuerca-hash" => "Certificate issuer's hash", "nordugrid-se-trustedca" => "Trusted certificate issuers", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => 
"Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "8" => "No queue information found", "9" => "No entries found", "10" => "No users found", "11" => "Not authorised at host", "12" => "does not answer", "13" => "No recent jobs found for ", // debug messages "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", "130" => " Monitor timeouts for EMIR: ", // icon titles "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", // auxilliary strings "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL", "410" => "Cluster", "411" => "Queue", "412" => "Job", "413" => "User", "414" => "Storage", "415" => "Replica Cat.", "416" => "Define attributes to display for the object: ", "417" => "AND of all the expressions will be matched", "418" => "Leave the righmost field empty to show everything", "419" => "Display resources or objects of your choice", "420" => "Distinguished name", "421" => "Can use a total of ", "422" => " sites", "423" => "Resource / object:", "424" => "Nr.of attributes (def. 
6):", "425" => "Object", "426" => "Next", "427" => "Select one", "428" => "Reset", "429" => "SHOW" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australia", "Austria" => "Austria", "Armenia" => "Armenia", "Algeria" => "Algeria", "Belgium" => "Belgium", "Bulgaria" => "Bulgaria", "Canada" => "Canada", "China" => "China", "Czechia" => "Czechia", "Denmark" => "Denmark", "Estonia" => "Estonia", "Finland" => "Finland", "France" => "France", "Georgia" => "Georgia", "Germany" => "Germany", "Greece" => "Greece", "Hungary" => "Hungary", "Iceland" => "Iceland", "Ireland" => "Ireland", "Ireland" => "Ireland", "Italy" => "Italy", "Japan" => "Japan", "Latvia" => "Latvia", "Lithuania" => "Lithuania", "Morocco" => "Morocco", "Netherlands" => "Netherlands", "Norway" => "Norway", "Poland" => "Poland", "Portugal" => "Portugal", "Romania" => "Romania", "Russia" => "Russia", "SriLanka" => "Sri Lanka", "Sweden" => "Sweden", "Slovakia" => "Slovakia", "Slovenia" => "Slovenia", "Switzerland" => "Switzerland", "Turkey" => "Turkey", "UK" => "UK", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/fr.inc0000644000000000000000000000013215067751327021477 xustar0030 mtime=1759498967.777617767 30 atime=1759498967.876493818 30 ctime=1759499030.797485695 nordugrid-arc-7.1.1/src/services/monitor/lang/fr.inc0000644000175000002070000014632015067751327023407 0ustar00mockbuildmock00000000000000 N/A indique que l'utilisateur n'a pas donné de nom.
    X indique que le job a été tué par le propriétaire.
    ! indique que le job a échoué dans le système
    Cliquer sur un nom pour voir une description détaillée du job."; $str_nam = "Nom de l'utilisateur, tel que spécifié dans le certificat personnel. Cliquer sur un nom pour voir la liste de toutes les ressources disponibles pour cet utilisateur et tous les jobs soumis par cet utilisateur qui sont actuellement dans le système."; $str_sta = "Statut du job tel que fourni par le Grid Manager (GM) et LRMS. Dans l'ordre, les états sont :
    ACCEPTED – job soumis mais non encore pris en charge
    PREPARING – les fichiers d'entrée sont en train d'être récupérés
    SUBMITTING – interaction avec LRMS en cours
    INLRMS – le job est transféré au LRMS; un statut interne est ajouté par l'infosystem. Les états possibles sont :
    : Q – le job est en attente
    : U – le job est suspendu dans un node occupé (PBSPro)
    : S – le job est suspendu (Condor)
    : R, run – le job est en cours
    : E – le job se termine (PBS)
    FINISHING – les fichiers de sortie sont en train d'être transférés par le GM
    FINISHED – le job est terminé; un indicateur temporel est ajouté par l'infosystem
    CANCELING – le job est en train d'être annulé
    DELETED – le job n'a pas été supprimé par l'utilisateur mais par le GM à cause de la date d'expiration
    Chaque état peut être donné avec le préfixe PENDING:, ce qui signifie que le GM essaie de déplacer le job vers l'état suivant"; $str_tim = "Temps CPU utilisé par le job, en minutes."; $str_mem = "Mémoire consomée par le job, en ko"; $str_cpu = "Nombre de processeurs utilisés par le job."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitor", "help" => "
    Cet écran montre tous les sites enregistrés dans l'indexing service d'ARC, triés par pays puis par nom de site. Une sélection de paramètres de site est affichée : alias du cluster, capacité CPU totale et nombre de jobs courants et en attente, aussi bien du Grid que locaux. Utiliser "Search" pour comparer d'autres caractéristiques de cluster, file, job etc...
    Pays
    ".$clickable.". Drapeau et nom du pays, extrait des descriptions de ressource disponibles. Cliquer pour montrer les information concernant ce pays uniquement.
    Cluster
    ".$clickable.". Alias du cluster assigné par le propriétaire. La longueur maximale affichée est 22 caractÈres. Cliquer sur l'alias pour voir une description détaillée du cluster.
    CPU
    nombre total de CPU dans un cluster. NB! Seule une fraction de ceux-ci est effectivement accessible aux utilisateurs du Grid.
    Charge (processus:Grid+local)
    ".$clickable.". Charge relative du cluster, correspondant au nombre de CPU occupés. Les barres grises indiquent les processeurs occupés par les jobs soumis localement, les barres rouges montrent les CPU occupés par des jobs soumis à travers le Grid. Cliquer sur la barre pour voir la liste détaillée de tous les jobs d'origine Grid dans le cluster, y compris le nombre de processus par job.
    Files d'attente
    ".$clickable.". Nombre total de jobs en attente dans le cluster, montré comme le nombre de jobs du grid en attente plus le nombre de jobs en attente soumis localement. Cliquer sur le premier nombre pour voir la liste des jobs du Grid en attente dans le cluster.
    ", "Pays" => 30, "Cluster" => 160, "CPU" => 10, "Charge (processus: Grid+local)" => 210, "File d'attente" => 10 ), "clusdes" => array("0" => "Details des ressources pour", "help" => "
    Attribut
    ".$clickable.". Nom de l'attribut de cluster".$str_att."
    Valeur
    ".$str_val."
    File d'attente
    ".$clickable.". Noms des files batch disponibles pour les utilisateurs d'ARC, donné par le propriétaire du cluster. ".$str_que."
    Statut
    Statut des files. Une file active indiquera typiquement le statut : active.
    CPU (min)
    Limite de durée pour un job dans une file, si elle existe, en minutes. La première valeur affichée est la limite basse, la seconde la limite haute. Si les limites ne sont pas données (jobs de durée quelconque acceptés), le symbole N/A est affiché.
    En cours
    Nombre de jobs en cours dans la file. Le nombre total de jobs est indiqué, avec le nombre de processeurs occupés par des jobs du Grid entre parenthèses, par ex. (Grid:12). NB! Pour les jobs multiprocesseurs en parallèle, le nombre entre parenthèses peut être plus élevé que le nombre de jobs.
    En attente
    Nombre de jobs en attente d'execution dans la file. Le nombre total de jobs est affiché, avec les jobs du Grid entre parenthèse, par ex. (Grid: 235)
    ", "File d'attente" => 0, "Mapping Queue" => 0, "Statut" => 0, "Limites (min)" => 0, "CPU" => 0, "En cours" => 0, "En attente" => 0 ), "jobstat" => array("0" => "Jobs à:Job ID", "help" => "
    LIST DES JOBS:
    Nom du job
    ".$clickable.". Nom d'un job, assigné par le propriétaire. Si aucun nom n'a été assigné, "N/A" est affiché. Cliquer sur un nom pour voir une description détaillée du job.
    Propriétaire
    ".$clickable.". ".$str_nam."
    Statut
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    File d'attente
    ".$clickable.". Nom de la file batch dans laquelle le job est exécuté. ".$str_que."
    CPU
    ".$str_cpu."
    DETAILS DU JOB:
    Attribut
    ".$clickable.". Nom de l'attribut du job".$str_att."
    Valeur
    ".$str_val."
    ", "Nom du job" => 0, "propriétaire" => 0, "Statut" => 0, "CPU (min)" => 0, "Filer d'attente" => 0, "CPU" => 0 ), "volist" => array("0" => "Virtual Organisations", "help" => "
    Virtual Organisation
    ".$clickable.". Groupe d'utilisateurs, généralement partageant la même activités et les mêmes ressources, autorisé à au moins un site ARC. Cliquer sur le nom pour voir la liste des membres du groupe.
    Membres
    Nombre de membres du groupe.
    Desservi par
    serveur LDAP qui supporte la base de données des membres du groupe.
    ", "Virtual Organisation" => 0, "Membres" => 0, "Desservi par" => 0 ), "vousers" => array("0" => "Utilisateurs du Grid", "help" => "
    Nom
    ".$clickable.". ".$str_nam."
    Affiliation
    Institut d'origine de l'utilisateur, entré par le VO manager. Peut être vide.
    Adresse électronique
    ".$clickable.". Adresse électronique de l'utilisateur, entré par le VO manager. Peut être vide. Cliquer sur l'adresse pour envoyer un courriel à l'utilisateur.
    ", "#" => 0, "Nom" => 0, "Affiliation" => 0, "Adresse èlectronique" => 0 ), "userlist" => array("0" => "Information pour", "help" => "
    Cluster:file
    ".$clickable.". Noms des clusters et file respective (separés par deux points, ":") où un utilisateur est autorisé à soumettre des jobs. Si un utilisateur n'est pas autorisé, le message "Not authorised at host ..." est affiché. Cliquer sur un nom de cluster pour voir une description détaillée du cluster. Cliquer sur un nom de file pour voir une description détaillée de la file.
    CPU libres
    Nombre de CPU libres disponibles dans une file donnée pour l'utilisateur à cet instant, éventuellement associé avec la durée maximum (en minutes). Par exemple, "3" signifie 3 CPU disponibles pour un job de durée illimitée; "4:360" indique qu'il y a 4 CPU disponibles pour des jobs de moins de 6 heures; "10:180 30" signifie qu'il y a 10 CPU disponibles pour des jobs n'excédant pas 3 heures, plus 30 CPU disponibles pour des jobs de n'importe quelle durée; "0" signifie qu'il n'y a pas de CPU disponible à cet instant, et les jobs seront placés dans une file d'attente.
    Jobs en attente
    Nombre de jobs d'utilisateur qui seront avant un nouveau job (pour cet utilisateur) dans une file d'attente. Un nombre de "0" signifie que le job devrait être exécuté immédiatement. NB! Ceci n'est qu'une estimation, qui peut être outrepassée par des politiques locales.
    Disque libre (Mo)
    Espace disque disponible pour l'utilisateur dans une file donnée (en Mégaoctets). NB! Ceci n'est qu'une estimation, étant donné que la plupart des clusters ne fournissent pas de quotas fixes.
    Nom du job
    ".$clickable.". ".$str_job."
    Statut
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Cluster
    ".$clickable.". Nom du cluster où le job est exécuté. Cliquer sur un nom de cluster pour voir des information détaillées sur le cluster.
    Queue
    ".$clickable.". Nom de la file batch dans laquelle le job est/était exécuté. ".$str_que."
    CPU
    ".$str_cpu."
    ", "" => 0, "Nom du job" => 0, "Statut" => 0, "CPU (min)" => 0, "Cluster" => 0, "File" => 0, "CPU" => 0 ), "attlist" => array("0" => "Valeur des attributs", "help" => "
    Objet
    ".$clickable.". Nom de l'objet dont les attributs sont affichés Ce peut être le nom d'un cluster, d'une file de clusters, d'un job, d'un utilisateur etc... Cliquer sur le texte pour voir une description détaillée de l'objet.
    Attribut
    Pour chaque objet, un ou plusieurs attributs peuvent être listés. Le titre de la colonne est une version "human-readable" du nom de l'attribut (sauf pour certains attributs MDS-spécifiques), et le contenu de la colonne est la valeur de l'attribut par objet, telle qu'elle est entrée dans l'Information System.
    ", "Objet" => 0, "Attribut" => 0 ), "quelist" => array("0" => "File", "help" => "
    Attribut
    ".$clickable.". Nom d'un attribut de file".$str_att."
    Valeur
    ".$str_val."
    Nom du job
    ".$clickable.". ".$str_job."
    Propriétaire
    ".$clickable.". ".$str_nam."
    Statut
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memoire (KB)
    ".$str_mem."
    CPU
    ".$str_cpu."
    ", "" => 0, "Nom du job" => 0, "Propriétaire" => 0, "Statut" => 0, "CPU (min)" => 0, "Memoire (ko)" => 0, "CPU" => 0 ), "sestat" => array("0" => "Elements de Stockage", "help" => "
    Alias
    Alias de l'Element de Stockage spécifié dans l'Information System. Longueur maximale affichée de 15 caractères.
    Espace total
    Espace disque total, Go.
    Espace libre
    Espace disque disponible sur le moment, Go.
    Nom
    Nom de l'Element de Stockage, composé d'un nom logique et d'un nom d'hôte (séparés par deux points, ":"). Le nom logique est utilisé uniquement pour le système d'information, pour distinguer différents élément de stockages accueillis par la même machine-hôte.
    URL de base
    URL pour l'élément de stockage, généralement via le protocole gsiftp://. Utiliser cette URL comme base pour accéder aux fichiers.
    Type
    Type d'élément de stockage. "gridftp-based" indique un stockage disque avec une interface GridFTP.
    ", "#" => 0, "Alias" => 0, "Espace libre/total, GB" => 0, "Nom" => 0, "URL de base" => 0, "Type" => 0 ), "allusers" => array("0" => "Utilisateurs Grid autorisés:Utilisateurs Grid actifs", "help" => "
    Nom
    ".$clickable.". ".$str_nam."
    Affiliation
    Affiliation de l'utilisateur, dérivé du certificat personnel
    Jobs
    Compteur de tous les jobs d'utilisateur dans le système (en cours, en attente, terminés ou supprimés)
    Sites
    Affiche le nombre de sites qui admettent cet utilisateur
    ", "#" => 0, "Nom" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "userres" => array("0" => "", "Cluster:file" => 0, "CPU libres" => 0, "Jobs en attente" => 0, "Espace disque libre (Mo)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Valeur" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valide depuis (GMT)", "Mds-validto" => "Info valide jusqu'à (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Nom du domaine Front-end", "nordugrid-cluster-aliasname" => "Alias du cluster", "nordugrid-cluster-contactstring" => "Fil de contact", "nordugrid-cluster-interactive-contactstring" => "Contact interactif", "nordugrid-cluster-comment" => "Commentaire", "nordugrid-cluster-support" => "Contact courriel", "nordugrid-cluster-acl" => "VO authorisés", "nordugrid-cluster-lrms-type" => "type de LRMS", "nordugrid-cluster-lrms-version" => "Version de LRMS", "nordugrid-cluster-lrms-config" => "détails de LRMS", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Système d'exploitation", "nordugrid-cluster-homogeneity" => "Cluster homogène", "nordugrid-cluster-nodecpu" => "type de CPU (le plus lent)", "nordugrid-cluster-nodememory" => "Memoire (Mo, la plus petite)", "nordugrid-cluster-totalcpus" => "CPU, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Espace disque, disponible (Mo)", "nordugrid-cluster-sessiondir-total" => "Espace disque, total (Mo)", "nordugrid-cluster-sessiondir-lifetime"=> "Durée de vie d'une session Grid (min)", "nordugrid-cluster-cache-free" => "Taille du cache, disponible (Mo)", "nordugrid-cluster-cache-total" => "Taille du cache, total (Mo)", "nordugrid-cluster-runtimeenvironment" => "Moteur d'exécution (runtime environment)", "nordugrid-cluster-localse" => "Element de stockage, local", "nordugrid-cluster-middleware" => "Intergiciel (middleware) du Grid", "nordugrid-cluster-totaljobs" => "Jobs, quantité totale", "nordugrid-cluster-usedcpus" => "CPU, occupés", "nordugrid-cluster-queuedjobs" => "Jobs, en attente", "nordugrid-cluster-prelrmsqueued" => "Jobs Grid, en attente d'être soumis", "nordugrid-cluster-location" => "Code postal", "nordugrid-cluster-owner" => "Propriétaire", "nordugrid-cluster-issuerca" => "Fournisseur du certificat", "nordugrid-cluster-issuerca-hash" => "Hachage du fournisseur du certificat", "nordugrid-cluster-trustedca" => "Fournisseurs de certificat fiables", "nordugrid-cluster-nodeaccess" => "IP-connectivité du node", "nordugrid-cluster-gridarea" => "zone de la session (OBSOLETE)", "nordugrid-cluster-gridspace" => "Espace disque Grid (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, en cours (OBSOLETE)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "Nom de file", "nordugrid-queue-comment" => "Commentaire", "nordugrid-queue-status" => "Statut de file", "nordugrid-queue-running" => "CPU, occupés", "nordugrid-queue-localqueued" => "Jobs locaux, en attente", "nordugrid-queue-prelrmsqueued" => "Jobs Grid, en attente d'être soumis", "nordugrid-queue-queued" => "Jobs, en attente (OBSOLETE)", "nordugrid-queue-maxrunning" => "Jobs, en cours (max)", "nordugrid-queue-maxqueuable" => "Jobs, pouvant être mis en attente (max)", "nordugrid-queue-maxuserrun" => "Jobs par utilisateur Unix (max)", "nordugrid-queue-maxcputime" => "Temps 
CPU, max. (minutes)", "nordugrid-queue-mincputime" => "Temps CPU, min. (minutes)", "nordugrid-queue-defaultcputime" => "Temps CPU, default (minutes)", "nordugrid-queue-maxwalltime" => "Temps d'horloge, max. (minutes)", "nordugrid-queue-minwalltime" => "Temps d'horloge, min. (minutes)", "nordugrid-queue-defaultwalltime" => "Temps d'horloge, defaut (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPU, total", "nordugrid-queue-nodecpu" => "type de CPU", "nordugrid-queue-nodememory" => "Memoire (Mo)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "System d'exploitation", "nordugrid-queue-homogeneity" => "File homogène", "nordugrid-queue-gridrunning" => "CPU, occupés par jobs Grid", "nordugrid-queue-gridqueued" => "Jobs Grid, en attente", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU par file (OBSOLETE)", "nordugrid-queue-assignedcputype" => "Type de CPU (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Propriétaire", "nordugrid-job-execcluster" => "Cluster d'execution", "nordugrid-job-execqueue" => "File d'execution", "nordugrid-job-stdout" => "Fichier de sortie standard", "nordugrid-job-stderr" => "Fichier d'erreur standard", "nordugrid-job-stdin" => "Fichier d'entrée standard", "nordugrid-job-reqcputime" => "Temps CPU requis", "nordugrid-job-reqwalltime" => "Temps d'horloge requis", "nordugrid-job-status" => "Statut", "nordugrid-job-queuerank" => "Position dans la file", "nordugrid-job-comment" => "Commentaire LRMS", "nordugrid-job-submissionui" => "Machine de soumission", "nordugrid-job-submissiontime" => "Date de soumission (GMT)", "nordugrid-job-usedcputime" => "Temps CPU utilisé", "nordugrid-job-usedwalltime" => "Temps d'horloge utilisé", "nordugrid-job-completiontime" => "Date de termination (GMT)", "nordugrid-job-sessiondirerasetime" => "Date de suppression (GMT)", "nordugrid-job-proxyexpirationtime" => "Date d'expiration du proxy (GMT)", "nordugrid-job-usedmem" => "Memoire utilisée (Ko)", "nordugrid-job-errors" => "Erreurs", "nordugrid-job-exitcode" => "Code de sortie", "nordugrid-job-jobname" => "Nom", "nordugrid-job-runtimeenvironment" => "Moteur d'exécution (runtime environment)", "nordugrid-job-cpucount" => "CPU requis", "nordugrid-job-executionnodes" => "Nodes d'exécution", "nordugrid-job-gmlog" => "Fichier de journal du GM", "nordugrid-job-clientsoftware" => "Version du client", "nordugrid-job-rerunable" => "Réexecutable", "nordugrid-job-reqcput" => "Temps requis (OBSOLETE)", "nordugrid-job-gridlog" => "Fichier Gridlog (OBSOLETE)", "nordugrid-job-lrmscomment" => "commentaire LRMS (OBSOLETE)", "nordugrid-authuser-name" => "Nom", "nordugrid-authuser-sn" => "Nom du sujet", "nordugrid-authuser-freecpus" => "CPU libres", "nordugrid-authuser-diskspace" => "Espace disque libre (Mo)", "nordugrid-authuser-queuelength" => "Jobs en attente de l'utilisateur", "nordugrid-se-name" => "Nom", "nordugrid-se-aliasname" => "Alias de l'élément de stockage", "nordugrid-se-type" => "Type d'élément de stockage", "nordugrid-se-acl" => "VO autorisés", "nordugrid-se-freespace" => "Espace libre (Mo)", "nordugrid-se-totalspace" => "Espace total (Mo)", "nordugrid-se-url" => "URL de contact", "nordugrid-se-baseurl" => "URL de contact (OBSOLETE)", "nordugrid-se-accesscontrol" => "Contrôle d'accès", "nordugrid-se-authuser" => "Utilisateur autorisé (DN)", "nordugrid-se-location" => "Code postal", "nordugrid-se-owner" => "Propriétaire", 
"nordugrid-se-middleware" => "Intergiciel (middleware)", "nordugrid-se-issuerca" => "Fournisseur de certificate", "nordugrid-se-issuerca-hash" => "Hachage du fournisseur de certificat", "nordugrid-se-trustedca" => "Fournisseurs de certificat fiables", "nordugrid-se-comment" => "Commentaire", "nordugrid-rc-name" => "Nom de domaine", "nordugrid-rc-aliasname" => "Alias du duplicata du Catalogue", "nordugrid-rc-baseurl" => "URL de contact", "nordugrid-rc-authuser" => "Utilisateur autorisé (DN)", "nordugrid-rc-location" => "Code postal", "nordugrid-rc-owner" => "Propriétaire", "nordugrid-rc-issuerca" => "Fournisseur de certificat" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Impossible de lire les index de ressource", "2" => "Aucun des index locaux ne retourne de connexion", "3" => " mauvaise configuration ou la requête a expiré", "4" => "Aucun job Grid trouvé", "5" => "Aucune information trouvée", "6" => "Serveur indisponible", "7" => " - rafraîchir plus tard", "8" => "Aucune information de liste trouvée", "9" => "Aucune entrée trouvée", "10" => "Aucun utilisateur trouvé", "11" => "Non autorisé chez l'hôte", "12" => "Ne répond pas", "13" => "Aucun job récent trouvé pour ", // debug messages "101" => " Monitor timeouts pour GRIS: ", "102" => " sec pendant la connection et ", "103" => " sec pendant la recherche", "104" => " sec en recherche", "105" => "N'affiche les ressources qu'en ", "106" => "Polled top-level indices: ", "107" => "Situations géographique obtenues, sites scannés: ", "108" => " sites rangés par situation géographique", "109" => "Recherche d'attributs du cluster", "110" => "Recherche d'attributs de la file", "111" => "Aucune donnée de ", "112" => " is up in ", "113" => " n'a aucune ressource à proposer", "114" => " Monitor timeouts for GIIS: ", "115" => "Saute GRIS: ", "116" => "pas un ", "117" => "Vérifie la connexion: ", "118" => "OK", "119" => "Jusqu'ici, a détecté des ressource de genre ", "120" => "Erreur LDAP en cherchant ", "121" => " statut à ", "122" => "Sur liste noire: ", "123" => "Registrant found for ", "124" => "Recherche d'attributs de SE", "125" => "Recherche d'utilisateurs", "126" => "Recherche de jobs", "127" => " a un job ", "128" => " alors que ce n'est pas autorisé", "129" => "Impossible d'obtenir les données d'object: erreur ", "130" => " Monitor timeouts for EMIR: ", // icon titles "301" => "Rafraîchir", "302" => "Imprimer", "303" => "Aide", "304" => "Fermer", "305" => "Rouge", "306" => "Gris", "307" => "Tous utilisateurs", "308" => "Utilisateurs actifs", "309" => "Rechercher", "310" => "Stockage", "311" => "VO", "312" => "Drapeau de ", "313" => " processus Grid et ", "314" => " processus locaux", // auxilliary strings "401" => "Processus", "402" => "Grid", "403" => "Local", "404" => "Monde", "405" => "TOTAL", "406" => " sites", "407" => "beaucoup de", "408" => " Go", "409" => " ALL", "410" => "Cluster", "411" => "File", "412" => "Job", "413" => "Utilisateur", "414" => "Stockage", "415" => "Duplicata Cat.", "416" => "Définir les attributs à afficher pour l'objet: ", "417" => "Le produit logique (ET) de toutes les expressions va être testé", "418" => "Laisser le champ de droite vide pour tout afficher", "419" => "Afficher les ressources ou objets de votre choix", "420" => "Nom Distinct", "421" => "Peut utiliser un total de ", "422" => " sites", "423" => "Ressource / objet:", "424" => "Nr. des attributs (def. 
6):", "425" => "Objet", "426" => "Suivant", "427" => "Choisir un", "428" => "Reinitialiser", "429" => "AFFICHER" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australie", "Austria" => "Autriche", "Armenia" => "Armenie", "Algeria" => "Algerie", "Belgium" => "Belgique", "Bulgaria" => "Bulgarie", "Canada" => "Canada", "China" => "Chine", "Czechia" => "République Tchèque", "Denmark" => "Danemark", "Estonia" => "Estonie", "Finland" => "Finlande", "France" => "France", "Georgia" => "Georgie", "Germany" => "Allemagne", "Greece" => "Grèce", "Hungary" => "Hongrie", "Iceland" => "Islande", "Ireland" => "Irlande", "Italy" => "Italie", "Japan" => "Japon", "Latvia" => "Lettonie", "Lithuania" => "Lithuanie", "Morocco" => "Maroc", "Netherlands" => "Pays-Bas", "Norway" => "Norvège", "Poland" => "Pologne", "Portugal" => "Portugal", "Romania" => "Roumanie", "Russia" => "Russie", "SriLanka" => "Sri Lanka", "Sweden" => "Suède", "Slovakia" => "Slovaquie", "Slovenia" => "Slovenie", "Switzerland" => "Suisse", "Turkey" => "Turquie", "UK" => "Grande-Bretagne", "Ukraine" => "Ukraine", "USA" => "USA" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/sv.inc0000644000000000000000000000013215067751327021520 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.877493833 30 ctime=1759499030.804311124 nordugrid-arc-7.1.1/src/services/monitor/lang/sv.inc0000644000175000002070000014647115067751327023437 0ustar00mockbuildmock00000000000000 N/A betyder att ägaren inte tilldelat ett jobbnamn
    X betyder att jobbet dödats av ägaren
    ! betyder att jobbet inte fullbordades i systemet
    Klicka på ett namn för att få en detaljerad beskrivning av jobbet."; $str_nam = "Användarens namn såsom specificerat i det personliga cerifikatet. Klicka på ett namn för att få en lista över alla resurser som är tillgängliga för denna användare och denna användares alla jobb som för närvarande finns i systemet."; $str_sta = "Jobbstatus returnerad av gridmanagern (GM) och LRMS. Tillstånden är i tidsordning:
    ACCEPTED – jobbet har skickats in men är ännu ej behandlat
    PREPARING – indatafiler hämtas
    SUBMITTING – växelverkan med LRMS pågår
    INLRMS – jobbet har överförts till LRMS; intern status läggs till av informationssystemet. Möjliga tillstånd är:
    : Q – jobbet är köat
    : U – jobbet är i ett uppskjutet tillstånd på en upptagen nod (PBSPro)
    : S – jobbet är i ett uppskjutet tillstånd (Condor)
    : R, run – jobbet exekveras
    : E – jobbet avslutas (PBS)
    FINISHING – utdatafiler överförs av GM
    FINISHED – jobbet är avslutat; tidsstämpel läggs till av informationssystemet
    CANCELING – jobbet håller på att avbrytas
    DELETED – jobbet har inte tagits bort på begäran av användaren utan av GM p.g.a. att maximala lagringstiden har passerat
    Alla dessa tillstånd kan rapporteras med prefixet PENDING:, vilket betyder att GM försöker att flytta jobbet till nästa tillstånd"; $str_tim = "CPU-tid som jobbet använt, minuter."; $str_mem = "Minne som jobbet använt, KB."; $str_cpu = "Antal processorer som jobbet använt."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Gridmonitor", "help" => "
    Denna sida visar alla kluster som registrerar sig till ARCs indexservice, sorterade efter land och därefter värdnamn. Utvalda klusterparametrar monitoreras: klusteralias, total CPU-kapacitet och antal jobb som exekveras och köar på klustret, såväl gridjobb som lokala jobb. Använd sökfuntionen om du vill jämföra annan kluster-, kö- och jobbinformation.
    Land
    ".$clickable.". Landslagga och landsnamn härledda från tillgängliga resursbeskrivningar. Klicka för att visa endast detta lands information.
    Kluster
    ".$clickable.". Klusteralias tilldelat av ägaren. Maximal visad längd är 22 tecken. Klicka på detta alias för att få en detaljerad klusterbeskrivning.
    CPU:er
    Totalt antal CPU:er i ett kluster. OBS! Endast en del av dessa kan vara tillgängliga för gridanvändare.
    Belastning (processer: grid + lokala)
    ".$clickable.". Relativ klusterbelastning, motsvarande antalet upptagna CPU:er. Grå fält markerar processorer upptagna av de lokalt inskickade jobben, medan röda fält visar CPU:er upptagna av jobb som skickats in via grid. Klicka på fältet för att få en detaljerad lista av alla gridjobb som exekveras på klustret, inklusive antalet processorer per job.
    Köande
    ".$clickable.". Totalt antal jobb som köar på klustret, visat som antalet köande gridjobb plus antalet lokalt inskickade köande jobb. Klicka på den första siffran för att få en lista av köande gridjob på klustret.
    ", "Land" => 30, "Kluster" => 160, "CPU:er" => 10, "Belastning (processer: grid + lokala)" => 210, "Köande" => 10 ), "clusdes" => array("0" => "Resursinformation för", "help" => "
    Attribut
    ".$clickable.". Klusterattributnamn".$str_att."
    Värde
    ".$str_val."
    Kö
    ".$clickable.". Namn på batchköer tillgängliga för ARCanvändarna uppsatta av klusterägarna. ".$str_que."
    Status
    Köstatus. Fungerande köer visar normalt status active.
    Tidsgränser (min)
    Tidsgräns för jobblängd per kö, om definierad, i CPU-minuter. Det första visade värdet är den nedre gränsen, det andra den övre. Om inga gränser är definierade (jobb med alla längder är tillåtna), visas N/A
    Exekveras
    Antal jobb som exekveras i kön. Det totala antalet jobb visas, med antalet processorer upptagna av gridjobb i parentes, t.ex. (Grid: 12). OBS! För parallella multiprocessorjobb kan numret i parentes vara större än antalet jobb.
    Köar
    Antal jobb som väntar på att exekveras i kön. Det totala antalet jobb visas, med gridjobb visade i parentes, t.ex. (Grid: 235)
    ", "Kö" => 0, "Mappningskö" => 0, "Status" => 0, "Tidsgränser (min)" => 0, "CPU:er" => 0, "Exekveras" => 0, "Köar" => 0 ), "jobstat" => array("0" => "Jobb på:Jobb-ID", "help" => "
    JOBBLISTA:
    Jobbnamn
    ".$clickable.". ".$str_job."
    Ägare
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Kö
    ".$clickable.". Namn på batchkön i vilken jobbet exekveras. ".$str_que."
    CPU:er
    ".$str_cpu."
    JOBBINFORMATION:
    Attribut
    ".$clickable.". Jobbattributnamn".$str_att."
    Värde
    ".$str_val."
    ", "Jobbnamn" => 0, "Ägare" => 0, "Status" => 0, "CPU (min)" => 0, "Kö" => 0, "CPU:er" => 0 ), "volist" => array("0" => "Virtuella organisationer", "help" => "
    Virtuell organisation
    ".$clickable.". Användargrupp, delar oftast gemensamma activiteter och resurser, autoriserad på åtminstone ett ARC-kluster. Klicka på namnet för att få en lista med gruppmedlemmar.
    Medlemmar
    Antal gruppmedlemmar.
    Server
    LDAP-server som huserar gruppmedlemsskapsdatabasen.
    ", "Virtuell organisation" => 0, "Medlemmar" => 0, "Server" => 0 ), "vousers" => array("0" => "Gridanvändarbas", "help" => "
    Namn
    ".$clickable.". ".$str_nam."
    Anknytning
    Användarens heminstitut inmatat av VO-managern. Kan vara tomt.
    E-post
    ".$clickable.". Användarens e-post inmatad av en VO-manager. Kan vara tomt. Klicka på adressen för att sända ett e-brev till användaren.
    ", "#" => 0, "Namn" => 0, "Anknytning" => 0, "E-post" => 0 ), "userlist" => array("0" => "Information för", "help" => "
    Kluster:kö
    ".$clickable.". Namn på kluster och dess respektive köer (separade med ett kolon, ":") där en användare är autoriserad att skicka in jobb. Om en avändare inte är autoriserad visas meddelendet "Not authorised at host ...". Klicka på ett klusternamn för att få en detaljerad klusterbeskrivning. Klicka på ett könamn föt att få en detaljerad köbeskrivning.
    Fria CPU:er
    Antal fria CPU:er tillgängliga i en given kö för denna användare vid detta tillfälle, ibland med en övre tidsgräns (i minuter) bifogad. T.ex. "3" betyder 3 CPU:er tillgängliga för ett jobb med obegränsad exekveringstid; "4:360" indikerar att det finns 4 CPU:er tillgängliga för jobb som inte är längre än 6 timmar; "10:180 30" betyder att det finns 10 CPU:er tillgängliga för jobb som inte övergår 3 timmar, plus 30 CPU:er tillgängliga för jobb av valfri längd; "0" betyder att det inte finns några CPU:er tillgängliga för tillfället, och att jobben kommer att placeras i kö.
    Köade jobb
    Antal användarens jobb som förväntas stå före ett nytt inskickat jobb (för denna användare) i en kö. "0" betyder att jobbet förväntas exekveras omedelbart. OBS! Detta är endast en uppskattning, som kan åsidosättas av lokala regler.
    Fri disk (MB)
    Diskutrymme gillgängligt för användaren i en given kö (i megabyte). OBS! Detta är endast en uppskattning, då de flesta kluster inte erbjuder fasta diskkvoter.
    Jobbnamn
    ".$clickable.". ".$str_job."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Kluster
    ".$clickable.". Namn på det kluster på vilket jobbet exekvera(de)s. Klicka på ett klusternamn för att få detaljerad information om klustret.
    Kö
    ".$clickable.". Name på den batchkö i vilken jobbet exekvera(de)s. ".$str_que."
    CPU:er
    ".$str_cpu."
    ", "" => 0, "Jobbnamn" => 0, "Status" => 0, "CPU (min)" => 0, "Kluster" => 0, "Kö" => 0, "CPU:er" => 0 ), "attlist" => array("0" => "Attributvärden", "help" => "
    Objekt
    ".$clickable.". Namn på det objekt vars attribut visas. Det kan vara ett klusternamn, ett klusters könamn, ett jobbnamn, ett användarnamn etc. Klicka på namnet för att få en detaljerad beskrivning av objektet.
    Attribut
    För varje objekt, ett eller flera attributvärden kan listas. Kolumntiteln är det human-readable attributnamnet (förutom för några MDS-specifika attribut), och kolumnens innehåll är attributvärden per objekt inmatade i informationssystemet.
    ", "Objekt" => 0, "Attribut" => 0 ), "quelist" => array("0" => "Kö", "help" => "
    Attribut
    ".$clickable.". Köattributnamn".$str_att."
    Värde
    ".$str_val."
    Jobbnamn
    ".$clickable.". ".$str_job."
    Ägare
    ".$clickable.". ".$str_nam."
    Status
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Minne (KB)
    ".$str_mem."
    CPU:er
    ".$str_cpu."
    ", "" => 0, "Jobbnamn" => 0, "Ägare" => 0, "Status" => 0, "CPU (min)" => 0, "Minne (KB)" => 0, "CPU:er" => 0 ), "sestat" => array("0" => "Lagringselement", "help" => "
    Alias
    Lagringselementalias specificerat i informationssytemet. Maximal visad längd är 15 tecken.
    Totalt utrymme
    Totalt diskutrymme, GB.
    Fritt utrymme
    Diskutrymme tillgängligt för tillfället, GB.
    Namn
    Lagringselementnamn, bestående av ett logiskt namn och värdnamn (separerade av ett kolon, ":"). Det logiska namnet används endast för informationssystemsyften, för att särskilja olika lagringselement som huserar på samma maskin.
    Bas-URL
    Lagringselementats URL, oftast ett gsiftp:// protocol. Använd denna URL som bas för att komma åt filer.
    Typ
    Lagringselementtyp. "gridftp-based" indikerar disklagring med gridftp-gränssnitt.
    ", "#" => 0, "Alias" => 0, // "Totalt utrymme" => 0, "Fritt/totalt utrymme, GB" => 0, "Namn" => 0, "Bas-URL" => 0, "Typ" => 0 ), "allusers" => array("0" => "Autoriserade gridanvändare:Aktiva gridanvändare", "help" => "
    Namn
    ".$clickable.". ".$str_nam."
    Anknytning
    Användarens anknytning, härledd från det personliga certifikatet
    Jobb
    Totalt antal jobb från denna användarens i systemet (exekveras, avvaktande, avslutade eller borttagna)
    Kluster
    Visar hur många kluster som autoriserar denna användare
    ", "#" => 0, "Namn" => 0, "Anknytning" => 0, "Jobb" => 0, "Kluster" => 0 ), "userres" => array("0" => "", "Kluster:kö" => 0, "Fria CPU:er" => 0, "Köade jobb" => 0, "Fri disk (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribut" => 0, "Värde" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info giltig fr.o.m. (GMT)", "Mds-validto" => "Info giltig t.o.m. (GMT)" ), "isattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info giltig fr.o.m. (GMT)", "Mds-validto" => "Info giltig t.o.m. (GMT)" "nordugrid-cluster-name" => "Front-end domännamn", "nordugrid-cluster-aliasname" => "Klusteralias", "nordugrid-cluster-contactstring" => "Kontaktsträng", "nordugrid-cluster-interactive-contactstring" => "Interaktiv kontakt", "nordugrid-cluster-comment" => "Kommentar", "nordugrid-cluster-support" => "E-postkontakt", "nordugrid-cluster-acl" => "Auktoriserade VO:er", "nordugrid-cluster-lrms-type" => "LRMS-typ", "nordugrid-cluster-lrms-version" => "LRMS-version", "nordugrid-cluster-lrms-config" => "LRMS-detaljer", "nordugrid-cluster-architecture" => "Arkitektur", "nordugrid-cluster-opsys" => "Operativsystem", "nordugrid-cluster-homogeneity" => "Homogent kluster", "nordugrid-cluster-nodecpu" => "CPU-typ (lÃ¥ngsammast)", "nordugrid-cluster-nodememory" => "Minne (MB, minsta)", "nordugrid-cluster-totalcpus" => "CPU:er, totalt", "nordugrid-cluster-cpudistribution" => "CPU:er, per maskin", "nordugrid-cluster-benchmark" => "Benchmark", "nordugrid-cluster-sessiondir-free" => "Diskutrymme, tillgängligt (MB)", "nordugrid-cluster-sessiondir-total" => "Diskutrymme, totalt (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Gridsessionens livstid (min)", "nordugrid-cluster-cache-free" => "Cachestorlek, tillgängligt (MB)", "nordugrid-cluster-cache-total" => "Cachestorlek, totalt (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime-miljö", "nordugrid-cluster-localse" => "Lagringselement, lokalt", "nordugrid-cluster-middleware" => "Grid-middleware", "nordugrid-cluster-totaljobs" => "Jobb, totalt antal", "nordugrid-cluster-usedcpus" => "CPU:er, upptagna", "nordugrid-cluster-queuedjobs" => "Jobb, köade", "nordugrid-cluster-prelrmsqueued" => "Gridjobb, köade före LRMS", "nordugrid-cluster-location" => "Postnummer", "nordugrid-cluster-owner" => "Ägare", "nordugrid-cluster-issuerca" => "Certifikatutfärdare", "nordugrid-cluster-issuerca-hash" => "Certifikatutfärdares hashsumma", "nordugrid-cluster-trustedca" => "Betrodd certifikatutfärdare", "nordugrid-cluster-nodeaccess" => "Nod-IP-uppkoppling", "nordugrid-cluster-gridarea" => "Sessionsarea (FÖRLEGAD)", "nordugrid-cluster-gridspace" => "Griddiskutrymme (FÖRLEGAD)", "nordugrid-cluster-opsysdistribution" => "OS-distribution (FÖRLEGAD)", "nordugrid-cluster-runningjobs" => "Jobb, exekveras (FÖRLEGAD)", "nordugrid-cluster-credentialexpirationtime" => "Certifikat-förfallotid", "nordugrid-queue-name" => "Könamn", "nordugrid-queue-comment" => "Kommentar", "nordugrid-queue-status" => "Köstatus", "nordugrid-queue-running" => "CPU:er, upptagna", "nordugrid-queue-localqueued" => "Lokala jobb, köade", "nordugrid-queue-prelrmsqueued" => "Grid jobb, köade före LRMS", "nordugrid-queue-queued" => "Jobb, köade (FÖRLEGAD)", "nordugrid-queue-maxrunning" => "Jobb, exekveras (max)", "nordugrid-queue-maxqueuable" => "Jobb, köbara (max)", "nordugrid-queue-maxuserrun" => "Jobb per unixanvändare (max)", "nordugrid-queue-maxcputime" => "CPU-tid, max. (minuter)", "nordugrid-queue-mincputime" => "CPU-tid, min. 
(minuter)", "nordugrid-queue-defaultcputime" => "CPU-tid, förvald (minuter)", "nordugrid-queue-maxwalltime" => "Klocktid, max. (minuter)", "nordugrid-queue-minwalltime" => "Klocktid, min. (minuter)", "nordugrid-queue-defaultwalltime" => "Klocktid, förvald (minuter)", "nordugrid-queue-schedulingpolicy" => "Scheduleringspolicy", "nordugrid-queue-totalcpus" => "CPU:er, totalt", "nordugrid-queue-nodecpu" => "CPU-typ", "nordugrid-queue-nodememory" => "Minne (MB)", "nordugrid-queue-architecture" => "Arkitektur", "nordugrid-queue-opsys" => "Operativsystem", "nordugrid-queue-homogeneity" => "Homogen kö", "nordugrid-queue-gridrunning" => "CPU:er, upptagna av gridjobb", "nordugrid-queue-gridqueued" => "Gridjobb, köade", "nordugrid-queue-benchmark" => "Benchmark", "nordugrid-queue-assignedcpunumber" => "CPU:er per kö (FÖRLEGAD)", "nordugrid-queue-assignedcputype" => "CPU-typ (FÖRLEGAD)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Ägare", "nordugrid-job-execcluster" => "Exekveringskluster", "nordugrid-job-execqueue" => "Exekveringskö", "nordugrid-job-stdout" => "Standard output-fil", "nordugrid-job-stderr" => "Standard error-fil", "nordugrid-job-stdin" => "Standard input-fil", "nordugrid-job-reqcputime" => "Begärd CPU-tid", "nordugrid-job-reqwalltime" => "Begärd klocktid", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Plats i kön", "nordugrid-job-comment" => "LRMS-kommentar", "nordugrid-job-submissionui" => "Inskickningsmaskin", "nordugrid-job-submissiontime" => "Inskickningstid (GMT)", "nordugrid-job-usedcputime" => "Använd CPU-tid", "nordugrid-job-usedwalltime" => "Använd klocktid", "nordugrid-job-completiontime" => "Avslutningstid (GMT)", "nordugrid-job-sessiondirerasetime" => "Raderingstid (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxyförfallotid (GMT)", "nordugrid-job-usedmem" => "Använt minne (KB)", "nordugrid-job-errors" => "Fel", "nordugrid-job-exitcode" => "Returkod", "nordugrid-job-jobname" => "Namn", "nordugrid-job-runtimeenvironment" => "Runtimemiljö", "nordugrid-job-cpucount" => "Begärda CPU:er", "nordugrid-job-executionnodes" => "Exekveringsnoder", "nordugrid-job-gmlog" => "GM loggfil", "nordugrid-job-clientsoftware" => "Klientversion", "nordugrid-job-rerunable" => "Omkörbart", "nordugrid-job-reqcput" => "Begärd tid (FÖRLEGAD)", "nordugrid-job-gridlog" => "Gridloggfil (FÖRLEGAD)", "nordugrid-job-lrmscomment" => "LRMS-kommentar (FÖRLEGAD)", "nordugrid-authuser-name" => "Namn", "nordugrid-authuser-sn" => "Subjektnamn", "nordugrid-authuser-freecpus" => "Fria CPU:er", "nordugrid-authuser-diskspace" => "Fritt diskutrymme (MB)", "nordugrid-authuser-queuelength" => "Användarens kölängd", "nordugrid-se-name" => "Namn", "nordugrid-se-aliasname" => "Lagringselementalias", "nordugrid-se-type" => "Lagringselementtyp", "nordugrid-se-acl" => "Auktoriserade VO:er", "nordugrid-se-freespace" => "Fritt utrymme (MB)", "nordugrid-se-totalspace" => "Totalt utrymme (MB)", "nordugrid-se-url" => "Kontakt-URL", "nordugrid-se-baseurl" => "Kontakt-URL (FÖRLEGAD)", "nordugrid-se-accesscontrol" => "Access kontroll", "nordugrid-se-authuser" => "Auktoriserad användare (DN)", "nordugrid-se-location" => "Postnummer", "nordugrid-se-owner" => "Ägare", "nordugrid-se-middleware" => "Grid-middleware", "nordugrid-se-issuerca" => "Certifikatutfärdare", "nordugrid-se-issuerca-hash" => "Certifikatutfärdares hashsumma", "nordugrid-se-trustedca" => "Betrodd certifikatutfärdare", "nordugrid-se-comment" => "Kommentar", "nordugrid-rc-name" => "Domännamn", "nordugrid-rc-aliasname" => 
"Replica Catalog-alias", "nordugrid-rc-baseurl" => "Kontakt-URL", "nordugrid-rc-authuser" => "Auktoriserad användare (DN)", "nordugrid-rc-location" => "Postnummer", "nordugrid-rc-owner" => "Ägare", "nordugrid-rc-issuerca" => "Certifikatutfärdare" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Kan inte läsa topp-nivÃ¥-indexservrarna", "2" => "Ingen av de lokala indexservrarna returnerade uppkopplingen", "3" => " dÃ¥lig konfigurering eller begäran drog över tiden", "4" => "Inga gridjobb funna", "5" => "Ingen information funnen", "6" => "Server otillgänglig", "7" => " - ladda om senare", "8" => "Ingen köinformation funnen", "9" => "Inga poster funna", "10" => "Inga användare funna", "11" => "Inte autoriserad pÃ¥ värden", "12" => "svarar inte", "13" => "Inga nya jobb funna för ", // debug messages "101" => " Monitor-time-out för GRIS: ", "102" => " sek för uppkoppling och ", "103" => " sek för sökning", "104" => " sek använda för sökning", "105" => "Visar resurser endast i ", "106" => "FrÃ¥gade topp-nivÃ¥-indexservrar: ", "107" => "Fick geografiska data, skannade kluster: ", "108" => " kluster sorterade efter geografiska data", "109" => "Sökning efter klusterattribut", "110" => "Sökning efter köattribut", "111" => "Inga data frÃ¥n ", "112" => " är uppe i ", "113" => " har inga resurser att erbjuda", "114" => " Monitor-time-out för GIIS: ", "115" => "Hoppar över GRIS: ", "116" => "inte en ", "117" => "Verifierar uppkoppling: ", "118" => "OK", "119" => "Hittills, detekterade resurser av slag ", "120" => "LDAP-fel vid sökning efter ", "121" => " status vid ", "122" => "Svartlistad: ", "123" => "Registrant funnen för ", "124" => "Sökning efter lagringselementattribut", "125" => "Sökning efter användare", "126" => "Sökning efter jobb", "127" => " har jobb ", "128" => " utan att vara auktoriserad", "129" => "Kan inte hämta objektdata: fel ", "130" => " Monitor-timeout för EMIR: ", "131" => " Monitor-timeout för ARCHERY beror pÃ¥ operativsystemets DNS-uppslagningsinställningar (Vi litar pÃ¥ DNS-cache!)", "132" => "Misslyckades med att frÃ¥ga följande ARCHERY-ändpunkt: ", "133" => "NÃ¥dde rekursiv-loop-gräns medan ARCHERY-ändpunkt tillfrÃ¥gades: ", // icon titles "301" => "Ladda om", "302" => "Skriv ut", "303" => "Hjälp", "304" => "Stäng", "305" => "Röd", "306" => "GrÃ¥", "307" => "Alla användare", "308" => "Aktiva användare", "309" => "Sök", "310" => "Lagring", "311" => "VO:ar", "312" => "Flagga för ", "313" => " Gridprocesser and ", "314" => " lokala processer", // auxilliary strings "401" => "Processer", "402" => "Grid", "403" => "Lokala", "404" => "Världen", "405" => "TOTALT", "406" => " kluster", "407" => "en massa", "408" => " GB", "409" => " ALLA", "410" => "Kluster", "411" => "Kö", "412" => "Jobb", "413" => "Användare", "414" => "Lagring", "415" => "Replikakatalog", "416" => "Definera attribut att visa för objektet: ", "417" => "logiskt OCH av alla uttryck kommer att hittas", "418" => "Lämna det högra fältet tomt för att visa allt", "419" => "Visa resurser eller objekt enligt ditt val", "420" => "Särskijlande namn", "421" => "Kan använda totalt ", "422" => " kluster", "423" => "Resurs / objekt:", "424" => "Antal attribut (förval 6):", "425" => "Objekt", "426" => "Nästa", "427" => "Välj ett", "428" => "Ã…terställ", "429" => "VISA" ), // Post code conversion "tlconvert" => array ( "Australia" => "Australien", "Austria" => "Österrike", "Armenia" => "Armenien", "Algeria" => "Algeriet", "Belgium" => "Belgien", "Bulgaria" => "Bulgarien", "Canada" => "Canada", "Chile" => 
"Chile", "China" => "Kina", "Czechia" => "Tjeckien", "Denmark" => "Danmark", "Estonia" => "Estland", "Finland" => "Finland", "France" => "Frankrike", "Georgia" => "Georgien", "Germany" => "Tyskland", "Greece" => "Grekland", "HongKong" => "Hong Kong", "Hungary" => "Ungern", "Iceland" => "Island", "Ireland" => "Irland", "Israel" => "Israel", "Italy" => "Italien", "Japan" => "Japan", "Latvia" => "Lettland", "Lithuania" => "Litauen", "Morocco" => "Marocko", "Netherlands" => "Nederländerna", "Norway" => "Norge", "Poland" => "Polen", "Portugal" => "Portugal", "Romania" => "Rumänien", "Russia" => "Ryssland", "SriLanka" => "Sri Lanka", "Sweden" => "Sverige", "Slovakia" => "Slovakien", "Slovenia" => "Slovenien", "Spain" => "Spanien", "Switzerland" => "Schweiz", "Taiwan" => "Taiwan", "Turkey" => "Turkiet", "UK" => "Storbritannien", "Ukraine" => "Ukraina", "USA" => "USA", "World" => "Världen" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/lang/PaxHeaders/hu.inc0000644000000000000000000000013215067751327021504 xustar0030 mtime=1759498967.778492329 30 atime=1759498967.876493818 30 ctime=1759499030.798725391 nordugrid-arc-7.1.1/src/services/monitor/lang/hu.inc0000644000175000002070000014220215067751327023407 0ustar00mockbuildmock00000000000000 N/A:  ez utóbbi azt jelenti, hogy a felhasználó nem adott meg neki nevet.
    X:  ez azt jelenti, hogy a job-ot a tulajdonosa "megölte"
    !:  ez azt jelenti, hogy a job futása közben hiba lépett fel a rendszerben
    Kattintson a névre, hogy bővebb információt kapjon a job-ról."; $str_nam = "A felhasználó neve, ahogy az a személyes tanúsítványban meg van adva. Kattintson a névre azért, hogy megkapja a felhasználó számára elérhető összes erőforrás listáját és az összes hozzá tartozó job-ot, amely éppen jelen van a rendszerben."; $str_sta = "A job állapota, amit a Grid Menedzser (GM) és az LRMS ad vissza. Szekvenciális sorrendben az állapotok a következők:
    ACCEPTED – a job elküldve, de még nincs feldolgozás alatt
    PREPARING – bemeneti állományok kinyerése.
    SUBMITTING – az interakció az LRMS-sel folyamatban
    INLRMS – a job átküldve az LRMS-nek; a belső állapot hozzá lett adva az információs rendszer segítségével. A lehetséges állapotok:
    Q – a job a várakozósorban van
    U – a job egy felfüggesztett állapotban van egy elfoglalt csomópontban (PBSPro)
    S – a job egy felfüggesztett állapotban van (Condor)
    R – a job fut
    E – a job véget ért (PBS)
    FINISHING – a kimeneti fájlok átvitele megtörtént a GM segítségével
    FINISHED – a job véget ért; az időpecsétet hozzáadta az információs rendszer
    CANCELING – a job érvénytelenítve lett
    DELETED – a job-ot nem a felhasználó kérésére törölték, hanem a GM távolította el, mert lejárt a határideje
    Minden állapotot jelenteni lehet a PENDING állapottal, ez azt jelenti a GM számára, hogy a job-ot a következő állapotba próbálja meg átbillenteni"; $str_tim = "A job által lefoglalt CPU idő (perc)."; $str_mem = "A job által lefoglalt memória (KB)."; $str_cpu = "Azon processzorok száma, amit a job használ."; // Actual messages $message = array ( // Table headers and help (do not localize "0" and "help" keys) // For "help", keywords in
    must correspond to column titles below the help text ) "loadmon" => array( "0" => "Grid Monitorozó", "help" => "
    Ez a képernyő az összes olyan site-regisztrációt mutatja meg, amely a legfelső szintű ARC indexelő szolgáltatásoknál megjelenik; a lista először az ország, majd a hosztnév szerint van rendezve. A következő site-paraméterek vannak monitorozva: klaszteralias, teljes CPU-kapacitás és a futó ill. várakozósoros job-ok száma (Grid-es és helyi együttesen). Használja a &quot;Keresés&quot; segédeszközt, ha egyéb klaszter-, várakozósor-, job- stb. jellemzőket szeretne megjeleníteni.
    Ország
    ".$clickable.". Ország zászló és név, ez az elérhető leírásokból száramzik. Kattintson ide, hogy csak ennek az országnak az információit lássa.
    Site
    ".$clickable.". A klaszter álnevét a tulajdonos jelöli ki. Maximum 22 karakter hosszúságú lehet. Kattintson az aliasre, hogy részletesebb információt kapjon a klaszterről.
    CPU-k
    A klaszterben lévő összes CPU száma. Csak ezek töredékét tudják éppen elérni a grid-es felhasználók.
    Betöltés (feldolgoz: Grid+helyi)
    ".$clickable.". Relatív klaszter betöltés, megfelelően a foglalt CPU-k számához. A szürke sáv azt mutatja, hogy a processzorokat helyileg elküldött job-ok foglalják le, a piros sáv pedig azt, hogy a CPU-kat a grid-ről küldött job-ok foglalják le. Kattintson a sávra, hogy részletes információt kapjon a klaszteren futó Grid-es job-okról, ebben benne foglaltatik az is, hogy egy job-hoz hány darab processzor tartozik.
    Várólistán
    ".$clickable.". A klaszterben lévő összes várakozósoros job száma, megmutatja a várakozósoros grid-es job-okat plusz a helyileg elküldött várakozósoros job-okat. Kattintson az első számra ahhoz, hogy a klaszterben lévő várakozósoros grid-es jobok listáját megkapja.
    ", "Ország" => 30, "Site" => 160, "CPU-k" => 10, "Betöltés (feldolgoz: Grid+helyi)" => 210, "Várólistán" => 10 ), "clusdes" => array("0" => "Erőforrás részletek a következőkről:", "help" => "
    Attribútum
    ".$clickable.". Klaszter attribútum név".$str_att."
    Érték
    ".$str_val."
    Várakozósor
    ".$clickable.". Azon kötegelt várakozósoroknak a nevei, amik az ARC felhasználók számára elérhetőek, ezt a klaszter tulajdonosa állítja be. ".$str_que."
    Állapot
    A várakozósor állapota. A működő várakozósor tipikusan aktív állapotot jelez.
    Korlátok (min)
    Időkorlát a várakozósorbeli job-ok időtartamára; ha meg van adva, CPU-percben értendő. Az első megjelenő érték az alsó korlát, a második a felső korlát. Ha a korlátok nincsenek beállítva (bármilyen hosszúságú job elfogadásra kerül), ez így van jelölve: N/A.
    Futás
    Azon job-ok száma, amik a várakozósorban futnak. Az összes job számát megmutatja, a processzorok számával illetve a zárójelben jelzett grid-feladatokkal együtt, pl. (Grid: 12). Párhuzamos, többprocesszoros feladatok esetén a zárójelek közötti szám nagyobb is lehet, mint a feladatok száma
    Várólistán
    Azon job-ok száma, melyek a várakozósorban a futtatásra várnak. Az összes job száma látható, a zárójelben jelzett grid-feladatokkal együtt, például (Grid: 235)
    ", "Várakozósor" => 0, "LRMS várakozósor" => 0, "Ãllapot" => 0, "Korlátok (min)" => 0, "CPU-k" => 0, "Futás" => 0, "Várólistán" => 0 ), "jobstat" => array("0" => "Job helye:Job ID", "help" => "
    JOB LISTA
    Job név
    ".$clickable.". A job neve, amit a tulajdonos jelöl ki. Ha nincsen név kijelölve, akkor a következőt látjuk: "N/A". Kattintson a névre, hogy megkapja a job részletes leírását.
    Tulajdonos
    ".$clickable.". ".$str_nam."
    Állapot
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Várakozósor
    ".$clickable.". A kötegelt várakozósor neve, amiben a job végrehajtódott. ".$str_que."
    CPU-k
    ".$str_cpu."
    JOB RÉSZLETEK
    Attribútum
    ".$clickable.". Job attribútum név".$str_att."
    Érték
    ".$str_val."
    ", "Job név" => 0, "Tulajdonos" => 0, "Ãllapot" => 0, "CPU (min)" => 0, "Várakozósor" => 0, "CPU-k" => 0 ), "volist" => array("0" => "Virtuális szervezetek", "help" => "
    Virtuális szervezet
    ".$clickable.". Felhasználók csoportja, tipikusan közös feladatokat és erőforrásokat osztanak meg egymással, az engedélyezés legalább egy ARC-os site-on megtörténik. Kattintson a névre, hogy megkapja a csoport tajainak a listáját.
    Tagok
    A csoport tagjainak a száma.
    Kiszolgáló
    LDAP szerver, amely a csoporttagsági adatbázist szolgálja ki.
    ", "Virtuális szervezet" => 0, "Tagok" => 0, "Kiszolgáló" => 0 ), "vousers" => array("0" => "Grid User Base", "help" => "
    Név
    ".$clickable.". ".$str_nam."
    Kapcsolatok
    A felhasználók saját intézménye, amit egy VO menedzser ad meg. Üres is lehet.
    E-mail
    ".$clickable.". Felhasználók E-mail címe, amit egy VO menedzser ad meg. üres is lehet. Kattintson a címre, hogy levelet küldhessen a felhasználó E-mail címére.
    ", "#" => 0, "Név" => 0, "Kapcsolatok" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Információszerzés", "help" => "
    Klaszter:várakozósor
    ".$clickable.". A klaszterek nevei és a megfelelő várakozósorok(oszlopokkal elválasztva, ":"), ahol a felhasználó job küldésekre jogosult. Ha a felhasználó nem jogosult, akkor a következő üzenet fog megjelenni: "Nincs megfelelő jogosultsága ennél a hosztnál". Kattintson a klaszter nevére, hogy egy részletesebb leírást kapjon a klaszterről. Kattintson a várakozósor nevére, hogy egy részletesebb leírást kapjon a várakozósorról.
    Szabad CPU-k
    Ebben a pillanatban a felhasználó számára elérhető szabad CPU-k száma az adott várakozósorban, opcionálisan kiegészítve a felső időkorláttal (percben értendő). Például a &quot;3&quot; azt jelenti, hogy 3 CPU használható fel a job számára korlátlan ideig; a &quot;4:360&quot; azt mutatja, hogy 4 CPU-t tud felhasználni a job, de csak 6 órán keresztül; a &quot;10:180 30&quot; azt jelenti, hogy 10 CPU áll a job-ok rendelkezésére 3 órán keresztül, és ezen felül 30 CPU korlátlan időre; a &quot;0&quot; azt jelenti, hogy ebben a pillanatban nem áll rendelkezésre CPU, és a job-ok várakozólistára kerülnek.
    Várakozósorban elhelyezett job-ok
    A felhasználó azon job-jainak a száma, amelyeknek várhatóan várakozniuk kell egy újonnan elküldött job előtt a várakozási sorban. A &quot;0&quot; azt jelenti, hogy a job remélhetőleg azonnal lefuthat. Ez csupán becslés, amit a helyi irányelvek felülbírálhatnak.
    Szabad lemezterület (MB)
    A felhasználó számára elérhető szabad lemezterület az adott várakozósorban (MB). Ez csupán becslés, mivel a legtöbb klaszter nem biztosít rögzített lemezkvótákat.
    Job név
    ".$clickable.". ".$str_job."
    Állapot
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Klaszter
    ".$clickable.". A klaszter neve, amelyben a feladat éppen fut. Kattintson a klaszter nevére, hogy egy részletesebb leírást kapjon.
    Várakozósor
    ".$clickable.". Azon várakozósor neve, amiben a job lefuttot, vagy le fog futni. ".$str_que."
    CPU-k
    ".$str_cpu."
    ", "" => 0, "Job név" => 0, "Ãllapot" => 0, "CPU (min)" => 0, "Klaszter" => 0, "Várakozósor" => 0, "CPU-k" => 0 ), "attlist" => array("0" => "Attribútum értékek", "help" => "
    Objektum
    ".$clickable.". Az objektumok neve, ezek lesznek megjelenítve. Ez lehet klaszter név, egy klaszter várakozósorának a neve, egy job név, egy felhasználói név stb. Kattintson a szövegre, hogy egy részletesebb leírást kapjon az objektumról.
    Attribútumok
    Minden objektum számára egy vagy több attribútum értéket lehet kilistáztatni. Az oszlop címe egy emberi olvasásra szánt név (kivéve néhány MDS-specifikus attribútumot), az oszlopok attribútum értékeket tartalmaznak az adott objektumról, ahogy az az információs rendszerbe be lett írva.
    ", "Objektum" => 0, "Attribútum" => 0 ), "quelist" => array("0" => "Várakozósor", "help" => "
    Attribútum
    ".$clickable.". Egy várakozósor attribútumának a neve".$str_att."
    Érték
    ".$str_val."
    Job név
    ".$clickable.". ".$str_job."
    Tulajdonos
    ".$clickable.". ".$str_nam."
    Állapot
    ".$str_sta."
    CPU (min)
    ".$str_tim."
    Memória (KB)
    ".$str_mem."
    CPU-k
    ".$str_cpu."
    ", "" => 0, "Job név" => 0, "Tulajdonos" => 0, "Ãllapot" => 0, "CPU (min)" => 0, "Memória (KB)" => 0, "CPU-k" => 0 ), "sestat" => array("0" => "Adattároló elemek", "help" => "
    Alias
    Az adattároló elem álneve az információs rendszerben van meghatározva. Maximális megjeleníthető hosszúság: 15 karakter
    Összes lemezterület
    Összes lemezterület (GB).
    Szabad terület
    Pillanatnyilag elérhető szabad terület (GB).
    Név
    Adattároló elem neve, egy logikai névből és egy hoszt névből áll (kettősponttal elválasztva, &quot;:&quot;). A logikai nevet az információs rendszer használja azért, hogy megkülönböztesse a különböző adattároló elemeket ugyanazon a gépen.
    Alap URL
    Az adattároló elem URL-je, tipikusan a gsiftp:// protokollal. Alapértelmezés szerint ezt kell használni a fájlok eléréséhez.
    Típus
    Az adattároló elem típusa. A &quot;gridftp-based&quot; a GridFTP interfészen keresztül elérhető lemezes adattárolót jelzi.
    ", "#" => 0, "Alias" => 0, "Szabad/összes hely, GB" => 0, "Név" => 0, "Alap URL" => 0, "Típus" => 0 ), "allusers" => array("0" => "Megbízható Grid felhasználók:Aktív Grid felhasználók", "help" => "
    Név
    ".$clickable.". ".$str_nam."
    Kapcsolat
    A felhasználó hovatartozása (intézménye), amely a személyes tanúsítványából származik.
    Job-ok
    A felhasználó összes, a rendszerben lévő job-jának a száma (futó, függőben lévő, befejezett vagy törölt)
    Site-ok
    Megmutatja, hogy hány darab site engedélyezi ezt a felhasználót
    ", "#" => 0, "Név" => 0, "Kapcsolatok" => 0, "Job-ok" => 0, "Site-ok" => 0 ), "userres" => array("0" => "", "Klaszter:várakozósor" => 0, "Szabad CPU-k" => 0, "Várakozólistára helyezett job-ok" => 0, "Szabad lemezterület (MB)" => 0 ), "ldapdump" => array("0" => "", "Attribútum" => 0, "Érték" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "érvényes információ tÅ‘le (GMT)", "Mds-validto" => "érvényes információ neki (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end tartomány név", "nordugrid-cluster-aliasname" => "Klaszter alias", "nordugrid-cluster-contactstring" => "ElérhetÅ‘ségi szöveg", "nordugrid-cluster-interactive-contactstring" => "Interaktív elérhetÅ‘ség", "nordugrid-cluster-comment" => "Megjegyzés", "nordugrid-cluster-support" => "E-mail cím", "nordugrid-cluster-acl" => "Engedélyezett VO-k", "nordugrid-cluster-lrms-type" => "LRMS típus", "nordugrid-cluster-lrms-version" => "LRMS verzió", "nordugrid-cluster-lrms-config" => "LRMS részletek", "nordugrid-cluster-architecture" => "Architektúra", "nordugrid-cluster-opsys" => "Operációs rendszer", "nordugrid-cluster-homogeneity" => "Homogén klaszter", "nordugrid-cluster-nodecpu" => "CPU típus (leglassabb)", "nordugrid-cluster-nodememory" => "Memória (MB, legkisebb)", "nordugrid-cluster-totalcpus" => "CPU-k, összesen", "nordugrid-cluster-cpudistribution" => "CPU:gépek", "nordugrid-cluster-benchmark" => "Teljesítmény értékelés", "nordugrid-cluster-sessiondir-free" => "Lemez terület, elérhetÅ‘ (MB)", "nordugrid-cluster-sessiondir-total" => "Lemez terület, összesen (MB)", "nordugrid-cluster-sessiondir-lifetime"=> "Grid session élettartam (min)", "nordugrid-cluster-cache-free" => "Cache méret, elérhetÅ‘ (MB)", "nordugrid-cluster-cache-total" => "Cache méret, összesen (MB)", "nordugrid-cluster-runtimeenvironment" => "Futásidejű környezet", "nordugrid-cluster-localse" => "Adattárolási elem, helyi", "nordugrid-cluster-middleware" => "Grid köztesréteg", "nordugrid-cluster-totaljobs" => "Job-ok, teljes összeg", "nordugrid-cluster-usedcpus" => "CPU-k, foglalt", "nordugrid-cluster-queuedjobs" => "Job-ok, várakozólistás", "nordugrid-cluster-prelrmsqueued" => "Grid job-ok, várakozó részfeladat", "nordugrid-cluster-location" => "Irányítószám", "nordugrid-cluster-owner" => "Tulajdonos", "nordugrid-cluster-issuerca" => "Tanúsítvány kibocsájtó", "nordugrid-cluster-issuerca-hash" => "Tanúsítvány kibocsájtó-s hash", "nordugrid-cluster-trustedca" => "Megbízható tanúsítvány kibocsájtók", "nordugrid-cluster-nodeaccess" => "Csomópont IP összekapcsolhatóság", "nordugrid-cluster-gridarea" => "Session terület (ELAVULT)", "nordugrid-cluster-gridspace" => "Grid lemez terület (ELAVULT)", "nordugrid-cluster-opsysdistribution" => "OS disztribúció (ELAVULT)", "nordugrid-cluster-runningjobs" => "Job-ok, futás (ELAVULT)", "nordugrid-cluster-credentialexpirationtime" => "Credential expiration time", "nordugrid-queue-name" => "A várakozósor neve", "nordugrid-queue-comment" => "Megjegyzés", "nordugrid-queue-status" => "A várakozósor állapota", "nordugrid-queue-running" => "CPU-k, foglalt", "nordugrid-queue-localqueued" => "Helyi job-ok, várakozólistás", "nordugrid-queue-prelrmsqueued" => "Grid job-ok, várakozó részfeladat", "nordugrid-queue-queued" => "Job-ok, várakozólistás (ELAVULT)", "nordugrid-queue-maxrunning" => "Job-ok, futás(max)", "nordugrid-queue-maxqueuable" => "Job-ok, várakozólistába tehetÅ‘ (max)", "nordugrid-queue-maxuserrun" => "Unix felhasználókénti job-ok (max)", 
"nordugrid-queue-maxcputime" => "CPU idÅ‘, max. (perc)", "nordugrid-queue-mincputime" => "CPU idÅ‘, min. (perc)", "nordugrid-queue-defaultcputime" => "CPU idÅ‘, alap. (perc)", "nordugrid-queue-maxwalltime" => ""Wall clock" idÅ‘, max. (perc)", "nordugrid-queue-minwalltime" => ""Wall clock" idÅ‘, min. (perc)", "nordugrid-queue-defaultwalltime" => ""Wall clock" idÅ‘, alap. (perc)", "nordugrid-queue-schedulingpolicy" => "Ütemezési politika", "nordugrid-queue-totalcpus" => "CPU-k, összesen", "nordugrid-queue-nodecpu" => "CPU típusa", "nordugrid-queue-nodememory" => "Memória (MB)", "nordugrid-queue-architecture" => "Architektúra", "nordugrid-queue-opsys" => "Operációs rendszer", "nordugrid-queue-homogeneity" => "Homogén várakozósor", "nordugrid-queue-gridrunning" => "CPUs occupied by Grid jobs", "nordugrid-queue-gridqueued" => "Grid job-ok, várakozólistás", "nordugrid-queue-benchmark" => "Teljesítmény értékelés", "nordugrid-queue-assignedcpunumber" => "Várakozósoronkénti CPU-k (ELAVULT)", "nordugrid-queue-assignedcputype" => "CPU típus (ELAVULT)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Tulajdonos", "nordugrid-job-execcluster" => "Végrehajtási klaszter", "nordugrid-job-execqueue" => "Végrehajtási várakozósor", "nordugrid-job-stdout" => "Szabványos kimeneti fájl", "nordugrid-job-stderr" => "Szabványos hiba fájl", "nordugrid-job-stdin" => "Szabványos bemeneti fájl", "nordugrid-job-reqcputime" => "Kért CPU idÅ‘", "nordugrid-job-reqwalltime" => "Kért $quot;wall clock$quot; idÅ‘", "nordugrid-job-status" => "Ãllapot", "nordugrid-job-queuerank" => "A városkozási sorban lévÅ‘ pozíciója", "nordugrid-job-comment" => "LRMS megjegyzés", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Részfeladat idÅ‘ (GMT)", "nordugrid-job-usedcputime" => "Felhasznált CPU idÅ‘", "nordugrid-job-usedwalltime" => "Felhasznált "wall clock" idÅ‘", "nordugrid-job-completiontime" => "Elkészítési idÅ‘ (GMT)", "nordugrid-job-sessiondirerasetime" => "Törlési idÅ‘ (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy lejárati idÅ‘ (GMT)", "nordugrid-job-usedmem" => "Felhasznált memória (KB)", "nordugrid-job-errors" => "Hibák", "nordugrid-job-exitcode" => "Kilépési kód", "nordugrid-job-jobname" => "Név", "nordugrid-job-runtimeenvironment" => "Futásidejű környezet", "nordugrid-job-cpucount" => "Kért CPU-k", "nordugrid-job-executionnodes" => "Végrehajtási csomópontok", "nordugrid-job-gmlog" => "GM napló fájl", "nordugrid-job-clientsoftware" => "Kliens verzió", "nordugrid-job-rerunable" => "újra futtatható", "nordugrid-job-reqcput" => "Kért idÅ‘ (ELAVULT)", "nordugrid-job-gridlog" => "Grid napló fájl (ELAVULT)", "nordugrid-job-lrmscomment" => "LRMS megjegyzés (ELAVULT)", "nordugrid-authuser-name" => "Név", "nordugrid-authuser-sn" => "Téma neve", "nordugrid-authuser-freecpus" => "Szabad CPU-k", "nordugrid-authuser-diskspace" => "Szabad hely (MB)", "nordugrid-authuser-queuelength" => "A felhasználó várakozósoros job-jai", "nordugrid-se-name" => "Név", "nordugrid-se-aliasname" => "Az adattároló elem álneve", "nordugrid-se-type" => "Az adattároló elem típusa", "nordugrid-se-acl" => "Engedélyezett VO-k", "nordugrid-se-freespace" => "Szabad hely (MB)", "nordugrid-se-totalspace" => "Összes lemezterület (MB)", "nordugrid-se-url" => "URL elérhetÅ‘ség", "nordugrid-se-baseurl" => "URL elérhetÅ‘ség (ELAVULT)", "nordugrid-se-accesscontrol" => "Hozzáférés ellenÅ‘rzése", "nordugrid-se-authuser" => "Engedélyezett felhasználó (DN)", "nordugrid-se-location" => "Irányítószám", 
"nordugrid-se-owner" => "Tulajdonos", "nordugrid-se-middleware" => "Köztesréteg", "nordugrid-se-issuerca" => "Tanúsítvány kibocsátó", "nordugrid-se-issuerca-hash" => "Tanúsítvány kibocsátó hash-e", "nordugrid-se-trustedca" => "Megbízható tanúsítvány kibocsájtók", "nordugrid-se-comment" => "Megjegyzés", "nordugrid-rc-name" => "Tartomány név", "nordugrid-rc-aliasname" => "Replika katalógus alias", "nordugrid-rc-baseurl" => "URL elérhetÅ‘ség", "nordugrid-rc-authuser" => "Engedélyezett felhasználó (DN)", "nordugrid-rc-location" => "Irányítószám", "nordugrid-rc-owner" => "Tulajdonos", "nordugrid-rc-issuerca" => "Tanúsítvány kibocsátó" ), // Errors, warnings etc "errors" => array( // failure notices "1" => "Nem tudom olvasni a top level GIS és EMIR szerverek index szolgáltatásait", "2" => "Egyik helyi index sem jelzett vissza kapcsolatot", "3" => " rossz konfiguráció, vagy kérési idÅ‘túllépés", "4" => "Grid-es job nem található", "5" => "Nincs információ", "6" => "A szervert nem lehet elérni", "7" => " - frissítés késÅ‘bb", "8" => "Nincs információ a várakozási sorról", "9" => "Nem található bejegyzés", "10" => "Nincs felhasználó", "11" => "Nincs megfelelÅ‘ jogosultsága ennél a hosztnál ", "12" => "nincs válasz", "13" => "Nincsenek nemrégi feladatok ", // debug messages "101" => " Monitorozási idÅ‘túllépések a GRIS esetén: ", "102" => " mp kapcsolódáskor és ", "103" => " mp kereséskor", "104" => " mp (keresésre szánt idÅ‘)", "105" => "Az erÅ‘forrásokat csupán a következÅ‘ben mutatja meg ", "106" => "Lekérdezett felsÅ‘szintű indexek: ", "107" => "Kapott földrajzi helyek, átvizsgált site-ok: ", "108" => " site-ok intézése földrajzi helyek szerint", "109" => "Klaszter attribútumok keresése", "110" => "A várakozási sor attribútumainak a keresése", "111" => "Nincs adat errÅ‘l ", "112" => " működÅ‘képes ", "113" => " nincs erÅ‘forrása, amit felkínálhat", "114" => " Monitorozási idÅ‘túllépések a GIIS esetén: ", "115" => "GRIS kihagyása: ", "116" => "nem egy ", "117" => "Kapcsolat ellenÅ‘rzése: ", "118" => "OK", "119" => "Eddig, ebbÅ‘l a fajta erÅ‘forrásból ", "120" => "LDAP hiba keresése ", "121" => " állapot ", "122" => "Fekete listára került: ", "123" => "Regisztálót találtam a következÅ‘ számára ", "124" => "SE-s attribútumok keresése", "125" => "Felhasználók keresése", "126" => "Jobok keresése", "127" => " van job-ja ", "128" => " amíg nincsen engedélyezve ", "129" => "Nem lehet elérni az objektum adatait: hiba ", "130" => "Monitorozási idÅ‘túllépések az EMIR esetén: ", // icon titles "301" => "Frissítés", "302" => "Nyomtatás", "303" => "Súgó", "304" => "Bezár", "305" => "Piros", "306" => "Szürke", "307" => "Minden felhasználó", "308" => "Aktív felhasználók", "309" => "Keresés", "310" => "Adattároló", "311" => "VO-k", "312" => "Zászlaja ", "313" => " Grid-es feldolgozás és ", "314" => " helyi feldolgozás", // auxiliary strings "401" => "Feldolgoz", "402" => "Grid", "403" => "Helyi", "404" => "Világ", "405" => "TELJES", "406" => " site-ok", "407" => "rengeteg", "408" => " GB", "409" => " MIND", "410" => "Klaszter", "411" => "Várakozási sor", "412" => "Job", "413" => "Felhasználó", "414" => "Adattároló", "415" => "Replika katalógus", "416" => "Attribútumok megadása az objektum megjelenítése miatt: ", "417" => "Minden kifejezés összevetésre fog kerülni", "418" => "A jobb szélsÅ‘ mezÅ‘t hagyja üresen azért, hogy mindent lásson", "419" => "A kiválasztott erÅ‘források vagy objektumok megjelenítése", "420" => "MegkülönböztetÅ‘ név", "421" => "Összesen használni tud ", "422" => " site-ot", "423" => 
"ErÅ‘forrás / objektum:", "424" => "Attribútumok száma (alap.: 6):", "425" => "Objektum", "426" => "KövetkezÅ‘", "427" => "Válassz ki egyet", "428" => "Törlés", "429" => "Mutat" ), // Post code conversion "tlconvert" => array ( "Australia" => "Ausztrália", "Austria" => "Ausztria", "Armenia" => "Örményország", "Algeria" => "Algéria", "Belgium" => "Belgium", "Bulgaria" => "Bulgária", "Canada" => "Kanada", "China" => "Kína", "Czechia" => "Cseszlovákia", "Denmark" => "Dánia", "Estonia" => "észtország", "Finland" => "Finnország", "France" => "Franciaország", "Georgia" => "GrúÉzia", "Germany" => "Németország", "Greece" => "Görögország", "Hungary" => "Magyarország", "Iceland" => "Izland", "Ireland" => "írország", "Italy" => "Olaszország", "Japan" => "Japán", "Latvia" => "Lettország", "Lithuania" => "Litvánia", "Morocco" => "Marokkó", "Netherlands" => "Hollandia", "Norway" => "Norvégia", "Poland" => "Lengyelország", "Portugal" => "Portugália", "Romania" => "Románia", "Russia" => "Oroszország", "SriLanka" => "Sri Lanka", "Sweden" => "Svédország", "Slovakia" => "Szlovákia", "Slovenia" => "Szlovénia", "Switzerland" => "Svájc", "Turkey" => "Törökország", "UK" => "UK", "Ukraine" => "Ukrajna", "USA" => "USA" ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/allusers.php0000644000000000000000000000013215067751327022017 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.604051643 nordugrid-arc-7.1.1/src/services/monitor/allusers.php0000644000175000002070000001426515067751327023731 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; $yazyk = &$toppage->language; // Array defining the attributes to be returned $lim = array( "dn", USR_USSN ); /* need only SN per each user */ $jlim = array( "dn", JOB_GOWN ); /* Job owner only is needed */ if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 20; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Header table $titles = explode(":",$toptitle); // two alternative titles, separated by column $gtitle = $titles[0]; if ( $jobnum ) $gtitle = $titles[1]; $toppage->tabletop($gtitle,""); $family = cnvname($ussn); // ldapsearch filter string for jobs $filter = "(objectclass=".OBJ_USER.")"; /* Find all users */ $jfilter = "(objectclass=".OBJ_AJOB.")"; /* Find all jobs */ $gentries = recursive_giis_info($giislist,"cluster",$errors,$debug); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $ldapuri = "ldap://".$clhost.":".$clport; $clconn = ldap_connect($ldapuri); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters for users $uiarray = array(); $ts1 = time(); $uiarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["125"]." (".($ts2-$ts1).$errors["104"].")
    "); // Search all clusters for jobs $jiarray = array(); $ts1 = time(); $jiarray = @ldap_search($dsarray,DN_LOCAL,$jfilter,$jlim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["126"]." (".($ts2-$ts1).$errors["104"].")
    "); // Loop on clusters; building user list $usrlist = array (); for ( $ids = 0; $ids < $nhosts; $ids++ ) { $ui = array (); $ui = $uiarray[$ids]; $ji = array (); $ji = $jiarray[$ids]; $dst = array (); $dst = $dsarray[$ids]; if ($dst && $ui) { $nusers = @ldap_count_entries($dst,$ui); $njobs = @ldap_count_entries($dst,$ji); if ($nusers > 0 || $njobs > 0) { // If there are valid entries, tabulate results $allres = array(); $allres = @ldap_get_entries($dst,$ui); $results = ldap_purge($allres,USR_USSN,$debug); $alljobs = array(); $alljobs = @ldap_get_entries($dst,$ji); // $nusers = $allres["count"]; $nusers = $results["count"]; $njobs = $alljobs["count"]; // loop on users, filling $usrlist[$ussn]["name"] and counting $usrlist[$ussn]["hosts"] for ($j=0; $j<$nusers; $j++) { // $ussn = $allres[$j][USR_USSN][0]; $ussn = $results[$j][USR_USSN][0]; $family = cnvname($ussn, 2); if ( $family == "host" || strlen($family) < 2 ) continue; $ussn = trim($ussn); $ussn = addslashes($ussn); // In case $ussn contains escape characters if ( !$usrlist[$ussn] ) { $usrlist[$ussn]["name"] = $family; $usrlist[$ussn]["org"] = getorg($ussn); $usrlist[$ussn]["jobs"] = 0; $usrlist[$ussn]["hosts"] = 0; } $usrlist[$ussn]["hosts"]++; } // loop on jobs, filling $usrlist[$jown]["jobs"] for ($k=0; $k<$njobs; $k++) { $jdn = $alljobs[$k]["dn"]; $jown = $alljobs[$k][JOB_GOWN][0]; $family = cnvname($jown, 2); if ( $family == "host" || strlen($family) < 2 ) continue; $jown = addslashes($jown); // In case $jown contains escape characters if ( !$usrlist[$jown] ) { // Shouldn't be happening, but... $usrlist[$jown]["name"] = $family; $usrlist[$jown]["org"] = getorg($jown); $usrlist[$jown]["jobs"] = 0; if( $debug == 2 ) dbgmsg("$family".$errors["127"]."$jdn".$errors["128"]."
    "); } $usrlist[$jown]["jobs"]++; } } } } uasort($usrlist,"hncmp"); // HTML table initialisation $utable = new LmTableSp($module,$toppage->$module); $urowcont = array(); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $count = 0; foreach ( $usrlist as $ussn => $data ) { // if ( $count > 9 ) continue; $name = $data["name"]; $org = $data["org"]; $nhosts = 0; $nhosts = $data["hosts"]; $jcount = 0; $jcount = $data["jobs"]; if ( $jcount < $jobnum ) continue; /* In case list only those with jobs */ $count++; $encuname = rawurlencode($ussn); $usrwin = popup("userlist.php?owner=$encuname",700,500,5,$lang,$debug); $urowcont[] = $count; $urowcont[] = "$name"; $urowcont[] = $org; $urowcont[] = $jcount; $urowcont[] = $nhosts; $utable->addrow($urowcont); $urowcont = array(); } $utable->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/man0000644000000000000000000000013115067751426020151 xustar0030 mtime=1759499030.654447732 29 atime=1759499034.76351017 30 ctime=1759499030.654447732 nordugrid-arc-7.1.1/src/services/monitor/man/0000755000175000002070000000000015067751426022131 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/man/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022263 xustar0030 mtime=1759498967.779492344 30 atime=1759498967.878493848 30 ctime=1759499030.652497408 nordugrid-arc-7.1.1/src/services/monitor/man/Makefile.am0000644000175000002070000000002515067751327024162 0ustar00mockbuildmock00000000000000man_MANS = monitor.7 nordugrid-arc-7.1.1/src/services/monitor/man/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357022277 xustar0030 mtime=1759498991.203166952 30 atime=1759499019.397276679 30 ctime=1759499030.653718606 nordugrid-arc-7.1.1/src/services/monitor/man/Makefile.in0000644000175000002070000005263315067751357024212 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/monitor/man ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = monitor.7 CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| 
$$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man7dir = $(mandir)/man7 am__installdirs = "$(DESTDIR)$(man7dir)" NROFF = nroff MANS = $(man_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/monitor.7.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ 
GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = 
@SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ man_MANS = monitor.7 all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/monitor/man/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/monitor/man/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): monitor.7: $(top_builddir)/config.status $(srcdir)/monitor.7.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man7: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man7dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man7dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man7dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.7[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man7dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man7dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man7dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man7dir)" || exit $$?; }; \ done; } uninstall-man7: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man7dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.7[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^7][0-9a-z]*$$,7,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man7dir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man7dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man7 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man7 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man7 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am uninstall-man \ uninstall-man7 .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/monitor/man/PaxHeaders/monitor.7.in0000644000000000000000000000013115067751327022412 xustar0030 mtime=1759498967.780074486 30 atime=1759498967.878493848 29 ctime=1759499030.65501471 nordugrid-arc-7.1.1/src/services/monitor/man/monitor.7.in0000644000175000002070000000367415067751327024317 0ustar00mockbuildmock00000000000000.TH monitor 7 "2003-03-03" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME monitor \- Real-time NorduGrid monitoring tool .SH DESCRIPTION .B "LDAP Grid Monitor" is a set of .B PHP and .B Java scripts, providing a Web interface to the .B NorduGrid Information System. It should work with any similar .B LDAP based service. .SH REQUIREMENTS .IP "LDAP library" e.g., http://www.openldap.org .IP "GD library" http://www.boutell.com/gd .IP "PHP4 library" http://www.php.net, must be compiled with LDAP and GD extensions .IP "HTTP server" must be compiled with PHP4 .IP "Globus MDS" http://www.globus.org/mds, or a similar .B LDAP based service .IP "Virtual Organisation" Optional .SH INSTALLATION Copy all the files into a folder accessible by the HTTP server. Modify .I settings.inc according to your MDS structure and preferences. Start the monitor by loading .I loadmon.php in your favorite browser. .SH FILES .I loadmon.php .RS To monitor several servers at once, add hosts and DNs to the .IR $arrhost and, correspondingly, .IR $arrbdn arrays in .I loadmon.php .RE .I isattr.inc .I cnvname.inc .I cnvalias.inc .RS To make the output more human-readable, modify .IR isattr.inc, .IR cnvname.inc, .IR cnvalias.inc. Otherwise, these files are not needed. .RE .I blacklist.inc .RS To prevent sites from being polled, modify array entries in .IR blacklist.inc. Otherwise, the file is not needed. .RE .I vo-users.php .RS Not needed when working without a Virtual Organisation. In such a case, remove the corresponding link from .I loadmon.php . .RE .I jobstat.php .RS When working without the .B NorduGrid Information System: to make sure that the job status is defined properly, edit .I jobstat.php (look for .B adjustment instructions in the code). .RE .SH AUTHOR Oxana Smirnova .SH "SEE ALSO" .BR ngsub (1), .BR ngstat (1), .BR ngdel (1), .BR ngget (1), .BR ngsync (1), .BR ngcopy (1), .BR ngremove (1) nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357021524 xustar0030 mtime=1759498991.110118604 30 atime=1759499019.337275767 30 ctime=1759499030.622447246 nordugrid-arc-7.1.1/src/services/monitor/Makefile.in0000644000175000002070000007020715067751357023434 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/monitor ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_monitor_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = monitor README CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; 
am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(monitordir)" "$(DESTDIR)$(monitordir)" DATA = $(dist_monitor_DATA) $(monitor_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/README.in \ $(srcdir)/monitor.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ 
FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = 
@S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SUBDIRS = man mon-icons lang includes monitordir = @monitor_prefix@ dist_monitor_DATA = $(srcdir)/*.php $(srcdir)/*.js monitor_DATA = README all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/monitor/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/monitor/Makefile Makefile: 
$(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): monitor: $(top_builddir)/config.status $(srcdir)/monitor.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ README: $(top_builddir)/config.status $(srcdir)/README.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_monitorDATA: $(dist_monitor_DATA) @$(NORMAL_INSTALL) @list='$(dist_monitor_DATA)'; test -n "$(monitordir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(monitordir)'"; \ $(MKDIR_P) "$(DESTDIR)$(monitordir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitordir)" || exit $$?; \ done uninstall-dist_monitorDATA: @$(NORMAL_UNINSTALL) @list='$(dist_monitor_DATA)'; test -n "$(monitordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(monitordir)'; $(am__uninstall_files_from_dir) install-monitorDATA: $(monitor_DATA) @$(NORMAL_INSTALL) @list='$(monitor_DATA)'; test -n "$(monitordir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(monitordir)'"; \ $(MKDIR_P) "$(DESTDIR)$(monitordir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitordir)" || exit $$?; \ done uninstall-monitorDATA: @$(NORMAL_UNINSTALL) @list='$(monitor_DATA)'; test -n "$(monitordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(monitordir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(monitordir)" "$(DESTDIR)$(monitordir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-data-local install-dist_monitorDATA \ install-monitorDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_monitorDATA uninstall-monitorDATA .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-data-local install-dist_monitorDATA install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-monitorDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-dist_monitorDATA uninstall-monitorDATA .PRECIOUS: Makefile install-data-local: $(MKDIR_P) $(DESTDIR)$(monitordir)/cache # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/help.php0000644000000000000000000000013215067751327021115 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.608946685 nordugrid-arc-7.1.1/src/services/monitor/help.php0000644000175000002070000000072615067751327023024 0ustar00mockbuildmock00000000000000$module; $helptext = $data["help"]; echo $helptext; // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/volist.php0000644000000000000000000000013215067751327021505 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.618433461 nordugrid-arc-7.1.1/src/services/monitor/volist.php0000644000175000002070000001444615067751327023420 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("".$toptitle."

    "); // The main function $vos = array ( array ( "name" => "NorduGrid members", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=people,dc=nordugrid,dc=org" ), array ( "name" => "NorduGrid guests", "server" => "https://www.pdc.kth.se/grid/swegrid-vo", "port" => "", "dn" => "" ), array ( "name" => "NorduGrid developers", "server" => "http://www.nordugrid.org", "port" => "", "dn" => "", "group" => "developers.dn" ), array ( "name" => "NorduGrid tutorials", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=tutorial,dc=nordugrid,dc=org" ), array ( "name" => "ATLAS test users (SWEGRID)", "server" => "https://www.pdc.kth.se", "port" => "", "dn" => "", "group" => "grid/swegrid-vo/vo.atlas-testusers-vo" ), /* array ( "name" => "NorduGrid services", "server" => "grid-vo.nordugrid.org", "port" => "389", "dn" => "ou=services,dc=nordugrid,dc=org" ), */ array ( "name" => "BaBar", "server" => "babar-vo.gridpp.ac.uk", "port" => "389", "dn" => "ou=babar,dc=gridpp,dc=ac,dc=uk" ), array ( "name" => "EDG ALICE", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=alice,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG ATLAS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=atlas,dc=eu-datagrid,dc=org" ), array ( "name" => "LCG ATLAS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=atlas,dc=eu-datagrid,dc=org", "group" => "ou=lcg1" ), array ( "name" => "EDG CMS", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=cms,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG LHC-B", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=lhcb,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG D0", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=dzero,dc=eu-datagrid,dc=org", "group" => "ou=testbed1" ), array ( "name" => "EDG Earth Observation", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=earthob,dc=eu-datagrid,dc=org" ), array ( "name" => "EDG Genomics", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=biomedical,dc=eu-datagrid,dc=org", "group" => "ou=genomics" ), array ( "name" => "EDG Medical Imaging", "server" => "grid-vo.nikhef.nl", "port" => "389", "dn" => "o=biomedical,dc=eu-datagrid,dc=org", "group" => "ou=medical imaging" ), array ( "name" => "EDG ITeam", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=ITeam" ), array ( "name" => "EDG TSTG", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=TSTG" ), array ( "name" => "EDG Tutorials", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=EDGtutorial" ), array ( "name" => "EDG WP6", "server" => "marianne.in2p3.fr", "port" => "389", "dn" => "o=testbed,dc=eu-datagrid,dc=org", "group" => "ou=wp6" ) ); $votable = new LmTableSp($module,$toppage->$module); $rowcont = array (); foreach ( $vos as $contact ) { $server = $contact["server"]; $port = $contact["port"]; $dn = $contact["dn"]; $group = ""; if ( !empty($contact["group"]) ) $group = $contact["group"]; $nusers = 0; if ( $dn ) { // open ldap connection $ldapuri = "ldap://".$server.":".$port; $ds = ldap_connect($ldapuri); if ($ds) { if ( $group ) { $newfilter = "(objectclass=*)"; $newdn = $group.",".$dn; $newlim = array("dn","member"); $sr = @ldap_search($ds,$newdn,$newfilter,$newlim,0,0,10,LDAP_DEREF_NEVER); if ($sr) { $groupdesc = @ldap_get_entries($ds,$sr); $nusers = $groupdesc[0]["member"]["count"]; } } else { $sr = 
@ldap_search($ds,$dn,"(objectclass=organizationalPerson)",array("dn"),0,0,10,LDAP_DEREF_NEVER); if ($sr) $nusers = @ldap_count_entries($ds,$sr); } } $vostring = popup("vo-users.php?host=$server&port=$port&vo=$dn&group=$group",750,300,6,$lang,$debug); } else { $url = $server."/".$group; $users = file($url); $nusers = 0; if ( !empty($users) ) $nusers = count($users); $vostring = popup($url,750,300,6,$lang,$debug); } $rowcont[] = "".$contact["name"].""; $rowcont[] = $nusers; $rowcont[] = $server; $votable->addrow($rowcont); $rowcont = array (); } $votable->close(); $toppage->close(); /* group http://www.nbi.dk/~waananen/ngssc2003.txt ### Datagrid VO Groups and their user mappings */ ?>nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/clusdes.php0000644000000000000000000000013215067751327021627 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.606532122 nordugrid-arc-7.1.1/src/services/monitor/clusdes.php0000644000175000002070000002336315067751327023540 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("","".$toptitle." $host"); //TODO add translation string here echo "

    [Scroll down to queues/shares table]

    "; // Array defining the attributes to be returned $qlim = array( QUE_NAME, QUE_QUED, QUE_GQUE, QUE_PQUE, QUE_LQUE, QUE_RUNG, QUE_GRUN, QUE_ASCP, QUE_MAXT, QUE_MINT, QUE_STAT ); // ldapsearch filter strings for cluster and queues $qfilter = "(objectclass=".OBJ_QUEU.")"; $dn = DN_LOCAL; if ($schema == "GLUE2") { $qlim = array( GQUE_NAME, GQUE_MAPQ, GQUE_STAT, GQUE_RUNG, GQUE_MAXR, GQUE_LQUE, GQUE_LRUN, GQUE_PQUE, GQUE_QUED, GQUE_MAXQ, GQUE_MINT, GQUE_MAXT, GQUE_ENVK ); $elim = array( EENV_ID, EENV_LCPU, EENV_PCPU, EENV_TINS ); // ldapsearch filter strings for Shares and ExecutionEnvironments $qfilter = "(objectclass=".GOBJ_QUEU.")"; $efilter = "(objectclass=".GOBJ_EENV.")"; $dn = DN_GLUE; } if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // establish connection to the requested LDAP server $chost = $host; if ( $isse ) $chost=substr(strstr($host,":"),1); $ldapuri = "ldap://".$chost.":".$port; $ds = ldap_connect($ldapuri); if ($ds) { // If contact OK, search for clusters $ts1 = time(); if ( $isse ) { $exclude = array(SEL_USER); if ( $dn == DN_LOCAL ) $thisdn = ldap_nice_dump($strings,$ds,SEL_NAME."=".$host.",".$dn,$exclude); /** * Storage not supported in GLUE2 * if ( $dn == DN_GLUE ) { * $querydn = SEL_NAME."=".$host.":arex,GLUE2GroupID=services,".DN_GLUE;//TODO: change SEL_NAME * $thisdn = ldap_nice_dump($strings,$ds,$querydn,$exclude); * } */ // if it is a cluster } else { if ( $dn == DN_LOCAL ) $thisdn = ldap_nice_dump($strings,$ds,CLU_NAME."=".$host.",".$dn); if ( $dn == DN_GLUE ) { $querydn = "GLUE2ServiceID=urn:ogf:ComputingService:".$host.":arex,GLUE2GroupID=services,".DN_GLUE; $thisdn = ldap_nice_dump($strings,$ds,$querydn); } } $ts2 = time(); if($debug) dbgmsg("
    ".$errors["109"]." (".($ts2-$ts1).$errors["104"].")
    "); if ( strlen($thisdn) < 4 && $debug ) dbgmsg("
    ".$errors["129"].$thisdn."

    "); echo "
    "; // Loop on queues/shares (if everything works) if ($thisdn != 1 && !$isse) { $ts1 = time(); $qsr = @ldap_search($ds,$dn,$qfilter,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); // Only for GLUE2, search ExecutionEnvironments if ( $dn == DN_GLUE ) $esr = @ldap_search($ds,$dn,$efilter,$elim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); // Fall back to conventional LDAP // if (!$qsr) $qsr = @ldap_search($ds,$dn,$qfilter,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); } // only for GLUE2, store executionenvironments in $envs for later use if (($dn == DN_GLUE ) && ($esr) ) { $nematch = @ldap_count_entries($ds,$esr); if ($nematch > 0) { $envs = array(); // TODO: If there are valid entries, save them in an array for later use, reorder with ID as primary key $eentries = @ldap_get_entries($ds,$esr); $nenvs = $eentries["count"]; for ($k=0; $k<$nenvs+1; $k++) { $envs[$eentries[$k][EENV_ID][0]] = array( EENV_LCPU => $eentries[$k][EENV_LCPU][0], EENV_PCPU => $eentries[$k][EENV_PCPU][0], EENV_TINS => $eentries[$k][EENV_TINS][0] ); } } else { // TODO: add error strings to errors file, for translation //if($debug) dbgmsg("
    ".$errors["TODO-ERR1"]."
    "); if($debug) dbgmsg("
    No ExecutionEnvironments found
    "); } } if ($qsr) { // If search returned, check that there are valid entries $nqmatch = @ldap_count_entries($ds,$qsr); if ($nqmatch > 0) { // If there are valid entries, tabulate results $qentries = @ldap_get_entries($ds,$qsr); $nqueues = $qentries["count"]; // HTML table initialisation echo ""; $qtable = new LmTableSp($module,$toppage->$module,$schema); // loop on the rest of attributes // some sorting, diversified depending on schema if ($dn == DN_LOCAL) { define("CMPKEY",QUE_MAXT); usort($qentries,"quetcmp"); } elseif ($dn == DN_GLUE) { // suprisingly, sorting buy dn did the trick for queues... usort($qentries,"dncmp"); } else { // TODO: add error strings to errors file, for translation //if($debug) dbgmsg("
    ".$errors["TODO-ERR2"]."
    "); if($debug) dbgmsg("
    Sorting of queues/shares failed
    "); } for ($k=1; $k<$nqueues+1; $k++) { if ( $dn == DN_LOCAL ) { $qname = $qentries[$k][QUE_NAME][0]; $qstatus = $qentries[$k][QUE_STAT][0]; // $queued = @$qentries[$k][QUE_QUED][0]; $queued = @($qentries[$k][QUE_QUED][0]) ? ($entries[$k][QUE_QUED][0]) : 0; /* deprecated since 0.5.38 */ $locque = @($qentries[$k][QUE_LQUE][0]) ? ($qentries[$k][QUE_LQUE][0]) : 0; /* new since 0.5.38 */ $run = @($qentries[$k][QUE_RUNG][0]) ? ($qentries[$k][QUE_RUNG][0]) : 0; $cpumin = @($qentries[$k][QUE_MINT][0]) ? $qentries[$k][QUE_MINT][0] : "0"; $cpumax = @($qentries[$k][QUE_MAXT][0]) ? $qentries[$k][QUE_MAXT][0] : ">"; $cpu = @($qentries[$k][QUE_ASCP][0]) ? $qentries[$k][QUE_ASCP][0] : "N/A"; $gridque = @($qentries[$k][QUE_GQUE][0]) ? $qentries[$k][QUE_GQUE][0] : "0"; $gmque = @($qentries[$k][QUE_PQUE][0]) ? ($qentries[$k][QUE_PQUE][0]) : 0; /* new since 0.5.38 */ $gridrun = @($qentries[$k][QUE_GRUN][0]) ? $qentries[$k][QUE_GRUN][0] : "0"; $quewin = popup("quelist.php?host=$host&port=$port&qname=$qname&schema=$schema",750,430,6,$lang,$debug); } if ( $dn == DN_GLUE ) { $qname = $qentries[$k][GQUE_NAME][0]; $mapque = $qentries[$k][GQUE_MAPQ][0]; $qstatus = $qentries[$k][GQUE_STAT][0]; // Queued $queued = @($qentries[$k][GQUE_QUED][0]) ? ($qentries[$k][GQUE_QUED][0]) : 0; $locque = @($qentries[$k][GQUE_LQUE][0]) ? ($qentries[$k][GQUE_LQUE][0]) : 0; $gridque = $queued - $locque; if ( $gridque < 0 ) $gridque = 0; $gmque = @($qentries[$k][GQUE_PQUE][0]) ? ($qentries[$k][GQUE_PQUE][0]) : 0; // Running $run = @($qentries[$k][GQUE_RUNG][0]) ? ($qentries[$k][GQUE_RUNG][0]) : 0; $locrun = @($qentries[$k][GQUE_LRUN][0]) ? ($qentries[$k][GQUE_LRUN][0]) : 0; $gridrun = $run - $locrun; if ( $gridrun < 0 ) $gridrun = 0; // Limits $cpumin = @($qentries[$k][GQUE_MINT][0]) ? $qentries[$k][GQUE_MINT][0] : "0"; $cpumax = @($qentries[$k][GQUE_MAXT][0]) ? $qentries[$k][GQUE_MAXT][0] : ">"; // related execenv $qenvkey = @($qentries[$k][GQUE_ENVK][0]) ? $qentries[$k][GQUE_ENVK][0] : ""; // use ExecutionEnvironment TotalInstances, LogicalCPUs when available to calculate cpus // use mapping between queues and execenvs $env = $envs[$qenvkey]; $cpu = $env[EENV_LCPU] * $env[EENV_TINS]; if (!$cpu) $cpu = "N/A"; // This below TODO $quewin = popup("quelist.php?host=$host&port=$port&qname=$qname&schema=$schema",750,430,6,$lang,$debug); } $gridque = $gridque + $gmque; if ( $queued == 0 ) $queued = $locque + $gridque; // filling the table $qrowcont[] = "$qname"; if ( !empty($mapque) ) { $qrowcont[] = "$mapque"; } $qrowcont[] = "$qstatus"; $qrowcont[] = "$cpumin – $cpumax"; $qrowcont[] = "$cpu"; $qrowcont[] = "$run (".$errors["402"].": $gridrun)"; $qrowcont[] = "$queued (".$errors["402"].": $gridque)"; $qtable->addrow($qrowcont); $qrowcont = array (); } $qtable->close(); } else { $errno = 8; echo "
    ".$errors["8"]."\n"; return $errno; } } elseif ( !$isse ) { $errno = 5; echo "
    ".$errors["5"]."\n"; return $errno; } @ldap_free_result($qsr); @ldap_close($ds); return 0; } else { $errno = 6; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/mon-icons0000644000000000000000000000013115067751426021300 xustar0030 mtime=1759499030.758449313 29 atime=1759499034.76351017 30 ctime=1759499030.758449313 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/0000755000175000002070000000000015067751426023260 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Russia.png0000644000000000000000000000013215067751327023332 xustar0030 mtime=1759498967.780492359 30 atime=1759498967.878493848 30 ctime=1759499030.720391336 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Russia.png0000644000175000002070000000040015067751327025226 0ustar00mockbuildmock00000000000000‰PNG  IHDR Š`.®gAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<*PLTEÂ$¾'3S 6^©\D…lƒº3S¡6^ª\D†lƒ»Ã$À'¿'ÿÿÿv¯Ëý\IDATxÚbàEÄ€.@ÄÀÊʉXYˆ‰‰ 0133;`f $ÀÂ@ \È€ €xx¸‘@102ò F€_åCFY9IEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023412 xustar0030 mtime=1759498967.780181094 30 atime=1759498967.878493848 30 ctime=1759499030.679770579 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Makefile.am0000644000175000002070000000022715067751327025315 0ustar00mockbuildmock00000000000000monitoriconsdir = @monitor_prefix@/mon-icons monitoricons_DATA = $(srcdir)/*.png $(srcdir)/*.php $(srcdir)/*.gif EXTRA_DIST = $(monitoricons_DATA) nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Finland.png0000644000000000000000000000013215067751327023437 xustar0030 mtime=1759498967.780181094 30 atime=1759498967.878493848 30 ctime=1759499030.695888766 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Finland.png0000644000175000002070000000050215067751327025336 0ustar00mockbuildmock00000000000000‰PNG  IHDR 2ÜIËbKGDÿÿÿ ½§“ pHYs  ÒÝ~ütIMEÓÜÆ?üÏIDATxÚ•;rÃ0 DI„òg«Ðýïæ"©â(¡(R ÐEv¶}³²/³+ˆ>Ñæ{Âje« q‡:ôäíŠo™æ,ȱ†#å Š…1u2ý8höxÒqƒ™í‚ûÐÐO@›³à:¦œ§è1*J1Ø÷¼;‹¼Eüœ½Oðñ;ð®ýªÅKZÑFøóg•¤_i«)=Ñ^ nc%¸esÆ9Øåņ¸+Ád á§U6ÎU/¬xñ’ C< ~ÐÙ48Qpø§þ ðp#F{2IEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Slovakia.png0000644000000000000000000000013115067751327023634 xustar0030 mtime=1759498967.780492359 30 atime=1759498967.878493848 29 ctime=1759499030.72177935 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Slovakia.png0000644000175000002070000000050215067751327025534 0ustar00mockbuildmock00000000000000‰PNG  IHDR Š`.®gAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<`PLTElƒºÞ“î¿ÉGd¨òÏÖæŸ®ÆBÊ/OSn®ê¯»x¿ÒOj[C„•Ap¹`¹;_;Z£ÿÿÿ¾'???FÎ?]-Q,Cz_ŽÎÕè¬ÑÖ_xòôù_x´‘ fâhIDATxÚbDÄ€.@`NV&V¨@17+/ƒ°3rró @@˜ùEøØ`ÄÀÌòFkt옖”\ ÒvLŠx­†Ÿ=J,¸ÎþM»Œ4Kø)~IEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Makefile.in0000644000000000000000000000013115067751357023425 xustar0030 mtime=1759498991.234153404 29 atime=1759499019.45527756 30 ctime=1759499030.680448127 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Makefile.in0000644000175000002070000005120515067751357025333 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/monitor/mon-icons ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac 
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(monitoriconsdir)" DATA = $(monitoricons_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = 
@DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = 
@PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ monitoriconsdir = @monitor_prefix@/mon-icons monitoricons_DATA = $(srcdir)/*.png $(srcdir)/*.php $(srcdir)/*.gif EXTRA_DIST = $(monitoricons_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd 
$(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/monitor/mon-icons/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/monitor/mon-icons/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitoriconsDATA: $(monitoricons_DATA) @$(NORMAL_INSTALL) @list='$(monitoricons_DATA)'; test -n "$(monitoriconsdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(monitoriconsdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(monitoriconsdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitoriconsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitoriconsdir)" || exit $$?; \ done uninstall-monitoriconsDATA: @$(NORMAL_UNINSTALL) @list='$(monitoricons_DATA)'; test -n "$(monitoriconsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(monitoriconsdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitoriconsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitoriconsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitoriconsDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-monitoriconsDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am \ uninstall-monitoriconsDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:

[Binary tar payload omitted. The stream continues with nordugrid-arc-7.1.1/src/services/monitor/mon-icons/, the small images and helper scripts used by the monitor web pages. File names recoverable from this stretch:
  country flags (PNG): Spain, Slovenia, Portugal, Denmark, Latvia, USA, Georgia, Poland, Netherlands, Germany, Romania, Morocco, Switzerland, Algeria, Ireland, Chile, Sweden, Bulgaria, Czechia, Armenia, Italy, Israel, Hungary, Greece, HongKong, Japan, Austria, Lithuania, Taiwan, Norway, Canada, Ukraine, Belgium, UK, Australia;
  interface icons (PNG): icon-close, icon-folks, icon-run, icon-refresh, icon-help, icon-print, World;
  spacer.gif;
  dynamic icon scripts (PHP): icon_led.php, icon_back.php, icon_spacer.php, icon_bar.php, icon_start.php.]
@ £8°E<@áL „S ÀËô+EØi IEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Iceland.png0000644000000000000000000000013115067751327023422 xustar0030 mtime=1759498967.780181094 30 atime=1759498967.878493848 29 ctime=1759499030.70424166 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Iceland.png0000644000175000002070000000021115067751327025317 0ustar00mockbuildmock00000000000000‰PNG  IHDR ½¾ÞœPIDAT(‘c´ŽZüŸ YðŸQƒÁñÿ ›è% ø^Y"Å0þÿÿÿ?º ²°AÃRÀÀ{=šŽ,Aáþhå a [sáIEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/Estonia.png0000644000000000000000000000013215067751327023466 xustar0030 mtime=1759498967.780181094 30 atime=1759498967.878493848 30 ctime=1759499030.694734587 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/Estonia.png0000644000175000002070000000034415067751327025371 0ustar00mockbuildmock00000000000000‰PNG  IHDR Š`.®gAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<PLTE=]¨;[¤;Z£;[¥;Z¤=]©@@@*ÿÿÿøÉ‹LIDATxÚb`bbALLÄÀÈÈŒˆ 00; 4@ÄÀ†ˆ †@a3¥WhÒIEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/mon-icons/PaxHeaders/icon-vo.png0000644000000000000000000000013215067751327023436 xustar0030 mtime=1759498967.780492359 30 atime=1759498967.878493848 30 ctime=1759499030.750152329 nordugrid-arc-7.1.1/src/services/monitor/mon-icons/icon-vo.png0000644000175000002070000000105515067751327025341 0ustar00mockbuildmock00000000000000‰PNG  IHDRשÍÊgAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<]PLTEª1 ôÀ-ž¤% ²JDùñð¥,%̇ƒ˜òâáÜ"žÉmÒ–“ÃaÏxî´*Ù¥¢¸ZT«;5Õ„ ß´²¶I½U¿idìÓÑâœ%æÃÁ°=ûÌ0ÿÿÿè¶4tRNSÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÍv+IDATxÚbÈ—@!$8¥€Î „ ƒŒ ³œ @p AfYYYF&i €`¬l\@ Y˜@Á$ØedÁ€Y‚¤ €`¢ÌŒ q>q6 @Áí`áIˆ‰ƒHv99€‚KHrpII‰0õIqsʹl çŠAlbà• tçJÄE€–ܹÜ`!°´¨œ@$xYXX„aÎÈò @%8AÖò3A bsƒ €€B|`'"“@烜4’Z›S €!Œd“ @1ÀÜÃφpˆ @@;xŽ`ÔÏ BÿƒÙÄ€©p6@áL „S À3B7;ê\Y¹IEND®B`‚nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/discover.php0000644000000000000000000000013215067751327022003 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.607742383 nordugrid-arc-7.1.1/src/services/monitor/discover.php0000644000175000002070000001146115067751327023710 0ustar00mockbuildmock00000000000000title); $strings = &$toppage->strings; $giislist = &$toppage->giislist; $isattr = &$toppage->isattr; $errors = &$toppage->errors; require_once('attlist.inc'); $itself = $_SERVER["PHP_SELF"]; $schema = @$_GET["schema"]; if (!$schema) $schema = "NG"; $itself = $itself."?schema=".$schema; $ifsub = $_POST["submit"] ? TRUE : FALSE ; $ifsel = $_POST["select"] ? TRUE : FALSE ; echo "
    \n"; if ( $ifsub ) { // Call attributes list function for all selected arguments $request = $_POST; $attributes = array (); $signs = array (); $filters = array (); $attributes = $request["attributes"]; $signs = $request["signs"]; $filters = $request["filters"]; $thething = $request["scope"]; if ( $thething == "job" || $thething == "queue" || $thething == "authuser" ) $thething = "cluster"; // $attwin = popup("attlist.php?attribute=$encatt",650,300,7,$lang,$debug); //TODO: change thething to object class, or it will never work for GLUE2. // alternative: keep job queue authuser and do guessing for GLUE2. do_attlist($thething,$attributes,$signs,$filters,$strings,$giislist,$archery_list,$schema); echo "
     "; echo " \n
    "; } elseif ( $ifsel ) { // If selection of search object and nr. of attributres is made, display options: $scope = $_POST; $object = $scope["object"]; $nlines = $scope["nlines"]; if ( !$nlines ) $nlines = 6; echo "

    ".$errors["416"].$object."

    \n"; echo "
    ".$errors["417"]."
    \n"; echo "
    ".$errors["418"]."


    \n"; $attwin = popup($itself,650,300,7,$lang,$debug); echo "
    "; echo "\n"; $rcol = "#ccffff"; for ( $i = 0; $i < $nlines; $i++ ) { echo "\n"; echo "\n"; echo "\n"; } echo "\n"; echo "
    "; echo "

      
    \n"; echo " \n"; } else { echo "

    ".$errors["419"]."

    \n"; echo "
    "; echo "

    ".$errors["423"]." \n"; echo "  ".$errors["424"]." \n"; echo "  \n"; echo "

    \n"; } echo "
    \n"; $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/vo-users.php0000644000000000000000000000013215067751327021750 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.616999544 nordugrid-arc-7.1.1/src/services/monitor/vo-users.php0000644000175000002070000000776715067751327023673 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $toppage->tabletop("","".$toptitle.""); // ldap search filter string for jobs $ufilter = "(objectclass=".OBJ_PERS.")"; $ulim = array ( "dn", VO_USSN, VO_USCN, VO_DESC, VO_INST, VO_MAIL ); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 10; $tout = 15; if( $debug ) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Establish connection to the requested VO server if( $debug ) dbgmsg($errors["117"].$host.":".$port); $ldapuri = "ldap://".$host.":".$port; $ds = ldap_connect($ldapuri); if ($ds) { // If contact OK, search for people $ts1 = time(); $sr = @ldap_search($ds,$vo,$ufilter,$ulim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["125"]." (".($ts2-$ts1).$errors["104"].")
    "); if ($sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($ds,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $entries = @ldap_get_entries($ds,$sr); $nusers = $entries["count"]; define("CMPKEY",VO_USSN); usort($entries,"ldap_entry_comp"); // HTML table initialization $utable = new LmTable($module,$toppage->$module); // loop on users $uscnt = 0; for ($i=1; $i<$nusers+1; $i++) { $dn = $entries[$i]["dn"]; if ( $group ) { $newfilter = "(member=$dn)"; $newdn = $group.",".$vo; $newlim = array("dn"); $gcheck = @ldap_search($ds,$newdn,$newfilter,$newlim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); if ( !ldap_count_entries($ds,$gcheck) ) continue; } $usname = $entries[$i][VO_USCN][0]; // $usname = utf2cyr($usname,"n"); // $ussn = strstr($entries[$i][VO_DESC][0],"/"); $ussn = substr(strstr($entries[$i][VO_DESC][0],"subject="),8); $ussn = trim($ussn); $encuname = rawurlencode($ussn); $org = $entries[$i][VO_INST][0]; // $org = utf8_decode($org); $mail = $entries[$i][VO_MAIL][0]; $mailstr = "mailto:".$mail; $usrwin = popup("userlist.php?owner=$encuname",700,500,5,$lang,$debug); // filling the table $uscnt++; $urowcont[] = $uscnt; $urowcont[] = "$usname"; $urowcont[] = "$org"; $urowcont[] = "$mail"; $utable->addrow($urowcont); $urowcont = array (); } $utable->close(); } else { $errno = 10; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = 5; echo "
    ".$errors[$errno]."\n"; return $errno; } ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = 6; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/loadmon.php0000644000000000000000000000013215067751327021616 xustar0030 mtime=1759498967.779492344 30 atime=1759498967.878493848 30 ctime=1759499030.611446384 nordugrid-arc-7.1.1/src/services/monitor/loadmon.php0000644000175000002070000006660415067751327023534 0ustar00mockbuildmock00000000000000module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; $emirslist= &$toppage->emirslist; $cert = &$toppage->cert; $yazyk = &$toppage->language; $archery_list = &$toppage->archery_list; // Header table $toptit = date("Y-m-d T H:i:s"); $toppage->tabletop("".EXTRA_TITLE." ".$toppage->title."

    ","$toptit"); //********************* Schema changing ****************************** $other_schema = "GLUE2"; if ( $schema == "GLUE2" ) $other_schema = "NG"; $_GET["schema"] = $other_schema; $get_options = ""; $keys = array_keys($_GET); foreach ($_GET as $key => $value) { if ( $key == $keys[0] ) { $get_options = "?"; } else { $get_options = $get_options."&"; } $get_options = $get_options."$key=$value"; } //TODO: use translate messages echo "Current data rendered according to $schema schema.
    "; echo "Schema switching to: $other_schema

    "; //********************** Legend - only needed for this module ********************* echo "
    \n"; echo "".$errors["401"].":\n"; echo "\"".$errors["305"]."\"".$errors["402"]." \"".$errors["306"]."\"".$errors["403"]."\n"; echo ""; $sewin = popup("sestat.php",650,200,8,$lang,$debug); $discwin = popup("discover.php",700,400,9,$lang,$debug); $vostring = popup("volist.php",440,330,11,$lang,$debug); $usstring = popup("allusers.php",650,700,12,$lang,$debug); $acstring = popup("allusers.php?limit=1",500,600,12,$lang,$debug); echo "
    \n"; //******** Authorised users echo "\"".$errors["307"]."\" \n"; //******** Active users echo "\"".$errors["308"]."\" \n"; //******** Search echo "\"".$errors["309"]."\" \n"; //******** Storage echo "\"".$errors["310"]."\" \n"; //******** Virtual Organisations echo "\"".$errors["311"]."\"\n"; echo "
    \n"; echo "
    \n"; //****************************** End of legend **************************************** // Some debug output if ( $debug ) { ob_end_flush(); ob_implicit_flush(); dbgmsg("
    ARC ".$toppage->getVersion()."
    "); } $tcont = array(); // array with rows, to be sorted $cachefile = CACHE_LOCATION."/loadmon-$schema-".$yazyk; $tcont = get_from_cache($cachefile,120); // If cache exists, skip ldapsearch if ( !$tcont || $debug || $display != "all" ) { // Do LDAP search $tcont = array(); // Setting time limits for ldapsearch $tlim = 10; $tout = 11; if($debug) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::
    "); // ldapsearch filter different for NG and GLUE2 if ( $schema == "NG" ) { $filter="(|(objectClass=".OBJ_CLUS.")(objectClass=".OBJ_QUEU."))"; } else { $filter="(|(objectclass=".GOBJ_CLUS.")(objectclass=".GOBJ_QUEU.")(objectClass=".GOBJ_MAN.")(objectClass=".GOBJ_LOC.")(objectClass=".GOBJ_CON.")(objectClass=".GOBJ_ADMD."))"; } // Array defining the attributes to be returned $lim = array( "dn", GCLU_ANAM, GCLU_ZIPC, GCLU_TCPU, GCLU_GCPU, GCLU_LCPU, GCLU_TJOB, GCLU_PQUE, GCLU_SUPP, GCLU_OWNR, GQUE_NAME, GQUE_MAPQ, GQUE_STAT, GQUE_QUED, GQUE_LQUE, GQUE_PQUE, GQUE_RUNG, GQUE_LRUN, CLU_ANAM, CLU_ZIPC, CLU_TCPU, CLU_UCPU, CLU_TJOB, CLU_QJOB, CLU_PQUE, QUE_STAT, QUE_GQUE, QUE_QUED, QUE_LQUE, QUE_PQUE, QUE_RUNG, QUE_GRUN ); // Adjusting cluster display filter $showvo = ""; if ( substr($display,0,2) == "vo" ) { $showvo = substr(strrchr($display,"="),1); if ($debug) dbgmsg(" ::: ".$errors["105"]."$showvo"); } if ( $display != "all" && !$showvo ) $filter = "(&".$filstr."(".$display."))"; //========================= GET CLUSTER LIST ============================ $gentries = array(); // EGIIS if ( ! empty($giislist) ) { $ngiis = count($giislist); $ts1 = time(); $gentries = recursive_giis_info($giislist,"cluster",$errors,$debug,1); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["106"].$ngiis." (".($ts2-$ts1).$errors["104"].")
    "); } // EMIR if ( ! empty($emirslist)) $gentries = emirs_info($emirslist,"cluster",$errors,$gentries,$debug,$cert); // ARCHERY if ( ! empty($archery_list) ) $gentries = array_merge($gentries, archery_info($archery_list, $schema, $errors, $debug)); //======================================================================= $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } else { if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"]."cluster: ".$nc."
    "); foreach ( $gentries as $num=>$val ) dbgmsg($val["host"].":".$val["base"]."
    "); } } $dsarray = array (); $hnarray = array (); $pnarray = array (); $dnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ // Purging cluster entries for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; //$basedn = $gentries[$k]["base"]; // Force basedn to the selected schema if ( $schema == "GLUE2" ) { $basedn = DN_GLUE; } else { $basedn = DN_LOCAL; } $fp = @fsockopen($clhost, $clport, $errno, $errstr, 2); $ldapuri = "ldap://".$clhost.":".$clport; $clconn = ldap_connect($ldapuri); if ( $fp && $clconn && !@$sitetag[$clhost] ) { fclose($fp); array_push($dsarray,$clconn); array_push($hnarray,$clhost); array_push($pnarray,$clport); array_push($dnarray,$basedn); @ldap_set_option($clconn, LDAP_OPT_NETWORK_TIMEOUT, $tout); $sitetag[$clhost] = 1; /* filtering tag */ if ($debug==2) dbgmsg("Adding $k - $clhost:$clport $basedn
    "); } elseif ( $fp && $clconn && @$sitetag[$clhost] ) { if ($debug==2) dbgmsg("Skipping duplicate host entry $k - $clhost:$clport $basedn
    "); fclose($fp); } } $nhosts = count($dsarray); if( $debug == 2 ) dbgmsg("
    ".$nhosts.$errors["108"]."
    "); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters and queues $ts1 = time(); $srarray = @ldap_search($dsarray,$dnarray,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); // If using the patched LDAP //$srarray = @ldap_search($dsarray,DN_LOCAL,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER,$tout); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["109"]." (".($ts2-$ts1).$errors["104"].")
    "); /* * $ts1 = time(); * $qsarray = @ldap_search($dsarray,DN_LOCAL,$qfilstr,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); * // Fall back to a conventional LDAP * // if ( !count($qsrarray)) $qsarray = @ldap_search($dsarray,DN_LOCAL,$qfilstr,$qlim,0,0,$tlim,LDAP_DEREF_NEVER); * $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); */ // Loop on clusters for ( $ids = 0; $ids < $nhosts; $ids++ ) { $entries = array(); $jentries = array(); $gentries = array(); $rowcont = array(); $sr = $srarray[$ids]; $hn = $hnarray[$ids]; $pn = $pnarray[$ids]; $ds = $dsarray[$ids]; $nr = @ldap_count_entries($ds,$sr); if ( !$sr || !$ds || !$nr ) { $error = ldap_error($ds); if ( $error == "Success" ) $error = $errors["3"]; if ( $debug ) dbgmsg("".$errors["111"]."$hn ($error)
    "); $sr = FALSE; } if ($ds && $sr) { $entries = @ldap_get_entries($ds,$sr); // Number of LDAP objects retrieved for a given cluster // NG: nordugrid-cluster and nordugrid-queue(s) , 2+ // GLUE2: Service,Manager,Shares,Location,Contact,AdminDomain, 6+ $nobjects = $entries["count"]; if ( !$nobjects ) { if ( $debug ) dbgmsg("$hn:".$errors["3"]."
    "); continue; } $nclu = 0; $nqueues = 0; $allqrun = 0; $lrmsrun = 0; $gridjobs = 0; $allqueued = 0; $gridqueued = 0; $lrmsqueued = 0; $prequeued = 0; $totgridq = 0; $toflag2 = FALSE; $stopflag = FALSE; for ($i=0; $i<$nobjects; $i++) { $curdn = $entries[$i]["dn"]; $preflength = strrpos($curdn,","); $basedn = substr($curdn,$preflength+1); $allbasedn = strtolower(substr($curdn,$preflength-17)); if ( ($schema == "GLUE2") && ($basedn == DN_GLUE ) ) { // extract objectclass name from DN -- shouldn't this be easier? $preflength = strpos($curdn,":"); $preflength = strpos($curdn,":",$preflength+1); $object = substr($curdn,$preflength+1,strpos($curdn,":",$preflength+1)-$preflength-1); if ($object=="ComputingService") { $dnparts = ldap_explode_dn($curdn,0); $endpointArray=explode(":",$dnparts[0]); $curname = $endpointArray[3]; $curport = $pn; $curalias = $entries[$i][GCLU_ANAM][0]; // Manipulate alias: replace the string if necessary and cut off at 22 characters; strip HTML tags if (file_exists("cnvalias.inc")) include('cnvalias.inc'); $curalias = strip_tags($curalias); //if alias empty (common in GLUE2 due to no real place for it in the schema), use endpoint FQDN if ( empty($curalias) ) { $curalias = $curname." Undefined)"; } if ( strlen($curalias) > 22 ) $curalias = substr($curalias,0,21) . ">"; $gmqueued = @($entries[$i][GCLU_PQUE][0]) ? $entries[$i][GCLU_PQUE][0] : 0; /* new since 0.5.38 */ // use computingmanager info instead, For some reason this number is incorrect in the rendering. Needs to be checked on infosys side. //$curtotjobs = @($entries[$i][GCLU_TJOB][0]) ? $entries[$i][GCLU_TJOB][0] : 0; $clstring = popup("clusdes.php?host=$curname&port=$curport&schema=$schema",700,620,1,$lang,$debug); $nclu++; } elseif ($object=="ComputingManager") { // All the numbers here are actually "JobSlots" and not CPUs or cores. // But I am keeping the variable names for consistency with NG. // Assumption: 1 core - 1 job slot $curtotcpu = @($entries[$i][GCLU_TCPU][0]) ? $entries[$i][GCLU_TCPU][0] : 0; if ( !$curtotcpu && $debug ) dbgmsg("$curname".$errors["113"]."
    "); $gridjobs = @($entries[$i][GCLU_GCPU][0]) ? $entries[$i][GCLU_GCPU][0] : 0; $lrmsrun = @($entries[$i][GCLU_LCPU][0]) ? $entries[$i][GCLU_LCPU][0] : 0; $curusedcpu = $gridjobs + $lrmsrun; if ( $curusedcpu < 0 ) $curusedcpu = -1; // I think this is not actually used anywhere, even in NG. Probably can be removed. $curtotjobs = $curusedcpu; } elseif ($object=="ComputingShare") { // GLUE2 publishes a Share for the bare queue, and as many shares as the VOs. // So here we only take the Share that holds the bare info, which is what we publish in the monitor. // TODO: find a better solution for this. Unfortunately some sites // seem to have hacked the queue EntityName or the LRMS returns a longer name that // is not being shortened by infosys. $shname = $entries[$i][GQUE_NAME][0]; $shmapq = $entries[$i][GQUE_MAPQ][0]; if ( $shname == $shmapq ) { $qstatus = $entries[$i][GQUE_STAT][0]; if ( $qstatus != "production" ) $stopflag = TRUE; // curallqueued: all queued jobs in the queue (grid + local) $curallqueued = @($entries[$i][GQUE_QUED][0]) ? ($entries[$i][GQUE_QUED][0]) : 0; $curlrmsqueued = @($entries[$i][GQUE_LQUE][0]) ? ($entries[$i][GQUE_LQUE][0]) : 0; // There is no info in GLUE2 Shares about Grid jobs, must be extracted $curgridqueued = $curallqueued - $curlrmsqueued; if ($curgridqueued < 0) $curgridqueued = 0; $gridqueued += $curgridqueued; $lrmsqueued += $curlrmsqueued; $prequeued += @($entries[$i][GQUE_PQUE][0]) ? ($entries[$i][GQUE_PQUE][0]) : 0; // Updating the total number of queued jobs $allqueued += $curallqueued; $nqueues++; }; } elseif ($object=="Location") { if ( !empty($entries[$i][GCLU_ZIPC][0]) ) $curzip = $entries[$i][GCLU_ZIPC][0]; } elseif ( $object == "AdminDomain" ) { // here we may extract the site name and add it to the cluster line as in NG but funkyer } elseif ( $object == "Contact" ) { // here we may extract the support string (usually an email) }; // This part of the code is for aggregating values from all the above GLUE2 objects if ( ($schema == "GLUE2") && ($basedn == DN_GLUE ) && ($i == ($nobjects-1))) { // Calculate country based on gathered data $dnparts = ldap_explode_dn($curdn,0); $endpointArray=explode(":",$dnparts[0]); $curname = $endpointArray[3]; $curport = $pn; // This could have been set by the Location object parsing, so we check it first if (!isset($$curzip)) $curzip=""; $vo = guess_country($curname,$curzip); if ($debug==2) dbgmsg("$ids: $curname".$errors["112"]."$vo
    "); $vostring = $_SERVER['PHP_SELF']."?display=vo=$vo"; if ( $lang != "default") $vostring .= "&lang=".$lang; if ( $debug ) $vostring .= "&debug=".$debug; $country = $vo; if ( $yazyk !== "en" ) $country = $strings["tlconvert"][$vo]; $country_content = "\"".$errors["312"]."\" ".$country." "; if (!in_array($country_content,$rowcont)) { $rowcont[] = $country_content; } //blank $curzip for the next cluster $curzip=""; }; } elseif ( ($schema == "NG") && ( $allbasedn == DN_LOCAL) ) { // check if it is a site or a job; count $preflength = strpos($curdn,"-"); $object = substr($curdn,$preflength+1,strpos($curdn,"-",$preflength+1)-$preflength-1); if ($object=="cluster") { $dnparts = ldap_explode_dn($curdn,0); $curname = substr(strstr($dnparts[0],"="),1); $curport = $pn; // Country name massaging $zip = ""; if ( !empty($entries[$i][CLU_ZIPC][0]) ) $zip = $entries[$i][CLU_ZIPC][0]; $vo = guess_country($curname,$zip); if ($debug==2) dbgmsg("$ids: $curname".$errors["112"]."$vo
    "); $vostring = $_SERVER['PHP_SELF']."?display=vo=$vo"; if ( $lang != "default") $vostring .= "&lang=".$lang; if ( $debug ) $vostring .= "&debug=".$debug; $country = $vo; if ( $yazyk !== "en" ) $country = $strings["tlconvert"][$vo]; $rowcont[] = "\"".$errors["312"]."\" ".$country." "; $curtotcpu = @($entries[$i][CLU_TCPU][0]) ? $entries[$i][CLU_TCPU][0] : 0; if ( !$curtotcpu && $debug ) dbgmsg("$curname".$errors["113"]."
    "); $curalias = $entries[$i][CLU_ANAM][0]; // Manipulate alias: replace the string if necessary and cut off at 22 characters; strip HTML tags if (file_exists("cnvalias.inc")) include('cnvalias.inc'); $curalias = strip_tags($curalias); if ( strlen($curalias) > 22 ) $curalias = substr($curalias,0,21) . ">"; $curtotjobs = @($entries[$i][CLU_TJOB][0]) ? $entries[$i][CLU_TJOB][0] : 0; $curusedcpu = @($entries[$i][CLU_UCPU][0]) ? $entries[$i][CLU_UCPU][0] : -1; $totqueued = @($entries[$i][CLU_QJOB][0]) ? $entries[$i][CLU_QJOB][0] : 0; /* deprecated since 0.5.38 */ $gmqueued = @($entries[$i][CLU_PQUE][0]) ? $entries[$i][CLU_PQUE][0] : 0; /* new since 0.5.38 */ $clstring = popup("clusdes.php?host=$curname&port=$curport",700,620,1,$lang,$debug); $nclu++; } elseif ($object=="queue") { $qstatus = $entries[$i][QUE_STAT][0]; if ( $qstatus != "active" ) $stopflag = TRUE; $allqrun += @($entries[$i][QUE_RUNG][0]) ? ($entries[$i][QUE_RUNG][0]) : 0; $gridjobs += @($entries[$i][QUE_GRUN][0]) ? ($entries[$i][QUE_GRUN][0]) : 0; $gridqueued += @($entries[$i][QUE_GQUE][0]) ? ($entries[$i][QUE_GQUE][0]) : 0; $allqueued += @($entries[$i][QUE_QUED][0]) ? ($entries[$i][QUE_QUED][0]) : 0; /* deprecated since 0.5.38 */ $lrmsqueued += @($entries[$i][QUE_LQUE][0]) ? ($entries[$i][QUE_LQUE][0]) : 0; /* new since 0.5.38 */ $prequeued += @($entries[$i][QUE_PQUE][0]) ? ($entries[$i][QUE_PQUE][0]) : 0; /* new since 0.5.38 */ $nqueues++; } } } if ( !$nclu && $nqueues ) { if ( $debug ) dbgmsg("$hn:".$errors["3"].": ".$errors["111"].$errors["410"]."
    "); continue; } if ( $nclu > 1 && $debug ) dbgmsg("$hn:".$errors["3"].": $nclu ".$errors["406"]."
    "); if (!$nqueues) $toflag2 = TRUE; if ($debug==2 && $prequeued != $gmqueued) dbgmsg("$curname: cluster-prelrmsqueued != sum(queue-prelrmsqueued)"); $allrun = ($curusedcpu < 0) ? $allqrun : $curusedcpu; if ($gridjobs > $allrun) $gridjobs = $allrun; /* For versions < 0.5.38: * Some Grid jobs are counted towards $totqueued and not towards $allqueued * (those in GM), so $totqueued - $allqueued = $gmqueued, * and $truegridq = $gmqueued + $gridqueued * and $nongridq = $totqueued - $truegridq == $allqueued - $gridqueued * hence $truegridq = $totqueued - $nongridq */ $nongridq = ($totqueued) ? $allqueued - $gridqueued : $lrmsqueued; $truegridq = ($totqueued) ? $totqueued - $nongridq : $gridqueued + $prequeued; // temporary hack: // $truegridq = $gridqueued; // $formtgq = sprintf(" s",$truegridq); $formngq = sprintf("\ \;s",$nongridq); $localrun = $allrun - $gridjobs; $gridload = ($curtotcpu > 0) ? $gridjobs/$curtotcpu : 0; $clusload = ($curtotcpu > 0) ? $allrun/$curtotcpu : 0; $tstring = urlencode("$gridjobs+$localrun"); $jrstring = popup("jobstat.php?host=$curname&port=$curport&status=Running&jobdn=all",600,500,2,$lang,$debug); $jqstring = popup("jobstat.php?host=$curname&port=$curport&status=Queueing&jobdn=all",600,500,2,$lang,$debug); if ( $schema == "GLUE2"){ $jrstring = popup("jobstat.php?host=$curname&port=$curport&status=Running&jobdn=all&schema=$schema",600,500,2,$lang,$debug); $jqstring = popup("jobstat.php?host=$curname&port=$curport&status=Queueing&jobdn=all&schema=$schema",600,500,2,$lang,$debug); } if ( $toflag2 ) { $tstring .= " (no queue info)"; // not sure if this is localizeable at all } elseif ( $stopflag ) { $tstring .= " (queue inactive)"; // not sure if this is localizeable at all } // Add a cluster row $rowcont[] = " $curalias"; $rowcont[] = "$curtotcpu"; if ( $curtotcpu ) { $rowcont[] = "\"$gridjobs+$localrun\""; } else { $rowcont[] = "\"$gridjobs+$localrun\""; } // $rowcont[] = "$totqueued"; $rowcont[] = "$truegridq+$nongridq"; // Not adding anymore, cache instead // $ctable->addrow($rowcont); $tcont[] = $rowcont; $rowcont = array (); } } // Dump the collected table cache_table($cachefile,$tcont); } // HTML table initialization $ctable = new LmTableSp($module,$toppage->$module); // Sort /** possible ordering keywords: * country - sort by country, default * cpu - sort by advertised CPU number * grun - sort by number of running Grid jobs */ $ostring = "comp_by_".$order; usort($tcont,$ostring); $nrows = count($tcont); $votolink = array(); $affiliation = array(); foreach ( $tcont as $trow ) { $vo = $trow[0]; $vo = substr(stristr($vo,"./mon-icons/"),12); $vo = substr($vo,0,strpos($vo,".")); if ( !in_array($vo,$votolink) ) $votolink[]=$vo; array_push($affiliation,$vo); } $affcnt = array_count_values($affiliation); $prevvo = "boo"; $sumcpu = 0; $sumgridjobs = 0; $sumlocljobs = 0; $sumclusters = 0; $sumgridqueued = 0; $sumloclqueued = 0; //$sumqueued = 0; // actual loop foreach ( $tcont as $trow ) { $gridjobs = $trow[3]; $gridjobs = substr(stristr($gridjobs,"alt=\""),5); $gridjobs = substr($gridjobs,0,strpos($gridjobs,"+")); $localrun = $trow[3]; $localrun = substr(stristr($localrun,"+"),1); $localrun = substr($localrun,0,strpos($localrun,"\" w")); $truegridq = $trow[4]; $truegridq = substr(stristr($truegridq,""),3); $truegridq = substr($truegridq,0,strpos($truegridq,"")); $nongridq = $trow[4]; $nongridq = substr(stristr($nongridq,"+"),1); $vo = $trow[0]; $vo = substr(stristr($vo,"./mon-icons/"),12); $vo = substr($vo,0,strpos($vo,".")); if ( @$showvo && $showvo != $vo ) 
continue; $sumcpu += $trow[2]; $sumgridjobs += $gridjobs; $sumlocljobs += $localrun; $sumgridqueued += $truegridq; $sumloclqueued += $nongridq; // $sumqueued += $totqueued; $sumclusters ++; if ( $vo != $prevvo && $order == "country" ) { // start new country rowspan $prevvo = $vo; $vostring = $trow[0]; $ctable->addspacer("#000099"); $ctable->rowspan( $affcnt[$vo], $vostring, "#FFF2DF" ); $tcrow = array_shift($trow); $ctable->addrow($trow); } else { if ( $order == "country" ) $tcrow = array_shift($trow); $ctable->addrow($trow); } } $tcont = array(); $ctable->addspacer("#990000"); $rowcont[] = "".$errors["405"].""; $rowcont[] = "$sumclusters".$errors["406"].""; $rowcont[] = "$sumcpu"; $rowcont[] = "$sumgridjobs + $sumlocljobs"; $rowcont[] = "$sumgridqueued + $sumloclqueued"; // $rowcont[] = "$sumqueued"; $ctable->addrow($rowcont, "#ffffff"); $ctable->close(); // To change language, link back to ALL $linkback = $_SERVER['PHP_SELF']; if ( $debug ) { $linkback .= "?debug=".$debug; $separator = "&"; } else { $separator = "?"; } // Show flags if only one country is chosen if ( @$showvo ) { echo "
    \n"; foreach ( $votolink as $volink ) { $vostring = $_SERVER['PHP_SELF']."?display=vo=$volink"; if ( $lang != "default" ) $vostring .= "&lang=".$lang; if ( $debug ) $vostring .= "&debug=".$debug; $voimage = "\"".$errors["312"]."\""; echo "$voimage  "; } if ( $lang != "default") $linkall = $linkback.$separator."lang=".$lang; echo "".$errors["409"]."
    \n"; // Show ALL echo "
    \n"; } else { // Show languages $translations = scandir(getcwd()."/lang"); echo "

    \n"; foreach ( $translations as $transfile ) { $twoletcod = substr($transfile,0,2); if ( stristr($transfile,".") == ".inc" && $twoletcod != "us" ) { $linklang = $linkback.$separator."lang=".$twoletcod; echo "$twoletcod  "; } } echo "
    \n"; } return 0; // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/mylo.js0000644000000000000000000000013215067751327020772 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.621686311 nordugrid-arc-7.1.1/src/services/monitor/mylo.js0000644000175000002070000000051715067751327022677 0ustar00mockbuildmock00000000000000function mylo (fnam,lnam,dom1,dom2){ if ( lnam == "" ) { var name = fnam; } else { var name = fnam + "." + lnam; } var host = dom1 + "." + dom2; var complete = name + "@" + host; output = "" + complete + ""; document.write(output); return output; } nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/attlist.php0000644000000000000000000000013215067751327021651 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.605288144 nordugrid-arc-7.1.1/src/services/monitor/attlist.php0000644000175000002070000000257415067751327023563 0ustar00mockbuildmock00000000000000title); $strings = &$toppage->strings; $giislist = &$toppage->giislist; $archery_list = &$toppage->archery_list; require_once('attlist.inc'); $schema = $_GET["schema"]; $object = $_GET["object"]; $attribute = $_GET["attribute"]; $filter = $_GET["filter"]; if ( !$filter ) $filter=""; if ( !$attribute ) $attribute="nordugrid-cluster-middleware"; if ( !$object ) $object="cluster"; $attribute = rawurldecode($attribute); $filter = rawurldecode($filter); if ( $attribute[1]==":") { $attribute = unserialize($attribute); $filter = unserialize($filter); $attributes = $attribute; $filters = $filter; $n = count($attributes); $signs = array_fill(0,$n,"="); } else { $attributes = array ($attribute); $signs = array ("="); $filters = array ($filter); } do_attlist($object,$attributes,$signs,$filters,$strings,$giislist,$archery_list,$schema); // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/quelist.php0000644000000000000000000000013215067751327021653 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.612817721 nordugrid-arc-7.1.1/src/services/monitor/quelist.php0000644000175000002070000001721215067751327023560 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $clstring = popup("clusdes.php?host=$host&port=$port&schema=$schema",700,620,1,$lang,$debug); // Header table $toppage->tabletop("","".$toptitle." ".$qname." (".$host.")"); $lim = array( "dn", JOB_NAME, JOB_GOWN, JOB_SUBM, JOB_STAT, JOB_COMP, JOB_USET, JOB_USEM, JOB_ERRS, JOB_CPUS, JOB_EQUE ); if ( $schema == "GLUE2") { $lim = array( "dn", GJOB_NAME, GJOB_GOWN, GJOB_SUBM, GJOB_STAT, GJOB_COMP, GJOB_USET, GJOB_USEM, GJOB_ERRS, GJOB_CPUS, GJOB_EQUE ); } if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // ldapsearch filter strings for cluster and queues $filstr = "(objectclass=".OBJ_AJOB.")"; $dn = DN_LOCAL; $topdn = DN_GLOBL; if ( $schema == "GLUE2") { $filstr = "(objectclass=".GOBJ_AJOB.")"; $dn = "GLUE2GroupID=services,".DN_GLUE; } // Establish connection to the requested LDAP server $ldapuri = "ldap://".$host.":".$port; $ds = ldap_connect($ldapuri); if ($ds) { // If contact OK, search for NorduGrid clusters $basedn = QUE_NAME."=".$qname.",".CLU_NAME."=".$host.","; $locdn = $basedn.$dn; if ( $schema == "GLUE2") { $basedn = GQUE_NAME."=".$qname.",".GCLU_NAME."=".$host.","; $basedn = "GLUE2ShareID=urn:ogf:ComputingShare:".$host.":".$qname.",GLUE2ServiceID=urn:ogf:ComputingService:".$host.":arex,"; $locdn = $basedn.$dn; } $aaa = ldap_nice_dump($strings,$ds,$locdn); echo "
    "; $ts1 = time(); $sr = ldap_search($ds,$dn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["110"]." (".($ts2-$ts1).$errors["104"].")
    "); // Fall back to conventional LDAP // if (!$sr) $sr = ldap_search($ds,$dn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { $entries = ldap_get_entries($ds,$sr); $njobs = $entries["count"]; define("CMPKEY",JOB_SUBM); if ( $schema == "GLUE2") define("CMPKEY",GJOB_SUBM); usort($entries,"ldap_entry_comp"); // HTML table initialisation $ltable = new LmTable($module,$toppage->$module); // loop on jobs $nj = 0; for ($i=1; $i<$njobs+1; $i++) { if ( $schema == "GLUE2") { $equeue = $entries[$i][GJOB_EQUE][0]; if ( $equeue !== $qname ) { if ( $debug == 2 ) dbgmsg($equeue." != ".$qname); continue; } $jobdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][GJOB_STAT][0]; $stahead = substr($curstat,0,12); $ftime = ""; if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); } elseif ($curstat=="FINISHED") { $ftime = $entries[$i][GJOB_COMP][0]; } if ( $ftime ) { $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $uname = $entries[$i][GJOB_GOWN][0]; $encuname = rawurlencode($uname); $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][GJOB_NAME][0]); $jobname = ($entries[$i][GJOB_NAME][0]) ? $jname : "N/A"; $time = ($entries[$i][GJOB_USET][0]) ? $entries[$i][GJOB_USET][0] : ""; $memory = ($entries[$i][GJOB_USEM][0]) ? $entries[$i][GJOB_USEM][0] : ""; $ncpus = ($entries[$i][GJOB_CPUS][0]) ? $entries[$i][GJOB_CPUS][0] : ""; $error = ($entries[$i][GJOB_ERRS][0]); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; $status = "All"; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jobdn&schema=$schema",750,430,4,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname&schema=$schema",700,500,5,$lang,$debug); } else { //NG schema parse $equeue = $entries[$i][JOB_EQUE][0]; if ( $equeue !== $qname ) { if ( $debug == 2 ) dbgmsg($equeue." != ".$qname); continue; } $jobdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][JOB_STAT][0]; $stahead = substr($curstat,0,12); $ftime = ""; if ($stahead=="FINISHED at:") { $ftime = substr(strrchr($curstat, " "), 1); } elseif ($curstat=="FINISHED") { $ftime = $entries[$i][JOB_COMP][0]; } if ( $ftime ) { $ftime = cnvtime($ftime); $curstat = "FINISHED at: ".$ftime; } $uname = $entries[$i][JOB_GOWN][0]; $encuname = rawurlencode($uname); $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ( !empty($entries[$i][JOB_NAME][0]) ) ? $jname : "N/A"; $time = ( !empty($entries[$i][JOB_USET][0]) ) ? $entries[$i][JOB_USET][0] : ""; $memory = ( !empty($entries[$i][JOB_USEM][0]) ) ? $entries[$i][JOB_USEM][0] : ""; $ncpus = ( !empty($entries[$i][JOB_CPUS][0]) ) ? $entries[$i][JOB_CPUS][0] : ""; $error = ( !empty($entries[$i][JOB_ERRS][0]) ); if ( $error ) $error = ( preg_match("/user/i",$error) ) ? "X" : "!"; $status = "All"; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jobdn",750,430,4,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname",700,500,5,$lang,$debug); } // filling the table $nj++; $lrowcont[] = "$nj $error"; $lrowcont[] = "$jobname"; $lrowcont[] = "$family"; $lrowcont[] = "$curstat"; $lrowcont[] = "$time"; $lrowcont[] = "$memory"; $lrowcont[] = "$ncpus"; $ltable->addrow($lrowcont); $lrowcont = array (); } $ltable->close(); } else { $errno = "4"; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = "5"; echo "
    ".$errors[$errno]."\n"; return $errno; } ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = "6"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/README.in0000644000000000000000000000013215067751327020741 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.624416412 nordugrid-arc-7.1.1/src/services/monitor/README.in0000644000175000002070000000520015067751327022640 0ustar00mockbuildmock00000000000000NorduGrid ARC version @VERSION@ Grid Monitor ============ Description ----------- Set of PHP scripts, providing a Web interface to the NorduGrid Information System. Should be working for any similar LDAP-based service, if the schema configuration is done carefuly. The directory contains: cache - directory for front page cache includes - directory with common methods and configuration file (settings.inc) lang - directory with localizations man - directory for the man page mon-icons - directory with icons allusers.php - list grid users attlist.php - show values of selected attributes on the grid clusdes.php - show cluster or storage information discover.php - list attributes specific for an object for consecutive search monitor.in - lynx call for the monitor (template) help.php - print help jobstat.php - show running/other jobs in a queue loadmon.php - main grid monitor script Makefile.am - Makefile template monitor.js - Java script for pop-up screens mylo.js - Java script for mail addresses quelist.php - show queue details and jobs README.in - README file (template) sestat.php - list storage elements userlist.php - show allowed sites and list of jobs of a user volist.php - static list of some VOs vo-users.php - lists users in a VO Requirements ------------ - GD library (http://www.boutell.com/gd/) - LDAP library (e.g., http://www.openldap.org) - PHP4 or PHP5 (http://www.php.net) compiled with LDAP and GD extensions - HTTP server compiled with PHP4 or PHP5 - Working ARC information system instance or a similar LDAP-based service - Optional: running Virtual Organisation LDAP-based service Installation ------------ 1. Copy all the files in a folder, accessible by the HTTP server 2. Verify that this folder contains a directory called "cache" and that it is writeable by the HTTP server. If your server is configured to have write access only to a specific location, such as "../htdata", modify CACHE_LOCATION value in "includes/settings.inc" accordingly 3. Modify "includes/settings.inc" according to your infosystem structure and liking: most likely, you want to modify the $giislist array by removing some GIISes/GRISes and adding other(s) 4. Run the whole stuff by loading "loadmon.php" into your browser Fine tuning ----------- - Making output more human-readable: modify "/lang/*.inc", "includes/cnvname.inc", "includes/cnvalias.inc". - Preventing sites from being polled: modify "includes/blacklist.inc". Otherwise, the file is not needed. Contact ------- Oxana Smirnova, oxana.smirnova@hep.lu.se nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/sestat.php0000644000000000000000000000013215067751327021470 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.614224515 nordugrid-arc-7.1.1/src/services/monitor/sestat.php0000644000175000002070000001402015067751327023367 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; $giislist = &$toppage->giislist; // Header table $toppage->tabletop("".$toptitle."

    ",""); if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 10; $tout = 15; if($debug) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::
    "); // Arrays defining the attributes to be returned $lim = array( "dn", SEL_NAME, SEL_ANAM, SEL_CURL, SEL_BURL, SEL_TYPE, SEL_FREE, SEL_TOTA ); // ldapsearch filter strings for clusters and queues $filstr = "(objectclass=".OBJ_STEL.")"; // Top GIIS server: get all from the pre-defined list $ngiis = count($giislist); $ts1 = time(); $gentries = recursive_giis_info($giislist,"nordugrid-SE",$errors,$debug); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["106"].$ngiis." (".($ts2-$ts1).$errors["104"].")
    "); $nc = count($gentries); if ( !$nc ) { // NO SITES FOUND! $errno = "1"; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $ldapuri = "ldap://".$clhost.":".$clport; $clconn = ldap_connect($ldapuri); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($hnarray,$clhost); $sitetag[$clhost] = 1; /* filtering tag */ if ($debug==2) dbgmsg("$k - $clhost:$clport "); } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all SEs $ts1 = time(); $srarray = @ldap_search($dsarray,DN_LOCAL,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); $ts2 = time(); if($debug) dbgmsg("
    ".$errors["124"]." (".($ts2-$ts1).$errors["104"].")
    "); $ctable = new LmTableSp($module,$toppage->$module); // Loop on SEs $senum = 0; $space = 0; $capacity = 0; for ( $ids = 0; $ids < $nhosts; $ids++ ) { $sr = $srarray[$ids]; $ds = $dsarray[$ids]; $hn = $hnarray[$ids]; /* host name, for debugging */ if ($ds && $sr) { $entries = @ldap_get_entries($ds,$sr); $nclusters = $entries["count"]; /* May be several SEs! */ if ( !$nclusters ) continue; for ( $i = 0; $i < $nclusters; $i++) { $senum++; $curdn = $entries[$i]["dn"]; $curname = $entries[$i][SEL_NAME][0]; $curalias = $entries[$i][SEL_ANAM][0]; $curspace = ( $entries[$i][SEL_FREE][0] ) ? $entries[$i][SEL_FREE][0] : 0; // $curcapacity = ( $entries[$i][SEL_TOTA][0] ) ? $entries[$i][SEL_TOTA][0] : $errors["407"]; $curcapacity = ( $entries[$i][SEL_TOTA][0] ) ? $entries[$i][SEL_TOTA][0] : $curspace; $cururl = ( $entries[$i][SEL_BURL][0] ) ? $entries[$i][SEL_BURL][0] : $entries[$i][SEL_CURL][0]; $curtype = $entries[$i][SEL_TYPE][0]; $clstring = popup("clusdes.php?host=$curname&port=$curport&isse=1&debug=$debug",700,620,1,$lang,$debug); $curspace = intval($curspace/1000); $occupancy = 1; // by default, all occupied $space += $curspace; // if ( $curcapacity != $errors["407"] ) { if ( $curcapacity != 0 ) { $curcapacity = intval($curcapacity/1000); $occupancy = ($curcapacity - $curspace)/$curcapacity; $capacity += $curcapacity; } $tstring = $curspace."/".$curcapacity; $tlen = strlen($tstring); if ($tlen<11) { $nspaces = 11 - $tlen; for ( $is = 0; $is < $nspaces; $is++ ) $tstring .= " "; } $tstring = urlencode($tstring); if ($debug==2) dbgmsg("$senum: $curname at $hn
    "); if ( strlen($curalias) > 15 ) $curalias = substr($curalias,0,15) . ">"; // $clstring = popup("clusdes.php?host=$curname&port=2135",700,620,1,$lang,$debug); $rowcont[] = "$senum"; $rowcont[] = " $curalias"; $rowcont[] = "\"$tstring\""; // $rowcont[] = $curcapacity.$errors["408"]; // $rowcont[] = $curspace.$errors["408"]; $rowcont[] = "$curname"; $rowcont[] = "$cururl"; $rowcont[] = "$curtype"; $ctable->addrow($rowcont); $rowcont = array (); } } $entries = array(); $jentries = array(); $gentries = array(); } $occupancy = ($capacity - $space)/$capacity; $tstring = $space."/".$capacity; $ctable->addspacer("#ffcc33"); $rowcont[] = " "; $rowcont[] = "".$errors["405"].""; $rowcont[] = "\"$tstring\""; //$rowcont[] = "$capacity".$errors["408"].""; //$rowcont[] = "$space".$errors["408"].""; $rowcont[] = " "; $rowcont[] = " "; $rowcont[] = " "; $ctable->addrow($rowcont, "#ffffff"); $ctable->close(); return 0; // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/jobstat.php0000644000000000000000000000013215067751327021633 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.610214838 nordugrid-arc-7.1.1/src/services/monitor/jobstat.php0000644000175000002070000001762115067751327023544 0ustar00mockbuildmock00000000000000title; $module = &$toppage->module; $strings = &$toppage->strings; $errors = &$toppage->errors; // Header table $titles = explode(":",$toptitle); // two alternative titles, separated by column if ($jobdn=="all") { $clstring = popup("clusdes.php?host=$host&port=$port&schema=$schema",700,620,1,$lang,$debug); $gtitle = "".$titles[0]." $host"; } else { $jobdn = rawurldecode($jobdn); $jobdn = preg_replace("/\"/","",$jobdn); $dn_pieces = ldap_explode_dn($jobdn,1); $jobgid = $dn_pieces[0]; $gtitle = "".$titles[1].": $jobgid"; } $toppage->tabletop("",$gtitle); // Arrays defining the attributes to be returned $lim = array( "dn", JOB_NAME, JOB_EQUE, JOB_GOWN, JOB_STAT, JOB_USET, JOB_SUBM, JOB_CPUS ); // ldapsearch filter string for jobs $filstr="(objectclass=".OBJ_AJOB.")"; if ( $schema == "GLUE2") { $lim = array( "dn", GJOB_NAME, GJOB_EQUE, GJOB_GOWN, GJOB_STAT, GJOB_USET, GJOB_SUBM, GJOB_CPUS ); $filstr="(objectclass=".GOBJ_AJOB.")"; } if ( $debug ) { ob_end_flush(); ob_implicit_flush(); } $tlim = 15; $tout = 20; if( $debug ) dbgmsg("
    :::> ".$errors["101"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); // Establish connection to the requested LDAP server $ldapuri = "ldap://".$host.":".$port; $ds = ldap_connect($ldapuri); $bdn = DN_LOCAL; $topdn = DN_GLOBL; if ( $schema == "GLUE2") { $bdn = DN_GLUE; if ($jobdn != "all") $bdn = ""; } if ($ds) { // Single job info dump and quit if ($jobdn != "all") { // $basedn = explode("Mds",$jobdn); $basedn = preg_split("/mds/i",$jobdn); $locdn = $basedn[0].$bdn; $thisdn = ldap_nice_dump($strings,$ds,$locdn); ldap_close($ds); return 0; } // Loop over all the jobs $sr = @ldap_search($ds,$bdn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); // Fall back to conventional LDAP // if (!$sr) $sr = @ldap_search($ds,$bdn,$filstr,$lim,0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { // If search returned, check that there are valid entries $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { // HTML table initialisation $jtable = new LmTable($module,$toppage->$module); // If there are valid entries, tabulate results $entries = ldap_get_entries($ds,$sr); $njobs = $entries["count"]; define("CMPKEY",JOB_SUBM); if ( $schema == "GLUE2") define("CMPKEY",GJOB_SUBM); usort($entries,"ldap_entry_comp"); // loop on jobs $jcount = 0; for ($i=1; $i<$njobs+1; $i++) { $jdn = rawurlencode($entries[$i]["dn"]); $curstat = $entries[$i][JOB_STAT][0]; if ( $schema == "GLUE2") { $curstat = $entries[$i][GJOB_STAT][0]; } /* * The following flags may need an adjustment, * depending on the Job Status provider */ // Running job: statail == "R" or "run" // $statail = substr($curstat,-3); $statail = substr(strstr($curstat,"INLRMS:"),7); $statail = trim($statail); // Queued job: stahead != "FIN" && statail != "R" and "run" etc $stahead = substr($curstat,0,3); $flagrun = ( $status == "Running" && ( $statail == "R" || /* PBS */ $statail == "S" || /* suspended by Condor */ $statail == "run" ) /* easypdc */ ); $flagque = ( $status != "Running" && $statail != "R" && $statail != "S" && $statail != "run" && $stahead != "FIN" && $stahead != "FAI" && $stahead != "EXE" && $stahead != "KIL" && $stahead != "DEL" ); /* No changes necessary below */ $flagact = ($flagrun || $flagque)?1:0; if ($flagact == 1 || $status == "All" ) { if ( $schema == "GLUE2") { $uname = $entries[$i][GJOB_GOWN][0]; $encuname = rawurlencode($uname); $uname = addslashes($uname); // In case $uname contains escape characters $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][GJOB_NAME][0]) ? $jname : "N/A"; $queue = ($entries[$i][GJOB_EQUE][0]) ? $entries[$i][GJOB_EQUE][0] : ""; $time = ($entries[$i][GJOB_USET][0]) ? $entries[$i][GJOB_USET][0] : ""; $ncpus = ($entries[$i][GJOB_CPUS][0]) ? $entries[$i][GJOB_CPUS][0] : ""; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jdn&schema=$schema",750,430,4,$lang,$debug); $quewin = popup("quelist.php?host=$host&port=$port&qname=$queue&schema=$schema",750,430,6,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname&schema=$schema",700,500,5,$lang,$debug); } else { $uname = $entries[$i][JOB_GOWN][0]; $encuname = rawurlencode($uname); $uname = addslashes($uname); // In case $uname contains escape characters $family = cnvname($uname, 2); $jname = htmlentities($entries[$i][JOB_NAME][0]); $jobname = ($entries[$i][JOB_NAME][0]) ? $jname : "N/A"; $queue = ($entries[$i][JOB_EQUE][0]) ? $entries[$i][JOB_EQUE][0] : ""; $time = ($entries[$i][JOB_USET][0]) ? $entries[$i][JOB_USET][0] : ""; $ncpus = ($entries[$i][JOB_CPUS][0]) ? 
$entries[$i][JOB_CPUS][0] : ""; $newwin = popup("jobstat.php?host=$host&port=$port&status=$status&jobdn=$jdn",750,430,4,$lang,$debug); $quewin = popup("quelist.php?host=$host&port=$port&qname=$queue",750,430,6,$lang,$debug); $usrwin = popup("userlist.php?bdn=$topdn&owner=$encuname",700,500,5,$lang,$debug); } $jcount++; // filling the table $jrowcont[] = "$jcount $jobname"; $jrowcont[] = "$family"; $jrowcont[] = "$curstat"; $jrowcont[] = "$time"; $jrowcont[] = "$queue"; $jrowcont[] = "$ncpus"; $jtable->addrow($jrowcont); $jrowcont = array (); } } if ($jcount == 0) $jtable->adderror("".$errors["4"].": ".$status.""); $jtable->close(); } else { echo "
    ".$errors["4"]."".$errors["7"]."\n"; } } else { echo "
    ".$errors["4"]."".$errors["7"]."\n"; } $entries = array(); @ldap_free_result($sr); ldap_close($ds); return 0; } else { $errno = "6"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Done $toppage->close(); ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/monitor.js0000644000000000000000000000013215067751327021501 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 30 ctime=1759499030.620241161 nordugrid-arc-7.1.1/src/services/monitor/monitor.js0000644000175000002070000000322715067751327023407 0ustar00mockbuildmock00000000000000function ngurl(link) { var wloc="http://"+document.domain+link; var vtest=link; var prot=vtest.substring(0,4); var vhttp="http"; if (prot == vhttp) { var wloc=link } return wloc; } function monitor(link,x,y,n) { // "n" is needed to keep dedicated windows for each monitor type // function ngurl() adds HTTP contact string, if needed // wloc=ngurl(link); var ua = ' ' + navigator.userAgent.toLowerCase(); var is_opera = ua.indexOf('opera'); var is_lynx = ua.indexOf('lynx'); var is_konqueror = ua.indexOf('konqueror'); wloc = link; browser = navigator.appName; if ( is_opera>0 || is_lynx>0 || is_konqueror>0 ) { window.location = wloc; } else { aaa=open("","win"+n,"innerWidth="+x+",innerHeight="+y+",resizable=1,scrollbars=1,width="+x+",height="+y); aaa.document.encoding = "text/html; charset=utf-8"; aaa.document.clear(); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln("NorduGrid"); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.writeln("






    "); aaa.document.writeln("Collecting information..."); aaa.document.writeln("

    "); aaa.document.writeln(""); aaa.document.writeln(""); aaa.document.close(); aaa.document.location.href=wloc; aaa.document.close(); } } nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/includes0000644000000000000000000000013115067751426021204 xustar0030 mtime=1759499030.854450771 29 atime=1759499034.76351017 30 ctime=1759499030.854450771 nordugrid-arc-7.1.1/src/services/monitor/includes/0000755000175000002070000000000015067751426023164 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023316 xustar0030 mtime=1759498967.775506042 30 atime=1759498967.874493787 30 ctime=1759499030.832110409 nordugrid-arc-7.1.1/src/services/monitor/includes/Makefile.am0000644000175000002070000000015715067751327025223 0ustar00mockbuildmock00000000000000monitorincdir = @monitor_prefix@/includes monitorinc_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorinc_DATA) nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/comfun.inc0000644000000000000000000000013215067751327023244 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.842450589 nordugrid-arc-7.1.1/src/services/monitor/includes/comfun.inc0000644000175000002070000000617215067751327025154 0ustar00mockbuildmock00000000000000 XXX   */ $geo1 = $a[0]; $geo2 = $b[0]; $geo1 = substr(stristr($geo1,""),3); $geo2 = substr(stristr($geo2,""),3); $geo1 = substr($geo1,0,strpos($geo1,"<")); $geo2 = substr($geo2,0,strpos($geo2,"<")); $ali1 = $a[1]; $ali2 = $b[1]; $ali1 = substr(stristr($ali1,""),3); $ali2 = substr(stristr($ali2,""),3); $ali1 = substr($ali1,0,strpos($ali1,"<")); $ali2 = substr($ali2,0,strpos($ali2,"<")); $cmpgeo = strcasecmp ($geo1,$geo2); $cmpali = strcasecmp ($ali1,$ali2); if ( !$cmpgeo ) return $cmpali; return $cmpgeo; } /** * @return int * @param a array * @param b array * @desc Compares by CPU */ function comp_by_cpu ($a, $b) { $cpu1 = $a[2]; $cpu2 = $b[2]; $cmpcpu = $cpu2 - $cpu1; return $cmpcpu; } /** * @return int * @param a array * @param b array * @desc Compares by grid running jobs */ function comp_by_grun ($a, $b) { $sum1 = $a[3]; $sum2 = $b[3]; // echo $sum1." vs ".$sum2."
    "; $sum1 = substr(stristr($sum1,"alt=\""),5); $sum2 = substr(stristr($sum2,"alt=\""),5); $sum1 = substr($sum1,0,strpos($sum1,"+")); $sum2 = substr($sum2,0,strpos($sum2,"+")); $cmpsum = $sum2 - $sum1; return $cmpsum; } ?>nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/blacklist.inc0000644000000000000000000000013115067751327023724 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 29 ctime=1759499030.83698686 nordugrid-arc-7.1.1/src/services/monitor/includes/blacklist.inc0000644000175000002070000000257315067751327025636 0ustar00mockbuildmock00000000000000"0", "grid.fi.uib.no"=>"0", "dc2.uio.no"=>"0", "dc1.uio.no"=>"0", "dc3.uio.no"=>"0", "dc4.uio.no"=>"0", "fire.ii.uib.no"=>"0", "hydra.ii.uib.no"=>"0", "grid.nbi.dk"=>"0", "lscf.nbi.dk"=>"0", "hepax1.nbi.dk"=>"0", "morpheus.nbi.dk"=>"0", "heppc08.nbi.dk"=>"0", "grid.uni-c.dk"=>"0", "tambohuse.imada.sdu.dk"=>"0", "gridrouter.imada.sdu.dk"=>"0", "tiger.imada.sdu.dk"=>"0", "cbs202.cbs.dtu.dk"=>"0", "gridgate.it.dtu.dk"=>"0", "amigos24.diku.dk"=>"0", "nroot.hip.fi"=>"0", "grid.hip.fi"=>"0", "hirmu.hip.fi"=>"0", "pc19.hip.fi"=>"0", "pc30.hip.helsinki.fi"=>"0", "testbed0.hip.helsinki.fi"=>"0", "pchip04.cern.ch"=>"0", "quark.hep.lu.se"=>"0", "farm.hep.lu.se"=>"0", "hathi.hep.lu.se"=>"0", "grid.tsl.uu.se"=>"0", "grid.scfab.se"=>"0", "bambi.quark.lu.se"=>"0", "nexus.swegrid.se"=>"0", "hagrid.it.uu.se"=>"0", "ingrid.hpc2n.umu.se"=>"0", "sigrid.lunarc.lu.se"=>"0", "bluesmoke.nsc.liu.se"=>"0", "g01n01.pdc.kth.se"=>"0", "ingvar.nsc.liu.se"=>"0", "seth.hpc2n.umu.se"=>"0", "banan.hpc2n.umu.se"=>"0", "jakarta.hpc2n.umu.se"=>"0", "gridum2.cs.umu.se"=>"0", "gridum1.cs.umu.se"=>"0", "sleipner.byggmek.lth.se"=>"0", "grendel.it.uu.se"=>"0", "login-3.monolith.nsc.liu.se"=>"0", "vls.science.upjs.sk"=>"0", "213-35-172-38-dsl.plus.estpak.ee"=>"0", "cm-gw.phys.ualberta.ca"=>"0", "tgrid.icepp.s.u-tokyo.ac.jp"=>"0", "hmx00.kek.jp"=>"0", "dummy"=>"0", "dummy"=>"0"); ?>nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/locale.inc0000644000000000000000000000013215067751327023214 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.874493787 30 ctime=1759499030.849702945 nordugrid-arc-7.1.1/src/services/monitor/includes/locale.inc0000644000175000002070000007054115067751327025125 0ustar00mockbuildmock00000000000000 array ( // Table headers "loadmon" => array( "0" => "Grid Monitor", "Country" => 30, "Site" => 160, "CPUs" => 10, "Load (processes: Grid+local)" => 210, "Queueing" => 10 ), "clusdes" => array("0" => "Cluster Details for", "Queue" => 0, "Status" => 0, "Limits (min)" => 0, "CPUs" => 0, "Running" => 0, "Queueing" => 0 ), "jobstat" => array("0" => "Jobs at:Job ID", "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Queue" => 0, "CPUs" => 0 ), "volist" => array("0" => "Virtual Organisations", "Virtual Organisation" => 0, "Members" => 0, "Served by" => 0 ), "vousers" => array("0" => "Grid User Base", "#" => 0, "Name" => 0, "Affiliation" => 0, "E-mail" => 0 ), "userlist" => array("0" => "Information for", "" => 0, "Job name" => 0, "Status" => 0, "CPU (min)" => 0, "Cluster" => 0, "Queue" => 0, "CPUs" => 0 ), "userres" => array("0" => "", "Cluster:queue" => 0, "Free CPUs" => 0, "Exp. 
queue length" => 0, "Free disk (MB)" => 0 ), "attlist" => array("0" => "Attribute values", "Resource" => 0, "Current value" => 0 ), "quelist" => array("0" => "Details for the queue", "" => 0, "Job name" => 0, "Owner" => 0, "Status" => 0, "CPU (min)" => 0, "Memory (KB)" => 0, "CPUs" => 0 ), "sestat" => array("0" => "Storage Elements", "#" => 0, "Alias" => 0, "Tot. space" => 0, "Free space" => 0, "Name" => 0, "Base URL" => 0, "Type" => 0 ), "allusers" => array("0" => "Authorised Grid Users:Active Grid Users", "#" => 0, "Name" => 0, "Affiliaton" => 0, "Jobs" => 0, "Sites" => 0 ), "ldapdump" => array("0" => "", "Attribute" => 0, "Value" => 0 ), // IS attributes "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Info valid from (GMT)", "Mds-validto" => "Info valid to (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Front-end domain name", "nordugrid-cluster-aliasname" => "Cluster alias", "nordugrid-cluster-contactstring" => "Contact string", "nordugrid-cluster-interactive-contactstring" => "Interactive contact", "nordugrid-cluster-comment" => "Comment", "nordugrid-cluster-support" => "E-mail contact", "nordugrid-cluster-lrms-type" => "LRMS type", "nordugrid-cluster-lrms-version" => "LRMS version", "nordugrid-cluster-lrms-config" => "LRMS details", "nordugrid-cluster-architecture" => "Architecture", "nordugrid-cluster-opsys" => "Operating system", "nordugrid-cluster-homogeneity" => "Homogeneous cluster", "nordugrid-cluster-nodecpu" => "CPU type (slowest)", "nordugrid-cluster-nodememory" => "Memory (MB, smallest)", "nordugrid-cluster-totalcpus" => "CPUs, total", "nordugrid-cluster-cpudistribution" => "CPU:machines", "nordugrid-cluster-sessiondir-free" => "Disk space, available (MB)", "nordugrid-cluster-sessiondir-total" => "Disk space, total (MB)", "nordugrid-cluster-cache-free" => "Cache size, available (MB)", "nordugrid-cluster-cache-total" => "Cache size, total (MB)", "nordugrid-cluster-runtimeenvironment" => "Runtime environment", "nordugrid-cluster-localse" => "Storage Element, local", "nordugrid-cluster-middleware" => "Grid middleware", "nordugrid-cluster-totaljobs" => "Jobs, total amount", "nordugrid-cluster-usedcpus" => "CPUs, occupied", "nordugrid-cluster-queuedjobs" => "Jobs, queued", "nordugrid-cluster-location" => "Postal code", "nordugrid-cluster-owner" => "Owner", "nordugrid-cluster-issuerca" => "Certificate issuer", "nordugrid-cluster-nodeaccess" => "Node IP connectivity", "nordugrid-cluster-gridarea" => "Session area (OBSOLETE)", "nordugrid-cluster-gridspace" => "Grid disk space (OBSOLETE)", "nordugrid-cluster-opsysdistribution" => "OS distribution (OBSOLETE)", "nordugrid-cluster-runningjobs" => "Jobs, running (OBSOLETE)", "nordugrid-queue-name" => "Queue name", "nordugrid-queue-status" => "Queue status", "nordugrid-queue-running" => "Jobs, running", "nordugrid-queue-queued" => "Jobs, queued", "nordugrid-queue-maxrunning" => "Jobs, running (max)", "nordugrid-queue-maxqueuable" => "Jobs, queueable (max)", "nordugrid-queue-maxuserrun" => "Jobs per Unix user (max)", "nordugrid-queue-maxcputime" => "CPU time, max. (minutes)", "nordugrid-queue-mincputime" => "CPU time, min. 
(minutes)", "nordugrid-queue-defaultcputime" => "CPU time, default (minutes)", "nordugrid-queue-schedulingpolicy" => "Scheduling policy", "nordugrid-queue-totalcpus" => "CPUs, total", "nordugrid-queue-nodecpu" => "CPU type", "nordugrid-queue-nodememory" => "Memory (MB)", "nordugrid-queue-architecture" => "Architecture", "nordugrid-queue-opsys" => "Operating system", "nordugrid-queue-gridrunning" => "Grid jobs, running", "nordugrid-queue-gridqueued" => "Grid jobs, queued", "nordugrid-queue-assignedcpunumber" => "CPUs per queue (OBSOLETE)", "nordugrid-queue-assignedcputype" => "CPU type (OBSOLETE)", "nordugrid-job-globalid" => "ID", "nordugrid-job-globalowner" => "Owner", "nordugrid-job-execcluster" => "Execution cluster", "nordugrid-job-execqueue" => "Execution queue", "nordugrid-job-stdout" => "Standard output file", "nordugrid-job-stderr" => "Standard error file", "nordugrid-job-stdin" => "Standard input file", "nordugrid-job-reqcput" => "Requested CPU time", "nordugrid-job-status" => "Status", "nordugrid-job-queuerank" => "Position in the queue", "nordugrid-job-lrmscomment" => "LRMS comment", "nordugrid-job-submissionui" => "Submission machine", "nordugrid-job-submissiontime" => "Submission time (GMT)", "nordugrid-job-usedcputime" => "Used CPU time", "nordugrid-job-usedwalltime" => "Used wall time", "nordugrid-job-sessiondirerasetime" => "Erase time (GMT)", "nordugrid-job-proxyexpirationtime" => "Proxy expiration time (GMT)", "nordugrid-job-usedmem" => "Used memory (KB)", "nordugrid-job-errors" => "Errors", "nordugrid-job-jobname" => "Name", "nordugrid-job-runtimeenvironment" => "Runtime environment", "nordugrid-job-cpucount" => "Requested CPUs", "nordugrid-job-executionnodes" => "Execution nodes", "nordugrid-job-gmlog" => "GM log file", "nordugrid-job-gridlog" => "Gridlog file (OBSOLETE)", "nordugrid-job-clientsoftware" => "Client version", "nordugrid-authuser-name" => "Name", "nordugrid-authuser-sn" => "Subject Name", "nordugrid-authuser-freecpus" => "Free CPUs", "nordugrid-authuser-diskspace" => "Free disk space (MB)", "nordugrid-authuser-queuelength" => "Experienced queue length", "nordugrid-se-name" => "Domain name", "nordugrid-se-aliasname" => "Storage element alias", "nordugrid-se-type" => "Storage element type", "nordugrid-se-freespace" => "Free space (GB)", "nordugrid-se-totalspace" => "Total space (GB)", "nordugrid-se-baseurl" => "Contact URL", "nordugrid-se-authuser" => "Authorised user (DN)", "nordugrid-se-location" => "Postal code", "nordugrid-se-owner" => "Owner", "nordugrid-se-issuerca" => "Certificate issuer", "nordugrid-se-comment" => "Comment", "nordugrid-rc-name" => "Domain name", "nordugrid-rc-aliasname" => "Replica Catalog alias", "nordugrid-rc-baseurl" => "Contact URL", "nordugrid-rc-authuser" => "Authorised user (DN)", "nordugrid-rc-location" => "Postal code", "nordugrid-rc-owner" => "Owner", "nordugrid-rc-issuerca" => "Certificate issuer" ), // Errors, warnings etc "errors" => array( "1" => "Can not read top-level resource indices", "2" => "None of the local indices returned connection", "3" => " bad configuration or request timed out", "4" => "No Grid jobs found", "5" => "No information found", "6" => "Server unavailable", "7" => " - refresh later", "101" => " Monitor timeouts for GRIS: ", "102" => " sec on connection and ", "103" => " sec on search", "104" => " sec spent searching", "105" => "Showing resources only in ", "106" => "Polled top-level indices: ", "107" => "Got geographical locations, scanned sites: ", "108" => " sites arranged by geographical 
location", "109" => "Search for cluster attributes", "110" => "Search for queue attributes", "111" => "No data from ", "112" => " is up in ", "113" => " has no resources to offer", "114" => " Monitor timeouts for GIIS: ", "115" => "Skipping GRIS: ", "116" => "not a ", "117" => "Checking connection: ", "118" => "OK", "119" => "That far, detected resources of kind ", "120" => "LDAP error searching ", "121" => " status at ", "122" => "Blacklisted: ", "123" => "Registrant found for ", "124" => "Search for SE attributes", "125" => "Search for users", "126" => "Search for jobs", "127" => " has job ", "128" => " while not being authorized", "129" => "Can not get object data: error ", "130" => " Monitor timeouts for EMIR: ", "301" => "Refresh", "302" => "Print", "303" => "Help", "304" => "Close", "305" => "Red", "306" => "Grey", "307" => "All users", "308" => "Active users", "309" => "Search", "310" => "Storage", "311" => "VOs", "312" => "Flag of ", "313" => " Grid processes and ", "314" => " local processes", "401" => "Processes", "402" => "Grid", "403" => "Local", "404" => "World", "405" => "TOTAL", "406" => " sites", "407" => "a lot of", "408" => " GB", "409" => " ALL" ), // Post code conversion: only for [en]! "tlconvert" => array ( "AU" => "Australia", "CA" => "Canada", "CH" => "Switzerland", "DK" => "Denmark", "EE" => "Estonia", "FI" => "Finland", "FIN" => "Finland", "SF" => "Finland", "DE" => "Germany", "JP" => "Japan", "NO" => "Norway", "N" => "Norway", "SE" => "Sweden", "SK" => "Slovakia", "SI" => "Slovenia", "KEK" => "Japan", "TOKYO" => "Japan" ) ), "ru" => array ( // Table headers "loadmon" => array("0" => "Грид-монитор", "Страна" => 0, "РеÑурÑ" => 0, "ЦП" => 0, "Загрузка" => 0, "Ожидают" => 0 ), "clusdes" => array("0" => "ОпиÑание клаÑтера", "Очередь" => 0, "СоÑтоÑние" => 0, "ДлительноÑть (мин)" => 0, "ЦП" => 0, "СчитаютÑÑ" => 0, "Ожидают" => 0 ), "jobstat" => array("0" => "Задачи на:Ðомер задачи", "Ð˜Ð¼Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" => 0, "ХозÑин" => 0, "СоÑтоÑние" => 0, "Ð’Ñ€ÐµÐ¼Ñ (мин)" => 0, "Очередь" => 0, "ЦП" => 0 ), "volist" => array("0" => "Виртуальные организации", "Ð’Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¾Ñ€Ð³Ð°Ð¸Ð·Ð°Ñ†Ð¸Ñ" => 0, "Члены" => 0, "ОбÑлуживаетÑÑ" => 0 ), "vousers" => array("0" => "Пользователи", "#" => 0, "ИмÑ" => 0, "МеÑто работы" => 0, "Ð­Ð»ÐµÐºÑ‚Ñ€Ð¾Ð½Ð½Ð°Ñ Ð¿Ð¾Ñ‡Ñ‚Ð°" => 0 ), "userlist" => array("0" => "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ", "" => 0, "Ð˜Ð¼Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" => 0, "СоÑтоÑние" => 0, "Ð’Ñ€ÐµÐ¼Ñ (мин)" => 0, "РеÑурÑ" => 0, "Очередь" => 0, "ЦП" => 0 ), "userres" => array("0" => "", "РеÑурÑ:очередь" => 0, "Свободные ЦП" => 0, "Длина очереди" => 0, "ДиÑк, доÑтупно (Мб)" => 0 ), "attlist" => array("0" => "Ð—Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð°Ñ‚Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð¾Ð²", "РеÑурÑ" => 0, "Значение" => 0 ), "quelist" => array("0" => "ОпиÑание очереди", "" => 0, "Ð˜Ð¼Ñ Ð·Ð°Ð´Ð°Ñ‡Ð¸" => 0, "ХозÑин" => 0, "СоÑтоÑние" => 0, "Ð’Ñ€ÐµÐ¼Ñ (мин)" => 0, "ОЗУ (КБ)" => 0, "ЦП" => 0 ), "sestat" => array("0" => "Внешние запоминающие уÑтройÑтва", "#" => 0, "Ðазвание" => 0, "ВеÑÑŒ объём" => 0, "Свободно" => 0, "ИмÑ" => 0, "URL базы" => 0, "Тип" => 0 ), "allusers" => array("0" => "Допущенные пользователи:Ðктивные пользователи", "#" => 0, "ИмÑ" => 0, "МеÑто работы" => 0, "Задачи" => 0, "РеÑурÑÑ‹" => 0 ), "ldapdump" => array("0" => "", "Ðттрибут" => 0, "Значение" => 0 ), "mdsattr" => array( "objectClass" => "objectClass", "Mds-validfrom" => "Данные дейÑтвительны Ñ (GMT)", "Mds-validto" => "Данные дейÑтвительны по (GMT)" ), "isattr" => array( "nordugrid-cluster-name" => "Ð˜Ð¼Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ð¾Ð¹ машины", "nordugrid-cluster-aliasname" => 
"Ðазвание", "nordugrid-cluster-contactstring" => "Контактный адреÑ", "nordugrid-cluster-interactive-contactstring" => "Интерактивный адреÑ", "nordugrid-cluster-comment" => "Комментарий", "nordugrid-cluster-support" => "Е-почта ответÑтвенного", "nordugrid-cluster-lrms-type" => "СУПО, тип", "nordugrid-cluster-lrms-version" => "СУПО, верÑиÑ", "nordugrid-cluster-lrms-config" => "СУПО, подробноÑти", "nordugrid-cluster-architecture" => "Ðрхитектура", "nordugrid-cluster-opsys" => "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема", "nordugrid-cluster-homogeneity" => "ГомогенноÑть реÑурÑа", "nordugrid-cluster-nodecpu" => "ПроцеÑÑор, тип (худший)", "nordugrid-cluster-nodememory" => "ОЗУ (Мб, наименьшее)", "nordugrid-cluster-totalcpus" => "ПроцеÑÑоры, вÑего", "nordugrid-cluster-cpudistribution" => "ПроцеÑÑоры:узлы", "nordugrid-cluster-sessiondir-free" => "ДиÑк, доÑтупно (Мб)", "nordugrid-cluster-sessiondir-total" => "ДиÑк, веÑÑŒ объём (Мб)", "nordugrid-cluster-cache-free" => "ДиÑковый кÑш, Ñвободно (Мб)", "nordugrid-cluster-cache-total" => "ДиÑковый кÑш, вÑего (Мб)", "nordugrid-cluster-runtimeenvironment" => "Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда", "nordugrid-cluster-localse" => "ВЗУ, локальное", "nordugrid-cluster-middleware" => "Грид-ПО", "nordugrid-cluster-totaljobs" => "Задачи, вÑего", "nordugrid-cluster-usedcpus" => "ПроцеÑÑоры, занÑтые", "nordugrid-cluster-queuedjobs" => "Задачи, в очереди", "nordugrid-cluster-location" => "Почтовый индекÑ", "nordugrid-cluster-owner" => "Владелец", "nordugrid-cluster-issuerca" => "Сертификат выдан", "nordugrid-cluster-nodeaccess" => "IP-Ñоединение узлов", "nordugrid-cluster-gridarea" => "ÐÐ´Ñ€ÐµÑ ÑеÑÑий (СТÐРЫЙ)", "nordugrid-cluster-gridspace" => "Грид-диÑк (СТÐРЫЙ)", "nordugrid-cluster-opsysdistribution" => "ДиÑтрибутив ОС (СТÐРЫЙ)", "nordugrid-cluster-runningjobs" => "Задачи, в Ñчёте (СТÐРЫЙ)", "nordugrid-queue-name" => "Ð˜Ð¼Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´Ð¸", "nordugrid-queue-status" => "СоÑтоÑние очереди", "nordugrid-queue-running" => "Задачи, в Ñчёте", "nordugrid-queue-queued" => "Задачи, в очереди", "nordugrid-queue-maxrunning" => "Задачи, в Ñчёте (предел)", "nordugrid-queue-maxqueuable" => "Задачи, в очереди (предел)", "nordugrid-queue-maxuserrun" => "Задачи на Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ (предел)", "nordugrid-queue-maxcputime" => "ДлительноÑть, наиб. (мин)", "nordugrid-queue-mincputime" => "ДлительноÑть, наим. (мин)", "nordugrid-queue-defaultcputime" => "ДлительноÑть, по ум. 
(мин)", "nordugrid-queue-schedulingpolicy" => "Правила планировки", "nordugrid-queue-totalcpus" => "ПроцеÑÑоры, вÑего", "nordugrid-queue-nodecpu" => "ПроцеÑÑор, тип", "nordugrid-queue-nodememory" => "ОЗУ (Мб)", "nordugrid-queue-architecture" => "Ðрхитектура", "nordugrid-queue-opsys" => "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема", "nordugrid-queue-gridrunning" => "Грид-задачи, в Ñчёте", "nordugrid-queue-gridqueued" => "Грид-задачи, в очереди", "nordugrid-queue-assignedcpunumber" => "ПроцеÑÑоры (СТÐРЫЙ)", "nordugrid-queue-assignedcputype" => "Тип процеÑÑора (СТÐРЫЙ)", "nordugrid-job-globalid" => "Ðомер", "nordugrid-job-globalowner" => "ХозÑин", "nordugrid-job-execcluster" => "ВыполнÑющий клаÑтер", "nordugrid-job-execqueue" => "ВыполнÑÑŽÑ‰Ð°Ñ Ð¾Ñ‡ÐµÑ€ÐµÐ´ÑŒ", "nordugrid-job-stdout" => "Стандартный выход", "nordugrid-job-stderr" => "Ð¡Ñ‚Ð°Ð½Ð´Ð°Ñ€Ñ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°", "nordugrid-job-stdin" => "Стандартный вход", "nordugrid-job-reqcput" => "Запрошенное времÑ", "nordugrid-job-status" => "СоÑтоÑние", "nordugrid-job-queuerank" => "Положение в очереди", "nordugrid-job-lrmscomment" => "Комментарий СУПО", "nordugrid-job-submissionui" => "ЗаÑылающий клиент", "nordugrid-job-submissiontime" => "Ð’Ñ€ÐµÐ¼Ñ Ð·Ð°Ñылки (GMT)", "nordugrid-job-usedcputime" => "ИÑпользованное Ð²Ñ€ÐµÐ¼Ñ Ð¦ÐŸ", "nordugrid-job-usedwalltime" => "ИÑпользованное времÑ", "nordugrid-job-sessiondirerasetime" => "Срок ÑƒÐ½Ð¸Ñ‡Ñ‚Ð¾Ð¶ÐµÐ½Ð¸Ñ (GMT)", "nordugrid-job-proxyexpirationtime" => "Окончание доверенноÑти (GMT)", "nordugrid-job-usedmem" => "ИÑпользование ОЗУ (Кб)", "nordugrid-job-errors" => "Ошибки", "nordugrid-job-jobname" => "ИмÑ", "nordugrid-job-runtimeenvironment" => "Ð Ð°Ð±Ð¾Ñ‡Ð°Ñ Ñреда", "nordugrid-job-cpucount" => "Запрошено процеÑÑоров", "nordugrid-job-executionnodes" => "ВыполнÑющие узлы", "nordugrid-job-gmlog" => "Ð–ÑƒÑ€Ð½Ð°Ð»ÑŒÐ½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ ГМ", "nordugrid-job-gridlog" => "Грид-запиÑÑŒ (СТÐРЫЙ)", "nordugrid-job-clientsoftware" => "ВерÑÐ¸Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð°", "nordugrid-authuser-name" => "ИмÑ", "nordugrid-authuser-sn" => "Субъект", "nordugrid-authuser-freecpus" => "Свободные ЦП", "nordugrid-authuser-diskspace" => "ДиÑк, доÑтупно (Мб)", "nordugrid-authuser-queuelength" => "Длина очереди", "nordugrid-se-name" => "Доменное имÑ", "nordugrid-se-aliasname" => "Ðазвание", "nordugrid-se-type" => "Тип", "nordugrid-se-freespace" => "Свободный объём (Гб)", "nordugrid-se-totalspace" => "ВеÑÑŒ объём (Гб)", "nordugrid-se-baseurl" => "Контактный адреÑ", "nordugrid-se-authuser" => "Допущенные ползьзователи (DN)", "nordugrid-se-location" => "Почтовый индекÑ", "nordugrid-se-owner" => "Владелец", "nordugrid-se-issuerca" => "Сертификат выдан", "nordugrid-se-comment" => "Комментарий", "nordugrid-rc-name" => "Доменное имÑ", "nordugrid-rc-aliasname" => "Ðазвание", "nordugrid-rc-baseurl" => "Контактный адреÑ", "nordugrid-rc-authuser" => "Допущенные пользователи (DN)", "nordugrid-rc-location" => "Почтовый индекÑ", "nordugrid-rc-owner" => "Владелец", "nordugrid-rc-issuerca" => "Сертификат выдан" ), "errors" => array( "1" => "Ðевозможно прочеÑть ÑпиÑки выÑшего уровнÑ", "2" => "Ðи один из меÑтных ÑпиÑков не отзываетÑÑ", "3" => " Ð½ÐµÐ²ÐµÑ€Ð½Ð°Ñ ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ð¸Ð»Ð¸ иÑтекло Ð²Ñ€ÐµÐ¼Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа", "4" => "Ðе обнаружено Грид-задач", "5" => "Ðет информации", "6" => "Служба недоÑтупна", "7" => " - попробуйте обновить поззже", "101" => " Ð’Ñ€ÐµÐ¼Ñ Ð½Ð° ÑвÑзь Ñ Ð»Ð¾ÐºÐ°Ð»ÑŒÐ½Ñ‹Ð¼ ÑпиÑком: ", "102" => " Ñ Ð½Ð° Ñоединение и ", "103" => " Ñ Ð½Ð° поиÑк", "104" => " Ñ Ð·Ð°Ñ‚Ñ€Ð°Ñ‡ÐµÐ½Ð¾ на поиÑк", "105" => "ПеречиÑление реÑурÑов: ", "106" => "Опрошено ÑпиÑков 
верхнего уровнÑ: ", "107" => "Получены географичеÑкие координаты, проÑканировано реÑурÑов: ", "108" => " реÑурÑов упорÑдочено по геополитичеÑкому признаку", "109" => "ПоиÑк аттрибутов клаÑтера", "110" => "ПоиÑк аттрибутов очереди", "111" => "Ðет данных Ñ ", "112" => " фукционирует в Ñтране: ", "113" => " не раÑполагает реÑурÑами", "114" => " Ð’Ñ€ÐµÐ¼Ñ Ð½Ð° ÑвÑзь Ñ Ð³Ð»Ð¾Ð±Ð°Ð»ÑŒÐ½Ñ‹Ð¼ ÑпиÑком: ", "115" => "ИгнорируетÑÑ Ñ€ÐµÑурÑ: ", "116" => "не ÑоответÑтвует типу ", "117" => "Проверка ÑвÑзи: ", "118" => "еÑть", "119" => "Ðа данный момент обнаружено реÑурÑов типа ", "120" => "Ошибка LDAP при поиÑке на ", "121" => "-ÑоÑтоÑние на ", "122" => "Заблокирован: ", "123" => "Обнаружен региÑтрант ", "124" => "ПоиÑк аттрибутов ВЗУ", "125" => "ПоиÑк пользователей", "126" => "ПоиÑк задач", "127" => " запуÑтил(а) задачу ", "128" => " не будучи допущенным(ой)", "301" => "Перезагрузить", "302" => "Печать", "303" => "Помощь", "304" => "Закрыть", "305" => "КраÑный", "306" => "Серый", "307" => "Ð’Ñе пользователи", "308" => "Ðктивные пользователи", "309" => "ПоиÑк", "310" => "ВЗУ", "311" => "Виртуальные организации", "312" => "Флаг Ñтраны: ", "313" => " Грид-процеÑÑов и ", "314" => " меÑтных процеÑÑов", "401" => "ПроцеÑÑÑ‹", "402" => "Грид", "403" => "меÑтные", "404" => "Мир", "405" => "ВСЕГО", "406" => " реÑурÑ(а)(ов)", "407" => "куча", "408" => " Гб", "409" => " ВСЕ" ), // Country name conversion, no postcode! "tlconvert" => array ( "Australia" => "ÐвÑтралиÑ", "Canada" => "Канада", "Switzerland" => "ШвейцариÑ", "Denmark" => "ДаниÑ", "Estonia" => "ЭÑтониÑ", "Finland" => "ФинлÑндиÑ", "Germany" => "ГерманиÑ", "Japan" => "ЯпониÑ", "Norway" => "ÐорвегиÑ", "Sweden" => "ШвециÑ", "Slovakia" => "СловакиÑ", "Slovenia" => "СловениÑ", "World" => "Мир" ) ) ); ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/Makefile.in0000644000000000000000000000013115067751357023331 xustar0029 mtime=1759498991.14125614 30 atime=1759499019.435277256 30 ctime=1759499030.833389704 nordugrid-arc-7.1.1/src/services/monitor/includes/Makefile.in0000644000175000002070000005106515067751357025243 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/monitor/includes ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| 
$$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(monitorincdir)" DATA = $(monitorinc_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ 
GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ 
SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ monitorincdir = @monitor_prefix@/includes monitorinc_DATA = $(srcdir)/*.inc EXTRA_DIST = $(monitorinc_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/monitor/includes/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/monitor/includes/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-monitorincDATA: $(monitorinc_DATA) @$(NORMAL_INSTALL) @list='$(monitorinc_DATA)'; test -n "$(monitorincdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(monitorincdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(monitorincdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(monitorincdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(monitorincdir)" || exit $$?; \ done uninstall-monitorincDATA: @$(NORMAL_UNINSTALL) @list='$(monitorinc_DATA)'; test -n "$(monitorincdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(monitorincdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(monitorincdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-monitorincDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-monitorincDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-monitorincDATA install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am \ uninstall-monitorincDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/attlist.inc0000644000000000000000000000013215067751327023441 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.835864693 nordugrid-arc-7.1.1/src/services/monitor/includes/attlist.inc0000644000175000002070000002204415067751327025345 0ustar00mockbuildmock00000000000000tabletop("",TOPTIT); $tlim = 20; $tout = 20; $engatts = array (); $lim = array ( "dn" ); // deduce the top object name assuming all requests are for the same object // TODO: this works only for NG, there are no dashes in GLUE2. Object name should not be guessed, as in some cases in GLUE2 there is no trace of the object // Change in clusdes.php or other callers // Keeping this code for backward compatibility only for NG if ( $schema == "NG") { $components = explode("-",$attributes[0]); $object = $components[0]."-".$components[1]; } // start building the filter $filter = "(&(objectclass=$object)"; $n = count($attributes); $natt = 0; $attrtag = array (); for ( $i=0; $i<$n; $i++ ){ $attribute = $attributes[$i]; if ( !$attribute ) continue; $selection = ( $filters[$i] ) ? $filters[$i] : "*"; $is = ( $signs[$i] ) ? $signs[$i] : "="; $lim[] = $attribute; if ( !in_array($attribute,$attrtag)) $engatts[] = ( $isattr[$attribute] ) ? $isattr[$attribute] : $attribute; $attrtag[] = $attribute; // create the filter switch ( $is ) { case "!=": $filter .= "(!($attribute=$selection))"; break; case "~": $selection = "*".$selection."*"; $filter .= "($attribute=$selection)"; break; case "!~": $selection = "*".$selection."*"; $filter .= "(!($attribute=$selection))"; break; default: $filter .= "($attribute$is$selection)"; } $natt++; } $filter .= ")"; if ( ! empty($giislist) ) { // TODO?: thething was used as a shortname for objects. Must be fixed somehow, but egiis is dead... $gentries = recursive_giis_info($giislist,$object,$errors,0,1); $nc = count($gentries); } // TODO: fix for GLUE2 schema? It works with NG. if ( ! empty($archery_list) ) { $gentries = array_merge($gentries, archery_info($archery_list, "NG", $errors, 0)); $nc = count($gentries); } if ( !$nc ) { $errno = 1; echo "
    ".$errors[$errno]."\n"; return $errno; } $dsarray = array (); $hnarray = array (); $pnarray = array (); $sitetag = array (); /* a tag to skip duplicated entries */ for ( $k = 0; $k < $nc; $k++ ) { $clhost = $gentries[$k]["host"]; $clport = $gentries[$k]["port"]; $ldapuri = "ldap://".$clhost.":".$clport; $clconn = ldap_connect($ldapuri); @ldap_set_option($clconn, LDAP_OPT_NETWORK_TIMEOUT, $tout); if ( $clconn && !$sitetag[$clhost] ) { array_push($dsarray,$clconn); array_push($pnarray,$clport); $sitetag[$clhost] = 1; /* filtering tag */ } } $nhosts = count($dsarray); if ( !$nhosts ) { // NO SITES REPLY... $errno = "2"; echo "
    ".$errors[$errno]."\n"; return $errno; } // Search all clusters if ($schema == "GLUE2" ) { $adn = DN_GLUE; } else { $adn = DN_LOCAL; } $srarray = @ldap_search($dsarray,$adn,$filter,$lim,0,0,$tlim,LDAP_DEREF_NEVER); echo "\n"; // HTML table initialisation array_unshift($engatts,$errors["425"]); $jtable = new LmTableFree($engatts); $rowcont = array(); $tabcont = array(); $rc = 0; for ( $ids = 0; $ids < $nhosts; $ids++ ) { $sr = $srarray[$ids]; $dst = $dsarray[$ids]; $pn = $pnarray[$ids]; if ($dst && $sr) { // If search returned, check that there are valid entries $nmatch = @ldap_count_entries($dst,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $allentries = ldap_get_entries($dst,$sr); $entries = ldap_purge($allentries); if ( $object == OBJ_AJOB ) { define("CMPKEY",JOB_STAT); //usort($entries,"ldap_entry_comp"); } $nclus = $entries["count"]; for ($i=0; $i<$nclus; $i++) { $cluster = "N/A"; $queue = "N/A"; $job = "N/A"; $currdn = $entries[$i]["dn"]; $currdn = preg_replace("/\"/","",$currdn); $dnparts = ldap_explode_dn($currdn,0); foreach ($dnparts as $part) { $pair = explode("=",$part); switch ( $pair[0] ) { case CLU_NAME: $cluster = $pair[1]; break; case SEL_NAME: $se = $pair[1]; break; case QUE_NAME: $queue = $pair[1]; break; case JOB_GLID: $job = $pair[1]; $encjob = rawurlencode($currdn); break; case "GLUE2ServiceID": // for any other GLUE2 object take cluster hostname(urn:ogf:object:hostname) // TODO: this does not work. One has to pinpoint which DN wants to see per object. It probably means a case per object as above. if ($schema == "GLUE2" ) { $colfields = explode(":",$pair[1]); $cluster = $colfields[3]; } } } $sort = "cluster"; // 410: cluster; 411: queue; 412: job; 413: user; 414: SE switch ( strtolower($object) ) { case OBJ_CLUS: $resource = $errors["410"]." $cluster"; $winstring = popup("clusdes.php?host=$cluster&port=$pn",700,620,1); break; case OBJ_QUEU: $resource = $errors["410"]." $cluster, ".$errors["411"]." $queue"; $winstring = popup("quelist.php?host=$cluster&port=$pn&qname=$queue",750,430,6); break; case OBJ_USER: $resource = $errors["410"]." $cluster, ".$errors["411"]." $queue"; $winstring = popup("quelist.php?host=$cluster&port=$pn&qname=$queue",750,430,6); break; case OBJ_AJOB: $resource = $errors["412"]." $job"; $winstring = popup("jobstat.php?host=$cluster&port=$pn&status=&jobdn=$encjob",750,430,4); break; case OBJ_STEL: $resource = $errors["414"]." $se"; $winstring = ""; break; default: // for any other glue object take cluster for now (should be set by code in the previous switch). if ($schema == "GLUE2" ) { $resource = $errors["410"]." $cluster"; $winstring = popup("clusdes.php?host=$cluster&port=$pn&schema=GLUE2",700,620,1); } } $rc++; $rowcont[0] = ( $winstring ) ? "$rc $resource" : "$rc $resource"; // determine maximum row count per object $vcount = 0; foreach ( $attributes as $attribute ) { // lowercase due to LDAP lib $attributelc = strtolower($attribute); if ( !$attributelc ) continue; $ccount = $entries[$i][$attributelc]["count"]; $vcount = ( $ccount > $vcount ) ? 
$ccount : $vcount; } if ($vcount == 0) $jtable->adderror($resource); $attrtag = array(); for ( $j = 0; $j < $vcount; $j++ ) { $attval = ""; $attcheck = FALSE; for ( $k = 0; $k < $n ; $k++ ) { $attribute = $attributes[$k]; // lowercase due to LDAP lib $attributelc = strtolower($attribute); if ( !$attributelc || @in_array($attributelc,$attrtag[$j]) ) continue; if ( $entries[$i][$attributelc][$j] ) { $attval = $entries[$i][$attributelc][$j]; $attcheck = TRUE; } else { $attval = " "; } // Some time-stamp readability adjustment if (substr(strrchr($attribute, "-"), 1) == "sessiondirerasetime" || substr(strrchr($attribute, "-"), 1) == "submissiontime" || substr($attribute,0,9) == "Mds-valid" ) $attval=cnvtime($attval); $rowcont[] = htmlentities($attval); $attrtag[$j][] = $attribute; } if ( $attcheck ) { $tabcont[] = $rowcont; } else { $rc--; } // if ( $attcheck ) $jtable->addrow($rowcont); $rowcont = array(); $rowcont[0] = " "; } } } } @ldap_free_result($sr); } foreach ( $tabcont as $row ) $jtable->addrow($row,""); $jtable->close(); return 0; } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/ldap_nice_dump.inc0000644000000000000000000000013215067751327024720 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.846330406 nordugrid-arc-7.1.1/src/services/monitor/includes/ldap_nice_dump.inc0000644000175000002070000001312715067751327026626 0ustar00mockbuildmock00000000000000"; // Plain LDAP dump for the DN $filstr = "(objectclass=*)"; if ( strpos($dn, DN_GLUE) ) { $schema = "GLUE2"; $filstr = "(|(objectClass=".GOBJ_CLUS.")(objectClass=".GOBJ_MAN.")(objectClass=".GOBJ_LOC.")(objectClass=".GOBJ_QUEU.")(objectClass=".GOBJ_CON.") (objectClass=".GOBJ_ADMD.")(objectClass=".GOBJ_EENV.")(objectClass=".GOBJ_BENC.")(objectClass=".GOBJ_AENV."))"; if ( strpos(strtolower(" ".$dn), GJOB_GLID) ) { $filstr = "(|(objectClass=".GOBJ_AJOB."))"; } } else { $schema = "NG"; } $sr = ldap_search($ds,$dn,$filstr,array("*"),0,0,$tlim,LDAP_DEREF_NEVER); if ($sr) { // If search returned, check that there are valid entries $nmatch = ldap_count_entries($ds,$sr); if ($nmatch > 0) { // If there are valid entries, tabulate results $first = ldap_first_entry($ds,$sr); if ( !strpos($dn, DN_GLUE) ) $nmatch = 1; for ( $j=0; $j<$nmatch; $j++){ $entries = ldap_get_attributes($ds,$first); $nfields = $entries["count"]; //Attempt to order the contents for GLUE2. Maybe needs a custom sort, but didn't seem to work so far. if ( strpos($dn, DN_GLUE) ) array_multisort($entries,SORT_DESC,SORT_NATURAL); // get the Distinguished Name $thisdn = ldap_get_dn($ds,$first); // get current objectclass - take either a nordugrid or a glue2 $objclasses = @($entries["objectClass"]); $nclasses = count($objclasses); $thisobj = ""; $c = 0; while ( ($thisobj == "") && ($c < $nclasses) ) { if ( strpos($objclasses[$c],"nordugrid") !== false ) $thisobj = $objclasses[$c]; if ( strpos($objclasses[$c],"GLUE2") !== false ) $thisobj = $objclasses[$c]; $c++; } // TODO: add error code and string if ($thisobj == "") dbgmsg("

    Error: Cannot find objectclass

    "); // HTML table initialisation $dtable = new LmTableSp("ldapdump",$strings["ldapdump"]); // add the DN entry $drowcont = array("".$errors["420"]."",$thisdn); $dtable->addrow($drowcont, "#cccccc"); $drowcont = array(); // loop on the rest of attributes for ($i=0; $i<$nfields; $i++) { $curatt = $entries[$i]; if ( $exclude && in_array($curatt,$exclude) ) continue; $engatt = ($isattr[$curatt]) ? $isattr[$curatt] : $curatt; $nval = $entries[$curatt]["count"]; $encatt = rawurlencode($curatt); // OBS the GLUE2 strings are uppercase, must be lowercased to process with LDAP lib in attlist $attwin = popup("attlist.php?object=".$thisobj."&attribute=".$encatt."&schema=".$schema,650,300,7); $attstring = @( $mdsattr[$curatt] ) ? "$engatt" : "$engatt"; $drowcont[0] = $attstring; $drowcont[1] = " "; if ($nval==0) $dtable->addrow($drowcont); $drowcont[1] = ""; if ( $nval > 4 ) $drowcont[1] = $fhead; for ($k=0; $k<$nval; $k++) { $curval = $entries[$curatt][$k]; // Strip HTML tags some smart folks are adding $curval = strip_tags($curval); // Some time-stamp readability adjustment if ( strlen($curval) == 15 && $curval[14] == "Z" ) $curval=cnvtime($curval); $encval = htmlspecialchars($curval,ENT_QUOTES,"UTF-8"); // E-mail masquerading for short lists (dunno what to do with long lists) if (strpos($curval,"@",1) && $nval<5) { $m = mylo ($curval); if ( !empty($m[0]) ) $encval = ""; } if ( $nval > 4 ) { $drowcont[1] .= "$encval"; if ( $k < $nval-1 ) $drowcont[1] .= "\n"; } else { $drowcont[1] .= $encval; if ( $k < $nval-1 ) $drowcont[1] .= "
     "; } } if ( $nval > 4 ) $drowcont[1] .= $ftail; $dtable->addrow($drowcont); } $dtable->close(); echo "
    "; $first = ldap_next_entry($ds,$first); } ldap_free_result($sr); return $thisdn; } else { $errno = 9; echo "
    ".$errors[$errno]."\n"; return $errno; } } else { $errno = 5; echo "
    ".$errors[$errno]."\n"; return $errno; } } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/cnvtime.inc0000644000000000000000000000013215067751327023422 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.841691555 nordugrid-arc-7.1.1/src/services/monitor/includes/cnvtime.inc0000644000175000002070000000055215067751327025326 0ustar00mockbuildmock00000000000000 nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/archery.inc0000644000000000000000000000013215067751327023412 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.834622479 nordugrid-arc-7.1.1/src/services/monitor/includes/archery.inc0000644000175000002070000001267215067751327025324 0ustar00mockbuildmock00000000000000" .$error_str."
  • \n"); } /** * @param $hostname * @return int */ function check_blacklist($hostname) { global $blacklist; if ( ! isset($blacklist) ) { return 0; } if ( in_array($hostname, $blacklist) ) { return $blacklist[$hostname]; } return 0; } /** * @param $endpoint * @param $errors * @param $debug * @return array */ function query_dns_archery($endpoint, $errors, $debug) { $archery_endpoints = array(); // get dns record hostname according to ARCHERY entry point convention if (substr($endpoint, 0, 6) == 'dns://') { $dns_endpoint = substr($endpoint, 6); } else { $dns_endpoint = '_archery.' . $endpoint; } // perform query $dnsquery = dns_get_record($dns_endpoint, DNS_TXT); if ( $dnsquery === FALSE ) { if ( $debug ) dbgerr_html($errors["132"].$endpoint); return $archery_endpoints; } // parse query foreach ( $dnsquery as $dnsrr ) { if (isset($dnsrr['entries'])) { foreach ($dnsrr['entries'] as $dnsrr_value) { $erecord_arr = array(); $akv = explode(' ', $dnsrr_value); foreach ($akv as $kv) { $ae = explode('=', $kv, 2); if ( count($ae) == 2 ) { $erecord_arr[$ae[0]] = $ae[1]; } } $archery_endpoints[] = $erecord_arr; } } else { if ( $debug ) dbgerr_html($errors["132"].$endpoint); } } return $archery_endpoints; } /** * @param $endpoint * @param $schema * @param $errors * @param int $debug * @param int $looplimit * @return array */ function recursive_archery_info ($endpoint, $schema, $errors, $debug=0, $looplimit=5) { $endpoints = array(); // Just in case recursion limit if ( $looplimit == 0 ) { dbgerr_html($errors["133"].$endpoint); return $endpoints; } // Query archery for endpoints info $archery_endpoints = query_dns_archery($endpoint, $errors, $debug); foreach ($archery_endpoints as $ainfo) { if ( !empty($ainfo['t']) ) { if ($ainfo['t'] == 'org.nordugrid.archery' OR $ainfo['t'] == 'archery' OR $ainfo['t'] == 'archery.service' OR $ainfo['t'] == 'archery.group' ) { if (isset($ainfo['s'])) { if ($ainfo['s'] != "1") { continue; } } $more_endpoints = recursive_archery_info ($ainfo['u'], $schema, $errors, $debug=0, $looplimit-1); $endpoints = array_merge($endpoints, $more_endpoints); } elseif ($ainfo['t'] == 'org.nordugrid.ldapegiis') { //TODO: invoke egiis query continue; } elseif ($ainfo['t'] == 'org.nordugrid.ldapng') { if ( $schema !== 'NG' ) continue; // ldap://<hostname>:2135/Mds-Vo-Name=local,o=grid $parsed_url = array(); if ( !empty($ainfo['u']) && preg_match('/^ldap:\/\/(?P<host>[^:]+):(?P<port>[0-9]+)\/(?P<base>.*)/', $ainfo['u'], $parsed_url) ) { if ( check_blacklist($parsed_url['host'])) { if ( $debug ) dbgerr_html($errors["122"].$parsed_url['host']); continue; } $endpoints[] = array ( 'host' => $parsed_url['host'], 'port' => $parsed_url['port'], 'base' => "nordugrid-cluster-name=".$parsed_url['host'].",".$parsed_url['base'] ); } } elseif ($ainfo['t'] == 'org.nordugrid.ldapglue2') { if ( $schema !== 'GLUE2' ) continue; // ldap://<hostname>:2135/o=glue $parsed_url = array(); if ( preg_match('/^ldap:\/\/(?P<host>[^:]+):(?P<port>[0-9]+)\/(?P<base>.*)/', $ainfo['u'], $parsed_url) ) { if ( check_blacklist($parsed_url['host'])) { if ( $debug ) dbgerr_html($errors["122"].$parsed_url['host']); continue; } $endpoints[] = array ( 'host' => $parsed_url['host'], 'port' => $parsed_url['port'], // dirty hack, monitor only works with array of ldapng endpoints even for GLUE2 :-) 'base' => "nordugrid-cluster-name=".$parsed_url['host'].",".DN_LOCAL ); } } else { // skip all unsupported endpoints (e.g.
submission endpoints, WS endpoints, etc) continue; } } } return $endpoints; } /** * @return array * @param archery_list array * @param schema string * @param debug integer * @param loopcnt integer * @desc Returns list of LDAP endpoints */ function archery_info($archery_list, $schema, $errors, $debug="0") { // show the debug message regarding ARCHERY timeouts if($debug && ! empty($archery_list)) { dbgmsg("
    :::> " . $errors["131"] . " <:::

    "); } // start recursively querying ARCHERY $entries = array(); foreach ( $archery_list as $archery ) { $entries = array_merge($entries, recursive_archery_info($archery['endpoint'], $schema, $errors, $debug)); } return $entries; } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/headfoot.inc0000644000000000000000000000013215067751327023546 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.845119215 nordugrid-arc-7.1.1/src/services/monitor/includes/headfoot.inc0000644000175000002070000002263715067751327025462 0ustar00mockbuildmock00000000000000\n"; //echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; echo "\n"; if ( $wintyp ) { $this->module = $wintyp; // Localize $yaccept = @$_SERVER["HTTP_ACCEPT_LANGUAGE"] ; if ( !$yaccept ) $yaccept = "en"; if ( FORCE_LANG != "default" ) $yaccept = FORCE_LANG; $yazyk = "en"; $yazyki = explode(",",$yaccept); foreach ( $yazyki as $option ) { if ( $yazyk != "en" ) continue; $option = trim($option); $option = substr($option,0,2); // some sniffing // touch("test/".$option); // echo "\n"; $locfile = $option.".inc"; if ( !file_exists("lang/".$locfile) ) continue; $yazyk = $option; } $locfile = $yazyk.".inc"; include $locfile; setlocale(LC_ALL, $yazyk); $this->language = $yazyk; $this->strings = $message; $this->errors = $message["errors"]; $this->countries = $message["tlconvert"]; $this->mdsattr = $message["mdsattr"]; $this->isattr = $message["isattr"]; // Assigns $this->clusdes = $message["clusdes"]; $this->$wintyp = $message[$wintyp]; $toptitle = $message[$wintyp][0]; // Set page parameters require ('settings.inc'); $inpnam = implode("_",array("def",$wintyp)); // Page style definitions (see settings.inc) // Sets top window title $this->title = ( $toptitle ) ? $toptitle : ""; // Refresh rate $this->refresh = (${$inpnam}["refresh"]) ? ${$inpnam}["refresh"] : 0; // Background and link colors $this->bg = (${$inpnam}["bgcolor"]) ? ${$inpnam}["bgcolor"] : "#ffffff"; $this->lc = (${$inpnam}["lcolor"]) ? ${$inpnam}["lcolor"] : "#cc0000"; // Dumps the header HTML code $titles = explode(":",$this->title); // sometimes titles are many echo "".$titles[0]." ".$extratitle."\n"; if ( $this->refresh ) echo "\n"; echo "\n"; // define giislist if ( ! isset($emirslist) ) { $emirslist = array (); } if ( ! isset($archery_list)) { $archery_list = array (); } $this->giislist = $giislist; $this->emirslist = $emirslist; $this->cert = $cert; $this->archery_list = $archery_list; } // Finishes HTML header, starts document body echo "\n"; echo "\n"; echo "
    \n"; } /** * @return void * @param errors array * @param title string * @param subtitle string * @desc Makes an opening Monitor header */ function tabletop ( $toptitle="", $subtitle="" ) { // function tabletop() $lang = FORCE_LANG; echo "\n"; echo "\n"; echo "\n"; echo "
    ".$toptitle."
    ".$subtitle."\n"; echo " errors["301"]."\" alt=\"".$this->errors["301"]."\">\n"; echo " \n"; echo " errors["302"]."\" alt=\"".$this->errors["302"]."\">\n"; echo " module."&lang=".$lang."',400,300,10);\" onClick=\"javascript:monitor('help.php?module=".$this->module."',400,300,10);\">\n"; echo " errors["303"]."\" alt=\"".$this->errors["303"]."\">\n"; echo " \n"; echo " errors["304"]."\" alt=\"".$this->errors["304"]."\">\n"; echo "
    \n"; } /** * @return string * @desc returns version number from README */ function getVersion () { $v = "N/A"; if ( file_exists("README") ) { $readme = fopen("README","r"); $fline = fgets($readme); $v = substr(stristr($fline,"version "),8); fclose($readme); } $this->version = $v; return $v; } /** * @return void * @desc Closes an HTML document */ function close () { // Closes the HTML document echo "\n
    \n"; echo "\n"; while (ob_get_level() > 0) { ob_end_flush(); } ob_implicit_flush(); } } /** * Below are some generic functions, non-class-specific * * function dbgmsg ( string ) : prints out a message and flushes output; useful for debugging * function popup ( string, int, int, int ) : opens up a new window, depending on the client */ /** * @return void * @param dbgtxt string * @desc Outputs a debug message outside the table */ function dbgmsg( $dbgtxt="Debug" ) { echo "$dbgtxt\n"; flush(); } /** * @return void * @param contact string * @param x int * @param y int * @param n int * @param lang string * @param debug int * @desc Returns a new monitor window URL */ $agent = @$_SERVER["HTTP_USER_AGENT"] ; if ( !defined("USERAGENT") ) define("USERAGENT",$agent); function popup() { $numargs = func_num_args(); $contact = func_get_arg(0); // Read the optional arguments one by one, each with its own default, so // that the 1-, 4- and 6-argument call patterns used across the monitor // all leave $x, $y, $n, $lang and $debug defined. $x = ( $numargs > 1 ) ? func_get_arg(1) : 400; $y = ( $numargs > 2 ) ? func_get_arg(2) : 300; $n = ( $numargs > 3 ) ? func_get_arg(3) : 1; $lang = ( $numargs > 4 ) ? func_get_arg(4) : "default"; $debug = ( $numargs > 5 ) ? func_get_arg(5) : 0; ( USERAGENT ) ? $agent = USERAGENT : $agent = "lynx"; if ( preg_match("/opera/i",$agent) || preg_match("/lynx/i",$agent) || preg_match("/konqueror/i",$agent) ) return $contact; // $link = "javascript:monitor('".$contact."',$x,$y,$n)"; if ( $lang != "default" && $lang != FALSE ) { if ( strpos($contact,"?") ) { $contact .= "&" ; } else { $contact .= "?" ; } $contact .= "lang=$lang"; } if ( $debug ) { if ( strpos($contact,"?") ) { $contact .= "&" ; } else { $contact .= "?" ; } $contact .= "debug=$debug"; } $link = $contact."\" target=\"win".$n."\" onClick=\"monitor('".$contact."',$x,$y,$n); return false"; return $link; } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/ldap_purge.inc0000644000000000000000000000013115067751327024076 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 29 ctime=1759499030.84736539 nordugrid-arc-7.1.1/src/services/monitor/includes/ldap_purge.inc0000644000175000002070000000176615067751327026003 0ustar00mockbuildmock00000000000000### purged DN:".$curdn."

    \n"; } } $entries["count"] = $storesize; return $entries; } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/lmtable.inc0000644000000000000000000000013215067751327023375 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.848637373 nordugrid-arc-7.1.1/src/services/monitor/includes/lmtable.inc0000644000175000002070000002172715067751327025310 0ustar00mockbuildmock00000000000000 0) { ob_end_flush(); } require ('settings.inc'); $inpnam = implode("_",array("def",$wintyp)); // $xeader = implode("_",array("header",$wintyp)); $this->color_header = (${$inpnam}["thcolor"]) ? ${$inpnam}["thcolor"] : "#999999"; $this->color_bg = (${$inpnam}["tbcolor"]) ? ${$inpnam}["tbcolor"] : "#f0f0f0"; $this->font_title = (${$inpnam}["thfont"]) ? ${$inpnam}["thfont"] : "color=\"#ffffff\""; $this->font_main = (${$inpnam}["tbfont"]) ? ${$inpnam}["tbfont"] : "color=\"#000000\""; $this->columns = $locset; $this->ncols = 0; echo "color_bg."\">\n"; echo "color_header."\">\n"; $colnr = 0; if ( $wintyp == "clusdes" && $schema != "GLUE2" ) { $position = 3; $keys = array_keys($locset); unset($locset[$keys[$position]]); } foreach ( $locset as $colnam => $colwid) { if ( $colnam == "0" || $colnam == "help" ) continue; $this->ncols ++; $colnr++; $value = $colnam; if ( $schema == "GLUE2" && $value == "Queue") { $value = "Share Name"; } // Specific sorting links for the front module if ( $wintyp == "loadmon" ) { // Keep old arguments, if any, except of order $allargs = ""; foreach ( $_GET as $argm => $argval ) { if ( $argm == "order" ) continue; $allargs .= $argm."=".$argval."&"; } $str1 = "font_title.">".$value.""; if ( $colnr == 1 ) $value = $str1."country".$str2; elseif ( $colnr == 3 ) $value = $str1."cpu".$str2; elseif ( $colnr == 4 ) $value = $str1."grun".$str2; } $width = ($colwid)?$colwid:"1%"; echo "\n"; } echo "\n"; } /** * @return void * @param contents array * @desc Draws a table row */ function addrow( $contents, $bgcol="" ) { if ( count($contents) != $this->ncols ) { $this->adderror("Incompatible data"); return 1; } $this->contents = $contents; if ($bgcol) { echo "\n"; } else { echo "\n"; } foreach ($contents as $colent) { $value = $colent; echo "\n"; } echo "\n"; } /** * @return void * @param color string * @desc Draws a spanning row containing a spacer */ function addspacer( $color="#000000" ) { echo "\n"; echo ""; echo "\n"; } /** * @return void * @param errtxt string * @desc Draws a spanning row containing error message */ function adderror( $errtxt="Error", $bgcol="" ) { $this->errtxt = $errtxt; echo "\n"; echo ""; echo "\n"; } /** * @return void * @param errtxt string * @param nrows integer * @param color string * @desc Adds a cell spanning $nrows rows */ function rowspan( $nrows, $errtxt=" ", $color="#ffffcc" ) { $this->errtxt = $errtxt; $ncols = $this->ncols - 1; $nrows = $nrows + 1; echo "\n"; echo ""; echo ""; echo "\n"; } /** * @return void * @desc Closes a table */ function close() { echo "
    font_title."> $value 
    font_main."> $value 
    ncols."\" bgcolor=\"$color\" height=\"0\">\"\"
    ncols."\""; if ($bgcol) echo " bgcolor=\"$bgcol\""; echo ">font_main."> $errtxt
     $errtxt\"\"
    \n"; # ob_end_flush(); ob_implicit_flush(FALSE); } } class LmTableSp extends LmTable { var $spcolor; /** * @return void * @param contents array * @param color string * @desc Draws a table row with a spacer above */ function addrow( $contents, $bgcol="", $color="#ffffff" ) { $ncols = count($contents); $this->contents = $contents; if ($bgcol) { echo "\n"; } else { echo "\n"; } foreach ($contents as $colent) { $value = $colent; echo "font_main."> $value \n"; } echo "\n"; echo "\n"; echo "\"\""; echo "\n"; } /** * @return void * @param errtxt string * @param color string * @desc Draws a spanning row containing error message */ function adderror( $errtxt="Error", $color="#ffffff", $bgcol="" ) { $this->errtxt = $errtxt; $ncols = $this->ncols; $tospan = $this->rowspan; if ( $tospan ) $ncols = $ncols - 1; echo "\n"; echo "\"\""; echo "\n"; echo "\n"; echo "ncols."\""; if ($bgcol) echo " bgcolor=\"$bgcol\""; echo ">font_main."> $errtxt"; echo "\n"; } /** * @return void * @param errtxt string * @param nrows integer * @param color string * @desc Adds a cell spanning $nrows rows */ function rowspan( $nrows, $errtxt=" ", $color="#ffffcc" ) { $this->errtxt = $errtxt; $ncols = $this->ncols - 1; $nrows = (2 * $nrows) + 1; echo "\n"; echo " $errtxt"; echo "\"\""; echo "\n"; } } class LmTableFree extends LmTableSp { /** * @return LmTableFree * @param headers array * @desc Starts an HTML table */ function __construct( $headers ) { ob_implicit_flush(0); ob_start(); $this->color_header = "#666666"; $this->color_bg = "#f0f0f0"; $this->font_title = "color=\"#ffffff\""; $this->font_main = "color=\"#000000\""; $this->columns = count($headers); $this->ncols = 0; echo "color_bg."\">\n"; echo "color_header."\">\n"; foreach ( $headers as $colnam ) { $this->ncols ++; $value = $colnam; $width = "1%"; echo "\n"; } echo "\n"; } } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/postcode.inc0000644000000000000000000000013215067751327023575 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.851759479 nordugrid-arc-7.1.1/src/services/monitor/includes/postcode.inc0000644000175000002070000000565315067751327025510 0ustar00mockbuildmock00000000000000$tout sec t/o"; if ( !$record ) continue; $nrecords = $record["count"]; /* should be 1 */ for ($m = 0; $m < $nrecords; $m++) { $curcod = $record[$m][CLU_ZIPC][0]; if ( $curcod ) $cllist[$idx]["zvoname"] = cnvvo($curcod,$curnam); } } return($cllist); } /** * @return string * @param curnam string * @desc Guesses geographical location of a cluster */ function guess_country($curnam, $zip) { // Dumb domain name guess by 2 last letters $zvoname = cnvvo("",$curnam); // overwrite the previous decision if country code is set in the postal code if ( $zip ) $zvoname = cnvvo($zip,$curnam); return $zvoname; } ?>nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/mylo.inc0000644000000000000000000000013215067751327022735 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.850765831 nordugrid-arc-7.1.1/src/services/monitor/includes/mylo.inc0000644000175000002070000000124015067751327024634 0ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/cnvname.inc0000644000000000000000000000013215067751327023404 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.840616665 nordugrid-arc-7.1.1/src/services/monitor/includes/cnvname.inc0000644000175000002070000000401115067751327025302 0ustar00mockbuildmock00000000000000 1 && $family != 
"Doe") { /* catch for the tutorials */ $doestr = substr($family,1,1); /* returns "1" if it is a number, or a letter if it's a name */ if ( preg_match("/[0-9]/",$doestr) ) { $number = array_pop($names); $family = end($names); } // $family = substr(strrchr($uname, " "), 1); $name = $cn[0]."."; /* First letter of the name (doesn't work with 8-bit strings) */ if ( $flag == 2 ) $name = $names[0]; eval("\$name = \"$name\";"); $family = $name." ".$family; } else { $family = $cn; } if ( !$family ) return $uname /* Give up */; return $family; } /** * @return string * @param uname string * @desc Takes user DN and attempts to extract her affiliation */ function getorg ( $uname ) { $uname = trim($uname); $pieces = explode("/L=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/DC=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/OU=", $uname); if ( count($pieces) == 1 ) $pieces = explode("/O=", $uname); $org = end($pieces); $tailpos = strpos($org, "/"); if ( $tailpos ) $org = substr($org,0,$tailpos); return $org; } ?>nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/settings.inc0000644000000000000000000000013215067751327023615 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.854024098 nordugrid-arc-7.1.1/src/services/monitor/includes/settings.inc0000644000175000002070000004750015067751327025525 0ustar00mockbuildmock00000000000000 "index1.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index2.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index3.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid"), array("host" => "index4.nordugrid.org", "port" => "2135", "base" => "mds-vo-name=NorduGrid,o=grid", "vo" => "NorduGrid") //*** A country-level GIIS example, use as many as you wish to monitor: //, //array("host" => "f9pc18.ijs.si", // "port" => "2135", // "base" => "mds-vo-name=Slovenia,o=grid", // "vo" => "Slovenia") //*** A single site GRIS example, use as many as you wish to monitor: //, //array("host" => "gridmaster.pzr.uni-rostock.de", // "port" => "2135", // "base" => "nordugrid-cluster-name=gridmaster.pzr.uni-rostock.de,mds-vo-name=local,o=grid", // "vo" => "Germany") ); // list of ARCHERY endpoints to query $archery_list = array ( array ( /* ATLAS at NDGF */ "endpoint" => "ndgf.org") /* NorduGrid */, array ( "endpoint" => "nordugrid.org") ); // list of available EMIRs /* $emirslist = array( array("schema" => "https", "host" => "testbed-emi5.grid.upjs.sk", "port" => "54321", "base" => "mds-vo-name=NorduGrid,o=grid") ); /* * To set up TLS connection to EMIR server client certificate is mandatory: * Convert client certificate from *.pfx (pkcs12) into *.pem with openssl (if needed): * > openssl pkcs12 -in keys.pfx -out keys.pem */ $cert = "/var/www/monitor-svn/includes/test.pem"; $cert_pass = 'emi'; // base DNs for searches: local (GRIS), global (GIIS), VO if ( !defined("DN_LOCAL") ) define("DN_LOCAL","mds-vo-name=local,o=grid"); if ( !defined("DN_GLUE") ) define("DN_GLUE","o=glue"); if ( !defined("DN_GLOBL") ) define("DN_GLOBL","mds-vo-name=NorduGrid,o=grid"); if ( !defined("DN_VIORG") ) define("DN_VIORG","dc=nordugrid,dc=org"); if ( !defined("DN_PEOPL") ) define("DN_PEOPL","ou=people,dc=nordugrid,dc=org"); if ( !defined("DN_GUEST") ) define("DN_GUEST","ou=guests,dc=nordugrid,dc=org"); if ( !defined("DN_TUTOR") ) 
define("DN_TUTOR","ou=tutorial,dc=nordugrid,dc=org"); if ( !defined("DN_SERVS") ) define("DN_SERVS","ou=services,dc=nordugrid,dc=org"); if ( !defined("DN_RECAT") ) define("DN_RECAT","rc=NorduGrid,dc=nordugrid,dc=org"); // Information system classes and attributes namespace prefix, for NorduGRID schema, "nordugrid" if ( !defined("IS_PREFX") ) define("IS_PREFX","nordugrid"); // Information system classes and attributes namespace prefix for GLUE2 schema, "GLUE2". Due to // the way the php library works, this needs to be all lowercase. if ( !defined("IS_PREFXG") ) define("IS_PREFXG","glue2"); // Cache location (use ../htdata when installing directly in ./htdocs) if ( !defined("CACHE_LOCATION") ) define("CACHE_LOCATION","cache"); // Extra title to be added to "Grid Monitor" (e.g. My Favorite) if ( !defined("EXTRA_TITLE") ) define("EXTRA_TITLE",""); //========================================================================= // =================== no need to change things below ===================== //========================================================================= // objectclasses if ( !defined("OBJ_CLUS") ) define("OBJ_CLUS",IS_PREFX."-cluster"); if ( !defined("OBJ_STEL") ) define("OBJ_STEL",IS_PREFX."-se"); if ( !defined("OBJ_QUEU") ) define("OBJ_QUEU",IS_PREFX."-queue"); if ( !defined("OBJ_AJOB") ) define("OBJ_AJOB",IS_PREFX."-job"); if ( !defined("OBJ_USER") ) define("OBJ_USER",IS_PREFX."-authuser"); //GLUE2 if ( !defined("GOBJ_CLUS") ) define("GOBJ_CLUS",IS_PREFXG."ComputingService"); // SE are currently not supported in our GLUE2 rendering. //if ( !defined("GOBJ_STEL") ) define("GOBJ_STEL",IS_PREFXG."-se"); if ( !defined("GOBJ_QUEU") ) define("GOBJ_QUEU",IS_PREFXG."ComputingShare"); if ( !defined("GOBJ_AJOB") ) define("GOBJ_AJOB",IS_PREFXG."ComputingActivity"); // GLUE2 has no equivalent to authuser at the moment. 
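// (hence GOBJ_USER just below stays commented out).
// Illustrative note, added for clarity and not part of the shipped file:
// these GLUE2 objectclass constants are the building blocks of the LDAP
// search filter assembled in ldap_nice_dump.inc, roughly as
//   $filstr = "(|(objectClass=".GOBJ_CLUS.")(objectClass=".GOBJ_QUEU."))";
// which expands to
//   (|(objectClass=glue2ComputingService)(objectClass=glue2ComputingShare))
// The lowercase "glue2" prefix is harmless, since LDAP compares objectClass
// values case-insensitively.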
//if ( !defined("GOBJ_USER") ) define("GOBJ_USER",IS_PREFXG."-authuser"); // Making everything lowercase due to php ldap library if ( !defined("GOBJ_LOC") ) define("GOBJ_LOC",IS_PREFXG."location"); if ( !defined("GOBJ_MAN") ) define("GOBJ_MAN",IS_PREFXG."manager"); if ( !defined("GOBJ_CON") ) define("GOBJ_CON",IS_PREFXG."contact"); if ( !defined("GOBJ_ADMD") ) define("GOBJ_ADMD",IS_PREFXG."admindomain"); if ( !defined("GOBJ_EENV") ) define("GOBJ_EENV",IS_PREFXG."executionenvironment"); if ( !defined("GOBJ_BENC") ) define("GOBJ_BENC",IS_PREFXG."benchmark"); if ( !defined("GOBJ_AENV") ) define("GOBJ_AENV",IS_PREFXG."applicationenvironment"); if ( !defined("OBJ_PERS") ) define("OBJ_PERS","organizationalPerson"); if ( !defined("OBJ_RCOL") ) define("OBJ_RCOL","GlobusReplicaLogicalCollection"); /* RC Logical Collection object */ if ( !defined("OBJ_RFIL") ) define("OBJ_RFIL","GlobusReplicaLogicalFile"); /* RC Logical File object */ if ( !defined("OBJ_RFIN") ) define("OBJ_RFIN","GlobusReplicaFileInfo"); /* RC File Info object */ if ( !defined("OBJ_RSEL") ) define("OBJ_RSEL","GlobusReplicaInfo"); /* RC Info object */ // attributes //NG if ( !defined("CLU_NAME") ) define("CLU_NAME",IS_PREFX."-cluster-name"); if ( !defined("CLU_ANAM") ) define("CLU_ANAM",IS_PREFX."-cluster-aliasname"); if ( !defined("CLU_ZIPC") ) define("CLU_ZIPC",IS_PREFX."-cluster-location"); if ( !defined("CLU_TCPU") ) define("CLU_TCPU",IS_PREFX."-cluster-totalcpus"); if ( !defined("CLU_UCPU") ) define("CLU_UCPU",IS_PREFX."-cluster-usedcpus"); if ( !defined("CLU_TJOB") ) define("CLU_TJOB",IS_PREFX."-cluster-totaljobs"); if ( !defined("CLU_QJOB") ) define("CLU_QJOB",IS_PREFX."-cluster-queuedjobs"); /* deprecated since 0.5.38 */ if ( !defined("CLU_OWNR") ) define("CLU_OWNR",IS_PREFX."-cluster-owner"); if ( !defined("CLU_SUPP") ) define("CLU_SUPP",IS_PREFX."-cluster-support"); if ( !defined("CLU_PQUE") ) define("CLU_PQUE",IS_PREFX."-cluster-prelrmsqueued"); /* new since 0.5.38 */ if ( !defined("SEL_NAME") ) define("SEL_NAME",IS_PREFX."-se-name"); if ( !defined("SEL_BURL") ) define("SEL_BURL",IS_PREFX."-se-baseurl"); /* gone since 0.5.26 */ if ( !defined("SEL_CURL") ) define("SEL_CURL",IS_PREFX."-se-url"); /* in place since 0.5.26 */ if ( !defined("SEL_ANAM") ) define("SEL_ANAM",IS_PREFX."-se-aliasname"); if ( !defined("SEL_TYPE") ) define("SEL_TYPE",IS_PREFX."-se-type"); if ( !defined("SEL_FREE") ) define("SEL_FREE",IS_PREFX."-se-freespace"); if ( !defined("SEL_TOTA") ) define("SEL_TOTA",IS_PREFX."-se-totalspace"); if ( !defined("SEL_USER") ) define("SEL_USER",IS_PREFX."-se-authuser"); if ( !defined("QUE_NAME") ) define("QUE_NAME",IS_PREFX."-queue-name"); if ( !defined("QUE_STAT") ) define("QUE_STAT",IS_PREFX."-queue-status"); if ( !defined("QUE_RUNG") ) define("QUE_RUNG",IS_PREFX."-queue-running"); if ( !defined("QUE_GRUN") ) define("QUE_GRUN",IS_PREFX."-queue-gridrunning"); if ( !defined("QUE_MAXR") ) define("QUE_MAXR",IS_PREFX."-queue-maxrunning"); if ( !defined("QUE_QUED") ) define("QUE_QUED",IS_PREFX."-queue-queued"); /* deprecated since 0.5.38 */ if ( !defined("QUE_LQUE") ) define("QUE_LQUE",IS_PREFX."-queue-localqueued"); /* new since 0.5.38 */ if ( !defined("QUE_PQUE") ) define("QUE_PQUE",IS_PREFX."-queue-prelrmsqueued"); /* new since 0.5.38 */ if ( !defined("QUE_GQUE") ) define("QUE_GQUE",IS_PREFX."-queue-gridqueued"); if ( !defined("QUE_MAXQ") ) define("QUE_MAXQ",IS_PREFX."-queue-maxqueuable"); if ( !defined("QUE_ASCP") ) define("QUE_ASCP",IS_PREFX."-queue-totalcpus"); if ( !defined("QUE_MINT") ) 
define("QUE_MINT",IS_PREFX."-queue-mincputime"); if ( !defined("QUE_MAXT") ) define("QUE_MAXT",IS_PREFX."-queue-maxcputime"); if ( !defined("JOB_GLID") ) define("JOB_GLID",IS_PREFX."-job-globalid"); if ( !defined("JOB_NAME") ) define("JOB_NAME",IS_PREFX."-job-jobname"); if ( !defined("JOB_STAT") ) define("JOB_STAT",IS_PREFX."-job-status"); if ( !defined("JOB_EQUE") ) define("JOB_EQUE",IS_PREFX."-job-execqueue"); if ( !defined("JOB_ECLU") ) define("JOB_ECLU",IS_PREFX."-job-execcluster"); if ( !defined("JOB_GOWN") ) define("JOB_GOWN",IS_PREFX."-job-globalowner"); if ( !defined("JOB_USET") ) define("JOB_USET",IS_PREFX."-job-usedcputime"); if ( !defined("JOB_USEM") ) define("JOB_USEM",IS_PREFX."-job-usedmem"); if ( !defined("JOB_SUBM") ) define("JOB_SUBM",IS_PREFX."-job-submissiontime"); if ( !defined("JOB_COMP") ) define("JOB_COMP",IS_PREFX."-job-completiontime"); if ( !defined("JOB_ERRS") ) define("JOB_ERRS",IS_PREFX."-job-errors"); if ( !defined("JOB_CPUS") ) define("JOB_CPUS",IS_PREFX."-job-cpucount"); if ( !defined("USR_NAME") ) define("USR_NAME",IS_PREFX."-authuser-name"); if ( !defined("USR_USSN") ) define("USR_USSN",IS_PREFX."-authuser-sn"); if ( !defined("USR_CPUS") ) define("USR_CPUS",IS_PREFX."-authuser-freecpus"); if ( !defined("USR_QUEU") ) define("USR_QUEU",IS_PREFX."-authuser-queuelength"); if ( !defined("USR_DISK") ) define("USR_DISK",IS_PREFX."-authuser-diskspace"); //GLUE2. All lowercase due to php ldap library // in ComputingService if ( !defined("GCLU_NAME") ) define("GCLU_NAME",IS_PREFXG."entityname"); if ( !defined("GCLU_ANAM") ) define("GCLU_ANAM",IS_PREFXG."entityname"); // this number seems to be incorrect, check infosys code if ( !defined("GCLU_TJOB") ) define("GCLU_TJOB",IS_PREFXG."computingservicetotaljobs"); // queuedjobs has no equivalent in GLUE2. Must be calculated with some maths. Since it's deprecated, who cares //if ( !defined("GCLU_QJOB") ) define("GCLU_QJOB",IS_PREFXG."-cluster-queuedjobs"); /* deprecated since 0.5.38 */ if ( !defined("GCLU_PQUE") ) define("GCLU_PQUE",IS_PREFXG."computingserviceprelrmswaitingjobs"); // in Location if ( !defined("GCLU_ZIPC") ) define("GCLU_ZIPC",IS_PREFXG."locationpostcode"); // in ComputingManager // TODO: There are discrepancies on how LogicalCPUs are reported. Maybe TotalSlots is better. This needs to be fixed in the infosys. //if ( !defined("GCLU_TCPU") ) define("GCLU_TCPU",IS_PREFXG."computingmanagertotallogicalcpus"); if ( !defined("GCLU_TCPU") ) define("GCLU_TCPU",IS_PREFXG."computingmanagertotalslots"); // usedcpus is not available in GLUE2 as such.
SlotsUsedByGridJobs should be ok, renaming to GCLU_GCPU if ( !defined("GCLU_GCPU") ) define("GCLU_GCPU",IS_PREFXG."computingmanagerslotsusedbygridjobs"); if ( !defined("GCLU_LCPU") ) define("GCLU_LCPU",IS_PREFXG."computingmanagerslotsusedbylocaljobs"); // in Contact if ( !defined("GCLU_SUPP") ) define("GCLU_SUPP",IS_PREFXG."contactdetail"); // in AdminDomain if ( !defined("GCLU_OWNR") ) define("GCLU_OWNR",IS_PREFXG."admindomainowner"); // Storage element not supported in current GLUE2 //if ( !defined("GSEL_NAME") ) define("GSEL_NAME",IS_PREFXG."-se-name"); //if ( !defined("GSEL_BURL") ) define("GSEL_BURL",IS_PREFXG."-se-baseurl"); /* gone since 0.5.26 */ //if ( !defined("GSEL_CURL") ) define("GSEL_CURL",IS_PREFXG."-se-url"); /* in place since 0.5.26 */ //if ( !defined("GSEL_ANAM") ) define("GSEL_ANAM",IS_PREFXG."-se-aliasname"); //if ( !defined("GSEL_TYPE") ) define("GSEL_TYPE",IS_PREFXG."-se-type"); //if ( !defined("GSEL_FREE") ) define("GSEL_FREE",IS_PREFXG."-se-freespace"); //if ( !defined("GSEL_TOTA") ) define("GSEL_TOTA",IS_PREFXG."-se-totalspace"); //if ( !defined("GSEL_USER") ) define("GSEL_USER",IS_PREFXG."-se-authuser"); // In ComputingShare if ( !defined("GQUE_NAME") ) define("GQUE_NAME",IS_PREFXG."entityname"); if ( !defined("GQUE_MAPQ") ) define("GQUE_MAPQ",IS_PREFXG."computingsharemappingqueue"); if ( !defined("GQUE_STAT") ) define("GQUE_STAT",IS_PREFXG."computingshareservingstate"); // RUNG is all the running jobs in the queue, NG queue-running, maps to GLUE2 RunningJobs if ( !defined("GQUE_RUNG") ) define("GQUE_RUNG",IS_PREFXG."computingsharerunningjobs"); // GRUN is the grid jobs only in the queue, NG queue-gridrunning, there is no equivalent for this in GLUE2. Must be calculated from total-local jobs in the queue //if ( !defined("GQUE_GRUN") ) define("GQUE_GRUN",IS_PREFXG.""); if ( !defined("GQUE_MAXR") ) define("GQUE_MAXR",IS_PREFXG."computingsharemaxrunningjobs"); if ( !defined("GQUE_LQUE") ) define("GQUE_LQUE",IS_PREFXG."computingsharelocalwaitingjobs"); /* new since 0.5.38 */ // LRUN is non-grid jobs running in the queue if ( !defined("GQUE_LRUN") ) define("GQUE_LRUN",IS_PREFXG."computingsharelocalrunningjobs"); if ( !defined("GQUE_PQUE") ) define("GQUE_PQUE",IS_PREFXG."computingshareprelrmswaitingjobs"); /* new since 0.5.38 */ //This is equivalent to the deprecated -queue-queued, /* deprecated since 0.5.38 */, but exists in GLUE2 if ( !defined("GQUE_QUED") ) define("GQUE_QUED",IS_PREFXG."computingsharewaitingjobs"); if ( !defined("GQUE_MAXQ") ) define("GQUE_MAXQ",IS_PREFXG."computingsharemaxtotaljobs"); // No such thing in GLUE2, when possible, the related execution environment data should be used. // using maxtotaljobs is not correct, clusters have weird stuff in their setup. //if ( !defined("GQUE_ASCP") ) define("GQUE_ASCP",IS_PREFXG."-queue-totalcpus"); if ( !defined("GQUE_MINT") ) define("GQUE_MINT",IS_PREFXG."computingsharemincputime"); if ( !defined("GQUE_MAXT") ) define("GQUE_MAXT",IS_PREFXG."computingsharemaxcputime"); if ( !defined("GQUE_ENVK") ) define("GQUE_ENVK",IS_PREFXG."computingshareexecutionenvironmentforeignkey"); // in ComputingActivity -- these are disabled by default in GLUE2 LDAP. if ( !defined("GJOB_GLID") ) define("GJOB_GLID",IS_PREFXG."computingactivityidfromendpoint"); if ( !defined("GJOB_NAME") ) define("GJOB_NAME",IS_PREFXG."name"); // Job state is multivalued in GLUE2 (for each state model). 
Must be handled differently in rendering if ( !defined("GJOB_STAT") ) define("GJOB_STAT",IS_PREFXG."computingactivitystate"); if ( !defined("GJOB_EQUE") ) define("GJOB_EQUE",IS_PREFXG."computingactivityqueue"); if ( !defined("GJOB_ECLU") ) define("GJOB_ECLU",IS_PREFXG."computingactivityexecutionnode"); if ( !defined("GJOB_GOWN") ) define("GJOB_GOWN",IS_PREFXG."computingactivityowner"); if ( !defined("GJOB_USET") ) define("GJOB_USET",IS_PREFXG."computingactivityusedtotalcputime"); if ( !defined("GJOB_USEM") ) define("GJOB_USEM",IS_PREFXG."computingactivityusedmainmemory"); if ( !defined("GJOB_SUBM") ) define("GJOB_SUBM",IS_PREFXG."computingactivitysubmissiontime"); if ( !defined("GJOB_COMP") ) define("GJOB_COMP",IS_PREFXG."computingactivityendtime"); if ( !defined("GJOB_ERRS") ) define("GJOB_ERRS",IS_PREFXG."computingactivityerror"); if ( !defined("GJOB_CPUS") ) define("GJOB_CPUS",IS_PREFXG."computingactivityrequestedslots"); // in ExecutionEnvironment if ( !defined("EENV_ID") ) define("EENV_ID",IS_PREFXG."resourceid"); if ( !defined("EENV_LCPU") ) define("EENV_LCPU",IS_PREFXG."executionenvironmentlogicalcpus"); if ( !defined("EENV_PCPU") ) define("EENV_PCPU",IS_PREFXG."executionenvironmentphysicalcpus"); if ( !defined("EENV_TINS") ) define("EENV_TINS",IS_PREFXG."executionenvironmenttotalinstances"); // UserInfo is not implemented in our GLUE2 and never will be. Most likely all of this below and the // related code can be removed //if ( !defined("GUSR_NAME") ) define("GUSR_NAME",IS_PREFXG."-authuser-name"); //if ( !defined("GUSR_USSN") ) define("GUSR_USSN",IS_PREFXG."-authuser-sn"); //if ( !defined("GUSR_CPUS") ) define("GUSR_CPUS",IS_PREFXG."-authuser-freecpus"); //if ( !defined("GUSR_QUEU") ) define("GUSR_QUEU",IS_PREFXG."-authuser-queuelength"); //if ( !defined("GUSR_DISK") ) define("GUSR_DISK",IS_PREFXG."-authuser-diskspace");
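// Illustrative sketch (not part of the original monitor code): the comments
// above note that the deprecated NG "queue-gridrunning" count has no direct
// GLUE2 attribute and that the GLUE2 job state is multivalued (one value per
// state model). The helpers below show roughly how a renderer could derive
// both; the argument layouts are simplified assumptions, not the real LDAP
// result arrays used elsewhere in the monitor.
if ( !function_exists("example_grid_running") ) {
  function example_grid_running($running, $localrunning) {
    // grid-only running jobs = all running jobs in the queue (GQUE_RUNG)
    // minus the locally submitted running jobs (GQUE_LRUN)
    return max(0, intval($running) - intval($localrunning));
  }
  function example_ng_state($states) {
    // $states: the multivalued GLUE2 state values of one job, e.g.
    // array("bes:Running", "nordugrid:INLRMS:R"); the "nordugrid:" model
    // prefix is an assumption about the published values.
    foreach ( $states as $state ) {
      if ( is_string($state) && strpos($state, "nordugrid:") === 0 )
        return substr($state, strlen("nordugrid:"));
    }
    return "Undefined";
  }
}
// This VO information relates to country info and has nothing to do with the VO-VOMS concept. // TODO: assess how relevant this can be for GLUE2.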
if ( !defined("VO_USCN" ) ) define("VO_USCN" ,"cn"); if ( !defined("VO_USSN" ) ) define("VO_USSN" ,"sn"); if ( !defined("VO_DESC" ) ) define("VO_DESC" ,"description"); if ( !defined("VO_MAIL" ) ) define("VO_MAIL" ,"mail"); if ( !defined("VO_INST" ) ) define("VO_INST" ,"o"); //************************************* Grid Monitor top window style ****************************** $def_loadmon = array( "refresh" => 120, "bgcolor" => "#ffffff", "thcolor" => "#005659", "lcolor" => "#005659", "tbcolor" => "#ffecb5", "thfont" => "face=\"sans-serif\" color=#ffffff", "tbfont" => "face=\"sans-serif\"" ); //************************************* Cluster description style ********************************** $def_clusdes = array ( //"title" => $theaders["clusdes"][0], "refresh" => 600, "bgcolor" => "#ffcc33", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //*************************************** Job statistics style ************************************* $def_jobstat = array ( "refresh" => 600, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //******************************************* VO list style *************************************** $def_volist = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#ffff00", "tbcolor" => "#cc0033", "thfont" => "face=\"sans-serif\" color=\"#993300\"", "tbfont" => "face=\"sans-serif\" color=\"#ffffff\"" ); //***************************************** VO user base style ************************************* $def_vousers = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#ffcccc", "tbcolor" => "#000099", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#ffffff\"" ); //***************************************** User job list style ************************************ $def_userlist = array( "refresh" => 0, "bgcolor" => "#ffffcc", "thcolor" => "#ffcc33", "lcolor" => "#000099", "tbcolor" => "#ffffff", "thfont" => "face=\"sans-serif\" color=\"#000000\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); $def_userres = array( "thcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //**************************************** Attribute list style ************************************ $def_attlist = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ccffff", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //****************************************** Queue job list style ********************************** $def_quelist = array( "refresh" => 300, "bgcolor" => "#ffffff", "thcolor" => "#000099", "lcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); //******************************************* SE info style *************************************** $def_sestat = array( "refresh" => 300, "bgcolor" => "#ffffff", "thcolor" => "#ffcc33", "lcolor" => "#003300", "tbcolor" => "#CCCC99", "thfont" => "face=\"sans-serif\" color=\"#990000\"", "tbfont" => "face=\"sans-serif\" color=\"#000000\"" 
); //******************************************* Users info style *************************************** $def_allusers = array( "refresh" => 0, "bgcolor" => "#ffffff", "thcolor" => "#339966", "lcolor" => "#003300", "tbcolor" => "#ccffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000000\"" ); //***************************** LDAP parameters dump style - no need to modify ********************* $def_ldapdump = array( "thcolor" => "#000099", "tbcolor" => "#ffffcc", "thfont" => "face=\"sans-serif\" color=\"#ffffff\"", "tbfont" => "face=\"sans-serif\" color=\"#000099\"" ); ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/toreload.inc0000644000000000000000000000013215067751327023566 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.855382063 nordugrid-arc-7.1.1/src/services/monitor/includes/toreload.inc0000644000175000002070000000060615067751327025472 0ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/cache.inc0000644000000000000000000000013215067751327023020 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.838259791 nordugrid-arc-7.1.1/src/services/monitor/includes/cache.inc0000644000175000002070000000255315067751327024727 0ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/emirs_info.inc0000644000000000000000000000013115067751327024106 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 29 ctime=1759499030.84403567 nordugrid-arc-7.1.1/src/services/monitor/includes/emirs_info.inc0000644000175000002070000001525515067751327026021 0ustar00mockbuildmock00000000000000 "https", * "host" => "testbed-emi5.grid.upjs.sk, * "port" => "54321", * "base" => "mds-vo-name=NorduGrid,o=grid"),...) */ $tlim = 2; $tout = 5; if($debug && !empty($emirslist)) dbgmsg("
    :::> ".$errors["130"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); $nemirs = count($emirslist); $counter = count($gentries); $tag = array(); $entries = $gentries; $blacklist = array(); if ( file_exists("blacklist.inc") ) include('blacklist.inc'); // Loop on entered EMIR sites // If a host is blacklisted, skip // If a host is a cluster (GRIS), keep and skip // If a host is any other local GRIS, skip for ( $ig = 0; $ig < $nemirs; $ig++ ) { $eschema = $emirslist[$ig]["schema"]; $ehost = $emirslist[$ig]["host"]; if ( @$blacklist[$ehost] ) { if ( $debug ) dbgmsg("
    ".$errors["122"]."$ehost
    \n"); continue; } $eport = $emirslist[$ig]["port"]; $ebase = $emirslist[$ig]["base"]; if ( preg_match("/$element/i",$ebase) ) { // Invent a "fake DN" for host tagging and skip duplicated entries $fakedn = "hn=".$ehost.", ".$ebase; if ( @$tag[$fakedn] ) continue; $tag[$fakedn] = 1; continue; } elseif ( preg_match("/local/i",$ebase) ) { if ( $debug ) dbgmsg("
    ".$errors["115"].$ehost." (".$errors["116"].$element.")"); continue; } if ( $debug == 2 ) dbgmsg("
    ".$errors["117"]."$ehost..."); // Connection to EMIR $path = "services"; $query = "Service_Endpoint_Capability=information.discovery.resource&Service_Endpoint_Technology=ldap"; $res = http_request('GET', $eschema."://".$ehost.":".$eport."/".$path."?".$query, $data, $cert, $referer=''); if ($res["status"] == "ok"){ if ( $debug == 2 ) dbgmsg($errors["118"]); $json_a=json_decode($res["content"], true); $nrecords = count($json_a); for ($i = 0; $i < $nrecords; $i++) { $url = parse_url($json_a[$i]["Service_Endpoint_URL"]); $curhost = $url["host"]; $curhstat = $json_a[$i]["Service_Endpoint_HealthState"]; $cursstat = $json_a[$i]["Service_Endpoint_ServingState"]; /* * URL structure: * * ldapng: ldap://:2135/Mds-Vo-Name=local,o=grid * * ldapglue1: ldap://:2135/Mds-Vo-Name=resource,o=grid * * ldapglue2: ldap://:2135/o=glue */ // Introduce "fake" DN for tagging purpose - helps skipping sites registering twice $fakedn = "hn=".$url["host"].", ".$url["base"]; //if ( @$tag[$fakedn] ) continue; // Note: We need all enpoint about a service!!! if ( @$blacklist[$curhost] ) { if ( $debug>0 ) dbgmsg("
    ".$errors["122"]."$curhost
    \n"); continue; } $curstat = (($curhstat == "ok")&&($cursstat == "production")) ? "VALID": "healt state: '".$curhstate."', serving state: '".$cursstate."'"; if ( $curstat != "VALID" ) { if ( $debug ) dbgmsg("
    $curstat".$errors["121"]."$fakedn
    \n"); //continue; } $entries[$counter]["host"] = $url["host"]; $entries[$counter]["port"] = $url["port"]; $entries[$counter]["base"] = substr($url["path"],1); if ( $debug == 2 ) dbgmsg("
    ".$errors["123"]."$base: $fakedn
    \n"); $tag[$fakedn] = 1; $counter++; } } } if ( $debug == 2 ) dbgmsg("
    "); // Some debugging printout if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"].$element.": ".$counter."
    "); foreach ( $entries as $num=>$val ) dbgmsg($val["host"].":".$val["port"]."/".$val["base"]."
    "); } return $entries; } /* * Send http request to the given URL of the server. */ function http_request($type, $url, $data, $cert, $referer='') { // Convert the data array into URL Parameters like a=b&foo=bar etc. $data = http_build_query($data); // parse the given URL $url = parse_url($url); if ($url['scheme'] != 'https' && $url['scheme'] != 'http') { die('Error: Only HTTP(S) request are supported !'); } // extract host and path: $host = $url['host']; $port = $url['port']; $path = $url['path']; $query= $url['query']; // open a socket connection on the given port - timeout: 30 sec $fp = stream_socket_client($host.":".$port, $errno, $errstr, 30); if ($url['scheme'] == 'https') { // add secure properties $context = stream_context_create(); $result = stream_context_set_option($context, 'ssl', 'local_cert', $cert); $result = stream_context_set_option($context, 'ssl', 'passphrase', $cert_pass); // open a secure socket connection on the given port - timeout: 30 sec $fp = stream_socket_client("ssl://".$host.":".$port, $errno, $errstr, 30, STREAM_CLIENT_CONNECT,$context); } if ($fp){ // send the request headers: fputs($fp, $type." $path?$query HTTP/1.1\r\n"); fputs($fp, "Host: $host\r\n"); if ($referer != '') fputs($fp, "Referer: $referer\r\n"); fputs($fp, "Content-type: application/x-www-form-urlencoded\r\n"); fputs($fp, "Content-length: ". strlen($data) ."\r\n"); fputs($fp, "Connection: close\r\n\r\n"); fputs($fp, $data); $result = ''; while(!feof($fp)) { // receive the results of the request $result .= fgets($fp, 128); } } else { return array( 'status' => 'err', 'error' => "$errstr ($errno)" ); } // close the socket connection: fclose($fp); // split the result header from the content $result = explode("\r\n\r\n", $result, 2); $header = isset($result[0]) ? $result[0] : ''; $content = isset($result[1]) ? $result[1] : ''; // return as structured array: return array( 'status' => 'ok', 'header' => $header, 'content' => $content ); } ?> nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/cnvalias.inc0000644000000000000000000000013215067751327023555 xustar0030 mtime=1759498967.775903469 30 atime=1759498967.874493787 30 ctime=1759499030.839393157 nordugrid-arc-7.1.1/src/services/monitor/includes/cnvalias.inc0000644000175000002070000000302615067751327025460 0ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/monitor/includes/PaxHeaders/recursive_giis_info.inc0000644000000000000000000000013215067751327026012 xustar0030 mtime=1759498967.776492299 30 atime=1759498967.875493803 30 ctime=1759499030.852789628 nordugrid-arc-7.1.1/src/services/monitor/includes/recursive_giis_info.inc0000644000175000002070000001265215067751327027722 0ustar00mockbuildmock00000000000000 "grid.nbi.dk", * "port" => "2135", * "base" => "mds-vo-name=NorduGrid,o=grid"),...) */ $loopcnt++; $tlim = 2; $tout = 5; if($debug && count($giislist) < 5) dbgmsg("
    :::> ".$errors["114"].$tlim.$errors["102"].$tout.$errors["103"]." <:::

    "); $greg = array(); $gfilter = "(objectclass=mds*)"; $ngiis = count($giislist); $counter = 0; $tag = array(); $dsarray = array(); $dnarray = array(); $hnarray = array(); $entries = array(); $blacklist = array(); @include('blacklist.inc'); // uses blacklist if it is in includes or current path // Loop on entered sites // If a host is blacklisted, skip // If a host is a cluster (ARIS), keep and skip // If a host is any other local ARIS, skip for ( $ig = 0; $ig < $ngiis; $ig++ ) { $ghost = $giislist[$ig]["host"]; if ( @$blacklist[$ghost] ) { if ( $debug ) dbgmsg("
    ".$errors["122"]."$ghost
    \n"); continue; } $gport = $giislist[$ig]["port"]; $gbase = $giislist[$ig]["base"]; if ( preg_match("/$element/i",$gbase) ) { // Invent a "fake DN" for host tagging and skip duplicated entries $fakedn = "hn=".$ghost.", ".$gbase; if ( @$tag[$fakedn] ) continue; $tag[$fakedn] = 1; array_push($entries,$giislist[$ig]); $counter++; continue; } elseif ( preg_match("/local/i",$gbase) ) { if ( $debug ) dbgmsg("
    ".$errors["115"].$ghost." (".$errors["116"].$element.")"); continue; } if ( $debug == 2 ) dbgmsg("
    ".$errors["117"]."$ghost..."); $fp = @fsockopen($ghost, $gport, $errno, $errstr, 2); $ldapuri = "ldap://".$ghost.":".$gport; $gconn = ldap_connect($ldapuri); if ( $fp && $gconn ) { fclose($fp); if ( $debug == 2 ) dbgmsg($errors["118"]); array_push($dsarray,$gconn); array_push($dnarray,$gbase); array_push($hnarray,$ghost); } if ( $debug == 2 ) dbgmsg("
    "); } // Some debugging printout if ( $debug == 2 ) { dbgmsg("

    ".$errors["119"].$element.": ".$counter."
    "); foreach ( $entries as $num=>$val ) dbgmsg($val["host"].":".$val["base"]."
    "); } // Check if there is underlying structure $srarray = @ldap_read($dsarray,$dnarray,$gfilter,$greg,0,0,$tlim,LDAP_DEREF_NEVER); // If using the pached LDAP //$srarray = @ldap_read($dsarray,$dnarray,$gfilter,$greg,0,0,$tlim,LDAP_DEREF_NEVER,$tout); // Debug: check if something eventualy timeouts or something if ( $debug ) { $nconns = count($dsarray); for ( $ii = 0; $ii < $nconns; $ii++ ) { $ldconn = $dsarray[$ii]; $hnconn = $hnarray[$ii]; if ( ldap_errno($ldconn) != 0x00 ) { $ldaperrmess = ldap_error($ldconn); dbgmsg("".$errors["120"].$hnconn.": ".$ldaperrmess."
    "); } } } $nhosts = 0; if ( !empty($srarray) ) $nhosts = count($srarray); // If EGIISes are found, loop on contacted EGIISes if ( $nhosts ) { $truecount = 0; for( $ids = 0; $ids < $nhosts; $ids++ ) { // suppose N hosts answered (nhosts), each returned M lower registrants (nrecords) // some of lower registrants are the same and have to be purged // and everything should be re-arranged in a new common array $sr = $srarray[$ids]; $ds = $dsarray[$ids]; $base = $dnarray[$ids]; if ($sr) $truecount++; $record = @ldap_get_entries($ds,$sr); $nrecords = $record["count"]; // Per each contacted EGIIS, loop on potential lower-level EGIISes/clusters for ($i = 0; $i < $nrecords; $i++) { $curdn = $record[$i]["dn"]; $curhost = $record[$i]["mds-service-hn"][0]; $curstat = $record[$i]["mds-reg-status"][0]; $curport = $record[$i]["mds-service-port"][0]; $cursuff = $record[$i]["mds-service-ldap-suffix"][0]; // Introduce "fake" DN for tagging purpose - helps skipping sites registering twice $fakedn = "hn=".$curhost.", ".$cursuff; if ( @$tag[$fakedn] ) continue; if ( @$blacklist[$curhost] ) { if ( $debug>0 ) dbgmsg("
    ".$errors["122"]."$curhost
    \n"); continue; } if ( $curstat != "VALID" ) { if ( $debug ) dbgmsg("
    $curstat".$errors["121"]."$fakedn
    \n"); continue; } // array_push($entries,$record[$i]); $entries[$counter]["host"] = $curhost; $entries[$counter]["port"] = $curport; $entries[$counter]["base"] = $cursuff; if ( $debug == 2 ) dbgmsg("
    ".$errors["123"]."$base: $fakedn
    \n"); $tag[$fakedn] = 1; $counter++; } } // Array $entries contains all possible stuff which registers to a EGIIS // Keep recursing if ($truecount && $loopcnt < 10 ) $entries = recursive_giis_info($entries,$element,$errors,$debug,$loopcnt); } return $entries; } ?> nordugrid-arc-7.1.1/src/services/monitor/PaxHeaders/monitor.in0000644000000000000000000000013115067751327021472 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.878493848 29 ctime=1759499030.62584986 nordugrid-arc-7.1.1/src/services/monitor/monitor.in0000644000175000002070000000010415067751327023370 0ustar00mockbuildmock00000000000000#!/bin/sh lynx http://localhost/@monitor_local_prefix@/loadmon.php nordugrid-arc-7.1.1/src/services/PaxHeaders/candypond0000644000000000000000000000013115067751426017666 xustar0030 mtime=1759499030.891451334 29 atime=1759499034.76351017 30 ctime=1759499030.891451334 nordugrid-arc-7.1.1/src/services/candypond/0000755000175000002070000000000015067751426021646 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022000 xustar0030 mtime=1759498967.773910274 30 atime=1759498967.873493772 30 ctime=1759499030.885675457 nordugrid-arc-7.1.1/src/services/candypond/Makefile.am0000644000175000002070000000135715067751327023710 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libcandypond.la libcandypond_la_SOURCES = CandyPond.h CandyPond.cpp \ CandyPondGenerator.h CandyPondGenerator.cpp libcandypond_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libcandypond_la_LIBADD = \ ../a-rex/grid-manager/libgridmanager.la \ ../a-rex/delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) libcandypond_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356022013 xustar0030 mtime=1759498990.924802095 30 atime=1759499019.124272531 30 ctime=1759499030.886759183 nordugrid-arc-7.1.1/src/services/candypond/Makefile.in0000644000175000002070000007616515067751356023734 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/candypond ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! 
-d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libcandypond_la_DEPENDENCIES = \ ../a-rex/grid-manager/libgridmanager.la \ ../a-rex/delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libcandypond_la_OBJECTS = libcandypond_la-CandyPond.lo \ libcandypond_la-CandyPondGenerator.lo libcandypond_la_OBJECTS = $(am_libcandypond_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libcandypond_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libcandypond_la_CXXFLAGS) $(CXXFLAGS) \ $(libcandypond_la_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libcandypond_la-CandyPond.Plo \ ./$(DEPDIR)/libcandypond_la-CandyPondGenerator.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libcandypond_la_SOURCES) DIST_SOURCES = $(libcandypond_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. 
Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ 
GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = 
@XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ pkglib_LTLIBRARIES = libcandypond.la libcandypond_la_SOURCES = CandyPond.h CandyPond.cpp \ CandyPondGenerator.h CandyPondGenerator.cpp libcandypond_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libcandypond_la_LIBADD = \ ../a-rex/grid-manager/libgridmanager.la \ ../a-rex/delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) libcandypond_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; 
else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/candypond/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/candypond/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libcandypond.la: $(libcandypond_la_OBJECTS) $(libcandypond_la_DEPENDENCIES) $(EXTRA_libcandypond_la_DEPENDENCIES) $(AM_V_CXXLD)$(libcandypond_la_LINK) -rpath $(pkglibdir) $(libcandypond_la_OBJECTS) $(libcandypond_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcandypond_la-CandyPond.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcandypond_la-CandyPondGenerator.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libcandypond_la-CandyPond.lo: CandyPond.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcandypond_la_CXXFLAGS) $(CXXFLAGS) -MT libcandypond_la-CandyPond.lo -MD -MP -MF $(DEPDIR)/libcandypond_la-CandyPond.Tpo -c -o libcandypond_la-CandyPond.lo `test -f 'CandyPond.cpp' || echo '$(srcdir)/'`CandyPond.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcandypond_la-CandyPond.Tpo $(DEPDIR)/libcandypond_la-CandyPond.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='CandyPond.cpp' object='libcandypond_la-CandyPond.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcandypond_la_CXXFLAGS) $(CXXFLAGS) -c -o libcandypond_la-CandyPond.lo `test -f 'CandyPond.cpp' || echo '$(srcdir)/'`CandyPond.cpp libcandypond_la-CandyPondGenerator.lo: CandyPondGenerator.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcandypond_la_CXXFLAGS) $(CXXFLAGS) -MT libcandypond_la-CandyPondGenerator.lo -MD -MP -MF $(DEPDIR)/libcandypond_la-CandyPondGenerator.Tpo -c -o libcandypond_la-CandyPondGenerator.lo `test -f 'CandyPondGenerator.cpp' || echo '$(srcdir)/'`CandyPondGenerator.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcandypond_la-CandyPondGenerator.Tpo $(DEPDIR)/libcandypond_la-CandyPondGenerator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='CandyPondGenerator.cpp' object='libcandypond_la-CandyPondGenerator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libcandypond_la_CXXFLAGS) $(CXXFLAGS) -c -o libcandypond_la-CandyPondGenerator.lo `test -f 'CandyPondGenerator.cpp' || echo '$(srcdir)/'`CandyPondGenerator.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) 
$(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libcandypond_la-CandyPond.Plo -rm -f ./$(DEPDIR)/libcandypond_la-CandyPondGenerator.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libcandypond_la-CandyPond.Plo -rm -f ./$(DEPDIR)/libcandypond_la-CandyPondGenerator.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-pkglibLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkglibLTLIBRARIES install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-pkglibLTLIBRARIES .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/CandyPond.h0000644000000000000000000000013215067751327021774 xustar0030 mtime=1759498967.773910274 30 atime=1759498967.873493772 30 ctime=1759499030.889064691 nordugrid-arc-7.1.1/src/services/candypond/CandyPond.h0000644000175000002070000001070315067751327023677 0ustar00mockbuildmock00000000000000#ifndef CANDYPONDSERVICE_H_ #define CANDYPONDSERVICE_H_ #include #include #include #include #include // A-REX includes for GM configuration and delegation #include "../a-rex/grid-manager/conf/GMConfig.h" #include "../a-rex/grid-manager/files/ControlFileContent.h" #include "../a-rex/grid-manager/files/ControlFileHandling.h" #include "../a-rex/delegation/DelegationStore.h" #include "CandyPondGenerator.h" namespace CandyPond { /** * CandyPond provides functionality for A-REX cache operations that can be * performed by remote clients. It currently consists of three operations: * CacheCheck - allows querying of the cache for the presence of files. * CacheLink - enables a running job to dynamically request cache files to * be linked to its working (session) directory. * CacheLinkQuery - query the status of a transfer initiated by CacheLink. * This service is especially useful in the case of pilot job workflows where * job submission does not follow the usual ARC workflow. In order for input * files to be available to jobs, the pilot job can call CandyPond to * prepare them. 
If requested files are not present in the cache, they can be * downloaded by CandyPond if requested, using the DTR data staging * framework. */ class CandyPond: public Arc::Service { private: /** Return codes of cache link */ enum CacheLinkReturnCode { Success, // everything went ok Staging, // files are still in the middle of downloading NotAvailable, // cache file doesn't exist and dostage is false Locked, // cache file is locked (being downloaded by other process) CacheError, // error with cache (configuration, filesystem etc) PermissionError, // user doesn't have permission on original source LinkError, // error while linking to session dir DownloadError, // error downloading cache file BadURLError, // A bad URL was supplied which could not be handled }; /** Construct a SOAP error message with optional extra reason string */ Arc::MCC_Status make_soap_fault(Arc::Message& outmsg, const std::string& reason = ""); /** Add a Result element to a response */ void add_result_element(Arc::XMLNode& results, const std::string& fileurl, CacheLinkReturnCode returncode, const std::string& reason); /** CandyPond namespace */ Arc::NS ns; /** A-REX configuration */ ARex::GMConfig config; /** Generator to handle data staging */ CandyPondGenerator* dtr_generator; /** Logger object */ static Arc::Logger logger; protected: /* Cache operations */ /** * Check whether the URLs supplied in the input are present in any cache. * Returns in the out message for each file true or false, and if true, * the size of the file on cache disk. * @param mapped_user The local user to which the client DN was mapped */ Arc::MCC_Status CacheCheck(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user); /** * This method is used to link cache files to the session dir. A list of * URLs is supplied and if they are present in the cache and the user * calling the service has permission to access them, then they are linked * to the given session directory. If the user requests that missing files * be staged, then data staging requests are entered. The user should then * use CacheLinkQuery to poll the status of the requests. * @param mapped_user The local user to which the client DN was mapped */ Arc::MCC_Status CacheLink(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user); /** * Query the status of data staging for a given job ID. */ Arc::MCC_Status CacheLinkQuery(Arc::XMLNode in, Arc::XMLNode out); public: /** * Make a new CandyPond. Reads the configuration and determines * the validity of the service. */ CandyPond(Arc::Config *cfg, Arc::PluginArgument* parg); /** * Destroy the CandyPond */ virtual ~CandyPond(void); /** * Main method called by HED when CandyPond is invoked. Directs call * to appropriate CandyPond method. */ virtual Arc::MCC_Status process(Arc::Message &inmsg, Arc::Message &outmsg); /** Returns true if the CandyPond is valid. */ operator bool() { return valid; }; /** Returns true if the CandyPond is not valid. 
*/ bool operator!() { return !valid; }; }; } // namespace CandyPond #endif /* CANDYPONDSERVICE_H_ */ nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/CandyPond.cpp0000644000000000000000000000013215067751327022327 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499030.890148179 nordugrid-arc-7.1.1/src/services/candypond/CandyPond.cpp0000644000175000002070000005347315067751327024245 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "CandyPond.h" namespace CandyPond { static Arc::Plugin *get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; CandyPond* s = new CandyPond((Arc::Config*)(*srvarg),arg); if (*s) return s; delete s; return NULL; } Arc::Logger CandyPond::logger(Arc::Logger::rootLogger, "CandyPond"); CandyPond::CandyPond(Arc::Config *cfg, Arc::PluginArgument* parg) : Service(cfg,parg), dtr_generator(NULL) { valid = false; // read configuration information /* candypond config specifies A-REX conf file /etc/arc.conf */ ns["candypond"] = "urn:candypond_config"; if (!(*cfg)["service"] || !(*cfg)["service"]["config"]) { // error - no config defined logger.msg(Arc::ERROR, "No A-REX config file found in candypond configuration"); return; } std::string arex_config = (std::string)(*cfg)["service"]["config"]; logger.msg(Arc::INFO, "Using A-REX config file %s", arex_config); config.SetConfigFile(arex_config); if (!config.Load()) { logger.msg(Arc::ERROR, "Failed to process A-REX configuration in %s", arex_config); return; } config.Print(); if (config.CacheParams().getCacheDirs().empty() && config.CacheParams().getReadOnlyCacheDirs().empty()) { logger.msg(Arc::ERROR, "No caches defined in configuration"); return; } // check if we are running along with A-REX or standalone bool with_arex = false; if ((*cfg)["service"]["witharex"] && (std::string)(*cfg)["service"]["witharex"] == "true") with_arex = true; // start Generator for data staging dtr_generator = new CandyPondGenerator(config, with_arex); valid = true; } CandyPond::~CandyPond(void) { if (dtr_generator) { delete dtr_generator; dtr_generator = NULL; } } Arc::MCC_Status CandyPond::CacheCheck(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user) { /* Accepts: url ... Returns url true 1234 ... 
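   A sketch of the likely message shapes, reconstructed from the element
   names read and written in the code below (the literal XML example in this
   comment was lost in extraction, so treat the exact layout as an
   assumption):

   Accepts:
   <CacheCheck>
     <TheseFilesNeedToCheck>
       <FileURL>url</FileURL>
       ...
     </TheseFilesNeedToCheck>
   </CacheCheck>

   Returns:
   <CacheCheckResponse>
     <CacheCheckResult>
       <Result>
         <FileURL>url</FileURL>
         <ExistInTheCache>true</ExistInTheCache>
         <FileSize>1234</FileSize>
       </Result>
       ...
     </CacheCheckResult>
   </CacheCheckResponse>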
*/ // substitute cache paths according to mapped user ARex::CacheConfig cache_params(config.CacheParams()); cache_params.substitute(config, mapped_user); Arc::FileCache cache(cache_params.getCacheDirs(), cache_params.getDrainingCacheDirs(), cache_params.getReadOnlyCacheDirs(), "0", mapped_user.get_uid(), mapped_user.get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Error creating cache"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheCheck", "Server error with cache"); } Arc::XMLNode resp = out.NewChild("CacheCheckResponse"); Arc::XMLNode results = resp.NewChild("CacheCheckResult"); for(int n = 0;;++n) { Arc::XMLNode id = in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; if (!id) break; std::string fileurl = (std::string)in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; Arc::XMLNode resultelement = results.NewChild("Result"); resultelement.NewChild("FileURL") = fileurl; bool fileexist = false; std::string file_lfn; Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); Arc::URL url(fileurl); Arc::DataHandle d(url, usercfg); if (!d) { logger.msg(Arc::ERROR, "Can't handle URL %s", fileurl); resultelement.NewChild("ExistInTheCache") = "false"; resultelement.NewChild("FileSize") = "0"; continue; } logger.msg(Arc::INFO, "Looking up URL %s", d->str()); file_lfn = cache.File(d->str()); if (file_lfn.empty()) { logger.msg(Arc::ERROR, "Empty filename returned from FileCache"); resultelement.NewChild("ExistInTheCache") = "false"; resultelement.NewChild("FileSize") = "0"; continue; } logger.msg(Arc::INFO, "Cache file is %s", file_lfn); struct stat fileStat; if (Arc::FileStat(file_lfn, &fileStat, false)) fileexist = true; else if (errno != ENOENT) logger.msg(Arc::ERROR, "Problem accessing cache file %s: %s", file_lfn, Arc::StrError(errno)); resultelement.NewChild("ExistInTheCache") = (fileexist ? "true": "false"); if (fileexist) resultelement.NewChild("FileSize") = Arc::tostring(fileStat.st_size); else resultelement.NewChild("FileSize") = "0"; } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status CandyPond::CacheLink(Arc::XMLNode in, Arc::XMLNode out, const Arc::User& mapped_user) { /* Accepts: url // remote file name // local file on session dir ... uname 123456789 90 false Returns: url 0 success ... */ // read in inputs bool dostage = false; if (in["CacheLink"]["Stage"]) dostage = ((std::string)in["CacheLink"]["Stage"] == "true") ? 
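/* For reference, a sketch of the CacheLink request based on the element
 * names parsed below (not a formal schema):
 *
 *   <CacheLink>
 *     <TheseFilesNeedToLink>
 *       <File>
 *         <FileURL>url</FileURL>        remote file name
 *         <FileName>name</FileName>     local file in the session dir
 *       </File>
 *       ...
 *     </TheseFilesNeedToLink>
 *     <Username>uname</Username>
 *     <JobID>123456789</JobID>
 *     <Priority>90</Priority>
 *     <Stage>false</Stage>
 *   </CacheLink>
 *
 * Each <Result> element in the response carries a ReturnCode holding the
 * integer value of the CacheLinkReturnCode enum declared in CandyPond.h:
 *   0 Success, 1 Staging, 2 NotAvailable, 3 Locked, 4 CacheError,
 *   5 PermissionError, 6 LinkError, 7 DownloadError, 8 BadURLError.
 */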
true : false; Arc::XMLNode jobidnode = in["CacheLink"]["JobID"]; if (!jobidnode) { logger.msg(Arc::ERROR, "No job ID supplied"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Bad input (no JobID specified)"); } std::string jobid = (std::string)jobidnode; int priority = 50; Arc::XMLNode prioritynode = in["CacheLink"]["Priority"]; if (prioritynode) { if (!Arc::stringto((std::string)prioritynode, priority)) { logger.msg(Arc::ERROR, "Bad number in priority element: %s", (std::string)prioritynode); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Bad input (bad number in Priority)"); } if (priority <= 0) priority = 1; if (priority > 100) priority = 100; } Arc::XMLNode uname = in["CacheLink"]["Username"]; if (!uname) { logger.msg(Arc::ERROR, "No username supplied"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Bad input (no Username specified)"); } std::string username = (std::string)uname; // TODO: try to force mapping to supplied user if (username != mapped_user.Name()) { logger.msg(Arc::ERROR, "Supplied username %s does not match mapped username %s", username, mapped_user.Name()); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Supplied username does not match mapped user"); } // check job id and session dir are ok // substitute session dirs and use tmp configuration to find the one for this job std::vector sessions = config.SessionRoots(); for (std::vector::iterator session = sessions.begin(); session != sessions.end(); ++session) { config.Substitute(*session, mapped_user); } ARex::GMConfig tmp_config; tmp_config.SetSessionRoot(sessions); std::string session_root = tmp_config.SessionRoot(jobid); if (session_root.empty()) { logger.msg(Arc::ERROR, "No session directory found"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "No session directory found for supplied Job ID"); } std::string session_dir = session_root + '/' + jobid; logger.msg(Arc::INFO, "Using session dir %s", session_dir); struct stat fileStat; if (!Arc::FileStat(session_dir, &fileStat, true)) { logger.msg(Arc::ERROR, "Failed to stat session dir %s", session_dir); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Failed to access session dir"); } // check permissions - owner must be same as mapped user if (fileStat.st_uid != mapped_user.get_uid()) { logger.msg(Arc::ERROR, "Session dir %s is owned by %i, but current mapped user is %i", session_dir, fileStat.st_uid, mapped_user.get_uid()); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Failed to access session dir"); } // get delegated proxy info to check permission on cached files // TODO: use credentials of caller of this service. For now ask the // delegation store for the proxy of the job. 
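    // The steps below resolve a usable proxy for the permission checks:
    //  1. pick the delegation DB backend configured for A-REX;
    //  2. read the job's local description to obtain its delegation id;
    //  3. look the credential up in the delegation store (FindCred);
    //  4. parse it with Arc::Credential to extract the DN and expiry time,
    //     which are later recorded against cached files (cache.AddDN).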
ARex::DelegationStore::DbType deleg_db_type = ARex::DelegationStore::DbSQLite; switch (config.DelegationDBType()) { case ARex::GMConfig::deleg_db_bdb: deleg_db_type = ARex::DelegationStore::DbBerkeley; break; case ARex::GMConfig::deleg_db_sqlite: deleg_db_type = ARex::DelegationStore::DbSQLite; break; } ARex::DelegationStore dstore(config.DelegationDir(), deleg_db_type, false); std::string proxy_path; // Read job's local file to extract delegation id ARex::JobLocalDescription job_desc; if (job_local_read_file(jobid, config, job_desc) && !job_desc.delegationid.empty()) { proxy_path = dstore.FindCred(job_desc.delegationid, job_desc.DN); } if (proxy_path.empty() || !Arc::FileStat(proxy_path, &fileStat, true)) { logger.msg(Arc::ERROR, "Failed to access proxy of given job id %s at %s", jobid, proxy_path); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", "Failed to access proxy"); } Arc::UserConfig usercfg; usercfg.UtilsDirPath(config.ControlDir()); usercfg.ProxyPath(proxy_path); usercfg.InitializeCredentials(Arc::initializeCredentialsType::NotTryCredentials); std::string dn; Arc::Time exp_time; try { Arc::Credential ci(usercfg.ProxyPath(), usercfg.ProxyPath(), usercfg.CACertificatesDirectory(), "", usercfg.CAUseSystem(), usercfg.CAUseGrid()); dn = ci.GetIdentityName(); exp_time = ci.GetEndTime(); } catch (Arc::CredentialError& e) { logger.msg(Arc::ERROR, "Couldn't handle certificate: %s", e.what()); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLink", std::string("Error with proxy at "+proxy_path)); } logger.msg(Arc::INFO, "DN is %s", dn); // create cache // substitute cache paths according to mapped user ARex::CacheConfig cache_params(config.CacheParams()); cache_params.substitute(config, mapped_user); Arc::FileCache cache(cache_params.getCacheDirs(), cache_params.getDrainingCacheDirs(), cache_params.getReadOnlyCacheDirs(), jobid, mapped_user.get_uid(), mapped_user.get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Error with cache configuration"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheCheck", "Server error with cache"); } // set up response structure Arc::XMLNode resp = out.NewChild("CacheLinkResponse"); Arc::XMLNode results = resp.NewChild("CacheLinkResult"); std::map to_download; // files not in cache (remote, local) bool error_happened = false; // if true then don't bother with downloads at the end // loop through all files for (int n = 0;;++n) { Arc::XMLNode id = in["CacheLink"]["TheseFilesNeedToLink"]["File"][n]; if (!id) break; Arc::XMLNode f_url = id["FileURL"]; if (!f_url) break; Arc::XMLNode f_name = id["FileName"]; if (!f_name) break; std::string fileurl = (std::string)f_url; std::string filename = (std::string)f_name; std::string session_file = session_dir + '/' + filename; logger.msg(Arc::INFO, "Looking up URL %s", fileurl); Arc::URL u(fileurl); Arc::DataHandle d(u, usercfg); if (!d) { logger.msg(Arc::ERROR, "Can't handle URL %s", fileurl); add_result_element(results, fileurl, CandyPond::BadURLError, "Could not handle input URL"); error_happened = true; continue; } d->SetSecure(false); // the actual url used with the cache std::string url = d->str(); bool available = false; bool is_locked = false; if (!cache.Start(url, available, is_locked)) { if (is_locked) { add_result_element(results, fileurl, CandyPond::Locked, "File is locked"); } else { add_result_element(results, fileurl, CandyPond::CacheError, "Error starting cache"); } error_happened = true; continue; } if (!available) { cache.Stop(url); // file not in cache - the result status for these files will be 
set later to_download[fileurl] = session_file; continue; } // file is in cache - check permissions if (!cache.CheckDN(url, dn)) { Arc::DataStatus res = d->Check(false); if (!res.Passed()) { logger.msg(Arc::ERROR, "Permission checking failed: %s", url); add_result_element(results, fileurl, CandyPond::PermissionError, "Permission denied"); error_happened = true; continue; } cache.AddDN(url, dn, exp_time); logger.msg(Arc::VERBOSE, "Permission checking passed for url %s", url); } // link file bool try_again = false; // TODO add executable and copy flags to request if (!cache.Link(session_file, url, false, false, false, try_again)) { // If locked, send to DTR and let it deal with the retry strategy if (try_again) { to_download[fileurl] = session_file; continue; } // failed to link - report as if not there add_result_element(results, fileurl, CandyPond::LinkError, "Failed to link to session dir"); error_happened = true; continue; } // Successfully linked to session - move to scratch if necessary // Note: won't work if scratch is not mounted on CE if (!config.ScratchDir().empty()) { std::string scratch_file(config.ScratchDir()+'/'+jobid+'/'+filename); // Access session and scratch under mapped uid Arc::FileAccess fa; if (!fa.fa_setuid(mapped_user.get_uid(), mapped_user.get_gid()) || !fa.fa_rename(session_file, scratch_file)) { logger.msg(Arc::ERROR, "Failed to move %s to %s: %s", session_file, scratch_file, Arc::StrError(errno)); add_result_element(results, fileurl, CandyPond::LinkError, "Failed to link to move file from session dir to scratch"); error_happened = true; continue; } } // everything went ok so report success add_result_element(results, fileurl, CandyPond::Success, "Success"); } // check for any downloads to perform, only if requested and there were no previous errors if (to_download.empty() || error_happened || !dostage) { for (std::map::iterator i = to_download.begin(); i != to_download.end(); ++i) { add_result_element(results, i->first, CandyPond::NotAvailable, "File not available"); } return Arc::MCC_Status(Arc::STATUS_OK); } bool stage_start_error = false; // Loop through files to download and start a DTR for each one for (std::map::iterator i = to_download.begin(); i != to_download.end(); ++i) { // if one DTR failed to start then don't start any more // TODO cancel others already started if (stage_start_error) { add_result_element(results, i->first, CandyPond::DownloadError, "Failed to start data staging"); continue; } logger.msg(Arc::VERBOSE, "Starting new DTR for %s", i->first); if (!dtr_generator->addNewRequest(mapped_user, i->first, i->second, usercfg, jobid, priority)) { logger.msg(Arc::ERROR, "Failed to start new DTR for %s", i->first); add_result_element(results, i->first, CandyPond::DownloadError, "Failed to start data staging"); stage_start_error = true; } else { add_result_element(results, i->first, CandyPond::Staging, "Staging started"); } } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status CandyPond::CacheLinkQuery(Arc::XMLNode in, Arc::XMLNode out) { /* Accepts: 123456789 Returns: 0 success */ Arc::XMLNode jobidnode = in["CacheLinkQuery"]["JobID"]; if (!jobidnode) { logger.msg(Arc::ERROR, "No job ID supplied"); return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheLinkQuery", "Bad input (no JobID specified)"); } std::string jobid = (std::string)jobidnode; // set up response structure Arc::XMLNode resp = out.NewChild("CacheLinkQueryResponse"); Arc::XMLNode results = resp.NewChild("CacheLinkQueryResult"); std::string error; // query Generator for DTR status if 
(dtr_generator->queryRequestsFinished(jobid, error)) { if (error.empty()) { logger.msg(Arc::INFO, "Job %s: all files downloaded successfully", jobid); add_result_element(results, "", CandyPond::Success, "Success"); } else if (error == "Job not found") { add_result_element(results, "", CandyPond::CacheError, "No such job"); } else { logger.msg(Arc::INFO, "Job %s: Some downloads failed", jobid); add_result_element(results, "", CandyPond::DownloadError, "Download failed: " + error); } } else { logger.msg(Arc::VERBOSE, "Job %s: files still downloading", jobid); add_result_element(results, "", CandyPond::Staging, "Still staging"); } return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status CandyPond::process(Arc::Message &inmsg, Arc::Message &outmsg) { // Check authorization if(!ProcessSecHandlers(inmsg, "incoming")) { logger.msg(Arc::ERROR, "CandyPond: Unauthorized"); return make_soap_fault(outmsg, "Authorization failed"); } std::string method = inmsg.Attributes()->get("HTTP:METHOD"); // find local user std::string mapped_username = inmsg.Attributes()->get("SEC:LOCALID"); if (mapped_username.empty()) { logger.msg(Arc::ERROR, "No local user mapping found"); return make_soap_fault(outmsg, "No local user mapping found"); } Arc::User mapped_user(mapped_username); if(method == "POST") { logger.msg(Arc::VERBOSE, "process: POST"); logger.msg(Arc::INFO, "Identity is %s", inmsg.Attributes()->get("TLS:PEERDN")); // Both input and output are supposed to be SOAP // Extracting payload Arc::PayloadSOAP* inpayload = NULL; try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_soap_fault(outmsg); } // Applying known namespaces inpayload->Namespaces(ns); if(logger.getThreshold() <= Arc::VERBOSE) { std::string str; inpayload->GetDoc(str, true); logger.msg(Arc::VERBOSE, "process: request=%s",str); } // Analyzing request Arc::XMLNode op = inpayload->Child(0); if(!op) { logger.msg(Arc::ERROR, "input does not define operation"); return make_soap_fault(outmsg); } logger.msg(Arc::VERBOSE, "process: operation: %s",op.Name()); Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns); outpayload->Namespaces(ns); Arc::MCC_Status result(Arc::STATUS_OK); // choose operation if (MatchXMLName(op,"CacheCheck")) { result = CacheCheck(*inpayload, *outpayload, mapped_user); } else if (MatchXMLName(op, "CacheLink")) { result = CacheLink(*inpayload, *outpayload, mapped_user); } else if (MatchXMLName(op, "CacheLinkQuery")) { result = CacheLinkQuery(*inpayload, *outpayload); } else { // unknown operation logger.msg(Arc::ERROR, "SOAP operation is not supported: %s", op.Name()); delete outpayload; return make_soap_fault(outmsg); } if (!result) return make_soap_fault(outmsg, result.getExplanation()); if (logger.getThreshold() <= Arc::VERBOSE) { std::string str; outpayload->GetDoc(str, true); logger.msg(Arc::VERBOSE, "process: response=%s", str); } outmsg.Payload(outpayload); if (!ProcessSecHandlers(outmsg,"outgoing")) { logger.msg(Arc::ERROR, "Security Handlers processing failed"); delete outmsg.Payload(NULL); return Arc::MCC_Status(); } } else { // only POST supported logger.msg(Arc::ERROR, "Only POST is supported in CandyPond"); return Arc::MCC_Status(); } return Arc::MCC_Status(Arc::STATUS_OK); } void CandyPond::add_result_element(Arc::XMLNode& results, const std::string& fileurl, CacheLinkReturnCode returncode, const std::string& reason) { Arc::XMLNode resultelement = results.NewChild("Result"); if (!fileurl.empty()) 
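    /* Emits one <Result> element of the form
     *   <Result>
     *     <FileURL>...</FileURL>        omitted when fileurl is empty, as
     *                                   for CacheLinkQuery results
     *     <ReturnCode>0</ReturnCode>
     *     <ReturnCodeExplanation>Success</ReturnCodeExplanation>
     *   </Result>
     * where ReturnCode is the integer value of the CacheLinkReturnCode enum.
     */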
resultelement.NewChild("FileURL") = fileurl; resultelement.NewChild("ReturnCode") = Arc::tostring(returncode); resultelement.NewChild("ReturnCodeExplanation") = reason; } Arc::MCC_Status CandyPond::make_soap_fault(Arc::Message& outmsg, const std::string& reason) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns,true); Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL; if(fault) { fault->Code(Arc::SOAPFault::Sender); if (reason.empty()) fault->Reason("Failed processing request"); else fault->Reason("Failed processing request: "+reason); } outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace CandyPond extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "candypond", "HED:SERVICE", NULL, 0, &CandyPond::get_service }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/CandyPondGenerator.h0000644000000000000000000000013215067751327023643 xustar0030 mtime=1759498967.773910274 30 atime=1759498967.873493772 30 ctime=1759499030.891275748 nordugrid-arc-7.1.1/src/services/candypond/CandyPondGenerator.h0000644000175000002070000000571715067751327025557 0ustar00mockbuildmock00000000000000#ifndef CANDYPONDGENERATOR_H_ #define CANDYPONDGENERATOR_H_ #include #include #include "../a-rex/grid-manager/conf/StagingConfig.h" namespace CandyPond { /// DTR Generator for CandyPond. class CandyPondGenerator : public DataStaging::DTRCallback { private: /// Scheduler object to process DTRs. DataStaging::Scheduler* scheduler; /// Generator state DataStaging::ProcessState generator_state; /// Whether to use the host certificate when communicating with remote delivery bool use_host_cert; /// Scratch directory used by job std::string scratch_dir; /// Whether we are running with A-REX or we manage the Scheduler ourselves bool run_with_arex; /// A-REX configuration const ARex::GMConfig& config; /// Staging configuration ARex::StagingConfig staging_conf; /// Map of job id to DTRs std::multimap processing_dtrs; /// Lock for DTR map Arc::SimpleCondition processing_lock; /// Map of job id to error message, if any std::map finished_jobs; /// Lock for finished job map Arc::SimpleCondition finished_lock; /// Logger static Arc::Logger logger; public: /// Start Generator and get Scheduler instance. /** * If with_arex is true then it is assumed that A-REX takes care of * configuring, starting and stopping the DTR Scheduler. If CandyPond * is run outside of A-REX then it starts an independent DTR instance, * using parameters given in arc.conf. * @param config A-REX configuration * @param with_arex If true then we assume A-REX starts the scheduler, if * false then we start and stop it. */ CandyPondGenerator(const ARex::GMConfig& config, bool with_arex); /// Stop Scheduler if we are not running with A-REX ~CandyPondGenerator(); /// Callback method to receive completed DTRs void receiveDTR(DataStaging::DTR_ptr dtr); /// Add a new request. /** * @param user User for this transfer * @param source Source file * @param destination Destination file * @param usercfg UserConfig with proxy information * @param jobid Job identifier * @param priority DTR priority */ bool addNewRequest(const Arc::User& user, const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, int priority); /// Query requests for given job id. /** * @param jobid Job ID to query * @param error If any DTR finished with an error, the description is put * in error. 
* @return True if all requests for the job have finished, false otherwise */ bool queryRequestsFinished(const std::string& jobid, std::string& error); }; } // namespace CandyPond #endif /* CANDYPONDGENERATOR_H_ */ nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/CandyPondGenerator.cpp0000644000000000000000000000013215067751327024176 xustar0030 mtime=1759498967.773910274 30 atime=1759498967.873493772 30 ctime=1759499030.892364401 nordugrid-arc-7.1.1/src/services/candypond/CandyPondGenerator.cpp0000644000175000002070000002124315067751327026102 0ustar00mockbuildmock00000000000000#include #include #include "../a-rex/grid-manager/conf/UrlMapConfig.h" #include "CandyPondGenerator.h" namespace CandyPond { Arc::Logger CandyPondGenerator::logger(Arc::Logger::rootLogger, "CandyPondGenerator"); CandyPondGenerator::CandyPondGenerator(const ARex::GMConfig& conf, bool with_arex) : generator_state(DataStaging::INITIATED), use_host_cert(false), scratch_dir(conf.ScratchDir()), run_with_arex(with_arex), config(conf), staging_conf(config) { scheduler = DataStaging::Scheduler::getInstance(); if (run_with_arex) { // A-REX sets DTR configuration generator_state = DataStaging::RUNNING; return; } if (!staging_conf) return; // Convert A-REX configuration values to DTR configuration // TODO find location for DTR state log, should be different from A-REX's // Log level for DTR DataStaging::DTR::LOG_LEVEL = staging_conf.get_log_level(); // Processing limits scheduler->SetSlots(staging_conf.get_max_processor(), staging_conf.get_max_processor(), staging_conf.get_max_delivery(), staging_conf.get_max_emergency(), staging_conf.get_max_prepared()); // Transfer shares DataStaging::TransferSharesConf share_conf(staging_conf.get_share_type(), staging_conf.get_defined_shares()); scheduler->SetTransferSharesConf(share_conf); // Transfer limits DataStaging::TransferParameters transfer_limits; transfer_limits.min_current_bandwidth = staging_conf.get_min_speed(); transfer_limits.averaging_time = staging_conf.get_min_speed_time(); transfer_limits.min_average_bandwidth = staging_conf.get_min_average_speed(); transfer_limits.max_inactivity_time = staging_conf.get_max_inactivity_time(); scheduler->SetTransferParameters(transfer_limits); // URL mappings ARex::UrlMapConfig url_map(config); scheduler->SetURLMapping(url_map); // Preferred pattern scheduler->SetPreferredPattern(staging_conf.get_preferred_pattern()); // Delivery services scheduler->SetDeliveryServices(staging_conf.get_delivery_services()); // Limit on remote delivery size scheduler->SetRemoteSizeLimit(staging_conf.get_remote_size_limit()); // Set whether to use host cert for remote delivery use_host_cert = staging_conf.get_use_host_cert_for_remote_delivery(); // End of configuration - start Scheduler thread scheduler->start(); generator_state = DataStaging::RUNNING; } CandyPondGenerator::~CandyPondGenerator() { generator_state = DataStaging::STOPPED; if (!run_with_arex) scheduler->stop(); // delete scheduler? it is possible another thread is using the static instance } void CandyPondGenerator::receiveDTR(DataStaging::DTR_ptr dtr) { // Take DTR out of processing map and add to finished jobs logger.msg(Arc::INFO, "DTR %s finished with state %s", dtr->get_id(), dtr->get_status().str()); std::string jobid (dtr->get_parent_job_id()); // Add to finished jobs std::string error_msg; if (dtr->error()) error_msg = dtr->get_error_status().GetDesc() + ". 
"; finished_lock.lock(); finished_jobs[jobid] += error_msg; finished_lock.unlock(); // remove from processing jobs processing_lock.lock(); std::pair::iterator, std::multimap::iterator> dtr_iterator = processing_dtrs.equal_range(jobid); if (dtr_iterator.first == dtr_iterator.second) { processing_lock.unlock(); logger.msg(Arc::WARNING, "No active job id %s", jobid); return; } // remove this DTR from the processing list for (std::multimap::iterator i = dtr_iterator.first; i != dtr_iterator.second; ++i) { if (i->second->get_id() == dtr->get_id()) { processing_dtrs.erase(i); break; } } processing_lock.unlock(); // Move to scratch if necessary if (!dtr->error() && !scratch_dir.empty()) { // Get filename relative to session dir std::string session_file = dtr->get_destination()->GetURL().Path(); std::string::size_type pos = session_file.find(jobid); if (pos == std::string::npos) { logger.msg(Arc::ERROR, "Could not determine session directory from filename %s", session_file); finished_lock.lock(); finished_jobs[jobid] += "Could not determine session directory from filename for during move to scratch. "; finished_lock.unlock(); return; } std::string scratch_file(scratch_dir+'/'+session_file.substr(pos)); // Access session and scratch under mapped uid Arc::FileAccess fa; if (!fa.fa_setuid(dtr->get_local_user().get_uid(), dtr->get_local_user().get_gid()) || !fa.fa_rename(session_file, scratch_file)) { logger.msg(Arc::ERROR, "Failed to move %s to %s: %s", session_file, scratch_file, Arc::StrError(errno)); finished_lock.lock(); finished_jobs[jobid] += "Failed to move file from session dir to scratch. "; finished_lock.unlock(); } } } bool CandyPondGenerator::addNewRequest(const Arc::User& user, const std::string& source, const std::string& destination, const Arc::UserConfig& usercfg, const std::string& jobid, int priority) { if (generator_state != DataStaging::RUNNING) return false; std::list logs; // Logger destinations for this DTR. Uses a string stream to keep log in memory rather // than a file. LogStream keeps a reference to the stream so we have to use // a pointer. The LogDestinations are deleted when the DTR is received back. 
// TODO: provide access to this log somehow std::stringstream * stream = new std::stringstream(); Arc::LogDestination * output = new Arc::LogStream(*stream); logs.push_back(output); DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, usercfg, jobid, user.get_uid(), logs, "DataStaging")); if (!(*dtr)) { logger.msg(Arc::ERROR, "Invalid DTR for source %s, destination %s", source, destination); return false; } // set retry count (tmp errors only) dtr->set_tries_left(staging_conf.get_max_retries()); // set priority dtr->set_priority(priority); // set whether to use A-REX host certificate for remote delivery services dtr->host_cert_for_remote_delivery(use_host_cert); // use a separate share from A-REX downloads dtr->set_sub_share("candypond-download"); // substitute cache paths based on user ARex::CacheConfig cache_params(config.CacheParams()); cache_params.substitute(config, user); DataStaging::DTRCacheParameters cache_parameters; cache_parameters.cache_dirs = cache_params.getCacheDirs(); // we are definitely going to download so read-only caches are not useful here dtr->set_cache_parameters(cache_parameters); dtr->registerCallback(this, DataStaging::GENERATOR); dtr->registerCallback(scheduler, DataStaging::SCHEDULER); processing_lock.lock(); processing_dtrs.insert(std::pair(jobid, dtr)); processing_lock.unlock(); // Avoid logging when possible during scheduler submission because it gets // blocked by LFC calls locking the environment Arc::LogLevel log_level = Arc::Logger::getRootLogger().getThreshold(); Arc::Logger::getRootLogger().setThreshold(Arc::ERROR); DataStaging::DTR::push(dtr, DataStaging::SCHEDULER); Arc::Logger::getRootLogger().setThreshold(log_level); return true; } bool CandyPondGenerator::queryRequestsFinished(const std::string& jobid, std::string& error) { // First check currently processing DTRs processing_lock.lock(); if (processing_dtrs.find(jobid) != processing_dtrs.end()) { logger.msg(Arc::VERBOSE, "DTRs still running for job %s", jobid); processing_lock.unlock(); return false; } processing_lock.unlock(); // Now check finished jobs finished_lock.lock(); if (finished_jobs.find(jobid) != finished_jobs.end()) { logger.msg(Arc::VERBOSE, "All DTRs finished for job %s", jobid); error = finished_jobs[jobid]; finished_lock.unlock(); return true; } // Job not running or finished - report error logger.msg(Arc::WARNING, "Job %s not found", jobid); error = "Job not found"; return true; } } // namespace CandyPond nordugrid-arc-7.1.1/src/services/candypond/PaxHeaders/README0000644000000000000000000000013215067751327020624 xustar0030 mtime=1759498967.773910274 30 atime=1759498967.873493772 30 ctime=1759499030.887921507 nordugrid-arc-7.1.1/src/services/candypond/README0000644000175000002070000000020615067751327022524 0ustar00mockbuildmock00000000000000The cache service is a service inside HED which exposes some operations on the A-REX cache to remote clients through a WS interface. 
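For illustration, a minimal C++ client calling the CacheCheck operation could
be sketched as below. This is only a sketch: the endpoint URL and file URL are
hypothetical, credential configuration is omitted, and a real deployment also
requires the caller to be mapped to a local user by the service's security
handlers.

#include <iostream>
#include <string>

#include <arc/URL.h>
#include <arc/message/PayloadSOAP.h>
#include <arc/communication/ClientInterface.h>

int main() {
  Arc::MCCConfig cfg;               // TLS credentials would be configured here
  Arc::ClientSOAP client(cfg, Arc::URL("https://example.org:60000/candypond"), 60);

  // Build the CacheCheck request with one file URL to look up
  Arc::NS ns;
  Arc::PayloadSOAP request(ns);
  Arc::XMLNode check = request.NewChild("CacheCheck");
  check.NewChild("TheseFilesNeedToCheck").NewChild("FileURL") =
      "http://example.org/data/file1";

  Arc::PayloadSOAP* response = NULL;
  Arc::MCC_Status status = client.process(&request, &response);
  if (!status || !response) {
    std::cerr << "CacheCheck call failed" << std::endl;
    delete response;
    return 1;
  }
  std::string xml;
  response->GetDoc(xml, true);      // response carries ExistInTheCache/FileSize
  std::cout << xml << std::endl;
  delete response;
  return 0;
}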
nordugrid-arc-7.1.1/src/services/PaxHeaders/wrappers0000644000000000000000000000013115067751426017552 xustar0030 mtime=1759499030.958452352 29 atime=1759499034.76351017 30 ctime=1759499030.958452352 nordugrid-arc-7.1.1/src/services/wrappers/0000755000175000002070000000000015067751426021532 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/wrappers/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327021664 xustar0030 mtime=1759498967.781492375 30 atime=1759498967.879493863 30 ctime=1759499030.953798669 nordugrid-arc-7.1.1/src/services/wrappers/Makefile.am0000644000175000002070000000017115067751327023565 0ustar00mockbuildmock00000000000000if PYTHON_SERVICE PYTHON_WRAPPER = python else PYTHON_WRAPPER = endif SUBDIRS = $(PYTHON_WRAPPER) DIST_SUBDIRS = python nordugrid-arc-7.1.1/src/services/wrappers/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357021700 xustar0030 mtime=1759498991.266126482 30 atime=1759499019.529278685 30 ctime=1759499030.955133552 nordugrid-arc-7.1.1/src/services/wrappers/Makefile.in0000644000175000002070000006104215067751357023605 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive 
am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = 
@BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = 
@PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ 
top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @PYTHON_SERVICE_FALSE@PYTHON_WRAPPER = @PYTHON_SERVICE_TRUE@PYTHON_WRAPPER = python SUBDIRS = $(PYTHON_WRAPPER) DIST_SUBDIRS = python all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/wrappers/PaxHeaders/README0000644000000000000000000000013215067751327020510 xustar0030 mtime=1759498967.782577889 30 atime=1759498967.879493863 30 ctime=1759499030.956493067 nordugrid-arc-7.1.1/src/services/wrappers/README0000644000175000002070000000004015067751327022404 0ustar00mockbuildmock00000000000000collection of language bindings nordugrid-arc-7.1.1/src/services/wrappers/PaxHeaders/python0000644000000000000000000000013115067751426021073 xustar0030 mtime=1759499030.994452898 29 atime=1759499034.76351017 30 ctime=1759499030.994452898 nordugrid-arc-7.1.1/src/services/wrappers/python/0000755000175000002070000000000015067751426023053 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/wrappers/python/PaxHeaders/pythonwrapper.cpp0000644000000000000000000000013215067751327024577 xustar0030 mtime=1759498967.782672262 30 atime=1759498967.879493863 30 ctime=1759499030.990242902 nordugrid-arc-7.1.1/src/services/wrappers/python/pythonwrapper.cpp0000644000175000002070000004014715067751327026507 0ustar00mockbuildmock00000000000000// based on: // http://www.codeproject.com/cpp/embedpython_1.asp // http://coding.derkeiler.com/Archive/Python/comp.lang.python/2006-11/msg01211.html #ifdef HAVE_CONFIG_H #include #endif #include "pythonwrapper.h" #include #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /* SWIG Specific object SHOULD BE SYNC WITH generated SWIG CODE */ typedef void *(*swig_converter_func)(void *); typedef struct swig_type_info *(*swig_dycast_func)(void **); typedef struct swig_type_info { const char *name; /* mangled name of this type */ const char *str; /* human readable name of this type */ swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ struct swig_cast_info *cast; /* linked list of types that can cast into this type */ void *clientdata; /* language specific type data */ int owndata; /* flag if the structure owns the clientdata */ } swig_type_info; /* Structure to store a type and conversion function used for casting */ typedef struct swig_cast_info { swig_type_info *type; /* pointer to type that is equivalent to this type */ swig_converter_func converter; /* function to cast the void pointers */ struct swig_cast_info *next; /* pointer to next cast in linked list */ struct swig_cast_info *prev; /* pointer to the previous cast */ } swig_cast_info; typedef struct { PyObject_HEAD void *ptr; swig_type_info *ty; int own; PyObject *next; } PySwigObject; #ifdef __cplusplus } #endif void *extract_swig_wrappered_pointer(PyObject *obj) { char this_str[] = "this"; if (!PyObject_HasAttrString(obj, this_str)) { return NULL; } PyObject *thisAttr = PyObject_GetAttrString(obj, this_str); if (thisAttr == NULL) { return NULL; } void* ptr = ((PySwigObject *)thisAttr)->ptr; Py_DECREF(thisAttr); return ptr; } // Thread state of main python interpreter thread static PyThreadState *tstate = NULL; static int python_service_counter = 0; static std::mutex service_lock; Arc::Logger Arc::Service_PythonWrapper::logger(Service::logger, "PythonWrapper"); static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; Arc::ChainContext* ctx = (Arc::ChainContext*)(*srvarg); // ((Arc::PluginsFactory*)(*ctx))->load("pythonservice",false,true); // doesn't work, why? 
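    // A plausible explanation (an assumption, not verified here): the
    // PluginsFactory loads modules without RTLD_GLOBAL, so symbols from the
    // pythonservice module stay local to it. The explicit ::dlopen below with
    // RTLD_NOW | RTLD_GLOBAL re-opens the same shared object and promotes its
    // symbols to the global namespace, where the embedded Python interpreter
    // and SWIG-generated extension modules can resolve them.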
::dlopen(((Arc::PluginsFactory*)(*ctx))->findLocation("pythonservice").c_str(),RTLD_NOW | RTLD_GLOBAL); service_lock.lock(); // Initialize the Python Interpreter if (!Py_IsInitialized()) { Py_InitializeEx(0); // python does not handle signals PyEval_InitThreads(); // Main thread created and lock acquired tstate = PyThreadState_Get(); // Get current thread if(tstate == NULL) { Arc::Logger::getRootLogger().msg(Arc::ERROR, "Failed to initialize main Python thread"); return NULL; } } else { if(tstate == NULL) { Arc::Logger::getRootLogger().msg(Arc::ERROR, "Main Python thread was not initialized"); return NULL; } PyEval_AcquireThread(tstate); } python_service_counter++; Arc::Logger::getRootLogger().msg(Arc::DEBUG, "Loading %u-th Python service", python_service_counter); service_lock.unlock(); Arc::Service* service = new Arc::Service_PythonWrapper((Arc::Config*)(*srvarg),arg); PyEval_ReleaseThread(tstate); // Release current thread Arc::Logger::getRootLogger().msg(Arc::DEBUG, "Initialized %u-th Python service", python_service_counter); return service; } extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "pythonservice", "HED:SERVICE", NULL, 0, &get_service }, { NULL, NULL, NULL, 0, NULL } }; namespace Arc { Service_PythonWrapper::Service_PythonWrapper(Arc::Config *cfg,Arc::PluginArgument* parg):Service(cfg,parg), initialized(false) { PyObject *py_module_name = NULL; PyObject *py_arc_module_name = NULL; PyObject *dict = NULL; PyObject *arc_dict = NULL; PyObject *arc_cfg_klass = NULL; PyObject *arg = NULL; PyObject *py_cfg = NULL; PyObject *klass = NULL; arc_module = NULL; module = NULL; object = NULL; if (tstate == NULL) { logger.msg(Arc::ERROR, "Main Python thread is not initialized"); return; } //PyEval_AcquireThread(tstate); std::string path = (std::string)(*cfg)["ClassName"]; std::size_t p = path.rfind("."); if (p == std::string::npos) { logger.msg(Arc::ERROR, "Invalid class name"); return; } std::string module_name = path.substr(0, p); std::string class_name = path.substr(p+1, path.length()); logger.msg(Arc::VERBOSE, "class name: %s", class_name); logger.msg(Arc::VERBOSE, "module name: %s", module_name); // Convert module name to Python string #if PY_MAJOR_VERSION >= 3 py_module_name = PyUnicode_FromString(module_name.c_str()); #else py_module_name = PyString_FromString(module_name.c_str()); #endif if (py_module_name == NULL) { logger.msg(Arc::ERROR, "Cannot convert module name to Python string"); if (PyErr_Occurred()) PyErr_Print(); return; } // Load module module = PyImport_Import(py_module_name); if (module == NULL) { logger.msg(Arc::ERROR, "Cannot import module"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(py_module_name); return; } Py_DECREF(py_module_name); // Import ARC python wrapper #if PY_MAJOR_VERSION >= 3 py_arc_module_name = PyUnicode_FromString("arc"); #else py_arc_module_name = PyString_FromString("arc"); #endif if (py_arc_module_name == NULL) { logger.msg(Arc::ERROR, "Cannot convert ARC module name to Python string"); if (PyErr_Occurred()) PyErr_Print(); return; } // Load arc module arc_module = PyImport_Import(py_arc_module_name); if (arc_module == NULL) { logger.msg(Arc::ERROR, "Cannot import ARC module"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(py_arc_module_name); return; } Py_DECREF(py_arc_module_name); // arc_dict is a borrowed reference arc_dict = PyModule_GetDict(arc_module); if (arc_dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of ARC module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get the arc config class // 
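        // (The C++ Arc::Config pointer is later handed to this Python class
        // via Py_BuildValue("(l)", ...), i.e. as a plain integer, which the
        // SWIG-generated arc.Config wrapper apparently converts back into a
        // typed object on the Python side.)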
arc_cfg_klass is a borrowed reference arc_cfg_klass = PyDict_GetItemString(arc_dict, "Config"); if (arc_cfg_klass == NULL) { logger.msg(Arc::ERROR, "Cannot find ARC Config class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check whether it is really callable if (!PyCallable_Check(arc_cfg_klass)) { logger.msg(Arc::ERROR, "Config class is not an object"); return; } // Get dictionary of module content // dict is a borrowed reference dict = PyModule_GetDict(module); if (dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of module"); if (PyErr_Occurred()) PyErr_Print(); return; } // Get the class // klass is a borrowed reference klass = PyDict_GetItemString(dict, (char*)class_name.c_str()); if (klass == NULL) { logger.msg(Arc::ERROR, "Cannot find service class"); if (PyErr_Occurred()) PyErr_Print(); return; } // check whether it is really callable if (PyCallable_Check(klass)) { arg = Py_BuildValue("(l)", (long int)cfg); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create config argument"); if (PyErr_Occurred()) PyErr_Print(); return; } py_cfg = PyObject_CallObject(arc_cfg_klass, arg); if (py_cfg == NULL) { logger.msg(Arc::ERROR, "Cannot convert config to Python object"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return; } Py_DECREF(arg); arg = Py_BuildValue("(O)", py_cfg); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create argument of the constructor"); if (PyErr_Occurred()) PyErr_Print(); return; } // create an instance of the class object = PyObject_CallObject(klass, arg); Py_DECREF(arg); // release arg on both the success and the failure path if (object == NULL) { logger.msg(Arc::ERROR, "Cannot create instance of Python class"); if (PyErr_Occurred()) PyErr_Print(); return; } } else { logger.msg(Arc::ERROR, "%s is not an object", class_name); return; } //tstate = PyGILState_GetThisThreadState(); //PyEval_ReleaseThread(tstate); logger.msg(Arc::VERBOSE, "Python Wrapper constructor succeeded"); initialized = true; } Service_PythonWrapper::~Service_PythonWrapper(void) { service_lock.lock(); PyEval_AcquireThread(tstate); // Release python objects - it is needed for Python // destructors to be called if(arc_module) { Py_DECREF(arc_module); } if(module) { Py_DECREF(module); } if(object) { Py_DECREF(object); } // Finish the Python Interpreter python_service_counter--; logger.msg(Arc::VERBOSE, "Python Wrapper destructor (%d)", python_service_counter); if (python_service_counter == 0) { Py_Finalize(); } else { PyEval_ReleaseThread(tstate); } service_lock.unlock(); } Arc::MCC_Status Service_PythonWrapper::make_fault(Arc::Message& outmsg) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(Arc::NS(),true); Arc::SOAPFault* fault = outpayload->Fault(); if(fault) { fault->Code(Arc::SOAPFault::Sender); fault->Reason("Failed processing request"); }; outmsg.Payload(outpayload); return Arc::MCC_Status(); } /* Arc::MCC_Status Service_PythonWrapper::python_error(const char *str) { return Arc::MCC_Status(Arc::GENERIC_ERROR); }*/ class PythonLock { private: PyGILState_STATE gstate_; Arc::Logger& logger_; public: PythonLock(Arc::Logger& logger):logger_(logger) { gstate_ = PyGILState_Ensure(); logger_.msg(Arc::VERBOSE, "Python interpreter locked"); }; ~PythonLock(void) { PyGILState_Release(gstate_); logger_.msg(Arc::VERBOSE, "Python interpreter released"); }; }; class XMLNodeP { private: Arc::XMLNode* obj_; public: XMLNodeP(Arc::XMLNode& node):obj_(NULL) { try { obj_ = new Arc::XMLNode(node); } catch(std::exception& e) { }; };
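// RAII holder for a heap-allocated copy of the node; "operator long int" below hands the raw pointer to Py_BuildValue("(l)", ...) so the SWIG bindings can re-wrap it on the Python side, and the copy is freed when the holder goes out of scope.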
~XMLNodeP(void) { if(obj_) delete obj_; }; XMLNode& operator*(void) const { return *obj_; }; XMLNode* operator->(void) const { return obj_; }; operator bool(void) { return (obj_ != NULL); }; bool operator!(void) { return (obj_ == NULL); }; operator long int(void) { return (long int)obj_; }; private: XMLNodeP(); XMLNodeP(XMLNodeP const&); XMLNodeP& operator=(XMLNodeP const&); }; class SOAPMessageP { private: Arc::SOAPMessage* obj_; public: SOAPMessageP(Arc::Message& msg):obj_(NULL) { try { obj_ = new Arc::SOAPMessage(msg); } catch(std::exception& e) { }; }; ~SOAPMessageP(void) { if(obj_) delete obj_; }; SOAPMessage& operator*(void) const { return *obj_; }; SOAPMessage* operator->(void) const { return obj_; }; operator bool(void) { return (obj_ != NULL); }; bool operator!(void) { return (obj_ == NULL); }; operator long int(void) { return (long int)obj_; }; private: SOAPMessageP(); SOAPMessageP(SOAPMessageP const&); SOAPMessageP& operator=(SOAPMessageP const&); }; class PyObjectP { private: PyObject* obj_; PyObjectP(); PyObjectP& operator=(PyObjectP const&); public: PyObjectP(PyObjectP const& p):obj_(p.obj_) { Py_INCREF(obj_); }; PyObjectP(PyObject* obj):obj_(obj) { }; ~PyObjectP(void) { if(obj_) { Py_DECREF(obj_); } }; operator bool(void) { return (obj_ != NULL); }; bool operator!(void) { return (obj_ == NULL); }; operator PyObject*(void) { return obj_; }; }; Arc::MCC_Status Service_PythonWrapper::process(Arc::Message& inmsg, Arc::Message& outmsg) { //PyObject *py_status = NULL; //PyObject *py_inmsg = NULL; //PyObject *py_outmsg = NULL; PyObject *arg = NULL; logger.msg(Arc::VERBOSE, "Python wrapper process called"); if(!initialized) return Arc::MCC_Status(); PythonLock plock(logger); // Convert input message to SOAP message Arc::SOAPMessageP inmsg_ptr(inmsg); if(!inmsg_ptr) { logger.msg(Arc::ERROR, "Failed to create input SOAP container"); return make_fault(outmsg); } if(!inmsg_ptr->Payload()) { logger.msg(Arc::ERROR, "input is not SOAP"); return make_fault(outmsg); } // Convert incoming message to python object arg = Py_BuildValue("(l)", (long int)inmsg_ptr); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create inmsg argument"); if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } // arc_dict is a borrowed reference PyObject *arc_dict = PyModule_GetDict(arc_module); if (arc_dict == NULL) { logger.msg(Arc::ERROR, "Cannot get dictionary of ARC module"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return make_fault(outmsg); } // arc_msg_klass is a borrowed reference PyObject *arc_msg_klass = PyDict_GetItemString(arc_dict, "SOAPMessage"); if (arc_msg_klass == NULL) { logger.msg(Arc::ERROR, "Cannot find ARC Message class"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return make_fault(outmsg); } PyObjectP py_inmsg(PyObject_CallObject(arc_msg_klass, arg)); if (!py_inmsg) { logger.msg(Arc::ERROR, "Cannot convert inmsg to Python object"); if (PyErr_Occurred()) PyErr_Print(); Py_DECREF(arg); return make_fault(outmsg); } Py_DECREF(arg); Arc::SOAPMessageP outmsg_ptr(outmsg); if(!outmsg_ptr) { logger.msg(Arc::ERROR, "Failed to create SOAP containers"); return make_fault(outmsg); } // Convert the outgoing message to a python object arg = Py_BuildValue("(l)", (long int)outmsg_ptr); if (arg == NULL) { logger.msg(Arc::ERROR, "Cannot create outmsg argument"); if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } PyObjectP py_outmsg(PyObject_CallObject(arc_msg_klass, arg)); if (!py_outmsg) { logger.msg(Arc::ERROR, "Cannot convert outmsg to Python object"); if (PyErr_Occurred())
PyErr_Print(); Py_DECREF(arg); return make_fault(outmsg); } Py_DECREF(arg); // Call the process method PyObjectP py_status(PyObject_CallMethod(object, (char*)"process", (char*)"(OO)", (PyObject*)py_inmsg, (PyObject*)py_outmsg)); if (!py_status) { if (PyErr_Occurred()) PyErr_Print(); return make_fault(outmsg); } MCC_Status *status_ptr2 = (MCC_Status *)extract_swig_wrappered_pointer(py_status); Arc::MCC_Status status; if(status_ptr2) status=(*status_ptr2); { // std::string str = (std::string)status; // std::cout << "status: " << str << std::endl; }; SOAPMessage *outmsg_ptr2 = (SOAPMessage *)extract_swig_wrappered_pointer(py_outmsg); if(outmsg_ptr2 == NULL) return make_fault(outmsg); SOAPEnvelope *p = outmsg_ptr2->Payload(); if(p == NULL) return make_fault(outmsg); { // std::string xml; // if(p) p->GetXML(xml); // std::cout << "XML: " << xml << std::endl; }; Arc::PayloadSOAP *pl = new Arc::PayloadSOAP(*p); { // std::string xml; // pl->GetXML(xml); // std::cout << "XML: " << xml << std::endl; }; outmsg.Payload(pl); return status; } } // namespace Arc nordugrid-arc-7.1.1/src/services/wrappers/python/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023205 xustar0030 mtime=1759498967.782577889 30 atime=1759498967.879493863 30 ctime=1759499030.986410577 nordugrid-arc-7.1.1/src/services/wrappers/python/Makefile.am0000644000175000002070000000117115067751327025107 0ustar00mockbuildmock00000000000000SUBDIRS = schema pkglib_LTLIBRARIES = libpythonservice.la libpythonservice_la_SOURCES = pythonwrapper.cpp pythonwrapper.h libpythonservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(PYTHON_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libpythonservice_la_LDFLAGS = -no-undefined -avoid-version -module libpythonservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(PYTHON_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DLOPEN_LIBS) nordugrid-arc-7.1.1/src/services/wrappers/python/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357023221 xustar0030 mtime=1759498991.316105414 30 atime=1759499019.549278989 30 ctime=1759499030.987724566 nordugrid-arc-7.1.1/src/services/wrappers/python/Makefile.in0000644000175000002070000010351715067751357025132 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers/python ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! 
-d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) am__DEPENDENCIES_1 = libpythonservice_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libpythonservice_la_OBJECTS = libpythonservice_la-pythonwrapper.lo libpythonservice_la_OBJECTS = $(am_libpythonservice_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libpythonservice_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libpythonservice_la_CXXFLAGS) $(CXXFLAGS) \ $(libpythonservice_la_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ ./$(DEPDIR)/libpythonservice_la-pythonwrapper.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libpythonservice_la_SOURCES) DIST_SOURCES = $(libpythonservice_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive 
am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ 
ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ 
OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ 
runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SUBDIRS = schema pkglib_LTLIBRARIES = libpythonservice.la libpythonservice_la_SOURCES = pythonwrapper.cpp pythonwrapper.h libpythonservice_la_CXXFLAGS = -I$(top_srcdir)/include \ $(PYTHON_CFLAGS) $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) libpythonservice_la_LDFLAGS = -no-undefined -avoid-version -module libpythonservice_la_LIBADD = \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(PYTHON_LIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(DLOPEN_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/python/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/python/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libpythonservice.la: 
$(libpythonservice_la_OBJECTS) $(libpythonservice_la_DEPENDENCIES) $(EXTRA_libpythonservice_la_DEPENDENCIES) $(AM_V_CXXLD)$(libpythonservice_la_LINK) -rpath $(pkglibdir) $(libpythonservice_la_OBJECTS) $(libpythonservice_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libpythonservice_la-pythonwrapper.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libpythonservice_la-pythonwrapper.lo: pythonwrapper.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpythonservice_la_CXXFLAGS) $(CXXFLAGS) -MT libpythonservice_la-pythonwrapper.lo -MD -MP -MF $(DEPDIR)/libpythonservice_la-pythonwrapper.Tpo -c -o libpythonservice_la-pythonwrapper.lo `test -f 'pythonwrapper.cpp' || echo '$(srcdir)/'`pythonwrapper.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libpythonservice_la-pythonwrapper.Tpo $(DEPDIR)/libpythonservice_la-pythonwrapper.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='pythonwrapper.cpp' object='libpythonservice_la-pythonwrapper.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libpythonservice_la_CXXFLAGS) $(CXXFLAGS) -c -o libpythonservice_la-pythonwrapper.lo `test -f 'pythonwrapper.cpp' || echo '$(srcdir)/'`pythonwrapper.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. 
# To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -f ./$(DEPDIR)/libpythonservice_la-pythonwrapper.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/libpythonservice_la-pythonwrapper.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--depfiles check check-am clean clean-generic clean-libtool \ clean-pkglibLTLIBRARIES cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibLTLIBRARIES \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am \ uninstall-pkglibLTLIBRARIES .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/wrappers/python/PaxHeaders/schema0000644000000000000000000000013115067751427022334 xustar0030 mtime=1759499031.018453263 29 atime=1759499034.76351017 30 ctime=1759499031.018453263 nordugrid-arc-7.1.1/src/services/wrappers/python/schema/0000755000175000002070000000000015067751427024314 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/wrappers/python/schema/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024445 xustar0030 mtime=1759498967.782672262 30 atime=1759498967.879493863 30 ctime=1759499031.017402231 nordugrid-arc-7.1.1/src/services/wrappers/python/schema/Makefile.am0000644000175000002070000000014615067751327026350 0ustar00mockbuildmock00000000000000arcschemadir = $(pkgdatadir)/schema arcschema_DATA = pythonwrapper.xsd EXTRA_DIST = $(arcschema_DATA) nordugrid-arc-7.1.1/src/services/wrappers/python/schema/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357024461 xustar0030 mtime=1759498991.347746298 30 atime=1759499019.568279277 30 ctime=1759499031.018409664 nordugrid-arc-7.1.1/src/services/wrappers/python/schema/Makefile.in0000644000175000002070000005105615067751357026372 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/wrappers/python/schema ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = 
$(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = 
@CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ 
PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = pythonwrapper.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am 
.SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/wrappers/python/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/wrappers/python/schema/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcschemadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcschemadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-arcschemaDATA install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-arcschemaDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/wrappers/python/schema/PaxHeaders/pythonwrapper.xsd0000644000000000000000000000013215067751327026053 xustar0030 mtime=1759498967.782895343 30 atime=1759498967.879493863 30 ctime=1759499031.019443962 nordugrid-arc-7.1.1/src/services/wrappers/python/schema/pythonwrapper.xsd0000644000175000002070000000124315067751327027755 0ustar00mockbuildmock00000000000000 Defines the full module name of the class which contains the service implementation. The full name should follow the rules of the Python 'import' command.
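The schema text above only constrains the configuration value: a dotted Python module path whose last component names the service class, resolvable by Python's import machinery. As an illustrative sketch only (not code from this package), splitting such a value, e.g. the hypothetical "myservices.echo.EchoService", into an import target and a class name comes down to cutting at the last dot:

#include <string>
#include <utility>

// Hypothetical helper, not part of the nordugrid-arc sources: split a
// Python-style dotted name ("pkg.mod.Class") into the module part
// ("pkg.mod") and the class/attribute part ("Class").
static std::pair<std::string,std::string> splitModuleName(const std::string& full) {
  std::string::size_type p = full.rfind('.');
  if(p == std::string::npos)
    return std::make_pair(std::string(), full); // bare class name, no package
  return std::make_pair(full.substr(0,p), full.substr(p+1));
}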
nordugrid-arc-7.1.1/src/services/wrappers/python/PaxHeaders/pythonwrapper.h0000644000000000000000000000013215067751327024244 xustar0030 mtime=1759498967.782672262 30 atime=1759498967.879493863 30 ctime=1759499030.991500142 nordugrid-arc-7.1.1/src/services/wrappers/python/pythonwrapper.h0000644000175000002070000000140015067751327026141 0ustar00mockbuildmock00000000000000#ifndef __ARC_SERVICE_PYTHON_WRAPPER_H__ #define __ARC_SERVICE_PYTHON_WRAPPER_H__ #include #include #include namespace Arc { class Service_PythonWrapper: public Arc::Service { protected: Arc::MCC_Status make_fault(Arc::Message& outmsg); static Arc::Logger logger; PyObject *arc_module; PyObject *module; PyObject *object; bool initialized; public: Service_PythonWrapper(Arc::Config *cfg, Arc::PluginArgument* parg); virtual ~Service_PythonWrapper(void); /** Service request processing routine */ virtual Arc::MCC_Status process(Arc::Message&, Arc::Message&); }; } // namespace Arc #endif // __ARC_SERVICE_PYTHON_WRAPPER_H__ nordugrid-arc-7.1.1/src/services/wrappers/python/PaxHeaders/README0000644000000000000000000000013215067751327022031 xustar0030 mtime=1759498967.782672262 30 atime=1759498967.879493863 30 ctime=1759499030.989113455 nordugrid-arc-7.1.1/src/services/wrappers/python/README0000644000175000002070000000005215067751327023730 0ustar00mockbuildmock00000000000000service which wraps python based services nordugrid-arc-7.1.1/src/services/PaxHeaders/a-rex0000644000000000000000000000013015067751426016722 xustar0029 mtime=1759499030.49044524 30 atime=1759499034.764510185 29 ctime=1759499030.49044524 nordugrid-arc-7.1.1/src/services/a-rex/0000755000175000002070000000000015067751426020703 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/PayloadFile.h0000644000000000000000000000013115067751327021342 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.348926308 nordugrid-arc-7.1.1/src/services/a-rex/PayloadFile.h0000644000175000002070000000777515067751327023265 0ustar00mockbuildmock00000000000000#ifndef __ARC_PAYLOADFILE_H__ #define __ARC_PAYLOADFILE_H__ #include #include #include #include namespace ARex { /** Implementation of PayloadRawInterface which provides access to ordinary file. Currently only read-only mode is supported. */ class PayloadFile: public Arc::PayloadRawInterface { protected: /* TODO: use system-independent file access */ int handle_; char* addr_; off_t size_; off_t start_; off_t end_; void SetRead(int h,Size_t start,Size_t end); public: /** Creates object associated with file for reading from it. Use end=-1 for full size. */ PayloadFile(const char* filename,Size_t start,Size_t end); PayloadFile(int h,Size_t start,Size_t end); /** Creates object associated with file for writing into it. Use size=-1 for undefined size. 
*/ //PayloadFile(const char* filename,Size_t size); virtual ~PayloadFile(void); virtual char operator[](Size_t pos) const; virtual char* Content(Size_t pos = -1); virtual Size_t Size(void) const; virtual char* Insert(Size_t pos = 0,Size_t size = 0); virtual char* Insert(const char* s,Size_t pos = 0,Size_t size = -1); virtual char* Buffer(unsigned int num); virtual Size_t BufferSize(unsigned int num) const; virtual Size_t BufferPos(unsigned int num) const; virtual bool Truncate(Size_t size); operator bool(void) { return (handle_ != -1); }; bool operator!(void) { return (handle_ == -1); }; }; class PayloadBigFile: public Arc::PayloadStream { private: static Size_t threshold_; off_t limit_; public: /** Creates object associated with file for reading from it */ PayloadBigFile(const char* filename,Size_t start,Size_t end); PayloadBigFile(int h,Size_t start,Size_t end); /** Creates object associated with file for writing into it. Use size=-1 for undefined size. */ //PayloadBigFile(const char* filename,Size_t size); virtual ~PayloadBigFile(void); virtual Size_t Pos(void) const; virtual Size_t Size(void) const; virtual Size_t Limit(void) const; virtual bool Get(char* buf,int& size); operator bool(void) { return (handle_ != -1); }; bool operator!(void) { return (handle_ == -1); }; static Size_t Threshold(void) { return threshold_; }; static void Threshold(Size_t t) { if(t > 0) threshold_=t; }; }; class PayloadFAFile: public Arc::PayloadStreamInterface { protected: Arc::FileAccess* handle_; off_t limit_; public: /** Creates object associated with file for reading from it */ PayloadFAFile(Arc::FileAccess* h,Size_t start,Size_t end); virtual ~PayloadFAFile(void); virtual Size_t Pos(void) const; virtual Size_t Size(void) const; virtual Size_t Limit(void) const; virtual bool Get(char* buf,int& size); virtual bool Get(std::string& buf) { char cbuf[1024]; int size = sizeof(cbuf); if(!Get(cbuf,size)) return false; buf.assign(cbuf,size); return true; }; virtual std::string Get(void) { std::string buf; Get(buf); return buf; }; virtual bool Put(const char* buf,Size_t size) { return false; }; virtual bool Put(const std::string& buf) { return Put(buf.c_str(),buf.length()); }; virtual bool Put(const char* buf) { return Put(buf,buf?strlen(buf):0); }; virtual int Timeout(void) const { return 0; }; virtual void Timeout(int to) { }; operator bool(void) { return (handle_ != NULL); }; bool operator!(void) { return (handle_ == NULL); }; }; // For ranges start is inclusive and end is exclusive Arc::MessagePayload* newFileRead(const char* filename,Arc::PayloadRawInterface::Size_t start = 0,Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1)); Arc::MessagePayload* newFileRead(int h,Arc::PayloadRawInterface::Size_t start = 0,Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1)); Arc::MessagePayload* newFileRead(Arc::FileAccess* h,Arc::PayloadRawInterface::Size_t start = 0,Arc::PayloadRawInterface::Size_t end = (Arc::PayloadRawInterface::Size_t)(-1)); } // namespace ARex #endif /* __ARC_PAYLOADFILE_H__ */ nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327021034 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.310563865 nordugrid-arc-7.1.1/src/services/a-rex/Makefile.am0000644000175000002070000000522215067751327022740 0ustar00mockbuildmock00000000000000if INTERNAL_ENABLED INTERNAL = internaljobplugin else INTERNAL = endif SUBDIRS = delegation grid-manager infoproviders lrms schema 
$(INTERNAL) rte rest DIST_SUBDIRS = delegation grid-manager infoproviders lrms schema internaljobplugin rte rest pkglib_LTLIBRARIES = libarex.la noinst_PROGRAMS = test_cache_check if SYSV_SCRIPTS_ENABLED AREX_SCRIPT = arc-arex arc-arex-ws else AREX_SCRIPT = endif initd_SCRIPTS = $(AREX_SCRIPT) if SYSTEMD_UNITS_ENABLED AREX_UNIT = arc-arex.service arc-arex-ws.service else AREX_UNIT = endif units_DATA = $(AREX_UNIT) pkgdata_SCRIPTS = arc-arex-start arc-arex-ws-start perferator update-controldir sbin_SCRIPTS = a-rex-backtrace-collect man_MANS = a-rex-backtrace-collect.8 EXTRA_DIST = arc.zero.conf GRIDMANAGER_LIBS = grid-manager/libgridmanager.la delegation/libdelegation.la rest/libarexrest.la libarex_la_SOURCES = arex.cpp authop.cpp job.cpp \ create_activity.cpp \ change_activity_status.cpp \ update_credentials.cpp faults.cpp \ get.cpp put.cpp PayloadFile.cpp FileChunks.cpp \ information_collector.cpp cachecheck.cpp tools.cpp \ arex.h job.h PayloadFile.h FileChunks.h tools.h libarex_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) # Needs real cleaning with respect to dependencies libarex_la_LIBADD = \ $(GRIDMANAGER_LIBS) \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws-addressing/libarcwsaddressing.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/otokens/libarcotokens.la $(top_builddir)/src/hed/libs/common/libarccommon.la libarex_la_LDFLAGS = -no-undefined -avoid-version -module test_cache_check_SOURCES = test_cache_check.cpp test_cache_check_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_cache_check_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) install-data-local: $(MKDIR_P) "$(DESTDIR)$(sysconfdir)" if test !
-e $(DESTDIR)$(sysconfdir)/arc.conf; then $(INSTALL_DATA) $(srcdir)/arc.zero.conf $(DESTDIR)$(sysconfdir)/arc.conf; fi uninstall-local: rm -f $(DESTDIR)$(sysconfdir)/arc.conf nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/tools.cpp0000644000000000000000000000013215067751327020645 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499029.344347441 nordugrid-arc-7.1.1/src/services/a-rex/tools.cpp0000644000175000002070000002307315067751327022554 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include "tools.h" namespace ARex { void convertActivityStatus(const std::string& gm_state,std::string& bes_state,std::string& arex_state,bool failed,bool pending) { if(gm_state == "ACCEPTED") { bes_state="Pending"; arex_state="Accepted"; } else if(gm_state == "PREPARING") { bes_state="Running"; arex_state=(!pending)?"Preparing":"Prepared"; } else if(gm_state == "SUBMIT") { bes_state="Running"; arex_state="Submitting"; } else if(gm_state == "INLRMS") { bes_state="Running"; arex_state=(!pending)?"Executing":"Executed"; } else if(gm_state == "FINISHING") { bes_state="Running"; arex_state="Finishing"; } else if(gm_state == "FINISHED") { if(!failed) { bes_state="Finished"; arex_state="Finished"; } else { bes_state="Failed"; arex_state="Failed"; }; } else if(gm_state == "DELETED") { // AFAIR failed is not available anymore. bes_state=(!failed)?"Finished":"Failed"; arex_state="Deleted"; } else if(gm_state == "CANCELING") { bes_state="Running"; arex_state="Killing"; }; } Arc::XMLNode addActivityStatus(Arc::XMLNode pnode,const std::string& gm_state,Arc::XMLNode glue_xml,bool failed,bool pending) { std::string bes_state(""); std::string arex_state(""); std::string glue_state(""); convertActivityStatus(gm_state,bes_state,arex_state,failed,pending); Arc::XMLNode state = pnode.NewChild("bes-factory:ActivityStatus"); state.NewAttribute("state")=bes_state; state.NewChild("a-rex:State")=arex_state; if(pending) state.NewChild("a-rex:State")="Pending"; if((bool)glue_xml) { Arc::XMLNode state_node = glue_xml["State"]; for(;(bool)state_node;++state_node) { std::string state = (std::string)state_node; if(state.empty()) continue; // Look for nordugrid prefix if(::strncmp("nordugrid:",state.c_str(),10) == 0) { // Remove prefix state.erase(0,10); glue_state = state; }; }; }; if(!glue_state.empty()) { std::string::size_type p = glue_state.find(':'); if(p != std::string::npos) { if(glue_state.substr(0,p) == "INLRMS") { // Extract state of batch system state.NewChild("a-rex:LRMSState")=glue_state.substr(p+1); }; }; state.NewChild("glue:State")=glue_state; }; return state; } // primary: // accepted|preprocessing| // processing|processing-accepting|processing-queued|processing-running| // postprocessing|terminal // attribute: // validating| // server-paused| // client-paused| // client-stagein-possible| // client-stageout-possible| // provisioning| // deprovisioning| // server-stagein| // server-stageout| // batch-suspend| // app-running| // preprocessing-cancel| // processing-cancel| // postprocessing-cancel| // validation-failure| // preprocessing-failure| // processing-failure| // postprocessing-failure| // app-failure| // expired void convertActivityStatusES(const std::string& gm_state,std::string& primary_state,std::list<std::string>& state_attributes,bool failed,bool pending,const std::string& failedstate,const std::string& failedcause) { bool failed_set = false; bool canceled = (failedcause == "client"); primary_state = ""; if(gm_state == "ACCEPTED") {
primary_state="accepted"; state_attributes.push_back("client-stagein-possible"); } else if(gm_state == "PREPARING") { primary_state="preprocessing"; state_attributes.push_back("client-stagein-possible"); state_attributes.push_back("server-stagein"); } else if(gm_state == "SUBMIT") { primary_state="processing-accepting"; } else if(gm_state == "INLRMS") { // Reporting job state as not started executing yet. // Because we have no more detailed information this // is probably safest solution. primary_state="processing-queued"; } else if(gm_state == "FINISHING") { primary_state="postprocessing"; state_attributes.push_back("client-stageout-possible"); state_attributes.push_back("server-stageout"); } else if(gm_state == "FINISHED") { primary_state="terminal"; state_attributes.push_back("client-stageout-possible"); } else if(gm_state == "DELETED") { primary_state="terminal"; state_attributes.push_back("expired"); } else if(gm_state == "CANCELING") { primary_state="processing"; }; if(failedstate == "ACCEPTED") { state_attributes.push_back("validation-failure"); failed_set = true; } else if(failedstate == "PREPARING") { state_attributes.push_back(canceled?"preprocessing-cancel":"preprocessing-failure"); failed_set = true; } else if(failedstate == "SUBMIT") { state_attributes.push_back(canceled?"processing-cancel":"processing-failure"); failed_set = true; } else if(failedstate == "INLRMS") { state_attributes.push_back(canceled?"processing-cancel":"processing-failure"); // Or maybe APP-FAILURE failed_set = true; } else if(failedstate == "FINISHING") { state_attributes.push_back(canceled?"postprocessing-cancel":"postprocessing-failure"); failed_set = true; } else if(failedstate == "FINISHED") { } else if(failedstate == "DELETED") { } else if(failedstate == "CANCELING") { }; if(primary_state == "terminal") { if(failed && !failed_set) { // Must put something to mark job failed state_attributes.push_back("app-failure"); }; }; if(!primary_state.empty()) { if(pending) state_attributes.push_back("server-paused"); }; } /* JobIDGeneratorARC::JobIDGeneratorARC(const std::string& endpoint):endpoint_(endpoint) { } void JobIDGeneratorARC::SetLocalID(const std::string& id) { id_ = id; } Arc::XMLNode JobIDGeneratorARC::GetGlobalID(Arc::XMLNode& pnode) { Arc::XMLNode node; if(!pnode) { Arc::NS ns; ns["bes-factory"]="http://schemas.ggf.org/bes/2006/08/bes-factory"; ns["a-rex"]="http://www.nordugrid.org/schemas/a-rex"; Arc::XMLNode(ns,"bes-factory:ActivityIdentifier").Exchange(pnode); node = pnode; } else { node = pnode.NewChild("bes-factory:ActivityIdentifier"); }; Arc::WSAEndpointReference identifier(node); // Make job's ID identifier.Address(endpoint_); // address of service identifier.ReferenceParameters().NewChild("a-rex:JobID")=id_; identifier.ReferenceParameters().NewChild("a-rex:JobSessionDir")=endpoint_+"/"+id_; return node; } std::string JobIDGeneratorARC::GetGlobalID(void) { Arc::XMLNode node; GetGlobalID(node); std::string jobid; node.GetDoc(jobid); std::string::size_type p = 0; // squeeze into 1 line while((p=jobid.find_first_of("\r\n",p)) != std::string::npos) jobid.replace(p,1," "); return jobid; } std::string JobIDGeneratorARC::GetManagerURL(void) { return endpoint_; } std::string JobIDGeneratorARC::GetJobURL(void) { return endpoint_ + "/" + id_; } std::string JobIDGeneratorARC::GetInterface(void) { return "org.nordugrid.xbes"; } std::string JobIDGeneratorARC::GetHostname(void) { return Arc::URL(endpoint_).Host(); } */ JobIDGeneratorES::JobIDGeneratorES(const std::string& endpoint):endpoint_(endpoint) { 
} void JobIDGeneratorES::SetLocalID(const std::string& id) { id_ = id; } Arc::XMLNode JobIDGeneratorES::GetGlobalID(Arc::XMLNode& pnode) { Arc::XMLNode node; if(!pnode) { Arc::NS ns; ns["estypes"]="http://www.eu-emi.eu/es/2010/12/types"; Arc::XMLNode(ns,"estypes:ActivityID").Exchange(pnode); node = pnode; } else { node = pnode.NewChild("estypes:ActivityID"); }; node = id_; return node; } std::string JobIDGeneratorES::GetGlobalID(void) { return id_; } std::string JobIDGeneratorES::GetManagerURL(void) { return endpoint_; } std::string JobIDGeneratorES::GetJobURL(void) { return endpoint_ + "/" + id_; } std::string JobIDGeneratorES::GetInterface(void) { return "org.ogf.glue.emies.activitycreation"; } std::string JobIDGeneratorES::GetHostname(void) { return Arc::URL(endpoint_).Host(); } std::string JobIDGeneratorREST::GetInterface(void) { return "org.nordugrid.arcrest"; } JobIDGeneratorINTERNAL::JobIDGeneratorINTERNAL(const std::string& endpoint):endpoint_(endpoint) { } void JobIDGeneratorINTERNAL::SetLocalID(const std::string& id) { id_ = id; } Arc::XMLNode JobIDGeneratorINTERNAL::GetGlobalID(Arc::XMLNode& pnode) { // TODO: make something more sensible for the INTERNAL plugin case Arc::XMLNode node; if(!pnode) { Arc::NS ns; ns["estypes"]="http://www.eu-emi.eu/es/2010/12/types"; Arc::XMLNode(ns,"estypes:ActivityID").Exchange(pnode); node = pnode; } else { node = pnode.NewChild("estypes:ActivityID"); }; node = id_; return node; } std::string JobIDGeneratorINTERNAL::GetGlobalID(void) { return id_; } std::string JobIDGeneratorINTERNAL::GetManagerURL(void) { return ""; // controldir? } std::string JobIDGeneratorINTERNAL::GetJobURL(void) { return ""; // job state file? } std::string JobIDGeneratorINTERNAL::GetInterface(void) { return "org.nordugrid.internal"; } std::string JobIDGeneratorINTERNAL::GetHostname(void) { return Arc::URL(endpoint_).Host(); } } nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arex.h0000644000000000000000000000013115067751327020110 xustar0030 mtime=1759498967.750491903 29 atime=1759498967.86149359 30 ctime=1759499029.345843116 nordugrid-arc-7.1.1/src/services/a-rex/arex.h0000644000175000002070000002231315067751327022014 0ustar00mockbuildmock00000000000000#ifndef __ARC_AREX_H__ #define __ARC_AREX_H__ #include #include #include #include #include #include #include #include "FileChunks.h" #include "grid-manager/GridManager.h" #include "delegation/DelegationStores.h" #include "grid-manager/conf/GMConfig.h" #include "rest/rest.h" #include "job.h" namespace ARex { class ARexGMConfig; class CountedResourceLock; class CountedResource { friend class CountedResourceLock; public: CountedResource(int maxconsumers = -1); ~CountedResource(void); void MaxConsumers(int maxconsumers); private: std::condition_variable cond_; std::mutex lock_; int limit_; int count_; void Acquire(void); void Release(void); }; class CountedResourceLock { private: CountedResource& r_; public: CountedResourceLock(CountedResource& resource):r_(resource) { r_.Acquire(); }; ~CountedResourceLock(void) { r_.Release(); }; }; class OptimizedInformationContainer: public Arc::InformationContainer { private: bool parse_xml_; std::string filename_; int handle_; Arc::XMLNode doc_; std::mutex olock_; public: OptimizedInformationContainer(bool parse_xml = true); ~OptimizedInformationContainer(void); int OpenDocument(void); void Assign(const std::string& xml,const std::string filename = ""); }; #define AREXOP(NAME) Arc::MCC_Status NAME(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) class ARexService: public Arc::Service {
private: static void gm_threads_starter(void* arg); void gm_threads_starter(); Arc::MCC_Status cache_get(Arc::Message& outmsg, const std::string& subpath, off_t range_start, off_t range_end, ARexGMConfig& config, bool no_content); protected: Arc::ThreadRegistry thread_count_; static Arc::NS ns_; Arc::Logger logger_; DelegationStores delegation_stores_; OptimizedInformationContainer infodoc_; CountedResource infolimit_; CountedResource beslimit_; CountedResource datalimit_; std::string endpoint_; bool publishstaticinfo_; std::string uname_; std::string common_name_; std::string long_description_; std::string os_name_; std::string gmrun_; unsigned int infoprovider_wakeup_period_; unsigned int all_jobs_count_; //std::mutex glue_states_lock_; //std::map glue_states_; FileChunksList files_chunks_; GMConfig config_; GridManager* gm_; ARexRest rest_; // A-REX operations AREXOP(CacheCheck); /** Update credentials for specified job through A-REX own interface */ Arc::MCC_Status UpdateCredentials(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& credentials); // HTTP operations Arc::MCC_Status GetJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status GetLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status GetInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status GetInfo(Arc::Message& inmsg,Arc::Message& outmsg); Arc::MCC_Status GetNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status GetDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status GetCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status HeadJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status HeadLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status HeadInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status HeadInfo(Arc::Message& inmsg,Arc::Message& outmsg); Arc::MCC_Status HeadNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status HeadDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status HeadCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status PutJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status PutLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status PutInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status PutNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status PutDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status PutCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status 
DeleteJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status DeleteLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status DeleteInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status DeleteNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); Arc::MCC_Status DeleteDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath); Arc::MCC_Status DeleteCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath); // A-REX faults void UnknownActivityIdentifierFault(Arc::XMLNode fault,const std::string& message); void UnknownActivityIdentifierFault(Arc::SOAPFault& fault,const std::string& message); void InvalidRequestMessageFault(Arc::XMLNode fault,const std::string& element,const std::string& message); void InvalidRequestMessageFault(Arc::SOAPFault& fault,const std::string& element,const std::string& message); // EMI ES faults #define ES_MSG_FAULT_HEAD(NAME) \ void NAME(Arc::XMLNode fault,const std::string& message,const std::string& desc = ""); \ void NAME(Arc::SOAPFault& fault,const std::string& message,const std::string& desc = ""); #define ES_SIMPLE_FAULT_HEAD(NAME) \ void NAME(Arc::XMLNode fault,const std::string& message = "",const std::string& desc = ""); \ void NAME(Arc::SOAPFault& fault,const std::string& message = "",const std::string& desc = ""); ES_MSG_FAULT_HEAD(ESInternalBaseFault) void ESVectorLimitExceededFault(Arc::XMLNode fault,unsigned long limit,const std::string& message = "",const std::string& desc = ""); void ESVectorLimitExceededFault(Arc::SOAPFault& fault,unsigned long limit,const std::string& message = "",const std::string& desc = ""); ES_SIMPLE_FAULT_HEAD(ESAccessControlFault); ES_SIMPLE_FAULT_HEAD(ESUnsupportedCapabilityFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityDescriptionSemanticFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityDescriptionFault) ES_SIMPLE_FAULT_HEAD(ESNotSupportedQueryDialectFault) ES_SIMPLE_FAULT_HEAD(ESNotValidQueryStatementFault) ES_SIMPLE_FAULT_HEAD(ESUnknownQueryFault) ES_SIMPLE_FAULT_HEAD(ESInternalResourceInfoFault) ES_SIMPLE_FAULT_HEAD(ESResourceInfoNotFoundFault) ES_SIMPLE_FAULT_HEAD(ESUnableToRetrieveStatusFault) ES_SIMPLE_FAULT_HEAD(ESUnknownAttributeFault) ES_SIMPLE_FAULT_HEAD(ESOperationNotAllowedFault) ES_SIMPLE_FAULT_HEAD(ESActivityNotFoundFault) ES_SIMPLE_FAULT_HEAD(ESInternalNotificationFault) ES_SIMPLE_FAULT_HEAD(ESOperationNotPossibleFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityStateFault) ES_SIMPLE_FAULT_HEAD(ESInvalidActivityLimitFault) ES_SIMPLE_FAULT_HEAD(ESInvalidParameterFault) Arc::MCC_Status preProcessSecurity(Arc::Message& inmsg,Arc::Message& outmsg,Arc::SecAttr* sattr,bool is_soap,ARexConfigContext*& config,bool& passed); Arc::MCC_Status postProcessSecurity(Arc::Message& outmsg,bool& passed); public: ARexService(Arc::Config *cfg,Arc::PluginArgument *parg); virtual ~ARexService(void); virtual Arc::MCC_Status process(Arc::Message& inmsg,Arc::Message& outmsg); // HTTP paths static char const* InfoPath; static char const* LogsPath; static char const* NewPath; static char const* DelegationPath; static char const* CachePath; static char const* RestPath; // Convenience methods static Arc::MCC_Status make_empty_response(Arc::Message& outmsg); static Arc::MCC_Status 
make_fault(Arc::Message& outmsg); static Arc::MCC_Status make_http_fault(Arc::Message& outmsg,int code,const char* resp); static Arc::MCC_Status make_soap_fault(Arc::Message& outmsg,const char* resp = NULL); static Arc::MCC_Status extract_content(Arc::Message& inmsg, std::string& content,uint32_t size_limit = 0); int OpenInfoDocument(void); void InformationCollector(void); virtual std::string getID(); void StopChildThreads(void); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/tools.h0000644000000000000000000000013215067751327020312 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499029.352044508 nordugrid-arc-7.1.1/src/services/a-rex/tools.h0000644000175000002070000000546215067751327022223 0ustar00mockbuildmock00000000000000#ifndef __ARC_AREX_TOOLS_H__ #define __ARC_AREX_TOOLS_H__ #include #include namespace ARex { void convertActivityStatus(const std::string& gm_state,std::string& bes_state,std::string& arex_state,bool failed = false,bool pending = false); void convertActivityStatusES(const std::string& gm_state,std::string& primary_state,std::list& state_attributes,bool failed,bool pending,const std::string& failedstate,const std::string& failedcause); Arc::XMLNode addActivityStatus(Arc::XMLNode pnode,const std::string& gm_state,Arc::XMLNode glue_xml = Arc::XMLNode(),bool failed = false,bool pending = false); class JobIDGenerator { public: JobIDGenerator() { }; virtual ~JobIDGenerator() { }; virtual void SetLocalID(const std::string& id) = 0; virtual Arc::XMLNode GetGlobalID(Arc::XMLNode& pnode) = 0; virtual std::string GetGlobalID(void) = 0; virtual std::string GetJobURL(void) = 0; virtual std::string GetManagerURL(void) = 0; virtual std::string GetHostname(void) = 0; virtual std::string GetInterface(void) = 0; }; class JobIDGeneratorES:public JobIDGenerator { public: JobIDGeneratorES(const std::string& endpoint); virtual ~JobIDGeneratorES() { }; virtual void SetLocalID(const std::string& id); virtual Arc::XMLNode GetGlobalID(Arc::XMLNode& pnode); virtual std::string GetGlobalID(void); virtual std::string GetJobURL(void); virtual std::string GetManagerURL(void); virtual std::string GetHostname(void); virtual std::string GetInterface(void); private: std::string endpoint_; std::string id_; }; class JobIDGeneratorINTERNAL:public JobIDGenerator { public: JobIDGeneratorINTERNAL(const std::string& endpoint); virtual ~JobIDGeneratorINTERNAL() { }; virtual void SetLocalID(const std::string& id); virtual Arc::XMLNode GetGlobalID(Arc::XMLNode& pnode); virtual std::string GetGlobalID(void); virtual std::string GetJobURL(void); virtual std::string GetManagerURL(void); virtual std::string GetHostname(void); virtual std::string GetInterface(void); private: std::string endpoint_; std::string id_; }; class JobIDGeneratorREST:public JobIDGeneratorES { public: JobIDGeneratorREST(const std::string& endpoint):JobIDGeneratorES(endpoint) {}; virtual std::string GetInterface(void); }; Arc::XMLNode addJobID(Arc::XMLNode& pnode,const std::string& endpoint,const std::string& id); std::string makeJobID(const std::string& endpoint,const std::string& id); Arc::XMLNode addJobIDES(Arc::XMLNode& pnode,const std::string& endpoint,const std::string& id); std::string makeJobIDES(const std::string& endpoint,const std::string& id); Arc::XMLNode addJobIDINTERNAL(Arc::XMLNode& pnode,const std::string& endpoint,const std::string& id); std::string makeJobIDINTERNAL(const std::string& endpoint,const std::string& id); } #endif // __ARC_AREX_TOOLS_H__ 
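The declarations in tools.h above (implemented in tools.cpp earlier in this directory) can be exercised stand-alone. A minimal sketch, assuming only what the header declares; the endpoint URL and job ID below are made-up example values, and the program must be linked against the A-REX/ARC libraries that provide the implementations:

#include <iostream>
#include <string>
#include "tools.h" // declares ARex::convertActivityStatus() and ARex::JobIDGeneratorES

int main() {
  // Map an internal grid-manager state name to the BES and A-REX state names
  // (failed and pending default to false per the declaration in tools.h).
  std::string bes_state, arex_state;
  ARex::convertActivityStatus("INLRMS", bes_state, arex_state);
  std::cout << bes_state << " / " << arex_state << std::endl; // prints "Running / Executing"

  // Build job identifiers relative to a hypothetical service endpoint.
  ARex::JobIDGeneratorES gen("https://example.org:443/arex");
  gen.SetLocalID("12345");
  std::cout << gen.GetJobURL() << std::endl;    // endpoint + "/" + id
  std::cout << gen.GetInterface() << std::endl; // "org.ogf.glue.emies.activitycreation"
  return 0;
}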
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/get.cpp0000644000000000000000000000013215067751327020264 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.336688083 nordugrid-arc-7.1.1/src/services/a-rex/get.cpp0000644000175000002070000005331715067751327022177 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "PayloadFile.h" #include "job.h" #include "arex.h" #define MAX_CHUNK_SIZE (10*1024*1024) namespace ARex { static Arc::PayloadRaw* newFileInfo(int handle = -1) { Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(handle != -1) { struct stat st; if(buf && (::fstat(handle,&st) == 0)) buf->Truncate(st.st_size); ::close(handle); } else { if(buf) buf->Truncate(0); } return buf; } static Arc::PayloadRaw* newFileInfo(Arc::FileAccess& file) { struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && (file.fa_fstat(st))) buf->Truncate(st.st_size); return buf; } static void ExtractRange(Arc::Message& inmsg, off_t& range_start, off_t& range_end) { range_start = 0; range_end = (off_t)(-1); { std::string val; val=inmsg.Attributes()->get("HTTP:RANGESTART"); if(!val.empty()) { // Negative ranges not supported if(!Arc::stringto(val,range_start)) { range_start=0; } else { val=inmsg.Attributes()->get("HTTP:RANGEEND"); if(!val.empty()) { if(!Arc::stringto(val,range_end)) { range_end=(off_t)(-1); } else { // Rest of code here treats end of range as exclusive // While HTTP ranges are inclusive ++range_end; }; }; }; }; }; } // -------------------------------------------------------------------------------------------------------------- static Arc::MCC_Status GetJobsList(Arc::Message& outmsg,ARexGMConfig& config,Arc::Logger& logger) { std::string html; html="\r\n\r\nARex: Jobs list\r\n\r\n\r\n
      \r\n"; std::list jobs = ARexJob::Jobs(config,logger); for(std::list::iterator job = jobs.begin();job!=jobs.end();++job) { std::string line = "
    • job "; line+=(*job); line+=""; line+=" logs\r\n"; html+=line; }; html+="
    \r\n"; // Service description access html+="SERVICE DESCRIPTION"; html+="\r\n"; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(html.c_str(),0,html.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status GetFilesList(Arc::Message& outmsg,ARexGMConfig& config, Arc::FileAccess& dir,std::string const& baseurl,std::string const& basepath, Arc::Logger& logger) { std::string html; html="\r\n\r\nARex: Job\r\n\r\n\r\n
      \r\n"; for(;;) { std::string file; if(!dir.fa_readdir(file)) break; if(file == ".") continue; if(file == "..") continue; std::string fpath = basepath+"/"+file; struct stat st; if(lstat(fpath.c_str(),&st) == 0) { if(S_ISREG(st.st_mode)) { std::string line = "
    • file "; line+=file; line+=" - "+Arc::tostring(st.st_size)+" bytes"+"\r\n"; html+=line; } else if(S_ISDIR(st.st_mode)) { std::string line = "
    • dir "; line+=file; line+="\r\n"; html+=line; }; } else { std::string line = "
    • unknown "; line+=file; line+="\r\n"; html+=line; }; }; // Add virtual logs folder /* if((hpath.empty()) && (!joblog.empty())) { std::string line = "
    • dir "; line+=joblog; line+=" - log directory\r\n"; html+=line; }; */ html+="
    \r\n\r\n"; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(html.c_str(),0,html.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); } // -------------------------------------------------------------------------------------------------------------- Arc::MCC_Status ARexService::GetJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; if(id.empty()) { // Not a specific job - generate page with list of jobs return GetJobsList(outmsg,config,logger_); } ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Get: there is no job %s - %s", id, job.Failure()); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; std::string hpath = Arc::trim(subpath,"/"); // prepare clean sub-path std::string joblog = job.LogDir(); if(!joblog.empty()) { if((strncmp(joblog.c_str(),hpath.c_str(),joblog.length()) == 0) && ((hpath[joblog.length()] == '/') || (hpath[joblog.length()] == '\0'))) { hpath.erase(0,joblog.length()+1); return GetLogs(inmsg,outmsg,config,id,hpath); }; }; // File or folder Arc::FileAccess* dir = job.OpenDir(subpath); if(dir) { // Directory - html with file list std::string dirurl = config.Endpoint()+"/"+id; if(!hpath.empty()) dirurl+="/"+hpath; std::string dirpath = job.GetFilePath(hpath); Arc::MCC_Status r = GetFilesList(outmsg,config,*dir,dirurl,dirpath,logger_); dir->fa_closedir(); Arc::FileAccess::Release(dir); return r; }; Arc::FileAccess* file = job.OpenFile(hpath,true,false); if(file) { // File or similar off_t range_start; off_t range_end; ExtractRange(inmsg, range_start, range_end); Arc::MessagePayload* h = newFileRead(file,range_start,range_end); if(!h) { file->fa_close(); Arc::FileAccess::Release(file); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; outmsg.Payload(h); outmsg.Attributes()->set("HTTP:content-type","application/octet-stream"); return Arc::MCC_Status(Arc::STATUS_OK); }; // Can't process this path // offset=0; size=0; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::GetLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; if(id.empty()) { // Not a specific job - not expected return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Get: there is no job %s - %s", id, job.Failure()); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; std::string hpath = Arc::trim(subpath,"/"); // prepare clean sub-path if(hpath.empty()) { std::list logs = job.LogFiles(); std::string html; html="\r\n\r\nARex: Job Logs\r\n\r\n\r\n
      \r\n"; for(std::list::iterator l = logs.begin();l != logs.end();++l) { if(strncmp(l->c_str(),"proxy",5) == 0) continue; std::string line = "
    • file "; line+=*l; line+=" - log file\r\n"; html+=line; }; html+="
    \r\n\r\n"; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(html.c_str(),0,html.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); } else { if(hpath != "proxy") { int file = job.OpenLogFile(hpath); if(file != -1) { off_t range_start; off_t range_end; ExtractRange(inmsg, range_start, range_end); Arc::MessagePayload* h = newFileRead(file,range_start,range_end); if(h) { outmsg.Payload(h); outmsg.Attributes()->set("HTTP:content-type","text/plain"); return Arc::MCC_Status(Arc::STATUS_OK); } else { ::close(file); }; }; }; }; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::GetInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!subpath.empty()) return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); return GetInfo(inmsg, outmsg); } Arc::MCC_Status ARexService::GetInfo(Arc::Message& inmsg,Arc::Message& outmsg) { int h = OpenInfoDocument(); if(h == -1) return Arc::MCC_Status(); Arc::MessagePayload* payload = newFileRead(h); if(!payload) { ::close(h); return Arc::MCC_Status(); }; outmsg.Payload(payload); outmsg.Attributes()->set("HTTP:content-type","text/xml"); return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::GetNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::GetCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; off_t range_start = 0; off_t range_end = (off_t)(-1); ExtractRange(inmsg, range_start, range_end); return cache_get(outmsg, subpath, range_start, range_end, config, false); } // -------------------------------------------------------------------------------------------------------------- Arc::MCC_Status ARexService::HeadLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(id.empty()) { // Not a specific job - not expected return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Get: there is no job %s - %s", id, job.Failure()); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; std::string hpath = Arc::trim(subpath,"/"); // prepare clean sub-path if(hpath.empty()) { Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Truncate(0); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); } else { int file = job.OpenLogFile(hpath); if(file != -1) { struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && (::fstat(file,&st) == 0)) buf->Truncate(st.st_size); ::close(file); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","text/plain"); return Arc::MCC_Status(Arc::STATUS_OK); }; }; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::HeadInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!subpath.empty()) return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); return HeadInfo(inmsg, outmsg); } Arc::MCC_Status ARexService::HeadInfo(Arc::Message& inmsg,Arc::Message& outmsg) { int h = OpenInfoDocument(); if(h == 
-1) return Arc::MCC_Status(); outmsg.Payload(newFileInfo(h)); outmsg.Attributes()->set("HTTP:content-type","text/xml"); return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::HeadNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::HeadCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; off_t range_start = 0; off_t range_end = (off_t)(-1); return cache_get(outmsg, subpath, range_start, range_end, config, true); } Arc::MCC_Status ARexService::HeadJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; if(id.empty()) { // Not a specific job - page with list of jobs outmsg.Payload(newFileInfo()); outmsg.Attributes()->set("HTTP:content-type","text/html"); return Arc::MCC_Status(Arc::STATUS_OK); } ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "Head: there is no job %s - %s", id, job.Failure()); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); }; std::string hpath = Arc::trim(subpath,"/"); // prepare clean sub-path std::string joblog = job.LogDir(); if(!joblog.empty()) { if((strncmp(joblog.c_str(),hpath.c_str(),joblog.length()) == 0) && ((hpath[joblog.length()] == '/') || (hpath[joblog.length()] == '\0'))) { hpath.erase(0,joblog.length()+1); return HeadLogs(inmsg,outmsg,config,id,hpath); }; }; // File or folder Arc::FileAccess* dir = job.OpenDir(subpath); if(dir) { // Directory - html with file list outmsg.Payload(newFileInfo()); outmsg.Attributes()->set("HTTP:content-type","text/html"); dir->fa_closedir(); Arc::FileAccess::Release(dir); return Arc::MCC_Status(Arc::STATUS_OK); }; Arc::FileAccess* file = job.OpenFile(hpath,true,false); if(file) { // File or similar outmsg.Payload(newFileInfo(*file)); file->fa_close(); Arc::FileAccess::Release(file); outmsg.Attributes()->set("HTTP:content-type","application/octet-stream"); return Arc::MCC_Status(Arc::STATUS_OK); }; // Can't process this path // offset=0; size=0; return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } // ------------------------------------------------------------------------------------------------- static bool cache_get_allowed(const std::string& url, ARexGMConfig& config, Arc::Logger& logger) { // Extract information from credentials std::string dn; // DN of credential std::string vo; // Assuming only one VO std::list voms; // VOMS attributes for (std::list::const_iterator a = config.beginAuth(); a!=config.endAuth(); ++a) { if (*a) { Arc::SecAttr* sattr = (*a)->get("TLS"); if (!sattr) continue; dn = sattr->get("IDENTITY"); vo = sattr->get("VO"); voms = sattr->getAll("VOMS"); break; } } // At least DN should be found. VOMS info may not be present. 
if (dn.empty()) { logger.msg(Arc::ERROR, "Failed to extract credential information"); return false; } logger.msg(Arc::DEBUG, "Checking cache permissions: DN: %s", dn); logger.msg(Arc::DEBUG, "Checking cache permissions: VO: %s", vo); for (std::list::const_iterator att = voms.begin(); att != voms.end(); ++att) { logger.msg(Arc::DEBUG, "Checking cache permissions: VOMS attr: %s", *att); } // Cache configuration specifies URL regexps and a certificate attribute and // value. Go through looking for a match. for (std::list::const_iterator access = config.GmConfig().CacheParams().getCacheAccess().begin(); access != config.GmConfig().CacheParams().getCacheAccess().end(); ++access) { if (access->regexp.match(url)) { if (Arc::lower(access->cred_type) == "dn") { if (access->cred_value.match(dn)) { logger.msg(Arc::VERBOSE, "Cache access allowed to %s by DN %s", url, dn); return true; } logger.msg(Arc::DEBUG, "DN %s doesn't match %s", dn, access->cred_value.getPattern()); } else if (Arc::lower(access->cred_type) == "voms:vo") { if (access->cred_value.match(vo)) { logger.msg(Arc::VERBOSE, "Cache access allowed to %s by VO %s", url, vo); return true; } logger.msg(Arc::DEBUG, "VO %s doesn't match %s", vo, access->cred_value.getPattern()); } else if (Arc::lower(access->cred_type) == "voms:role") { // Get the configured allowed role std::vector role_parts; Arc::tokenize(access->cred_value.getPattern(), role_parts, ":"); if (role_parts.size() != 2) { logger.msg(Arc::WARNING, "Bad credential value %s in cache access rules", access->cred_value.getPattern()); continue; } std::string cred_vo = role_parts[0]; std::string cred_role = role_parts[1]; std::string allowed_role("/VO="+cred_vo+"/Group="+cred_vo+"/Role="+cred_role); for (std::list::const_iterator attr = voms.begin(); attr != voms.end(); ++attr) { if (*attr == allowed_role) { logger.msg(Arc::DEBUG, "VOMS attr %s matches %s", *attr, allowed_role); logger.msg(Arc::VERBOSE, "Cache access allowed to %s by VO %s and role %s", url, cred_vo, cred_role); return true; } logger.msg(Arc::DEBUG, "VOMS attr %s doesn't match %s", *attr, allowed_role); } } else if (Arc::lower(access->cred_type) == "voms:group") { // Get the configured allowed group std::vector group_parts; Arc::tokenize(access->cred_value.getPattern(), group_parts, ":"); if (group_parts.size() != 2) { logger.msg(Arc::WARNING, "Bad credential value %s in cache access rules", access->cred_value.getPattern()); continue; } std::string cred_vo = group_parts[0]; std::string cred_group = group_parts[1]; std::string allowed_group("/VO="+cred_vo+"/Group="+cred_vo+"/Group="+cred_group); for (std::list::const_iterator attr = voms.begin(); attr != voms.end(); ++attr) { if (*attr == allowed_group) { logger.msg(Arc::DEBUG, "VOMS attr %s matches %s", *attr, allowed_group); logger.msg(Arc::VERBOSE, "Cache access allowed to %s by VO %s and group %s", url, cred_vo, cred_group); return true; } logger.msg(Arc::DEBUG, "VOMS attr %s doesn't match %s", *attr, allowed_group); } } else { logger.msg(Arc::WARNING, "Unknown credential type %s for URL pattern %s", access->cred_type, access->regexp.getPattern()); } } } // If we get to here no match was found logger.msg(Arc::VERBOSE, "No match found in cache access rules for %s", url); return false; } Arc::MCC_Status ARexService::cache_get(Arc::Message& outmsg, const std::string& subpath, off_t range_start, off_t range_end, ARexGMConfig& config, bool no_content) { // subpath contains the URL, which can be encoded. 
Constructing a URL // object with encoded=true only decodes the path so have to decode first std::string unencoded(Arc::uri_unencode(subpath)); Arc::URL cacheurl(unencoded); logger.msg(Arc::INFO, "Get from cache: Looking in cache for %s", cacheurl.str()); if (!cacheurl) { logger.msg(Arc::ERROR, "Get from cache: Invalid URL %s", subpath); return make_http_fault(outmsg, 400, "Bad request: Invalid URL"); } // Security check. The access is configured in arc.conf like // cache_access="srm://srm-atlas.cern.ch/grid/atlas* voms:vo atlas" // then the url is compared to the certificate attribute specified if (!cache_get_allowed(cacheurl.str(), config, logger)) { return make_http_fault(outmsg, 403, "Forbidden"); } Arc::FileCache cache(config.GmConfig().CacheParams().getCacheDirs(), config.GmConfig().CacheParams().getDrainingCacheDirs(), config.GmConfig().CacheParams().getReadOnlyCacheDirs(), "0", // Jobid is not used config.User().get_uid(), config.User().get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Get from cache: Error in cache configuration"); return make_http_fault(outmsg, 500, "Error in cache configuration"); } // Get the cache file corresponding to the URL std::string cache_file(cache.File(cacheurl.str())); // Check if file exists struct stat st; if (!Arc::FileStat(cache_file, &st, false)) { if (errno == ENOENT) { logger.msg(Arc::INFO, "Get from cache: File not in cache"); return make_http_fault(outmsg, 404, "File not found"); } else { logger.msg(Arc::WARNING, "Get from cache: could not access cached file: %s", Arc::StrError(errno)); return make_http_fault(outmsg, 500, "Error accessing cached file"); } } // Check file size against specified range if (range_start > st.st_size) range_start = st.st_size; if (range_end > st.st_size) range_end = st.st_size; // Check if lockfile exists if (Arc::FileStat(cache_file + Arc::FileLock::getLockSuffix(), &st, false)) { logger.msg(Arc::INFO, "Get from cache: Cached file is locked"); return make_http_fault(outmsg, 409, "Cached file is locked"); } // Read the file and fill the payload if (!no_content) { Arc::MessagePayload* h = newFileRead(cache_file.c_str(), range_start, range_end); outmsg.Payload(h); } else { struct stat st; Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf && Arc::FileStat(cache_file, &st, false)) buf->Truncate(st.st_size); outmsg.Payload(buf); } outmsg.Attributes()->set("HTTP:content-type","application/octet-stream"); return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/a-rex-backtrace-collect.in0000644000000000000000000000013115067751327023704 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.314576145 nordugrid-arc-7.1.1/src/services/a-rex/a-rex-backtrace-collect.in0000644000175000002070000000461615067751327025616 0ustar00mockbuildmock00000000000000#!/bin/bash readconfigvar() { fname=$1 if [ ! -r "$fname" ]; then return fi bname="[$2]" vname=$3 value= cat "$fname" | sed -e 's/\r$//' -e 's/^\r//' | grep -e '^\[' -e "^${vname}=" | { while true; do read line if [ ! $? = 0 ] ; then return fi if [ "$line" = "$bname" ] ; then while true ; do read line if [ ! $? 
= 0 ] ; then return fi lstart=`echo "$line" | head -c 1` if [ "$lstart" = '[' ] ; then return fi vlname=`echo "$line" | sed 's/=.*//;t;s/.*//'` if [ "$vlname" = "$vname" ] ; then val=`echo "$line" | sed 's/[^=]*=//'` eval "echo $val" return fi done fi done } } # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi if [ "x$ARC_CONFIG" = "x" ]; then echo "Can't find configuration file." exit 1 fi if [ ! -f "${ARC_CONFIG}" ]; then echo "Can't find configuration file at ${ARC_CONFIG}." exit 1 fi ARCHED="${ARC_LOCATION}/sbin/arched" if [ ! -f "${ARCHED}" ]; then echo "Can't find arched at ${ARCHED}." exit 1 fi LOGFILE=`readconfigvar "$ARC_CONFIG" arex logfile` LOGFILE=${LOGFILE:-/var/log/arc/arex.log} COREDIR=`dirname "${LOGFILE}"`/arccore if [ ! -d "${COREDIR}" ]; then echo "Can't find core collection folder at ${COREDIR}." exit 1 fi backtrace_generated=no for corename in "${COREDIR}"/*; do echo "${corename}" | grep '\.backtrace$' if [ ! "$?" = '0' ]; then backtracename="${corename}.backtrace" echo "--- Processing ${corename} - storing into ${backtracename} ---" gdb --batch --core="${corename}" "${ARCHED}" --eval-command='thread apply all bt full' 1>"${backtracename}" 2>&1 backtrace_generated=yes fi done if [ $backtrace_generated = yes ]; then echo "Please send generated backtrace(s) to support@nordugrid.org or report them on http://bugzilla.nordugrid.org" fi nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/information_collector.cpp0000644000000000000000000000013215067751327024100 xustar0030 mtime=1759498967.763492101 30 atime=1759498967.869493711 30 ctime=1759499029.342088347 nordugrid-arc-7.1.1/src/services/a-rex/information_collector.cpp0000644000175000002070000001756415067751327026017 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "grid-manager/files/ControlFileHandling.h" #include "job.h" #include "arex.h" namespace ARex { int ARexService::OpenInfoDocument() { int h = infodoc_.OpenDocument(); if (h == -1) { // If information collector has no file assigned (not yet or collector is not running) open it directly h = open(config_.InformationFile().c_str(), O_RDONLY); } return h; } void ARexService::InformationCollector(void) { thread_count_.RegisterThread(); for(;;) { // Run information provider std::string xml_str; int r = -1; { std::string cmd; cmd=Arc::ArcLocation::GetDataDir()+"/CEinfo.pl --splitjobs --config "+config_.ConfigFile(); std::string stdin_str; std::string stderr_str; Arc::Run run(cmd); run.AssignStdin(stdin_str); run.AssignStdout(xml_str, 1024*1024); // can information document become bigger than 1MB? 
run.AssignStderr(stderr_str); logger_.msg(Arc::DEBUG,"Resource information provider: %s",cmd); if(!run.Start()) { // Failed to fork process logger_.msg(Arc::DEBUG,"Resource information provider failed to start"); } else { if(!run.Wait()) { logger_.msg(Arc::DEBUG,"Resource information provider failed to run"); } else { r = run.Result(); if (r!=0) { logger_.msg(Arc::WARNING,"Resource information provider failed with exit status: %i\n%s",r,stderr_str); } else { logger_.msg(Arc::DEBUG,"Resource information provider log:\n%s",stderr_str); }; }; }; }; if (r!=0) { logger_.msg(Arc::WARNING,"No new informational document assigned"); } else { logger_.msg(Arc::VERBOSE,"Obtained XML: %s",xml_str.substr(0,100)); // The following code is suboptimal. Most of it should go away // and its functionality be moved to the information providers. if(!xml_str.empty()) { // Currently glue states are lost. Counter of all jobs is lost too. infodoc_.Assign(xml_str, config_.InformationFile()); Arc::XMLNode root = infodoc_.Acquire(); Arc::XMLNode all_jobs_count = root["Domains"]["AdminDomain"]["Services"]["ComputingService"]["AllJobs"]; if((bool)all_jobs_count) { Arc::stringto((std::string)all_jobs_count,all_jobs_count_); all_jobs_count.Destroy(); // is not glue2 info }; infodoc_.Release(); } else { logger_.msg(Arc::ERROR,"Informational document is empty"); }; }; if(thread_count_.WaitOrCancel(infoprovider_wakeup_period_*100)) break; }; thread_count_.UnregisterThread(); } std::string ARexService::getID() { return "ARC:AREX"; } class PrefixedFilePayload: public Arc::PayloadRawInterface { private: std::string prefix_; std::string postfix_; int handle_; void* addr_; off_t length_; public: PrefixedFilePayload(const std::string& prefix,const std::string& postfix,int handle) { prefix_ = prefix; postfix_ = postfix; handle_ = handle; addr_ = MAP_FAILED; length_ = 0; if(handle != -1) { struct stat st; if(::fstat(handle,&st) == 0) { if(st.st_size > 0) { length_ = st.st_size; addr_ = ::mmap(NULL,st.st_size,PROT_READ,MAP_PRIVATE,handle,0); if(addr_ == MAP_FAILED) length_=0; }; }; }; }; ~PrefixedFilePayload(void) { if(addr_ != MAP_FAILED) ::munmap(addr_,length_); if(handle_ != -1) ::close(handle_); }; virtual char operator[](Size_t pos) const { char* p = ((PrefixedFilePayload*)this)->Content(pos); if(!p) return 0; return *p; }; virtual char* Content(Size_t pos) { if(pos < prefix_.length()) return (char*)(prefix_.c_str() + pos); pos -= prefix_.length(); if(pos < length_) return ((char*)(addr_) + pos); pos -= length_; if(pos < postfix_.length()) return (char*)(postfix_.c_str() + pos); return NULL; }; virtual Size_t Size(void) const { return (prefix_.length() + length_ + postfix_.length()); }; virtual char* Insert(Size_t /* pos */ = 0,Size_t /* size */ = 0) { return NULL; }; virtual char* Insert(const char* /* s */,Size_t /* pos */ = 0,Size_t /* size */ = -1) { return NULL; }; virtual char* Buffer(unsigned int num = 0) { if(num == 0) return (char*)(prefix_.c_str()); if(addr_ != MAP_FAILED) { if(num == 1) return (char*)addr_; } else { ++num; }; if(num == 2) return (char*)(postfix_.c_str()); return NULL; }; virtual Size_t BufferSize(unsigned int num = 0) const { if(num == 0) return prefix_.length(); if(addr_ != MAP_FAILED) { if(num == 1) return length_; } else { ++num; }; if(num == 2) return postfix_.length(); return 0; }; virtual Size_t BufferPos(unsigned int num = 0) const { if(num == 0) return 0; if(addr_ != MAP_FAILED) { if(num == 1) return prefix_.length(); } else { ++num; }; if(num == 2) return (prefix_.length() + length_); return
(prefix_.length() + length_ + postfix_.length()); }; virtual bool Truncate(Size_t /* size */) { return false; }; }; OptimizedInformationContainer::OptimizedInformationContainer(bool parse_xml) { handle_=-1; parse_xml_=parse_xml; } OptimizedInformationContainer::~OptimizedInformationContainer(void) { if(handle_ != -1) ::close(handle_); if(!filename_.empty()) ::unlink(filename_.c_str()); } int OptimizedInformationContainer::OpenDocument(void) { int h = -1; olock_.lock(); if(handle_ != -1) h = ::dup(handle_); olock_.unlock(); return h; } void OptimizedInformationContainer::Assign(const std::string& xml, const std::string filename) { std::string tmpfilename; int h = -1; if(filename.empty()) { h = Glib::file_open_tmp(tmpfilename); } else { tmpfilename = filename; tmpfilename += ".tmpXXXXXX"; h = Glib::mkstemp(tmpfilename); }; if(h == -1) { Arc::Logger::getRootLogger().msg(Arc::ERROR,"OptimizedInformationContainer failed to create temporary file"); return; }; Arc::Logger::getRootLogger().msg(Arc::VERBOSE,"OptimizedInformationContainer created temporary file: %s",tmpfilename); for(std::string::size_type p = 0;p/dev/null 2>&1 || return systemd-notify "$@" } log_failure_msg() { send_systemd_notify --status "Error: $@" echo $@ } # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-arex ]; then . /etc/sysconfig/arc-arex elif [ -r /etc/default/arc-arex ]; then . /etc/default/arc-arex fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ -n "$GLOBUS_LOCATION" ]; then if [ ! -d "$GLOBUS_LOCATION" ]; then log_failure_msg "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} # Prepare environment for executing various tools and main application add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$ARC_LOCATION/@libsubdir@64 else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$ARC_LOCATION/@libsubdir@64:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH testconfigblock() { $ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --runconfig "$1" --load -b "$2" 2>/dev/null 1>&2 if [ $? -eq 0 ] ; then echo 'true' else echo 'false' fi } readorigconfigvar() { value=`$ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --config "$1" -b "$3" -o "$2" 2>/dev/null` if [ $? -eq 0 ] ; then echo "$value" exit 0 else exit 1 fi } readconfigvar() { fname="$1" optname="$2" blocks="" while [ ! -z "$3" ] ; do blocks="$blocks -b $3" shift done value=`$ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --runconfig "$fname" --load $blocks -o "$optname" 2>/dev/null` if [ $? 
-eq 0 ] ; then echo "$value" exit 0 else exit 1 fi } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi # PID file PID_FILE=`readorigconfigvar "$ARC_CONFIG" pidfile arex` if [ "x$PID_FILE" = "x" ]; then # Missing default value for pidfile means no service block is present log_failure_msg "ARC configuration is missing [arex] block" exit 1 fi if [ "$1" = "--getpidfile" ] ; then echo $PID_FILE exit 0 fi ARC_RUNTIME_CONFIG=`echo "$PID_FILE" | sed 's#\([^\./]*\)\.[^\./]*$#\1#'` ARC_RUNTIME_CONFIG="${ARC_RUNTIME_CONFIG}.cfg" mkdir_for_user() { dirpath="$1" username="$2" groupname="$3" if [ ! -d "$dirpath" ] ; then mkdir -p "$dirpath" if [ ! -z "$username" ] ; then if [ ! -z "$groupname" ] ; then chown "$username:$groupname" "$dirpath" else chown "$username" "$dirpath" fi fi fi } mkfile_for_user() { filepath="$1" username="$2" groupname="$3" if [ ! -f "$filepath" ] ; then touch "$filepath" fi if [ ! -z "$username" ] ; then if [ ! -z "$groupname" ] ; then chown "$username:$groupname" "$filepath" else chown "$username" "$filepath" fi fi } prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing $CMD executable" exit 1 fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # Pre-process configuration $ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --config "$ARC_CONFIG" --runconfig "$ARC_RUNTIME_CONFIG" --save 2>/dev/null if [ $? -ne 0 ] ; then log_failure_msg "ARC configuration processing failed" exit 1 fi # Creating configuration file of arched # Reading following information from config file: # Log file # Debug level # User name # ... LOGFILE=`readconfigvar "$ARC_RUNTIME_CONFIG" logfile arex` LOGLEVEL=`readconfigvar "$ARC_RUNTIME_CONFIG" loglevel arex` WATCHDOG=`readconfigvar "$ARC_RUNTIME_CONFIG" watchdog arex` USERNAME=`readconfigvar "$ARC_RUNTIME_CONFIG" user arex` GRIDTMPDIR=`readconfigvar "$ARC_RUNTIME_CONFIG" tmpdir arex` GROUPNAME=`echo "$USERNAME" | sed 's/^[^:]*//;s/^://'` USERNAME=`echo "$USERNAME" | sed 's/:.*//'` X509_USER_CERT=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_host_cert common` X509_USER_KEY=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_host_key common` X509_CERT_POLICY=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_cert_policy common` if [ "$X509_CERT_POLICY" = 'any' ] ; then X509_CERT_DIR=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_cert_dir common` elif [ "$X509_CERT_POLICY" = 'grid' ] ; then X509_CERT_DIR=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_cert_dir common` else X509_CERT_DIR="" X509_CERT_POLICY='system' fi GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_RUNTIME_CONFIG" globus_tcp_port_range arex/data-staging` GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_RUNTIME_CONFIG" globus_udp_port_range arex/data-staging` HOSTNAME=`readconfigvar "$ARC_RUNTIME_CONFIG" hostname common` SERVICEMAIL=`readconfigvar "$ARC_RUNTIME_CONFIG" mail arex` # CONTROLDIR=`readconfigvar "$ARC_RUNTIME_CONFIG" controldir arex` # It is easier to handle root user through empty value. if [ "$USERNAME" = "root" ] ; then USERNAME="" fi if [ "$GROUPNAME" = "root" ] ; then GROUPNAME="" fi # Exporting collected variables export X509_USER_CERT export X509_USER_KEY export X509_CERT_DIR export X509_CERT_POLICY export GLOBUS_TCP_PORT_RANGE export GLOBUS_UDP_PORT_RANGE export HOSTNAME if [ ! 
-z "$GRIDTMPDIR" ] ; then export TMPDIR="$GRIDTMPDIR" ; fi # Web Service configuration arex_endpoint="" arex_mount_point="" ws_present=`testconfigblock "$ARC_RUNTIME_CONFIG" arex/ws` arex_present=`testconfigblock "$ARC_RUNTIME_CONFIG" arex/ws/jobs` if [ "$ws_present" = 'true' ] ; then WSLOGFILE=`readconfigvar "$ARC_RUNTIME_CONFIG" logfile arex/ws` arex_mount_point=`readconfigvar "$ARC_RUNTIME_CONFIG" wsurl arex/ws` arex_endpoint="$arex_mount_point" fi service_mail="" if [ ! -z "$SERVICEMAIL" ] ; then service_mail="$SERVICEMAIL" fi AREX_CONFIG=`mktemp -t arex.xml.XXXXXX` if [ -z "$AREX_CONFIG" ] ; then log_failure_msg "Failed to create temporary file" exit 1 fi CMD="$CMD -c $AREX_CONFIG" case "$LOGLEVEL" in 0) LOGLEVEL="FATAL" ;; 1) LOGLEVEL="ERROR" ;; 2) LOGLEVEL="WARNING" ;; 3) LOGLEVEL="INFO" ;; 4) LOGLEVEL="VERBOSE" ;; 5) LOGLEVEL="DEBUG" ;; *) LOGLEVEL="INFO" ;; esac mkdir_for_user `dirname "$LOGFILE"` "$USERNAME" "$GROUPNAME" mkfile_for_user "$LOGFILE" "$USERNAME" "$GROUPNAME" if [ "$WATCHDOG" = "yes" ] ; then WATCHDOG="true" else WATCHDOG="false" fi if [ ! -z "$USERNAME" ] ; then CMD="$CMD -u $USERNAME" fi if [ ! -z "$GROUPNAME" ] ; then CMD="$CMD -g $GROUPNAME" fi AREXCFG="\ $PID_FILE $LOGFILE $LOGLEVEL $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ arex $service_mail $arex_endpoint $ARC_RUNTIME_CONFIG " echo "$AREXCFG" > "$AREX_CONFIG" # setup logfile in case it is not there yet if [ ! -z "$USERNAME" ] ; then if [ ! -z "$GROUPNAME" ] ; then [ -f $AREX_CONFIG ] && chown "$USERNAME:$GROUPNAME" "$AREX_CONFIG" else [ -f $AREX_CONFIG ] && chown "$USERNAME" "$AREX_CONFIG" fi fi # prepare to collect crash information COREDIR=`dirname "${LOGFILE}"`/arccore mkdir_for_user "${COREDIR}" "$USERNAME" "$GROUPNAME" cd "${COREDIR}" ulimit -c unlimited } validate() { CHECK_CMD=$ARC_LOCATION/sbin/arcctl if [ ! -x $CHECK_CMD ]; then log_failure_msg "Could not find or execute arcctl tool" return 1 fi eval "$CHECK_CMD $@ --config $ARC_CONFIG service verify" RETVAL=$? return $RETVAL } if [ "$RUN" != "yes" ] ; then echo "a-rex disabled, please adjust the configuration to your needs " echo "and then set RUN to 'yes' in /etc/default/a-rex to enable it." exit 0 fi prepare echo "Validating A-REX setup..." >> "$LOGFILE" validate >> "$LOGFILE" 2>&1 RETVAL=$? if [ $RETVAL != 0 ]; then # Run validator again to print errors to stdout validate -d ERROR log_failure_msg "Configuration validation failed" exit 1 fi # if [ -z "$CONTROLDIR" ] ; then # log_failure_msg "Missing controldir in A-REX configuration" # exit 1 # fi # "$ARC_LOCATION/share/arc/update-controldir" "$CONTROLDIR" >> "$LOGFILE" # if [ $? -ne 0 ]; then # log_failure_msg "Failed to update A-REX control dir" # exit 1 # fi # Raise limit on number of file descriptors to max hlimit=`ulimit -H -n` if [ ! -z "$hlimit" ] ; then ulimit -S -n "$hlimit" 2>/dev/null fi now=`date '+[%Y:%m:%d %H:%M:%S]' 2>/dev/null` echo "$now Starting A-REX service executable..." >> "$LOGFILE" exec $CMD "$@" nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355021047 xustar0030 mtime=1759498989.571727159 30 atime=1759499017.569248902 30 ctime=1759499029.312031824 nordugrid-arc-7.1.1/src/services/a-rex/Makefile.in0000644000175000002070000020445115067751355022757 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test_cache_check$(EXEEXT) subdir = src/services/a-rex ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-arex arc-arex.service arc-arex-start \ arc-arex-ws arc-arex-ws.service arc-arex-ws-start \ a-rex-backtrace-collect a-rex-backtrace-collect.8 perferator \ update-controldir CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am__vpath_adj_setup = srcdirstrip=`echo 
"$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" \ "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(sbindir)" \ "$(DESTDIR)$(man8dir)" "$(DESTDIR)$(unitsdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) libarex_la_DEPENDENCIES = $(GRIDMANAGER_LIBS) \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws-addressing/libarcwsaddressing.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/otokens/libarcotokens.la am_libarex_la_OBJECTS = libarex_la-arex.lo libarex_la-authop.lo \ libarex_la-job.lo libarex_la-create_activity.lo \ libarex_la-change_activity_status.lo \ libarex_la-update_credentials.lo libarex_la-faults.lo \ libarex_la-get.lo libarex_la-put.lo libarex_la-PayloadFile.lo \ libarex_la-FileChunks.lo libarex_la-information_collector.lo \ libarex_la-cachecheck.lo libarex_la-tools.lo libarex_la_OBJECTS = $(am_libarex_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libarex_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libarex_la_CXXFLAGS) \ $(CXXFLAGS) $(libarex_la_LDFLAGS) $(LDFLAGS) -o $@ am_test_cache_check_OBJECTS = \ test_cache_check-test_cache_check.$(OBJEXT) test_cache_check_OBJECTS = $(am_test_cache_check_OBJECTS) am__DEPENDENCIES_1 = test_cache_check_DEPENDENCIES = $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) test_cache_check_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_cache_check_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) $(sbin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) 
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libarex_la-FileChunks.Plo \ ./$(DEPDIR)/libarex_la-PayloadFile.Plo \ ./$(DEPDIR)/libarex_la-arex.Plo \ ./$(DEPDIR)/libarex_la-authop.Plo \ ./$(DEPDIR)/libarex_la-cachecheck.Plo \ ./$(DEPDIR)/libarex_la-change_activity_status.Plo \ ./$(DEPDIR)/libarex_la-create_activity.Plo \ ./$(DEPDIR)/libarex_la-faults.Plo \ ./$(DEPDIR)/libarex_la-get.Plo \ ./$(DEPDIR)/libarex_la-information_collector.Plo \ ./$(DEPDIR)/libarex_la-job.Plo ./$(DEPDIR)/libarex_la-put.Plo \ ./$(DEPDIR)/libarex_la-tools.Plo \ ./$(DEPDIR)/libarex_la-update_credentials.Plo \ ./$(DEPDIR)/test_cache_check-test_cache_check.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libarex_la_SOURCES) $(test_cache_check_SOURCES) DIST_SOURCES = $(libarex_la_SOURCES) $(test_cache_check_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) DATA = $(units_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. 
Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in \ $(srcdir)/a-rex-backtrace-collect.8.in \ $(srcdir)/a-rex-backtrace-collect.in \ $(srcdir)/arc-arex-start.in $(srcdir)/arc-arex-ws-start.in \ $(srcdir)/arc-arex-ws.in $(srcdir)/arc-arex-ws.service.in \ $(srcdir)/arc-arex.in $(srcdir)/arc-arex.service.in \ $(srcdir)/perferator.in $(srcdir)/update-controldir.in \ $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = 
@BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = 
@PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = 
@unitsdir@ @INTERNAL_ENABLED_FALSE@INTERNAL = @INTERNAL_ENABLED_TRUE@INTERNAL = internaljobplugin SUBDIRS = delegation grid-manager infoproviders lrms schema $(INTERNAL) rte rest DIST_SUBDIRS = delegation grid-manager infoproviders lrms schema internaljobplugin rte rest pkglib_LTLIBRARIES = libarex.la @SYSV_SCRIPTS_ENABLED_FALSE@AREX_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@AREX_SCRIPT = arc-arex arc-arex-ws initd_SCRIPTS = $(AREX_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@AREX_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@AREX_UNIT = arc-arex.service arc-arex-ws.service units_DATA = $(AREX_UNIT) pkgdata_SCRIPTS = arc-arex-start arc-arex-ws-start perferator update-controldir sbin_SCRIPTS = a-rex-backtrace-collect man_MANS = a-rex-backtrace-collect.8 EXTRA_DIST = arc.zero.conf GRIDMANAGER_LIBS = grid-manager/libgridmanager.la delegation/libdelegation.la rest/libarexrest.la libarex_la_SOURCES = arex.cpp authop.cpp job.cpp \ create_activity.cpp \ change_activity_status.cpp \ update_credentials.cpp faults.cpp \ get.cpp put.cpp PayloadFile.cpp FileChunks.cpp \ information_collector.cpp cachecheck.cpp tools.cpp \ arex.h job.h PayloadFile.h FileChunks.h tools.h libarex_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) # Needs real cleaning in respect to dependencies libarex_la_LIBADD = \ $(GRIDMANAGER_LIBS) \ $(top_builddir)/src/hed/libs/infosys/libarcinfosys.la \ $(top_builddir)/src/hed/libs/ws-addressing/libarcwsaddressing.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/otokens/libarcotokens.la libarex_la_LDFLAGS = -no-undefined -avoid-version -module test_cache_check_SOURCES = test_cache_check.cpp test_cache_check_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) test_cache_check_LDADD = \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-arex: $(top_builddir)/config.status $(srcdir)/arc-arex.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-arex.service: $(top_builddir)/config.status $(srcdir)/arc-arex.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-arex-start: $(top_builddir)/config.status $(srcdir)/arc-arex-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-arex-ws: $(top_builddir)/config.status $(srcdir)/arc-arex-ws.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-arex-ws.service: $(top_builddir)/config.status $(srcdir)/arc-arex-ws.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-arex-ws-start: $(top_builddir)/config.status $(srcdir)/arc-arex-ws-start.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ a-rex-backtrace-collect: $(top_builddir)/config.status $(srcdir)/a-rex-backtrace-collect.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ a-rex-backtrace-collect.8: $(top_builddir)/config.status $(srcdir)/a-rex-backtrace-collect.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ perferator: $(top_builddir)/config.status $(srcdir)/perferator.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ update-controldir: $(top_builddir)/config.status $(srcdir)/update-controldir.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z 
"$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libarex.la: $(libarex_la_OBJECTS) $(libarex_la_DEPENDENCIES) $(EXTRA_libarex_la_DEPENDENCIES) $(AM_V_CXXLD)$(libarex_la_LINK) -rpath $(pkglibdir) $(libarex_la_OBJECTS) $(libarex_la_LIBADD) $(LIBS) test_cache_check$(EXEEXT): $(test_cache_check_OBJECTS) $(test_cache_check_DEPENDENCIES) $(EXTRA_test_cache_check_DEPENDENCIES) @rm -f test_cache_check$(EXEEXT) $(AM_V_CXXLD)$(test_cache_check_LINK) $(test_cache_check_OBJECTS) $(test_cache_check_LDADD) $(LIBS) install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(initddir)'"; \ $(MKDIR_P) "$(DESTDIR)$(initddir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(initddir)'; $(am__uninstall_files_from_dir) install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) install-sbinSCRIPTS: $(sbin_SCRIPTS) @$(NORMAL_INSTALL) @list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if 
test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-sbinSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(sbindir)'; $(am__uninstall_files_from_dir) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-FileChunks.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-PayloadFile.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-arex.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-authop.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-cachecheck.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-change_activity_status.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-create_activity.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-faults.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-get.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-information_collector.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-job.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-put.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-tools.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarex_la-update_credentials.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_cache_check-test_cache_check.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libarex_la-arex.lo: arex.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-arex.lo -MD -MP -MF $(DEPDIR)/libarex_la-arex.Tpo -c -o libarex_la-arex.lo `test -f 'arex.cpp' || echo '$(srcdir)/'`arex.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-arex.Tpo $(DEPDIR)/libarex_la-arex.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arex.cpp' object='libarex_la-arex.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-arex.lo `test -f 'arex.cpp' || echo '$(srcdir)/'`arex.cpp libarex_la-authop.lo: authop.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-authop.lo -MD -MP -MF $(DEPDIR)/libarex_la-authop.Tpo -c -o libarex_la-authop.lo `test -f 'authop.cpp' || echo '$(srcdir)/'`authop.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-authop.Tpo $(DEPDIR)/libarex_la-authop.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='authop.cpp' object='libarex_la-authop.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-authop.lo `test -f 'authop.cpp' || echo '$(srcdir)/'`authop.cpp libarex_la-job.lo: job.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-job.lo -MD -MP -MF $(DEPDIR)/libarex_la-job.Tpo -c -o libarex_la-job.lo `test -f 'job.cpp' || echo '$(srcdir)/'`job.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-job.Tpo $(DEPDIR)/libarex_la-job.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='job.cpp' object='libarex_la-job.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-job.lo `test -f 'job.cpp' || echo '$(srcdir)/'`job.cpp libarex_la-create_activity.lo: create_activity.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-create_activity.lo -MD -MP -MF $(DEPDIR)/libarex_la-create_activity.Tpo -c -o libarex_la-create_activity.lo `test -f 'create_activity.cpp' || echo '$(srcdir)/'`create_activity.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-create_activity.Tpo $(DEPDIR)/libarex_la-create_activity.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='create_activity.cpp' object='libarex_la-create_activity.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-create_activity.lo `test -f 'create_activity.cpp' || echo '$(srcdir)/'`create_activity.cpp libarex_la-change_activity_status.lo: change_activity_status.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-change_activity_status.lo -MD -MP -MF $(DEPDIR)/libarex_la-change_activity_status.Tpo -c -o libarex_la-change_activity_status.lo `test -f 'change_activity_status.cpp' || echo '$(srcdir)/'`change_activity_status.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-change_activity_status.Tpo $(DEPDIR)/libarex_la-change_activity_status.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='change_activity_status.cpp' object='libarex_la-change_activity_status.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-change_activity_status.lo `test -f 'change_activity_status.cpp' || echo '$(srcdir)/'`change_activity_status.cpp libarex_la-update_credentials.lo: update_credentials.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-update_credentials.lo -MD -MP -MF $(DEPDIR)/libarex_la-update_credentials.Tpo -c -o libarex_la-update_credentials.lo `test -f 'update_credentials.cpp' || echo '$(srcdir)/'`update_credentials.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-update_credentials.Tpo $(DEPDIR)/libarex_la-update_credentials.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='update_credentials.cpp' object='libarex_la-update_credentials.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) 
--tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-update_credentials.lo `test -f 'update_credentials.cpp' || echo '$(srcdir)/'`update_credentials.cpp libarex_la-faults.lo: faults.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-faults.lo -MD -MP -MF $(DEPDIR)/libarex_la-faults.Tpo -c -o libarex_la-faults.lo `test -f 'faults.cpp' || echo '$(srcdir)/'`faults.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-faults.Tpo $(DEPDIR)/libarex_la-faults.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='faults.cpp' object='libarex_la-faults.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-faults.lo `test -f 'faults.cpp' || echo '$(srcdir)/'`faults.cpp libarex_la-get.lo: get.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-get.lo -MD -MP -MF $(DEPDIR)/libarex_la-get.Tpo -c -o libarex_la-get.lo `test -f 'get.cpp' || echo '$(srcdir)/'`get.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-get.Tpo $(DEPDIR)/libarex_la-get.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='get.cpp' object='libarex_la-get.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-get.lo `test -f 'get.cpp' || echo '$(srcdir)/'`get.cpp libarex_la-put.lo: put.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-put.lo -MD -MP -MF $(DEPDIR)/libarex_la-put.Tpo -c -o libarex_la-put.lo `test -f 'put.cpp' || echo '$(srcdir)/'`put.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-put.Tpo $(DEPDIR)/libarex_la-put.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='put.cpp' object='libarex_la-put.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-put.lo `test -f 'put.cpp' || echo '$(srcdir)/'`put.cpp libarex_la-PayloadFile.lo: PayloadFile.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-PayloadFile.lo -MD -MP -MF $(DEPDIR)/libarex_la-PayloadFile.Tpo -c -o libarex_la-PayloadFile.lo `test -f 'PayloadFile.cpp' || echo '$(srcdir)/'`PayloadFile.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-PayloadFile.Tpo $(DEPDIR)/libarex_la-PayloadFile.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='PayloadFile.cpp' object='libarex_la-PayloadFile.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-PayloadFile.lo `test -f 'PayloadFile.cpp' || echo '$(srcdir)/'`PayloadFile.cpp libarex_la-FileChunks.lo: FileChunks.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-FileChunks.lo -MD -MP -MF $(DEPDIR)/libarex_la-FileChunks.Tpo -c -o libarex_la-FileChunks.lo `test -f 'FileChunks.cpp' || echo '$(srcdir)/'`FileChunks.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-FileChunks.Tpo $(DEPDIR)/libarex_la-FileChunks.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='FileChunks.cpp' object='libarex_la-FileChunks.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-FileChunks.lo `test -f 'FileChunks.cpp' || echo '$(srcdir)/'`FileChunks.cpp libarex_la-information_collector.lo: information_collector.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-information_collector.lo -MD -MP -MF $(DEPDIR)/libarex_la-information_collector.Tpo -c -o libarex_la-information_collector.lo `test -f 'information_collector.cpp' || echo '$(srcdir)/'`information_collector.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-information_collector.Tpo $(DEPDIR)/libarex_la-information_collector.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='information_collector.cpp' object='libarex_la-information_collector.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-information_collector.lo `test -f 'information_collector.cpp' || echo '$(srcdir)/'`information_collector.cpp libarex_la-cachecheck.lo: cachecheck.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT 
libarex_la-cachecheck.lo -MD -MP -MF $(DEPDIR)/libarex_la-cachecheck.Tpo -c -o libarex_la-cachecheck.lo `test -f 'cachecheck.cpp' || echo '$(srcdir)/'`cachecheck.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-cachecheck.Tpo $(DEPDIR)/libarex_la-cachecheck.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='cachecheck.cpp' object='libarex_la-cachecheck.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-cachecheck.lo `test -f 'cachecheck.cpp' || echo '$(srcdir)/'`cachecheck.cpp libarex_la-tools.lo: tools.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -MT libarex_la-tools.lo -MD -MP -MF $(DEPDIR)/libarex_la-tools.Tpo -c -o libarex_la-tools.lo `test -f 'tools.cpp' || echo '$(srcdir)/'`tools.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarex_la-tools.Tpo $(DEPDIR)/libarex_la-tools.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='tools.cpp' object='libarex_la-tools.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarex_la_CXXFLAGS) $(CXXFLAGS) -c -o libarex_la-tools.lo `test -f 'tools.cpp' || echo '$(srcdir)/'`tools.cpp test_cache_check-test_cache_check.o: test_cache_check.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -MT test_cache_check-test_cache_check.o -MD -MP -MF $(DEPDIR)/test_cache_check-test_cache_check.Tpo -c -o test_cache_check-test_cache_check.o `test -f 'test_cache_check.cpp' || echo '$(srcdir)/'`test_cache_check.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_cache_check-test_cache_check.Tpo $(DEPDIR)/test_cache_check-test_cache_check.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='test_cache_check.cpp' object='test_cache_check-test_cache_check.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -c -o test_cache_check-test_cache_check.o `test -f 'test_cache_check.cpp' || echo '$(srcdir)/'`test_cache_check.cpp test_cache_check-test_cache_check.obj: test_cache_check.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -MT test_cache_check-test_cache_check.obj -MD -MP -MF $(DEPDIR)/test_cache_check-test_cache_check.Tpo -c -o test_cache_check-test_cache_check.obj `if test -f 'test_cache_check.cpp'; then $(CYGPATH_W) 'test_cache_check.cpp'; else $(CYGPATH_W) '$(srcdir)/test_cache_check.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_cache_check-test_cache_check.Tpo 
$(DEPDIR)/test_cache_check-test_cache_check.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='test_cache_check.cpp' object='test_cache_check-test_cache_check.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_cache_check_CXXFLAGS) $(CXXFLAGS) -c -o test_cache_check-test_cache_check.obj `if test -f 'test_cache_check.cpp'; then $(CYGPATH_W) 'test_cache_check.cpp'; else $(CYGPATH_W) '$(srcdir)/test_cache_check.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man8dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man8dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man8dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.8[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man8dir)'; $(am__uninstall_files_from_dir) install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(unitsdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(unitsdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(unitsdir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
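# For example (an illustrative invocation, not part of the generated rules):
# a variable assignment given on the 'make' command line overrides the value
# recorded in this Makefile for that run only, e.g.
#   make CXXFLAGS='-O2 -g'
# whereas a value edited into 'config.status' is reapplied every time the
# Makefiles are regenerated.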
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(SCRIPTS) $(MANS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(sbindir)" "$(DESTDIR)$(man8dir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
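# Example usage (illustrative only; the staging path is a placeholder): the
# install-strip target above combines installation with stripping of the
# installed programs, and honours the usual DESTDIR staging convention:
#   make DESTDIR=/tmp/arc-stage install-strip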
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ clean-pkglibLTLIBRARIES mostlyclean-am distclean: distclean-recursive -rm -f ./$(DEPDIR)/libarex_la-FileChunks.Plo -rm -f ./$(DEPDIR)/libarex_la-PayloadFile.Plo -rm -f ./$(DEPDIR)/libarex_la-arex.Plo -rm -f ./$(DEPDIR)/libarex_la-authop.Plo -rm -f ./$(DEPDIR)/libarex_la-cachecheck.Plo -rm -f ./$(DEPDIR)/libarex_la-change_activity_status.Plo -rm -f ./$(DEPDIR)/libarex_la-create_activity.Plo -rm -f ./$(DEPDIR)/libarex_la-faults.Plo -rm -f ./$(DEPDIR)/libarex_la-get.Plo -rm -f ./$(DEPDIR)/libarex_la-information_collector.Plo -rm -f ./$(DEPDIR)/libarex_la-job.Plo -rm -f ./$(DEPDIR)/libarex_la-put.Plo -rm -f ./$(DEPDIR)/libarex_la-tools.Plo -rm -f ./$(DEPDIR)/libarex_la-update_credentials.Plo -rm -f ./$(DEPDIR)/test_cache_check-test_cache_check.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-data-local install-initdSCRIPTS install-man \ install-pkgdataSCRIPTS install-unitsDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-sbinSCRIPTS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man8 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/libarex_la-FileChunks.Plo -rm -f ./$(DEPDIR)/libarex_la-PayloadFile.Plo -rm -f ./$(DEPDIR)/libarex_la-arex.Plo -rm -f ./$(DEPDIR)/libarex_la-authop.Plo -rm -f ./$(DEPDIR)/libarex_la-cachecheck.Plo -rm -f ./$(DEPDIR)/libarex_la-change_activity_status.Plo -rm -f ./$(DEPDIR)/libarex_la-create_activity.Plo -rm -f ./$(DEPDIR)/libarex_la-faults.Plo -rm -f ./$(DEPDIR)/libarex_la-get.Plo -rm -f ./$(DEPDIR)/libarex_la-information_collector.Plo -rm -f ./$(DEPDIR)/libarex_la-job.Plo -rm -f ./$(DEPDIR)/libarex_la-put.Plo -rm -f ./$(DEPDIR)/libarex_la-tools.Plo -rm -f ./$(DEPDIR)/libarex_la-update_credentials.Plo -rm -f ./$(DEPDIR)/test_cache_check-test_cache_check.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-local uninstall-man \ uninstall-pkgdataSCRIPTS uninstall-pkglibLTLIBRARIES \ uninstall-sbinSCRIPTS uninstall-unitsDATA uninstall-man: uninstall-man8 .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--depfiles check check-am clean clean-generic clean-libtool \ clean-noinstPROGRAMS clean-pkglibLTLIBRARIES cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-data-local install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-initdSCRIPTS install-man \ install-man8 install-pdf install-pdf-am install-pkgdataSCRIPTS \ install-pkglibLTLIBRARIES install-ps install-ps-am \ install-sbinSCRIPTS install-strip install-unitsDATA \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ 
mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ uninstall-initdSCRIPTS uninstall-local uninstall-man \ uninstall-man8 uninstall-pkgdataSCRIPTS \ uninstall-pkglibLTLIBRARIES uninstall-sbinSCRIPTS \ uninstall-unitsDATA .PRECIOUS: Makefile $(top_builddir)/src/hed/libs/common/libarccommon.la install-data-local: $(MKDIR_P) "$(DESTDIR)$(sysconfdir)" if test ! -e $(DESTDIR)$(sysconfdir)/arc.conf; then $(INSTALL_DATA) $(srcdir)/arc.zero.conf $(DESTDIR)$(sysconfdir)/arc.conf; fi uninstall-local: rm -f $(DESTDIR)$(sysconfdir)/arc.conf # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/rte0000644000000000000000000000013215067751426017516 xustar0030 mtime=1759499030.482445118 30 atime=1759499034.764510185 30 ctime=1759499030.482445118 nordugrid-arc-7.1.1/src/services/a-rex/rte/0000755000175000002070000000000015067751426021475 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/rte/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327021627 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499030.482354763 nordugrid-arc-7.1.1/src/services/a-rex/rte/Makefile.am0000644000175000002070000000044015067751327023527 0ustar00mockbuildmock00000000000000arcrteenvdir = $(pkgdatadir)/rte/ENV arcrteenv_DATA = ENV/PROXY ENV/RTE ENV/LRMS-SCRATCH ENV/CANDYPOND ENV/SINGULARITY arcrteenvcondordir = $(pkgdatadir)/rte/ENV/CONDOR arcrteenvcondor_DATA = ENV/CONDOR/DOCKER EXTRA_DIST = ENV/RTE ENV/LRMS-SCRATCH ENV/SINGULARITY $(arcrteenvcondor_DATA) nordugrid-arc-7.1.1/src/services/a-rex/rte/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356021642 xustar0030 mtime=1759498990.843339788 30 atime=1759499019.047271361 30 ctime=1759499030.483611306 nordugrid-arc-7.1.1/src/services/a-rex/rte/Makefile.in0000644000175000002070000005341415067751356023553 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/rte ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| 
$$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(arcrteenvdir)" \ "$(DESTDIR)$(arcrteenvcondordir)" DATA = $(arcrteenv_DATA) $(arcrteenvcondor_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = 
@GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = 
@SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ arcrteenvdir = $(pkgdatadir)/rte/ENV arcrteenv_DATA = ENV/PROXY ENV/RTE ENV/LRMS-SCRATCH ENV/CANDYPOND ENV/SINGULARITY arcrteenvcondordir = $(pkgdatadir)/rte/ENV/CONDOR arcrteenvcondor_DATA = ENV/CONDOR/DOCKER EXTRA_DIST = ENV/RTE ENV/LRMS-SCRATCH ENV/SINGULARITY $(arcrteenvcondor_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/rte/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/rte/Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcrteenvDATA: $(arcrteenv_DATA) @$(NORMAL_INSTALL) @list='$(arcrteenv_DATA)'; test -n "$(arcrteenvdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcrteenvdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcrteenvdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcrteenvdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcrteenvdir)" || exit $$?; \ done uninstall-arcrteenvDATA: @$(NORMAL_UNINSTALL) @list='$(arcrteenv_DATA)'; test -n "$(arcrteenvdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcrteenvdir)'; $(am__uninstall_files_from_dir) install-arcrteenvcondorDATA: $(arcrteenvcondor_DATA) @$(NORMAL_INSTALL) @list='$(arcrteenvcondor_DATA)'; test -n "$(arcrteenvcondordir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcrteenvcondordir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcrteenvcondordir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcrteenvcondordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcrteenvcondordir)" || exit $$?; \ done uninstall-arcrteenvcondorDATA: @$(NORMAL_UNINSTALL) @list='$(arcrteenvcondor_DATA)'; test -n "$(arcrteenvcondordir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcrteenvcondordir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcrteenvdir)" "$(DESTDIR)$(arcrteenvcondordir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcrteenvDATA install-arcrteenvcondorDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcrteenvDATA uninstall-arcrteenvcondorDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-arcrteenvDATA \ install-arcrteenvcondorDATA install-data install-data-am \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am \ uninstall-arcrteenvDATA uninstall-arcrteenvcondorDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/rte/PaxHeaders/ENV0000644000000000000000000000013215067751426020146 xustar0030 mtime=1759499030.486445179 30 atime=1759499034.764510185 30 ctime=1759499030.486445179 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/0000755000175000002070000000000015067751426022125 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PaxHeaders/CANDYPOND.in0000644000000000000000000000013215067751327022072 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499024.694410077 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/CANDYPOND.in0000644000175000002070000000322115067751327023772 0ustar00mockbuildmock00000000000000# description: makes the ARC Candypond client available on the Worker Nodes and ready to be used # param:CANDYPOND_URL:string:auto:Manually redefine Candypond URL CANDYPOND_URL=${CANDYPOND_URL:-"auto"} ARC_LIBEXEC_DIR="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@" ARC_CONFIG_PARSER="$ARC_LIBEXEC_DIR/arcconfig-parser --load -r $ARC_CONFIG" if [ "x$1" = "x0" ]; then # determine Candypond URL if [ "x$CANDYPOND_URL" = "xauto" ]; then if [ ! -r "$ARC_CONFIG" ]; then echo "WARNING: arc.conf is not readable, assuming Candypond is enabled and the default URL is used." >&2 CANDYPOND_URL="https://$(hostname -f):443/arex/candypond" else # check Candypond is enabled $ARC_CONFIG_PARSER -b arex/ws/candypond if [ $? -ne 0 ]; then echo "FATAL: Candypond is not enabled. Add [arex/ws/candypond] to arc.conf." >&2 exit 1 fi # get URL CANDYPOND_URL="$( $ARC_CONFIG_PARSER -b arex/ws -o wsurl )/candypond" fi fi # copy 'arccandypond' tool to sessiondir mkdir -p ${joboption_directory}/arc/bin/ cp $ARC_LIBEXEC_DIR/arccandypond ${joboption_directory}/arc/bin/ # add URL to job environment (find the last defined variable and append after it) arc_env_idx=0 arc_env_var="joboption_env_${arc_env_idx}" while eval "test -n \"\$$arc_env_var\""; do arc_env_idx=$(( arc_env_idx + 1 )) arc_env_var="joboption_env_${arc_env_idx}" done eval "export ${arc_env_var}=ARC_CANDYPOND_URL='$CANDYPOND_URL'" elif [ "x$1" = "x1" ]; then # add to PATH export PATH="${RUNTIME_JOB_DIR}/arc/bin:$PATH" fi nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PaxHeaders/CONDOR0000644000000000000000000000013215067751426021132 xustar0030 mtime=1759499030.487445194 30 atime=1759499034.764510185 30 ctime=1759499030.487445194 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/CONDOR/0000755000175000002070000000000015067751426023111 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/CONDOR/PaxHeaders/DOCKER0000644000000000000000000000013215067751327022101 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499030.488741364 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/CONDOR/DOCKER0000644000175000002070000000076715067751327024005 0ustar00mockbuildmock00000000000000# description: enables submission to the Docker universe in the HTCondor backend # param:DOCKER_IMAGE:string::Docker image to use for submitted jobs by default DOCKER_IMAGE=${DOCKER_IMAGE:-} if [ "x$1" = "x0" ]; then export DOCKER_UNIVERSE=docker # docker image name can be redefined by the user as an RTE parameter [ -n "$2" ] && DOCKER_IMAGE="$2" export DOCKER_IMAGE # in case a Docker image is used on the WN, add this to the accounting information export ACCOUNTING_WN_INSTANCE="${DOCKER_IMAGE}" fi nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PaxHeaders/LRMS-SCRATCH0000644000000000000000000000013215067751327022010 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30
ctime=1759499030.486192972 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/LRMS-SCRATCH0000644000175000002070000000134515067751327023715 0ustar00mockbuildmock00000000000000# description: enables the usage of the WN-local scratch directory defined by the LRMS # param:SCRATCH_VAR:string:WORKDIR:Variable name that holds the path to the job-specific WN scratch directory # param:TMPDIR_LOCATION:string::Define the TMPDIR path on the WN. Variable names can be used as a part of the path, e.g. '$WORKDIR/tmp' SCRATCH_VAR="${SCRATCH_VAR:-WORKDIR}" TMPDIR_LOCATION="${TMPDIR_LOCATION:-}" if [ "x$1" = "x0" ]; then RUNTIME_LOCAL_SCRATCH_DIR="\${${SCRATCH_VAR}}" elif [ "x$1" = "x1" ]; then if [ -n "${TMPDIR_LOCATION}" ]; then if [ ! -d "${TMPDIR_LOCATION}" ]; then mkdir "${TMPDIR_LOCATION}" chmod 1777 "${TMPDIR_LOCATION}" fi export TMPDIR="${TMPDIR_LOCATION}" fi fi nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PaxHeaders/PROXY.in0000644000000000000000000000013215067751327021474 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499024.695425809 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PROXY.in0000644000175000002070000000363515067751327023375 0ustar00mockbuildmock00000000000000# description: copy the proxy certificate to the job session directory # param:COPY_CACERT_DIR:Yes,No:Yes:If set to Yes, the CA certificate dir will be copied to the session directory along with the proxy certificate # param:USE_DELEGATION_DB:Yes,No:No:If set to Yes, the RTE will try to extract the proxy certificate from the A-REX delegation DB (works in a limited number of cases) # param:X509_CERT_DIR:string:/etc/pki/tls/certs:CA dir to use (either /etc/grid-security/certificates or the system folder, depending on whether x509_cert_policy is grid or system) COPY_CACERT_DIR="${COPY_CACERT_DIR:-Yes}" USE_DELEGATION_DB="${USE_DELEGATION_DB:-No}" X509_CERT_DIR="${X509_CERT_DIR:-/etc/grid-security/certificates}" if [ "x$1" = "x0" ]; then PROXY_FILE=$(control_path "${joboption_controldir}" "${joboption_gridid}" "proxy") if [ "x$COPY_CACERT_DIR" = "xYes" ]; then mkdir -pv ${joboption_directory}/arc/certificates/ cp -rv ${X509_CERT_DIR}/* ${joboption_directory}/arc/certificates/ fi if [ "x$USE_DELEGATION_DB" = "xYes" ]; then GM_JOBS="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@/gm-jobs" # try DB export or fall back to the proxy file $GM_JOBS -J -S -D ${joboption_gridid} -o "${PROXY_FILE}" || USE_DELEGATION_DB="No" fi if [ "x$USE_DELEGATION_DB" = "xNo" ]; then cat "${PROXY_FILE}" > "${joboption_directory}/user.proxy" fi chmod 600 "${joboption_directory}/user.proxy" elif [ "x$1" = "x1" ]; then export X509_USER_PROXY="${X509_USER_PROXY:-${RUNTIME_JOB_DIR}/user.proxy}" export X509_USER_CERT="${X509_USER_CERT:-${RUNTIME_JOB_DIR}/user.proxy}" if [ "x$COPY_CACERT_DIR" = "xYes" ]; then export X509_CERT_DIR="${RUNTIME_JOB_DIR}/arc/certificates" else export X509_CERT_DIR="${X509_CERT_DIR:-/etc/grid-security/certificates}" fi elif [ "x$1" = "x2" ]; then if [ "x$COPY_CACERT_DIR" = "xYes" ]; then rm -rf ${RUNTIME_JOB_DIR}/arc/certificates fi fi nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PaxHeaders/SINGULARITY0000644000000000000000000000013215067751327021760 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499030.487441862 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/SINGULARITY0000644000175000002070000000327215067751327023666 0ustar00mockbuildmock00000000000000# description: executes the job inside a singularity container # param:SINGULARITY_IMAGE:string:NULL:singularity image (or an image tree per VO) as comma-separated key:value pairs; the key 'default' is used if the VO is not
matched # param:SINGULARITY_OPTIONS:string: :singularity options # param:BINARY_PATH:string:/usr/bin/singularity:singularity binary location SINGULARITY_OPTIONS="${SINGULARITY_OPTIONS:-}" SINGULARITY_IMAGE="${SINGULARITY_IMAGE:-}" BINARY_PATH="${BINARY_PATH:-/usr/bin/singularity}" BINARY_PATH="${4:-$BINARY_PATH}" DEFAULT_IMAGE="NULL" if [ "x$1" = "x0" ]; then LOCAL_FILE=$(control_path "${joboption_controldir}" "${joboption_gridid}" "local") # get VO #vo=`arcproxy -P $joboption_controldir/job.$joboption_gridid.proxy -i vomsVO 2> /dev/null` vo=`grep voms= $LOCAL_FILE | awk -F / '{print $2}'|sort -u` values=`echo $SINGULARITY_IMAGE | sed -e 's/,/ /g'` for i in $values do voname=`echo $i | awk -F : '{print $1}'` voimage=`echo $i | awk -F : '{print $2}'` if [ "xdefault" = "x$voname" ]; then DEFAULT_IMAGE=$voimage fi if [ "x$vo" = "x$voname" ] ; then IMAGE=$voimage fi done IMAGE="${IMAGE:-$DEFAULT_IMAGE}" # explicit image, NULL skips the container IMAGE="${2:-$IMAGE}" # unquote IMAGE temp="${IMAGE%\"}" temp="${temp#\"}" IMAGE=$temp # Check if singularity is already used/set by another RTE echo $joboption_args | grep -q singularity; sused=$? if [ "x$IMAGE" != "xNULL" ] && [ "x$sused" = "x1" ] ; then joboption_args="$BINARY_PATH exec $SINGULARITY_OPTIONS --home \${RUNTIME_JOB_DIR} $IMAGE $joboption_args" # account singularity image usage export ACCOUNTING_WN_INSTANCE="${IMAGE}" fi fi nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/PaxHeaders/RTE0000644000000000000000000000013215067751327020600 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499030.484899251 nordugrid-arc-7.1.1/src/services/a-rex/rte/ENV/RTE0000644000175000002070000000310415067751327022500 0ustar00mockbuildmock00000000000000# description: copy RunTimeEnvironment scripts to the job session directory if [ "$1" = "0" ] ; then runtimeenv_idx=0 runtimeenv_var="joboption_runtime_${runtimeenv_idx}" eval "runtimeenv_name=\"\${${runtimeenv_var}}\"" while [ -n "${runtimeenv_name}" ]; do # define safe defaults arcce_runtimeenv_path=/dev/null arcce_runtimeenv_params_path=/dev/null # find RTE location (enabled vs default) if [ -e "${joboption_controldir}/rte/enabled/${runtimeenv_name}" ]; then arcce_runtimeenv_path="${joboption_controldir}/rte/enabled/${runtimeenv_name}" else arcce_runtimeenv_path="${joboption_controldir}/rte/default/${runtimeenv_name}" fi # check whether the RTE has a parameters file if [ -e "${joboption_controldir}/rte/params/${runtimeenv_name}" ]; then arcce_runtimeenv_params_path="${joboption_controldir}/rte/params/${runtimeenv_name}" fi # copy RTE script to session directory sessiondir_runtimeenv_path="${joboption_directory}/rte/${runtimeenv_name}" mkdir -p "${sessiondir_runtimeenv_path%/*}" cat "$arcce_runtimeenv_params_path" > "$sessiondir_runtimeenv_path" cat "$arcce_runtimeenv_path" >> "$sessiondir_runtimeenv_path" # next RTE runtimeenv_idx=$((runtimeenv_idx+1)) runtimeenv_var="joboption_runtime_${runtimeenv_idx}" eval "runtimeenv_name=\"\${${runtimeenv_var}}\"" done unset runtimeenv_idx runtimeenv_var sessiondir_runtimeenv_path arcce_runtimeenv_path arcce_runtimeenv_params_path fi true nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arc-arex-ws-start.in0000644000000000000000000000013015067751327022613 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 29 ctime=1759499029.31716446 nordugrid-arc-7.1.1/src/services/a-rex/arc-arex-ws-start.in0000644000175000002070000005307015067751327024524 0ustar00mockbuildmock00000000000000#!/bin/bash export MALLOC_ARENA_MAX=2 add_library_path() {
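# Prepend "$location/lib" and "$location/lib64" (when those directories
# exist) to LD_LIBRARY_PATH; an empty location and /usr are skipped.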
location="$1" if [ ! "x$location" = "x" ] ; then if [ ! "$location" = "/usr" ] ; then libdir="$location/lib" libdir64="$location/lib64" if [ -d "$libdir64" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir64" else LD_LIBRARY_PATH="$libdir64:$LD_LIBRARY_PATH" fi fi if [ -d "$libdir" ] ; then if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH="$libdir" else LD_LIBRARY_PATH="$libdir:$LD_LIBRARY_PATH" fi fi fi fi } prog=arched RUN=yes send_systemd_notify() { # return if no systemd-notify found type systemd-notify >/dev/null 2>&1 || return systemd-notify "$@" } log_failure_msg() { send_systemd_notify --status "Error: $@" echo $@ } # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-arex-ws ]; then . /etc/sysconfig/arc-arex-ws elif [ -r /etc/default/arc-arex-ws ]; then . /etc/default/arc-arex-ws fi # GLOBUS_LOCATION GLOBUS_LOCATION=${GLOBUS_LOCATION:-@DEFAULT_GLOBUS_LOCATION@} if [ -n "$GLOBUS_LOCATION" ]; then if [ ! -d "$GLOBUS_LOCATION" ]; then log_failure_msg "GLOBUS_LOCATION ($GLOBUS_LOCATION) not found" exit 1 fi export GLOBUS_LOCATION fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION # VOMS_LOCATION VOMS_LOCATION=${VOMS_LOCATION:-@DEFAULT_VOMS_LOCATION@} # Prepare environment for executing various tools and main application add_library_path "$VOMS_LOCATION" add_library_path "$GLOBUS_LOCATION" if [ "x$LD_LIBRARY_PATH" = "x" ]; then LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$ARC_LOCATION/@libsubdir@64 else LD_LIBRARY_PATH=$ARC_LOCATION/@libsubdir@:$ARC_LOCATION/@libsubdir@64:$LD_LIBRARY_PATH fi export LD_LIBRARY_PATH testconfigblock() { $ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --runconfig "$1" --load -b "$2" 2>/dev/null 1>&2 if [ $? -eq 0 ] ; then echo 'true' else echo 'false' fi } readorigconfigvar() { value=`$ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --config "$1" -b "$3" -o "$2" 2>/dev/null` if [ $? -eq 0 ] ; then echo "$value" exit 0 else exit 1 fi } readconfigvar() { fname="$1" optname="$2" blocks="" while [ ! -z "$3" ] ; do blocks="$blocks -b $3" shift done value=`$ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --runconfig "$fname" --load $blocks -o "$optname" 2>/dev/null` if [ $? -eq 0 ] ; then echo "$value" exit 0 else exit 1 fi } # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi # PID file PID_FILE=`readorigconfigvar "$ARC_CONFIG" pidfile arex/ws` if [ "x$PID_FILE" = "x" ]; then # Missing default value for pidfile means no service block is present log_failure_msg "ARC configuration is missing [arex/ws] block" exit 1 fi if [ "$1" = "--getpidfile" ] ; then echo $PID_FILE exit 0 fi ARC_RUNTIME_CONFIG=`echo "$PID_FILE" | sed 's#\([^\./]*\)\.[^\./]*$#\1#'` ARC_RUNTIME_CONFIG="${ARC_RUNTIME_CONFIG}.cfg" mkdir_for_user() { dirpath="$1" username="$2" groupname="$3" if [ ! -d "$dirpath" ] ; then mkdir -p "$dirpath" if [ ! -z "$username" ] ; then if [ ! -z "$groupname" ] ; then chown "$username:$groupname" "$dirpath" else chown "$username" "$dirpath" fi fi fi } mkfile_for_user() { filepath="$1" username="$2" groupname="$3" if [ ! -f "$filepath" ] ; then touch "$filepath" fi if [ ! -z "$username" ] ; then if [ ! 
-z "$groupname" ] ; then chown "$username:$groupname" "$filepath" else chown "$username" "$filepath" fi fi } prepare() { CMD="$ARC_LOCATION/sbin/$prog" if [ ! -x "$CMD" ]; then log_failure_msg "Missing $CMD executable" exit 1 fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "ARC configuration not found (usually /etc/arc.conf)" exit 1 fi # Pre-process configuration $ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --config "$ARC_CONFIG" --runconfig "$ARC_RUNTIME_CONFIG" --save 2>/dev/null if [ $? -ne 0 ] ; then log_failure_msg "ARC configuration processing failed" exit 1 fi # Creating configuration file of arched # Reading following information from config file: # Log file # Debug level # User name # ... LOGFILE=`readconfigvar "$ARC_RUNTIME_CONFIG" logfile arex` LOGLEVEL=`readconfigvar "$ARC_RUNTIME_CONFIG" loglevel arex` WATCHDOG=`readconfigvar "$ARC_RUNTIME_CONFIG" watchdog arex` USERNAME=`readconfigvar "$ARC_RUNTIME_CONFIG" user arex` GRIDTMPDIR=`readconfigvar "$ARC_RUNTIME_CONFIG" tmpdir arex` GROUPNAME=`echo "$USERNAME" | sed 's/^[^:]*//;s/^://'` USERNAME=`echo "$USERNAME" | sed 's/:.*//'` X509_USER_CERT=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_host_cert common` X509_USER_KEY=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_host_key common` X509_CERT_POLICY=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_cert_policy common` if [ "$X509_CERT_POLICY" = 'any' ] ; then X509_CERT_DIR=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_cert_dir common` x509_cert_config="truetrue$X509_CERT_DIR" elif [ "$X509_CERT_POLICY" = 'grid' ] ; then X509_CERT_DIR=`readconfigvar "$ARC_RUNTIME_CONFIG" x509_cert_dir common` x509_cert_config="falsetrue$X509_CERT_DIR" else X509_CERT_DIR="" X509_CERT_POLICY='system' x509_cert_config="truefalse" fi GLOBUS_TCP_PORT_RANGE=`readconfigvar "$ARC_RUNTIME_CONFIG" globus_tcp_port_range arex/data-staging` GLOBUS_UDP_PORT_RANGE=`readconfigvar "$ARC_RUNTIME_CONFIG" globus_udp_port_range arex/data-staging` VOMS_PROCESSING=`readconfigvar "$ARC_RUNTIME_CONFIG" voms_processing common` mapping_present=`testconfigblock "$ARC_RUNTIME_CONFIG" mapping` CIPHERS_STRING=`readconfigvar "$ARC_RUNTIME_CONFIG" tlsciphers arex/ws` if [ -z "$CIPHERS_STRING" ] ; then if [ ! -d "/etc/crypto-policies" ] ; then # System has no crypto policies - define something safe CIPHERS_STRING="$ARC_CIPHERS_STRING" if [ -z "$CIPHERS_STRING" ] ; then CIPHERS_STRING="HIGH:!eNULL:!aNULL" fi fi fi CIPHERS_SERVER_ORDER=`readconfigvar "$ARC_RUNTIME_CONFIG" tlsserverorder arex/ws` PROTOCOLS_STRING=`readconfigvar "$ARC_RUNTIME_CONFIG" tlsprotocols arex/ws` CURVE_STRING=`readconfigvar "$ARC_RUNTIME_CONFIG" tlscurve arex/ws` USERMAP_BLOCK='' if [ "$mapping_present" = 'true' ] ; then USERMAP_BLOCK='mapping' fi HOSTNAME=`readconfigvar "$ARC_RUNTIME_CONFIG" hostname common` SERVICEMAIL=`readconfigvar "$ARC_RUNTIME_CONFIG" mail arex` CONTROLDIR=`readconfigvar "$ARC_RUNTIME_CONFIG" controldir arex` # It is easier to handle root user through empty value. if [ "$USERNAME" = "root" ] ; then USERNAME="" fi if [ "$GROUPNAME" = "root" ] ; then GROUPNAME="" fi DHPARAM_PATH='' if [ ! -z "$CONTROLDIR" ] ; then DHPARAM_PATH="$CONTROLDIR/dhparam.pem" if [ -f "$DHPARAM_PATH" ] ; then openssl dhparam -in "$DHPARAM_PATH" -noout &>/dev/null if [ $? -ne 0 ] ; then rm -f "$DHPARAM_PATH" fi fi if [ ! 
-f "$DHPARAM_PATH" ] ; then /bin/bash -c "umask 0177 ; openssl dhparam -out '$DHPARAM_PATH' 4096" &>/dev/null & disown; fi fi # Exporting collected variables export X509_USER_CERT export X509_USER_KEY export X509_CERT_DIR export X509_CERT_POLICY export GLOBUS_TCP_PORT_RANGE export GLOBUS_UDP_PORT_RANGE export HOSTNAME if [ ! -z "$GRIDTMPDIR" ] ; then export TMPDIR="$GRIDTMPDIR" ; fi if [ ! -z "$CIPHERS_STRING" ] ; then export ARC_CIPHERS_STRING="$CIPHERS_STRING" ; fi # Web Service configuration arex_endpoint="" arex_mount_point="" arex_proto="" arex_host="" arex_port="" arex_path="" arex_service_plexer="" ws_present=`testconfigblock "$ARC_RUNTIME_CONFIG" arex/ws` arex_present=`testconfigblock "$ARC_RUNTIME_CONFIG" arex/ws/jobs` if [ "$ws_present" = 'true' ] ; then WSLOGFILE=`readconfigvar "$ARC_RUNTIME_CONFIG" logfile arex/ws` MAX_JOB_CONTROL_REQUESTS=`readconfigvar "$ARC_RUNTIME_CONFIG" max_job_control_requests arex/ws` MAX_INFOSYS_REQUESTS=`readconfigvar "$ARC_RUNTIME_CONFIG" max_infosys_requests arex/ws` MAX_DATA_TRANSFER_REQUESTS=`readconfigvar "$ARC_RUNTIME_CONFIG" max_data_transfer_requests arex/ws` USERAUTH_BLOCK='arex/ws/jobs' arex_mount_point=`readconfigvar "$ARC_RUNTIME_CONFIG" wsurl arex/ws` arex_proto=`echo "$arex_mount_point" | sed 's/^\([^:]*\):\/\/.*/\1/;t;s/.*//'` arex_host=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/\([^:\/]*\).*/\1/;t;s/.*//'` arex_port=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/[^:]*:\([^\/]*\)\(.*\)/\1/;t;s/.*//'` arex_path=`echo "$arex_mount_point" | sed 's/^[^:]*:\/\/[^\/]*\/\(.*\)/\1/;t;s/.*//'` if [ "$arex_proto" = "https" ] ; then if [ -z "$arex_port" ] ; then arex_port="443" fi elif [ "$arex_proto" = "http" ] ; then if [ -z "$arex_port" ] ; then arex_port="80" fi else log_failure_msg "Unsupported protocol '$arex_proto' for WS interface URL" exit 1 fi arex_endpoint="$arex_mount_point" arex_path="/$arex_path" mkdir_for_user `dirname "$WSLOGFILE"` "$USERNAME" "$GROUPNAME" mkfile_for_user "$WSLOGFILE" "$USERNAME" "$GROUPNAME" else log_failure_msg "The A-REX/EMIES WS interface must be enabled for this service" exit 1 fi if [ "$arex_present" = 'true' ] ; then if [ "$mapping_present" != 'true' ] ; then log_failure_msg "For A-REX/EMIES WS interface to work mapping must be enabled" exit 1 fi arex_service_plexer="^$arex_path" fi # candypond candypond_plexer="" candypond="" use_candypond=`testconfigblock "$ARC_RUNTIME_CONFIG" arex/ws/candypond` if [ "$use_candypond" = "true" ]; then if [ "$ws_present" != 'true' ] ; then log_failure_msg "WS interface must be turned on to use candypond" exit 1 fi candypond_plexer="^$arex_path/candypond" candypond_shc=" $ARC_RUNTIME_CONFIG arex/ws/candypond " if [ "$mapping_present" = 'true' ]; then candypond_shc="$candypond_shc $ARC_RUNTIME_CONFIG $USERMAP_BLOCK $ARC_RUNTIME_CONFIG true " fi candypond=" $candypond_shc " fi service_mail="" if [ ! -z "$SERVICEMAIL" ] ; then service_mail="$SERVICEMAIL" fi AREX_CONFIG=`mktemp -t arex.xml.XXXXXX` if [ -z "$AREX_CONFIG" ] ; then log_failure_msg "Failed to create temporary file" exit 1 fi CMD="$CMD -c $AREX_CONFIG" case "$LOGLEVEL" in 0) LOGLEVEL="FATAL" ;; 1) LOGLEVEL="ERROR" ;; 2) LOGLEVEL="WARNING" ;; 3) LOGLEVEL="INFO" ;; 4) LOGLEVEL="VERBOSE" ;; 5) LOGLEVEL="DEBUG" ;; *) LOGLEVEL="INFO" ;; esac if [ "$WATCHDOG" = "yes" ] ; then WATCHDOG="true" else WATCHDOG="false" fi VOMS_PROCESSING=${VOMS_PROCESSING:-standard} if [ ! -z "$USERNAME" ] ; then CMD="$CMD -u $USERNAME" fi if [ ! 
-z "$GROUPNAME" ] ; then CMD="$CMD -g $GROUPNAME" fi # Authorization and user mapping for A-REX/EMIES emies_legacy_shc="" # emies_legacy_shc=" # # # # # # $ARC_RUNTIME_CONFIG # $USERAUTH_BLOCK # # # #" if [ "$mapping_present" = 'true' ]; then emies_legacy_shc="$emies_legacy_shc $ARC_RUNTIME_CONFIG $USERMAP_BLOCK " fi authtokens_plugin="arcshcotokens" authtokens_handler=" " ciphers_server_order="" if [ "$CIPHERS_SERVER_ORDER" = 'yes' ] ; then ciphers_server_order="ServerOrder=\"true\"" fi ciphers_xml="" if [ ! -z "$CIPHERS_STRING" ] || [ ! -z "$ciphers_server_order" ] ; then ciphers_xml="$CIPHERS_STRING" fi protocols_xml="" while [ ! -z "$PROTOCOLS_STRING" ] ; do PROTOCOL=`echo $PROTOCOLS_STRING | sed 's/^ *\([^ ]*\) *.*$/\1/'` PROTOCOLS_STRING=`echo $PROTOCOLS_STRING | sed 's/^ *[^ ]* *//'` if [ ! -z "$PROTOCOL" ] ; then protocols_xml="$protocols_xml$PROTOCOL" fi done curve_xml="" if [ ! -z "$CURVE_STRING" ] ; then curve_xml="$CURVE_STRING" fi dhparam_xml="" if [ ! -z "$DHPARAM_PATH" ] ; then dhparam_xml="$DHPARAM_PATH" fi # A-Rex with WS interface over HTTP AREXCFGWS="\ $PID_FILE $WSLOGFILE $LOGLEVEL $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ mcctcp mcctls mcchttp mccsoap arex identitymap arcshc arcshclegacy $authtokens_plugin $arex_port POST $authtokens_handler true $candypond_plexer $arex_service_plexer $emies_legacy_shc $arex_endpoint $service_mail $ARC_RUNTIME_CONFIG none $MAX_INFOSYS_REQUESTS $MAX_JOB_CONTROL_REQUESTS $MAX_DATA_TRANSFER_REQUESTS $candypond " # A-Rex with WS interface over HTTPS AREXCFGWSS="\ $PID_FILE $WSLOGFILE $LOGLEVEL $WATCHDOG $ARC_LOCATION/@pkglibsubdir@/ mcctcp mcctls mcchttp mccsoap arex identitymap arcshc arcshclegacy $authtokens_plugin $arex_port $X509_USER_KEY $X509_USER_CERT $x509_cert_config $VOMS_PROCESSING false $ciphers_xml $protocols_xml $curve_xml $dhparam_xml POST $authtokens_handler
    Strict-Transport-Security: max-age=31536000; includeSubDomains
    $ARC_RUNTIME_CONFIG
    true $candypond_plexer $arex_service_plexer $service_mail $emies_legacy_shc $arex_endpoint $ARC_RUNTIME_CONFIG none $MAX_INFOSYS_REQUESTS $MAX_JOB_CONTROL_REQUESTS $MAX_DATA_TRANSFER_REQUESTS $candypond
    " if [ "$arex_proto" = 'http' ] ; then echo "$AREXCFGWS" > "$AREX_CONFIG" else echo "$AREXCFGWSS" > "$AREX_CONFIG" fi # setup logfile in case it is not there yet if [ ! -z "$USERNAME" ] ; then if [ ! -z "$GROUPNAME" ] ; then [ -f $AREX_CONFIG ] && chown "$USERNAME:$GROUPNAME" "$AREX_CONFIG" else [ -f $AREX_CONFIG ] && chown "$USERNAME" "$AREX_CONFIG" fi fi # prepare to collect crash information COREDIR=`dirname "${LOGFILE}"`/arccore mkdir_for_user "${COREDIR}" "$USERNAME" "$GROUPNAME" cd "${COREDIR}" ulimit -c unlimited } validate() { CHECK_CMD=$ARC_LOCATION/sbin/arcctl if [ ! -x $CHECK_CMD ]; then log_failure_msg "Could not find or execute arcctl tool" return 1 fi eval "$CHECK_CMD $@ --config $ARC_CONFIG service verify" RETVAL=$? return $RETVAL } if [ "$RUN" != "yes" ] ; then echo "a-rex-ws disabled, please adjust the configuration to your needs " echo "and then set RUN to 'yes' in /etc/default/arc-arex-ws to enable it." exit 0 fi prepare echo "Validating A-REX setup..." >> "$WSLOGFILE" validate >> "$WSLOGFILE" 2>&1 RETVAL=$? if [ $RETVAL != 0 ]; then # Run validator again to print errors to stdout validate -d ERROR log_failure_msg "Configuration validation failed" exit 1 fi # Raise limit on number of file descriptors to max hlimit=`ulimit -H -n` if [ ! -z "$hlimit" ] ; then ulimit -S -n "$hlimit" 2>/dev/null fi now=`date '+[%Y:%m:%d %H:%M:%S]' 2>/dev/null` echo "$now Starting A-REX WS interfaces service executable..." >> "$WSLOGFILE" exec $CMD "$@" nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/internaljobplugin0000644000000000000000000000013215067751426022452 xustar0030 mtime=1759499030.453629402 30 atime=1759499034.764510185 30 ctime=1759499030.453629402 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/0000755000175000002070000000000015067751426024431 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/JobControllerPluginINTERNAL.h0000644000000000000000000000013215067751327027772 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.448627691 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.h0000644000175000002070000000412015067751327031671 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBCONTROLLERINTERNAL_H__ #define __ARC_JOBCONTROLLERINTERNAL_H__ #include using namespace Arc; namespace Arc{ class URL; } namespace ARex { class GMConfig; } namespace ARexINTERNAL { class INTERNALClient; class INTERNALClients; class JobStateINTERNAL; class JobControllerPluginINTERNAL : public Arc::JobControllerPlugin { public: JobControllerPluginINTERNAL(const UserConfig& usercfg, PluginArgument* parg) : JobControllerPlugin(usercfg, parg),clients(usercfg) { supportedInterfaces.push_back("org.nordugrid.internal"); } ~JobControllerPluginINTERNAL() {} static Plugin* Instance(PluginArgument *arg) { JobControllerPluginArgument *jcarg = dynamic_cast(arg); return jcarg ? 
new JobControllerPluginINTERNAL(*jcarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual void UpdateJobs(std::list<Job*>& jobs, std::list<std::string>& IDsProcessed, std::list<std::string>& IDsNotProcessed, bool isGrouped = false) const; virtual bool CleanJobs(const std::list<Job*>& jobs, std::list<std::string>& IDsProcessed, std::list<std::string>& IDsNotProcessed, bool isGrouped = false) const; virtual bool CancelJobs(const std::list<Job*>& jobs, std::list<std::string>& IDsProcessed, std::list<std::string>& IDsNotProcessed, bool isGrouped = false) const; virtual bool RenewJobs(const std::list<Job*>& jobs, std::list<std::string>& IDsProcessed, std::list<std::string>& IDsNotProcessed, bool isGrouped = false) const; virtual bool ResumeJobs(const std::list<Job*>& jobs, std::list<std::string>& IDsProcessed, std::list<std::string>& IDsNotProcessed, bool isGrouped = false) const; virtual bool GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const; virtual bool GetJobDescription(const Job& job, std::string& desc_str) const; private: INTERNALClients clients; static Logger logger; }; } // namespace ARexINTERNAL #endif // __ARC_JOBCONTROLLERINTERNAL_H__ nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024563 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.436486924 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/Makefile.am0000644000175000002070000000333415067751327026470 0ustar00mockbuildmock00000000000000pkglib_LTLIBRARIES = libaccINTERNAL.la libaccINTERNAL_la_SOURCES = INTERNALClient.cpp INTERNALClient.h \ JobStateINTERNAL.cpp JobStateINTERNAL.h \ SubmitterPluginINTERNAL.cpp SubmitterPluginINTERNAL.h \ JobControllerPluginINTERNAL.cpp JobControllerPluginINTERNAL.h \ JobListRetrieverPluginINTERNAL.cpp JobListRetrieverPluginINTERNAL.h \ TargetInformationRetrieverPluginINTERNAL.cpp TargetInformationRetrieverPluginINTERNAL.h \ DescriptorsINTERNAL.cpp \ ../job.cpp ../tools.cpp libaccINTERNAL_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libaccINTERNAL_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ ../grid-manager/conf/libconf.la \ ../grid-manager/jobs/libjobs.la \ ../grid-manager/files/libfiles.la \ ../grid-manager/log/liblog.la \ ../grid-manager/run/librun.la \ ../grid-manager/mail/libmail.la \ ../delegation/libdelegation.la # $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ # $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ # $(top_builddir)/src/hed/libs/compute/libarccompute.la \ # $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ # $(top_builddir)/src/services/a-rex/grid-manager/libgridmanager.la \ # $(top_builddir)/src/services/a-rex/delegation/libdelegation.la \ # $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GLOBUS_JOBPLUGIN_LIBS) libaccINTERNAL_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/INTERNALClient.h0000644000000000000000000000013115067751327025252 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 29 ctime=1759499030.44145881 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/INTERNALClient.h0000644000175000002070000001277715067751327027163
0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __INTERNAL_CLIENT__ #define __INTERNAL_CLIENT__ #include #include #include #include #include #include #include #include #include "../job.h" #include "../delegation/DelegationStore.h" #include "../delegation/DelegationStores.h" #include "../grid-manager/jobs/GMJob.h" /*Note to self: must check all variables if they should be public or private */ using namespace Arc; namespace ARexINTERNAL { #define DEFAULT_JOB_RSL_MAX_SIZE (5*1024*1024) class INTERNALClient; class INTERNALJob { friend class INTERNALClient; private: std::string id; std::string state; std::string sessiondir; std::string controldir; std::string delegation_id; Arc::URL manager; Arc::URL resource; std::list stagein; std::list session; std::list stageout; public: INTERNALJob& operator=(const Arc::Job& job); void toJob(INTERNALClient* client, INTERNALJob* localjob, Arc::Job& j) const; void toJob(INTERNALClient* client, Arc::Job & job, Arc::Logger& logger) const; //added to be able to convert an arexjob to an INTERNALJob INTERNALJob(/*const */ARex::ARexJob& _arexjob, const ARex::GMConfig& _config, std::string const& _deleg_id); INTERNALJob(void){}; std::string const GetId() const { return id; } std::list const& GetStagein() const { return stagein; } std::list const& GetSession() const { return session; } std::list const& GetStageout() const { return stageout; } }; //! A client class for the INTERNAL service. /*! This class is a client for the INTERNAL service. It provides methods for a selected set of operations on an INTERNAL service: - Job submission - Job status queries - Job termination */ class INTERNALClient { friend class INTERNALJob; public: //! The constructor for the INTERNALClient class. /*! This is the constructor for the INTERNALClient class. It creates an INTERNAL client that corresponds to a specific INTERNAL service. @param url The URL of the INTERNAL service. @param usercfg Configuration object. */ INTERNALClient(void); INTERNALClient(const Arc::UserConfig& usercfg); INTERNALClient(const Arc::URL& url, const Arc::UserConfig& usercfg); //! The destructor. /*! This is the destructor. It does what destructors usually do, cleans up... */ ~INTERNALClient(); ARex::GMConfig const * GetConfig() const { return config; } const std::string& failure(void) const { return lfailure; } bool CreateDelegation(std::string& deleg_id); bool RenewDelegation(std::string const& deleg_id); //! Submit a job. //TO-DO Fix description /*! This method submits a job to the INTERNAL service corresponding to this client instance. It does not do data staging. @param jobdesc A string containing the job description. @param job The container for attributes identifying the submitted job. @param state The current state of the submitted job. @return true on success */ bool submit(const std::list& jobdescs, std::list& localjobs_, const std::string delegation_id = ""); bool submit(const Arc::JobDescription& jobdesc, INTERNALJob& localjob, const std::string delegation_id = ""); bool putFiles(INTERNALJob const& localjob, std::list const& sources, std::list const& destinations); bool info(std::list& jobids, std::list& jobids_found); bool info(INTERNALJob& job, Arc::Job& info); bool clean(const std::string& jobid); bool kill(const std::string& jobid); bool restart(const std::string& jobid); bool list(std::list& jobs); //! Request the status of a service. /*! This method queries the INTERNAL service for its status. @param status The XML document representing the status of the service.
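A minimal usage sketch (hypothetical; assumes a populated Arc::UserConfig and that the caller includes <iostream>):
@code
Arc::UserConfig usercfg;
ARexINTERNAL::INTERNALClient client(usercfg);
Arc::XMLNode status;
if (client.sstat(status)) {
  std::string xml;
  status.GetXML(xml, true); // user-friendly formatting
  std::cout << xml << std::endl;
}
@endcode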
@return true on success */ bool sstat(Arc::XMLNode& xmldoc); private: Arc::URL ce; std::string endpoint; Arc::UserConfig usercfg; std::string cfgfile; Arc::User user; std::vector session_dirs; std::vector session_dirs_non_draining; ARex::GMConfig *config; ARex::ARexGMConfig *arexconfig; bool SetAndLoadConfig(); bool SetEndPoint(); //bool SetGMDirs(); bool MapLocalUser(); bool PrepareARexConfig(); //bool PreProcessJob(ARex::JobDescriptionHandler& job_desc_handler, ARex::JobLocalDescription& job_desc); bool readonly; unsigned int job_rsl_max_size; bool fill_local_jobdesc(Arc::XMLNode& descr); std::string error_description;//should maybe be other type, check in jobplugin and relat std::string get_error_description() const; ARex::DelegationStore::DbType deleg_db_type; //! A logger for the A-REX client. /*! This is a logger to which all logging messages from the INTERNAL client are sent. */ static Arc::Logger logger; ARex::DelegationStores deleg_stores; std::list avail_queues; const char* matched_vo; std::string lfailure; }; class INTERNALClients { private: std::multimap clients_; const Arc::UserConfig& usercfg_; public: INTERNALClients(const Arc::UserConfig& usercfg); ~INTERNALClients(void); }; } #endif nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356024576 xustar0030 mtime=1759498990.258700326 30 atime=1759499018.055256287 30 ctime=1759499030.437710598 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/Makefile.in0000644000175000002070000012765215067751356026515 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/internaljobplugin ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! 
-d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkglibdir)" LTLIBRARIES = $(pkglib_LTLIBRARIES) libaccINTERNAL_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ ../grid-manager/conf/libconf.la \ ../grid-manager/jobs/libjobs.la \ ../grid-manager/files/libfiles.la \ ../grid-manager/log/liblog.la ../grid-manager/run/librun.la \ ../grid-manager/mail/libmail.la ../delegation/libdelegation.la am_libaccINTERNAL_la_OBJECTS = libaccINTERNAL_la-INTERNALClient.lo \ libaccINTERNAL_la-JobStateINTERNAL.lo \ libaccINTERNAL_la-SubmitterPluginINTERNAL.lo \ libaccINTERNAL_la-JobControllerPluginINTERNAL.lo \ libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.lo \ libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.lo \ libaccINTERNAL_la-DescriptorsINTERNAL.lo \ libaccINTERNAL_la-job.lo libaccINTERNAL_la-tools.lo libaccINTERNAL_la_OBJECTS = $(am_libaccINTERNAL_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libaccINTERNAL_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) \ $(libaccINTERNAL_la_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ ./$(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-job.Plo \ ./$(DEPDIR)/libaccINTERNAL_la-tools.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) 
AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libaccINTERNAL_la_SOURCES) DIST_SOURCES = $(libaccINTERNAL_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = 
@CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = 
@PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ pkglib_LTLIBRARIES = libaccINTERNAL.la libaccINTERNAL_la_SOURCES = INTERNALClient.cpp INTERNALClient.h \ JobStateINTERNAL.cpp JobStateINTERNAL.h \ SubmitterPluginINTERNAL.cpp SubmitterPluginINTERNAL.h 
\ JobControllerPluginINTERNAL.cpp JobControllerPluginINTERNAL.h \ JobListRetrieverPluginINTERNAL.cpp JobListRetrieverPluginINTERNAL.h \ TargetInformationRetrieverPluginINTERNAL.cpp TargetInformationRetrieverPluginINTERNAL.h \ DescriptorsINTERNAL.cpp \ ../job.cpp ../tools.cpp libaccINTERNAL_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libaccINTERNAL_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/security/libarcsecurity.la \ ../grid-manager/conf/libconf.la \ ../grid-manager/jobs/libjobs.la \ ../grid-manager/files/libfiles.la \ ../grid-manager/log/liblog.la \ ../grid-manager/run/librun.la \ ../grid-manager/mail/libmail.la \ ../delegation/libdelegation.la # $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ # $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ # $(top_builddir)/src/hed/libs/compute/libarccompute.la \ # $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ # $(top_builddir)/src/hed/libs/message/libarcmessage.la \ # $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ # $(top_builddir)/src/services/a-rex/grid-manager/libgridmanager.la \ # $(top_builddir)/src/services/a-rex/delegation/libdelegation.la \ # $(LIBXML2_LIBS) $(GLIBMM_LIBS) $(GLOBUS_JOBPLUGIN_LIBS) libaccINTERNAL_la_LDFLAGS = -no-undefined -avoid-version -module all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/internaljobplugin/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/internaljobplugin/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \ } uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \ done clean-pkglibLTLIBRARIES: -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libaccINTERNAL.la: $(libaccINTERNAL_la_OBJECTS) $(libaccINTERNAL_la_DEPENDENCIES) $(EXTRA_libaccINTERNAL_la_DEPENDENCIES) $(AM_V_CXXLD)$(libaccINTERNAL_la_LINK) -rpath $(pkglibdir) $(libaccINTERNAL_la_OBJECTS) $(libaccINTERNAL_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-job.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccINTERNAL_la-tools.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ 
$(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libaccINTERNAL_la-INTERNALClient.lo: INTERNALClient.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-INTERNALClient.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Tpo -c -o libaccINTERNAL_la-INTERNALClient.lo `test -f 'INTERNALClient.cpp' || echo '$(srcdir)/'`INTERNALClient.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Tpo $(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='INTERNALClient.cpp' object='libaccINTERNAL_la-INTERNALClient.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-INTERNALClient.lo `test -f 'INTERNALClient.cpp' || echo '$(srcdir)/'`INTERNALClient.cpp libaccINTERNAL_la-JobStateINTERNAL.lo: JobStateINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-JobStateINTERNAL.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Tpo -c -o libaccINTERNAL_la-JobStateINTERNAL.lo `test -f 'JobStateINTERNAL.cpp' || echo '$(srcdir)/'`JobStateINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Tpo $(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobStateINTERNAL.cpp' object='libaccINTERNAL_la-JobStateINTERNAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-JobStateINTERNAL.lo `test -f 'JobStateINTERNAL.cpp' || echo '$(srcdir)/'`JobStateINTERNAL.cpp libaccINTERNAL_la-SubmitterPluginINTERNAL.lo: SubmitterPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-SubmitterPluginINTERNAL.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Tpo -c -o libaccINTERNAL_la-SubmitterPluginINTERNAL.lo `test -f 'SubmitterPluginINTERNAL.cpp' || echo '$(srcdir)/'`SubmitterPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Tpo $(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='SubmitterPluginINTERNAL.cpp' object='libaccINTERNAL_la-SubmitterPluginINTERNAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-SubmitterPluginINTERNAL.lo `test -f 'SubmitterPluginINTERNAL.cpp' || echo '$(srcdir)/'`SubmitterPluginINTERNAL.cpp libaccINTERNAL_la-JobControllerPluginINTERNAL.lo: JobControllerPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-JobControllerPluginINTERNAL.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Tpo -c -o libaccINTERNAL_la-JobControllerPluginINTERNAL.lo `test -f 'JobControllerPluginINTERNAL.cpp' || echo '$(srcdir)/'`JobControllerPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Tpo $(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobControllerPluginINTERNAL.cpp' object='libaccINTERNAL_la-JobControllerPluginINTERNAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-JobControllerPluginINTERNAL.lo `test -f 'JobControllerPluginINTERNAL.cpp' || echo '$(srcdir)/'`JobControllerPluginINTERNAL.cpp libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.lo: JobListRetrieverPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Tpo -c -o 
libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.lo `test -f 'JobListRetrieverPluginINTERNAL.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Tpo $(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobListRetrieverPluginINTERNAL.cpp' object='libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.lo `test -f 'JobListRetrieverPluginINTERNAL.cpp' || echo '$(srcdir)/'`JobListRetrieverPluginINTERNAL.cpp libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.lo: TargetInformationRetrieverPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Tpo -c -o libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.lo `test -f 'TargetInformationRetrieverPluginINTERNAL.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Tpo $(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='TargetInformationRetrieverPluginINTERNAL.cpp' object='libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.lo `test -f 'TargetInformationRetrieverPluginINTERNAL.cpp' || echo '$(srcdir)/'`TargetInformationRetrieverPluginINTERNAL.cpp libaccINTERNAL_la-DescriptorsINTERNAL.lo: DescriptorsINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-DescriptorsINTERNAL.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Tpo -c -o libaccINTERNAL_la-DescriptorsINTERNAL.lo `test -f 'DescriptorsINTERNAL.cpp' || echo '$(srcdir)/'`DescriptorsINTERNAL.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Tpo $(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DescriptorsINTERNAL.cpp' object='libaccINTERNAL_la-DescriptorsINTERNAL.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-DescriptorsINTERNAL.lo `test -f 'DescriptorsINTERNAL.cpp' || echo '$(srcdir)/'`DescriptorsINTERNAL.cpp libaccINTERNAL_la-job.lo: ../job.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-job.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-job.Tpo -c -o libaccINTERNAL_la-job.lo `test -f '../job.cpp' || echo '$(srcdir)/'`../job.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-job.Tpo $(DEPDIR)/libaccINTERNAL_la-job.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='../job.cpp' object='libaccINTERNAL_la-job.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-job.lo `test -f '../job.cpp' || echo '$(srcdir)/'`../job.cpp libaccINTERNAL_la-tools.lo: ../tools.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -MT libaccINTERNAL_la-tools.lo -MD -MP -MF $(DEPDIR)/libaccINTERNAL_la-tools.Tpo -c -o libaccINTERNAL_la-tools.lo `test -f '../tools.cpp' || echo '$(srcdir)/'`../tools.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccINTERNAL_la-tools.Tpo $(DEPDIR)/libaccINTERNAL_la-tools.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='../tools.cpp' object='libaccINTERNAL_la-tools.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccINTERNAL_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccINTERNAL_la-tools.lo `test -f '../tools.cpp' || echo '$(srcdir)/'`../tools.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ 
case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: for dir in "$(DESTDIR)$(pkglibdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-job.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-tools.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-pkglibLTLIBRARIES install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libaccINTERNAL_la-DescriptorsINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-INTERNALClient.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-JobControllerPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-JobListRetrieverPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-JobStateINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-SubmitterPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-TargetInformationRetrieverPluginINTERNAL.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-job.Plo -rm -f ./$(DEPDIR)/libaccINTERNAL_la-tools.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-pkglibLTLIBRARIES .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-pkglibLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkglibLTLIBRARIES install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-pkglibLTLIBRARIES .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/JobStateINTERNAL.h0000644000000000000000000000013215067751327025550 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.443922421 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/JobStateINTERNAL.h0000644000175000002070000000062415067751327027454 0ustar00mockbuildmock00000000000000#ifndef __ARC_JOBSTATEINTERNAL_H__ #define __ARC_JOBSTATEINTERNAL_H__ #include namespace ARexINTERNAL { class JobStateINTERNAL : public Arc::JobState { public: JobStateINTERNAL(const std::string& state) : Arc::JobState(state, &StateMap) {} static JobState::StateType StateMap(const std::string& state); }; } #endif // __ARC_JOBSTATEINTERNAL_H__ nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/TargetInformationRetrieverPlugin0000644000000000000000000000031115067751327031154 xustar00111 path=nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/TargetInformationRetrieverPluginINTERNAL.cpp 30 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.452335709 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/TargetInformationRetrieverPluginINTERNAL.cp0000644000175000002070000001022615067751327034616 0ustar00mockbuildmock00000000000000 // -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "JobStateINTERNAL.h" #include "INTERNALClient.h" #include "TargetInformationRetrieverPluginINTERNAL.h" using namespace Arc; namespace ARexINTERNAL { //used when the --direct option is not issued with arcsub Logger TargetInformationRetrieverPluginINTERNAL::logger(Logger::getRootLogger(), "TargetInformationRetrieverPlugin.INTERNAL"); bool TargetInformationRetrieverPluginINTERNAL::isEndpointNotSupported(const Endpoint& endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return (proto != "file"); } return (endpoint.URLString != "localhost"); // TODO: consider more strict way to allow only file://localhost } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "file://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if(proto != "file") return URL(); } return service; } EndpointQueryingStatus TargetInformationRetrieverPluginINTERNAL::Query(const UserConfig& uc, const Endpoint& cie, std::list& csList, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); //To-decide: should INTERNAL plugin information be visible in info.xml? 
It can not be used outside, so does not seem to make sense to have it added in info.xml URL url(CreateURL(cie.URLString)); if (!url) { return s; } //To get hold of general service information INTERNALClient ac(url, uc); XMLNode servicesQueryResponse; if (!ac.sstat(servicesQueryResponse)) { return s; } GLUE2::ParseExecutionTargets(servicesQueryResponse, csList); if(!csList.empty()){ if(csList.front().AdminDomain->Name.empty()) csList.front().AdminDomain->Name = url.Host(); csList.front()->InformationOriginEndpoint = cie; //Add the INTERNAL computingendpointtype ComputingEndpointType newCe; newCe->ID = url.Host(); newCe->URLString = url.str(); newCe->InterfaceName = "org.nordugrid.internal"; newCe->HealthState = "ok"; newCe->QualityLevel = "testing";//testing for now, production when in production newCe->Technology = "direct"; newCe->Capability.insert("executionmanagement.jobcreation"); newCe->Capability.insert("executionmanagement.jobdescription"); newCe->Capability.insert("executionmanagement.jobmanagement"); newCe->Capability.insert("information.discovery.job"); newCe->Capability.insert("information.discovery.resource"); newCe->Capability.insert("information.lookup.job"); newCe->Capability.insert("security.delegation"); // std::string ID; // std::string HealthStateInfo; // std::list InterfaceVersion; // std::list InterfaceExtension; // std::list SupportedProfile; // std::string Implementor; // Software Implementation; // std::string ServingState; // std::string IssuerCA; // std::list TrustedCA; // Time DowntimeEnds; // std::string Staging; // int TotalJobs; // int RunningJobs; // int WaitingJobs; // int StagingJobs; // int SuspendedJobs; // int PreLRMSWaitingJobs; //To-DO Assuming there is only one computingservice ComputingServiceType cs = csList.front(); std::map ce = cs.ComputingEndpoint; csList.front().ComputingEndpoint.insert(std::pair(ce.size(), newCe)); } if (!csList.empty()) s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/SubmitterPluginINTERNAL.h0000644000000000000000000000013215067751327027172 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.446275246 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.h0000644000175000002070000000307715067751327031103 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_SUBMITTERPLUGININTERNAL_H__ #define __ARC_SUBMITTERPLUGININTERNAL_H__ #include #include #include #include #include #include "INTERNALClient.h" using namespace Arc; namespace ARexINTERNAL{ //class JobStateINTERNAL; class SubmissionStatus; class SubmitterPluginINTERNAL : public SubmitterPlugin { public: SubmitterPluginINTERNAL(const UserConfig& usercfg, PluginArgument* parg) : SubmitterPlugin(usercfg, parg),clients(usercfg) { supportedInterfaces.push_back("org.nordugrid.internal"); } ~SubmitterPluginINTERNAL() { /*deleteAllClients();*/ } static Plugin* Instance(PluginArgument *arg) { SubmitterPluginArgument *subarg = dynamic_cast(arg); return subarg ? 
new SubmitterPluginINTERNAL(*subarg, arg) : NULL; } virtual bool isEndpointNotSupported(const std::string& endpoint) const; virtual Arc::SubmissionStatus Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted); virtual Arc::SubmissionStatus Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted); private: INTERNALClients clients; bool getDelegationID(const URL& durl, std::string& delegation_id); }; } // namespace ARexINTERNAL #endif // __ARC_SUBMITTERPLUGININTERNAL_H__ nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/SubmitterPluginINTERNAL.cpp0000644000000000000000000000013215067751327027525 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.445167084 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/SubmitterPluginINTERNAL.cpp0000644000175000002070000001167515067751327031441 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include //#include "JobStateINTERNAL.h" #include "SubmitterPluginINTERNAL.h" using namespace Arc; namespace ARexINTERNAL { bool SubmitterPluginINTERNAL::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "file"; } bool SubmitterPluginINTERNAL::getDelegationID(const URL& durl, std::string& delegation_id) { if(!durl) { logger.msg(INFO, "Failed to delegate credentials to server - no delegation interface found"); return false; } INTERNALClient ac(durl,*usercfg); if(!ac.CreateDelegation(delegation_id)) { logger.msg(INFO, "Failed to delegate credentials to server - %s",ac.failure()); return false; } return true; } Arc::SubmissionStatus SubmitterPluginINTERNAL::Submit(const std::list& jobdescs, const ExecutionTarget& et, EntityConsumer& jc, std::list& notSubmitted){ Arc::SubmissionStatus retval; std::string endpoint = et.ComputingEndpoint->URLString; retval = Submit(jobdescs, endpoint, jc, notSubmitted); return retval; } Arc::SubmissionStatus SubmitterPluginINTERNAL::Submit(const std::list& jobdescs, const std::string& endpoint, EntityConsumer& jc, std::list& notSubmitted) { //jobdescs as passed down from the client // TODO: this is multi step process. So having retries would be nice. // TODO: If delegation interface is not on same endpoint as submission interface this method is faulty. URL url((endpoint.find("://") == std::string::npos ? "file://" : "") + endpoint, false); /*for accessing jobs*/ /*Preparation of jobdescription*/ Arc::SubmissionStatus retval; std::string delegation_id; INTERNALClient ac(url,*usercfg); for (std::list::const_iterator itJ = jobdescs.begin(); itJ != jobdescs.end(); ++itJ) { //Calls JobDescription.Prepare, which Check for identical file names. and if executable and input is contained in the file list. 
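// A minimal sketch of the per-description flow implemented below (names as in
// this file):
//
//   Arc::JobDescription prepared(*itJ);     // work on a copy of the description
//   if (!prepared.Prepare()) {
//     // reject: recorded in notSubmitted with DESCRIPTION_NOT_SUBMITTED
//   }
//
// Prepare() verifies that file names are unique and that the executable and
// input files are present in the file list; failing it is the first of several
// points below where a description is diverted into notSubmitted.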
JobDescription preparedjobdesc(*itJ); if (!preparedjobdesc.Prepare()) { logger.msg(INFO, "Failed preparing job description"); notSubmitted.push_back(&*itJ); retval |= Arc::SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } bool need_delegation = false; std::list upload_sources; std::list upload_destinations; /*Preparation of input files and outputfiles */ for(std::list::const_iterator itIF = preparedjobdesc.DataStaging.InputFiles.begin(); itIF != preparedjobdesc.DataStaging.InputFiles.end(); ++itIF) { if(!itIF->Sources.empty()) { if(itIF->Sources.front().Protocol() == "file") { upload_sources.push_back(itIF->Sources.front().Path()); upload_destinations.push_back(itIF->Name); } else { need_delegation = true; } } } for(std::list::const_iterator itOF = itJ->DataStaging.OutputFiles.begin(); itOF != itJ->DataStaging.OutputFiles.end() && !need_delegation; ++itOF) { if((!itOF->Targets.empty()) || (itOF->Name[0] == '@')) { // ARC specific - dynamic list of output files need_delegation = true; } } /*end preparation of input and output files */ if (need_delegation && delegation_id.empty()) { if (!getDelegationID(url, delegation_id)) { notSubmitted.push_back(&*itJ); retval |= Arc::SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } } std::list localjobs; std::list preparedjobdescs; preparedjobdescs.push_back(preparedjobdesc); if((!ac.submit(preparedjobdescs, localjobs, delegation_id)) || (localjobs.empty())) { logger.msg(INFO, "Failed submitting job description"); notSubmitted.push_back(&*itJ); retval |= Arc::SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } if(!upload_sources.empty()) { if(!ac.putFiles(localjobs.front(), upload_sources, upload_destinations)) { notSubmitted.push_back(&*itJ); retval |= Arc::SubmissionStatus::DESCRIPTION_NOT_SUBMITTED; continue; } } Arc::Job job; localjobs.front().toJob(&ac,&(localjobs.front()),job); AddJobDetails(preparedjobdesc, job); jc.addEntity(job); }//end loop over jobdescriptions return retval; } } // namespace ARexINTERNAL nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/JobStateINTERNAL.cpp0000644000000000000000000000013215067751327026103 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.442707941 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/JobStateINTERNAL.cpp0000644000175000002070000000631015067751327030005 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "JobStateINTERNAL.h" /* Maps/translates a INTERNAL state - which is a state corresponding to the ARexJob state, hence GM-job state, to an ARC:JobState */ namespace ARexINTERNAL { Arc::JobState::StateType JobStateINTERNAL::StateMap(const std::string& state) { std::string state_ = Arc::lower(state); /* Infosys states (mapped from GM states): ACCEPTING ACCEPTED PREPARED SUBMITTING INLRMS: * KILLING EXECUTED KILLED FAILED GM states (either not mapped or somehow obtained directly): ACCEPTED PREPARING SUBMIT INLRMS CANCELING FINISHING FINISHED DELETED PENDING:* */ /// \mapname GM Grid Manager /// \mapnote Prefix "PENDING:" and spaces are ignored when mapping states. 
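// For illustration (derived from the mapping rules below): the raw GM state
// "PENDING:INLRMS: Q" first loses the "PENDING:" prefix and then all spaces,
// yielding "inlrms:q", which maps to JobState::QUEUING; similarly "INLRMS: *"
// becomes "inlrms:*" and is caught by the generic "inlrms" fallback further down.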
if (state_.substr(0,8) == "pending:") state_.erase(0,8); // remove spaces because sometimes we may have 'INLRMS: *' std::string::size_type p = 0; while((p = state_.find(' ',p)) != std::string::npos) state_.erase(p,1); /// \mapattr ACCEPTED -> ACCEPTED /// \mapattr ACCEPTING -> ACCEPTED if ((state_ == "accepted") || (state_ == "accepting")) return JobState::ACCEPTED; /// \mapattr PREPARING -> PREPARING /// \mapattr PREPARED -> PREPARING else if ((state_ == "preparing") || (state_ == "prepared")) return JobState::PREPARING; /// \mapattr SUBMIT -> SUBMITTING /// \mapattr SUBMITTING -> SUBMITTING else if ((state_ == "submit") || (state_ == "submitting")) return JobState::SUBMITTING; /// \mapattr INLRMS:Q -> QUEUING else if (state_ == "inlrms:q") return JobState::QUEUING; /// \mapattr INLRMS:R -> RUNNING else if (state_ == "inlrms:r") return JobState::RUNNING; /// \mapattr INLRMS:H -> HOLD else if (state_ == "inlrms:h") return JobState::HOLD; /// \mapattr INLRMS:S -> HOLD else if (state_ == "inlrms:s") return JobState::HOLD; /// \mapattr INLRMS:E -> FINISHING else if (state_ == "inlrms:e") return JobState::FINISHING; /// \mapattr INLRMS:O -> HOLD else if (state_ == "inlrms:o") return JobState::HOLD; /// \mapattr INLRMS* -> QUEUING else if (state_.substr(0,6) == "inlrms") return JobState::QUEUING; // expect worst ? /// \mapattr FINISHING -> FINISHING /// \mapattr KILLING -> FINISHING /// \mapattr CANCELING -> FINISHING /// \mapattr EXECUTED -> FINISHING else if ((state_ == "finishing") || (state_ == "killing") || (state_ == "canceling") || (state_ == "executed")) return JobState::FINISHING; /// \mapattr FINISHED -> FINISHED else if (state_ == "finished") return JobState::FINISHED; /// \mapattr KILLED -> KILLED else if (state_ == "killed") return JobState::KILLED; /// \mapattr FAILED -> FAILED else if (state_ == "failed") return JobState::FAILED; /// \mapattr DELETED -> DELETED else if (state_ == "deleted") return JobState::DELETED; /// \mapattr "" -> UNDEFINED else if (state_ == "") return JobState::UNDEFINED; /// \mapattr Any other state -> OTHER else return JobState::OTHER; } } nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/DescriptorsINTERNAL.cpp0000644000000000000000000000013215067751327026671 xustar0030 mtime=1759498967.763492101 30 atime=1759498967.869493711 30 ctime=1759499030.454444693 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/DescriptorsINTERNAL.cpp0000644000175000002070000000245215067751327030576 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include "SubmitterPluginINTERNAL.h" #include "JobControllerPluginINTERNAL.h" #include "JobListRetrieverPluginINTERNAL.h" #include "TargetInformationRetrieverPluginINTERNAL.h" extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "INTERNAL", "HED:SubmitterPlugin", "INTERNAL execution service", 0, &ARexINTERNAL::SubmitterPluginINTERNAL::Instance }, { "INTERNAL", "HED:JobControllerPlugin", "INTERNAL execution service", 0, &ARexINTERNAL::JobControllerPluginINTERNAL::Instance }, { "INTERNAL", "HED:TargetInformationRetrieverPlugin", "INTERNAL execution service", 0, &ARexINTERNAL::TargetInformationRetrieverPluginINTERNAL::Instance }, { "INTERNAL", "HED:JobListRetrieverPlugin", "INTERNAL execution service", 0, &ARexINTERNAL::JobListRetrieverPluginINTERNAL::Instance }, { NULL, NULL, NULL, 0, NULL } }; // Bug #3775 reports issues related to unloading this module. The source of the issue is not yet clear. 
// But taking into account complexity of linked libraries it is better to disable loading. extern "C" { void ARC_MODULE_CONSTRUCTOR_NAME(Glib::Module* module, Arc::ModuleManager* manager) { if(manager && module) { manager->makePersistent(module); }; } } nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/TargetInformationRetrieverPlugin0000644000000000000000000000013215067751327031155 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.453629402 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/TargetInformationRetrieverPluginINTERNAL.h0000644000175000002070000000234615067751327034447 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_TARGETINFORMATIONRETRIEVERINTERNAL_H__ #define __ARC_TARGETINFORMATIONRETRIEVERINTERNAL_H__ #include #include using namespace Arc; namespace Arc{ class Logger; class EndpointQueryingStatus; class ExecutionTarget; class URL; class UserConfig; class XMLNode; } namespace ARexINTERNAL { class INTERNALClient; class JobStateINTERNAL; class TargetInformationRetrieverPluginINTERNAL: public TargetInformationRetrieverPlugin { public: TargetInformationRetrieverPluginINTERNAL(PluginArgument* parg): TargetInformationRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.internal"); }; ~TargetInformationRetrieverPluginINTERNAL() {}; static Plugin* Instance(PluginArgument *arg) { return new TargetInformationRetrieverPluginINTERNAL(arg); }; virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_TARGETINFORMATIONRETRIEVERINTERNAL_H__ nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/INTERNALClient.cpp0000644000000000000000000000013215067751327025606 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.440201642 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/INTERNALClient.cpp0000644000175000002070000006127615067751327027524 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "../grid-manager/jobs/CommFIFO.h" #include "../grid-manager/jobs/JobDescriptionHandler.h" #include "../grid-manager/conf/GMConfig.h" #include "../grid-manager/files/ControlFileHandling.h" #include "JobStateINTERNAL.h" #include "INTERNALClient.h" //#include "../job.cpp" using namespace Arc; namespace ARexINTERNAL { Arc::Logger INTERNALClient::logger(Arc::Logger::rootLogger, "INTERNAL Client"); INTERNALClient::INTERNALClient(void) : config(NULL), arexconfig(NULL) { logger.msg(Arc::DEBUG,"Default INTERNAL client constructor"); if(!SetAndLoadConfig()){ logger.msg(Arc::ERROR,"Failed to load grid-manager configfile"); return; } if(!SetEndPoint()){ logger.msg(Arc::ERROR,"Failed to set INTERNAL endpoint"); return; } MapLocalUser(); PrepareARexConfig(); }; INTERNALClient::INTERNALClient(const Arc::UserConfig& usercfg) :usercfg(usercfg), config(NULL), arexconfig(NULL) { if(!SetAndLoadConfig()){ logger.msg(Arc::ERROR,"Failed to load grid-manager configfile"); return; } if(!SetEndPoint()){ logger.msg(Arc::ERROR,"Failed to set INTERNAL endpoint"); return; } MapLocalUser(); PrepareARexConfig(); }; //using this one from submitterpluginlocal INTERNALClient::INTERNALClient(const Arc::URL& url, const Arc::UserConfig& usercfg) :ce(url), 
usercfg(usercfg), config(NULL), arexconfig(NULL) { if(!SetAndLoadConfig()){ logger.msg(Arc::ERROR,"Failed to load grid-manager configfile"); return; } if(!SetEndPoint()){ logger.msg(Arc::ERROR,"Failed to set INTERNAL endpoint"); return; } MapLocalUser(); PrepareARexConfig(); }; INTERNALClient::~INTERNALClient() { delete config; delete arexconfig; } INTERNALJob::INTERNALJob(/*const */ARex::ARexJob& _arexjob, const ARex::GMConfig& config, std::string const& _deleg_id) :id(_arexjob.ID()), state((std::string)_arexjob.State()), sessiondir(_arexjob.SessionDir()), controldir(config.ControlDir()), delegation_id(_deleg_id) { stageout.push_back(_arexjob.SessionDir()); stagein.push_back(_arexjob.SessionDir()); } bool INTERNALClient::SetEndPoint(){ endpoint = config->ControlDir(); return true; } bool INTERNALClient::SetAndLoadConfig(){ cfgfile = ARex::GMConfig::GuessConfigFile(); if (cfgfile.empty()) { logger.msg(Arc::ERROR,"Failed to identify grid-manager config file"); return false; } // Push configuration through pre-parser in order to setup default values. // We are only interested in pidfile location because this is where // fully pre-processed configuration file resides. std::list parser_args; parser_args.push_back(Arc::ArcLocation::GetToolsDir() + "/arcconfig-parser"); parser_args.push_back("--config"); parser_args.push_back(cfgfile); parser_args.push_back("-b"); parser_args.push_back("arex"); parser_args.push_back("-o"); parser_args.push_back("pidfile"); Arc::Run parser(parser_args); std::string pidfile; parser.AssignStdout(pidfile); if((!parser.Start()) || (!parser.Wait())) { logger.msg(Arc::ERROR,"Failed to run configuration parser at %s.", parser_args.front()); return false; } if(parser.Result() != 0) { logger.msg(Arc::ERROR,"Parser failed with error code %i.", (int)parser.Result()); return false; } pidfile = Arc::trim(pidfile, "\r\n"); // parser adds EOLs struct stat st; if(!FileStat(pidfile, &st, true)) { logger.msg(Arc::ERROR,"No pid file is found at '%s'. Probably A-REX is not running.", pidfile); return false; } // Actual config file location cfgfile = pidfile; std::string::size_type dot_pos = cfgfile.find_last_of("./"); if((dot_pos != std::string::npos) && (cfgfile[dot_pos] == '.')) cfgfile.resize(dot_pos); cfgfile += ".cfg"; config = new ARex::GMConfig(cfgfile); config->SetDelegations(&deleg_stores); if(!config->Load()){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file from %s", cfgfile); return false; } ARex::DelegationStore::DbType deleg_db_type = ARex::DelegationStore::DbBerkeley; switch(config->DelegationDBType()) { case ARex::GMConfig::deleg_db_bdb: deleg_db_type = ARex::DelegationStore::DbBerkeley; break; case ARex::GMConfig::deleg_db_sqlite: deleg_db_type = ARex::DelegationStore::DbSQLite; break; }; deleg_stores.SetDbType(deleg_db_type); config->Print(); return true; } // Security attribute simulating information pulled from TLS layer. 
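// In a network deployment these items would come from the TLS MCC of the
// message chain; here they are reconstructed locally from the user credentials.
// A rough usage sketch (names as defined in this class; error handling omitted):
//
//   TLSSecAttr* attr = new TLSSecAttr(usercfg);
//   std::string dn = attr->get("IDENTITY");               // user subject
//   std::list<std::string> fqans = attr->getAll("VOMS");  // full VOMS FQANs
//
// Ownership is normally handed over to an Arc::Message (see MapLocalUser() below).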
class TLSSecAttr: public SecAttr { public: TLSSecAttr(Arc::UserConfig& usercfg) { Arc::Credential cred(usercfg); identity_ = cred.GetIdentityName(); Arc::VOMSTrustList trust_list; trust_list.AddRegex("^.*$"); std::vector voms; if(parseVOMSAC(cred, usercfg.CACertificatesDirectory(), usercfg.CACertificatePath(), usercfg.CAUseSystem(), usercfg.VOMSESPath()/*?*/, trust_list, voms, true, true)) { for(std::vector::const_iterator v = voms.begin(); v != voms.end();++v) { if(!(v->status & VOMSACInfo::Error)) { for(std::vector::const_iterator a = v->attributes.begin(); a != v->attributes.end();++a) { voms_.push_back(VOMSFQANToFull(v->voname,*a)); }; }; }; }; } virtual ~TLSSecAttr(void) { } virtual operator bool(void) const { return true; } virtual bool Export(SecAttrFormat format,XMLNode &val) const { return false; } virtual std::string get(const std::string& id) const { if(id == "IDENTITY") return identity_; std::list items = getAll(id); if(!items.empty()) return *items.begin(); return ""; } virtual std::list getAll(const std::string& id) const { if(id == "VOMS") { return voms_; }; return SecAttr::getAll(id); } std::string const& Identity() const { return identity_; } virtual std::map< std::string,std::list > getAll() const { std::map< std::string,std::list > all; all["IDENTITY"] = getAll("IDENTITY"); all["VOMS"] = getAll("VOMS"); return all; } protected: std::string identity_; // Subject of last non-proxy certificate std::list voms_; // VOMS attributes from the VOMS extension of proxy virtual bool equal(const SecAttr &b) const { return false; } }; bool INTERNALClient::MapLocalUser(){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } Arc::Credential cred(usercfg); // Here we need to simulate a message going through the chain of plugins. // Luckily we only need these SecHandler plugins: legacy.handler and legacy.map. // As the source of information, a "TLS" security attribute must be supplied with the following // information items: IDENTITY (user subject) and VOMS (VOMS FQANs).
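// Conceptually, the code below performs these steps (a condensed sketch,
// assuming all three plugins load successfully):
//
//   Arc::Message msg;
//   msg.Auth()->set("TLS", new TLSSecAttr(usercfg));          // information source
//   msg.Attributes()->set("TLS:IDENTITYDN", dn);              // for DN-reading plugins
//   gridmapper->Handle(&msg); handler->Handle(&msg); mapper->Handle(&msg);
//   std::string uname = msg.Attributes()->get("SEC:LOCALID"); // mapped local account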
// Load plugins Config factory_cfg; //(""); MCCLoader loader(factory_cfg); //factory_cfg.NewChild("ModuleManager").NewChild("Path") = Arc::ArcLocation::Get()+"/lib/arc"; //factory_cfg.NewChild("Plugins").NewChild("Name") = "arcshclegacy"; //PluginsFactory factory(factory_cfg); ChainContext& context(*static_cast(loader)); PluginsFactory& factory(*static_cast(context)); factory.load("arcshc"); factory.load("arcshclegacy"); factory.load("identitymap"); //Arc::ChainContext context(MCCLoader& loader); ArcSec::SecHandler* gridmapper(NULL); ArcSec::SecHandler* handler(NULL); ArcSec::SecHandler* mapper(NULL); { ArcSec::SecHandlerConfig xcfg("identity.map", "incoming"); Config cfg(xcfg /*, cfg.getFileName()*/); XMLNode pdp1 = cfg.NewChild("PDP"); pdp1.NewAttribute("name") = "allow.pdp"; pdp1.NewChild("LocalList") = "/etc/grid-security/grid-mapfile"; //XMLNode pdp2 = cfg.NewChild("PDP"); //pdp2.NewAttribute("allow.pdp"); //pdp2.NewChild("LocalName") = "nobody"; ArcSec::SecHandlerPluginArgument arg(&cfg, &context); Plugin* plugin = factory.get_instance(SecHandlerPluginKind, "identity.map", &arg); gridmapper = plugin?dynamic_cast(plugin):NULL; } { ArcSec::SecHandlerConfig xcfg("arclegacy.handler", "incoming"); Config cfg(xcfg /*, cfg.getFileName()*/); cfg.NewChild("ConfigFile") = config->ConfigFile(); ArcSec::SecHandlerPluginArgument arg(&cfg, &context); Plugin* plugin = factory.get_instance(SecHandlerPluginKind, "arclegacy.handler", &arg); handler = plugin?dynamic_cast(plugin):NULL; }; { ArcSec::SecHandlerConfig xcfg("arclegacy.map", "incoming"); Config cfg(xcfg /*, cfg.getFileName()*/); XMLNode block = cfg.NewChild("ConfigBlock"); block.NewChild("ConfigFile") = config->ConfigFile(); block.NewChild("BlockName") = "mapping"; ArcSec::SecHandlerPluginArgument arg(&cfg, &context); Plugin* plugin = factory.get_instance(SecHandlerPluginKind, "arclegacy.map", &arg); mapper = plugin?dynamic_cast(plugin):NULL; }; bool result = false; if(gridmapper && handler && mapper) { // Prepare information source TLSSecAttr* sec_attr = new TLSSecAttr(usercfg); // Set up a fake message to be used as a container for the information being processed Arc::Message msg; msg.Auth()->set("TLS", sec_attr); // Message takes ownership of the sec_attr // Some plugins fetch user DN from message attributes msg.Attributes()->set("TLS:IDENTITYDN", sec_attr->Identity()); // Process collected information if((gridmapper->Handle(&msg)) && (handler->Handle(&msg)) && (mapper->Handle(&msg))) { // Result of mapping is stored in message attribute - fetch it std::string uname = msg.Attributes()->get("SEC:LOCALID"); if(!uname.empty()) { user = Arc::User(uname); result = true; } } } delete gridmapper; delete handler; delete mapper; return result; } bool INTERNALClient::PrepareARexConfig(){ Arc::Credential cred(usercfg); std::string gridname = cred.GetIdentityName(); arexconfig = new ARex::ARexGMConfig(*config,user.Name(),gridname,endpoint); return true; } bool INTERNALClient::CreateDelegation(std::string& deleg_id){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } // Create new delegation slot in delegation store and // generate or apply delegation id.
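// The stored blob is simply the concatenation of certificate, private key and
// certificate chain produced below. A sketch of the typical call path (this is
// how SubmitterPluginINTERNAL::getDelegationID() uses it):
//
//   INTERNALClient ac(url, usercfg);
//   std::string deleg_id;                 // empty: a fresh slot id is generated
//   if (ac.CreateDelegation(deleg_id)) {
//     // deleg_id now names the stored credentials; pass it on to submit(...)
//   }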
Arc::Credential cred(usercfg); std::string gridname = cred.GetIdentityName(); std::string proxy_data; std::string proxy_part1; std::string proxy_part2; std::string proxy_part3; cred.OutputCertificate(proxy_part1); cred.OutputPrivatekey(proxy_part2); cred.OutputCertificateChain(proxy_part3); proxy_data = proxy_part1 + proxy_part2 + proxy_part3; ARex::DelegationStore& deleg = deleg_stores[config->DelegationDir()]; if(!deleg.AddCred(deleg_id, gridname, proxy_data)) { error_description="Failed to store delegation."; logger.msg(Arc::ERROR, "%s", error_description); return false; } return true; } bool INTERNALClient::RenewDelegation(std::string const& deleg_id) { if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } // Create new delegation in already assigned slot if(deleg_id.empty()) return false; Arc::Credential cred(usercfg); std::string gridname = cred.GetIdentityName(); std::string proxy_data; //std::string proxy_key; //cred.OutputCertificateChain(proxy_data); //cred.OutputPrivatekey(proxy_key); //proxy_data = proxy_key + proxy_data; //usercfg.CredentialString(proxy_data); std::string proxy_part1; std::string proxy_part2; std::string proxy_part3; cred.OutputCertificate(proxy_part1); cred.OutputPrivatekey(proxy_part2); cred.OutputCertificateChain(proxy_part3); proxy_data = proxy_part1 + proxy_part2 + proxy_part3; ARex::DelegationStore& deleg = deleg_stores[config->DelegationDir()]; if(!deleg.PutCred(deleg_id, gridname, proxy_data)) { error_description="Failed to store delegation."; logger.msg(Arc::ERROR, "%s", error_description); return false; } return true; } std::string INTERNALClient::get_error_description() const { if (!error_description.empty()) return error_description; return ""; } bool INTERNALClient::submit(const std::list& jobdescs,std::list& localjobs, const std::string delegation_id) { if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //called by SubmitterPluginINTERNAL ac->submit(..) logger.msg(Arc::VERBOSE, "Submitting job "); bool noFailures = true; int limit = 1000000; // 1 M - Safety std::list::const_iterator itSubmit = jobdescs.begin(), itLastProcessedEnd = jobdescs.begin(); while (itSubmit != jobdescs.end() && limit > 0) { for (int i = 0; itSubmit != jobdescs.end() && i < limit; ++itSubmit, ++i) { INTERNALJob localjob; //set some additional parameters if(config->DefaultQueue().empty() && (config->Queues().size() == 1)) { config->SetDefaultQueue(*(config->Queues().begin())); } ARex::JobDescriptionHandler job_desc_handler(*config); ARex::JobLocalDescription job_desc; std::string jobdesc_str; Arc::JobDescriptionResult ures = (*itSubmit).UnParse(jobdesc_str,"emies:adl"); Arc::XMLNode adl(jobdesc_str); ARex::JobIDGeneratorINTERNAL idgenerator(endpoint); const std::string dummy = ""; ARex::ARexJob arexjob(adl,*arexconfig,delegation_id,dummy,dummy,logger,idgenerator); if(!arexjob){ logger.msg(Arc::ERROR, "%s",arexjob.Failure()); return false; } else{ //make localjob for internal handling INTERNALJob localjob(arexjob,*config,delegation_id); localjobs.push_back(localjob); } } itLastProcessedEnd = itSubmit; } return noFailures; } bool INTERNALClient::putFiles(INTERNALJob const& localjob, std::list const& sources, std::list const& destinations) { if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } ARex::GMJob gmjob(localjob.id, user, localjob.sessiondir, ARex::JOB_STATE_ACCEPTED); //Fix-me removed cbegin and cend from sources and destination. 
Either fix the compiler, or rewrite to be const. for(std::list::const_iterator source = sources.begin(), destination = destinations.begin(); source != sources.end() && destination != destinations.end(); ++source, ++destination) { std::string path = localjob.sessiondir + "/" + *destination; std::string fn = "/" + *destination; // TODO: direct copy will not work if session is on NFS if(!FileCopy(*source, path)) { logger.msg(Arc::ERROR, "Failed to copy input file: %s to path: %s",*source,path); return false; } if((!ARex::fix_file_permissions(path,false)) || // executable flag is handled by A-REX (!ARex::fix_file_owner(path,gmjob))) { logger.msg(Arc::ERROR, "Failed to set permissions on: %s",path); //clean job here? At the moment the job is left in limbo in the control and session dirs clean(localjob.id); return false; } ARex::job_input_status_add_file(gmjob,*config,fn); } (void)ARex::CommFIFO::Signal(config->ControlDir(), localjob.id); return true; } bool INTERNALClient::submit(const Arc::JobDescription& jobdesc, INTERNALJob& localjob, const std::string delegation_id) { std::list jobdescs; std::list localjobs; jobdescs.push_back(jobdesc); if(!submit(jobdescs, localjobs, delegation_id)) return false; if(localjobs.empty()) return false; localjob = localjobs.back(); return true; } bool INTERNALClient::info(std::list& jobs, std::list& jobids_found){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //at the moment called by JobListRetrieverPluginINTERNAL Query for(std::list::iterator job = jobs.begin(); job!= jobs.end(); job++){ ARex::ARexJob arexjob(job->id,*arexconfig,logger); std::string state = arexjob.State(); if (state != "UNDEFINED") jobids_found.push_back(*job); } return true; } bool INTERNALClient::info(INTERNALJob& localjob, Arc::Job& arcjob){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //Called from (at least) JobControllerPluginINTERNAL //Called for stagein/stageout/sessiondir if the URL of either is not known //Extracts information about the current arcjob from the arexjob and the job.jobid.description file, and populates the localjob and arcjob with this info std::vector tokens; Arc::tokenize(arcjob.JobID, tokens, "/"); if(tokens.empty()) return false; //NB! Add control that the arcjob.jobID is in correct format localjob.id = tokens.back(); ARex::JobId gm_job_id = localjob.id; ARex::ARexJob arexjob(gm_job_id,*arexconfig,logger); arcjob.State = JobStateINTERNAL((std::string)arexjob.State()); if(!localjob.delegation_id.empty()) arcjob.DelegationID.push_back(localjob.delegation_id); //Get other relevant info from the .info file ARex::JobLocalDescription job_desc; if(!ARex::job_local_read_file(gm_job_id,*config,job_desc)) { error_description="Job is probably corrupted: can't read internal information."; logger.msg(Arc::ERROR, "%s", error_description); return false; }; //JobControllerPluginINTERNAL needs this, so make sure it is set. if(localjob.session.empty()){ localjob.session.push_back((std::string)job_desc.sessiondir); } if(localjob.stagein.empty()){ //assume that it is sessiondir localjob.stagein.push_back((std::string)job_desc.sessiondir); } if(localjob.stageout.empty()){ //assume that it is sessiondir localjob.stageout.push_back((std::string)job_desc.sessiondir); } return true; } bool INTERNALClient::sstat(Arc::XMLNode& xmldoc) { if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //TO-DO Need to lock info.xml during reading?
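// sstat() returns the GLUE2 Services subtree of the A-REX published resource
// information (info.xml). A sketch of how the retriever side consumes it, with
// types abbreviated as in TargetInformationRetrieverPluginINTERNAL::Query() above:
//
//   Arc::XMLNode services;
//   if (ac.sstat(services)) {
//     std::list<ComputingServiceType> csList;
//     GLUE2::ParseExecutionTargets(services, csList);
//   }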
std::string fname = config->InformationFile(); std::string xmlstring; (void)Arc::FileRead(fname, xmlstring); if(xmlstring.empty()){ error_description="Failed to obtain resource information."; logger.msg(Arc::ERROR, "%s", error_description); return false; } XMLNode tmp(xmlstring); XMLNode services = tmp["Domains"]["AdminDomain"]["Services"]; if(!services) { lfailure = "Missing Services in response"; return false; } services.Move(xmldoc); return true; } bool INTERNALClient::kill(const std::string& jobid){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //jobid is the full URL std::vector tokens; Arc::tokenize(jobid, tokens, "/"); if(tokens.empty()) return false; std::string thisid = tokens.back(); ARex::ARexJob arexjob(thisid,*arexconfig,logger); arexjob.Cancel(); return true; } bool INTERNALClient::clean(const std::string& jobid){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //jobid is the full URL std::vector tokens; Arc::tokenize(jobid, tokens, "/"); if(tokens.empty()) return false; std::string thisid = tokens.back(); ARex::ARexJob arexjob(thisid,*arexconfig,logger); arexjob.Clean(); return true; } bool INTERNALClient::restart(const std::string& jobid){ if(!arexconfig) { logger.msg(Arc::ERROR, "INTERNALClient is not initialized"); return false; } //jobid is the full URL std::vector tokens; Arc::tokenize(jobid, tokens, "/"); if(tokens.empty()) return false; std::string thisid = tokens.back(); ARex::ARexJob arexjob(thisid,*arexconfig,logger); arexjob.Resume(); return true; } bool INTERNALClient::list(std::list& jobs){ //Populates jobs with entries containing only the jobid //Jobs are found by scanning the control directory for job.<id>.local files std::string cdir=config->ControlDir(); Glib::Dir dir(cdir); std::string file_name; while ((file_name = dir.read_name()) != "") { std::vector tokens; Arc::tokenize(file_name, tokens, "."); // look for job.id.local if (tokens.size() == 3 && tokens[0] == "job" && tokens[2] == "local") { INTERNALJob job; job.id = (std::string)tokens[1]; jobs.push_back(job); }; } dir.close(); return true; } INTERNALJob& INTERNALJob::operator=(const Arc::Job& job) { //Set localjob attributes from the ARC job //Called from JobControllerPlugin stagein.clear(); session.clear(); stageout.clear(); if (job.StageInDir) stagein.push_back(job.StageInDir); if (job.StageOutDir) stageout.push_back(job.StageOutDir); if (job.SessionDir) session.push_back(job.SessionDir); id = job.JobID; manager = job.JobManagementURL; resource = job.ServiceInformationURL; delegation_id = job.DelegationID.empty()?std::string(""):*job.DelegationID.begin(); // State information is not transferred from Job object. Currently not needed.
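// Note: this operator and the toJob() methods below form the round trip between
// the client libraries' generic Arc::Job and this plugin's INTERNALJob, e.g.
// (a sketch; arcjob and ac are assumed to exist):
//
//   INTERNALJob lj;
//   lj = arcjob;                   // Arc::Job -> INTERNALJob (this operator)
//   Arc::Job back;
//   lj.toJob(&ac, &lj, back);      // INTERNALJob -> Arc::Job
//
// On the way back the state is re-derived from ARexJob rather than copied here.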
return *this; } void INTERNALJob::toJob(INTERNALClient* client, INTERNALJob* localjob, Arc::Job& j) const { //fills an arcjob from localjob //j.JobID = (client->ce).str() + "/" + localjob->id; j.JobID = "file://" + sessiondir; j.ServiceInformationURL = client->ce; j.ServiceInformationInterfaceName = "org.nordugrid.internal"; j.JobStatusURL = client->ce; j.JobStatusInterfaceName = "org.nordugrid.internal"; j.JobManagementURL = client->ce; j.JobManagementInterfaceName = "org.nordugrid.internal"; j.IDFromEndpoint = id; if (!stagein.empty())j.StageInDir = stagein.front(); else j.StageInDir = sessiondir; if (!stageout.empty())j.StageOutDir = stageout.front(); else j.StageOutDir = sessiondir; if (!session.empty()) j.SessionDir = session.front(); else j.SessionDir = sessiondir; j.DelegationID.clear(); if(!(localjob->delegation_id).empty()) j.DelegationID.push_back(localjob->delegation_id); } void INTERNALJob::toJob(INTERNALClient* client, Arc::Job& arcjob, Arc::Logger& logger) const { //called from UpdateJobs in JobControllerPluginINTERNAL if (!stagein.empty())arcjob.StageInDir = stagein.front(); else arcjob.StageInDir = sessiondir; if (!stageout.empty()) arcjob.StageOutDir = stageout.front(); else arcjob.StageOutDir = sessiondir; if (!session.empty()) arcjob.SessionDir = session.front(); else arcjob.SessionDir = sessiondir; //extract info from arexjob //extract jobid from arcjob, which is the full jobid url std::vector tokens; Arc::tokenize(arcjob.JobID, tokens, "/"); if(!tokens.empty()) { //NB! Add control that the arcjob.jobID is in correct format ARex::JobId gm_job_id = tokens.back(); if(client && client->arexconfig) { ARex::ARexJob arexjob(gm_job_id,*(client->arexconfig),client->logger); std::string state = arexjob.State(); arcjob.State = JobStateINTERNAL(state); } } } // ----------------------------------------------------------------------------- // TODO: does it need locking?
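// INTERNALClients is a small pool of INTERNALClient instances kept in the
// clients_ multimap (declared in INTERNALClient.h, not shown here); the
// destructor below deletes every stored client. Whether concurrent access to
// the pool needs locking is still open, hence the TODO above.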
INTERNALClients::INTERNALClients(const Arc::UserConfig& usercfg):usercfg_(usercfg) { } INTERNALClients::~INTERNALClients(void) { std::multimap::iterator it; for (it = clients_.begin(); it != clients_.end(); it = clients_.begin()) { delete it->second; } } } nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/JobControllerPluginINTERNAL.cpp0000644000000000000000000000013215067751327030325 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.447490817 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/JobControllerPluginINTERNAL.cpp0000644000175000002070000002453315067751327032236 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "../grid-manager/conf/GMConfig.h" #include "../grid-manager/files/ControlFileHandling.h" #include "JobStateINTERNAL.h" #include "INTERNALClient.h" #include "JobControllerPluginINTERNAL.h" using namespace Arc; namespace ARexINTERNAL { Logger JobControllerPluginINTERNAL::logger(Logger::getRootLogger(), "JobControllerPlugin.INTERNAL"); bool JobControllerPluginINTERNAL::isEndpointNotSupported(const std::string& endpoint) const { const std::string::size_type pos = endpoint.find("://"); return pos != std::string::npos && lower(endpoint.substr(0, pos)) != "file" ; } void JobControllerPluginINTERNAL::UpdateJobs(std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { if (jobs.empty()) return; INTERNALClient ac; ARex::GMConfig const *config = ac.GetConfig(); if(!config){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file"); return; } //Is this method doing what it is supposed to do? I think the main purpose is to get hold of jobids for existing jobs in the system. for(std::list::iterator itJ = jobs.begin(); itJ != jobs.end(); itJ++){ //stat the .description file to check whether job is still in the system //(*itJ).JobID is now the global id, tokenize and get hold of just the local jobid std::vector tokens; Arc::tokenize((**itJ).JobID, tokens, "/"); std::string localid = tokens[tokens.size()-1]; std::string rsl; if(!ARex::job_description_read_file(localid, *config, rsl)){ continue; } //the job exists, so add it INTERNALJob localjob; //toJob calls info(job) and populates the arcjob with basic information (id and state). 
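// The loop takes the last path component of the job URL as the local id and
// treats the presence of the job description control file as the test for
// whether the job is still known to A-REX, roughly:
//
//   std::string rsl;
//   bool exists = ARex::job_description_read_file(localid, *config, rsl);
//
// Only jobs passing that test reach this point.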
localjob.toJob(&ac,**itJ,logger); if (itJ != jobs.end()) { IDsProcessed.push_back((**itJ).JobID); } else{ IDsNotProcessed.push_back((**itJ).JobID); } } } bool JobControllerPluginINTERNAL::CleanJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { INTERNALClient ac(*usercfg); ARex::GMConfig const * config = ac.GetConfig(); if(!config){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file"); return false; } bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { //Job& job = **it; if (!ac.clean((*it)->JobID)) { ok = false; IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); } return ok; } bool JobControllerPluginINTERNAL::CancelJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { INTERNALClient ac(*usercfg); ARex::GMConfig const * config = ac.GetConfig(); if(!config){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file"); return false; } if(!ac.kill((*it)->JobID)) { ok = false; IDsNotProcessed.push_back((*it)->JobID); continue; } (*it)->State = JobStateINTERNAL((std::string)"killed"); IDsProcessed.push_back((*it)->JobID); } return ok; } bool JobControllerPluginINTERNAL::RenewJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { // 1. Fetch/find delegation ids for each job INTERNALClient ac; ARex::GMConfig const * config = ac.GetConfig(); if(!config){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file"); return false; } if((*it)->DelegationID.empty()) { logger.msg(INFO, "Job %s has no delegation associated. Can't renew such job.", (*it)->JobID); IDsNotProcessed.push_back((*it)->JobID); continue; } // 2. Leave only unique IDs - not needed yet because current code uses // different delegations for each job. // 3. 
Renew credentials for every ID std::list::const_iterator did = (*it)->DelegationID.begin(); for(;did != (*it)->DelegationID.end();++did) { if(!ac.RenewDelegation(*did)) { logger.msg(INFO, "Job %s failed to renew delegation %s.", (*it)->JobID, *did); break; } } if(did != (*it)->DelegationID.end()) { IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); } return false; } bool JobControllerPluginINTERNAL::ResumeJobs(const std::list& jobs, std::list& IDsProcessed, std::list& IDsNotProcessed, bool isGrouped) const { bool ok = true; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { INTERNALClient ac; ARex::GMConfig const * config = ac.GetConfig(); if(!config){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file"); return false; } Job& job = **it; if (!job.RestartState) { logger.msg(INFO, "Job %s does not report a resumable state", job.JobID); ok = false; IDsNotProcessed.push_back(job.JobID); continue; } logger.msg(VERBOSE, "Resuming job: %s at state: %s (%s)", job.JobID, job.RestartState.GetGeneralState(), job.RestartState()); if(!ac.restart((*it)->JobID)) { ok = false; IDsNotProcessed.push_back((*it)->JobID); continue; } IDsProcessed.push_back((*it)->JobID); logger.msg(VERBOSE, "Job resuming successful"); } return ok; } bool JobControllerPluginINTERNAL::GetURLToJobResource(const Job& job, Job::ResourceType resource, URL& url) const { if (resource == Job::JOBDESCRIPTION) { return false; } // Obtain information about staging urls INTERNALJob ljob; ljob = job; URL stagein; URL stageout; URL session; // TODO: currently using first valid URL. Need support for multiple. for(std::list::const_iterator s = ljob.GetStagein().begin();s!=ljob.GetStagein().end();++s) { if(*s) { stagein = *s; break; } } for(std::list::const_iterator s = ljob.GetStageout().begin();s!=ljob.GetStageout().end();++s) { if(*s) { stageout = *s; break; } } for(std::list::const_iterator s = ljob.GetSession().begin();s!=ljob.GetSession().end();++s) { if(*s) { session = *s; break; } } if ((resource != Job::STAGEINDIR || !stagein) && (resource != Job::STAGEOUTDIR || !stageout) && (resource != Job::SESSIONDIR || !session)) { // If there is no needed URL provided try to fetch it from server Job tjob; tjob.JobID = job.JobID; INTERNALClient ac; ARex::GMConfig const * config = ac.GetConfig(); if(!config){ logger.msg(Arc::ERROR,"Failed to load grid-manager config file"); return false; } if (!ac.info(ljob, tjob)) { logger.msg(INFO, "Failed retrieving information for job: %s", job.JobID); return false; } for(std::list::const_iterator s = ljob.GetStagein().begin();s!=ljob.GetStagein().end();++s) { if(*s) { stagein = *s; break; } } for(std::list::const_iterator s = ljob.GetStageout().begin();s!=ljob.GetStageout().end();++s) { if(*s) { stageout = *s; break; } } for(std::list::const_iterator s = ljob.GetSession().begin();s!=ljob.GetSession().end();++s) { if(*s) { session = *s; break; } } // Choose url by state // TODO: For INTERNAL submission plugin the url is the same for all, although not reflected here // TODO: maybe this method should somehow know what the purpose of the URL is // TODO: state attributes would be more suitable // TODO: library needs to be extended to allow for multiple URLs if((tjob.State == JobState::ACCEPTED) || (tjob.State == JobState::PREPARING)) { url = stagein; } else if((tjob.State == JobState::DELETED) || (tjob.State == JobState::FAILED) || (tjob.State == JobState::KILLED) || (tjob.State == JobState::FINISHED) || (tjob.State ==
JobState::FINISHING)) { url = stageout; } else { url = session; } // If no url found by state still try to get something if(!url) { if(session) url = session; if(stagein) url = stagein; if(stageout) url = stageout; } } switch (resource) { case Job::STDIN: url.ChangePath(url.Path() + '/' + job.StdIn); break; case Job::STDOUT: url.ChangePath(url.Path() + '/' + job.StdOut); break; case Job::STDERR: url.ChangePath(url.Path() + '/' + job.StdErr); break; case Job::JOBLOG: url.ChangePath(url.Path() + "/" + job.LogDir + "/errors"); break; case Job::STAGEINDIR: if(stagein) url = stagein; break; case Job::STAGEOUTDIR: if(stageout) url = stageout; break; case Job::SESSIONDIR: if(session) url = session; break; default: break; } if(url && ((url.Protocol() == "file"))) { //To-do - is this relevant for INTERNAL plugin? url.AddOption("threads=2",false); url.AddOption("encryption=optional",false); // url.AddOption("httpputpartial=yes",false); - TODO: use for A-REX } return true; } bool JobControllerPluginINTERNAL::GetJobDescription(const Job& /* job */, std::string& /* desc_str */) const { logger.msg(INFO, "Retrieving job description of INTERNAL jobs is not supported"); return false; } } // namespace Arc nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/JobListRetrieverPluginINTERNAL.h0000644000000000000000000000013215067751327030452 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.450444632 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.h0000644000175000002070000000206715067751327032361 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifndef __ARC_JOBLISTRETRIEVERPLUGININTERNAL_H__ #define __ARC_JOBLISTRETRIEVERPLUGININTERNAL_H__ #include #include using namespace Arc; namespace Arc{ class Logger; } namespace ARexINTERNAL { class JobLocalDescription; class INTERNALClient; class INTERNALClients; class JobListRetrieverPluginINTERNAL : public Arc::JobListRetrieverPlugin { public: JobListRetrieverPluginINTERNAL(PluginArgument* parg): JobListRetrieverPlugin(parg) { supportedInterfaces.push_back("org.nordugrid.internal"); } virtual ~JobListRetrieverPluginINTERNAL() {} static Plugin* Instance(PluginArgument *arg) { return new JobListRetrieverPluginINTERNAL(arg); } virtual EndpointQueryingStatus Query(const UserConfig&, const Endpoint&, std::list&, const EndpointQueryOptions&) const; virtual bool isEndpointNotSupported(const Endpoint&) const; private: static Logger logger; }; } // namespace Arc #endif // __ARC_JOBLISTRETRIEVERPLUGININTERNAL_H__ nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/JobListRetrieverPluginINTERNAL.c0000644000000000000000000000013215067751327030445 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 30 ctime=1759499030.449701443 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/JobListRetrieverPluginINTERNAL.cpp0000644000175000002070000000545015067751327032713 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "../grid-manager/files/ControlFileContent.h" #include "../grid-manager/files/ControlFileHandling.h" #include "JobStateINTERNAL.h" #include "INTERNALClient.h" #include "JobListRetrieverPluginINTERNAL.h" using namespace Arc; namespace ARexINTERNAL { Logger JobListRetrieverPluginINTERNAL::logger(Logger::getRootLogger(), "JobListRetrieverPlugin.INTERNAL"); bool JobListRetrieverPluginINTERNAL::isEndpointNotSupported(const Endpoint& 
endpoint) const { const std::string::size_type pos = endpoint.URLString.find("://"); if (pos != std::string::npos) { const std::string proto = lower(endpoint.URLString.substr(0, pos)); return ((proto != "file")); } return (endpoint.URLString != "localhost"); } static URL CreateURL(std::string service) { std::string::size_type pos1 = service.find("://"); if (pos1 == std::string::npos) { service = "file://" + service; } else { std::string proto = lower(service.substr(0,pos1)); if(proto != "file") return URL(); } return service; } EndpointQueryingStatus JobListRetrieverPluginINTERNAL::Query(const UserConfig& uc, const Endpoint& endpoint, std::list& jobs, const EndpointQueryOptions&) const { EndpointQueryingStatus s(EndpointQueryingStatus::FAILED); //this can all be simplified I think - TO-DO URL url(CreateURL(endpoint.URLString)); if (!url) { return s; } INTERNALClient ac(uc); if (!ac.GetConfig()) { return s; } std::list localjobs; if (!ac.list(localjobs)) { return s; } logger.msg(DEBUG, "Listing localjobs succeeded, %d localjobs found", localjobs.size()); //checks that the job is in state other than undefined std::list jobids_found; ac.info(localjobs,jobids_found); std::list::iterator itID = jobids_found.begin(); for(; itID != jobids_found.end(); ++itID) { //read job description to get hold of submission-interface ARex::JobLocalDescription job_desc; ARex::JobId jobid((*itID).GetId()); ARex::job_local_read_file(jobid, *ac.GetConfig(), job_desc); std::string submittedVia = job_desc.interface; if (submittedVia != "org.nordugrid.internal") { logger.msg(DEBUG, "Skipping retrieved job (%s) because it was submitted via another interface (%s).", url.fullstr() + "/" + itID->GetId(), submittedVia); continue; } INTERNALJob localjob; Job j; itID->toJob(&ac, &localjob, j); jobs.push_back(j); }; s = EndpointQueryingStatus::SUCCESSFUL; return s; } } // namespace Arc nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/PaxHeaders/README0000644000000000000000000000013115067751327023406 xustar0030 mtime=1759498967.764072651 30 atime=1759498967.869493711 29 ctime=1759499030.43844445 nordugrid-arc-7.1.1/src/services/a-rex/internaljobplugin/README0000644000175000002070000000025715067751327025315 0ustar00mockbuildmock00000000000000Arc Client Component (ACC) plugins for supporting ARC lightweight (INTERNAL) Implements the following specialized classes: o JobControllerPluginINTERNAL o SubmitterINTERNAL nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/cachecheck.cpp0000644000000000000000000000013215067751327021546 xustar0030 mtime=1759498967.750491903 30 atime=1759498967.862493605 30 ctime=1759499029.343164478 nordugrid-arc-7.1.1/src/services/a-rex/cachecheck.cpp0000644000175000002070000000616215067751327023455 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "job.h" #include "arex.h" #define CACHE_CHECK_SESSION_DIR_ID "9999999999999999999999999999999" namespace ARex { Arc::MCC_Status ARexService::CacheCheck(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) { std::vector caches; std::vector draining_caches; std::vector readonly_caches; // use cache dir(s) from conf file try { CacheConfig cache_config(config.GmConfig().CacheParams()); cache_config.substitute(config.GmConfig(), config.User()); caches = cache_config.getCacheDirs(); readonly_caches = cache_config.getReadOnlyCacheDirs(); } catch (CacheConfigException& e) { logger.msg(Arc::ERROR, "Error with cache configuration: %s", 
e.what()); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Error with cache configuration"); fault.Detail(true).NewChild("CacheConfigurationFault"); out.Destroy(); return Arc::MCC_Status(); } if (caches.empty()) { Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Cache is disabled"); fault.Detail(true).NewChild("CacheDisabledFault"); out.Destroy(); return Arc::MCC_Status(); } Arc::FileCache cache(caches, draining_caches, readonly_caches, CACHE_CHECK_SESSION_DIR_ID ,config.User().get_uid(), config.User().get_gid()); if (!cache) { logger.msg(Arc::ERROR, "Error with cache configuration"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Error with cache configuration"); fault.Detail(true).NewChild("CacheConfigurationFault"); out.Destroy(); return Arc::MCC_Status(); } bool fileexist; Arc::XMLNode resp = out.NewChild("CacheCheckResponse"); Arc::XMLNode results = resp.NewChild("CacheCheckResult"); for(int n = 0;;++n) { Arc::XMLNode id = in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; if (!id) break; std::string fileurl = (std::string)in["CacheCheck"]["TheseFilesNeedToCheck"]["FileURL"][n]; Arc::XMLNode resultelement = results.NewChild("Result"); fileexist = false; std::string file_lfn; Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_type); Arc::URL url(fileurl); Arc::DataHandle d(url, usercfg); logger.msg(Arc::INFO, "Looking up URL %s", d->str()); file_lfn = cache.File(d->str()); logger.msg(Arc::INFO, "Cache file is %s", file_lfn); struct stat fileStat; fileexist = (stat(file_lfn.c_str(), &fileStat) == 0); resultelement.NewChild("FileURL") = fileurl; resultelement.NewChild("ExistInTheCache") = (fileexist ? "true": "false"); if (fileexist) resultelement.NewChild("FileSize") = Arc::tostring(fileStat.st_size); else resultelement.NewChild("FileSize") = "0"; } return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/put.cpp0000644000000000000000000000013215067751327020315 xustar0030 mtime=1759498967.771747295 30 atime=1759498967.872493757 30 ctime=1759499029.338241203 nordugrid-arc-7.1.1/src/services/a-rex/put.cpp0000644000175000002070000002070515067751327022223 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "PayloadFile.h" #include "job.h" #include "arex.h" #define MAX_CHUNK_SIZE (10*1024*1024) namespace ARex { static bool write_file(Arc::FileAccess& h,char* buf,size_t size) { for(;size>0;) { ssize_t l = h.fa_write(buf,size); if(l == -1) return false; size-=l; buf+=l; }; return true; } Arc::MCC_Status ARexService::PutInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; return make_http_fault(outmsg,501,"Not Implemented"); } Arc::MCC_Status ARexService::DeleteInfo(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; return make_http_fault(outmsg,501,"Not Implemented"); } Arc::MCC_Status ARexService::PutCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; return
make_http_fault(outmsg,501,"Not Implemented"); } Arc::MCC_Status ARexService::DeleteCache(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; return make_http_fault(outmsg,501,"Not Implemented"); } static Arc::MCC_Status PutJobFile(Arc::Message& outmsg, Arc::FileAccess& file, std::string& errstr, Arc::PayloadStreamInterface& stream, FileChunks& fc, bool& complete) { complete = false; // TODO: Use memory mapped file to minimize the number of in-memory copies const int bufsize = 1024*1024; if(!fc.Size()) fc.Size(stream.Size()); off_t pos = stream.Pos(); if(file.fa_lseek(pos,SEEK_SET) != pos) { std::string err = Arc::StrError(); errstr = "failed to set position of file to "+Arc::tostring(pos)+" - "+err; return ARexService::make_http_fault(outmsg, 500, "Error seeking to specified position in file"); }; char* buf = new char[bufsize]; if(!buf) { errstr = "failed to allocate memory"; return ARexService::make_http_fault(outmsg, 500, "Error allocating memory"); }; bool got_something = false; for(;;) { int size = bufsize; if(!stream.Get(buf,size)) break; if(size > 0) got_something = true; if(!write_file(file,buf,size)) { std::string err = Arc::StrError(); delete[] buf; errstr = "failed to write to file - "+err; return ARexService::make_http_fault(outmsg, 500, "Error writing to file"); }; if(size) fc.Add(pos,size); pos+=size; }; delete[] buf; // Due to a limitation of PayloadStreamInterface it is not possible to // directly distinguish between a zero-sized file and a file with undefined // size. But by applying some dynamic heuristics it is possible. // TODO: extend/modify PayloadStreamInterface. if((stream.Size() == 0) && (stream.Pos() == 0) && (!got_something)) { complete = true; } return ARexService::make_empty_response(outmsg); } static Arc::MCC_Status PutJobFile(Arc::Message& outmsg, Arc::FileAccess& file, std::string& errstr, Arc::PayloadRawInterface& buf, FileChunks& fc, bool& complete) { complete = false; bool got_something = false; if(!fc.Size()) fc.Size(buf.Size()); for(int n = 0;;++n) { char* sbuf = buf.Buffer(n); if(sbuf == NULL) break; off_t offset = buf.BufferPos(n); off_t size = buf.BufferSize(n); if(size > 0) { got_something = true; off_t o = file.fa_lseek(offset,SEEK_SET); if(o != offset) { std::string err = Arc::StrError(); errstr = "failed to set position of file to "+Arc::tostring(offset)+" - "+err; return ARexService::make_http_fault(outmsg, 500, "Error seeking to specified position"); }; if(!write_file(file,sbuf,size)) { std::string err = Arc::StrError(); errstr = "failed to write to file - "+err; return ARexService::make_http_fault(outmsg, 500, "Error writing file"); }; if(size) fc.Add(offset,size); }; }; if((buf.Size() == 0) && (!got_something)) { complete = true; } return ARexService::make_empty_response(outmsg); } Arc::MCC_Status ARexService::PutJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; // Nothing can be put into root endpoint if(id.empty()) return make_http_fault(outmsg, 500, "No job specified"); // Check for proper payload Arc::MessagePayload* payload = inmsg.Payload(); if(!payload) { logger_.msg(Arc::ERROR, "%s: put file %s: there is no payload", id, subpath); return make_http_fault(outmsg, 500, "Missing payload"); }; Arc::PayloadStreamInterface* stream = dynamic_cast<Arc::PayloadStreamInterface*>(payload);
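// Note: the HTTP layer may hand over the request body either as a stream
// (Arc::PayloadStreamInterface, probed just above) or as an in-memory raw
// buffer (Arc::PayloadRawInterface, probed just below). PutJob dispatches to
// the matching PutJobFile() overload defined earlier in this file, so both
// transport models share the same FileChunks bookkeeping for partial uploads.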
Arc::PayloadRawInterface* buf = dynamic_cast<Arc::PayloadRawInterface*>(payload); if((!stream) && (!buf)) { logger_.msg(Arc::ERROR, "%s: put file %s: unrecognized payload", id, subpath); return make_http_fault(outmsg, 500, "Error processing payload"); } // Acquire job ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "%s: there is no such job: %s", job.ID(), job.Failure()); return make_http_fault(outmsg, 500, "Job does not exist"); }; // Prepare access to file FileChunksRef fc(files_chunks_.Get(job.ID()+"/"+subpath)); Arc::FileAccess* file = job.CreateFile(subpath.c_str()); if(file == NULL) { // TODO: report something logger_.msg(Arc::ERROR, "%s: put file %s: failed to create file: %s", job.ID(), subpath, job.Failure()); return make_http_fault(outmsg, 500, "Error creating file"); }; Arc::MCC_Status r; std::string err; bool complete(false); if(stream) { r = PutJobFile(outmsg,*file,err,*stream,*fc,complete); } else { r = PutJobFile(outmsg,*file,err,*buf,*fc,complete); } file->fa_close(); Arc::FileAccess::Release(file); if(r) { if(complete || fc->Complete()) job.ReportFileComplete(subpath); } else { logger_.msg(Arc::ERROR, "%s: put file %s: %s", job.ID(), subpath, err); } return r; } Arc::MCC_Status ARexService::DeleteJob(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; // Nothing can be removed in root endpoint if(id.empty()) return make_http_fault(outmsg, 500, "No job specified"); // Ignoring payload // Acquire job ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "%s: there is no such job: %s", job.ID(), job.Failure()); return make_http_fault(outmsg, 500, "Job does not exist"); }; std::string full_path = job.GetFilePath(subpath.c_str()); if(full_path.empty()) { logger_.msg(Arc::ERROR, "%s: delete file %s: failed to obtain file path: %s", job.ID(), subpath, job.Failure()); return make_http_fault(outmsg, 500, "Error deleting file"); }; bool is_file = true; Arc::FileAccess* fs = job.OpenFile(subpath.c_str(), false, true); if(fs == NULL) { is_file = false; fs = job.OpenDir(subpath.c_str()); } if(fs == NULL) { // TODO: report something logger_.msg(Arc::ERROR, "%s: delete file %s: failed to open file/dir: %s", job.ID(), subpath, job.Failure()); return make_http_fault(outmsg, 500, "Error deleting file"); }; bool unlink_result = is_file ? fs->fa_unlink(full_path.c_str()) : fs->fa_rmdir(full_path.c_str()); int unlink_err = fs->geterrno(); is_file ?
fs->fa_close() : fs->fa_closedir(); Arc::FileAccess::Release(fs); if(!unlink_result) { if((unlink_err == ENOTDIR) || (unlink_err == ENOENT)) { return make_http_fault(outmsg, 404, "File not found"); } else { return make_http_fault(outmsg, 500, "Error deleting file"); }; }; return ARexService::make_empty_response(outmsg); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/test_cache_check.cpp0000644000000000000000000000013215067751327022744 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499029.353342905 nordugrid-arc-7.1.1/src/services/a-rex/test_cache_check.cpp0000644000175000002070000000247115067751327024652 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include int main(void) { Arc::Logger logger(Arc::Logger::rootLogger, "Test"); Arc::LogStream logcerr(std::cerr); Arc::Logger::rootLogger.addDestination(logcerr); logger.msg(Arc::INFO, "Creating client side chain"); std::string id; std::string url("https://localhost/arex"); Arc::NS ns("a-rex", "http://www.nordugrid.org/schemas/a-rex"); Arc::MCCConfig cfg; Arc::UserConfig uc; uc.ApplyToConfig(cfg); Arc::ClientSOAP client(cfg, url, 60); std::string faultstring; Arc::PayloadSOAP request(ns); Arc::XMLNode req = request.NewChild("a-rex:CacheCheck").NewChild("a-rex:TheseFilesNeedToCheck"); req.NewChild("a-rex:FileURL") = "http://example.org/test.txt"; Arc::PayloadSOAP* response; Arc::MCC_Status status = client.process(&request, &response); if (!status) { std::cerr << "Request failed" << std::endl; } std::string str; response->GetDoc(str, true); std::cout << str << std::endl; return 0; } nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arc.zero.conf0000644000000000000000000000013015067751327021371 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 29 ctime=1759499029.35483458 nordugrid-arc-7.1.1/src/services/a-rex/arc.zero.conf0000644000175000002070000000064715067751327023304 0ustar00mockbuildmock00000000000000# # ARC Computing Element zero configuration # Consult ARC Installation Guide to modify this configuration for production use-cases. # # You can also use "arc.conf.d" directory to add configuration options. # Run "arcctl config dump" for complete running configuration with defaults. 
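# For example (hypothetical customization, not part of the zero configuration
# itself): to run jobs under SLURM instead of the default fork back-end,
# replace the [lrms] block below with:
#   [lrms]
#   lrms = slurm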
# [common] [mapping] [lrms] lrms = fork [arex] [arex/ws] [arex/ws/jobs] [infosys] [infosys/glue2] [infosys/cluster] [queue:fork] nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/lrms0000644000000000000000000000013215067751426017701 xustar0030 mtime=1759499030.345443037 30 atime=1759499034.764510185 30 ctime=1759499030.345443037 nordugrid-arc-7.1.1/src/services/a-rex/lrms/0000755000175000002070000000000015067751426021660 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022012 xustar0030 mtime=1759498967.764492116 30 atime=1759498967.869493711 30 ctime=1759499029.909766363 nordugrid-arc-7.1.1/src/services/a-rex/lrms/Makefile.am0000644000175000002070000000036615067751327023721 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = cancel_common.sh submit_common.sh scan_common.sh community_rtes.sh pkgdata_DATA = lrms_common.sh SUBDIRS = pbs pbspro fork sge condor lsf ll slurm boinc test DIST_SUBDIRS = pbs pbspro fork sge condor lsf ll slurm boinc test nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356022025 xustar0030 mtime=1759498990.294083107 30 atime=1759499018.181258201 30 ctime=1759499029.915067357 nordugrid-arc-7.1.1/src/services/a-rex/lrms/Makefile.in0000644000175000002070000007000615067751356023732 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = lrms_common.sh CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; 
am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" DATA = $(dist_pkgdata_DATA) $(pkgdata_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/lrms_common.sh.in \ README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ 
GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = 
@SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = cancel_common.sh submit_common.sh scan_common.sh community_rtes.sh pkgdata_DATA = lrms_common.sh SUBDIRS = pbs pbspro fork sge condor lsf ll slurm boinc test DIST_SUBDIRS = pbs pbspro fork sge condor lsf ll slurm boinc test all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/Makefile'; \ 
$(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): lrms_common.sh: $(top_builddir)/config.status $(srcdir)/lrms_common.sh.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) install-pkgdataDATA: $(pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataDATA .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataDATA install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/test0000644000000000000000000000013215067751426020660 xustar0030 mtime=1759499030.375443493 30 atime=1759499034.764510185 30 ctime=1759499030.375443493 nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/0000755000175000002070000000000015067751426022637 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022771 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.371124339 nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/Makefile.am0000644000175000002070000000011115067751327024664 0ustar00mockbuildmock00000000000000EXTRA_DIST = check_submit_script.sh check_scan_script.sh test-gm-kick.sh nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/PaxHeaders/check_scan_script.sh0000644000000000000000000000013215067751327024736 xustar0030 mtime=1759498967.771747295 30 atime=1759498967.872493757 30 ctime=1759499030.375062154 nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/check_scan_script.sh0000644000175000002070000001674115067751327026651 0ustar00mockbuildmock00000000000000#!/bin/bash # # TODO: Support for multiple control directories. # TODO: Support for switching UID. # TODO: Support for updating diag file during "scanning". usage="$0 " if test $# != 2; then echo "Two arguments are needed." echo $usage exit 1 fi . ./lrms_common.sh function goToParentAndRemoveDir() { cd .. 
rm -rf ${1} } # Clear time zone - important when converting start and end time of jobs to be written to diag file. export TZ= scan_script=$1 unit_test=$2 unit_test_basename=`basename ${unit_test}` orig_srcdir=${SRCDIR} ORIG_PATH=${PATH} # Source unit test which should define a number of test functions having prefix 'test_' . ${unit_test} exitCode=0 errorOutput="" for test in ${TESTS}; do testdir="scan_${unit_test_basename}_test_${test}" rm -rf ${testdir} mkdir -p ${testdir} ${testdir}/controldir/processing cd ${testdir} SRCDIR=../${orig_srcdir} export PATH=${ORIG_PATH} unset arc_test_configuration unset simulate_cmds unset simulator_output unset test_jobs unset input_diag_file unset expected_diag_file unset expected_kicked_jobs echo -n "." # Run test function test_${test} # Write ARC configuration (arc.conf) touch ${test}.arc.conf if test "x${arc_test_configuration}" != "x"; then echo "${arc_test_configuration}" > ${test}.arc.conf fi # Add sessiondir and controldir attributes to arc.conf configuration. if test $(grep '^[[]arex][[:space:]]*$' ${test}.arc.conf -c) -ge 1; then sed -i "/^[[]arex][[:space:]]*$/ a\ sessiondir=`pwd`\n\ controldir=`pwd`/controldir" ${test}.arc.conf else echo $'\n'"[arex]"$'\n'"sessiondir=`pwd`"$'\n'"controldir=`pwd`/controldir" >> ${test}.arc.conf fi # Setup command simulation mkdir bin export PATH=$(pwd)/bin:${PATH} for cmd in sleep ${simulate_cmds}; do ln -s $(pwd)/../command-simulator.sh bin/${cmd} done if test ! -z "${simulator_output}"; then echo "${simulator_output}" > simulator_output export SIMULATOR_OUTCOME_FILE=$(pwd)/simulator_output export SIMULATOR_ERRORS_FILE=$(pwd)/simulator_errors fi # Create session directory and .status, .local, and .diag files. oIFS="${IFS}" IFS=$'\n' test_input_diag_list=( ${test_input_diag_list} ) test_jobs=( ${test_jobs} ) for i in $(seq 0 $(( ${#test_jobs[@]} - 1 )) ); do IFS=" " job=( ${test_jobs[$i]} ) mkdir ${test}-${job[0]} # Create session directory job_path=$(control_path "$(pwd)/controldir" "${job[0]}" "") mkdir -p ${job_path} status_path="$(pwd)/controldir/processing/${job[0]}.status" echo "${job[1]}" > ${status_path} local_path=$(control_path "$(pwd)/controldir" "${job[0]}" "local") echo "localid=${job[0]}" > ${local_path} echo "sessiondir=$(pwd)/${test}-${job[0]}" >> ${local_path} IFS=";" job_diag=( ${test_input_diag_list[$i]} ) printf "%s\n" "${job_diag[@]}" > $(pwd)/${test}-${job[0]}.diag done <<< "${test_jobs}" IFS="${oIFS}" # Execute scan script script_output=$(../${scan_script} --config $(pwd)/${test}.arc.conf $(pwd)/controldir 2>&1) if test $? -ne 0; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: Scan script \"${scan_script}\" failed:"$'\n'"${script_output}" exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi # Check if commands were executed in expected order, with expected arguments if test ! -z "${simulator_output}"; then if test -f ${SIMULATOR_ERRORS_FILE}; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: Scan script \"${scan_script}\" failed:"$'\n'"Wrong command executed, or wrong arguments passed:"$'\n'"$(cat ${SIMULATOR_ERRORS_FILE})" exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi fi # TODO: Check Errors file # Diagnostic file (.diag) # TODO: Maybe check for possible .diag files not specified in 'expected_diags' list. 
for expected_diag in ${expected_diags}; do oIFS="${IFS}" IFS=";" expected_diag=( $expected_diag ) IFS="${oIFS}" # Check if diag file exists diag_file=${test}-${expected_diag[0]}.diag printf "%s\n" "${expected_diag[@]:1}" | sort -u -t= -k1,1 > expected-${diag_file} # TODO: Filtering is too aggressive. Some attributes appear multiple times (e.g. 'nodelist' attribute). tac ${diag_file} | grep -v "^\(#\|[[:space:]]*$\)" | sort -u -t= -k1,1 > filtered-${diag_file} diag_diff=$(diff -u expected-${diag_file} filtered-${diag_file}) if test $? != 0; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"Diag for job (${expected_diag[0]}) differs from expected:"$'\n'"${diag_diff}" goToParentAndRemoveDir ${testdir} continue 2 fi done # Check exit codes (.lrms_done) # TODO: Maybe check for possible .lrms_done files not specified in 'expected_exit_codes' list. for expected_exit_code in ${expected_exit_codes}; do oIFS="${IFS}" IFS=";" expected_exit_code=( $expected_exit_code ) IFS="${oIFS}" lrms_done_path=$(control_path "controldir" "${expected_exit_code[0]}" "lrms_done") ls -l controldir ls -l controldir/jobs read job_exit_code < ${lrms_done_path} if test "${job_exit_code%% *}" != "${expected_exit_code[1]}"; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"Exit code for job (${expected_exit_code[0]}) was expected to be '${expected_exit_code[1]}', but '${job_exit_code%% *}' was found." goToParentAndRemoveDir ${testdir} continue 2 fi done # Check kicked jobs if test ! -z "${expected_kicked_jobs}" && test ! -f gm_kick_test_file; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"No jobs kicked, however the following jobs were expected to be kicked:"$'\n'"${expected_kicked_jobs}" goToParentAndRemoveDir ${testdir} continue fi jobs_not_kicked="" for job in ${expected_kicked_jobs}; do if test $(grep -c "^${job}\$" gm_kick_test_file) == 0; then jobs_not_kicked="${jobs_not_kicked}"$'\n'"${job}" continue fi # Remove first occurrence of job in kick list. sed -i "0,/^${job}\$/ {/^${job}\$/ d}" gm_kick_test_file done if test ! -z "${jobs_not_kicked}"; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"The following jobs were expected to be kicked, but were not:${jobs_not_kicked}" goToParentAndRemoveDir ${testdir} continue fi # Check if there are any entries left in kick file. if test "$(wc -w gm_kick_test_file | cut -f 1 -d ' ')" -gt 0; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"The following jobs were unexpectedly kicked:"$'\n'"$(sed 's#^# #' gm_kick_test_file)" goToParentAndRemoveDir ${testdir} continue fi # Make post check if test_post_check function is defined. if test "x$(type -t test_post_check)" == "xfunction"; then post_check_output=$(test_post_check ${lrms_script_name}) if test $?
!= 0; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"${post_check_output}" goToParentAndRemoveDir ${testdir} continue fi fi goToParentAndRemoveDir ${testdir} done if test ${exitCode} = 0; then echo $'\n\n'"OK ("$(echo ${TESTS} | wc -w)")" else echo "${errorOutput}"$'\n' fi exit ${exitCode} nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/PaxHeaders/Makefile.in0000644000000000000000000000013015067751356023002 xustar0029 mtime=1759498990.71133636 29 atime=1759499018.20025849 30 ctime=1759499030.372414962 nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/Makefile.in0000644000175000002070000004442415067751356024716 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/test ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 
$(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ 
DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ 
PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ EXTRA_DIST = check_submit_script.sh check_scan_script.sh test-gm-kick.sh all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ 
$(AUTOMAKE) --foreign src/services/a-rex/lrms/test/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/PaxHeaders/check_submit_script.sh0000644000000000000000000000013215067751327025315 xustar0030 mtime=1759498967.771747295 30 atime=1759498967.872493757 30 ctime=1759499030.373714239 nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/check_submit_script.sh0000644000175000002070000002031215067751327027215 0ustar00mockbuildmock00000000000000#!/bin/bash
#
# This script checks that a submit script (passed as first argument) writes the
# expected job options into the LRMS job script, given a job description as input.
# The expected job options and job description are defined in a unit test
# (passed as second argument).
#
# An LRMS job options unit test should define a number of functions prefixed
# 'test_', and a 'TESTS' variable listing these functions without the 'test_'
# prefix. Each such function should define the 'job_description_input' environment
# variable and create an 'expected_lrms_job_script.tmpl' template file. The former
# should contain a job description string in any language supported by ARC (e.g. xRSL),
# whereas the latter should contain the LRMS job options expected to be
# output by the LRMS job script based on the GRAMi file created from the job
# description input. Optionally the unit test can set the arc_test_configuration
# variable, which defines an ARC configuration (arc.conf) to be used when
# executing the LRMS job script.
#
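# A minimal sketch of such a unit test (the test name and the xRSL snippet
# below are illustrative only, not taken from an actual test in this
# directory):
#
#   TESTS="basic"
#   test_basic () {
#     job_description_input='&(executable="/bin/true")(jobname="basic")'
#     cat > expected_lrms_job_script.tmpl <<'END'
#     ... expected contents of the generated LRMS job script ...
#     END
#   }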
usage="$0 <submit script> <unit test>" if test $# != 2; then echo "Two arguments are needed." echo $usage exit 1 fi . ./lrms_common.sh
function goToParentAndRemoveDir() { export PATH=${ORIG_PATH}
# Make the life of developers easier when unit tests need to be modified:
# the following code writes a *.new.diff file whenever a unit test fails.
# The developer should check this diff against the changes made to the batch
# backends and, if the new behaviour is intended, replace the unit-test patch
# with the new diff.
if [ "x$TESTFAILED" = "x1" ]; then tmpdir=$( mktemp -d testdiff.XXXXXX ) mkdir $tmpdir/a $tmpdir/b # Write basic LRMS job script with test substitutions sed -e "s#@TEST_JOB_ID@#${test}#g" \ -e "s#@TEST_SESSION_DIR@#$(pwd)#g" \ -e "s#@TEST_RUNTIME_CONFIG_DIR@#$(pwd)/rtes#" \ -e "s#@TEST_ARC_LOCATION@#$(dirname $(pwd))#" \ -e "s#@TEST_HOSTNAME@#$(uname -n)#" \ "../basic-script.sh" > $tmpdir/a/basic-script.sh cp ${lrms_script_name} $tmpdir/b/basic-script.sh cd $tmpdir diff -ubBw ${test_ignore_matching_line} a/basic-script.sh b/basic-script.sh > $unit_test_basename-${test#test_}.new.diff cd - > /dev/null mv $tmpdir/$unit_test_basename-${test#test_}.new.diff .. rm -rf $tmpdir fi cd .. # do not remove unit test directory for failed test [ "x$TESTFAILED" = "x1" ] || rm -rf ${1} if test "${lrms_script_name}x" != "x"; then rm -f ${lrms_script_name} ${lrms_script_name}.out ${lrms_script_name}.err fi } submit_script=$1 unit_test=$2 unit_test_basename=$(basename ${unit_test}) orig_srcdir=${SRCDIR} ORIG_PATH=${PATH} # Source the unit test, which should define a number of test functions prefixed 'test_' . ${unit_test} exitCode=0 errorOutput="" # Loop over all functions prefixed 'test_' for test in ${TESTS}; do testdir="${unit_test_basename}_test_${test}" rm -rf ${testdir} mkdir -p ${testdir}/${test} ${testdir}/controldir cd ${testdir} testdir=$(pwd) SRCDIR=../${orig_srcdir} export PATH=${ORIG_PATH} unset arc_test_configuration unset job_description_input unset rtes unset ONLY_WRITE_JOBOPTIONS unset test_post_check unset test_ignore_matching_line echo -n "." # Run test function test_${test} if test "x${job_description_input}" = "x"; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: test_${test} in unit test \"${unit_test}\" doesn't define the 'job_description_input' environment variable." exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi if test ! -f "expected_lrms_job_script.tmpl"; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: test_${test} in unit test \"${unit_test}\" did not create the 'expected_lrms_job_script.tmpl' template file." exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi # Write ARC configuration (arc.conf) touch ${test}.arc.conf if test "x${arc_test_configuration}" != "x"; then echo "${arc_test_configuration}" > ${test}.arc.conf elif test "x${general_arc_test_configuration}" != "x"; then echo "${general_arc_test_configuration}" > ${test}.arc.conf fi sed -i "s#@PWD@#${testdir}#g" ${test}.arc.conf # Set up command simulation mkdir bin export PATH=$(pwd)/bin:${PATH} for cmd in ${simulate_cmds}; do ln -s $(pwd)/../command-simulator.sh bin/${cmd} done if test !
-z "${simulator_output}"; then echo "${simulator_output}" > simulator_output export SIMULATOR_OUTCOME_FILE=$(pwd)/simulator_output export SIMULATOR_ERRORS_FILE=$(pwd)/simulator_errors fi if test $(grep '^[[]arex][[:space:]]*$' ${test}.arc.conf -c) -ge 1; then sed -i "/^[[]arex][[:space:]]*$/ a\ sessiondir=$(pwd)\n\ controldir=$(pwd)/controldir" ${test}.arc.conf else echo $'\n'"[arex]"$'\n'"sessiondir=$(pwd)"$'\n'"controldir=$(pwd)/controldir" >> ${test}.arc.conf fi # If defined, write RTEs to disk if test "x${rtes}" != "x"; then mkdir rtes mkdir -p "$(pwd)/controldir/rte/enabled" # Add runtimedir attribute to arc.conf. If 'arex' section does not exist, add it as well. if test $(grep '^[[]arex][[:space:]]*$' ${test}.arc.conf -c) -ge 1; then sed -i "/^[[]arex][[:space:]]*$/ a\ runtimedir=$(pwd)/rtes" ${test}.arc.conf else echo $'\n'"[arex]"$'\n'"runtimedir=$(pwd)/rtes" >> ${test}.arc.conf fi for rte in ${rtes}; do echo "${!rte}" > rtes/${rte} chmod +x rtes/${rte} # 'enable' RTE ln -s "$(pwd)/rtes/${rte}" "$(pwd)/controldir/rte/enabled/${rte}" done fi # Write GRAMi file. ../${TEST_WRITE_GRAMI_FILE} --grami "${test}" --conf "${test}.arc.conf" "${job_description_input}" 2>&1 > /dev/null if test $? -ne 0; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: Writing GRAMI file failed." exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi # Execute submit script grami_path=$(control_path "$(pwd)/controldir" "${test}" "grami") script_output=$(ONLY_WRITE_JOBSCRIPT="yes" ../${submit_script} --config $(pwd)/${test}.arc.conf ${grami_path} 2>&1) if test $? -ne 0; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: Submit script \"${submit_script}\" failed:"$'\n'"${script_output}" exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi lrms_script_name=$(echo "${script_output}" | grep "^Created file " | sed 's/Created file //') # Check if commands were executed in expected order, with expected arguments if test ! -z "${simulator_output}"; then if test -f "${SIMULATOR_ERRORS_FILE}"; then echo -n "F" errorOutput="$errorOutput"$'\n\n'"Error: Submit script \"${submit_script}\" failed:"$'\n'"Wrong command executed, or wrong arguments passed:"$'\n'"$(cat ${SIMULATOR_ERRORS_FILE})" exitCode=$((exitCode + 1)) goToParentAndRemoveDir ${testdir} continue fi fi # Write expected LRMS job script to file. sed -e "s#@TEST_JOB_ID@#${test}#g" \ -e "s#@TEST_SESSION_DIR@#$(pwd)#g" \ -e "s#@TEST_RUNTIME_CONFIG_DIR@#$(pwd)/rtes#" \ -e "s#@TEST_ARC_LOCATION@#$(dirname $(pwd))#" \ -e "s#@TEST_HOSTNAME@#$(uname -n)#" \ "expected_lrms_job_script.tmpl" > ${test}.expected_job_options_in_lrms_script # Compare (diff) expected LRMS job options with those from job script. TESTFAILED=0 diffOutput=$(diff -ubBw ${test_ignore_matching_line} ${test}.expected_job_options_in_lrms_script ${lrms_script_name}) if test $? != 0; then TESTFAILED=1 echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"${diffOutput}" goToParentAndRemoveDir ${testdir} continue fi # Make post check if test_post_check function is defined. if test "x$(type -t test_post_check)" == "xfunction"; then post_check_output=$(test_post_check ${lrms_script_name}) if test $? 
!= 0; then echo -n "F" exitCode=$((exitCode + 1)) errorOutput="$errorOutput"$'\n\n'"Test fail in test_${test}:"$'\n'"${post_check_output}" goToParentAndRemoveDir ${testdir} continue fi fi goToParentAndRemoveDir ${testdir} done if test ${exitCode} = 0; then echo $'\n\n'"OK ("$(echo ${TESTS} | wc -w)")" else echo "${errorOutput}"$'\n' fi exit ${exitCode} nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/PaxHeaders/test-gm-kick.sh0000644000000000000000000000013215067751327023570 xustar0030 mtime=1759498967.771747295 30 atime=1759498967.872493757 30 ctime=1759499030.376216753 nordugrid-arc-7.1.1/src/services/a-rex/lrms/test/test-gm-kick.sh0000644000175000002070000000042615067751327025474 0ustar00mockbuildmock00000000000000#!/bin/bash if test ! -f "${GM_KICK_TEST_FILE}"; then GM_KICK_TEST_FILE=gm_kick_test_file fi isid='0' for arg in $@; do if [ "$isid" = '0' ] ; then if [ "$arg" = "-j" ] ; then isid='1' fi else echo "$arg" >> ${GM_KICK_TEST_FILE} isid='0' fi done nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/boinc0000644000000000000000000000013215067751426020773 xustar0030 mtime=1759499030.342442991 30 atime=1759499034.764510185 30 ctime=1759499030.342442991 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/0000755000175000002070000000000015067751426022752 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327023103 xustar0030 mtime=1759498967.765366009 30 atime=1759498967.869493711 29 ctime=1759499030.33450528 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/Makefile.am0000644000175000002070000000015615067751327025010 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-boinc-env.sh pkgdata_SCRIPTS = submit-boinc-job cancel-boinc-job scan-boinc-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/PaxHeaders/Makefile.in0000644000000000000000000000013015067751356023115 xustar0030 mtime=1759498990.326325156 30 atime=1759499018.969270176 28 ctime=1759499030.3383941 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/Makefile.in0000644000175000002070000005536415067751356025036 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/boinc ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-boinc-job scan-boinc-job cancel-boinc-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-boinc-job.in \ $(srcdir)/scan-boinc-job.in $(srcdir)/submit-boinc-job.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ 
GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = 
@SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-boinc-env.sh pkgdata_SCRIPTS = submit-boinc-job cancel-boinc-job scan-boinc-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/boinc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/boinc/Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-boinc-job: $(top_builddir)/config.status $(srcdir)/submit-boinc-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-boinc-job: $(top_builddir)/config.status $(srcdir)/scan-boinc-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-boinc-job: $(top_builddir)/config.status $(srcdir)/cancel-boinc-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo 
"$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/PaxHeaders/scan-boinc-job.in0000644000000000000000000000013115067751327024163 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 29 ctime=1759499030.34223202 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/scan-boinc-job.in0000644000175000002070000001253015067751327026067 0ustar00mockbuildmock00000000000000#!/bin/bash
#
# Periodically monitor for jobs which have finished or failed but not
# reported an exitcode
#
#set -x
id=`id -u` #debug=: debug () { echo -n `date` 1>&2 echo -n ' ' 1>&2 echo $@ 1>&2 } debug "starting" debug "options = $@" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi if [ -z "$1" ] ; then echo "Controldir argument missing" 1>&2 ; exit 1 ; fi joboption_lrms="boinc" lrms_options="boinc_app_id boinc_db_host boinc_db_port boinc_db_user boinc_db_pass boinc_db_name boinc_project_dir" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init GMKICK=${pkglibexecdir}/gm-kick # Prints the uid of the owner of the file given as argument # Perl is used because it's more portable than using the stat command printuid () { code='my @s = stat($ARGV[0]); print($s[4] || "")' /usr/bin/perl -we "$code" "$1" } # # Attempts to switch to uid passed as the first argument and then runs the # commands passed as the second argument in a shell. The remaining arguments # are passed as arguments to the shell. No warning is given in case switching # uid is not possible.
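# A usage sketch (mirroring the call made further below): when this script
# runs as root the command is executed as the job owner, otherwise it simply
# runs as the current user:
#
#   do_as_uid "$uid" "cat '$diagfile'"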
#
do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 && $uid ) { eval { $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $UID != $uid; } system("/bin/sh","-c",@args); exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" }
# Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors
save_commentfile () { uid=$1 commentfile=$2 errorsfile=$3 echo '---------- Output of the job wrapper script -----------' >> $errorsfile cat $commentfile 2> /dev/null >> $errorsfile echo '------------------------- End of output -------------------------' >> $errorsfile #do_as_uid "$uid" "$action" }
for control_dir in "$@" ; do if [ ! -d "${control_dir}" ]; then echo "No control dir $control_dir" 1>&2 continue fi
# Bash specific, but this script will be rewritten in python soon...
declare -A finished_jobs appidclause="" if [ ! -z "$CONFIG_boinc_app_id" ]; then appidclause="and appid=$CONFIG_boinc_app_id" fi finished=$(mysql -h $CONFIG_boinc_db_host -P $CONFIG_boinc_db_port -u $CONFIG_boinc_db_user --password=$CONFIG_boinc_db_pass $CONFIG_boinc_db_name -e "select name from workunit where assimilate_state=2 $appidclause") for job in `echo $finished`; do finished_jobs[$job]=1 done
# iterate over all jobs known in the control directory
find "${control_dir}/processing" -name '*.status' \ | xargs egrep -l "INLRMS|CANCELING" \ | sed -e 's/.*\///' -e 's/\.status$//' \ | while read job; do #debug "scanning job = $job" unset joboption_jobid unset joboption_directory lrmsfile=$(control_path "${control_dir}" "${job}" "lrms_done") gramifile=$(control_path "${control_dir}" "${job}" "grami") localfile=$(control_path "${control_dir}" "${job}" "local") errorsfile=$(control_path "${control_dir}" "${job}" "errors")
# this job was already completed, nothing remains to be done
[ -f "$lrmsfile" ] && continue
# a grami file exists for all jobs that GM thinks are running.
# proceed to next job if this file is missing.
if [ ! -f "$gramifile" ]; then continue fi
# extract the local job id and related joboption_* fields from the grami file
[ ! -f "$gramifile" ] && continue . "$gramifile"
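# The grami file sourced above is a shell-sourcable set of joboption_*
# assignments written at submission time; the fields used below look roughly
# like this (values are illustrative placeholders):
#
#   joboption_jobid=<BOINC workunit name, equal to the A-REX job id>
#   joboption_directory=<session directory of the job>
#   joboption_arg_code=0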
# the local job id could not be determined; proceed to the next job
[ -z "$joboption_jobid" ] && continue #debug "local jobid = $joboption_jobid"
# checking if process is still running
if [[ ! ${finished_jobs[$joboption_jobid]} ]]; then #debug "$joboption_jobid is still running, continuing to next" continue else debug "$joboption_jobid is finished" fi uid=$(printuid "$localfile") debug "local user id = $uid" diagfile=${joboption_directory}.diag debug "checking $diagfile" exitcode=$(do_as_uid "$uid" "cat '$diagfile'" | sed -n 's/^exitcode=\([0-9]*\).*/\1/p') debug "exitcode = [$exitcode] extracted from $diagfile" exitcode=0 comment="" if [ -z "$joboption_arg_code" ] ; then joboption_arg_code='0' ; fi if [ -z "$exitcode" ]; then echo "Job $job with PID $joboption_jobid died unexpectedly" 1>&2 comment="Job died unexpectedly" 1>&2 exitcode=-1 elif [ "$exitcode" -ne "$joboption_arg_code" ]; then comment="Job finished with wrong exit code - $exitcode != $joboption_arg_code" 1>&2 fi debug "got exitcode=$exitcode" save_commentfile "$uid" "${joboption_directory}.comment" "$errorsfile" echo "$exitcode $comment" > "$lrmsfile" "${GMKICK}" -j "${job}" "${control_dir}" done done debug "done, going to sleep" sleep 120 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/PaxHeaders/configure-boinc-env.sh0000644000000000000000000000013215067751327025243 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 30 ctime=1759499030.336434252 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/configure-boinc-env.sh0000644000175000002070000000010515067751327027141 0ustar00mockbuildmock00000000000000# # set environment variables for boinc # # Script returned ok true nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/PaxHeaders/submit-boinc-job.in0000644000000000000000000000013215067751327024543 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 30 ctime=1759499030.343673263 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/submit-boinc-job.in0000644000175000002070000003013415067751327026446 0ustar00mockbuildmock00000000000000#!/bin/bash
# set -x
#
# Input: path to grami file (same as Globus).
# This script creates a temporary job script and runs it.
DEBUG=0 echo "----- starting submit_boinc_job -----" 1>&2 joboption_lrms="boinc" lrms_options="boinc_app_id boinc_db_host boinc_db_port boinc_db_user boinc_db_pass boinc_db_name boinc_project_dir" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init joboption_stdout='`pwd`/'`basename $joboption_stdout` joboption_stderr='`pwd`/'`basename $joboption_stderr` RUNTIME_NODE_SEES_FRONTEND=yes
##############################################################
# Zero stage of runtime environments
##############################################################
RTE_stage0 joboption_directory_orig=$joboption_directory joboption_directory='`pwd`' # make sure session is world-writable chmod 777 $joboption_directory_orig PROJECT_ROOT="$CONFIG_boinc_project_dir" echo "project_root=$PROJECT_ROOT" 1>&2
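# CONFIG_boinc_project_dir (like the other CONFIG_boinc_* variables used by
# these backend scripts) is filled in by the arc.conf parser from the
# boinc_project_dir option declared in lrms_options above. A minimal, purely
# illustrative configuration fragment might look like:
#
#   [lrms]
#   lrms=boinc
#   boinc_project_dir=/home/boinc/projects/myproject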
-x "${PROJECT_ROOT}/bin/create_work" ]; then echo "Project dir ${PROJECT_ROOT} does appear to be a BOINC project" 1>&2 exit 1 fi cd $PROJECT_ROOT ############################################################## # create job script ############################################################## mktempscript LRMS_JOB_BOINC="${LRMS_JOB_SCRIPT}.boinc" touch $LRMS_JOB_BOINC chmod u+x ${LRMS_JOB_SCRIPT} ############################################################## # Start job script ############################################################## N=0 x=$joboption_directory_orig while [ "$x" != "/" ] do x=`dirname $x` N=$((N+1)) done echo '#!/bin/sh' > $LRMS_JOB_SCRIPT echo "#job script built by grid-manager and input file for BOINC job" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<"FMARK" set -x export RUNTIME_CONFIG_DIR=`pwd`/ #rename root file FMARK echo tar --strip-components=$N -xvf *input.tar.gz >> $LRMS_JOB_SCRIPT ############################################################## # non-parallel jobs ############################################################## set_count sourcewithargs_jobscript ############################################################## # Override umask ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi setup_runtime_env ############################################################## # Add std... 
##############################################################
# non-parallel jobs
##############################################################
set_count sourcewithargs_jobscript
##############################################################
# Override umask
##############################################################
echo "" >> $LRMS_JOB_SCRIPT echo "# Override umask of execution node (sometimes values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT
##############################################################
# Init accounting
##############################################################
accounting_init
##############################################################
# Add environment variables
##############################################################
add_user_env
##############################################################
# Check for existence of executable
##############################################################
if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi setup_runtime_env
##############################################################
# Add std... to job arguments
##############################################################
include_std_streams
##############################################################
# Move files to local working directory (job is done on node only)
# RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id
##############################################################
move_files_to_node
##############################################################
# Runtime configuration
##############################################################
echo RUNTIME_JOB_DIAG='`pwd`/'`basename $joboption_directory_orig`.diag >>$LRMS_JOB_SCRIPT RTE_stage1 echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT
#####################################################
# Accounting (WN OS Detection)
#####################################################
detect_wn_systemsoftware
#####################################################
# Go to working dir and start job
#####################################################
cd_and_run
# Add nodename username@hostname and fix core count for accounting
# Add exit code here since accounting_end does it after the diag file has been tarred up
cat >> $LRMS_JOB_SCRIPT <<"FMARK" sed -i -e '/nodename=/d' $RUNTIME_JOB_DIAG hostname=` grep domain_name init_data.xml |awk -F '>' '{print $2}'|awk -F "<" '{print $1}'|sed -e "s# #_#g"` username=` grep user_name init_data.xml |awk -F '>' '{print $2}'|awk -F "<" '{print $1}'|sed -e "s# #_#g"` nodename=$username@$hostname echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" [ -n "$ATHENA_PROC_NUMBER" ] && sed -i -e 's/Processors=1/Processors='"$ATHENA_PROC_NUMBER"'/' $RUNTIME_JOB_DIAG echo "exitcode=$RESULT" >> "$RUNTIME_JOB_DIAG" FMARK
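# init_data.xml is written by the BOINC client into the job's working
# directory; the fields parsed in the block above appear in it roughly as
# follows (contents illustrative):
#   <domain_name>volunteer-host</domain_name>
#   <user_name>Volunteer Name</user_name>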
echo "" >> $LRMS_JOB_SCRIPT
##############################################################
# Runtime (post)configuration at computing node
##############################################################
RTE_stage2
##############################################################
# Move files back to session directory (job is done on node only)
# RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR
##############################################################
#move_files_to_frontend
#############################################################
# tar the result files into a single file, result.tar.gz
#############################################################
notnull () { if [ -z "$1" ];then echo 0 else echo 1 fi } result_list= i=0 eval opt=\${joboption_outputfile_$i} ret=`notnull $opt` while [ $ret = "1" ] do output_file=$(echo $opt|sed -e "s#^/#./#") output_file=$(echo $output_file|sed -e "s#@##") result_list=$result_list" "$output_file i=$((i+1)) eval opt=\${joboption_outputfile_$i} ret=`notnull $opt` echo "ret="$ret done files=$(echo $result_list|tr " " "\n") cat >> $LRMS_JOB_SCRIPT <<'EOF' echo "zip all output files" flist="*.diag " EOF echo for f in $files >>$LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOF' do if [ -e $f ];then flist=$flist" "$f fi done EOF #echo $flist cat <<'EOF' >>$LRMS_JOB_SCRIPT if [ -f output.list ];then ol=$(awk '{print $1}' output.list) for i in $ol do if [[ -f $i && ! $i =~ HITS.* ]];then flist=$flist" "$i fi done sed -i -e 's/HITS.* /HITS.pool.root.1 /' output.list fi EOF echo 'tar cvf result.tar.gz $flist' >>$LRMS_JOB_SCRIPT chmod a+r $LRMS_JOB_SCRIPT
##############################################################
# Finish accounting and exit job
##############################################################
accounting_end
#######################################
# Submit the job
#######################################
echo "job script ${LRMS_JOB_SCRIPT} built" 1>&2 JobId=`basename $joboption_directory_orig` JobInput=$joboption_directory_orig/$JobId"_input.tar.gz" wu=$JobId echo "#!/bin/bash" >> $LRMS_JOB_BOINC echo "set -x" >> $LRMS_JOB_BOINC tflist="" Root_basename=() RootFile=()
## RootFile keeps the original path of the root files
## Root_basename keeps the basename of the root files, with the JobId added to make them unique
i=0 for file in `ls $joboption_directory_orig` do echo $file|grep ".root" > /dev/null ret=$? if [ $ret -eq 0 ]; then echo skip root file $file Root_basename[$i]=$JobId"_"$file RootFile[$i]=$joboption_directory_orig/$file sed -i -e "/#rename root file/a\mv ATLAS.root_$i $file" $LRMS_JOB_SCRIPT let i=$i+1 continue else tflist=$tflist" "$joboption_directory_orig/$file fi done echo tar zhcvf $JobInput $tflist echo "tar zhcvf $JobInput $tflist" >> $LRMS_JOB_BOINC echo "cd $PROJECT_ROOT " >>$LRMS_JOB_BOINC JobInput_basename=`basename $JobInput` Script_basename=`basename $LRMS_JOB_SCRIPT` echo "cp $JobInput "'`bin/dir_hier_path '$(basename $JobInput)'`' >> $LRMS_JOB_BOINC echo "chmod a+r "'`bin/dir_hier_path '$(basename $JobInput)'`' >> $LRMS_JOB_BOINC echo "cp $LRMS_JOB_SCRIPT " '`bin/dir_hier_path' $(basename $LRMS_JOB_SCRIPT)'`' >>$LRMS_JOB_BOINC echo "chmod a+r " '`bin/dir_hier_path' $(basename $LRMS_JOB_SCRIPT)'`' >>$LRMS_JOB_BOINC [ -n "$PROJECT_DOWNLOAD_ROOT" ] && echo "cd $PROJECT_DOWNLOAD_ROOT" >> $LRMS_JOB_BOINC
## process the root files as remote files
cd $PROJECT_ROOT echo "current directory is the project_root: "$PWD remote_url=() fsize=() md5=() i=0 while [ $i -lt ${#RootFile[@]} ] do [ -L ${RootFile[$i]} ] && RootFile[$i]=`ls -l ${RootFile[$i]}|awk '{print $11}'` echo "ln -s ${RootFile[$i]} "'`bin/dir_hier_path' ${Root_basename[$i]} '`' >> $LRMS_JOB_BOINC if [ -n "$PROJECT_DOWNLOAD_ROOT" ]; then download_dir=`bin/dir_hier_path ${Root_basename[$i]} | awk -F/ '{print $(NF-1)}'` remote_url[$i]="${PROJECT_DOWNLOAD_URL}/${download_dir}/${Root_basename[$i]}" fsize[$i]=`stat -c %s ${RootFile[$i]}` md5[$i]=`md5sum ${RootFile[$i]} | awk '{print $1}'` echo "Using remote file ${remote_url[$i]} ${fsize[$i]} ${md5[$i]}" 1>&2 fi let i=$i+1 done [ -n "$PROJECT_DOWNLOAD_ROOT" ] && echo "cd $PROJECT_ROOT" >> $LRMS_JOB_BOINC
## generate the input template file
let ifileno=2+${#RootFile[@]} i=0 intmp="" while [ $i -lt $ifileno ] do intmp="$intmp $i " let i=$i+1 done intmp="$intmp " i=0 while [ $i -lt ${#RootFile[@]} ] do intmp="$intmp $i shared/ATLAS.root_$i " let i=$i+1 done intmp="$intmp $i shared/input.tar.gz " let i=$i+1 intmp="$intmp $i shared/start_atlas.sh " intmp_res=$(cat $WU_TEMPLATE) intmp="$intmp $intmp_res " intmp="$intmp " WU_TEMPLATE_tmp=$(mktemp /tmp/${BOINC_APP}_XXXXXX) cat << EOF > $WU_TEMPLATE_tmp $intmp EOF
#######################################
if [ -z $joboption_memory ];then memreq=2000000000 else memreq=$((joboption_memory*1000000)) fi if [ -z $joboption_cputime ];then maxcputime=$((2*3600*3000000000)) else maxcputime=$((joboption_cputime*3000000000)) fi priority= if [ !
-z "$joboption_priority" ]; then priority="--priority $joboption_priority" fi batchid= if [ -f "${joboption_directory_orig}/pandaJobData.out" ]; then taskid=$(grep -E -m1 -o "taskID=[[:digit:]]+" ${joboption_directory_orig}/pandaJobData.out|cut -d = -f 2 ) if [ ! -z "$taskid" ]; then batchid="--batch $taskid" fi fi cmd="bin/create_work \ --appname $BOINC_APP \ --wu_name $wu \ --wu_template $WU_TEMPLATE_tmp \ --result_template $RESULT_TEMPLATE \ --rsc_memory_bound $memreq \ --rsc_fpops_est $maxcputime \ $batchid \ $priority" j=0 while [ $j -lt ${#RootFile[@]} ] do if [ -n "$PROJECT_DOWNLOAD_ROOT" ]; then cmd="$cmd \ --remote_file ${remote_url[$j]} ${fsize[$j]} ${md5[$j]}" else cmd="$cmd \ ${Root_basename[$j]}" fi let j=$j+1 done cmd="$cmd \ $(basename $JobInput) \ $(basename $LRMS_JOB_SCRIPT)" echo $cmd >> $LRMS_JOB_BOINC echo 'ret=$?' >>$LRMS_JOB_BOINC echo 'exit $ret' >>$LRMS_JOB_BOINC if [ $DEBUG -eq 2 ];then cat $LRMS_JOB_BOINC 1>&2 else sh $LRMS_JOB_BOINC 1>&2 fi rc=$? if [ $rc -eq 0 ];then echo "job $wu submitted successfully!" 1>&2 echo "joboption_jobid=$wu" >> $GRAMI_FILE fi echo "----- removing intermediate files ----" 1>&2 if [ $DEBUG -ne 1 ];then rm -fr $WU_TEMPLATE_tmp rm -fr $LRMS_JOB_BOINC $LRMS_JOB_ERR $LRMS_JOB_OUT $LRMS_JOB_SCRIPT rm -fr $JobInput fi echo "----- exiting submit_boinc_job -----" 1>&2 echo "" 1>&2 exit $rc nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/PaxHeaders/cancel-boinc-job.in0000644000000000000000000000013215067751327024465 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 30 ctime=1759499030.340357745 nordugrid-arc-7.1.1/src/services/a-rex/lrms/boinc/cancel-boinc-job.in0000644000175000002070000000663315067751327026377 0ustar00mockbuildmock00000000000000#!/bin/sh # set -x # # Cancel job running in boinc. # control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } echo "----- starting cancel_boinc_job -----" 1>&2 trap 'echo "----- exiting cancel_boinc_job -----" 1>&2; echo "" 1>&2' EXIT joboption_lrms="boinc" lrms_options="boinc_app_id boinc_db_host boinc_db_port boinc_db_user boinc_db_pass boinc_db_name boinc_project_dir" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init if [ -z "$joboption_controldir" ] ; then joboption_controldir=`dirname "$GRAMI_FILE"` joboption_controldir=`dirname "$joboption_controldir"` joboption_controldir=`dirname "$joboption_controldir"` joboption_controldir=`dirname "$joboption_controldir"` joboption_controldir=`dirname "$joboption_controldir"` if [ "$joboption_controldir" = '.' ] ; then joboption_controldir="$PWD" fi fi if [ -z "$joboption_gridid" ] ; then joboption_gridid=`echo "$GRAMI_FILE" | sed 's/.*\([^\/]*\)\/\([^\/]*\)\/\([^\/]*\)\/\([^\/]*\)\/grami$/\1\2\3\4/'` fi job_control_dir="$joboption_controldir" echo "Deleting job $joboption_gridid, local id $joboption_jobid" 1>&2 localfile=$(control_path "$job_control_dir" "${joboption_gridid}" "local") if [ ! -r "$localfile" ]; then echo "Local description of job ${joboption_gridid} not found at '$localfile'. 
Job was not killed, if running at all." 1>&2 exit 1 fi if [ -z "$joboption_jobid" ] ; then joboption_jobid=`cat "$localfile" | grep '^localid=' | sed 's/^localid=//'` fi job_control_subdir= if [ -r "$job_control_dir/accepting/${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/accepting" elif [ -r "$job_control_dir/processing/${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/processing" elif [ -r "$job_control_dir/finished/${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/finished" else echo "Status file of job ${joboption_gridid} not found in '$job_control_dir'. Job was not killed, if running at all." 1>&2 exit 1 fi if [ -z "$CONFIG_boinc_project_dir" ]; then echo "No project directory configured, cannot cancel" 1>&2 exit 1 elif [ ! -d "$CONFIG_boinc_project_dir" ]; then echo "Project dir ${CONFIG_boinc_project_dir} does not exist, cannot cancel" 1>&2 exit 1 elif [ ! -x "${CONFIG_boinc_project_dir}/bin/cancel_jobs" ]; then echo "Project dir ${CONFIG_boinc_project_dir} does appear to be a BOINC project, cannot cancel" 1>&2 exit 1 fi case X`cat "$job_control_subdir/${joboption_gridid}.status"` in XINLRMS | XCANCELING) if [ -z "$joboption_jobid" ] ; then echo "Can't find local id of job" 1>&2 exit 1 fi cd $CONFIG_boinc_project_dir; bin/cancel_jobs --name $joboption_gridid ;; XFINISHED | XDELETED) echo "Job already died, won't do anything" 1>&2 ;; *) echo "Job is at unkillable state" 1>&2 ;; esac exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/condor0000644000000000000000000000013215067751426021165 xustar0030 mtime=1759499030.076438949 30 atime=1759499034.764510185 30 ctime=1759499030.076438949 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/0000755000175000002070000000000015067751426023144 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023276 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 30 ctime=1759499030.070074516 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/Makefile.am0000644000175000002070000000016515067751327025202 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-condor-env.sh pkgdata_SCRIPTS = cancel-condor-job scan-condor-job \ submit-condor-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/configure-condor-env.sh0000644000000000000000000000013215067751327025627 xustar0030 mtime=1759498967.765899957 30 atime=1759498967.870493727 30 ctime=1759499030.071185251 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/configure-condor-env.sh0000644000175000002070000000266215067751327027537 0ustar00mockbuildmock00000000000000 # Conditionaly enable performance logging init_perflog # Initializes environment variables: CONDOR_BIN_PATH # Valued defines in arc.conf take priority over pre-existing environment # variables. # Condor executables are located using the following cues: # 1. condor_bin_path option in arc.conf # 2. PATH environment variable if [ ! -z "$CONFIG_condor_bin_path" ]; then CONDOR_BIN_PATH=$CONFIG_condor_bin_path; else condor_version=$(type -p condor_version) CONDOR_BIN_PATH=${condor_version%/*} fi; if [ ! -x "$CONDOR_BIN_PATH/condor_version" ]; then echo 'Condor executables not found!'; return 1; fi echo "Using Condor executables from: $CONDOR_BIN_PATH" export CONDOR_BIN_PATH if [ ! -z "$CONFIG_condor_config" ]; then CONDOR_CONFIG=$CONFIG_condor_config; else CONDOR_CONFIG="/etc/condor/condor_config"; fi; if [ ! 
-e "$CONDOR_CONFIG" ]; then echo 'Condor config file not found!'; return 1; fi echo "Using Condor config file at: $CONDOR_CONFIG" export CONDOR_CONFIG # FIX: Recent versions (8.5+?) of HTCondor does not show all jobs when running condor_q, but only own # Solution according Brain Bockelman GGUS 123947 _condor_CONDOR_Q_ONLY_MY_JOBS=false export _condor_CONDOR_Q_ONLY_MY_JOBS _condor_CONDOR_Q_DASH_BATCH_IS_DEFAULT=false export _condor_CONDOR_Q_DASH_BATCH_IS_DEFAULT nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356023310 xustar0029 mtime=1759498990.35853153 30 atime=1759499018.240259098 30 ctime=1759499030.072384488 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/Makefile.in0000644000175000002070000005543015067751356025222 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/condor ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 
$(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = scan-condor-job cancel-condor-job \ submit-condor-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-condor-job.in \ $(srcdir)/scan-condor-job.in $(srcdir)/submit-condor-job.in \ README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = 
@GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = 
@SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-condor-env.sh pkgdata_SCRIPTS = cancel-condor-job scan-condor-job \ submit-condor-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/condor/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/condor/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): scan-condor-job: $(top_builddir)/config.status $(srcdir)/scan-condor-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-condor-job: $(top_builddir)/config.status $(srcdir)/cancel-condor-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ submit-condor-job: $(top_builddir)/config.status $(srcdir)/submit-condor-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 
's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/cancel-condor-job.in0000644000000000000000000000013215067751327025051 xustar0030 mtime=1759498967.765899957 30 atime=1759498967.869493711 30 ctime=1759499030.073669023 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/cancel-condor-job.in0000644000175000002070000000157315067751327026761 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Cancel job running in Condor # progname=$(basename "$0") echo "----- starting $progname -----" 1>&2 joboption_lrms="condor" lrms_options="condor_requirements condor_rank condor_bin_path condor_config" queue_options="condor_requirements" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # run condor_rm echo "$progname: canceling job $joboption_jobid with condor_rm..." 1>&2 $CONDOR_BIN_PATH/condor_rm ${joboption_jobid%.`hostname -f`} 1>&2 echo "----- exiting $progname -----" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/submit-condor-job.in0000644000000000000000000000013215067751327025127 xustar0030 mtime=1759498967.765899957 30 atime=1759498967.870493727 30 ctime=1759499030.076349606 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/submit-condor-job.in0000644000175000002070000004055415067751327027041 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Based on globus submission script for pbs # # Submits job to HTCondor. 
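#
# For orientation: the grami file is a flat list of shell variable
# assignments written by A-REX. A minimal illustrative excerpt (variable
# names as read by this script and scan-condor-job; values hypothetical):
#
#   joboption_jobname=myjob
#   joboption_queue=short
#   joboption_count=1
#   joboption_memory=2000
#   joboption_walltime=3600
#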
# Input: path to grami file (same as Globus). # # The temporary job description file is created for the submission and then removed # at the end of this script. echo "----- starting submit_condor_job -----" 1>&2 joboption_lrms="condor" lrms_options="condor_requirements condor_rank condor_bin_path condor_config" queue_options="condor_requirements" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then RUNTIME_LOCAL_SCRATCH_DIR="\${_CONDOR_SCRATCH_DIR}" fi # check remote or local scratch is configured check_any_scratch ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript is_cluster=true ############################################################## # Start job description file ############################################################## CONDOR_SUBMIT='condor_submit' if [ ! -z "$CONDOR_BIN_PATH" ] ; then CONDOR_SUBMIT=${CONDOR_BIN_PATH}/${CONDOR_SUBMIT} fi # HTCondor job script and submit description file rm -f "$LRMS_JOB_SCRIPT" LRMS_JOB_SCRIPT="${joboption_directory}/condorjob.sh" LRMS_JOB_DESCRIPT="${joboption_directory}/condorjob.jdl" echo "# HTCondor job description built by arex" > $LRMS_JOB_DESCRIPT echo "Executable = condorjob.sh" >> $LRMS_JOB_DESCRIPT echo "Input = $joboption_stdin" >> $LRMS_JOB_DESCRIPT echo "Log = ${joboption_directory}/log">> $LRMS_JOB_DESCRIPT # write HTCondor output to .comment file if possible, but handle the situation when # jobs are submitted by HTCondor-G < 8.0.5 condor_stdout="${joboption_directory}.comment" condor_stderr="${joboption_directory}.comment" if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # if [[ $joboption_stdout =~ _condor_stdout$ ]]; then if expr match "$joboption_stdout" '.*_condor_stdout$' > /dev/null; then condor_stdout=$joboption_stdout; condor_stderr=$joboption_stderr; fi fi echo "Output = $condor_stdout">> $LRMS_JOB_DESCRIPT echo "Error = $condor_stderr">> $LRMS_JOB_DESCRIPT # queue if [ ! -z "${joboption_queue}" ] ; then echo "+NordugridQueue = \"$joboption_queue\"" >> $LRMS_JOB_DESCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then #TODO is this necessary? do parts of the infosys need these limitations? jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "Description = $jobname" >> $LRMS_JOB_DESCRIPT else jobname="gridjob" echo "Description = $jobname" >> $LRMS_JOB_DESCRIPT fi # universe if [ ! -z $DOCKER_UNIVERSE ] ; then echo "Universe = $DOCKER_UNIVERSE" >> $LRMS_JOB_DESCRIPT echo "should_transfer_files = YES" >> $LRMS_JOB_DESCRIPT echo "when_to_transfer_output = ON_EXIT" >> $LRMS_JOB_DESCRIPT else echo "Universe = vanilla" >> $LRMS_JOB_DESCRIPT fi if [ ! 
-z $DOCKER_IMAGE ] ; then echo "docker_image = $DOCKER_IMAGE" >> $LRMS_JOB_DESCRIPT fi # notification echo "Notification = Never" >> $LRMS_JOB_DESCRIPT # no job restart REQUIREMENTS="(NumJobStarts == 0)" PERIODIC_REMOVE="(JobStatus == 1 && NumJobStarts > 0)" # custom requirements if [ ! -z "$CONFIG_condor_requirements" ] ; then # custom requirement from arc.conf REQUIREMENTS="${REQUIREMENTS} && ( $CONFIG_condor_requirements )" fi echo "Requirements = ${REQUIREMENTS}" >> $LRMS_JOB_DESCRIPT ##################################################### # priority ##################################################### if [ ! -z "$joboption_priority" ]; then #Condor uses any integer as priority. 0 being default. Only per user basis. #We assume that only grid jobs are relevant. #In that case we can use ARC 0-100 but translated so default is 0. priority=$((joboption_priority-50)) echo "Priority = $priority" >> $LRMS_JOB_DESCRIPT fi # rank if [ ! -z "$CONFIG_condor_rank" ] ; then echo "Rank = $CONFIG_condor_rank" >> $LRMS_JOB_DESCRIPT fi # proxy or token if [ -f "${joboption_directory}/user.proxy" ]; then if head -n 1 "${joboption_directory}/user.proxy" | grep -F -q 'BEGIN CERTIFICATE' ; then echo "x509userproxy = ${joboption_directory}/user.proxy" >> $LRMS_JOB_DESCRIPT else echo "scitokens_file = ${joboption_directory}/user.proxy" >> $LRMS_JOB_DESCRIPT fi fi ############################################################## # (non-)parallel jobs ############################################################## set_count if [ ! -z $joboption_count ] && [ $joboption_count -gt 0 ] ; then echo "request_cpus = $joboption_count" >> $LRMS_JOB_DESCRIPT fi if [ "$joboption_exclusivenode" = "true" ]; then echo "+RequiresWholeMachine=True" >> $LRMS_JOB_DESCRIPT fi ############################################################## # Execution times (minutes) ############################################################## if [ ! -z "$joboption_cputime" ] ; then if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxcputime=$(( $joboption_cputime / $joboption_count )) echo "+JobCpuLimit = $joboption_cputime" >> $LRMS_JOB_DESCRIPT PERIODIC_REMOVE="${PERIODIC_REMOVE} || RemoteUserCpu + RemoteSysCpu > JobCpuLimit" fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $maxcputime * $walltime_ratio )) fi fi if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi echo "+JobTimeLimit = $joboption_walltime" >> $LRMS_JOB_DESCRIPT PERIODIC_REMOVE="${PERIODIC_REMOVE} || RemoteWallClockTime > JobTimeLimit" fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem if [ ! -z "$joboption_memory" ] ; then memory_bytes=$(( $joboption_memory * 1024 )) memory_req=$(( $joboption_memory )) # HTCondor needs to know the total memory for the job, not memory per core if [ ! 
-z $joboption_count ] && [ $joboption_count -gt 0 ] ; then memory_bytes=$(( $joboption_count * $memory_bytes )) memory_req=$(( $joboption_count * $memory_req )) fi echo "request_memory=$memory_req" >> $LRMS_JOB_DESCRIPT echo "+JobMemoryLimit = $memory_bytes" >> $LRMS_JOB_DESCRIPT # it is important to protect evaluation from undefined ResidentSetSize PERIODIC_REMOVE="${PERIODIC_REMOVE} || ((ResidentSetSize isnt undefined ? ResidentSetSize : 0) > JobMemoryLimit)" fi ############################################################## # HTCondor stage in/out ############################################################## if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then ( cd "$joboption_directory" if [ $? -ne '0' ] ; then echo "Can't change to session directory: $joboption_directory" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_DESCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error." exit 1 fi # transfer all session directory if not shared between ARC CE and worknodes scratch_dir=`dirname "$joboption_directory"` echo "should_transfer_files = YES" >> $LRMS_JOB_DESCRIPT echo "When_to_transfer_output = ON_EXIT_OR_EVICT" >> $LRMS_JOB_DESCRIPT echo "Transfer_input_files = $joboption_directory" >> $LRMS_JOB_DESCRIPT ) fi echo "Periodic_remove = ${PERIODIC_REMOVE}" >> $LRMS_JOB_DESCRIPT echo "Queue" >> $LRMS_JOB_DESCRIPT echo "#!/bin/bash -l" > $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT # Script must have execute permission chmod 0755 $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_DESCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error." exit 1 fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! 
-z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 ############################################################## # Diagnostics ############################################################## echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' EOSCR ############################################################## # Accounting (WN OS Detection) ############################################################## detect_wn_systemsoftware ############################################################## # Check intermediate result again ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## RTE_stage2 ##################################################### # Clean up output files in the local scratch dir 
##################################################### clean_local_scratch_dir_output "moveup" ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_frontend ############################################################## # Finish accounting and exit job ############################################################## accounting_end if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-condor-job, JobScriptCreation: $t" >> $perflogfilesub fi ####################################### # Submit the job ####################################### echo "HTCondor job script built" 1>&2 # Execute condor_submit command cd "$joboption_directory" echo "HTCondor script follows:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 CONDOR_RESULT=1 CONDOR_TRIES=0 while [ "$CONDOR_TRIES" -lt '10' ] ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${CONDOR_SUBMIT} $LRMS_JOB_DESCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR CONDOR_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-condor-job, JobSubmission: $t" >> $perflogfilesub fi if [ "$CONDOR_RESULT" -eq '0' ] ; then break ; fi CONDOR_TRIES=$(( $CONDOR_TRIES + 1 )) sleep 2 done if [ $CONDOR_RESULT -eq '0' ] ; then job_out=`cat $LRMS_JOB_OUT` if [ "${job_out}" = "" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the condor jobid for the job!" 1>&2 echo "Submission: Local submission client behaved unexpectedly." elif [ `echo "${job_out}" | grep -Ec "submitted to cluster\s[0-9]+"` != "1" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "badly formatted condor jobid for the job !" 1>&2 echo "Submission: Local submission client behaved unexpectedly." else job_id=`echo $job_out | grep cluster | awk '{print $8}' | sed 's/[\.]//g'` hostname=`hostname -f` echo "joboption_jobid=${job_id}.${hostname}" >> $GRAMI_FILE echo "condor_log=${joboption_directory}/log" >> $GRAMI_FILE echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary files rm -f $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_condor_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from condor_submit: $CONDOR_RESULT !" 1>&2 echo "Submission: Local submission client failed." fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" 1>&2 cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_condor_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/scan-condor-job.in0000644000000000000000000000013215067751327024550 xustar0030 mtime=1759498967.765899957 30 atime=1759498967.870493727 30 ctime=1759499030.074995026 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/scan-condor-job.in0000755000175000002070000006264315067751327026470 0ustar00mockbuildmock00000000000000#!/bin/bash progname=$(basename "$0") LRMS=Condor # for use in log messages # ARC1 passes first the config file. 
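# A hypothetical invocation matching the argument handling just below
# (paths are illustrative only):
#
#   scan-condor-job --config /etc/arc.conf /var/spool/arc/jobstatus
#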
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi # Validate control directories supplied on command-line if [ -z "$1" ] ; then echo "no control_dir specified" 1>&2; exit 1 fi for ctr_dir in "$@"; do if [ ! -d "$ctr_dir" ]; then echo "called with erronous control dir: $ctr_dir" exit 1 fi done joboption_lrms="condor" lrms_options="condor_requirements condor_rank condor_bin_path condor_config" queue_options="condor_requirements" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi # List of jobids for grid-jobs with state INLRMS declare -a localids # Array with basenames of grid-job files in ctrl_dir, indexed by localid # example /some/path/XX/XX/X /some/other/path/YY/YY/Y declare -a basenames # Array with states of the jobs in Condor, indexed by localid declare -a jobstates # Array with grid id values, indexed by localid declare -a gridids # Array with control directories, indexed by localid declare -a ctrdirs # Array to store localids of jobs that are determined to have finished, which are sent to gm-kick declare -a kicklist # Array with jobid blocks declare -a lidblocks # Find list of grid jobs with status INLRMS, store localid and # basename for those jobs for ctr_dir in "$@"; do for id in $(find "$ctr_dir/processing" -name '*.status' -print0 \ | xargs -0 egrep -l "INLRMS|CANCELING" \ | sed 's#.*/processing/\([^\.]*\)\.status$#\1#') do basename=$(control_path "${ctr_dir}" "${id}" "") localid=$(grep ^localid= "${basename}/local" | cut -d= -f2 | cut -d "." -f1) localids[${#localids[@]}]="$localid" basenames[$localid]="$basename" gridids[$localid]="$id" ctrdirs[$localid]="$ctr_dir" done done # No need to continue further if no jobs have status INLRMS if [ ${#localids[@]} -eq 0 ]; then exit 0 fi ############################################################################# ########################## LRMS specific functions ########################## ############################################################################# # # Should print the id's of all jobs in the LRMS, one per line. If left # unimplemented then lrms_job_finished must be implemented. If it's # implemented then implementing lrms_job_finished is optional. # lrms_list_jobs() { LIST_IMPLEMENTED= } # # Should return 0 only if the job is not in the LRMS. The job's LRMS id is # stored in the lrmsid variable. It's called for all grid jobs that are in # INLRMS and CANCELING states and whose LRMS id was not listed by # lrms_list_jobs. STDOUT and STDERR are redirected to $gridid/error. # lrms_job_finished() { return 0 } # # Should attempt to collect accounting info from LRMS for a job. The job's # LRMS id is stored in the lrmsid variable. This function will be called after # the job has left the LRMS. Diagnostics might not be available right after # the job has finished and therefore a retry mechanism is implemented. If more # time is needed, the function should signal this by returning without setting # the LRMSExitcode variable. In this case it will be called again on the next # run on scan-*-jobs, but not more than $maxwait times for any given job. 
If # it sets LRMSExitcode, or $maxwait retries have already been done, then # lrms_last_call will be called shortly afterwards and the job declared done. # STDOUT and STDERR are redirected to $gridid/errors. The interval between # successive runs of scan-*-jobs is controlled by $wakeupperiod. # Input variables: # * gridid # * lrmsid # * sessiondir # * uid -- numerical unix ID of the user owning the job # The following variables are initialized with values read from # $sessiondir.diag. All except exitcode are expected to be updated by this # function: # * exitcode -- It's the exitcode of the user's executable, as reported by # the job wrapper. Do not change. # * nodename -- may contain multiple lines, one execution node per line # * WallTime -- in seconds # * UserTime -- in seconds # * KernelTime -- in seconds # * TotalMemory -- in kB # * ResidentMemory -- in kB # * LRMSStartTime -- in Mds time format, UTC time zone (20091201140049Z) # * LRMSEndTime -- in Mds time format, UTC time zone (20091201140049Z) # Output variables: # * LRMSExitcode -- as reported by the LRMS. It will be saved to the .diag file # * LRMSMessage -- any clues obtained from the LRMS about job failure. It's # content will be added to $gridid/lrms_done in case LRMSExitcode is not 0. # lrms_get_accounting() { ACCT_IMPLEMENTED= LRMSExitcode=${exitcode:--1} } # # Called just before uptading .diag and writing the $gridid/lrms_done file. STDOUT and # STDERR are redirected to $gridid/error. Can be left as is. # Input/Output variables: # * the same as for lrms_get_accounting # * any variables set in lrms_get_accounting are visible here # lrms_last_call() { [ -n "$LRMSExitcode" ] \ || log "LRMS exit status not available for job $gridid ($LRMS id: $lrmsid)" [ -n "$ACCT_IMPLEMENTED" ] || LRMSExitcode= # Suspect killing due to resource limit exceeded only if exitcode is # missing or is > 128 (as in the case of a shell killed by a signal) if [ -z "$exitcode" ] || [ "$exitcode" -gt 128 ]; then read_grami; autodetect_overlimit fi } ############################################################################# # # scan-*-jobs has STDOUT redirected to /dev/null and STDERR redirected to # job.helper..errors # log () { echo "[`date +%Y-%m-%d\ %T`] $progname: $*" 1>&2; } # # Reads a line from STDIN and prints integer part on STDOUT. 
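# Illustrative usage (a sketch, not part of the original script):
#   echo "12.7" | to_integer   # prints 12
#   echo "abc"  | to_integer   # prints nothing, exit status 1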
# If not a valid number, prints nothing and returns 1 # to_integer() { /usr/bin/perl -we 'chomp(my $line = <>); exit 0 if $line eq ""; if ( $line =~ m/^(\d*)(?:\.\d+)?$/ ) { print $1 || 0; } else { exit 1; }' } # Input variables # * gridid # Output variables: # * ReqWallTime # * ReqCPUTime # * ReqTotalMemory read_grami() { gramifile="${basenames[$localid]}/grami" [ -f "$gramifile" ] || { log "grami file not found: $gramifile"; return 1; } ReqWallTime=$(sed -n "s/^joboption_walltime=//p" "$gramifile" | tail -n 1) ReqCPUTime=$(sed -n "s/^joboption_cputime=//p" "$gramifile" | tail -n 1) ReqTotalMemory=$(sed -n "s/^joboption_memory=//p" "$gramifile" | tail -n 1) ReqWallTime=$(echo $ReqWallTime | to_integer) || log "joboption_walltime not a number" ReqCPUTime=$(echo $ReqCPUTime | to_integer) || log "joboption_cputime not a number" ReqTotalMemory=$(echo $ReqTotalMemory | to_integer) || log "joboption_memory not a number" # convert MB to KB [ -n "$ReqTotalMemory" ] && ReqTotalMemory=$(( $ReqTotalMemory * 1024 )) log "---- Requested resources specified in grami file ----" [ -n "$ReqWallTime" ] && log "Requested walltime: $ReqWallTime seconds" [ -n "$ReqCPUTime" ] && log "Requested cputime: $ReqCPUTime seconds" [ -n "$ReqTotalMemory" ] && log "Requested memory: $(( $ReqTotalMemory / 1024 )) MB" log "-----------------------------------------------------" } # # Can be used from lrms_get_accounting() to guess whether the job was killed due to # an exceeded resource limit and set LRMSMessage accordingly. # Input variables # * gridid # * uid # * ReqWallTime # * ReqCPUTime # * ReqTotalMemory # * WallTime # * UserTime # * KernelTime # * TotalMemory # * ResidentMemory # * exitcode # * LRMSExitcode # * LRMSMessage # Output variables: # * overlimit (if set, then one of memory cputime walltime ) # autodetect_overlimit() { # round to integers wallt=$(echo $WallTime | to_integer) || log "WallTime not a number" usert=$(echo $UserTime | to_integer) || log "UserTime not a number" kernelt=$(echo $KernelTime | to_integer) || log "KernelTime not a number" totalmem=$(echo $TotalMemory | to_integer) || log "TotalMemory not a number" residentmem=$(echo $ResidentMemory | to_integer) || log "ResidentMemory not a number" cput=$(( ${usert:-0} + ${kernelt:-0} )) if [ -n "$cput" ] && [ "$cput" -gt 0 ] \ && [ -n "$ReqCPUTime" ] && [ "$ReqCPUTime" -gt 0 ] \ && [ $(( 100 * $cput / $ReqCPUTime )) -gt 95 ]; then overlimit="cputime" fi if [ -n "$wallt" ] && [ "$wallt" -gt 0 ] \ && [ -n "$ReqWallTime" ] && [ "$ReqWallTime" -gt 0 ] \ && [ $(( 100 * $wallt / $ReqWallTime )) -gt 95 ]; then overlimit="walltime" fi if [ -n "$totalmem" ] && [ "$totalmem" -gt 0 ] \ && [ -n "$ReqTotalMemory" ] && [ "$ReqTotalMemory" -gt 0 ] \ && [ $(( 100 * $totalmem / $ReqTotalMemory )) -gt 95 ]; then overlimit="memory" fi if [ -n "$residentmem" ] && [ "$residentmem" -gt 0 ] \ && [ -n "$ReqTotalMemory" ] && [ "$ReqTotalMemory" -gt 0 ] \ && [ $(( 100 * $residentmem / $ReqTotalMemory )) -gt 95 ]; then overlimit="memory" fi [ -n "$overlimit" ] && log "Job have likely hit $overlimit limit" } # # Returns 0 at most maxwait calls for any given gridid. Returns 1 on # further calls or if an error has occured. # job_canwait() { [ -n "$gridid" ] && [ -n "$maxwait" ] \ || { log "job_canwait requires the following to be set: gridid, maxwait"; return 1; } countfile="${basenames[$localid]}/lrms_job" if [ ! 
-f "$countfile" ]; then echo "1" > "$countfile" || { log "cannot write count file: $countfile"; return 1; } else count=$(head -n 1 "$countfile") || { log "cannot read count file: $countfile"; return 1; } [ -z "$count" ] && { log "empty count file: $countfile"; return 1; } dummy=$(echo "$count" | grep -v '[0-9]') && { log "not an integer in count file: $countfile"; return 1; } [ "$count" -lt "$maxwait" ] || { rm -f "$countfile"; return 1; } echo "$(( $count + 1 ))" > "$countfile" || { log "cannot write count file: $countfile"; return 1; } fi return 0 } # # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors # Input variables: # * uid # * sessiondir job_print_comment() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_print_comment requires the following to be set: uid, sessiondir"; return 1; } commentfile=$sessiondir.comment [ -f "$commentfile" ] && do_as_uid "$uid" " echo '--------- Contents of output stream forwarded by $LRMS ------------' cat '$commentfile' echo '------------------------- End of output -------------------------' " || log "failed reading: $commentfile" } # In case overlimit is set, tweak what will go into $gridid/lrms_done set_overlimit_message() { [ -n "$overlimit" ] || return if [ $overlimit = "cputime" ]; then LRMSMessage="job killed: cput" elif [ $overlimit = "walltime" ]; then LRMSMessage="job killed: wall" elif [ $overlimit = "memory" ]; then LRMSMessage="job killed: vmem" else log "invalid value overlimit=$overlimit"; return 1 fi LRMSExitcode=271 } # # Input variables: # * gridid # * basedir # * exitcode # * LRMSExitcode # * LRMSMessage # * overlimit # job_write_donefile() { [ -n "$gridid" ] && [ -n "$basedir" ] && [ -n "$LRMS" ] \ || { log "job_write_donefile requires the following to be set: gridid, basedir, LRMS"; return 1; } set_overlimit_message if [ -n "$LRMSMessage" ] && [ "$LRMSExitcode" != 0 ]; then msg="$LRMSMessage" else if [ "$exitcode" = 0 ]; then if [ -z "$LRMSExitcode" ] || [ "$LRMSExitcode" = 0 ]; then msg= else msg="Job finished properly but $LRMS reported failure" fi elif [ -z "$exitcode" ]; then if [ "$LRMSExitcode" = 0 ]; then LRMSExitcode=-1; fi msg="Job was killed by $LRMS" else if [ "$LRMSExitcode" = 0 ]; then LRMSExitcode=-1; fi msg="Job failed with exit code $exitcode" fi fi log "${msg:-$LRMS job $lrmsid finished normally}" donefile="${basenames[$localid]}/lrms_done" echo "${LRMSExitcode:--1} $msg" > $donefile || log "failed writing file: $donefile" # wake up GM "${pkglibexecdir}/gm-kick" -j "$gridid" "$ctrdir" } # # Should check that the job has exited lrms, and then do whatever post-processing is necesarry. # Called with STDOUT and STDERR redirected to the $gridid/errors file. 
# # Should check that the job has exited the LRMS, and then do whatever post-processing is necessary. # Called with STDOUT and STDERR redirected to the $gridid/errors file. # Input variables: # * gridid # * lrmsid # * uid # process_job() { [ -n "$gridid" ] && [ -n "$lrmsid" ] && [ -n "$uid" ] && [ -n "$LRMS" ] \ || { log "process_job requires the following to be set: gridid, lrmsid, uid, LRMS"; return 1; } lrms_job_finished || return log "[$(date +%Y-%m-%d\ %T)] $LRMS job $lrmsid has exited" localfile="${basenames[$localid]}/local" sessiondir=$(sed -n 's/^sessiondir=//p' "$localfile" | tail -n 1) [ -n "$sessiondir" ] || { log "failed reading sessiondir from: $localfile"; return 1; } # move diag file that ends up in the session directory after condor transfer_output (shared_filesystem = no) [ -f "${sessiondir}/${sessiondir##*/}.diag" ] && mv "${sessiondir}/${sessiondir##*/}.diag" "${sessiondir}.diag" job_read_diag lrms_get_accounting if [ -z "$LRMSExitcode" ] && job_canwait; then : # Come back again next time else rm -f "$countfile" job_print_comment lrms_last_call job_write_diag job_write_donefile fi } scan_init () { [ -n "$basedir" ] || { log "basedir must be set"; exit 1; } [ -n "$LRMS" ] || { log "LRMS must be set"; exit 1; } LIST_IMPLEMENTED=yes ACCT_IMPLEMENTED=yes maxwait=5 wakeupperiod=60 trap 'sleep $wakeupperiod' EXIT TERM TMPDIR=${TMPDIR:-@tmp_dir@} export TMPDIR # default is shared sessiondirs if [ -z "$CONFIG_shared_filesystem" ]; then CONFIG_shared_filesystem=yes elif [ "$CONFIG_shared_filesystem" = 'no' ]; then CONFIG_shared_filesystem= fi # Retrieve all jobs from Condor # Call function lrms_list_jobs which runs condor_q against the local schedd of the CE lrmsids=$(lrms_list_jobs) || { log "lrms_list_jobs failed"; exit 1; } } scan_main() { log () { echo "$progname: $*" 1>&2; } # If the job is in condor_q, exit cleanly; the job has not yet finished if echo "$lrmsids" | grep -q "^$lrmsid$"; then return 0 fi # Exit the loop cleanly if $gridid/lrms_done exists # This indicates that the job has already been processed by this loop and identified as complete by Condor donefile="${basenames[$localid]}/lrms_done" [ -f "$donefile" ] && return 0 errorsfile="${basenames[$localid]}/errors" [ -w "$errorsfile" ] || { log "cannot write to errors file at: $errorsfile"; return 0; } jobfile="${basenames[$localid]}/local" uid=$(get_owner_uid "$jobfile") # run in separate process to make sure shell vars of one job # are not influencing other jobs ( process_job; ) >> "$errorsfile" 2>&1 } ################################### Condor #################################### lrms_list_jobs() { script='my $cmd="$ENV{CONDOR_BIN_PATH}/condor_q"; open Q, "$cmd|" or die "Failed running $cmd : $!\n"; my $out; { local $/; $out = <Q>; }; close Q; exit 0 if $out =~ m/All queues are empty/; die "Non-zero exit status returned by $cmd\n" if $?; my @ids = ($out =~ m/^\s*(\d+)\.\d+\s+/mg); print "$_\n" for @ids; ' /usr/bin/perl -we "$script" } condor_read_history() { # This Perl script reads and prints a per-job condor history file. We need to use a # hash rather than printing the file directly because some attributes appear multiple # times and we need to use the last occurrence. condorscript='use strict; my %data; if (-e $ARGV[0]) { open(FILE, "<$ARGV[0]"); foreach my $line (<FILE>) { if ($line =~ /([\w\+]+)\s=\s(.*)/) { $data{$1} = $2; } } foreach my $key (keys %data) { print $key." = ".$data{$key}."\n"; } } ' # First try per-job history files (best performance) perjobhistorydir=`$CONDOR_BIN_PATH/condor_config_val PER_JOB_HISTORY_DIR` perjobhistory_exists=$?
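# Illustrative lookup (values are assumptions, not read from any real
# config): with PER_JOB_HISTORY_DIR = /var/lib/condor/history and Condor
# job 1234.0, the code below opens /var/lib/condor/history/history.1234.0
# and the Perl snippet above flattens its classad into "Attr = value"
# lines, keeping only the last occurrence of each attribute.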
histstring="" if [ $perjobhistory_exists -eq 0 ]; then # per-job history files are being used, so we can immediately find the right file historyfile="$perjobhistorydir/history.$lrmsid.0" [ -f "$historyfile" ] && histstring=$( /usr/bin/perl -we "$condorscript" "$historyfile" ) fi # If per-job history is not in place - use common history files (including rotated) historydir=`$CONDOR_BIN_PATH/condor_config_val HISTORY` if [ -z "$histstring" -a -n "$historydir" ]; then # find the appropriate history file historyfile=`grep "$(hostname -s).*#$lrmsid.0" -l $historydir*` if [ $? -eq 0 ]; then # try to get the full job classad { histstring=$( $CONDOR_BIN_PATH/condor_history -l -file $historyfile -match 1 "$lrmsid" ); } 2>&1 fi fi # the awk expression checks that the input is more than 1 line long echo "$histstring" | awk 'END{if(NR<2){exit 1}}' || return 1 # Extract information from condor_history output __RemoteHost=$(echo "$histstring" | sed -n 's/^LastRemoteHost *= *"\(.*\)"[^"]*$/\1/p') __WallTime=$(echo "$histstring" | sed -n 's/^RemoteWallClockTime *= *\([0-9][0-9]*\).*/\1/p') __KernelTime=$(echo "$histstring" | sed -n 's/^RemoteSysCpu *= *\([0-9][0-9]*\).*/\1/p') __UserTime=$(echo "$histstring" | sed -n 's/^RemoteUserCpu *= *\([0-9][0-9]*\).*/\1/p') __ImageSize=$(echo "$histstring" | sed -n 's/^ImageSize *= *//p') __ExitCode=$(echo "$histstring" | sed -n 's/^ExitCode *= *//p') ExitStatus=$(echo "$histstring" | sed -n 's/^ExitStatus *= *//p') JobStatus=$(echo "$histstring" | sed -n 's/^JobStatus *= *//p') ExitSignal=$(echo "$histstring" | sed -n 's/^ExitSignal *= *//p') RemoveReason=$(echo "$histstring" | sed -n 's/^RemoveReason *= *"\(.*\)"[^"]*$/\1/p') ExitReason=$(echo "$histstring" | sed -n 's/^ExitReason *= *"\(.*\)"[^"]*$/\1/p') JobCurrentStartDate=$(echo "$histstring" | sed -n 's/^JobCurrentStartDate *= *\([0-9][0-9]*\).*/\1/p') EnteredCurrentStatus=$(echo "$histstring" | sed -n 's/^EnteredCurrentStatus *= *\([0-9][0-9]*\).*/\1/p') RequestCpus=$(echo "$histstring" | sed -n 's/^RequestCpus *= *//p') echo "$RemoveReason" | grep -q 'PeriodicRemove .*evaluated to \(TRUE\)' [ $? = 0 ] && PeriodicRemove=TRUE return 0 } seconds() { /usr/bin/perl -e 'my $str = "'"$1"'"; exit unless $str =~ /(\d+) (\d\d):(\d\d):(\d\d)/; printf "%.0f", ( $1 * 24 + $2 ) * 3600 + $3 * 60 + $4; ' } find_in_file() { file=$1; regex=$2; grep "$regex" "$file" | tail -n 1 | sed -n "s/\(.*\)$regex\(.*\)/\2/ip"; } condor_read_log() { # Find the Condor log. gramifile="${basenames[$localid]}/grami" [ -f "$gramifile" ] || { log "grami file not found: $gramifile"; return 1; } condor_log=$(sed -n 's/^condor_log=//p' "$gramifile" | tail -n 1) [ -n "$condor_log" ] || { log "condor_log not set in grami file: $gramifile"; return 1; } log "condor log is at: $condor_log" [ -r "$condor_log" ] || { log "Condor log file not readable: $condor_log"; return 1; } # Parse condor log. 
Look for lines like: # (return value 0) # Image size of job updated: 692632 # Usr 0 00:37:09, Sys 0 00:00:04 - Total Remote Usage # Job executing on host: <129.240.86.70:32769> _RemoteHost=$( find_in_file "$condor_log" 'Job executing on host: *<\([^:>]*\)' ) _UserTime=$( find_in_file "$condor_log" 'Usr \([0-9][0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\).*Total Remote Usage' ) _KernelTime=$( find_in_file "$condor_log" 'Sys \([0-9][0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\).*Total Remote Usage' ) _ImageSize=$(find_in_file "$condor_log" 'Image size of job updated: \([0-9][0-9]*\)' ) _ExitCode=$( find_in_file "$condor_log" '(return value \([0-9][0-9]*\))' ) _UserTime=$(seconds "$_UserTime") _KernelTime=$(seconds "$_KernelTime") } lrms_get_accounting() { condor_read_history || { log "Job has exited but is not yet listed by condor_history"; return 1; } # set LRMSExitcode to signal that no more tries are necessary LRMSExitcode=-1 } lrms_last_call() { condor_read_log && { # override values read from .diag with those from condor log nodename=${_RemoteHost:-$nodename} UserTime=${_UserTime:-$UserTime} KernelTime=${_KernelTime:-$KernelTime} TotalMemory=${_ImageSize:-$TotalMemory} echo "$progname: ----- begin condor log ($condor_log) -----" cat "$condor_log" echo "$progname: ----- end condor log ($condor_log) -----" echo "$progname: ----- Information extracted from Condor log -----" [ -n "$_RemoteHost" ] && echo "$progname: RemoteHost=$_RemoteHost" [ -n "$_UserTime" ] && echo "$progname: UserTime=$_UserTime" [ -n "$_KernelTime" ] && echo "$progname: KernelTime=$_KernelTime" [ -n "$_ImageSize" ] && echo "$progname: ImageSize=$_ImageSize" [ -n "$_ExitCode" ] && echo "$progname: ExitCode=$_ExitCode" echo "$progname: -------------------------------------------------" } if [ -z "$LRMSExitcode" ]; then log "$progname: No condor_history for Condor ID $lrmsid" else # override with values from condor_history nodename=${__RemoteHost:-$nodename} WallTime=${__WallTime:-$WallTime} UserTime=${__UserTime:-$UserTime} KernelTime=${__KernelTime:-$KernelTime} TotalMemory=${__ImageSize:-$TotalMemory} echo "$progname: ----- begin condor history message -----" echo "$histstring" echo "$progname: ----- end condor history message -----" echo "$progname: ----- Information extracted from condor_history -----" [ -n "$__RemoteHost" ] && echo "$progname: LastRemoteHost=$__RemoteHost" [ -n "$__WallTime" ] && echo "$progname: RemoteWallClockTime=$__WallTime" [ -n "$__UserTime" ] && echo "$progname: RemoteUserCpu=$__UserTime" [ -n "$__KernelTime" ] && echo "$progname: RemoteSysCpu=$__KernelTime" [ -n "$__ImageSize" ] && echo "$progname: ImageSize=$__ImageSize" [ -n "$__ExitCode" ] && echo "$progname: ExitCode=$__ExitCode" [ -n "$ExitStatus" ] && echo "$progname: ExitStatus=$ExitStatus" [ -n "$JobStatus" ] && echo "$progname: JobStatus=$JobStatus" [ -n "$ExitSignal" ] && echo "$progname: ExitSignal=$ExitSignal" [ -n "$RemoveReason" ] && echo "$progname: RemoveReason=$RemoveReason" [ -n "$JobCurrentStartDate" ] && echo "$progname: JobCurrentStartDate=$JobCurrentStartDate" [ -n "$EnteredCurrentStatus" ] && echo "$progname: EnteredCurrentStatus=$EnteredCurrentStatus" [ -n "$ExitReason" ] && echo "$progname: ExitReason=$ExitReason" [ -n "$RequestCpus" ] && echo "$progname: RequestCpus=$RequestCpus" echo "$progname: -----------------------------------------------------" if [ -n "$JobCurrentStartDate" ]; then date_seconds_to_utc "$JobCurrentStartDate" seconds_to_mds_date "$return_date_seconds" LRMSStartTime=$return_mds_date echo "$progname: 
LRMSStartTime=$LRMSStartTime" fi if [ -n "$EnteredCurrentStatus" ]; then date_seconds_to_utc "$EnteredCurrentStatus" seconds_to_mds_date "$return_date_seconds" LRMSEndTime=$return_mds_date echo "$progname: LRMSEndTime=$LRMSEndTime" fi fi LRMSExitcode=${__ExitCode:-$_ExitCode} # set LRMSExitcode to signal that no more tries are necessary [ -n "$LRMSExitcode" ] || log "ExitCode not found in condor log and condor_history" # set message in case condor killed the job. LRMSExitcode should not be 0. if [ -n "$PeriodicRemove" ]; then [ "$LRMSExitcode" = 0 ] && LRMSExitcode= LRMSMessage="PeriodicRemove evaluated to TRUE" elif [ -n "$RemoveReason" ] && [ "$RemoveReason" != "None" ]; then [ "$LRMSExitcode" = 0 ] && LRMSExitcode= LRMSMessage="RemoveReason: $RemoveReason" elif [ -n "$ExitReason" ] && [ "$ExitReason" != "None" ]; then [ "$LRMSExitcode" = 0 ] && LRMSExitcode= LRMSMessage="ExitReason: $ExitReason" fi # Check whether the job was killed by Condor. If yes, check for exceeded resources limits if ( [ -n "$RemoveReason" ] && [ "$RemoveReason" != "None" ] ) || [ -n "$PeriodicRemove" ]; then read_grami; autodetect_overlimit fi # Condor does not write a .diag file. exitcode=$LRMSExitcode } # Setup Condor environment # Don't include as part of for loop to reduce pressure on Condor schedd scan_init # Loop through all jobs in "$ctr_dir/processing" with name '*.status' for localid in ${localids[@]}; do gridid=${gridids[$localid]} ctrdir=${ctrdirs[$localid]} lrmsid=$localid scan_main "$@" done exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/PaxHeaders/README0000644000000000000000000000013215067751327022122 xustar0030 mtime=1759498967.765899957 30 atime=1759498967.869493711 30 ctime=1759499030.077605066 nordugrid-arc-7.1.1/src/services/a-rex/lrms/condor/README0000644000175000002070000000003015067751327024015 0ustar00mockbuildmock00000000000000Condor control scripts. nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/pbspro0000644000000000000000000000013215067751425021205 xustar0030 mtime=1759499029.980437491 30 atime=1759499034.764510185 30 ctime=1759499029.980437491 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/0000755000175000002070000000000015067751425023164 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023317 xustar0030 mtime=1759498967.767700358 30 atime=1759498967.870493727 30 ctime=1759499029.972437369 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/Makefile.am0000644000175000002070000000016215067751327025220 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-pbspro-env.sh pkgdata_SCRIPTS = submit-pbspro-job cancel-pbspro-job scan-pbspro-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356023332 xustar0030 mtime=1759498990.518896841 30 atime=1759499018.673265678 30 ctime=1759499029.975437415 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/Makefile.in0000644000175000002070000005542415067751356025246 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/pbspro ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-pbspro-job cancel-pbspro-job \ scan-pbspro-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed 
-e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-pbspro-job.in \ $(srcdir)/scan-pbspro-job.in $(srcdir)/submit-pbspro-job.in \ README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ 
CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = 
@PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-pbspro-env.sh pkgdata_SCRIPTS = submit-pbspro-job cancel-pbspro-job 
scan-pbspro-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/pbspro/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/pbspro/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-pbspro-job: $(top_builddir)/config.status $(srcdir)/submit-pbspro-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-pbspro-job: $(top_builddir)/config.status $(srcdir)/cancel-pbspro-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-pbspro-job: $(top_builddir)/config.status $(srcdir)/scan-pbspro-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; 
\ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/scan-pbspro-job.in0000644000000000000000000000013215067751327024612 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.870493727 30 ctime=1759499029.978895941 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/scan-pbspro-job.in0000644000175000002070000004067615067751327026525 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Periodically read PBS log files and create mark files # for jobs that have finished. # If log files are not available, scan PBS for finished (absent) jobs # and create mark files for them. # # usage: scan_pbs_job control_dir ... # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi # first control_dir is used for storing own files if [ -z "$1" ] ; then exit 1 ; fi control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} \"$1\"" shift done joboption_lrms="pbspro" lrms_options="pbs_bin_path pbs_log_path" queue_options="pbs_queue_node" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Define gm-kick location GMKICK=${pkglibexecdir}/gm-kick
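# For orientation: the server-log records matched by process_log_file below
# carry ";"-separated fields. A hedged, made-up sample (not real output):
#   10/02/2025 14:01:02;0010;PBS_Server;Job;1234.pbs.example.org;Exit_status=0 resources_used.cput=00:01:02 resources_used.walltime=00:05:00
# i.e. date;code;server;object;job id;message, exactly the fields split out
# by set_job_vars.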
-z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi pbs_log_dir="${CONFIG_pbs_log_path:-/var/spool/pbs/server_logs}" my_id=`id -u` state_file=$control_dir/pbs_log_scan.`id -un` lines=`cat "$state_file" 2>/dev/null` ldt=`echo $lines | awk '{split($0,field," ");print field[1]}' ` lines=`echo $lines | awk '{split($0,field," ");print field[2]}'` lines_skip=$(( $lines + 0 )) ldate=$(( $ldt + 0 )) if [ -z "$lines_skip" ] ; then lines_skip='0' ; fi if [ -z "$ldate" ] ; then ldate='0' ; fi whole_line= control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } find_by_local() { eval "set -- $control_dirs" for ctr_dir in "$@"; do find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do read id if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "local") grep -F -c $whole_line "localid=$job_id" "${path}" 2>/dev/null 1>/dev/null if [ $? -eq '0' ] ; then echo "${id} ${ctr_dir}" ; break ; fi done } done } find_by_grami() { eval "set -- $control_dirs" for ctr_dir in "$@"; do find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do read id if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "grami") grep -F -l $whole_line "joboption_jobid=$job_id" "${path}" 2>/dev/null 1>/dev/null if [ $? -eq '0' ] ; then echo "${id} ${ctr_dir}" ; break ; fi done } done } # set_job_vars takes a line from pbs logs and splits it, returning information # in pbs_date, pbs_code, pbs_server, pbs_job, job_id, job_message and rest_line set_job_vars() { pbs_date=$1 pbs_code=$2 pbs_server=$3 pbs_job=$4 job_id=$5 job_message=$6 rest_line=$7 } # # Main function for processing one PBS log. # Extracts log lines with code 0010 (job exited) and 0008 (job killed) # # TODO this should be split into smaller functions process_log_file () { eval "set -- $control_dirs" #we grep for finished jobs, then use sed to remove already processed lines #OBS: deleted jobs have a 0008 message with not much info in it. A 0010 # message may follow (or not) with full usage stats. By this time the # job has already been processed, so this info is ignored! #TODO: make log scanning more intelligent. exited_killed_jobs=`egrep '^[^;]*;0010;[^;]*;Job;|^[^;]*;0008;[^;]*;Job;[^;]*;Exit_status=|^[^;]*;0008;[^;]*;Job;[^;]*;Job deleted' ${lname} | tail -n+$(( $lines_skip + 1 ))` #TODO should we add processed lines before jobs have actually been processed? What if the last job only has half a record? new_lines=`echo -n "$exited_killed_jobs" | wc -l` # new_lines set to 1 when string is empty, should have been 0 [ "x$exited_killed_jobs" = x ] && continue lines_processed=$(( $lines_skip + $new_lines )) if [ "$lines_processed" -lt '0' ] ; then lines_processed=0; fi echo "$cname $lines_processed"> $state_file exited_killed_jobs=`echo "$exited_killed_jobs" | sort -u` # force word splitting to happen only on newlines old_IFS=$IFS; IFS=' ' for job in $exited_killed_jobs; do # Split line into fields by forcing word splitting to happen on ";" IFS=";" set_job_vars $job IFS=$old_IFS # Try to extract exit code of PBS (note: if executable fails it's code goes to PBS) exit_code=`echo "$job_message" | sed -n 's/^.*Exit_status=\([-0-9]*\).*/\1/p'` # Check if job has suffix echo "$job_id" | grep -q -F . if [ ! $? 
if [ ! $? = '0' ] ; then whole_line=-x else job_id=`echo "$job_id" | awk '{split($0,field,".");print field[1]"."field[2]}'` whole_line= fi # look for this id in ID.local, then in ID.grami gridid=`find_by_local` if [ -z "$gridid" ]; then gridid=`find_by_grami` fi ctrdir=`echo "${gridid}" | sed -n 's/^[^ ]* \(.*\)/\1/p'` gridid=`echo "${gridid}" | sed -n 's/^\([^ ]*\) .*/\1/p'` if [ -z "$gridid" ]; then continue; fi if [ -z "$ctrdir" ]; then continue; fi statusfile="${ctrdir}/processing/${gridid}.status" lrmsfile=$(control_path "${ctrdir}" "${gridid}" "lrms_done") localfile=$(control_path "${ctrdir}" "${gridid}" "local") errorsfile=$(control_path "${ctrdir}" "${gridid}" "errors") if [ "$my_id" != '0' ] ; then if [ ! -O "$statusfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$statusfile") [ -z "$uid" ] && { log "Failed to stat $statusfile"; continue; } # check if job already reported if [ -f "$lrmsfile" ] ; then continue ; fi # more protection: check if A-REX thinks the job is still running egrep 'INLRMS|SUBMIT|CANCELING' "$statusfile" >/dev/null 2>&1 if [ ! $? = '0' ] ; then continue ; fi # So far only the PBS exit code is available # It would be nice to have the exit code of the main executable exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' "$localfile" | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ -z "$sessiondir" ] ; then log "Failed to determine the path of the job's session directory" else # have a chance to obtain the exit code if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # In case of non-NFS setup it may take some time till the # diagnostics file is delivered. Wait for it up to 200 seconds. # OBS: exitcode may never appear in the .diag file if the job was # killed. There will be a delay of that length for every such job! diag_tries=0 while [ "$diag_tries" -lt 20 ] ; do job_read_diag # uses $sessiondir, $uid if [ ! -z "$exitcode" ] ; then break ; fi sleep 10 diag_tries=$(( $diag_tries + 1 )) log "no exitcode in diag file $diagfile (try $diag_tries of 20)" done else job_read_diag # uses $sessiondir, $uid fi fi # Try to obtain message from PBS if any pbs_comment=$(do_as_uid "$uid" "tail -n 1 '$commentfile'") save_commentfile "$uid" "$commentfile" "${errorsfile}" # Extract values from PBS walltime=`echo "$job_message" | sed -n 's/^.*resources_used.walltime=\(\([0-9]*:\)*[0-9][0-9]\).*/\1/p'` cputime=`echo "$job_message" | sed -n 's/^.*resources_used.cput=\(\([0-9]*:\)*[0-9][0-9]\).*/\1/p'` mem=`echo "$job_message" | sed -n 's/^.*resources_used.mem=\([0-9]*\)kb.*/\1/p'` vmem=`echo "$job_message" | sed -n 's/^.*resources_used.vmem=\([0-9]*\)kb.*/\1/p'` # Convert to UTC and store as seconds date_to_utc_seconds "$pbs_date" if [ ! -z "$return_date_seconds" ]; then # Convert from seconds to YYYYMMDDHHMMSSZ seconds_to_mds_date "$return_date_seconds" endtime=$return_mds_date # Find out how many seconds the job executed interval_to_seconds "$walltime" if [ ! -z "$return_interval_seconds" ]; then # Convert from seconds to YYYYMMDDHHMMSSZ seconds_to_mds_date $(( $return_date_seconds - $return_interval_seconds )) starttime=$return_mds_date fi fi # Values to write to diag. These will override values already written.
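# Worked example (illustrative values): a record carrying
# resources_used.walltime=1:02:03 converts via interval_to_seconds to
# 1*3600 + 2*60 + 3 = 3723 seconds in $return_interval_seconds, which is
# what gets written to the .diag file below.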
interval_to_seconds "$walltime" [ -n "$return_interval_seconds" ] && WallTime=$return_interval_seconds interval_to_seconds "$cputime" [ -n "$return_interval_seconds" ] && UserTime=$return_interval_seconds [ -n "$return_interval_seconds" ] && KernelTime=0 [ -n "$mem" ] && UsedMemory=$mem [ -n "$vmem" ] && TotalMemory=$vmem [ -n "$starttime" ] && LRMSStartTime=$starttime [ -n "$endtime" ] && LRMSEndTime=$endtime [ -n "$pbs_comment" ] && LRMSMessage=$pbs_comment [ -n "$exit_code" ] && LRMSExitcode=$exit_code job_write_diag if [ -z "$exitcode" ] ; then # No exit code of job means job was most probably killed if [ -z "$exit_code" ] ; then exit_code='-1'; fi if [ "$exit_code" = '0' ] ; then echo "Job $job_id failed but PBS has not noticed that" 1>&2 echo "-1 Job failed but PBS reported 0 exit code." > "${lrmsfile}" elif [ -z "$pbs_comment" ] ; then echo "Job $job_id failed with PBS exit code $exit_code" 1>&2 echo "$exit_code Job was killed by PBS." > "${lrmsfile}" else echo "Job $job_id failed with PBS exit code $exit_code" 1>&2 echo "$exit_code $pbs_comment" > "${lrmsfile}" fi else if [ -z "$exit_code" ] ; then exit_code='-1'; fi if [ ! "$exitcode" = 0 ] ; then if [ "$exit_code" = '0' ] ; then exit_code='-1'; fi echo "Job $job_id failed with exit code $exitcode, PBS reported $exit_code." 1>&2 echo "$exit_code Job failed with exit code $exitcode." > "${lrmsfile}" else if [ ! "$exit_code" = '0' ] ; then echo "Job finished properly but PBS reported $exit_code." 1>&2 if [ -z "$pbs_comment" ] ; then echo "$exit_code Job was killed by PBS." > "${lrmsfile}" else echo "$exit_code $pbs_comment" > "${lrmsfile}" fi else # echo "Job finished without errors." 1>&2 echo "0" > "${lrmsfile}" fi fi fi # wake up GM ${GMKICK} -j "${gridid}" "${ctrdir}" done IFS=$old_IFS } readable_logs=no # Check $pbs_log_dir for readable files # if any are found, process them and update relevant information if [ ! -z "${pbs_log_dir}" ] ; then for cname in `ls -1 ${pbs_log_dir}/ 2>/dev/null | grep '^[0-9]*$'` ; do lname="${pbs_log_dir}/$cname" if [ ! -r "$lname" ] ; then continue ; fi readable_logs=yes if [ "$cname" -lt "$ldate" ] ; then continue elif [ "$cname" -gt "$ldate" ] ; then lines_skip=0 fi echo "Date: " $cname last_modified=`stat $lname | grep Modify` process_log_file done fi # main loop: stay here for up to 60 seconds if the log is still being updated while # we are reading it. if [ "$readable_logs" = 'yes' ] ; then time_count=0 while true ; do new_modified=`stat $lname | grep Modify` if [ "$new_modified" != "$last_modified" ] ; then last_modified="$new_modified" lines=`cat "$state_file" 2>/dev/null` ldt=`echo $lines | awk '{split($0,field," ");print field[1]}' ` lines=`echo $lines | awk '{split($0,field," ");print field[2]}'` lines_skip=$(( $lines + 0 )) ldate=$(( $ldt + 0 )) process_log_file fi sleep 10 time_count=$(( $time_count + 10 )) if [ "$time_count" -ge 60 ] ; then break ; fi done exit 0 fi # If no PBS logs were found, try ordinary 'qstat' eval "set -- $control_dirs" # Get all running jobs pidslist=`mktemp "$TMPDIR/qstat.XXXXXX"` || if [ ! "$?" = '0' ] ; then # FS problems ? # TODO debug output here sleep 60 exit 1 fi # define and execute qstat PBS_QSTAT="${PBS_QSTAT:-qstat}" if [ ! -z "$PBS_BIN_PATH" ] ; then PBS_QSTAT="${PBS_BIN_PATH}/${PBS_QSTAT}" fi ${PBS_QSTAT} -a 2>/dev/null 1>"$pidslist" if [ ! "$?" = '0' ] ; then rm -f "$pidslist" # PBS server down ? sleep 60 exit 1 fi exclude_completed () { awk '$10!="C"{print $0}' } pids=`cat "$pidslist" | grep '^[0-9][0-9]*\.' | exclude_completed | sed 's/^\([0-9][0-9]*\).*/\1/'`
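# The pipeline above keeps qstat -a lines that start with a numeric id,
# drops completed jobs (state "C" in column 10), and reduces each line to
# the bare sequence number. A made-up sample line for illustration:
#   1234.pbsserver user batch jobname 5678 1 1 2gb 01:00 R 00:10
# becomes "1234".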
rm -f "$pidslist" # Go through directories for ctr_dir in "$@" ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Obtain ids stored in job.*.local ids=` find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do \ read id; \ if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "local") grep -h "^localid=" "${path}" 2>/dev/null | sed 's/^localid=\([0-9]*\).*/\1/' done } ` if [ -z "$ids" ] ; then continue ; fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-pbs-job, ControlDirTraversal: $t" >> $perflogfile fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id$"` if [ -z "$found" ] ; then bids="$bids $id" fi done if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # go through missing ids for id in $bids ; do # find grid job corresponding to current local id jobfile="" gridid="" find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do \ read gid; \ if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${gid}" "local") grep -F -l "localid=$id." "${path}" 2>/dev/null 1>/dev/null if [ $? -eq '0' ] ; then gridid="$gid" jobfile="$path" break fi done } if [ -z "$jobfile" ] ; then continue ; fi if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } donefile=$(control_path "${ctr_dir}" "${gridid}" "lrms_done") if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ]; then continue ; fi # get session directory of this job session=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` if [ ! -z "$session" ] ; then # have a chance to obtain the exit code diagfile="${session}.diag" if [ ! -z "$session" ] ; then # have a chance to obtain the exit code exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi if [ ! -z "$exitcode" ] ; then # job finished and exit code is known save_commentfile "$uid" "${session}.comment" $(control_path "${ctr_dir}" "${gridid}" "errors") echo "$exitcode Executable finished with exit code $exitcode" > "$donefile" ${GMKICK} -j "$gridid" "${ctr_dir}" echo "Job $gridid finished with exit code $exitcode" continue fi fi # job has probably finished and exit code is not known exitcode='-1' countfile=$(control_path "${ctr_dir}" "${gridid}" "lrms_job") counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ] ; then rm -f "$countfile" save_commentfile "$uid" "${session}.comment" $(control_path "${ctr_dir}" "${gridid}" "errors") echo "$exitcode Job was lost with unknown exit code" > "$donefile" ${GMKICK} -j "$gridid" "${ctr_dir}" echo "Job $gridid finished with unknown exit code" else echo "$counter" > "$countfile" fi done
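# Note on the loop above (a reading aid, not new behaviour): a job that has
# disappeared from qstat without leaving an exitcode in its .diag gets up to
# 5 extra scan rounds, tracked in its lrms_job counter file, before being
# declared "lost with unknown exit code".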
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-pbs-job, JobProcessing: $t" >> $perflogfile fi # go through existing ids for id in $pids ; do # find grid job corresponding to curent local id jobfile=`find ${ctr_dir} -name '*.local' -print0 | xargs -0 grep -F -l "localid=$id." 2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi countfile=`echo "$jobfile" | sed 's/local$/lrms_job'` # reset failure counter rm -f "$countfile" done done sleep 60 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/cancel-pbspro-job.in0000644000000000000000000000013115067751327025112 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.870493727 29 ctime=1759499029.97742307 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/cancel-pbspro-job.in0000644000175000002070000000163315067751327027020 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in PBS. # echo "----- starting cancel_pbs_job -----" 1>&2 joboption_lrms="pbspro" lrms_options="pbs_bin_path pbs_log_path" queue_options="pbs_queue_node" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # define and execute qdel PBS_QDEL="${PBS_QDEL:-qdel}" if [ ! -z "$PBS_BIN_PATH" ] ; then PBS_QDEL="${PBS_BIN_PATH}/${PBS_QDEL}" fi echo executing qdel with job id $joboption_jobid 1>&2 ${PBS_QDEL} "${joboption_jobid}" echo "----- exiting cancel_pbs_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/configure-pbspro-env.sh0000644000000000000000000000013215067751327025671 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.870493727 30 ctime=1759499029.974499889 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/configure-pbspro-env.sh0000644000175000002070000000041615067751327027574 0ustar00mockbuildmock00000000000000# # set environment variables: # PBS_BIN_PATH # # Conditionaly enable performance logging init_perflog # Path to PBS commands PBS_BIN_PATH=${CONFIG_pbs_bin_path:-/usr/bin} if [ ! -d ${PBS_BIN_PATH} ] ; then echo "Could not set PBS_BIN_PATH." 1>&2 exit 1 fi nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/submit-pbspro-job.in0000644000000000000000000000013115067751327025170 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.870493727 29 ctime=1759499029.98041636 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/submit-pbspro-job.in0000644000175000002070000003723415067751327027104 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Submits job to Altair PBS.Professional # Input: path to grami file # # The temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_pbs_job -----" 1>&2 joboption_lrms="pbspro" lrms_options="pbs_bin_path pbs_log_path" queue_options="pbs_queue_node" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? 
# run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init # perflog submission start time if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # check remote or local scratch is configured check_any_scratch ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript PBS_QSUB=${PBS_QSUB:-"qsub"} if [ ! -z "$PBS_BIN_PATH" ] ; then PBS_QSUB=${PBS_BIN_PATH}/${PBS_QSUB} fi ############################################################## # Start job script ############################################################## echo "# PBSPro batch job script built by arex" > $LRMS_JOB_SCRIPT # use /bin/bash as a top shell (-S option is compatible with older PBSPro versions) #echo "#PBS Shell_Path_List=/bin/bash" >> $LRMS_JOB_SCRIPT PBS_QSUB="${PBS_QSUB} -S /bin/bash" # no PBS native mailing echo "#PBS -m n" >> $LRMS_JOB_SCRIPT # no re-run echo "#PBS -r n" >> $LRMS_JOB_SCRIPT # write PBS output to 'comment' file echo "#PBS -o '${joboption_directory}.comment'" >> $LRMS_JOB_SCRIPT echo "#PBS -j oe" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # choose queue if [ ! -z "${joboption_queue}" ] ; then echo "#PBS -q $joboption_queue" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must scale priority. PBS: -1024 -> +1023 ARC: 0-100 priority=$((joboption_priority * (1024+1023) / 100)) priority=$((priority-1024)) echo "#PBS -p ${priority}" >> $LRMS_JOB_SCRIPT fi # project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#PBS Account_Name=$joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#PBS -WJob_Name='$jobname'" >> $LRMS_JOB_SCRIPT fi echo "PBS jobname: $jobname" 1>&2 ############################################################## # Set resource requirements ############################################################## # incorporate defaults set_count set_req_mem # set memory select string if [ ! -z "$joboption_memory" ] ; then memreq="${joboption_memory}" # memory per-chunk (joboption_memory is per-process by specification) if [ -n "$joboption_count" ] && [ $joboption_count -gt 0 ] ; then memreq=$(( ${joboption_countpernode:-1} * $memreq )) fi memreq=":mem=${memreq}mb" fi # single-process/parallel jobs if [ "$joboption_count" = "1" ] ; then select_string="#PBS -l select=1:ncpus=1${memreq}" place_string="#PBS -l place=free" elif [ -n "$joboption_numnodes" ] ; then # joboption_numnodes set by A-REX when 'countpernode' is defined in job description select_string="#PBS -l select=${joboption_numnodes}:ncpus=${joboption_countpernode:-1}${memreq}" place_string="#PBS -l place=free" else # no countpernode is requested - the job uses count as a number of chunks select_string="#PBS -l select=${joboption_count}:ncpus=1${memreq}" place_string="#PBS -l place=pack" fi
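# Example of the directives generated above (a hedged sketch with made-up
# numbers: 2 nodes, 4 processes per node, joboption_memory=2000 MB per
# process, so mem = 4 * 2000 = 8000 MB per chunk):
#   #PBS -l select=2:ncpus=4:mem=8000mb
#   #PBS -l place=free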
-z "$CONFIG_pbs_queue_node" ] ; then select_string="${select_string}:${CONFIG_pbs_queue_node}" fi # exclusice execution if [ "$joboption_exclusivenode" = "true" ]; then place_string="${place_string}:excl" fi # node properties (TODO: can be set in RTE only?) i=0 eval "var_is_set=\${joboption_nodeproperty_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_nodeproperty_$i}" select_string="${select_string}:${var_value}" i=$(( $i + 1 )) eval "var_is_set=\${joboption_nodeproperty_$i+yes}" done echo "${select_string}" >> $LRMS_JOB_SCRIPT echo "${place_string}" >> $LRMS_JOB_SCRIPT ############################################################## # Execution times (minutes) ############################################################## if [ ! -z "$joboption_cputime" ] ; then # TODO: parallel jobs, add initialization time, make walltime bigger, ... # is cputime for every process ? if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 CPU time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxcputime="$joboption_cputime" cputime_min=$(( $maxcputime / 60 )) cputime_sec=$(( $maxcputime - $cputime_min * 60 )) echo "#PBS -l cput=${cputime_min}:${cputime_sec}" >> $LRMS_JOB_SCRIPT fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $joboption_cputime * $walltime_ratio )) fi fi if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 walltime requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" walltime_min=$(( $maxwalltime / 60 )) walltime_sec=$(( $maxwalltime - $walltime_min * 60 )) echo "#PBS -l walltime=${walltime_min}:${walltime_sec}" >> $LRMS_JOB_SCRIPT fi ############################################################## # PBS stage in/out ############################################################## gate_host=`uname -n` if [ -z "$gate_host" ] ; then echo "Can't get own hostname" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error." exit 1 fi if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then ( cd "$joboption_directory" if [ $? -ne '0' ] ; then echo "Can't change to session directory: $joboption_directory" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error." 
    exit 1
  fi
  scratch_dir=`dirname "$joboption_directory"`
  echo "#PBS -W stagein=$RUNTIME_LOCAL_SCRATCH_DIR@$gate_host:$joboption_directory" >> $LRMS_JOB_SCRIPT
  echo "#PBS -W stageout=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid@$gate_host:$scratch_dir" >> $LRMS_JOB_SCRIPT
  echo "#PBS -W stageout=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid.diag@$gate_host:$joboption_directory.diag" >> $LRMS_JOB_SCRIPT
  )
fi

echo "" >> $LRMS_JOB_SCRIPT
echo "# Override umask of execution node (sometimes values are really strange)" >> $LRMS_JOB_SCRIPT
echo "umask 077" >> $LRMS_JOB_SCRIPT
echo " " >> $LRMS_JOB_SCRIPT

sourcewithargs_jobscript

##############################################################
# Init accounting
##############################################################
accounting_init

##############################################################
# Add environment variables
##############################################################
add_user_env

##############################################################
# Check for existence of executable;
# there is no sense in checking for the executable if files are
# downloaded directly to the computing node
##############################################################
if [ -z "${joboption_arg_0}" ] ; then
  echo 'Executable is not specified' 1>&2
  rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR"
  echo "Submission: Job description error."
  exit 1
fi

######################################################################
# Adjust working directory for tweaky nodes
# RUNTIME_GRIDAREA_DIR should be defined by external means on nodes
######################################################################
if [ ! -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then
  setup_runtime_env
else
  echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT
  echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT
  RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"`
  RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"`
  RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"`
  if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then
    echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT
  else
    echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT
  fi
  if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then
    echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT
  else
    echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT
  fi
  if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then
    echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT
  else
    echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT
  fi
fi

##############################################################
# Add std... to job arguments
##############################################################
include_std_streams

##############################################################
# Move files to local working directory (job is done on node only)
# RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id
##############################################################
move_files_to_node

echo "" >> $LRMS_JOB_SCRIPT
echo "RESULT=0" >> $LRMS_JOB_SCRIPT
echo "" >> $LRMS_JOB_SCRIPT

#####################################################
#  Go to working dir and start job
####################################################
echo "" >> $LRMS_JOB_SCRIPT
echo "# Changing to session directory" >> $LRMS_JOB_SCRIPT
echo "cd \$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT
echo "export HOME=\$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT

##############################################################
# Skip execution if something already failed
##############################################################
echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT

##############################################################
# Runtime configuration at computing node
##############################################################
RTE_stage1

##############################################################
# Diagnostics
##############################################################
echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT
cat >> $LRMS_JOB_SCRIPT <<'EOSCR'
if [ ! "X$PBS_NODEFILE" = 'X' ] ; then
  if [ -r "$PBS_NODEFILE" ] ; then
    cat "$PBS_NODEFILE" | sed 's/\(.*\)/nodename=\1/' >> "$RUNTIME_JOB_DIAG"
    NODENAME_WRITTEN="1"
  else
    PBS_NODEFILE=
  fi
fi
EOSCR

##############################################################
# Accounting (WN OS Detection)
##############################################################
detect_wn_systemsoftware

##############################################################
# Check intermediate result again
##############################################################
echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT

##############################################################
# Execution
##############################################################
cd_and_run

##############################################################
# End of RESULT checks
##############################################################
echo "fi" >> $LRMS_JOB_SCRIPT
echo "fi" >> $LRMS_JOB_SCRIPT

##############################################################
# Runtime (post)configuration at computing node
##############################################################
RTE_stage2

####################################################
# Clean up output files in local scratchdir
####################################################
clean_local_scratch_dir_output

##############################################################
# Move files back to session directory (job is done on node only)
# RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR
##############################################################
move_files_to_frontend

##############################################################
# Finish accounting and exit job
##############################################################
accounting_end

#######################################
#  Submit the job
#######################################
echo "PBS job script built" 1>&2
# Execute qsub command
cd "$joboption_directory"
echo "PBS script follows:" 1>&2
echo "-------------------------------------------------------------------" 1>&2
"$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 PBS_RESULT=1 PBS_TRIES=0 if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-pbs-job, JobScriptCreation: $t" >> $perflogfilesub fi while [ "$PBS_TRIES" -lt '10' ] ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${PBS_QSUB} $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR PBS_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-pbs-job, JobSiubmission: $t" >> $perflogfilesub fi if [ "$PBS_RESULT" -eq '0' ] ; then break ; fi if [ "$PBS_RESULT" -eq '198' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 PBS_TRIES=0 continue fi grep 'maximum number of jobs' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 PBS_TRIES=0 continue fi PBS_TRIES=$(( $PBS_TRIES + 1 )) sleep 2 done if [ $PBS_RESULT -eq '0' ] ; then job_id=`cat $LRMS_JOB_OUT` # This should be on the format 1414162.$hostname if [ "${job_id}" = "" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the pbs jobid for the job!" 1>&2 echo "Submission: Local submission client behaved unexpectedly." elif [ `echo "${job_id}" | grep -Ec "^[0-9]+"` != "1" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "badly formatted pbs jobid for the job: $job_id !" 1>&2 echo "Submission: Local submission client behaved unexpectedly." else echo "joboption_jobid=$job_id" >> $GRAMI_FILE echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_pbs_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from qsub: $PBS_RESULT !" 1>&2 echo "Submission: Local submission client failed." fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_pbs_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/PaxHeaders/README0000644000000000000000000000013215067751327022143 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.870493727 30 ctime=1759499029.981895748 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbspro/README0000644000175000002070000000011315067751327024040 0ustar00mockbuildmock00000000000000PBS backend targeted to support recent Altair's PBS Professional features. 
nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/lsf0000644000000000000000000000013215067751426020465 xustar0030 mtime=1759499030.107704287 30 atime=1759499034.764510185 30 ctime=1759499030.107704287 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/0000755000175000002070000000000015067751426022444 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022576 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499030.101573649 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/Makefile.am0000644000175000002070000000014615067751327024501 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-lsf-env.sh pkgdata_SCRIPTS = scan-lsf-job submit-lsf-job cancel-lsf-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356022611 xustar0030 mtime=1759498990.454767849 30 atime=1759499018.512263231 30 ctime=1759499030.104004247 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/Makefile.in0000644000175000002070000005532515067751356024525 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/lsf ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-lsf-job cancel-lsf-job scan-lsf-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-lsf-job.in \ $(srcdir)/scan-lsf-job.in $(srcdir)/submit-lsf-job.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = 
@FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ 
SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-lsf-env.sh pkgdata_SCRIPTS = scan-lsf-job submit-lsf-job cancel-lsf-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/lsf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/lsf/Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-lsf-job: $(top_builddir)/config.status $(srcdir)/submit-lsf-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-lsf-job: $(top_builddir)/config.status $(srcdir)/cancel-lsf-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-lsf-job: $(top_builddir)/config.status $(srcdir)/scan-lsf-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | 
sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/submit-lsf-job.in0000644000000000000000000000013115067751327023726 xustar0029 mtime=1759498967.76728586 30 atime=1759498967.870493727 30 ctime=1759499030.107704287 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/submit-lsf-job.in0000644000175000002070000003157615067751327025645 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # Submits job to LSF # Input: path to grami file (same as Globus). # # A temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_lsf_job -----" 1>&2 joboption_lrms="lsf" lrms_options="lsf_architecture lsf_bin_path" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init # perflog perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create temp job script ############################################################## mktempscript ############################################################## # Start job script ############################################################## LSF_BSUB='bsub' LSF_BPARAMS='bparams' if [ ! 
-z "$LSF_BIN_PATH" ] ; then LSF_BSUB=${LSF_BIN_PATH}/${LSF_BSUB} LSF_BPARAMS=${LSF_BIN_PATH}/${LSF_BPARAMS} fi echo "#! /bin/bash" > $LRMS_JOB_SCRIPT echo "#LSF batch job script built by arex" >> $LRMS_JOB_SCRIPT echo "#" >> $LRMS_JOB_SCRIPT # Specify the bash shell as default #echo "#BSUB -L /bin/bash" >> $LRMS_JOB_SCRIPT # Write output to comment file: echo "#BSUB -oo ${joboption_directory}.comment" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # Choose queue(s). if [ ! -z "${joboption_queue}" ] ; then echo "#BSUB -q $joboption_queue" >> $LRMS_JOB_SCRIPT fi if [ ! -z "${joboption_rsl_architecture}" ] ; then queuearch=`echo ${joboption_rsl_architecture}|sed 's/\"//g'` echo "#BSUB -R type=${queuearch}" >> $LRMS_JOB_SCRIPT else if [ ! -z $CONFIG_lsf_architecture ] ; then echo "#BSUB -R type=$CONFIG_lsf_architecture" >> $LRMS_JOB_SCRIPT fi fi # Project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#BSUB -P $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#BSUB -J $jobname" >> $LRMS_JOB_SCRIPT fi echo "LSF jobname: $jobname" 1>&2 ############################################################## # (non-)parallel jobs ############################################################## set_count ############################################################## # parallel jobs ############################################################## echo "#BSUB -n $joboption_count" >> $LRMS_JOB_SCRIPT # parallel structure if [ ! -z $joboption_countpernode ] && [ $joboption_countpernode != '-1' ] ; then echo "#BSUB -R span[ptile=$joboption_countpernode]" >> $LRMS_JOB_SCRIPT fi # exclusive execution if [ "$joboption_exclusivenode" = "true" ]; then echo "#BSUB -x" >> $LRMS_JOB_SCRIPT fi ############################################################## # Execution times (obtained in seconds) ############################################################## #OBS: Assuming here that LSB_JOB_CPULIMIT=y or is unset. if [ -n "$joboption_cputime" ] && [ $joboption_cputime -gt 0 ]; then cputime=$(( ${joboption_cputime} / 60 )) echo "#BSUB -c ${cputime}" >> $LRMS_JOB_SCRIPT fi if [ -n "$joboption_walltime" ] && [ $joboption_walltime -gt 0 ] ; then walltime=$(( ${joboption_walltime} / 60 )) echo "#BSUB -W ${walltime}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem #-M is memory limit per process in LSF, so no need to modify memory limit based on count. if [ ! -z "$joboption_memory" ]; then memory=$(( ${joboption_memory} * 1024 )) echo "#BSUB -M ${memory}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Start Time ############################################################## if [ -n "$joboption_starttime" ] ; then echo "#BSUB -b ${joboption_starttime}" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must parse the max priority maxprio=`${LSF_BPARAMS} -a| grep MAX_USER_PRIORITY | cut -f 2 -d '=' | cut -f 2 -d ' '` #scale priority LSF: 1 -> MAX_USER_PRIORITY ARC: 0-100 if [ ! 
-z "$maxprio" ]; then if [ "$maxprio" -gt "0" ]; then priority=$((joboption_priority * ($maxprio - 1) / 100 +1)) echo "#BSUB -sp ${priority}" >> $LRMS_JOB_SCRIPT fi fi fi ############################################################## # Override umask ############################################################## echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existence of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error." exit 1 fi program_start=`echo ${joboption_arg_0} | head -c 1 2>&1` if [ "$program_start" != '$' ] && [ "$program_start" != '/' ] ; then if [ ! -f $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable does not exist, or permission denied.' 1>&2 echo " Executable $joboption_directory/${joboption_arg_0}" 1>&2 echo " whoami: "`whoami` 1>&2 echo " ls -l $joboption_directory/${joboption_arg_0}: "`ls -l $joboption_directory/${joboption_arg_0}` exit 1 fi if [ ! -x $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable is not executable' 1>&2 exit 1 fi fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... 
# Add std... to job arguments
##############################################################
include_std_streams

##############################################################
# Move files to local working directory (job is done on node only)
# RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id
##############################################################
move_files_to_node

echo "" >> $LRMS_JOB_SCRIPT
echo "RESULT=0" >> $LRMS_JOB_SCRIPT
echo "" >> $LRMS_JOB_SCRIPT

#####################################################
#  Go to working dir and start job
####################################################
echo "# Changing to session directory" >> $LRMS_JOB_SCRIPT
echo "cd \$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT
echo "export HOME=\$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT

##############################################################
# Skip execution if something already failed
##############################################################
echo "" >> $LRMS_JOB_SCRIPT
echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT
echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT

##############################################################
# Runtime configuration at computing node
##############################################################
RTE_stage1

#extra checks
if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then
  echo "Nodes detached from gridarea are not supported when LSF is used. Aborting job submission" 1>&2
  rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR"
  exit 1
fi

gate_host=`uname -n`
if [ -z "$gate_host" ] ; then
  echo "Can't get own hostname" 1>&2
  rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR"
  exit 1
fi

##############################################################
# Execution
##############################################################
cd_and_run

##############################################################
# End of RESULT checks
##############################################################
echo "fi" >> $LRMS_JOB_SCRIPT

##############################################################
# Runtime (post)configuration at computing node
##############################################################
RTE_stage2

##############################################################
# Move files back to session directory (job is done on node only)
# RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR
##############################################################
move_files_to_frontend

##############################################################
# Finish accounting and exit job
##############################################################
accounting_end

#######################################
#  Submit the job
#######################################
# Execute bsub command
cd "$joboption_directory"
#chmod 0755 $LRMS_JOB_SCRIPT
# We make the assumption that $joboption_directory is locally available according to the requirements of any arc installation
echo "----------------- BEGIN job script -----" 1>&2
cat $LRMS_JOB_SCRIPT 1>&2
echo "----------------- END job script -----" 1>&2
if [ ! -z "$perflogdir" ]; then
  stop_ts=`date +%s.%N`
  t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"`
  echo "[`date +%Y-%m-%d\ %T`] submit-lsf-job, JobScriptCreation: $t" >> $perflogfilesub
fi
if [ ! -z "$perflogdir" ]; then
  start_ts=`date +%s.%N`
fi
${LSF_BSUB} < $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR
LSF_RESULT="$?"
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-lsf-job, JobSubmission: $t" >> $perflogfilesub fi if [ $LSF_RESULT -eq '0' ] ; then job_id=`cat $LRMS_JOB_OUT | awk '{split($0,field," ");print field[2]}' | sed 's/[<>]//g'` if [ "${job_id}" = "" ] ; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the jobid for the job!" 1>&2 else echo "joboption_jobid=$job_id" >> $GRAMI_FILE echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_lsf_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from qsub!" 1>&2 fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_lsf_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/cancel-lsf-job.in0000644000000000000000000000013115067751327023650 xustar0029 mtime=1759498967.76728586 30 atime=1759498967.870493727 30 ctime=1759499030.105236764 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/cancel-lsf-job.in0000644000175000002070000000155615067751327025562 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in LSF. # echo "----- starting cancel_lsf_job -----" 1>&2 joboption_lrms="lsf" lrms_options="lsf_architecture lsf_bin_path" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # bkill LSF_BKILL='bkill' if [ ! -z "$LSF_BIN_PATH" ] ; then LSF_BKILL="${LSF_BIN_PATH}/${LSF_BKILL} -s 9 " fi echo "executing ${LSF_BKILL} with job id ${joboption_jobid}" 1>&2 $LSF_BKILL $joboption_jobid echo "----- exiting cancel_lsf_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/scan-lsf-job.in0000644000000000000000000000013115067751327023347 xustar0029 mtime=1759498967.76728586 30 atime=1759498967.870493727 30 ctime=1759499030.106478924 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/scan-lsf-job.in0000644000175000002070000001634215067751327025260 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # # Scan for finished LSF jobs, using bjobs # # usage: scan_lsf_job control_dir ... # Set variables: # LSF_BIN_PATH # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi if [ -z "$1" ] ; then echo "Missing control directory path" 1>&2 exit 1 fi # first control_dir is used for storing own files echo `date`" : control_dir=$1" 1>&2 control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} $1" shift done control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } joboption_lrms="lsf" lrms_options="lsf_architecture lsf_bin_path" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . 
"${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Assume that gm-kick is installed in the same directory GMKICK=${pkglibexecdir}/gm-kick umask 022 # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi my_id=`id -u` if [ -z ${LSF_BIN_PATH} ]; then echo "${LSF_BIN_PATH} not set" 1>&2 exit 1 fi # Get all running jobs lsf_stat=`${LSF_BIN_PATH}/bjobs -a -u all 2>/dev/null` # | grep RUN | grep '^ [:digit:]' if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-lsf-job, bjobs -a -u all: $t" >> $perflogfile fi if [ -z "${lsf_stat}" ] ; then echo "bjobs returned empty result" 1>&2 fi pids=`echo "${lsf_stat}" | egrep 'PSUSP|USUSP|SSUSP|RUN|PEND' | sed -e 's/^\([^ ]*\).*/\1/'` eval "set -- $control_dirs" # Go through directories for ctr_dir in $control_dir ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Obtain ids stored in job.*.local gridids=`find ${ctr_dir}/processing -name '*.status' -print | sed 's/.*\/\([^\.\/]*\)\.status$/\1/'` ids=`for gridid in $gridids; do grep -h "^localid=" $(control_path "${ctr_dir}" "$gridid" "local") 2>/dev/null | sed 's/^localid=\([0-9]*\).*/\1/' ; done` if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-lsf-job, ControlDirTraversal: $t" >> $perflogfile fi if [ -z "$ids" ] ; then continue ; fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id$"` if [ -z "$found" ] ; then bids="$bids $id" fi done if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi done_count=0 total_count=0 # go through missing ids for id in $bids ; do # find grid job corresponding to curent local id gridid="" for gid in $gridids; do grep "^localid=$id" $(control_path "${ctr_dir}" "$gid" "local") 2>/dev/null 1>/dev/null if [ $? -eq '1' ] ; then gridid="$gid" break fi done if [ -z "$gridid" ] ; then continue ; fi total_count=$(( total_count + 1 )) donefile=$(control_path "${ctr_dir}" "${gridid}" "lrms_done") jobfile=$(control_path "${ctr_dir}" "${gridid}" "local") if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ] ; then continue ; fi if [ "$my_id" != '0' ] ; then if [ ! 
-O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } # get session directory of this job sessiondir=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` # get job specific output and remove header bjobs_output="`${LSF_BIN_PATH}/bjobs -W -w $id 2>/dev/null | sed -e'1,1d'`" job_status="`echo $bjobs_output | awk '{print $3}'`" # DONE if exit_code is 0, EXIT if non zero if [ "${job_status}" = "DONE" ] || [ "${job_status}" = "EXIT" ]; then job_read_diag starttime="`echo $bjobs_output | awk '{print $14}' | sed 's/-/ /g'`" endtime="`echo $bjobs_output | awk '{print $15}' | sed 's/-/ /g'`" date_to_utc_seconds "$starttime" starttime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSStartTime=$return_mds_date date_to_utc_seconds "$endtime" endtime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSEndTime=$return_mds_date #TODO handle cputime (walltime * count?) etc. walltime=$(( $endtime_seconds - $starttime_seconds)) #cputime=$(( $walltime * $count)) # Values to write to diag. These will override values already written. [ -n "$walltime" ] && WallTime=$walltime #[ -n "$cputime" ] && UserTime=$cputime #[ -n "$cputime" ] && KernelTime=0 job_write_diag done_count=$(( done_count + 1 )) fi if [ -n "$sessiondir" ] ; then # have chance to obtain exit code diagfile="${sessiondir}.diag" if [ -n "$sessiondir" ] ; then # have chance to obtain exit code exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi if [ -n "$exitcode" ] ; then # job finished and exit code is known save_commentfile "$uid" "${sessiondir}.comment" $(control_path "${ctr_dir}" "${gridid}" "errors") echo "$exitcode Executable finished with exit code $exitcode" > "$donefile" ${GMKICK} -j "${gridid}" "${ctr_dir}" continue fi fi # job has probaly finished and exit code is not known exitcode='-1' countfile=$(control_path "${ctr_dir}" "${gridid}" "lrms_job") counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ] ; then rm -f "$countfile" save_commentfile "$uid" "${sessiondir}.comment" $(control_path "${ctr_dir}" "${gridid}" "errors") echo "$exitcode Job was lost with unknown exit code" > "$donefile" ${GMKICK} -j "${gridid}" "${ctr_dir}" else echo "$counter" > "$countfile" fi done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-lsf-job, JobProcessing, T=$total_count D=$done_count: $t" >> $perflogfile fi # go through existing ids for id in $pids ; do # find grid job corresponding to curent local id jobfile=`find ${ctr_dir} -name '*.local' -print0 | xargs -0 grep -F -l "localid=$id." 
    jobfile=`find ${ctr_dir} -name '*.local' -print0 | xargs -0 grep -F -l "localid=$id." 2>/dev/null`
    if [ -z "$jobfile" ] ; then continue ; fi
    gridid=`grep -h '^globalid=' "$jobfile" | sed 's/^globalid=//'`
    countfile=$(control_path "${ctr_dir}" "${gridid}" "lrms_job")
    # reset failure counter
    rm -f "$countfile"
  done
done
sleep 60
exit 0
nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/configure-lsf-env.sh0000644000000000000000000000013115067751327024426 xustar0029 mtime=1759498967.76728586 30 atime=1759498967.870493727 30 ctime=1759499030.102796384 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/configure-lsf-env.sh0000755000175000002070000000043615067751327026337 0ustar00mockbuildmock00000000000000
#
# set environment variables:
#   LSF_BIN_PATH
#   CONFIG_lsf_architecture
#
# Conditionally enable performance logging
init_perflog
# Path to LSF commands
LSF_BIN_PATH=$CONFIG_lsf_bin_path
if [ ! -d ${LSF_BIN_PATH} ] ; then
  echo "Could not set LSF_BIN_PATH." 1>&2
  exit 1
fi
nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/PaxHeaders/README0000644000000000000000000000013115067751327021421 xustar0029 mtime=1759498967.76728586 30 atime=1759498967.870493727 30 ctime=1759499030.108927451 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lsf/README0000644000175000002070000000002515067751327023321 0ustar00mockbuildmock00000000000000
LSF control scripts.
nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/pbs0000644000000000000000000000013215067751425020464 xustar0030 mtime=1759499029.945869115 30 atime=1759499034.764510185 30 ctime=1759499029.945869115 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/0000755000175000002070000000000015067751425022443 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327022575 xustar0029 mtime=1759498967.76728586 30 atime=1759498967.870493727 30 ctime=1759499029.940596907 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/Makefile.am0000644000175000002070000000014615067751327024501 0ustar00mockbuildmock00000000000000
dist_pkgdata_DATA = configure-pbs-env.sh
pkgdata_SCRIPTS = submit-pbs-job cancel-pbs-job scan-pbs-job
nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/configure-pbs-env.sh0000644000000000000000000000013215067751327024427 xustar0030 mtime=1759498967.767700358 30 atime=1759498967.870493727 30 ctime=1759499029.941612963 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/configure-pbs-env.sh0000644000175000002070000000041615067751327026332 0ustar00mockbuildmock00000000000000
#
# set environment variables:
#   PBS_BIN_PATH
#
# Conditionally enable performance logging
init_perflog
# Path to PBS commands
PBS_BIN_PATH=${CONFIG_pbs_bin_path:-/usr/bin}
if [ ! -d ${PBS_BIN_PATH} ] ; then
  echo "Could not set PBS_BIN_PATH." 1>&2
  exit 1
fi
nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356022611 xustar0030 mtime=1759498990.486791281 30 atime=1759499018.590264417 30 ctime=1759499029.942640212 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/Makefile.in0000644000175000002070000005532515067751356024515 0ustar00mockbuildmock00000000000000
# Makefile.in generated by automake 1.16.2 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2020 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/pbs ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-pbs-job cancel-pbs-job scan-pbs-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e 
"s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-pbs-job.in \ $(srcdir)/scan-pbs-job.in $(srcdir)/submit-pbs-job.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = 
@CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ 
PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-pbs-env.sh pkgdata_SCRIPTS = submit-pbs-job cancel-pbs-job scan-pbs-job all: all-am 
.SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/pbs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/pbs/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-pbs-job: $(top_builddir)/config.status $(srcdir)/submit-pbs-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-pbs-job: $(top_builddir)/config.status $(srcdir)/cancel-pbs-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-pbs-job: $(top_builddir)/config.status $(srcdir)/scan-pbs-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" 
|| exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/scan-pbs-job.in0000644000000000000000000000013215067751327023350 xustar0030 mtime=1759498967.767700358 30 atime=1759498967.870493727 30 ctime=1759499029.944802679 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/scan-pbs-job.in0000644000175000002070000004034115067751327025254 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Periodically read log files of PBS and put mark files # for jobs which finished. # If log files are not available scan for finished (absent) jobs # in PBS and put mark files for jobs which finished. # # usage: scan_pbs_job control_dir ... # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi # first control_dir is used for storing own files if [ -z "$1" ] ; then exit 1 ; fi control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} \"$1\"" shift done control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } joboption_lrms="pbs" lrms_options="pbs_bin_path pbs_log_path" queue_options="pbs_queue_node nodememory" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Define gm-kick location GMKICK=${pkglibexecdir}/gm-kick # Log system performance if [ ! 
-z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi pbs_log_dir="${CONFIG_pbs_log_path:-/var/spool/pbs/server_logs}" my_id=`id -u` state_file=$control_dir/pbs_log_scan.`id -un` lines=`cat "$state_file" 2>/dev/null` ldt=`echo $lines | awk '{split($0,field," ");print field[1]}' ` lines=`echo $lines | awk '{split($0,field," ");print field[2]}'` lines_skip=$(( $lines + 0 )) ldate=$(( $ldt + 0 )) if [ -z "$lines_skip" ] ; then lines_skip='0' ; fi if [ -z "$ldate" ] ; then ldate='0' ; fi whole_line= find_by_local() { eval "set -- $control_dirs" for ctr_dir in "$@"; do find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do read id if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "local") grep -F -c $whole_line "localid=$job_id" "${path}" 2>/dev/null 1>/dev/null if [ $? -eq '0' ] ; then echo "${id} ${ctr_dir}" ; break ; fi done } done } find_by_grami() { eval "set -- $control_dirs" for ctr_dir in "$@"; do find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do read id if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "grami") grep -F -l $whole_line "joboption_jobid=$job_id" "${path}" 2>/dev/null 1>/dev/null if [ $? -eq '0' ] ; then echo "${id} ${ctr_dir}" ; break ; fi done } done } # set_job_vars takes a line from pbs logs and splits it, returning information # in pbs_date, pbs_code, pbs_server, pbs_job, job_id, job_message and rest_line set_job_vars() { pbs_date=$1 pbs_code=$2 pbs_server=$3 pbs_job=$4 job_id=$5 job_message=$6 rest_line=$7 } # # Main function for processing one PBS log. # Extracts log lines with code 0010 (job exited) and 0008 (job killed) # # TODO this should be split into smaller functions process_log_file () { eval "set -- $control_dirs" #we grep for finished jobs, then use sed to remove already processed lines #OBS: deleted jobs have a 0008 message with not much info in it. A 0010 # message may follow (or not) with full usage stats. By this time the # job has already been processed, so this info is ignored! #TODO: make log scanning more intelligent. exited_killed_jobs=`egrep '^[^;]*;(0010|16);[^;]*;Job;|^[^;]*;(00)?08;[^;]*;Job;[^;]*;Exit_status=|^[^;]*;(00)?08;[^;]*;Job;[^;]*;Job deleted' ${lname} | tail -n+$(( $lines_skip + 1 ))` #TODO should we add processed lines before jobs have actually been processed? What if the last job only has half a record? new_lines=`echo -n "$exited_killed_jobs" | wc -l` # new_lines set to 1 when string is empty, should have been 0 [ "x$exited_killed_jobs" = x ] && continue lines_processed=$(( $lines_skip + $new_lines )) if [ "$lines_processed" -lt '0' ] ; then lines_processed=0; fi echo "$cname $lines_processed"> $state_file exited_killed_jobs=`echo "$exited_killed_jobs" | sort -u` # force word splitting to happen only on newlines old_IFS=$IFS; IFS=' ' for job in $exited_killed_jobs; do # Split line into fields by forcing word splitting to happen on ";" IFS=";" set_job_vars $job IFS=$old_IFS # Try to extract exit code of PBS (note: if executable fails it's code goes to PBS) exit_code=`echo "$job_message" | sed -n 's/^.*Exit_status=\([-0-9]*\).*/\1/p'` # Check if job has suffix echo "$job_id" | grep -q -F . if [ ! $? 
= '0' ] ; then whole_line=-x else job_id=`echo "$job_id" | awk '{split($0,field,".");print field[1]"."field[2]}'` whole_line= fi # look for this id in ID.local, then in ID.grami gridid=`find_by_local` if [ -z "$gridid" ]; then gridid=`find_by_grami` fi ctrdir=`echo "${gridid}" | sed -n 's/^[^ ]* \(.*\)/\1/p'` gridid=`echo "${gridid}" | sed -n 's/^\([^ ]*\) .*/\1/p'` if [ -z "$gridid" ]; then continue; fi if [ -z "$ctrdir" ]; then continue; fi statusfile="${ctrdir}/processing/${gridid}.status" lrmsfile=$(control_path "${ctrdir}" "${gridid}" "lrms_done") localfile=$(control_path "${ctrdir}" "${gridid}" "local") errorsfile=$(control_path "${ctrdir}" "${gridid}" "errors") if [ "$my_id" != '0' ] ; then if [ ! -O "$statusfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$statusfile") [ -z "$uid" ] && { log "Failed to stat $statusfile"; continue; } # check if job already reported if [ -f "$lrmsfile" ] ; then continue ; fi # more protection - check if arex thinks job is still running egrep 'INLRMS|SUBMIT|CANCELING' "$statusfile" >/dev/null 2>&1 if [ ! $? = '0' ] ; then continue ; fi # So far only PBS exit code is available # It would be nice to have exit code of main executable exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' "$localfile" | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ -z "$sessiondir" ] ; then log "Failed to determine the path of the job's session directory" else # have chance to obtain exit code if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # In case of non-NFS setup it may take some time until the # diagnostics file is delivered. Wait for it at most 2 minutes. # OBS: exitcode may never appear in the .diag file if the job was # killed. There will be a 2 minute delay for every such job! diag_tries=0 while [ "$diag_tries" -lt 20 ] ; do job_read_diag # uses $sessiondir, $uid if [ ! -z "$exitcode" ] ; then break ; fi sleep 10 diag_tries=$(( $diag_tries + 1 )) log "no exitcode in diag file $diagfile (try $diag_tries of 20)" done else job_read_diag # uses $sessiondir, $uid fi fi # Try to obtain message from PBS if any pbs_comment=$(do_as_uid "$uid" "tail -n 1 '$commentfile'") save_commentfile "$uid" "$commentfile" "$errorsfile" # Extract values from PBS walltime=`echo "$job_message" | sed -n 's/^.*resources_used.walltime=\(\([0-9]*:\)*[0-9][0-9]\).*/\1/p'` cputime=`echo "$job_message" | sed -n 's/^.*resources_used.cput=\(\([0-9]*:\)*[0-9][0-9]\).*/\1/p'` mem=`echo "$job_message" | sed -n 's/^.*resources_used.mem=\([0-9]*\)kb.*/\1/p'` vmem=`echo "$job_message" | sed -n 's/^.*resources_used.vmem=\([0-9]*\)kb.*/\1/p'` # Convert to utc and store as seconds date_to_utc_seconds "$pbs_date" if [ ! -z "$return_date_seconds" ]; then # Convert from seconds to YYYYMMDDHHMMSSZ seconds_to_mds_date "$return_date_seconds" endtime=$return_mds_date # Find out how many seconds the job executed interval_to_seconds "$walltime" if [ ! -z "$return_interval_seconds" ]; then # Convert from seconds to YYYYMMDDHHMMSSZ seconds_to_mds_date $(( $return_date_seconds - $return_interval_seconds )) starttime=$return_mds_date fi fi # Values to write to diag. These will override values already written.
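# A sketch of the conversion chain used here, assuming the scan_common.sh
# helpers behave as their names suggest: interval_to_seconds turns a
# colon-separated interval such as "01:02:03" into 3723 seconds, and
# seconds_to_mds_date formats UTC epoch seconds as YYYYMMDDHHMMSSZ (for
# example 2025-10-03 12:32:47 UTC becomes 20251003123247Z); starttime above
# is therefore reconstructed as endtime seconds minus walltime seconds.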
interval_to_seconds "$walltime" [ -n "$return_interval_seconds" ] && WallTime=$return_interval_seconds interval_to_seconds "$cputime" [ -n "$return_interval_seconds" ] && UserTime=$return_interval_seconds [ -n "$return_interval_seconds" ] && KernelTime=0 [ -n "$mem" ] && UsedMemory=$mem [ -n "$vmem" ] && TotalMemory=$vmem [ -n "$starttime" ] && LRMSStartTime=$starttime [ -n "$endtime" ] && LRMSEndTime=$endtime [ -n "$pbs_comment" ] && LRMSMessage=$pbs_comment [ -n "$exit_code" ] && LRMSExitcode=$exit_code job_write_diag if [ -z "$exitcode" ] ; then # No exit code of job means job was most probably killed if [ -z "$exit_code" ] ; then exit_code='-1'; fi if [ "$exit_code" = '0' ] ; then echo "Job $job_id failed but PBS have not noticed that" 1>&2 echo "-1 Job failed but PBS reported 0 exit code." > "$lrmsfile" elif [ -z "$pbs_comment" ] ; then echo "Job $job_id failed with PBS exit code $exit_code" 1>&2 echo "$exit_code Job was killed by PBS." > "$lrmsfile" else echo "Job $job_id failed with PBS exit code $exit_code" 1>&2 echo "$exit_code $pbs_comment" > "$lrmsfile" fi else if [ -z "$exit_code" ] ; then exit_code='-1'; fi if [ ! "$exitcode" = 0 ] ; then if [ "$exit_code" = '0' ] ; then exit_code='-1'; fi echo "Job $job_id failed with exit code $exitcode, PBS reported $exit_code." 1>&2 echo "$exit_code Job failed with exit code $exitcode." > "$lrmsfile" else if [ ! "$exit_code" = '0' ] ; then echo "Job finished properly but PBS reported $exit_code." 1>&2 if [ -z "$pbs_comment" ] ; then echo "$exit_code Job was killed by PBS." > "$lrmsfile" else echo "$exit_code $pbs_comment" > "$lrmsfile" fi else # echo "Job finished without errors." 1>&2 echo "0" > "$lrmsfile" fi fi fi # wake up GM ${GMKICK} -j "${gridid}" "${ctrdir}" done IFS=$old_IFS } readable_logs=no # Check $pbs_log_dir for readable files # if any are found, process them and update relevant information if [ ! -z "${pbs_log_dir}" ] ; then for cname in `ls -1 ${pbs_log_dir}/ 2>/dev/null | grep '^[0-9]*$'` ; do lname="${pbs_log_dir}/$cname" if [ ! -r "$lname" ] ; then continue ; fi readable_logs=yes if [ "$cname" -lt "$ldate" ] ; then continue elif [ "$cname" -gt "$ldate" ] ; then lines_skip=0 fi echo "Date: " $cname last_modified=`stat $lname | grep Modify` process_log_file done fi # main loop, stay here up to 60 seconds if log is still updated while # we are reading it. if [ "$readable_logs" = 'yes' ] ; then time_count=0 while true ; do new_modified=`stat $lname | grep Modify` if [ "$new_modified" != "$last_modified" ] ; then last_modified="$new_modified" lines=`cat "$state_file" 2>/dev/null` ldt=`echo $lines | awk '{split($0,field," ");print field[1]}' ` lines=`echo $lines | awk '{split($0,field," ");print field[2]}'` lines_skip=$(( $lines + 0 )) ldate=$(( $ldt + 0 )) process_log_file fi sleep 10 time_count=$(( $time_count + 10 )) if [ "$time_count" -ge 60 ] ; then break ; fi done exit 0 fi # If no PBS logs found try ordinary 'qstat' eval "set -- $control_dirs" # Get all running jobs pidslist=`mktemp "$TMPDIR/qstat.XXXXXX"` || if [ ! "$?" = '0' ] ; then # FS problems ? # TODO debug output here sleep 60 exit 1 fi ${PBS_BIN_PATH}/qstat -a 2>/dev/null 1>"$pidslist" if [ ! "$?" = '0' ] ; then rm -f "$pidslist" # PBS server down ? sleep 60 exit 1 fi exclude_completed () { awk '$10!="C"{print $0}' } pids=`cat "$pidslist" | grep '^[0-9][0-9]*\.' | exclude_completed | sed 's/^\([0-9][0-9]*\).*/\1/'` rm -f "$pidslist" # Go through directories for ctr_dir in "$@" ; do if [ ! 
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Obtain ids stored in job.*.local ids=` find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do \ read id; \ if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "local") grep -h "^localid=" "${path}" 2>/dev/null | sed 's/^localid=\([0-9]*\).*/\1/' done } ` if [ -z "$ids" ] ; then continue ; fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-pbs-job, ControlDirTraversal: $t" >> $perflogfile fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id$"` if [ -z "$found" ] ; then bids="$bids $id" fi done if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # go through missing ids for id in $bids ; do # find grid job corresponding to curent local id jobfile="" gridid="" find ${ctr_dir}/processing -name '*.status' -print | sed 's/^.*\/\([^\.]*\)\.status$/\1/g' | { while true; do \ read id; \ if [ $? -ne '0' ] ; then break ; fi path=$(control_path "${ctr_dir}" "${id}" "local") grep -F -l "localid=$id." 2>/dev/null 1>/dev/null if [ $? -eq '0' ] ; then gridid="$id" jobfile="$path" break fi done } if [ -z "$jobfile" ] ; then continue ; fi if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } donefile=$(control_path "${ctr_dir}" "${gridid}" "lrms_done") if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ]; then continue ; fi # get session directory of this job session=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` if [ ! -z "$session" ] ; then # have chance to obtain exit code diagfile="${session}.diag" exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') if [ ! -z "$exitcode" ] ; then # job finished and exit code is known save_commentfile "$uid" "${session}.comment" $(control_path "${ctr_dir}" "${gridid}" "errors") echo "$exitcode Executable finished with exit code $exitcode" > "$donefile" ${GMKICK} -j "$gridid" "${ctr_dir}" echo "Job $gridid finished with exit code $exitcode" continue fi fi # job has probaly finished and exit code is not known exitcode='-1' countfile=$(control_path "${ctr_dir}" "${gridid}" "lrms_job") counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ] ; then rm -f "$countfile" save_commentfile "$uid" "${session}.comment" $(control_path "${ctr_dir}" "${gridid}" "errors") echo "$exitcode Job was lost with unknown exit code" > "$donefile" ${GMKICK} -j "$gridid" "${ctr_dir}" echo "Job $gridid finished with unknown exit code" else echo "$counter" > "$countfile" fi done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-pbs-job, JobProcessing: $t" >> $perflogfile fi # go through existing ids for id in $pids ; do # find grid job corresponding to curent local id jobfile=`find ${ctr_dir} -name '*.local' -print0 | xargs -0 grep -F -l "localid=$id." 
2>/dev/null` if [ -z "$jobfile" ] ; then continue ; fi countfile=`echo "$jobfile" | sed 's/local$/lrms_job/'` # reset failure counter rm -f "$countfile" done done sleep 60 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/cancel-pbs-job.in0000644000000000000000000000013215067751327023651 xustar0030 mtime=1759498967.767700358 30 atime=1759498967.870493727 30 ctime=1759499029.943745099 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/cancel-pbs-job.in0000644000175000002070000000163015067751327025553 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in PBS. # echo "----- starting cancel_pbs_job -----" 1>&2 joboption_lrms="pbs" lrms_options="pbs_bin_path pbs_log_path" queue_options="pbs_queue_node nodememory" # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # define and execute qdel PBS_QDEL='qdel' if [ ! -z "$PBS_BIN_PATH" ] ; then PBS_QDEL="${PBS_BIN_PATH}/${PBS_QDEL}" fi echo executing qdel with job id $joboption_jobid 1>&2 "${PBS_QDEL}" "${joboption_jobid}" echo "----- exiting cancel_pbs_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/submit-pbs-job.in0000644000000000000000000000013215067751327023727 xustar0030 mtime=1759498967.767700358 30 atime=1759498967.870493727 30 ctime=1759499029.945869115 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/submit-pbs-job.in0000644000175000002070000004035515067751327025640 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Originally based on globus submission script for pbs # # Submits job to PBS. # Input: path to grami file # # The temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_pbs_job -----" 1>&2 joboption_lrms="pbs" lrms_options="pbs_bin_path pbs_log_path" queue_options="pbs_queue_node nodememory" # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init # perflog submission start time if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # check remote or local scratch is configured check_any_scratch ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript PBS_QSUB='qsub -r n -S /bin/bash -m n ' if [ ! 
-z "$PBS_BIN_PATH" ] ; then PBS_QSUB=${PBS_BIN_PATH}/${PBS_QSUB} fi is_cluster=true ############################################################## # Start job script ############################################################## echo "# PBS batch job script built by arex" > $LRMS_JOB_SCRIPT # write PBS output to 'comment' file echo "#PBS -e '${joboption_directory}.comment'" >> $LRMS_JOB_SCRIPT echo "#PBS -j eo">> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # choose queue if [ ! -z "${joboption_queue}" ] ; then echo "#PBS -q $joboption_queue" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must scale priority. PBS: -1024 -> 1023 ARC: 0-100 priority=$((joboption_priority * (1024+1023) / 100)) priority=$((priority-1024)) echo "#PBS -p ${priority}" >> $LRMS_JOB_SCRIPT fi # project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#PBS -A $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#PBS -N '$jobname'" >> $LRMS_JOB_SCRIPT fi echo "PBS jobname: $jobname" 1>&2 ############################################################## # (non-)parallel jobs ############################################################## set_count if [ "$joboption_count" = "1" ] ; then nodes_string="#PBS -l nodes=1" else if [ ! -z $joboption_numnodes ] ; then nodes_string="#PBS -l nodes=${joboption_numnodes}" else #in case no countpernode is requested in job, numnodes will also not be set, use count instead nodes_string="#PBS -l nodes=${joboption_count}" fi fi if [ ! -z $joboption_countpernode ] && [ $joboption_countpernode -gt 0 ] ; then nodes_string="${nodes_string}:ppn=${joboption_countpernode}" fi if [ ! -z "$CONFIG_pbs_queue_node" ] ; then nodes_string="${nodes_string}:${CONFIG_pbs_queue_node}" fi i=0 eval "var_is_set=\${joboption_nodeproperty_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_nodeproperty_$i}" nodes_string="${nodes_string}:${var_value}" i=$(( $i + 1 )) eval "var_is_set=\${joboption_nodeproperty_$i+yes}" done echo "$nodes_string" >> $LRMS_JOB_SCRIPT # exclusice execution: # there is no standard way to express this in PBS. # One way would be to request a full nodes memory, # but this is only feasible on a cluster with # homogenous nodes ############################################################## # Execution times (minutes) ############################################################## if [ ! -z "$joboption_cputime" ] ; then # TODO: parallel jobs, add initialization time, make walltime bigger, ... # is cputime for every process ? if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxcputime="$joboption_cputime" cputime_min=$(( $maxcputime / 60 )) cputime_sec=$(( $maxcputime - $cputime_min * 60 )) echo "#PBS -l cput=${cputime_min}:${cputime_sec}" >> $LRMS_JOB_SCRIPT fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $joboption_cputime * $walltime_ratio )) fi fi if [ ! 
-z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" walltime_min=$(( $maxwalltime / 60 )) walltime_sec=$(( $maxwalltime - $walltime_min * 60 )) echo "#PBS -l walltime=${walltime_min}:${walltime_sec}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem #pmem and pvmem are per process, and enforced by PBS via setting up memory ulimit. #But in case of using threads - single process is used and limited to per-process memory. #To support correct operation of threaded apps in PBS - submit-pbs-job set the general job memory (vmem) #Moreover according to the PBS manuals, setting vmem is supported on the sufficiently #bigger ammount of operating systems then pvmem. if [ ! -z "$joboption_memory" ] ; then memreq="${joboption_memory}" if [ ! -z $joboption_count ] && [ $joboption_count -gt 0 ] ; then memreq=$(( $joboption_count * $memreq )) fi fi #requested memory is used to simulate exclusive execution if [ "$joboption_exclusivenode" = "true" ]; then # using nodememory as maximum mem if [ -n "${CONFIG_nodememory}" ] ; then tempmem=`expr $CONFIG_nodememory / $joboption_countpernode ` if [ -n "$memreq" ]; then if [ "${tempmem}" -gt "${memreq}" ] ; then memreq="${tempmem}" fi else memreq="${tempmem}" fi else echo "WARNING: Could not set memory limit to simulate exclusive execution." 1>&2 fi fi if [ ! -z "$memreq" ] ; then echo "#PBS -l mem=${memreq}mb" >> $LRMS_JOB_SCRIPT fi gate_host=`uname -n` if [ -z "$gate_host" ] ; then echo "Can't get own hostname" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error." exit 1 fi ############################################################## # PBS stage in/out ############################################################## if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then ( cd "$joboption_directory" if [ $? -ne '0' ] ; then echo "Can't change to session directory: $joboption_directory" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Configuration error." 
exit 1 fi scratch_dir=`dirname "$joboption_directory"` echo "#PBS -W stagein=$RUNTIME_LOCAL_SCRATCH_DIR@$gate_host:$joboption_directory" >> $LRMS_JOB_SCRIPT STAGEOUT1="$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid@$gate_host:$scratch_dir" STAGEOUT2="$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid.diag@$gate_host:$joboption_directory.diag" echo "#PBS -W stageout=\"${STAGEOUT1},${STAGEOUT2}\"" >> $LRMS_JOB_SCRIPT ) fi echo "" >> $LRMS_JOB_SCRIPT echo "# Override umask of execution node (sometimes values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existence of executable, # there is no sense in checking for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error." exit 1 fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ##################################################### # Go to working dir and start job #################################################### echo "" >> $LRMS_JOB_SCRIPT echo "# Changing to session directory" >> $LRMS_JOB_SCRIPT echo "cd \$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT echo "export HOME=\$RUNTIME_JOB_DIR" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 ############################################################## # Diagnostics ############################################################## echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! "X$PBS_NODEFILE" = 'X' ] ; then if [ -r "$PBS_NODEFILE" ] ; then cat "$PBS_NODEFILE" | sed 's/\(.*\)/nodename=\1/' >> "$RUNTIME_JOB_DIAG" NODENAME_WRITTEN="1" else PBS_NODEFILE= fi fi EOSCR ############################################################## # Accounting (WN OS Detection) ############################################################## detect_wn_systemsoftware ############################################################## # Check intermediate result again ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## RTE_stage2 #################################################### # Clean up output files in local scratchdir #################################################### clean_local_scratch_dir_output ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR ############################################################## move_files_to_frontend ############################################################## # Finish accounting and exit job ############################################################## accounting_end ####################################### # Submit the job ####################################### echo "PBS job script built" 1>&2 # Execute qsub command cd "$joboption_directory" echo "PBS script follows:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat 
"$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 PBS_RESULT=1 PBS_TRIES=0 if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-pbs-job, JobScriptCreation: $t" >> $perflogfilesub fi while [ "$PBS_TRIES" -lt '10' ] ; do if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${PBS_QSUB} < $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR PBS_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-pbs-job, JobSiubmission: $t" >> $perflogfilesub fi if [ "$PBS_RESULT" -eq '0' ] ; then break ; fi if [ "$PBS_RESULT" -eq '198' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 PBS_TRIES=0 continue fi grep 'maximum number of jobs' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 PBS_TRIES=0 continue fi PBS_TRIES=$(( $PBS_TRIES + 1 )) sleep 2 done if [ $PBS_RESULT -eq '0' ] ; then job_id=`cat $LRMS_JOB_OUT` # This should be on the format 1414162.$hostname if [ "${job_id}" = "" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the pbs jobid for the job!" 1>&2 echo "Submission: Local submission client behaved unexpectedly." elif [ `echo "${job_id}" | grep -Ec "^[0-9]+"` != "1" ]; then echo "job *NOT* submitted successfully!" 1>&2 echo "badly formatted pbs jobid for the job: $job_id !" 1>&2 echo "Submission: Local submission client behaved unexpectedly." else echo "joboption_jobid=$job_id" >> $GRAMI_FILE echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_pbs_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from qsub: $PBS_RESULT !" 1>&2 echo "Submission: Local submission client failed." fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_pbs_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/PaxHeaders/README0000644000000000000000000000013215067751327021422 xustar0030 mtime=1759498967.767700358 30 atime=1759498967.870493727 30 ctime=1759499029.946957185 nordugrid-arc-7.1.1/src/services/a-rex/lrms/pbs/README0000644000175000002070000000003415067751327023321 0ustar00mockbuildmock00000000000000PBS/Torque control scripts. nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/cancel_common.sh0000644000000000000000000000013115067751327023106 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 29 ctime=1759499029.91082981 nordugrid-arc-7.1.1/src/services/a-rex/lrms/cancel_common.sh0000644000175000002070000000056015067751327025012 0ustar00mockbuildmock00000000000000# # Common block for cancel scripts # must be called with the grami file as argument # remember to set $joboption_lrms common_init () { # parse grami file no_grami_extra_processing=1 parse_grami_file $GRAMI_FILE # parse configuration parse_arc_conf # read pbs-specific environment . ${pkgdatadir}/configure-${joboption_lrms}-env.sh || exit $? 
} nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/ll0000644000000000000000000000013215067751426020310 xustar0030 mtime=1759499030.138439891 30 atime=1759499034.764510185 30 ctime=1759499030.138439891 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/0000755000175000002070000000000015067751426022267 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022421 xustar0030 mtime=1759498967.766520106 30 atime=1759498967.870493727 30 ctime=1759499030.132104077 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/Makefile.am0000644000175000002070000000014215067751327024320 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-ll-env.sh pkgdata_SCRIPTS = scan-ll-job submit-ll-job cancel-ll-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356022433 xustar0030 mtime=1759498990.422649514 29 atime=1759499018.42926197 30 ctime=1759499030.134571055 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/Makefile.in0000644000175000002070000005530215067751356024343 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/ll ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-ll-job cancel-ll-job scan-ll-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-ll-job.in \ $(srcdir)/scan-ll-job.in $(srcdir)/submit-ll-job.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ 
GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = 
@SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-ll-env.sh pkgdata_SCRIPTS = scan-ll-job submit-ll-job cancel-ll-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/ll/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/ll/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status 
@case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-ll-job: $(top_builddir)/config.status $(srcdir)/submit-ll-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-ll-job: $(top_builddir)/config.status $(srcdir)/cancel-ll-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-ll-job: $(top_builddir)/config.status $(srcdir)/scan-ll-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ 
list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/cancel-ll-job.in0000644000000000000000000000013215067751327023317 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499030.135827912 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/cancel-ll-job.in0000644000175000002070000000161515067751327025224 0ustar00mockbuildmock00000000000000#!@posix_shell@ # # Cancel job running in LoadLeveler. # progname=`basename $0` echo "----- starting $progname -----" 1>&2 joboption_lrms="ll" lrms_options="ll_bin_path ll_consumable_resources ll_parallel_single_jobs" # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # llcancel LL_DEL='llcancel' if [ ! -z "$LL_BIN_PATH" ] ; then LL_DEL="${LL_BIN_PATH}/${LL_DEL}" fi echo executing job removal with job id $joboption_jobid 1>&2 "${LL_DEL}" "${joboption_jobid}" echo "----- exiting $progname -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/submit-ll-job.in0000644000000000000000000000013215067751327023375 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499030.138281227 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/submit-ll-job.in0000644000175000002070000003011215067751327025274 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # Submits job to loadleveler. 
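# For orientation, a sketch of the kind of script this wrapper builds below (illustrative only; all names and values are hypothetical, the real directives depend on the job options): # # @ job_name = my_job # # @ output = /path/to/sessiondir.comment # # @ error = /path/to/sessiondir.comment # # @ wall_clock_limit = 3630 , 3600 # # @ resources = ConsumableCpus(1) ConsumableMemory(2000) # # @ class=short # # @ notification = never # # @ queue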
# # A temporary job script is created for the submission and then removed # at the end of this script. # echo "----- starting submit_ll_job -----" 1>&2 joboption_lrms="ll" lrms_options="ll_bin_path ll_consumable_resources ll_parallel_single_jobs" # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init # perflog perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # GD enforce this for the moment RUNTIME_FRONTEND_SEES_NODE='' RUNTIME_NODE_SEES_FRONTEND='yes' ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 LL_SUB='llsubmit' if [ ! -z "$LL_BIN_PATH" ] ; then LL_SUB=${LL_BIN_PATH}/${LL_SUB} fi mktempscript ############################################################## # Start job script ############################################################## echo "# LL batch job script built by arex" > $LRMS_JOB_SCRIPT # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "# @ job_name = $jobname" >> $LRMS_JOB_SCRIPT fi echo "LL jobname: $jobname" 1>&2 echo "# @ output = ${joboption_directory}.comment" >> $LRMS_JOB_SCRIPT echo "# @ error = ${joboption_directory}.comment" >> $LRMS_JOB_SCRIPT # Project account number for accounting if [ !
-z "${joboption_rsl_project}" ] ; then echo "# @ account_no = $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi ############################################################## # (non-)parallel jobs ############################################################## set_count if [ $joboption_count -gt 1 ] || [ "$LL_PARALLEL_SINGLE_JOBS" = "yes" ] ; then echo "# @ job_type = parallel" >> $LRMS_JOB_SCRIPT echo "# @ total_tasks = $joboption_count" >> $LRMS_JOB_SCRIPT echo "# @ node = $joboption_numnodes" >> $LRMS_JOB_SCRIPT fi #set node to exclusive if [ "$joboption_exclusivenode" = "true" ]; then echo "# @ node_usage = not_shared " >> $LRMS_JOB_SCRIPT fi ############################################################## # Execution times (obtained in seconds) ############################################################## # cputime/walltime is obtained in seconds via $joboption_cputime and $joboption_walltime if ( [ -n "$joboption_cputime" ] && [ $joboption_cputime -gt 0 ] ) ; then # CPU time must be given per-task for LL cputime_pertask=$(( $joboption_cputime / $joboption_count )) cputime_hard_pertask=$(($(( $cputime_pertask * $time_hardlimit_ratio))+30)) echo "# @ cpu_limit = ${cputime_hard_pertask} , ${cputime_pertask}" >> $LRMS_JOB_SCRIPT fi if [ -n "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi joboption_walltime_hard=$(($(( $joboption_walltime * $time_hardlimit_ratio))+30)) echo "# @ wall_clock_limit = ${joboption_walltime_hard} , ${joboption_walltime}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem # There are soft and hard limits for virtual memory consumption in LL # The limits are interpreted by LoadLeveler as per process in a # parallel job. There is no need to recalculate the mem limit. if [ -n "$joboption_memory" ] ; then joboption_memory_hard=$(( $joboption_memory * $memory_hardlimit_ratio )) requirements="(Memory > ${joboption_memory_hard})" preferences="(Memory > ${joboption_memory})" if [ "$LL_CONSUMABLE_RESOURCES" != "yes" ]; then echo "# @ requirements = ${requirements}" >> $LRMS_JOB_SCRIPT echo "# @ preferences = ${preferences}" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Consumable resources # One cpu should be requested per task created. I.e. per count. 
############################################################# if [ "$LL_CONSUMABLE_RESOURCES" = "yes" ]; then echo "# @ resources = ConsumableCpus(1) ConsumableMemory(${joboption_memory})" >> $LRMS_JOB_SCRIPT fi ############################################################## # Override umask ############################################################## #echo "umask 077" >> $LRMS_JOB_SCRIPT #echo 'exec > /var/tmp/grid-job-output.$$ 2>&1' >> $LRMS_JOB_SCRIPT ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existence of executable ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi program_start=`echo ${joboption_arg_0} | cut -c 1 2>&1` if [ "$program_start" != '$' ] && [ "$program_start" != '/' ] ; then if [ ! -f $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable does not exist, or permission denied.' 1>&2 echo " Executable $joboption_directory/${joboption_arg_0}" 1>&2 echo " whoami: "`whoami` 1>&2 echo " ls -l $joboption_directory/${joboption_arg_0}: "`ls -l $joboption_directory/${joboption_arg_0}` 1>&2 exit 1 fi if [ ! -x $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable is not executable' 1>&2 exit 1 fi fi ################################################################## #Read queue from config or figure out which queue to use ################################################################## if [ ! -z "${joboption_queue}" ] ; then class=$joboption_queue else #if queue is not set we must choose one LL_CLASS='llclass -l' if [ ! -z "$LL_BIN_PATH" ] ; then LL_CLASS=${LL_BIN_PATH}/${LL_CLASS} fi queue_names=`${LL_CLASS}|grep Name|awk '{split($0,field," ");print field[2]}'` #default will be shortest queue if [ ! -n "$joboption_walltime" ] ; then joboption_walltime_hard=1 fi queue_time_sel=0 for queue in $queue_names do queue_time=`${LL_CLASS} ${queue}|grep Wall_clock_limit|awk '{split($0,field,"(");print field[2]}'|awk '{split($0,field," ");print field[1]}'` if [ ${joboption_walltime_hard} -lt ${queue_time} ] ; then if [ ${queue_time_sel} -eq 0 ] || [ ${queue_time_sel} -gt ${queue_time} ] ; then class=${queue} queue_time_sel=${queue_time} fi fi done fi echo "# @ class=${class}" >> $LRMS_JOB_SCRIPT ################################################################### #Priority of jobs ################################################################## if [ ! -z "$joboption_priority" ]; then # LL: priority from 0-100.
50 is default # We can just use ARC priority directly echo "# @ user_priority = ${joboption_priority}" >> $LRMS_JOB_SCRIPT fi ################################################################### #Queue job #No mail notification ################################################################## echo "# @ notification = never" >> $LRMS_JOB_SCRIPT echo "# @ queue" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript setup_runtime_env ################################################################### #setup soft limit trap ################################################################## echo "trap \"echo 'exitcode=24'>>\$RUNTIME_JOB_DIAG;exit 24\" SIGXCPU" >> $LRMS_JOB_SCRIPT ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration ############################################################## RTE_stage1 if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then echo "Nodes detached from gridarea are not supported when LL is used. Aborting job submit" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi ############################################################## # Accounting (WN OS Detection) ############################################################## detect_wn_systemsoftware ############################################################## # Execution ############################################################## cd_and_run echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## RTE_stage2 ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_frontend ############################################################## # Finish accounting and exit job ############################################################## accounting_end ####################################### # Submit the job ####################################### echo "ll job script built" 1>&2 #job creation finished if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-ll-job, JobScriptCreation: $t" >> $perflogfilesub fi # Execute sub command cd "$joboption_directory" echo "LL script follows:" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "" 1>&2 if [ ! 
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi ${LL_SUB} $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR LLSUB_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-ll-job, JobSubmission: $t" >> $perflogfilesub fi if [ $LLSUB_RESULT -eq '0' ] ; then echo "LRMS_JOB_OUT is $LRMS_JOB_OUT" job_id=`cat $LRMS_JOB_OUT | awk '{split($0,field,"\"");print field[2]}'`.0 if [ "${job_id}" = "" ] ; then echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the LL jobid for the job!" 1>&2 else echo "joboption_jobid=$job_id" >> $GRAMI_FILE echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_ll_job -----" 1>&2 echo "" 1>&2 exit 0 fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from llsubmit!" 1>&2 fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_ll_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/configure-ll-env.sh0000644000000000000000000000013215067751327024075 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499030.133349509 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/configure-ll-env.sh0000644000175000002070000000113615067751327026000 0ustar00mockbuildmock00000000000000# # set environment variables: # LL_BIN_PATH # # Conditionaly enable performance logging init_perflog # Path to ll commands LL_BIN_PATH=$CONFIG_ll_bin_path if [ ! -d ${LL_BIN_PATH} ] ; then echo "Could not set LL_BIN_PATH." 1>&2 exit 1 fi # Consumable resources LL_CONSUMABLE_RESOURCES=${LL_CONSUMABLE_RESOURCES:-$CONFIG_ll_consumable_resources} # Enable parallel single jobs LL_PARALLEL_SINGLE_JOBS=${LL_PARALLEL_SINGLE_JOBS:-$CONFIG_ll_parallel_single_jobs} # Local scratch disk RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-$CONFIG_scratchdir} export RUNTIME_LOCAL_SCRATCH_DIR nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/scan-ll-job.in0000644000000000000000000000013215067751327023016 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499030.137074571 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/scan-ll-job.in0000644000175000002070000001711515067751327024725 0ustar00mockbuildmock00000000000000#!/bin/bash # Helper script to flag done LoadLeveler jobs. # The script is called periodically by the arex. # # This function retrieve the jobs status and id in one shot # look for jobs which have a known status but are not completed (!=C) # and save the localid of these jobs in the string variable # $outLocalIdsString. # # The input variable is a string list of localid to check. # # Example of usage: # get_bunch_jobs_status "$inLocalIdsString" outLocalIdsString="" # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi # Does the control directory exist? control_dir="$1" test -d "$control_dir" || exit 1 joboption_lrms="ll" lrms_options="ll_bin_path ll_consumable_resources ll_parallel_single_jobs" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? 
# run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Assume that gm-kick is installed in the same directory GMKICK=${pkglibexecdir}/gm-kick # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi my_id=`id -u` control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } get_bunch_jobs_status() { #get the string list of jobs loop=`$LL_BIN_PATH/llq -r %st %id $1` if [ $? -eq 0 ]; then for elm in $loop do if [ `echo $elm | grep '^[A-Z]\{1,2\}!.\+$'` ]; then if [ ! `echo $elm | grep '^C!'` ]; then outLocalIdsString=$outLocalIdsString" "`echo $elm | awk -F! '{ print $NF}'` fi fi done fi } # mergedlist: array where each element is made of # key:value, where key is the arc jobid and value is the # localid mergedlist=() # inLocalIdsString: in this string we save the localids retrieved from # the arc .local files, separated by spaces inLocalIdsString="" findoutput=$(find "$control_dir/processing" -maxdepth 1 -type f -name '*.status' | sed 's/.*\/\([^\]*\)\.status$/\1/') while read jobid do i=$(control_path "$control_dir" "$jobid" "local") # Continue if no glob expansion or other problems test -f "$i" || continue donefile=$(control_path "$control_dir" "$jobid" "lrms_done") statusfile="${control_dir}/processing/${jobid}.status" # Continue if the job is already flagged as done test -f "$donefile" && continue if [ ! -f "$statusfile" ] ; then continue ; fi gmstatus=`cat "$statusfile"` if [ "$gmstatus" != "INLRMS" ] && [ "$gmstatus" != "CANCELING" ] ; then continue ; fi # Get local LRMS id of job by evaluating the line with localid localid=`grep ^localid= $i|head -1` eval $localid # Did we get a local id? test "$localid" = "" && continue # HACK: save the localid to be queried into inLocalIdsString # associate the localid to its jobid and save them in a list inLocalIdsString=$inLocalIdsString" "$localid mergedlist+=("$jobid:$localid") done <<< "$findoutput" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, ControldirTraversal: $t" >> $perflogfile fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Query LoadLeveler for the jobs # and save the not yet completed ones into $outLocalIdsString. # Call the function only if the string contains at least one alphanumeric character if [[ $inLocalIdsString =~ [0-9a-zA-Z] ]]; then get_bunch_jobs_status "$inLocalIdsString" fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, llq -r %st %id: $t" >> $perflogfile fi if [ !
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi numelem=0 # Start the loop based on element of the mergelist for element in ${mergedlist[@]} do # Divide the jobid from the localid jobid=`echo $element | awk '{split($0,a,":"); print a[1]}'` localid=`echo $element | awk '{split($0,a,":"); print a[2]}'` # Exclude the not completed jobs stored into the $outLocalIdsString if [[ $outLocalIdsString == *$localid* ]] then continue fi numelem=$((numelem+1)) donefile=$(control_path "${control_dir}" "${jobid}" "lrms_done") statusfile="${control_dir}/processing/${jobid}.status" jobfile=$(control_path "${control_dir}" "${jobid}" "local") errorsfile=$(control_path "${control_dir}" "${jobid}" "errors") # Continue if the job is already flagged as done? test -f "$donefile" && continue if [ ! -f "$statusfile" ] ; then continue ; fi gmstatus=`cat "$statusfile"` exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' "${jobfile}" | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } if [ ! -z "$sessiondir" ] ; then # have chance to obtain exit code exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') else continue fi if [ ! -z "$exitcode" ] ; then if [ "$exitcode" = "152" -o $exitcode = "24" ] ; then exitcode="24" save_commentfile "$uid" "${commentfile}" "$errorsfile" echo "$exitcode Job exceeded time limit." > "$donefile" # If job exceeded time, then it will have been killed and no cputime/walltime has been written walltime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Wall Clk Hard Limit:.*(\([0-9]*\) seconds.*/\1/p'` usertime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Step Cpu Hard Limit:.*(\([0-9]*\) seconds.*/\1/p'` starttime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Dispatch Time: \(.*\)/\1/p'` endtime=`$LL_BIN_PATH/llq -l $localid|sed -n 's/^ *Completion Date: \(.*\)/\1/p'` if [ -n "$starttime" ]; then date_to_utc_seconds "$starttime" seconds_to_mds_date "$return_date_seconds" starttime=$return_mds_date fi if [ -n "$endtime" ]; then date_to_utc_seconds "$endtime" seconds_to_mds_date "$return_date_seconds" endtime=$return_mds_date fi job_read_diag [ -n "$walltime" ] && WallTime=${walltime} [ -n "$usertime" ] && UserTime=${usertime} [ -n "$usertime" ] && KernelTime=0 [ -n "$starttime" ] && LRMSStartTime=${starttime} [ -n "$endtime" ] && LRMSEndTime=${endtime} #This needs investigating, might be user program exit code [ -n "$exitcode" ] && LRMSExitcode=$exitcode job_write_diag ${GMKICK} -j "${jobid}" "${control_dir}" continue fi # job finished and exit code is known save_commentfile "$uid" "${commentfile}" "$errorsfile" echo "$exitcode Executable finished with exit code $exitcode" >> "$donefile" ${GMKICK} -j "${jobid}" "${control_dir}" continue fi exitcode=-1 save_commentfile "$uid" "${commentfile}" "$errorsfile" echo "$exitcode Job finished with unknown exit code" >> "$donefile" ${GMKICK} -j "${jobid}" "${control_dir}" done if [ ! 
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-ll-job, JobHandling, Handled= $numelem: $t" >> $perflogfile fi sleep 60 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/PaxHeaders/README0000644000000000000000000000013215067751327021245 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499030.139368795 nordugrid-arc-7.1.1/src/services/a-rex/lrms/ll/README0000644000175000002070000000003515067751327023145 0ustar00mockbuildmock00000000000000Load Leveler control script. nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/scan_common.sh0000644000000000000000000000013215067751327022606 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.871493742 30 ctime=1759499029.912920219 nordugrid-arc-7.1.1/src/services/a-rex/lrms/scan_common.sh0000644000175000002070000002155515067751327024520 0ustar00mockbuildmock00000000000000# This file contains functions that are used througout the scan-*-job scripts. progname=$(basename "$0") # # scan-*-jobs has STDOUT redirected to /dev/null and STDERR redirected to # helperlog # log () { echo "[`date +%Y-%m-%d\ %T`] $progname: $*" 1>&2; } common_init () { # parse configuration parse_arc_conf # read pbs-specific environment . ${pkgdatadir}/configure-${joboption_lrms}-env.sh || exit $? # set common LRMS environmental variables init_lrms_env } perflog_common () { perflog_dname=$1 d=`date +%F` perflog_fname=${perflog_dname}/system${d}.perflog jobstatus_dir=$2 #gather performance information loadavg=`cat /proc/loadavg` memtotal=$(a=`grep MemTotal /proc/meminfo`; echo ${a#MemTotal:}) memfree=$(a=`grep MemFree /proc/meminfo`; echo ${a#MemFree:}) if [ -d "$jobstatus_dir" ]; then jsd_size=`ls -l $jobstatus_dir| wc -l` jsdP_size=`ls -l $jobstatus_dir/processing | wc -l` fi #log the loadavg, stripping the last elemenmt, and the rest of the gathered info echo "[`date +%Y-%m-%d\ %T`] LoadAVG: ${loadavg% *}" >> $perflog_fname; echo "[`date +%Y-%m-%d\ %T`] MemStat: $memtotal $memfree" >> $perflog_fname; echo "[`date +%Y-%m-%d\ %T`] Control dir: $jsd_size $jsdP_size" >> $perflog_fname; # gather gridftp info gftp_pid=`cat /run/gridftpd.pid` gsiftp=`top -b -n 1 -p ${gftp_pid} | grep -w ${gftp_pid} | sed -e 's/[[:space:]]*$//'` printf "[`date +%Y-%m-%d\ %T`] Gridftpd: $gsiftp\n" >> $perflog_fname; # gather slapd info slapd_pid=`cat /run/arc/bdii/db/slapd.pid` slapd=`top -b -n 1 -p ${slapd_pid} | grep -w ${slapd_pid} | sed -e 's/[[:space:]]*$//'` printf "[`date +%Y-%m-%d\ %T`] Slapd: ${slapd}\n" >> $perflog_fname; # gather a-rex information arex_pid=`cat /run/arched-arex.pid` arex=`top -b -n 1 -p ${arex_pid} | grep -w ${arex_pid} | sed -e 's/[[:space:]]*$//'` prinft "[`date +%Y-%m-%d\ %T`] A-Rex: ${arex}\n" >> $perflog_fname; unset perflog_dname unset perflog_fname unset jobstatus_dir } # This function takes a time interval formatted as 789:12:34:56 (with days) or # 12:34:56 (without days) and transforms it to seconds. It returns the result in # the return_interval_seconds variable. 
interval_to_seconds () { _interval_dhms=$1 _interval_size=`echo $_interval_dhms | grep -o : | wc -l` if [ $_interval_size -eq 2 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*60*60+$2*60+$3;}'` elif [ $_interval_size -eq 3 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*24*60*60+$2*60*60+$3*60+$4;}'` else echo "Bad formatting of time interval: $_interval_dhms" >&2 return_interval_seconds= fi unset _interval_dhms _interval_size } # This function takes a date string in the form recognized by the date utility # and transforms it into seconds in UTC time. It returns the result in the # return_date_seconds variable. date_to_utc_seconds () { _date_string=$1 return_date_seconds= [ -z "$_date_string" ] && return _date_seconds=`date -d "$_date_string" +%s` [ ! $? = 0 ] && return date_seconds_to_utc "$_date_seconds" unset _date_string _date_seconds } # This function takes a timestamp as seconds in local time and transforms it into # seconds in UTC time. It returns the result in the return_date_seconds variable. date_seconds_to_utc () { _date_seconds=$1 _offset_hms=`date +"%::z"` _offset_seconds=`echo $_offset_hms | tr ':' ' ' | awk '{ print $1*60*60+$2*60+$3; }'` return_date_seconds=$(( $_date_seconds - ($_offset_seconds) )) unset _date_seconds _offset_hms _offset_seconds } # This function takes a timestamp as seconds and transforms it to Mds date # format (YYYYMMDDHHMMSSZ). It returns the result in the return_mds_date # variable. seconds_to_mds_date () { _date_seconds=$1 return_mds_date=`date -d "1970-01-01 UTC $_date_seconds seconds" +"%Y%m%d%H%M%SZ"` unset _date_seconds } # # gets the numerical uid of the owner of a file # get_owner_uid () { script='my $filename = $ARGV[0]; exit 1 unless $filename; my @stat = stat($ARGV[0]); exit 1 unless defined $stat[4]; print "$stat[4]\n"; ' /usr/bin/perl -we "$script" "$1" } # # If running as root, attempts to switch to the uid passed as the first # argument and then runs the command passed as the second argument in a shell. # The remaining arguments are passed as arguments to the shell. # do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 ) { my ($name, $pass, $uid, $gid, $quota, $comment, $gcos, $dir, $shell, $expire) = getpwuid($uid); eval { $GID = $gid; $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $@; } system("'$POSIX_SHELL'","-c",@args); exit 0 if $? eq 0; exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" } # # Input variables: # * sessiondir # * uid # Output variables: # * diagstring -- the whole contents of .diag # * nodename # * WallTime # * UserTime # * KernelTime # * TotalMemory # * ResidentMemory # * LRMSStartTime # * LRMSEndTime # * exitcode # job_read_diag() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_read_diag requires the following to be set: uid sessiondir"; return 1; } diagfile=$sessiondir.diag; [ -f "$diagfile" ] || { log "diag file not found at: $sessiondir.diag"; return 1; } diagstring=$(do_as_uid $uid "tail -n 1000 '$diagfile'") [ $? 
= 0 ] || { log "cannot read diag file at: $diagfile"; return 1; } nodename=$(echo "$diagstring" | sed -n 's/^nodename=\(..*\)/\1/p') WallTime=$(echo "$diagstring" | sed -n 's/^WallTime=\([0-9.]*\)s/\1/p' | tail -n 1) UserTime=$(echo "$diagstring" | sed -n 's/^UserTime=\([0-9.]*\)s/\1/p' | tail -n 1) KernelTime=$(echo "$diagstring" | sed -n 's/^KernelTime=\([0-9.]*\)s/\1/p' | tail -n 1) TotalMemory=$(echo "$diagstring" | sed -n 's/^AverageTotalMemory=\([0-9.]*\)kB/\1/p' | tail -n 1) ResidentMemory=$(echo "$diagstring" | sed -n 's/^AverageResidentMemory=\([0-9.]*\)kB/\1/p' | tail -n 1) LRMSStartTime=$(echo "$diagstring" | sed -n 's/^LRMSStartTime=\([0-9][0-9]*Z\)/\1/p' | tail -n 1) LRMSEndTime=$(echo "$diagstring" | sed -n 's/^LRMSEndTime=\([0-9][0-9]*Z\)/\1/p' | tail -n 1) exitcode=$(echo "$diagstring" | sed -n 's/^exitcode=\([0-9]*\)/\1/p' | tail -n 1) for key in nodename WallTime UserTime KernelTime AverageTotalMemory AverageResidentMemory \ exitcode LRMSStartTime LRMSEndTime LRMSExitcode LRMSMessage; do diagstring=$(echo "$diagstring" | grep -v "^$key=") done } # # Input variables: # * sessiondir # * uid # * LRMSExitcode # * LRMSMessage # + all output variables from job_read_diag # OBS: nodename should be a multi-line string, one line per node (or is it per cpu used?) # OBS: UserTime, KernelTime, Walltime must be given in seconds (without unit at the end) # OBS: TotalMemory, ResidentMemory must be given in kB (without unit at the end) # OBS: LRMSStartTime, LRMSEndTime must be of Mds form YYYYMMDDHHMMSSZ (note: UTC timezone) # job_write_diag() { [ -n "$uid" ] && [ -n "$sessiondir" ] \ || { log "job_write_diag requires the following to be set: uid sessiondir"; return 1; } diagfile=$sessiondir.diag; { echo "$diagstring" && echo [ -n "$nodename" ] && echo "$nodename" | sed -n 's/^\(..*\)/nodename=\1/p' [ -n "$WallTime" ] && echo "WallTime=${WallTime}s" [ -n "$Processors" ] && echo "Processors=${Processors}" [ -n "$UserTime" ] && echo "UserTime=${UserTime}s" [ -n "$KernelTime" ] && echo "KernelTime=${KernelTime}s" [ -n "$TotalMemory" ] && echo "AverageTotalMemory=${TotalMemory}kB" [ -n "$ResidentMemory" ] && echo "AverageResidentMemory=${ResidentMemory}kB" [ -n "$LRMSStartTime" ] && echo "LRMSStartTime=$LRMSStartTime" [ -n "$LRMSEndTime" ] && echo "LRMSEndTime=$LRMSEndTime" [ -n "$LRMSMessage" ] && echo "LRMSMessage=$LRMSMessage" [ -n "$LRMSExitcode" ] && echo "LRMSExitcode=$LRMSExitcode" [ -n "$exitcode" ] && echo "exitcode=$exitcode" } | do_as_uid $uid "cat > '$diagfile'" [ $? = 0 ] || { log "cannot write diag file at: $diagfile"; return 1; } } # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors # This file can also contain a message from the LRMS (i.e. the reason for killing the job). 
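# Typical call from a scan-*-job script (the variables are the usual per-job paths, hypothetical here): # save_commentfile "$uid" "${sessiondir}.comment" "$errorsfile"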
save_commentfile () { uid=$1 commentfile=$2 errorsfile=$3 action=" { echo '------- Contents of output stream forwarded by the LRMS ---------' cat '$commentfile' 2> /dev/null echo '------------------------- End of output -------------------------' } >> '$errorsfile' " do_as_uid "$uid" "$action" } nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/sge0000644000000000000000000000013215067751426020457 xustar0030 mtime=1759499030.041811973 30 atime=1759499034.764510185 30 ctime=1759499030.041811973 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/0000755000175000002070000000000015067751426022436 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022570 xustar0030 mtime=1759498967.768121105 30 atime=1759498967.871493742 30 ctime=1759499030.036840461 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/Makefile.am0000644000175000002070000000014615067751327024473 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-sge-env.sh pkgdata_SCRIPTS = scan-sge-job submit-sge-job cancel-sge-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356022602 xustar0029 mtime=1759498990.55103811 30 atime=1759499018.754266908 30 ctime=1759499030.038867944 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/Makefile.in0000644000175000002070000005532515067751356024517 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/sge ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-sge-job scan-sge-job cancel-sge-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-sge-job.in \ $(srcdir)/scan-sge-job.in $(srcdir)/submit-sge-job.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = 
@FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ 
SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-sge-env.sh pkgdata_SCRIPTS = scan-sge-job submit-sge-job cancel-sge-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/sge/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/sge/Makefile Makefile: $(srcdir)/Makefile.in 
$(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-sge-job: $(top_builddir)/config.status $(srcdir)/submit-sge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-sge-job: $(top_builddir)/config.status $(srcdir)/scan-sge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-sge-job: $(top_builddir)/config.status $(srcdir)/cancel-sge-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | 
sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/configure-sge-env.sh0000644000000000000000000000013215067751327024413 xustar0030 mtime=1759498967.768481309 30 atime=1759498967.871493742 30 ctime=1759499030.037861605 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/configure-sge-env.sh0000644000175000002070000000250015067751327026312 0ustar00mockbuildmock00000000000000# set environment variables: # SGE_BIN_PATH # SGE_ROOT # SGE_CELL # SGE_QMASTER_PORT # SGE_EXECD_PORT # # Conditionally enable performance logging init_perflog ############################################################## # Initialize SGE environment variables ############################################################## SGE_ROOT=${CONFIG_sge_root:-$SGE_ROOT} if [ -z "$SGE_ROOT" ]; then echo 'SGE_ROOT not set' 1>&2 return 1 fi SGE_CELL=${SGE_CELL:-default} SGE_CELL=${CONFIG_sge_cell:-$SGE_CELL} export SGE_ROOT SGE_CELL if [ ! -z "$CONFIG_sge_qmaster_port" ]; then export SGE_QMASTER_PORT=$CONFIG_sge_qmaster_port fi if [ ! -z "$CONFIG_sge_execd_port" ]; then export SGE_EXECD_PORT=$CONFIG_sge_execd_port fi ############################################################## # Find path to SGE executables ############################################################## # 1. use sge_bin_path config option, if set if [ ! -z "$CONFIG_sge_bin_path" ]; then SGE_BIN_PATH=$CONFIG_sge_bin_path; fi # 2. otherwise see if qsub can be found in the path if [ -z "$SGE_BIN_PATH" ]; then qsub=$(type -p qsub) SGE_BIN_PATH=${qsub%/*} unset qsub fi if [ ! -x "$SGE_BIN_PATH/qsub" ]; then echo 'SGE executables not found!
Check that sge_bin_path is defined' 1>&2 return 1 fi export SGE_BIN_PATH nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/cancel-sge-job.in0000644000000000000000000000013215067751327023635 xustar0030 mtime=1759498967.768481309 30 atime=1759498967.871493742 30 ctime=1759499030.039855556 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/cancel-sge-job.in0000644000175000002070000000154115067751327025540 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in SGE. # echo "----- starting cancel_sge_job -----" 1>&2 joboption_lrms="sge" lrms_options="sge_root sge_cell sge_qmaster_port sge_execd_port sge_bin_path sge_jobopts" queue_options="sge_jobopts" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # qdel echo executing $SGE_BIN_PATH/qdel with job id $joboption_jobid 1>&2 $SGE_BIN_PATH/qdel "${joboption_jobid}" echo "----- exiting cancel_sge_job -----" 1>&2 echo "" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/README0000644000000000000000000000013115067751327021413 xustar0030 mtime=1759498967.768481309 30 atime=1759498967.871493742 29 ctime=1759499030.04287028 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/README0000644000175000002070000000004115067751327023311 0ustar00mockbuildmock00000000000000Sun Grid Engine control scripts. nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/submit-sge-job.in0000644000000000000000000000013215067751327023713 xustar0030 mtime=1759498967.768481309 30 atime=1759498967.871493742 30 ctime=1759499030.041811973 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/submit-sge-job.in0000644000175000002070000003400515067751327025617 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # Submits job to Sun Grid Engine (SGE). # Input: path to grami file (same as Globus). # # A temporary job script is created for the submission and then removed # at the end of this script. # echo "----- starting submit_sge_job -----" 1>&2 joboption_lrms="sge" lrms_options="sge_root sge_cell sge_qmaster_port sge_execd_port sge_bin_path sge_jobopts" queue_options="sge_jobopts" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init #Log performance perflogfilesub="${perflogdir}/submission.perflog" if [ ! 
-z "$perflogdir" ]; then #start time stamp start_ts=`date +%s.%N` fi ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 # Force shell /bin/sh, other qsub options have been moved to the job script SGE_QSUB='qsub -S @posix_shell@' SGE_QCONF=qconf if [ "$SGE_BIN_PATH" ] ; then SGE_QSUB=${SGE_BIN_PATH}/${SGE_QSUB} SGE_QCONF=${SGE_BIN_PATH}/${SGE_QCONF} fi mktempscript ############################################################## # Start job script ############################################################## echo '#!@posix_shell@' > $LRMS_JOB_SCRIPT echo "# SGE batch job script built by arex" >> $LRMS_JOB_SCRIPT # Job not rerunable: echo "#$ -r n" >> $LRMS_JOB_SCRIPT # Don't send mail when job finishes: echo "#$ -m n" >> $LRMS_JOB_SCRIPT # Mix standard output and standard error: echo "#$ -j y" >> $LRMS_JOB_SCRIPT # Write output to comment file: echo "#$ -o ${joboption_directory}/.comment" >> $LRMS_JOB_SCRIPT ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #first we must scale priority. SGE: -1023 -> 1024 ARC: 0-100 #user can only decrease priority: i.e. -1023 -> 0 (info from gsciacca@lhep.unibe.ch) #Same problem as SLURM. We can only prioritize grid jobs. Locally submitted jobs will get highest priority. priority=$((joboption_priority * 1023 / 100)) priority=$((priority-1023)) echo "#$ -p ${priority}" >> $LRMS_JOB_SCRIPT fi # Choose queue. echo "#$ -q $joboption_queue" >> $LRMS_JOB_SCRIPT # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#$ -N \"$jobname\"" >> $LRMS_JOB_SCRIPT fi echo "SGE jobname: $jobname" 1>&2 ############################################################## # (non-)parallel jobs ############################################################## set_count ############################################################## # parallel jobs ############################################################## # In addition to the number of parallel tasks, also a valid # parallel environment (PE) must be set for SGE. # # The selection of PE is done by Runtime Environment setup script in the zero # stage. The user has to request a proper RE in addition to setting the # "count" -property in the xrsl. The RE script must set the name of the desired # PE to joboption_nodeproperty_# -variable (# is a number starting from zero, # RE should use the lowest previously undefined number). This script then # searches through the joboption_nodeproperty_# variables and compares them to # the PE list obtained from SGE. The first matching PE name is used. # if [ -n "$joboption_nodeproperty_0" ]; then i=0 sge_parallel_environment_list=`$SGE_QCONF -spl` while eval jope=\${joboption_nodeproperty_$i} && test "$jope" ; do for ipe in $sge_parallel_environment_list ; do if [ "$jope" = "$ipe" ] ; then break 2 # now $jope contains the requested parallel env fi done i=$(($i + 1)) done if [ -n "$jope" ] ; then echo "#\$ -pe $jope $joboption_count" >> $LRMS_JOB_SCRIPT else echo 'ERROR: Setting parallel environment failed.' 
1>&2 fi fi if [ "$joboption_exclusivenode" = "true" ]; then sge_excl_complex=`$SGE_QCONF -sc | grep EXCL | head -n 1` if [ -n "$sge_excl_complex" ]; then sge_excl_complex_name=`echo $sge_excl_complex | awk '{print $1}'` echo "#\$ -l ${sge_excl_complex_name}=true" >> $LRMS_JOB_SCRIPT else echo "WARNING: Exclusive execution support is not configured by this Grid Engine" 1>&2 echo "WARNING: Example configuration: https://wiki.nordugrid.org/index.php/LRMS_Backends/Testbeds" 1>&2 fi fi ############################################################## # Execution times (obtained in seconds) ############################################################## # SGE has soft and hard limits (soft = SIGUSR1, hard = SIGKILL sent to the job), # let's allow time_hardlimit_ratio extra before the hard limit. # cputime/walltime is obtained in seconds via $joboption_cputime and $joboption_walltime # parallel jobs, add initialization time, soft/hard limit configurable... if ( [ -n "$joboption_cputime" ] && [ $joboption_cputime -gt 0 ] ) ; then # SGE enforces a job-total cpu time limit, but it expects per-slot limits # in h_cpu and s_cpu. It then scales these with the number of requested slots # before enforcing them. cputime_perslot=$(( $joboption_cputime / $joboption_count )) cputime_hard_perslot=$(( $cputime_perslot * $time_hardlimit_ratio )) s_cpu_requestable=$($SGE_QCONF -sc|awk '($1=="s_cpu" && ( $5=="YES" || $5=="FORCED" )){print $5}') h_cpu_requestable=$($SGE_QCONF -sc|awk '($1=="h_cpu" && ( $5=="YES" || $5=="FORCED" )){print $5}') opt="#$" if [ $s_cpu_requestable ]; then opt="$opt -l s_cpu=::${cputime_perslot}"; fi if [ $h_cpu_requestable ]; then opt="$opt -l h_cpu=::${cputime_hard_perslot}"; fi echo $opt >> $LRMS_JOB_SCRIPT fi if [ -n "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo "WARNING: Less than 0 wall time requested: $joboption_walltime" 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi joboption_walltime_hard=$(( $joboption_walltime * $time_hardlimit_ratio )) s_rt_requestable=$($SGE_QCONF -sc|awk '($1=="s_rt" && ( $5=="YES" || $5=="FORCED" )){print $5}') h_rt_requestable=$($SGE_QCONF -sc|awk '($1=="h_rt" && ( $5=="YES" || $5=="FORCED" )){print $5}') opt="#$" if [ $s_rt_requestable ]; then opt="$opt -l s_rt=::${joboption_walltime}"; fi if [ $h_rt_requestable ]; then opt="$opt -l h_rt=::${joboption_walltime_hard}"; fi echo $opt >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (MB) ############################################################## set_req_mem # There are soft and hard limits for virtual memory consumption in SGE if [ -n "$joboption_memory" ] ; then joboption_memory_hard=$(( $joboption_memory * $memory_hardlimit_ratio )) h_vmem_requestable=$($SGE_QCONF -sc|awk '($1=="h_vmem" && ( $5=="YES" || $5=="FORCED" )){print $5}') s_vmem_requestable=$($SGE_QCONF -sc|awk '($1=="s_vmem" && ( $5=="YES" || $5=="FORCED" )){print $5}') opt="#$" if [ $s_vmem_requestable ]; then opt="$opt -l s_vmem=${joboption_memory}M"; fi if [ $h_vmem_requestable ]; then opt="$opt -l h_vmem=${joboption_memory_hard}M"; fi echo $opt >> $LRMS_JOB_SCRIPT fi ############################################################## # Extra job options. This is the last, so that # it can overwrite previously set options. ############################################################## if [ !
-z "$CONFIG_sge_jobopts" ]; then echo "#$ $CONFIG_sge_jobopts" >> $LRMS_JOB_SCRIPT fi ############################################################## # Override umask ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT ############################################################## # By default, use $TMPDIR from SGE to alleviate its cleanup facilities. # It can be overridden with scratchdir though. # Don't do this if "shared_scratch" is defined in arc.conf. ############################################################## if [ "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! "$RUNTIME_FRONTEND_SEES_NODE" ]; then echo "if [ -d \"${CONFIG_scratchdir:-\$TMPDIR}\" ]; then RUNTIME_LOCAL_SCRATCH_DIR=${CONFIG_scratchdir:-\$TMPDIR}; fi" >> $LRMS_JOB_SCRIPT fi sourcewithargs_jobscript ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi program_start=`echo ${joboption_arg_0} | cut -c 1 2>&1` if [ "$program_start" != '$' ] && [ "$program_start" != '/' ] ; then if [ ! -f $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable does not exist, or permission denied.' 1>&2 echo " Executable $joboption_directory/${joboption_arg_0}" 1>&2 echo " whoami: "`whoami` 1>&2 echo " ls -l $joboption_directory/${joboption_arg_0}: "`ls -l $joboption_directory/${joboption_arg_0}` exit 1 fi if [ ! -x $joboption_directory/${joboption_arg_0} ] ; then echo 'Executable is not executable' 1>&2 exit 1 fi fi setup_runtime_env # Override location of .diag file: put it under the working directory echo 'RUNTIME_JOB_DIAG=$RUNTIME_JOB_DIR/.diag' >> $LRMS_JOB_SCRIPT ############################################################## # Add std... to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration ############################################################## RTE_stage1 echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then echo "Nodes detached from gridarea are not supported when SGE is used. 
Aborting job submit" 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi ############################################################## # Accounting (WN OS Detection) ############################################################## detect_wn_systemsoftware ############################################################## # Execution ############################################################## cd_and_run echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## RTE_stage2 ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR ############################################################## move_files_to_frontend ############################################################## # Finish accounting and exit job ############################################################## accounting_end if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-sge-job, JobScriptCreation: $t" >> $perflogfilesub fi if [ ! -z "$perflogdir" ]; then #start time stamp start_ts=`date +%s.%N` fi ####################################### # Submit the job ####################################### ( echo "SGE job script built" cd "$joboption_directory" echo "SGE script follows:" cat "$LRMS_JOB_SCRIPT" echo # Execute qsub command ${SGE_QSUB} < $LRMS_JOB_SCRIPT 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR # expected SGE output is like: 'Your job 77 ("perftest") has been # submitted', the line below uses only the job number as job id. job_id=$(cat $LRMS_JOB_OUT $LRMS_JOB_ERR \ | awk '/^.our job .* has been submitted/ {split($0,field," ");print field[3]}') # anything else is a sign of problems, which should be logged warnings=$(cat $LRMS_JOB_OUT $LRMS_JOB_ERR \ | grep -v '^.our job .* has been submitted' | grep -v '^Exit') if [ ! -z "$warnings" ]; then echo "WARNING: $warnings"; echo; fi exitcode=0 if [ -z $job_id ] ; then echo "job *NOT* submitted successfully!" exitcode=1 else echo "joboption_jobid=$job_id" >> $GRAMI_FILE echo "local job id: $job_id" echo "job submitted successfully!" exitcode=0 fi # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_sge_job -----"; ) 1>&2 if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-sge-job, JobSubmission: $t" >> $perflogfilesub fi exit $exitcode nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/PaxHeaders/scan-sge-job.in0000644000000000000000000000013215067751327023334 xustar0030 mtime=1759498967.768481309 30 atime=1759498967.871493742 30 ctime=1759499030.040830396 nordugrid-arc-7.1.1/src/services/a-rex/lrms/sge/scan-sge-job.in0000644000175000002070000004443615067751327025241 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -xv # # scan-sge-job does not use log-files, it only uses qacct. # # usage: scan_sge_job control_dir ... # ARC1 passes first the config file.
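# --- Illustrative note added by the editor; not part of the original script ---
# The submit script above recognises qsub output of the form
#   Your job 77 ("perftest") has been submitted
# and keeps only the third whitespace-separated field as the local job id.
# Run standalone, the same awk one-liner behaves like this:
#   echo 'Your job 77 ("perftest") has been submitted' \
#     | awk '/^.our job .* has been submitted/ {split($0,field," "); print field[3]}'
# which prints 77; the leading '.our' in the pattern tolerates both 'Your' and 'your'.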
if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi if [ -z "$1" ] ; then exit 1 ; fi # first control_dir is used for storing own files control_dir=$1 control_dirs= while [ $# -gt 0 ] ; do control_dirs="${control_dirs} $1" shift done joboption_lrms="sge" lrms_options="sge_root sge_cell sge_qmaster_port sge_execd_port sge_bin_path sge_jobopts" queue_options="sge_jobopts" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Assume that gm-kick and scan_common are installed in the same directory GMKICK=${pkglibexecdir}/gm-kick olog () { echo "[`date +%Y-%m-%d\ %T`] scan-sge-job: $*" 1>&2; } control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } umask 022 my_id=`id -u` # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi # Takes diagfile_acct, SGE qacct command, id and extraargs # writes output to diagfile_acct (temporary file for accounting info) write_diagfile_acct () { diagfile_acct=$1 qacct_command=$2 id=$3 extraargs=$4 #Check that qacct is available. if ! `which $qacct_command >dev/null` ; then olog "WARNING: qacct was not found. Accounting data will not be recorded." fi $qacct_command -j $id $extraargs 2> /dev/null \ | /usr/bin/perl -e 'while(<>) { $nodename=$1 if /^hostname\s+(\S+)/; $id=$1 if /^jobnumber\s+(\S+)/; $exitcode=$1 if /^exit_status\s+(\S+)/; $failed=$1 if /^failed\s+(.*\S)/; $CPUTime=$1 if /^cpu\s+(\d+)/; $Processors=$1 if /^slots\s+(\d+)/; $KernelTime=$1 if /^ru_stime\s+(\d+)/; $WallTime=$1 if /^ru_wallclock\s+(\d+)/; $UsedMemory=$1 if /^maxvmem\s+(\S+)M/; $UsedMemory=$1*1024 if /^maxvmem\s+(\S+)G/; $start_time=`date -d "$1" +%s` if /^start_time\s+(.+)/; $end_time =`date -d "$1" +%s` if /^end_time\s+(.+)/; } # converts seconds since epoch in local time into Mds time format (UTC) sub mds_date { my ($seconds) = @_; my @t = gmtime $seconds; my ($Y,$M,$D,$h,$m,$s) = (1900+$t[5],1+$t[4],$t[3],$t[2],$t[1],$t[0]); return sprintf "%04i%02i%02i%02i%02i%02iZ",$Y,$M,$D,$h,$m,$s; } END { exit unless $id; print "LRMSStartTime=".mds_date($1)."\n" if $start_time =~ m/^(\d+)$/; print "LRMSEndTime=" .mds_date($1)."\n" if $end_time =~ m/^(\d+)$/; print "nodename=${nodename}\n"; print "CPUTime=${CPUTime}.0s\n"; print "Processors=${Processors}\n"; print "WallTime=${WallTime}.0s\n"; print "KernelTime=${KernelTime}.0s\n"; print "UserTime=".int($CPUTime-$KernelTime).".0s\n"; print "AverageTotalMemory=".int($UsedMemory*1024)."kB\n"; print "failed=$failed\n"; print "\nsgeexitcode=$exitcode\n"; }' \ > "$diagfile_acct" } # Parse GRAMI-file and see what was requested check_exceeded_resources_grami () { gramifile=$1 errorsfile=$2 used_cputime=$3 used_walltime=$4 used_memory=$5 req_walltime=`sed -n "s/^joboption_walltime=//p" "$gramifile" | tail -n 1` req_cputime=`sed -n "s/^joboption_cputime=//p" "$gramifile" | tail -n 1` req_memory=`sed -n "s/^joboption_memory=//p" "$gramifile" | tail -n 1` if [ ! -z "$used_memory" ] && [ ! -z "$req_memory" ] \ && [ "$req_memory" != "" ] && [ "$req_memory" -gt 0 ] \ && [ $(( 100*$used_memory/1024/$req_memory )) -gt 95 ]; then overlimit="memory" fi if [ ! 
-z "$used_cputime" ] && [ ! -z "$req_cputime" ] \ && [ "$req_cputime" != "" ] && [ "$req_cputime" -gt 0 ] \ && [ $(( 100*$used_cputime/$req_cputime )) -gt 95 ]; then overlimit="cputime" fi if [ ! -z "$used_walltime" ] && [ ! -z "$req_walltime" ] \ && [ "$req_walltime" != "" ] && [ "$req_walltime" -gt 0 ] \ && [ $(( 100*$used_walltime/$req_walltime )) -gt 95 ]; then overlimit="walltime" fi echo ++++++++++++++++++++++++++ >> "$errorsfile" echo Resources: >> "$errorsfile" echo ++++++++++++++++++++++++++ >> "$errorsfile" echo req_memory=$req_memory MB >> "$errorsfile" echo req_cputime=$req_cputime >> "$errorsfile" echo req_walltime=$req_walltime >> "$errorsfile" echo used_memory=$used_memory kB >> "$errorsfile" echo used_cputime=$used_cputime >> "$errorsfile" echo used_walltime=$used_walltime >> "$errorsfile" if [ ! -z "$overlimit" ]; then echo overlimit=$overlimit >> "$errorsfile" fi echo ++++++++++++++++++++++++++ >> "$errorsfile" } #Handle failedcode handle_failedcode () { failedcode=$1 donefile=$2 exitcode=$3 sgeexitcode=$4 overlimit=$5 if [ -z "$failedcode" ]; then # Should never happen olog "SGE job $id failed: SGE accouting record is incomplete" echo "-1 SGE accouting record is incomplete" > "$donefile" elif [ "$failedcode" = "0" ]; then if [ -z "$exitcode" ]; then olog "SGE job $id failed with unknown exit code" if [ -z "$sgeexitcode" ] || [ "$sgeexitcode" = 0 ]; then sgeexitcode="-1"; fi echo "$sgeexitcode Job failed with unknown exit code" > "$donefile" elif [ "$exitcode" = "0" ]; then #olog "SGE job $id finished successfully" echo "0" > "$donefile" else #olog "SGE job $id failed with exit code $exitcode" if [ -z "$sgeexitcode" ] || [ "$sgeexitcode" = 0 ]; then sgeexitcode="-1"; fi echo "$sgeexitcode Job failed with exit code $exitcode" > "$donefile" fi else # SGE reports a problem if [ "$failedcode" = "25" ]; then failedreason="SGE error $failedcode: Job will be rescheduled" elif [ "$failedcode" = "24" ]; then failedreason="SGE error $failedcode: Job will be migrated" elif [ "$failedcode" = "100" ]; then # This happens when SGE signals the job, as in the case when a # resource limit is exceeded. We don't know for sure whether # they were enforced or not but if a job is killed by SGE, this # might be the likely cause. if [ -z "$overlimit" ]; then failedreason="SGE error $failedreason" elif [ $overlimit = "memory" ]; then failedreason="job killed: vmem" elif [ $overlimit = "cputime" ]; then failedreason="job killed: cput" elif [ $overlimit = "walltime" ]; then failedreason="job killed: wall" fi else failedreason="SGE error $failedreason" fi if [ ! 
-z "$eqwmessage" ]; then failedreason="$eqwmessage" fi olog "SGE job $id failed: $failedreason" if [ -z "$sgeexitcode" ] || [ "$sgeexitcode" = 0 ]; then sgeexitcode="-1"; fi echo "271 $failedreason" > "$donefile" fi # failedcode } # Add accounting info to $diagfile add_accounting_to_diag () { diagfile=$1 diagfile_acct=$2 diagfile_tmp=$3 errorsfile=$4 uid=$5 commentfile=$6 ctr_dir=$7 gramifile=$(control_path "${ctr_dir}" "${gridid}" "grami") donefile=$(control_path "${ctr_dir}" "${gridid}" "lrms_done") countfile=$(control_path "${ctr_dir}" "${gridid}" "lrms_job") errorsfile=$(control_path "${ctr_dir}" "${gridid}" "errors") localfile=$(control_path "${ctr_dir}" "${gridid}" "local") used_walltime=`sed -n 's/^WallTime=\(.*\).0s/\1/p' "$diagfile_acct" | tail -n 1` used_cputime=`sed -n 's/^CPUTime=\(.*\).0s/\1/p' "$diagfile_acct" | tail -n 1` used_memory=`sed -n 's/^AverageTotalMemory=\(.*\)kB/\1/p' "$diagfile_acct" | tail -n 1` sgeexitcode=`sed -n 's/^sgeexitcode=//p' "$diagfile_acct" | tail -n 1` failedreason=`sed -n 's/^failed=//p' "$diagfile_acct" | tail -n 1` failedcode=`echo $failedreason | awk '{print $1}'` if [ -s "$diagfile_acct" ]; then # Remove attributes from existing diagfile that we should have # gotten from qacct this time, otherwise we will get duplicates do_as_uid "$uid" "cat '$diagfile'" \ | grep -v "^nodename=" \ | grep -v "^WallTime=" \ | grep -v "^KernelTime=" \ | grep -v "^UserTime=" \ | grep -v "^CPUTime=" \ | grep -v "^Processors=" \ | grep -v "^LRMSStartTime=" \ | grep -v "^LRMSEndTime=" \ | grep -v "^MaxResidentMemory=" \ | grep -v "^AverageTotalMemory=" \ > "$diagfile_tmp" cat "$diagfile_tmp" "$diagfile_acct" \ | grep -v '^sgeexitcode=' \ | do_as_uid "$uid" "cat > '$diagfile'" # Check for exceeded resources limits overlimit= if [ -s "$gramifile" ]; then check_exceeded_resources_grami "$gramifile" "$errorsfile" "$used_cputime" "$used_walltime" "$used_memory" fi # grami file save_commentfile "$uid" "$commentfile" "$errorsfile" handle_failedcode "$failedcode" "$donefile" "$exitcode" "$sgeexitcode" "$overlimit" # wake up GM $GMKICK -j "${gridid}" "${ctr_dir}" >> "$errorsfile" rm -f "$countfile" rm -f "$diagfile_tmp" "$diagfile_acct" # we're done, go to next job id return 0 fi # accounting info ok rm -f "$diagfile_tmp" "$diagfile_acct" return 1 } # Handle missing accounting info handle_missing_accounting () { countfile=$1 uid=$2 commentfile=$3 errorsfile=$4 donefile=$5 exitcode=$6 ctr_dir=$7 GMKICK=$8 SGE_BIN_PATH=$9 id=$10 if [ -n "$noaccounting" ]; then # Accounting file is not accessible on this host. echo "scan-sge-job: WARNING: SGE's accounting file is not accessible on the Grid frontend node" >> "$errorsfile" echo "scan-sge-job: WARNING: Resource usage reported for this job may be inaccurate or incomplete" >> "$errorsfile" if [ -z "$exitcode" ]; then echo "-1 Job failed with unknown exit code" > "$donefile" elif [ "$exitcode" = "0" ]; then echo "0" > "$donefile" else echo "$exitcode Job failed with exit code $exitcode" > "$donefile" fi $GMKICK -j "${gridid}" "${ctr_dir}" >> "$errorsfile" return fi # There is a certain lag between the end of the job # and the time when accouting information becomes available. # We do 5 retries, keeping the count in $countfile counter=0 if [ -f "$countfile" ] ; then counter=`cat "$countfile"` counter=$(( $counter + 1 )) fi if [ "$counter" -gt 5 ]; then # Cannot wait more for accounting info. 
echo "scan-sge-job: WARNING: No SGE accounting record found for this job" >> "$errorsfile" echo "scan-sge-job: WARNING: Resource usage reported for this job may be inaccurate or incomplete" >> "$errorsfile" save_commentfile "$uid" "$commentfile" "$errorsfile" if [ -z "$exitcode" ]; then olog "No SGE accounting record found for job $id. No exitcode in diag file" echo "-1 Job failed with unknown exit code" > "$donefile" elif [ "$exitcode" = "0" ]; then olog "No SGE accounting record found for job $id. exitcode=$exitcode in diag file" echo "0" > "$donefile" else olog "No SGE accounting record found for job $id. exitcode=$exitcode in diag file" echo "$exitcode Job failed with exit code $exitcode" > "$donefile" fi rm -f "$countfile" # wake up GM $GMKICK -j "${gridid}" "${ctr_dir}" >> "$errorsfile" else # test again for job existence, only count if not known ${SGE_BIN_PATH}/qstat -j $id > /dev/null 2>&1 if [ $? -ne 0 ]; then echo "$counter" > "$countfile" else olog "SGE job $id disappeared and then reappeared!" rm -f "$countfile" fi fi } # GD: no attempt to look for SGE Manager logfiles, restrict to job logs. if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Get all jobs pids=`${SGE_BIN_PATH}/qstat -u '*' 2>/dev/null | sed -n 's/^ *\([0-9][0-9]*\) .*/\1/p'` if [ $? != 0 ]; then olog "Failed running ${SGE_BIN_PATH}/qstat" sleep 60 exit 1 fi if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-sge-job, qstat -u '*': $t" >> $perflogfile fi # Go through directories for ctr_dir in $control_dirs ; do # Obtain ids of pending/running jobs stored in job.*.local if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi rjobs='' for gid in `find "$ctr_dir/processing" -name '*.status' 2>/dev/null | sed 's/.*\/\([^\.\/]*\)\.status$/\1/'` ; do localfile=$(control_path "$ctr_dir" "$gid" "local") rjobs="$rjobs $localfile" done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-sge-job, control dir traversal: $t" >> $perflogfile fi if [ -z "$rjobs" ] ; then continue ; fi ids=`echo $rjobs | xargs grep -h '^localid=' 2>/dev/null | sed 's/^localid=\([^ ]*\)/\1/'` if [ -z "$ids" ] ; then continue ; fi # compare them to running jobs and find missing bids= for id in $ids ; do found=`echo "$pids" | grep "^$id"` if [ -z "$found" ] ; then bids="$bids $id" fi done # go through missing ids for id in $bids ; do # find grid job corresponding to current local id jobfile='' gridid='' for gid in `find "$ctr_dir/processing" -name '*.status' 2>/dev/null | sed 's/.*\/\([^\.\/]*\)\.status$/\1/'` ; do localfile=$(control_path "$ctr_dir" "$gid" "local") grep -h "localid=$id\$" 2>/dev/null 1>/dev/null if [ $? -eq 0 ] ; then gridid="$gid" jobfile="$localfile" fi done if [ -z "$jobfile" ] ; then continue ; fi # unless running as superuser, skip jobs not belonging to the current user if [ "$my_id" != '0' ] ; then if [ ! 
-O "$jobfile" ] ; then continue ; fi fi # find who runs this job uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } # extract grid id donefile=$(control_path "${ctr_dir}" "${gridid}" "lrms_done") countfile=$(control_path "${ctr_dir}" "${gridid}" "lrms_job") failedfile=$(control_path "${ctr_dir}" "${gridid}" "failed") errorsfile=$(control_path "${ctr_dir}" "${gridid}" "errors") localfile=$(control_path "${ctr_dir}" "${gridid}" "local") xmlfile=$(control_path "${ctr_dir}" "${gridid}" "xml") if [ -f "$donefile" ] ; then continue ; fi statusfile="${ctr_dir}/processing/${gridid}.status" if [ ! -f "$statusfile" ] ; then continue ; fi status=`cat "$statusfile"` if [ "$status" != "INLRMS" ] && [ "$status" != "CANCELING" ] ; then continue ; fi # get session directory of this job session=`grep -h '^sessiondir=' "$jobfile" | sed 's/^sessiondir=\(.*\)/\1/'` commentfile="${session}.comment" commentfile2="${session}/.comment" do_as_uid "$uid" "cat '$commentfile2' >> '$commentfile' 2> /dev/null; rm -f '$commentfile2'"; if [ -d "$session" ] ; then diagfile="${session}.diag" diagfile2="${session}/.diag" do_as_uid "$uid" "cat '$diagfile2' >> '$diagfile' 2> /dev/null; rm -f '$diagfile2'"; # try to obtain the exit code # $diagfile is owned by the user running the job. Switch user to access it. exitcode=`do_as_uid "$uid" "sed -n 's/^exitcode=//p' '$diagfile'" | tail -n 1` diagfile_tmp=`mktemp "$TMPDIR/diag_tmp.XXXXXX"` || { sleep 60; exit 1; } diagfile_acct=`mktemp "$TMPDIR/diag_acct.XXXXXX"` || { sleep 60; exit 1; } noaccounting= # qacct can take quite long. Here is a workaround. # Find the accounting file, and copy the last 50000 # records to a temp file. acctfile=$SGE_ROOT/$SGE_CELL/common/accounting if [ -f "$acctfile" ]; then briefacct=`mktemp "$TMPDIR/accounting.XXXXXX"` || { sleep 60; exit 1; } tail -n 50000 "$acctfile" > "$briefacct" if [ $? = 0 ]; then extraargs="-f $briefacct"; fi else # Accounting file is not accessible on this host. noaccounting=1 fi eqwmessage=`grep "SGE job state was Eqw" $xmlfile | sed -r 's/<[/]?OtherMessages>//g'` # get accounting info. write diag file write_diagfile_acct "$diagfile_acct" "${SGE_BIN_PATH}/qacct" "$id" "$extraargs" if [ "x$briefacct" != "x" ]; then rm -f "$briefacct"; fi # If the last qacct record is about migration, # we should wait for the next qacct record to appear # Delete file, like there was no accounting present at all! if grep -q "^failed=24 *: migrating" "$diagfile_acct" \ || grep -q "^failed=25 *: rescheduling" "$diagfile_acct"; then rm -f "$diagfile_acct" olog "SGE job $id: the last record in qacct is about migration. Waiting for next record to appear" fi # Add accounting info to $diagfile add_accounting_to_diag "$diagfile" "$diagfile_acct" "$diagfile_tmp" "$errorsfile" "$uid" "$commentfile" "$ctr_dir" if [ $? 
= 0 ]; then continue fi fi # session directory exists # This section is only reached when accounting info is not present handle_missing_accounting "$countfile" "$uid" "$commentfile" "$errorsfile" "$donefile" "$exitcode" "$ctr_dir" "$GMKICK" "$SGE_BIN_PATH" "$id" done # loop over bids # Detect the unlikely situation when a job reappears in the qstat listing # after being absent in the previous run of the scan-*-job (which updated # the counter file) for countfile in `find $ctr_dir -name 'lrms_job'`; do localfile=${countfile%lrms_job}local pid=`sed -n 's/^localid=\([^ ]*\)/\1/p' "$localfile" 2>/dev/null` if [ -z "$pid" ]; then continue; fi if echo "$pids" | grep "^$pid\$" >/dev/null; then olog "SGE job $pid disappeared and then reappeared!" rm -f "$countfile" fi done done # loop over control_dirs sleep 60 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/slurm0000644000000000000000000000013215067751426021043 xustar0030 mtime=1759499030.170440378 30 atime=1759499034.764510185 30 ctime=1759499030.170440378 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/0000755000175000002070000000000015067751426023022 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023154 xustar0030 mtime=1759498967.768481309 30 atime=1759498967.871493742 30 ctime=1759499030.163298258 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/Makefile.am0000644000175000002070000000017615067751327025062 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-SLURM-env.sh pkgdata_SCRIPTS = submit-SLURM-job cancel-SLURM-job scan-SLURM-job SUBDIRS = test nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356023167 xustar0030 mtime=1759498990.585472071 30 atime=1759499018.833268109 30 ctime=1759499030.165781296 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/Makefile.in0000644000175000002070000007177315067751356025100 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/slurm ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = submit-SLURM-job scan-SLURM-job cancel-SLURM-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-SLURM-job.in \ $(srcdir)/scan-SLURM-job.in $(srcdir)/submit-SLURM-job.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T 
= @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = 
@RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-SLURM-env.sh pkgdata_SCRIPTS = submit-SLURM-job cancel-SLURM-job scan-SLURM-job SUBDIRS = test all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign 
src/services/a-rex/lrms/slurm/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): submit-SLURM-job: $(top_builddir)/config.status $(srcdir)/submit-SLURM-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ scan-SLURM-job: $(top_builddir)/config.status $(srcdir)/scan-SLURM-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-SLURM-job: $(top_builddir)/config.status $(srcdir)/cancel-SLURM-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. 
# To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(SCRIPTS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am \ uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/scan-SLURM-job.in0000644000000000000000000000013215067751327024044 xustar0030 mtime=1759498967.768492177 30 atime=1759498967.871493742 30 ctime=1759499030.168044271 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/scan-SLURM-job.in0000644000175000002070000005062515067751327025756 0ustar00mockbuildmock00000000000000#!/bin/bash # # Periodically check state of grid jobs in SLURM, and put mark files # for finished jobs. # # usage: scan_slurm_job control_dir ... # ARC1 passes the config file first. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi # Validate control directories supplied on command-line if [ -z "$1" ] ; then echo "no control_dir specified" 1>&2; exit 1 fi for ctr_dir in "$@"; do if [ ! -d "$ctr_dir" ]; then echo "called with erroneous control dir: $ctr_dir" exit 1 fi done joboption_lrms="SLURM" lrms_options="slurm_wakeupperiod slurm_use_sacct slurm_bin_path slurm_query_retries" # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? # run common init # * parse config # * load LRMS-specific env # * set common variables common_init # Prevent multiple instances of scan-slurm-job from running concurrently lockfile="${TMPDIR:-@tmp_dir@}/scan-slurm-job.lock" #Check if the lockfile exists; if not, create it. (set -C; : > "$lockfile") 2> /dev/null if [ "$?"
!= "0" ]; then if ps -p $(< "$lockfile") 2>/dev/null;then echo "lockfile exists and PID $(< $lockfile) is running" exit 1 fi echo "old lockfile found, was scan-slurm-job killed?" # sleep, and if no other have removed and recreated the lockfile we remove it. # there are still races possible, but this will have to do sleep $((${RANDOM}%30+10)) if ps -p $(< $lockfile) &>/dev/null;then echo "lockfile exists and $(< $lockfile) is running" exit 1 else echo "still not running, removing lockfile" rm $lockfile exit 1 fi fi echo "$$" > "$lockfile" #If killed, remove lockfile trap 'rm $lockfile' EXIT KILL TERM #Default sleep-time is 30 seconds sleep ${CONFIG_slurm_wakeupperiod:-30} # Log system performance if [ ! -z "$perflogdir" ]; then perflog_common "$perflogdir" "$CONFIG_controldir" fi ### use sacct unset use_sacct if [ ! -z "${CONFIG_slurm_use_sacct}" ]; then if [ "${CONFIG_slurm_use_sacct}" = "yes" ]; then use_sacct="true" fi fi ### slurm_query_retries unset slurm_query_retries if [ ! -z "${CONFIG_slurm_query_retries}" ]; then slurm_query_retries=${CONFIG_slurm_query_retries} fi my_id=`id -u` if [ ! -z "$perflogdir" ]; then #start time stamp start_ts=`date +%s.%N` fi # List of SLURM jobids for grid-jobs with state INLRMS declare -a localids # Array with basenames of grid-job files in ctrl_dir, indexed by localid # example /some/path/XX/XX/X /some/other/path/YY/YY/Y declare -a basenames # Array with states of the jobs in SLURM, indexed by localid declare -a jobstates # Array with grid id values, indexed by localid declare -a gridids # Array with control directories, indexed by localid declare -a ctrdirs # Array to store localids of jobs that are determined to have finished, which are sent to gm-kick declare -a kicklist # Array with jobid blocks declare -a lidblocks # Find list of grid jobs with status INLRMS, store localid and # basename for those jobs for ctr_dir in "$@"; do for id in $(find "$ctr_dir/processing" -name '*.status' -print0 \ | xargs -0 egrep -l "INLRMS|CANCELING" \ | sed 's#.*/processing/\([^\.]*\)\.status$#\1#') do basename=$(control_path "${ctr_dir}" "${id}" "") localid=$(grep ^localid= "${basename}/local" | cut -d= -f2) verify_jobid "$localid" || continue localids[${#localids[@]}]="$localid" basenames[$localid]="$basename" gridids[$localid]="$id" ctrdirs[$localid]="$ctr_dir" done done # No need to continue further if no jobs have status INLRMS if [ ${#localids[@]} -eq 0 ]; then exit 0 fi # Distribute localids into block so that we don't exceed max command line length for jids in `echo "${localids[@]}" | xargs -n 4000 | tr ' ' ,`; do lidblocks[${#lidblocks[@]}]=$jids done if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` # t=`perl -e "printf '%.2f',$stop_ts-$start_ts;"` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-slurm-job, ControldirTraversal: $t" >> $perflogfile fi if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # Get JobStates from SLURM jobstate_squeue=$(echo "${lidblocks[@]}" | xargs -n 1 $squeue -a -h -o "%i:%T" -t all -j )\ || { echo "[$(date +%Y-%m-%d\ %T)] squeue failed" 1>&2; exit 1; } for record in $jobstate_squeue; do localid=$(echo "$record"|cut -d: -f1) state=$(echo "$record"|cut -d: -f2) jobstates[$localid]=$state; done unset jobstate_squeue if [ ! 
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-slurm-job, squeue -a -h -o %i:%T -t all -j: $t" >> $perflogfile fi # A special version of interval_to_seconds for Slurm v20.02 # This function takes a time interval formatted as 789:12:34:56 (with days) or # 12:34:56 (without days) and transforms it to seconds. It returns the result in # the return_interval_seconds variable. # Slurm format: [dd-][hh:][mm:][ss][.uuu]. # [.uuu] will always be removed. # There can be years and months in front of the days, like [yy-][mm-]? slurm_interval_to_seconds () { return_interval_seconds=0 _interval_dhms=`echo $1 | sed -e 's|-|:|' -e 's|\.[0-9]\+||'` _interval_good=`echo $_interval_dhms | grep -E '[^:0-9]'` _interval_size=`echo $_interval_dhms | grep -o : | wc -l` if [ X"$_interval_good" = "X" ] ; then if [ "$_interval_size" -eq 0 ]; then return_interval_seconds=$_interval_dhms elif [ "$_interval_size" -eq 1 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*60+$2;}'` elif [ "$_interval_size" -eq 2 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*60*60+$2*60+$3;}'` elif [ "$_interval_size" -eq 3 ]; then return_interval_seconds=`echo $_interval_dhms | tr : ' ' | awk '{print $1*24*60*60+$2*60*60+$3*60+$4;}'` else echo "Bad formatting of time interval: $_interval_dhms" 1>&2 fi else echo "Bad formatting of time interval: $_interval_dhms" 1>&2 fi unset _interval_dhms _interval_size _interval_good } handle_commentfile () { localid=$1 sessiondir=`grep -h '^sessiondir=' $jobfile | sed 's/^sessiondir=\(.*\)/\1/'` if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } save_commentfile "$uid" "${sessiondir}.comment" "${basenames[$localid]}/errors" } # This function is called after a successful call to handle_diag_file. # It fetches the exitcode from SLURM and inserts the code into the $localid/lrms_done file # The kicklist is updated to include the $localid of this job # # Input variables: # * localid # * tmpexitcode (hardcoded exitcode) # * reason (hardcoded reason) # # The following variables are initialized and updated, then written to lrms_done # * exitcode (either hardcoded tmpexitcode or # * reason # # In slurm the exitcode is returned as : where the first is the exit code, # and the second is the signal number responsible for the job termination # function handle_exitcode { localid="$1" tmpexitcode="$2" reason="$3" exitcode_retries=$(( ${slurm_query_retries} + 1 )) while [ "$exitcode_retries" -gt 0 ]; do if [ "$use_sacct" ]; then jobinfostring=$("$sacct" -j $localid -o ExitCode -P -n | head -n 1) exitcode1=$(echo $jobinfostring|awk -F':' '{print $1}') exitcode2=$(echo $jobinfostring|awk -F':' '{print $2}') else jobinfostring=$("$scontrol" -o show job $localid) exitcode1=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\1/p') exitcode2=$(echo $jobinfostring|sed -n 's/.*ExitCode=\([0-9]*\):\([0-9]*\).*/\2/p') fi if [ -z "$jobinfostring" ]; then exitcode_retries=$(( $exitcode_retries - 1 )) echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] sacct/scontrol failed for job: $localid - could not fetch jobinfostring for exit code handling. 
Retries left: $exitcode_retries" 1>&2; jobinfo_exitcode_failed=1 else ## all ok, break out of loop unset jobinfo_exitcode_failed break fi done ## If all retries failed hence jobinfo_exitcode_failed set, skip this step and try again at next scan if [ -z "$jobinfo_exitcode_failed" ]; then if [ -z "$exitcode1" ] && [ -z "$exitcode2" ] ; then exitcode=$tmpexitcode elif [ -n "$exitcode2" ] && [ "$exitcode2" -ne 0 ]; then exitcode=$(( $exitcode2 + 256 )) elif [ -n "$exitcode1" ] && [ "$exitcode1" -ne 0 ]; then exitcode=$exitcode1 else exitcode=0 fi # Special handling of cancelled jobs - as SLURM can return exitcode 0:0 for cancelled jobs if [ "$exitcode" -eq 0 ] && [[ $reason == *"cancelled"* ]] ; then exitcode=15 fi echo "$exitcode $reason" > "${basenames[$localid]}/lrms_done" kicklist=(${kicklist[@]} $localid) fi } # # Collects accounting info from LRMS for a job by using sacct or scontrol SLURM commands # depending on ARC configuration. The job's LRMS id is stored in the "localid" variable. # It first reads the jobs diag file. The job_read_diag function initializes the following variables: # * nodename # * WallTime # * UserTime # * KernelTime # * TotalMemory # * ResidentMemory # * LRMSStartTime # * LRMSEndTime # * exitcode # # Next, information from LRMS is fetched. # If sacct is used, the following info is fetched: # * cpus (NCPUS) # * starttime (Start) # * endtime (End) # * usercputime (UserCPU) # * kernelcputime (SystemCPU) # # If scontrol is used instead of sacct, no usercputime is available, only walltime. # The following info is fetched from scontrol: # * cpus # * starttime (Start) # * endtime (End) # # Once the values have been fetched, the diag file values updated are # * WallTime - in seconds # * Processors # * UserTime - in seconds # * KernelTime - in seconds # Note again that in the case where scontrol is used instead of sacct UserTime=WallTime # # If for some reason sacct or scontrol fails (the former due to e.g. the slurm database being overloade) # a retry functionality is included. 3 retries is attempted for sacct/scontrol call. If still no # success, the handle_errorcode or handle_errorcode_cancelled is not called, avoiding the lrms_done mark. # This results in the job being picked up in the next scan for a new attempt. # # The STDOUT and STDERR are redirected to the job-helper.errors file. 
# function handle_diag_file { localid="$1" ctr_diag="$2" handle_diag_tries=$(( ${slurm_query_retries} + 1 )) job_read_diag ## This while loop is an attempt to reduce the cases where the job info ## is not successfully fetched from slurm due to slurm connection/timeout issues while [ "$handle_diag_tries" -gt 0 ] ; do unset jobinfo_collect_failed if [ "$use_sacct" ]; then jobinfostring=$("$sacct" -j $localid.batch -o NCPUS,Start,End,UserCPU,SystemCPU -P -n | tail -n 1) cpus=$(echo "$jobinfostring" | awk -F'|' '{print $1}') starttime=$(echo "$jobinfostring"|awk -F'|' '{print $2}'| sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') endtime=$(echo "$jobinfostring"|awk -F'|' '{print $3}'| sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') # UserCPU,SystemCPU format is [dd-]hh:mm:ss[.uuu] usercputime=$(echo "$jobinfostring" | awk -F'|' '{print $4}') kernelcputime=$(echo "$jobinfostring" | awk -F'|' '{print $5}') [ -z "$usercputime" ] && usercputime="00:00:00" [ -z "$kernelcputime" ] && kernelcputime="00:00:00" else jobinfostring=$("$scontrol" -o show job $localid) #Slurm can report StartTime and EndTime in at least these two formats: #2010-02-15T15:30:29 #02/15-15:25:15 #For our code to be able to manage both, the first needs to keep its hyphens, #the second needs them removed starttime=$(echo "$jobinfostring"|sed -n 's/.*StartTime=\([^ ]*\) .*/\1/p' | \ sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') endtime=$(echo "$jobinfostring"|sed -n 's/.*EndTime=\([^ ]*\) .*/\1/p' | \ sed 's,\([0-9]\+/[0-9]\+\)-\([0-9:]\+\),\1 \2,g' | sed 's/T/ /g') cpus=$(echo "$jobinfostring"|sed -n 's/.*NumCPUs=\([^ ]*\) .*/\1/p') fi if [ -z "$jobinfostring" ]; then jobinfo_collect_failed=1; fi ## Do not try again if cpus is correctly filled with a number, or if cpus is filled with the sacct header (cpus=NCPUS). ## The latter case is handled further down in this function. ## If (not empty cpus variable, and cpus variable is a number) or cpus variable is NCPUS then we are done, so break out of the retry loop if ( [ -n "$cpus" ] && [ "$cpus" -eq "$cpus" ] 2>/dev/null ) || [ z"$cpus" = "zNCPUS" ] ; then break ; fi handle_diag_tries=$(( $handle_diag_tries - 1 )) if [ -n "$jobinfo_collect_failed" ] ; then echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] sacct/scontrol failed for job: $localid - could not fetch jobinfostring to update the diag file. Retries left: $handle_diag_tries." 1>&2; fi sleep 2 done # if "sacct -j $localid.batch" returns only the header string "NCPUS|NNodes..." the job has no batch stage; it was killed before starting on the WN if [ ! z"$cpus" = "zNCPUS" ] && [ -z "$jobinfo_collect_failed" ]; then date_to_utc_seconds "$starttime" starttime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSStartTime=$return_mds_date date_to_utc_seconds "$endtime" endtime_seconds="$return_date_seconds" seconds_to_mds_date "$return_date_seconds" LRMSEndTime=$return_mds_date #TODO handle exitcode etc. walltime=$(( $endtime_seconds - $starttime_seconds)) slurm_interval_to_seconds "$usercputime" cputime="$return_interval_seconds" slurm_interval_to_seconds "$kernelcputime" kernel="$return_interval_seconds" # Values to write to diag. These will override values already written. [ -n "$walltime" ] && WallTime=$walltime [ -n "$cpus" ] && Processors=$cpus [ -n "$cputime" ] && UserTime=$cputime [ -n "$kernel" ] && KernelTime=$kernel job_write_diag fi } if [ !
-z "$perflogdir" ]; then start_ts=`date +%s.%N` fi run=0 completed=0 zombie=0 failed=0 # Look at the list of jobstates and determine which jobs that have # finished. Write XXXX/lrms_done according to this for localid in ${localids[@]}; do # Initialize jobfile variable since it's used below jobfile="${basenames[$localid]}/local" case "${jobstates[$localid]}" in "") # Job is missing (no state) from slurm but INLRMS. zombie=$(($zombie + 1)) exitcode='' # get session directory of this job sessiondir=`grep -h '^sessiondir=' $jobfile | sed 's/^sessiondir=\(.*\)/\1/'` diagfile="${sessiondir}.diag" commentfile="${sessiondir}.comment" if [ "$my_id" != '0' ] ; then if [ ! -O "$jobfile" ] ; then continue ; fi fi uid=$(get_owner_uid "$jobfile") [ -z "$uid" ] && { log "Failed to stat $jobfile"; continue; } if [ ! -z "$sessiondir" ] ; then # have chance to obtain exit code if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then # In case of non-NFS setup it may take some time till # diagnostics file is delivered. Wait for it max 2 minutes. diag_tries=20 while [ "$diag_tries" -gt 0 ] ; do if [ -z "$uid" ] ; then exitcode=`grep '^exitcode=' "$diagfile" 2>/dev/null | sed 's/^exitcode=//'` else exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi if [ ! -z "$exitcode" ] ; then break ; fi sleep 10 diag_tries=$(( $diag_tries - 1 )) done else if [ -z "$uid" ] ; then exitcode=`grep '^exitcode=' "$diagfile" 2>/dev/null | sed 's/^exitcode=//'` else exitcode=$(do_as_uid "$uid" "grep '^exitcode=' '$diagfile'" | sed 's/^exitcode=//') fi fi fi jobstatus="$exitcode Job missing from SLURM, exitcode recovered from session directory" if [ -z $exitcode ];then exitcode="-1" jobstatus="$exitcode Job missing from SLURM" fi save_commentfile "$uid" "$commentfile" "${basenames[$localid]}/errors" echo "$jobstatus" > "${basenames[$localid]}/lrms_done" kicklist=(${kicklist[@]} $localid) ;; PENDING|RUNNING|SUSPENDED|COMPLETING) #Job is running, nothing to do. 
run=$(($run + 1)) ;; CANCELLED) failed=$(($failed + 1)) handle_commentfile $localid kicklist=(${kicklist[@]} $localid) handle_diag_file "$localid" "${basenames[$localid]}/diag" [ -z "$jobinfo_collect_failed" ] && handle_exitcode $localid "-1" "Job was cancelled" || echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] Job:$localid CANCELLED, but jobinfo_collect_failed - not setting exit code, will try again in next scan" 1>&2; ;; COMPLETED) completed=$(($completed + 1)) handle_commentfile $localid handle_diag_file "$localid" "${basenames[$localid]}/diag" [ -z "$jobinfo_collect_failed" ] && handle_exitcode $localid "0" "" || echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] Job:$localid COMPLETED, but jobinfo_collect_failed - not setting exit code, will try again in next scan" 1>&2; ;; FAILED) failed=$(($failed + 1)) handle_commentfile $localid handle_diag_file "$localid" "${basenames[$localid]}/diag" [ -z "$jobinfo_collect_failed" ] && handle_exitcode $localid "-1" "Job failed" || echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] Job:$localid FAILED, but jobinfo_collect_failed - not setting exit code, will try again in next scan" 1>&2; ;; TIMEOUT) failed=$(($failed + 1)) handle_commentfile $localid handle_diag_file "$localid" "${basenames[$localid]}/diag" [ -z "$jobinfo_collect_failed" ] && handle_exitcode $localid "-1" "Job timeout" || echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] Job:$localid TIMEOUT, but jobinfo_collect_failed - not setting exit code, will try again in next scan" 1>&2; ;; NODE_FAIL) failed=$(($failed + 1)) handle_commentfile $localid handle_diag_file "$localid" "${basenames[$localid]}/diag" [ -z "$jobinfo_collect_failed" ] && handle_exitcode $localid "-1" "Node fail" || echo "scan-SLURM-job - [$(date +%Y-%m-%d\ %T)] Job:$localid NODE_FAIL, but jobinfo_collect_failed - not setting exit code, will try again in next scan" 1>&2; ;; esac unset jobinfo_collect_failed done if [ ! 
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] scan-slurm-job, JobHandling, R= $run, D= $completed, Z= $zombie, F= $failed: $t" >> $perflogfile fi # Kick the GM if [ -n "${kicklist[*]}" ];then for localid in "${kicklist[@]}";do gridid=${gridids[$localid]} ctrdir=${ctrdirs[$localid]} "${pkglibexecdir}/gm-kick" -j "${gridid}" "${ctrdir}" done fi exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/test0000644000000000000000000000013215067751426022022 xustar0030 mtime=1759499030.277442004 30 atime=1759499034.764510185 30 ctime=1759499030.277442004 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/0000755000175000002070000000000015067751426024001 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024133 xustar0030 mtime=1759498967.768492177 30 atime=1759498967.871493742 30 ctime=1759499030.193568425 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/Makefile.am0000644000175000002070000000002615067751327026033 0ustar00mockbuildmock00000000000000SUBDIRS = submit scan nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/PaxHeaders/submit0000644000000000000000000000013215067751426023325 xustar0030 mtime=1759499030.275441973 30 atime=1759499034.764510185 30 ctime=1759499030.275441973 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/0000755000175000002070000000000015067751426025304 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/rte-test0000644000000000000000000000013215067751327025074 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.227930919 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/rte-test0000644000175000002070000001111015067751327026770 0ustar00mockbuildmock00000000000000TESTS="rte0_change_memory rte0_nodeproperty rte0_env rte0_runtime rte rte_with_options" # NOTEL: RTEs are not able to change the joboption_arg_ variables (i.e. # change executable and arguments) - they are processed before RTE stage 0. # See bug 3625 - test disabled. #TESTS="${TESTS} rte0_arg" # NOTE: Backslashes in environment variables is not handled consistent in bash and dash (RedHat vs Debian). # TEST DISABLED! #TESTS="${TESTS} rte0_env_backslashes" # Test passes with bash but not in dash. 
simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF read -r -d '' general_arc_test_configuration < /bin/true 1572864000 INCREASEMEMORY_1 INCREASEMEMORY_2 EOF rtes="INCREASEMEMORY_1 INCREASEMEMORY_2" read -r -d '' INCREASEMEMORY_1 <<'EOF' joboption_memory=$(( $(echo ${joboption_memory} + ${2} | tr -d \") )) EOF read -r -d '' INCREASEMEMORY_2 <<'EOF' joboption_memory=$(( $(echo ${joboption_memory} + ${2} + ${2} | tr -d \") )) EOF cat "${SRCDIR}/rte-test-rte_with_options.patch" | patch -sf -p1 -d ${SRCDIR} -o ${testdir}/expected_lrms_job_script.tmpl } nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/basic-test0000644000000000000000000000013215067751327025363 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.221713298 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/basic-test0000644000175000002070000000147315067751327027272 0ustar00mockbuildmock00000000000000TESTS="basic with_arguments" simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF read -r -d '' general_arc_test_configuration <&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/slurm/test/submit ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac 
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ 
DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = 
@PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ TESTS = basic-test memory-test count-test queue-test job-name-test \ cpu-wall-time-test rte-test config-options-test std-io-test user-env-test \ files-io-test TESTS_ENVIRONMENT = \ PYTHONPATH=$(abs_top_srcdir)/src/utils/python/ \ ARC_PLUGIN_PATH=$(abs_top_builddir)/src/hed/acc/JobDescriptionParser/.libs \ 
	ARC_LOCATION=$(abs_builddir) \
	TEST_WRITE_GRAMI_FILE=$(top_builddir)/src/services/a-rex/grid-manager/test_write_grami_file \
	SRCDIR=$(srcdir) \
	$(SHELL) check_submit_script.sh submit-SLURM-job

SCRIPTSNEEDED = check_submit_script.sh \
	submit-SLURM-job $(pkgdatasubdir)/configure-SLURM-env.sh \
	lrms_common.sh $(pkgdatasubdir)/submit_common.sh \
	$(pkglibexecsubdir)/arcconfig-parser command-simulator.sh

check_SCRIPTS = $(TESTS) basic-script.sh $(SCRIPTSNEEDED)
EXTRA_DIST = $(TESTS) basic-script.sh $(wildcard $(srcdir)/*.patch)
CLEANFILES = $(SCRIPTSNEEDED)
all: all-am

.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
	@for dep in $?; do \
	  case '$(am__configure_deps)' in \
	    *$$dep*) \
	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
	        && { if test -f $@; then exit 0; else break; fi; }; \
	      exit 1;; \
	  esac; \
	done; \
	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/test/submit/Makefile'; \
	$(am__cd) $(top_srcdir) && \
	  $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/test/submit/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
	@case '$?' in \
	  *config.status*) \
	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
	  *) \
	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
	esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):

mostlyclean-libtool:
	-rm -f *.lo

clean-libtool:
	-rm -rf .libs _libs

tags TAGS:

ctags CTAGS:

cscope cscopelist:

check-TESTS: $(TESTS)
	@failed=0; all=0; xfail=0; xpass=0; skip=0; \
	srcdir=$(srcdir); export srcdir; \
	list=' $(TESTS) '; \
	$(am__tty_colors); \
	if test -n "$$list"; then \
	  for tst in $$list; do \
	    if test -f ./$$tst; then dir=./; \
	    elif test -f $$tst; then dir=; \
	    else dir="$(srcdir)/"; fi; \
	    if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \
	      all=`expr $$all + 1`; \
	      case " $(XFAIL_TESTS) " in \
	      *[\ \ ]$$tst[\ \ ]*) \
	        xpass=`expr $$xpass + 1`; \
	        failed=`expr $$failed + 1`; \
	        col=$$red; res=XPASS; \
	      ;; \
	      *) \
	        col=$$grn; res=PASS; \
	      ;; \
	      esac; \
	    elif test $$? -ne 77; then \
	      all=`expr $$all + 1`; \
	      case " $(XFAIL_TESTS) " in \
	      *[\ \ ]$$tst[\ \ ]*) \
	        xfail=`expr $$xfail + 1`; \
	        col=$$lgn; res=XFAIL; \
	      ;; \
	      *) \
	        failed=`expr $$failed + 1`; \
	        col=$$red; res=FAIL; \
	      ;; \
	      esac; \
	    else \
	      skip=`expr $$skip + 1`; \
	      col=$$blu; res=SKIP; \
	    fi; \
	    echo "$${col}$$res$${std}: $$tst"; \
	  done; \
	  if test "$$all" -eq 1; then \
	    tests="test"; \
	    All=""; \
	  else \
	    tests="tests"; \
	    All="All "; \
	  fi; \
	  if test "$$failed" -eq 0; then \
	    if test "$$xfail" -eq 0; then \
	      banner="$$All$$all $$tests passed"; \
	    else \
	      if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \
	      banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \
	    fi; \
	  else \
	    if test "$$xpass" -eq 0; then \
	      banner="$$failed of $$all $$tests failed"; \
	    else \
	      if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \
	      banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \
	    fi; \
	  fi; \
	  dashes="$$banner"; \
	  skipped=""; \
	  if test "$$skip" -ne 0; then \
	    if test "$$skip" -eq 1; then \
	      skipped="($$skip test was not run)"; \
	    else \
	      skipped="($$skip tests were not run)"; \
	    fi; \
	    test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \
	      dashes="$$skipped"; \
	  fi; \
	  report=""; \
	  if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \
	    report="Please report to $(PACKAGE_BUGREPORT)"; \
	    test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \
	      dashes="$$report"; \
	  fi; \
	  dashes=`echo "$$dashes" | sed s/./=/g`; \
	  if test "$$failed" -eq 0; then \
	    col="$$grn"; \
	  else \
	    col="$$red"; \
	  fi; \
	  echo "$${col}$$dashes$${std}"; \
	  echo "$${col}$$banner$${std}"; \
	  test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \
	  test -z "$$report" || echo "$${col}$$report$${std}"; \
	  echo "$${col}$$dashes$${std}"; \
	  test "$$failed" -eq 0; \
	else :; fi

distdir: $(BUILT_SOURCES)
	$(MAKE) $(AM_MAKEFLAGS) distdir-am

distdir-am: $(DISTFILES)
	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
	list='$(DISTFILES)'; \
	  dist_files=`for file in $$list; do echo $$file; done | \
	  sed -e "s|^$$srcdirstrip/||;t" \
	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
	case $$dist_files in \
	  */*) $(MKDIR_P) `echo "$$dist_files" | \
	                   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
	                   sort -u` ;; \
	esac; \
	for file in $$dist_files; do \
	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
	  if test -d $$d/$$file; then \
	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
	    if test -d "$(distdir)/$$file"; then \
	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
	    fi; \
	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
	    fi; \
	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
	  else \
	    test -f "$(distdir)/$$file" \
	    || cp -p $$d/$$file "$(distdir)/$$file" \
	    || exit 1; \
	  fi; \
	done
check-am: all-am
	$(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS)
	$(MAKE) $(AM_MAKEFLAGS) check-TESTS
check: check-am
all-am: Makefile
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am

install-am: all-am
	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am

installcheck: installcheck-am
install-strip:
	if test -z '$(STRIP)'; then \
	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
	      install; \
	else \
	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
	fi
mostlyclean-generic:

clean-generic:
	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)

distclean-generic:
	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)

maintainer-clean-generic:
	@echo "This command is intended for maintainers to use"
	@echo "it deletes files that may require special tools to rebuild."
clean: clean-am

clean-am: clean-generic clean-libtool mostlyclean-am

distclean: distclean-am
	-rm -f Makefile
distclean-am: clean-am distclean-generic

dvi: dvi-am

dvi-am:

html: html-am

html-am:

info: info-am

info-am:

install-data-am:

install-dvi: install-dvi-am

install-dvi-am:

install-exec-am:

install-html: install-html-am

install-html-am:

install-info: install-info-am

install-info-am:

install-man:

install-pdf: install-pdf-am

install-pdf-am:

install-ps: install-ps-am

install-ps-am:

installcheck-am:

maintainer-clean: maintainer-clean-am
	-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic

mostlyclean: mostlyclean-am

mostlyclean-am: mostlyclean-generic mostlyclean-libtool

pdf: pdf-am

pdf-am:

ps: ps-am

ps-am:

uninstall-am:

.MAKE: check-am install-am install-strip

.PHONY: all all-am check check-TESTS check-am clean clean-generic \
	clean-libtool cscopelist-am ctags-am distclean \
	distclean-generic distclean-libtool distdir dvi dvi-am html \
	html-am info info-am install install-am install-data \
	install-data-am install-dvi install-dvi-am install-exec \
	install-exec-am install-html install-html-am install-info \
	install-info-am install-man install-pdf install-pdf-am \
	install-ps install-ps-am install-strip installcheck \
	installcheck-am installdirs maintainer-clean \
	maintainer-clean-generic mostlyclean mostlyclean-generic \
	mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \
	uninstall-am

.PRECIOUS: Makefile

check_submit_script.sh: $(srcdir)/../../../test/check_submit_script.sh
	cp $< $@

submit-SLURM-job: $(builddir)/../../submit-SLURM-job
	cp $< $@
	chmod +x $@

$(pkgdatasubdir)/configure-SLURM-env.sh: $(srcdir)/../../configure-SLURM-env.sh
	mkdir -p $(pkgdatasubdir)
	cp $< $@

$(pkgdatasubdir)/submit_common.sh: $(builddir)/../../../submit_common.sh
	mkdir -p $(pkgdatasubdir)
	cp $< $@

lrms_common.sh: $(builddir)/../../../lrms_common.sh
	cp $< $@

$(pkglibexecsubdir)/arcconfig-parser: $(top_builddir)/src/utils/python/arcconfig-parser
	mkdir -p $(pkglibexecsubdir)
	cp $< $@

command-simulator.sh: $(top_srcdir)/src/tests/lrms/command-simulator.sh
	cp $< $@
	chmod +x $@

# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/files-io-test0000644000000000000000000000013215067751327026011 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.232792699
nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/files-io-test0000644000175000002070000000153115067751327027713 0ustar00mockbuildmock00000000000000
TESTS="outputfiles"
simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted
read -r -d '' simulator_output <<'EOF'
rargs="/sbatch .*/"
output="Submitted batch job 1"
rargs="/rm .*/"
output=""
EOF
read -r -d '' general_arc_test_configuration <$RUNTIME_JOB_STDOUT 2>&1
+ $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR
 else
- "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1
+ "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR
 fi
 RESULT=$?
@@ -217,6 +217,19 @@
 if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then
 find ./ -type l -exec rm -f "{}" ";"
 chmod -R u+w "./"
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'testfile' 2>/dev/null
+ dynlist='list_of_outputfiles_generated_by_job'
+ chmod -R u-w "./$dynlist" 2>/dev/null
+ cat "./$dynlist" | while read name rest; do
+ chmod -R u-w "./$name" 2>/dev/null
+ done
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'another_testfile' 2>/dev/null
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'another_testfile' 2>/dev/null
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'file with spaces' 2>/dev/null
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'very tricky\\ file\\\\' 2>/dev/null
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'very tricky\\ file\\\\' 2>/dev/null
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'another'\''tricky'\''file' 2>/dev/null
+ chmod -R u-w "$RUNTIME_JOB_DIR"/'another\'\''tricky\'\''file' 2>/dev/null
 find ./ -type f -perm /200 -exec rm -f "{}" ";"
 chmod -R u+w "./"
 fi
nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/cpu-wall-time-test-cputime_0000644000000000000000000000013215067751327030565 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.243913849
nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test-cputime_count.patch0000644000175000002070000000122115067751327034632 0ustar00mockbuildmock00000000000000
--- a/basic-script.sh	2019-11-04 17:19:14.270959996 +0200
+++ b/basic-script.sh	2019-11-04 17:19:14.271959982 +0200
@@ -8,8 +8,10 @@
 #SBATCH --nice=50
 #SBATCH -J 'gridjob'
 #SBATCH --get-user-env
-#SBATCH -n 1
+#SBATCH -n 6
 #SBATCH
+#SBATCH -t 1:10
+#SBATCH -t 1:10

 # Overide umask of execution node (sometime values are really strange)
 umask 077
@@ -178,7 +183,7 @@
 nodename=`/bin/hostname -f`
 echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG"
 fi
-echo "Processors=1" >> "$RUNTIME_JOB_DIAG"
+echo "Processors=6" >> "$RUNTIME_JOB_DIAG"
 echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG"
 executable='/bin/true'
 # Check if executable exists
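Each *.patch member in this directory encodes, per test case, the expected difference between the reference basic-script.sh and the job script that submit-SLURM-job should generate; the test functions apply the patch to build an expected template, which check_submit_script.sh (copied in by the Makefile above) then compares against the script actually produced. The queue-test member further below runs exactly this patch command; a by-hand equivalent follows as a sketch, where SRCDIR and the use of mktemp for testdir are illustrative assumptions, not part of the shipped tests:

# Rebuild the expected LRMS job script template for one test case by hand.
# SRCDIR must contain basic-script.sh and the *.patch files; the -sf/-p1/-d/-o
# invocation mirrors the one used inside the test functions themselves.
SRCDIR=.
testdir=$(mktemp -d)
cat "${SRCDIR}/cpu-wall-time-test-cputime_count.patch" \
    | patch -sf -p1 -d "${SRCDIR}" -o "${testdir}/expected_lrms_job_script.tmpl"
# 'make check' drives the same flow via TESTS_ENVIRONMENT, with sbatch and rm
# simulated by command-simulator.sh as configured in each test's simulator_output.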
nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/count-test-count_per_node.p0000644000000000000000000000013215067751327030671 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.240586301 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/count-test-count_per_node.patch0000644000175000002070000000125315067751327033434 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-11-04 17:19:10.596013344 +0200 +++ b/basic-script.sh 2019-11-04 17:19:10.596013344 +0200 @@ -8,7 +8,8 @@ #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env -#SBATCH -n 1 +#SBATCH -n 6 +#SBATCH --ntasks-per-node 2 #SBATCH # Overide umask of execution node (sometime values are really strange) @@ -178,7 +182,8 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi -echo "Processors=1" >> "$RUNTIME_JOB_DIAG" +echo "Processors=6" >> "$RUNTIME_JOB_DIAG" +echo "Nodecount=3" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/memory-test-memory_queue_de0000644000000000000000000000031515067751327030777 xustar00116 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_queue_defaultmemory.patch 30 mtime=1759498967.769618065 30 atime=1759498967.872493757 29 ctime=1759499030.25944173 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_queue_defaultmemory0000644000175000002070000000067315067751327035171 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 14:10:18.764277404 +0100 +++ b/basic-script.sh 2014-01-28 14:10:18.756277403 +0100 @@ -4,11 +4,13 @@ #SBATCH -e /my/session/directory.comment #SBATCH -o /my/session/directory.comment +#SBATCH -p nordugrid #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH --mem-per-cpu=200 # Overide umask of execution node (sometime values are really strange) umask 077 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/cpu-wall-time-test-walltime0000644000000000000000000000013215067751327030576 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.245019202 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test-walltime.patch0000644000175000002070000000041715067751327033600 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -8,6 +8,7 @@ #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH -t 1:0 # Overide umask of execution node (sometime values are really strange) nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/user-env-test-user_env.patc0000644000000000000000000000013215067751327030620 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.276054217 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/user-env-test-user_env.patch0000644000175000002070000000107315067751327032673 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -59,9 +59,11 @@ echo "Failed to use both cgroups and GNU time for resource usage accounting. Accounting relies on LRMS information only." 
1>&2 fi # Setting environment variables as specified by user +export 'TEST=Testing' +export 'TEST2='\''Testing'\''' export 'GRID_GLOBAL_JOBID=@TEST_JOB_ID@' export 'GRID_GLOBAL_JOBURL=' export 'GRID_GLOBAL_JOBINTERFACE=' export 'GRID_GLOBAL_JOBHOST=' RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/job-name-test0000644000000000000000000000013215067751327025772 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.225809959 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/job-name-test0000644000175000002070000000365415067751327027704 0ustar00mockbuildmock00000000000000TESTS="job_name_basic job_name_truncate "\ "job_name_replace_underscore job_name_prefix_N" simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF read -r -d '' general_arc_test_configuration <&2 + echo "Runtime DUMMY stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -205,6 +221,15 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}DUMMY;" +# Calling DUMMY function: +RTE_function_0 2 +if [ $? -ne 0 ]; then + echo "Runtime DUMMY stage 2 execution failed." 1>&2 + echo "Runtime DUMMY stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/job-name-test-job_name_trun0000644000000000000000000000013215067751327030612 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.253230945 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/job-name-test-job_name_truncate.patch0000644000175000002070000000044715067751327034454 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -5,7 +5,7 @@ #SBATCH -o /my/session/directory.comment #SBATCH --nice=50 -#SBATCH -J 'gridjob' +#SBATCH -J 'abcdefghijklmno' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/basic-test-with_arguments.p0000644000000000000000000000013215067751327030657 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.235116797 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/basic-test-with_arguments.patch0000644000175000002070000000323415067751327033423 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-11-04 17:19:08.057050201 +0200 +++ b/basic-script.sh 2019-11-04 17:19:08.058050186 +0200 @@ -180,7 +183,7 @@ fi echo "Processors=1" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" -executable='/bin/true' +executable='/bin/echo' # Check if executable exists if [ ! 
-f "$executable" ]; then @@ -198,9 +201,9 @@ echo "Cannot run $executable: $interpreter: not found" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1; } if [ "x$JOB_ACCOUNTING" = "xgnutime" ]; then - $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/echo" "Hello World" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 else - "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + "/bin/echo" "Hello World" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 fi RESULT=$? nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/job-name-test-job_name_pref0000644000000000000000000000013215067751327030556 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.250764156 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/job-name-test-job_name_prefix_N.patch0000644000175000002070000000044715067751327034401 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -5,7 +5,7 @@ #SBATCH -o /my/session/directory.comment #SBATCH --nice=50 -#SBATCH -J 'gridjob' +#SBATCH -J 'N12345678901234' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/memory-test-memory_cluster_0000644000000000000000000000033115067751327031021 xustar00127 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_cluster_and_queue_nodememory_1.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.255708598 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_cluster_and_queue_n0000644000175000002070000000067315067751327035134 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 14:08:57.116276178 +0100 +++ b/basic-script.sh 2014-01-28 14:08:57.116276178 +0100 @@ -4,11 +4,13 @@ #SBATCH -e /my/session/directory.comment #SBATCH -o /my/session/directory.comment +#SBATCH -p nordugrid #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH --mem-per-cpu=700 # Overide umask of execution node (sometime values are really strange) umask 077 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/job-name-test-job_name_repl0000644000000000000000000000032115067751327030564 xustar00119 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/job-name-test-job_name_replace_underscore.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.251930189 
nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/job-name-test-job_name_replace_undersc0000644000175000002070000000044715067751327034667 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -5,7 +5,7 @@ #SBATCH -o /my/session/directory.comment #SBATCH --nice=50 -#SBATCH -J 'gridjob' +#SBATCH -J 'job____________' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/job-name-test-job_name_basi0000644000000000000000000000013215067751327030540 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.249585628 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/job-name-test-job_name_basic.patch0000644000175000002070000000043315067751327033703 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -5,7 +5,7 @@ #SBATCH -o /my/session/directory.comment #SBATCH --nice=50 -#SBATCH -J 'gridjob' +#SBATCH -J 'job' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/memory-test-memory_basic.pa0000644000000000000000000000013215067751327030660 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.254388123 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_basic.patch0000644000175000002070000000044315067751327033262 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 14:14:58.640281603 +0100 +++ b/basic-script.sh 2014-01-28 14:14:58.636281603 +0100 @@ -9,6 +9,7 @@ #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH --mem-per-cpu=100 # Overide umask of execution node (sometime values are really strange) umask 077 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/count-test-exclusive.patch0000644000000000000000000000013215067751327030535 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.241715869 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/count-test-exclusive.patch0000644000175000002070000000120315067751327032433 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-11-04 17:19:10.916008698 +0200 +++ b/basic-script.sh 2019-11-04 17:19:10.917008684 +0200 @@ -8,8 +8,9 @@ #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env -#SBATCH -n 1 +#SBATCH -n 6 #SBATCH +#SBATCH --exclusive # Overide umask of execution node (sometime values are really strange) umask 077 @@ -178,7 +182,7 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi -echo "Processors=1" >> "$RUNTIME_JOB_DIAG" +echo "Processors=6" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/cpu-wall-time-test-walltime0000644000000000000000000000031715067751327030603 xustar00117 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test-walltime_and_cputime.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.246115892 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test-walltime_and_cputim0000644000175000002070000000043715067751327034707 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ 
b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -8,6 +8,8 @@ #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH -t 1:0 +#SBATCH -t 5:0 # Overide umask of execution node (sometime values are really strange) nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/count-test-count.patch0000644000000000000000000000013215067751327027656 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.239507036 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/count-test-count.patch0000644000175000002070000000114315067751327031557 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-11-04 17:19:10.195019165 +0200 +++ b/basic-script.sh 2019-11-04 17:19:10.196019150 +0200 @@ -8,7 +8,7 @@ #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env -#SBATCH -n 1 +#SBATCH -n 6 #SBATCH # Overide umask of execution node (sometime values are really strange) @@ -178,7 +181,7 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi -echo "Processors=1" >> "$RUNTIME_JOB_DIAG" +echo "Processors=6" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/cpu-wall-time-test0000644000000000000000000000013215067751327026762 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.226907435 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test0000644000175000002070000000321615067751327030666 0ustar00mockbuildmock00000000000000TESTS="cputime walltime walltime_and_cputime walltime_count cputime_count " simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF read -r -d '' general_arc_test_configuration <&2 + echo "Runtime ENV script failed " 1>"$RUNTIME_JOB_DIAG" + exit 1 + fi + fi +fi echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then @@ -162,6 +175,12 @@ fi fi +if [ ! -z "$RUNTIME_CONFIG_DIR" ] ; then + if [ -r "${RUNTIME_CONFIG_DIR}/ENV" ] ; then + cmdl=${RUNTIME_CONFIG_DIR}/ENV + sourcewithargs $cmdl 2 + fi +fi if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then find ./ -type l -exec rm -f "{}" ";" nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/config-options-test-multico0000644000000000000000000000031515067751327030715 xustar00115 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/config-options-test-multicore_scratch.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.236209087 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/config-options-test-multicore_scratch.0000644000175000002070000000703415067751327034734 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-07-22 17:49:47.678606762 +0200 +++ b/basic-script.sh 2019-07-22 17:49:47.680606715 +0200 @@ -8,7 +8,7 @@ #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env -#SBATCH -n 1 +#SBATCH -n 8 #SBATCH # Overide umask of execution node (sometime values are really strange) @@ -64,23 +64,15 @@ export 'GRID_GLOBAL_JOBINTERFACE=' export 'GRID_GLOBAL_JOBHOST=' -RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ -RUNTIME_JOB_STDIN=/dev/null -RUNTIME_JOB_STDOUT=/dev/null -RUNTIME_JOB_STDERR=/dev/null -RUNTIME_JOB_DIAG=@TEST_SESSION_DIR@/@TEST_JOB_ID@.diag -if [ ! 
-z "$RUNTIME_GRIDAREA_DIR" ] ; then - RUNTIME_JOB_DIR=$RUNTIME_GRIDAREA_DIR/`basename $RUNTIME_JOB_DIR` - RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_DIAG=`echo "$RUNTIME_JOB_DIAG" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_CONTROL_DIR=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` -fi -RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-} +RUNTIME_JOB_DIR=/local/scratch/multicore_scratch +RUNTIME_JOB_DIAG=/local/scratch/multicore_scratch.diag +RUNTIME_JOB_STDIN="/dev/null" +RUNTIME_JOB_STDOUT="/dev/null" +RUNTIME_JOB_STDERR="/dev/null" +RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-/local/scratch} RUNTIME_LOCAL_SCRATCH_MOVE_TOOL=${RUNTIME_LOCAL_SCRATCH_MOVE_TOOL:-mv} RUNTIME_FRONTEND_SEES_NODE=${RUNTIME_FRONTEND_SEES_NODE:-} -RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-yes} +RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-} if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then RUNTIME_NODE_JOB_DIR="$RUNTIME_LOCAL_SCRATCH_DIR"/`basename "$RUNTIME_JOB_DIR"` rm -rf "$RUNTIME_NODE_JOB_DIR" @@ -141,8 +136,21 @@ RESULT=0 if [ "$RESULT" = '0' ] ; then +# RunTimeEnvironment function for MULTICORE_SCRATCH: +RTE_function_0 () { +export RUNTIME_ENABLE_MULTICORE_SCRATCH="yes" +} # Running RTE scripts (stage 1) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}MULTICORE_SCRATCH;" +# Calling MULTICORE_SCRATCH function: +RTE_function_0 1 +if [ $? -ne 0 ]; then + echo "Runtime MULTICORE_SCRATCH stage 1 execution failed." 1>&2 + echo "Runtime MULTICORE_SCRATCH stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -182,7 +190,7 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi -echo "Processors=1" >> "$RUNTIME_JOB_DIAG" +echo "Processors=8" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists @@ -212,6 +220,15 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}MULTICORE_SCRATCH;" +# Calling MULTICORE_SCRATCH function: +RTE_function_0 2 +if [ $? -ne 0 ]; then + echo "Runtime MULTICORE_SCRATCH stage 2 execution failed." 1>&2 + echo "Runtime MULTICORE_SCRATCH stage 2 execution failed." 
1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/std-io-test-stdout_stderr_j0000644000000000000000000000013215067751327030715 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.274759957 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/std-io-test-stdout_stderr_join.patch0000644000175000002070000000121115067751327034416 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -64,10 +64,10 @@ export 'GRID_GLOBAL_JOBINTERFACE=' export 'GRID_GLOBAL_JOBHOST=' RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ -RUNTIME_JOB_STDIN=/dev/null -RUNTIME_JOB_STDOUT=/dev/null -RUNTIME_JOB_STDERR=/dev/null +RUNTIME_JOB_STDIN=in +RUNTIME_JOB_STDOUT=@TEST_SESSION_DIR@/@TEST_JOB_ID@/out +RUNTIME_JOB_STDERR=@TEST_SESSION_DIR@/@TEST_JOB_ID@/out RUNTIME_JOB_DIAG=@TEST_SESSION_DIR@/@TEST_JOB_ID@.diag if [ ! -z "$RUNTIME_GRIDAREA_DIR" ] ; then RUNTIME_JOB_DIR=$RUNTIME_GRIDAREA_DIR/`basename $RUNTIME_JOB_DIR` nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/rte-test-rte0_nodeproperty.0000644000000000000000000000013215067751327030634 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.269620626 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/rte-test-rte0_nodeproperty.patch0000644000175000002070000000307515067751327033563 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-07-22 17:45:50.555992159 +0200 +++ b/basic-script.sh 2019-07-22 17:45:50.557992114 +0200 @@ -8,7 +8,7 @@ #SBATCH -J 'gridjob' #SBATCH --get-user-env #SBATCH -n 1 -#SBATCH +#SBATCH TEST TESTING # Overide umask of execution node (sometime values are really strange) umask 077 @@ -135,8 +138,22 @@ RESULT=0 if [ "$RESULT" = '0' ] ; then +# RunTimeEnvironment function for NODEPROPERTY: +RTE_function_0 () { +export joboption_nodeproperty_0="TEST" +export joboption_nodeproperty_1="TESTING" +} # Running RTE scripts (stage 1) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}NODEPROPERTY;" +# Calling NODEPROPERTY function: +RTE_function_0 1 +if [ $? -ne 0 ]; then + echo "Runtime NODEPROPERTY stage 1 execution failed." 1>&2 + echo "Runtime NODEPROPERTY stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -205,6 +222,15 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}NODEPROPERTY;" +# Calling NODEPROPERTY function: +RTE_function_0 2 +if [ $? -ne 0 ]; then + echo "Runtime NODEPROPERTY stage 2 execution failed." 1>&2 + echo "Runtime NODEPROPERTY stage 2 execution failed." 
1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/config-options-test-no_shar0000644000000000000000000000033415067751327030673 xustar00130 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/config-options-test-no_shared_filesystem_with_std_io.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.238470365 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/config-options-test-no_shared_filesyst0000644000175000002070000000620115067751327035023 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 11:18:43.152122917 +0100 +++ b/basic-script.sh 2014-01-28 11:18:43.144122917 +0100 @@ -64,23 +64,15 @@ export 'GRID_GLOBAL_JOBINTERFACE=' export 'GRID_GLOBAL_JOBHOST=' -RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ -RUNTIME_JOB_STDIN=/dev/null -RUNTIME_JOB_STDOUT=/dev/null -RUNTIME_JOB_STDERR=/dev/null -RUNTIME_JOB_DIAG=@TEST_SESSION_DIR@/@TEST_JOB_ID@.diag -if [ ! -z "$RUNTIME_GRIDAREA_DIR" ] ; then - RUNTIME_JOB_DIR=$RUNTIME_GRIDAREA_DIR/`basename $RUNTIME_JOB_DIR` - RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_DIAG=`echo "$RUNTIME_JOB_DIAG" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_CONTROL_DIR=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` -fi -RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-} +RUNTIME_JOB_DIR=/local/scratch/@TEST_JOB_ID@ +RUNTIME_JOB_DIAG=/local/scratch/@TEST_JOB_ID@.diag +RUNTIME_JOB_STDIN="in" +RUNTIME_JOB_STDOUT="/local/scratch/@TEST_JOB_ID@/out" +RUNTIME_JOB_STDERR="/local/scratch/@TEST_JOB_ID@/error" +RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-/local/scratch} RUNTIME_LOCAL_SCRATCH_MOVE_TOOL=${RUNTIME_LOCAL_SCRATCH_MOVE_TOOL:-mv} RUNTIME_FRONTEND_SEES_NODE=${RUNTIME_FRONTEND_SEES_NODE:-} -RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-yes} +RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-} if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! 
-z "$RUNTIME_NODE_SEES_FRONTEND" ]; then RUNTIME_NODE_JOB_DIR="$RUNTIME_LOCAL_SCRATCH_DIR"/`basename "$RUNTIME_JOB_DIR"` rm -rf "$RUNTIME_NODE_JOB_DIR" @@ -202,9 +194,9 @@ echo "Cannot run $executable: $interpreter: not found" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1; } if [ "x$JOB_ACCOUNTING" = "xgnutime" ]; then - $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR else - "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR fi RESULT=$? nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/config-options-test-no_shar0000644000000000000000000000032015067751327030666 xustar00118 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/config-options-test-no_shared_filesystem.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.237349796 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/config-options-test-no_shared_filesyst0000644000175000002070000000342615067751327035031 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 11:18:43.152122917 +0100 +++ b/basic-script.sh 2014-01-28 11:18:43.144122917 +0100 @@ -64,22 +64,14 @@ export 'GRID_GLOBAL_JOBINTERFACE=' export 'GRID_GLOBAL_JOBHOST=' -RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ -RUNTIME_JOB_STDIN=/dev/null -RUNTIME_JOB_STDOUT=/dev/null -RUNTIME_JOB_STDERR=/dev/null -RUNTIME_JOB_DIAG=@TEST_SESSION_DIR@/@TEST_JOB_ID@.diag -if [ ! 
-z "$RUNTIME_GRIDAREA_DIR" ] ; then - RUNTIME_JOB_DIR=$RUNTIME_GRIDAREA_DIR/`basename $RUNTIME_JOB_DIR` - RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_JOB_DIAG=`echo "$RUNTIME_JOB_DIAG" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` - RUNTIME_CONTROL_DIR=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` -fi -RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-} +RUNTIME_JOB_DIR=/local/scratch/@TEST_JOB_ID@ +RUNTIME_JOB_DIAG=/local/scratch/@TEST_JOB_ID@.diag +RUNTIME_JOB_STDIN="/dev/null" +RUNTIME_JOB_STDOUT="/dev/null" +RUNTIME_JOB_STDERR="/dev/null" +RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-/local/scratch} RUNTIME_LOCAL_SCRATCH_MOVE_TOOL=${RUNTIME_LOCAL_SCRATCH_MOVE_TOOL:-mv} RUNTIME_FRONTEND_SEES_NODE=${RUNTIME_FRONTEND_SEES_NODE:-} -RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-yes} +RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-} if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then RUNTIME_NODE_JOB_DIR="$RUNTIME_LOCAL_SCRATCH_DIR"/`basename "$RUNTIME_JOB_DIR"` rm -rf "$RUNTIME_NODE_JOB_DIR" nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/cpu-wall-time-test-walltime0000644000000000000000000000031115067751327030575 xustar00111 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test-walltime_count.patch 30 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.247265517 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/cpu-wall-time-test-walltime_count.patc0000644000175000002070000000117615067751327034643 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-11-04 17:19:13.898965397 +0200 +++ b/basic-script.sh 2019-11-04 17:19:13.899965382 +0200 @@ -8,8 +8,9 @@ #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env -#SBATCH -n 1 +#SBATCH -n 6 #SBATCH +#SBATCH -t 7:0 # Overide umask of execution node (sometime values are really strange) umask 077 @@ -178,7 +182,7 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi -echo "Processors=1" >> "$RUNTIME_JOB_DIAG" +echo "Processors=6" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/rte-test-rte0_runtime.patch0000644000000000000000000000013215067751327030605 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.271013526 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/rte-test-rte0_runtime.patch0000644000175000002070000000466215067751327032517 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-11-04 17:19:15.660939819 +0200 +++ b/basic-script.sh 2019-11-04 17:19:15.661939804 +0200 @@ -8,7 +8,7 @@ #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env -#SBATCH -n 1 +#SBATCH -n 6 #SBATCH # Overide umask of execution node (sometime values are really strange) @@ -137,8 +140,35 @@ RESULT=0 if [ "$RESULT" = '0' ] ; then +# RunTimeEnvironment function for RTE: +RTE_function_0 () { +export joboption_runtime_1='EXTRA_RTE' +# TODO: Maybe set options? 
+} +# RunTimeEnvironment function for EXTRA_RTE: +RTE_function_1 () { +export joboption_count=6 +} # Running RTE scripts (stage 1) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}RTE;" +# Calling RTE function: +RTE_function_0 1 +if [ $? -ne 0 ]; then + echo "Runtime RTE stage 1 execution failed." 1>&2 + echo "Runtime RTE stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + +runtimeenvironments="${runtimeenvironments}EXTRA_RTE;" +# Calling EXTRA_RTE function: +RTE_function_1 1 +if [ $? -ne 0 ]; then + echo "Runtime EXTRA_RTE stage 1 execution failed." 1>&2 + echo "Runtime EXTRA_RTE stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -178,7 +208,7 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi -echo "Processors=1" >> "$RUNTIME_JOB_DIAG" +echo "Processors=6" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists @@ -208,6 +238,24 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}RTE;" +# Calling RTE function: +RTE_function_0 2 +if [ $? -ne 0 ]; then + echo "Runtime RTE stage 2 execution failed." 1>&2 + echo "Runtime RTE stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + +runtimeenvironments="${runtimeenvironments}EXTRA_RTE;" +# Calling EXTRA_RTE function: +RTE_function_1 2 +if [ $? -ne 0 ]; then + echo "Runtime EXTRA_RTE stage 2 execution failed." 1>&2 + echo "Runtime EXTRA_RTE stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/queue-test0000644000000000000000000000013215067751327025426 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.872493757 30 ctime=1759499030.224779616 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/queue-test0000644000175000002070000000107115067751327027327 0ustar00mockbuildmock00000000000000TESTS="queue_name" simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF function test_queue_name() { read -r -d '' arc_test_configuration <<'EOF' [lrms] slurm_bin_path=@PWD@/bin [queue:nordugrid] EOF read -r -d '' job_description_input <<'EOF' &(executable = "/bin/true") (queue = "nordugrid") EOF cat "${SRCDIR}/queue-test-queue_name.patch" | patch -sf -p1 -d ${SRCDIR} -o ${testdir}/expected_lrms_job_script.tmpl } nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/rte-test-rte0_env.patch0000644000000000000000000000013215067751327027712 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.266312536 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/rte-test-rte0_env.patch0000644000175000002070000000372415067751327031622 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-07-22 17:40:18.291497489 +0200 +++ b/basic-script.sh 2019-07-22 17:40:18.293497443 +0200 @@ -58,11 +58,14 @@ if [ -z "$JOB_ACCOUNTING" ]; then echo "Failed to use both cgroups and GNU time for resource usage accounting. 
Accounting relies on LRMS information only." 1>&2 fi + # Setting environment variables as specified by user -export 'GRID_GLOBAL_JOBID=@TEST_JOB_ID@' -export 'GRID_GLOBAL_JOBURL=' -export 'GRID_GLOBAL_JOBINTERFACE=' -export 'GRID_GLOBAL_JOBHOST=' +export 'job_foo=foobar' +export 'test="TEST"' +export 'rte_foo="foobar"' +export 'GRID_GLOBAL_JOBINTERFACE=' +export 'GRID_GLOBAL_JOBHOST=' +export GRID_GLOBAL_JOBID='gsiftp://@TEST_HOSTNAME@:2811/jobs/rte0_env' RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ RUNTIME_JOB_STDIN=/dev/null @@ -140,8 +143,22 @@ RESULT=0 if [ "$RESULT" = '0' ] ; then +# RunTimeEnvironment function for ENV: +RTE_function_0 () { +export joboption_env_1="test=\"TEST\"" +export joboption_env_2="rte_foo=\"foobar\"" +} # Running RTE scripts (stage 1) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}ENV;" +# Calling ENV function: +RTE_function_0 1 +if [ $? -ne 0 ]; then + echo "Runtime ENV stage 1 execution failed." 1>&2 + echo "Runtime ENV stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -211,6 +228,15 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}ENV;" +# Calling ENV function: +RTE_function_0 2 +if [ $? -ne 0 ]; then + echo "Runtime ENV stage 2 execution failed." 1>&2 + echo "Runtime ENV stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/basic-script.sh0000644000000000000000000000013215067751327026321 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.233927099 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/basic-script.sh0000644000175000002070000003304415067751327030227 0ustar00mockbuildmock00000000000000#!/bin/bash -l # SLURM batch job script built by arex #SBATCH --no-requeue #SBATCH --export=NONE #SBATCH -e @TEST_SESSION_DIR@/@TEST_JOB_ID@.comment #SBATCH -o @TEST_SESSION_DIR@/@TEST_JOB_ID@.comment #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env #SBATCH -n 1 #SBATCH # Overide umask of execution node (sometime values are really strange) umask 077 # source with arguments for DASH shells sourcewithargs() { script=$1 shift . $script } # Record job start timestamp ACCOUNTING_STARTTIME=`date +"%s"` # Select accounting method echo "Detecting resource accounting method available for the job." 1>&2 JOB_ACCOUNTING="" # Try to use cgroups first if command -v arc-job-cgroup >/dev/null 2>&1; then echo "Found arc-job-cgroup tool: trying to initialize accounting cgroups for the job." 1>&2 while true; do # memory cgroup memory_cgroup="$( arc-job-cgroup -m -n @TEST_JOB_ID@ )" if [ $? -ne 0 -o -z "$memory_cgroup" ]; then echo "Failed to initialize memory cgroup for accounting." 1>&2 break; fi # cpuacct cgroup cpuacct_cgroup="$( arc-job-cgroup -c -n @TEST_JOB_ID@ )" if [ $? -ne 0 -o -z "$cpuacct_cgroup" ]; then echo "Failed to initialize cpuacct cgroup for accounting." 
1>&2 break; fi echo "Using cgroups method for job accounting" 1>&2 JOB_ACCOUNTING="cgroup" break; done fi # Fallback to GNU_TIME if cgroups are not working if [ -z "$JOB_ACCOUNTING" ]; then GNU_TIME='/usr/bin/time' echo "Looking for $GNU_TIME tool for accounting measurements" 1>&2 if [ ! -z "$GNU_TIME" ] && ! "$GNU_TIME" --version >/dev/null 2>&1; then echo "GNU time is not found at: $GNU_TIME" 1>&2 else echo "GNU time found and will be used for job accounting." 1>&2 JOB_ACCOUNTING="gnutime" fi fi # Nothing works: rely on LRMS only if [ -z "$JOB_ACCOUNTING" ]; then echo "Failed to use both cgroups and GNU time for resource usage accounting. Accounting relies on LRMS information only." 1>&2 fi # Setting environment variables as specified by user export 'GRID_GLOBAL_JOBID=@TEST_JOB_ID@' export 'GRID_GLOBAL_JOBURL=' export 'GRID_GLOBAL_JOBINTERFACE=' export 'GRID_GLOBAL_JOBHOST=' RUNTIME_JOB_DIR=@TEST_SESSION_DIR@/@TEST_JOB_ID@ RUNTIME_JOB_STDIN=/dev/null RUNTIME_JOB_STDOUT=/dev/null RUNTIME_JOB_STDERR=/dev/null RUNTIME_JOB_DIAG=@TEST_SESSION_DIR@/@TEST_JOB_ID@.diag if [ ! -z "$RUNTIME_GRIDAREA_DIR" ] ; then RUNTIME_JOB_DIR=$RUNTIME_GRIDAREA_DIR/`basename $RUNTIME_JOB_DIR` RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` RUNTIME_JOB_DIAG=`echo "$RUNTIME_JOB_DIAG" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` RUNTIME_CONTROL_DIR=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^$RUNTIME_JOB_DIR#$RUNTIME_GRIDAREA_DIR#"` fi RUNTIME_LOCAL_SCRATCH_DIR=${RUNTIME_LOCAL_SCRATCH_DIR:-} RUNTIME_LOCAL_SCRATCH_MOVE_TOOL=${RUNTIME_LOCAL_SCRATCH_MOVE_TOOL:-mv} RUNTIME_FRONTEND_SEES_NODE=${RUNTIME_FRONTEND_SEES_NODE:-} RUNTIME_NODE_SEES_FRONTEND=${RUNTIME_NODE_SEES_FRONTEND:-yes} if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then RUNTIME_NODE_JOB_DIR="$RUNTIME_LOCAL_SCRATCH_DIR"/`basename "$RUNTIME_JOB_DIR"` rm -rf "$RUNTIME_NODE_JOB_DIR" mkdir -p "$RUNTIME_NODE_JOB_DIR" # move directory contents for f in "$RUNTIME_JOB_DIR"/.* "$RUNTIME_JOB_DIR"/*; do [ "$f" = "$RUNTIME_JOB_DIR/*" ] && continue # glob failed, no files [ "$f" = "$RUNTIME_JOB_DIR/." ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.." ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.diag" ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.comment" ] && continue [ -f "$f" ] || continue if ! $RUNTIME_LOCAL_SCRATCH_MOVE_TOOL "$f" "$RUNTIME_NODE_JOB_DIR"; then echo "Failed to '$RUNTIME_LOCAL_SCRATCH_MOVE_TOOL' '$f' to '$RUNTIME_NODE_JOB_DIR'" 1>&2 exit 1 fi done if [ ! -z "$RUNTIME_FRONTEND_SEES_NODE" ] ; then # creating link for whole directory ln -s "$RUNTIME_FRONTEND_SEES_NODE"/`basename "$RUNTIME_JOB_DIR"` "$RUNTIME_JOB_DIR" else # keep stdout, stderr and control directory on frontend # recreate job directory mkdir -p "$RUNTIME_JOB_DIR" # make those files mkdir -p `dirname "$RUNTIME_JOB_STDOUT"` mkdir -p `dirname "$RUNTIME_JOB_STDERR"` touch "$RUNTIME_JOB_STDOUT" touch "$RUNTIME_JOB_STDERR" RUNTIME_JOB_STDOUT__=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDERR__=`echo "$RUNTIME_JOB_STDERR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` rm "$RUNTIME_JOB_STDOUT__" 2>/dev/null rm "$RUNTIME_JOB_STDERR__" 2>/dev/null if [ ! 
-z "$RUNTIME_JOB_STDOUT__" ] && [ "$RUNTIME_JOB_STDOUT" != "$RUNTIME_JOB_STDOUT__" ]; then ln -s "$RUNTIME_JOB_STDOUT" "$RUNTIME_JOB_STDOUT__" fi if [ "$RUNTIME_JOB_STDOUT__" != "$RUNTIME_JOB_STDERR__" ] ; then if [ ! -z "$RUNTIME_JOB_STDERR__" ] && [ "$RUNTIME_JOB_STDERR" != "$RUNTIME_JOB_STDERR__" ]; then ln -s "$RUNTIME_JOB_STDERR" "$RUNTIME_JOB_STDERR__" fi fi if [ ! -z "$RUNTIME_CONTROL_DIR" ] ; then # move control directory back to frontend RUNTIME_CONTROL_DIR__=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` mv "$RUNTIME_CONTROL_DIR__" "$RUNTIME_CONTROL_DIR" fi fi # adjust stdin,stdout & stderr pointers RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_FRONTEND_JOB_DIR="$RUNTIME_JOB_DIR" RUNTIME_JOB_DIR="$RUNTIME_NODE_JOB_DIR" fi if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then mkdir -p "$RUNTIME_JOB_DIR" fi RESULT=0 if [ "$RESULT" = '0' ] ; then # Running RTE scripts (stage 1) runtimeenvironments= echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then cat "$SLURM_NODEFILE" | sed 's/\(.*\)/nodename=\1/' >> "$RUNTIME_JOB_DIAG" NODENAME_WRITTEN="1" else SLURM_NODEFILE= fi fi # Detecting WN operating system for accounting purposes if [ -f "/etc/os-release" ]; then SYSTEM_SOFTWARE="$( eval $( cat /etc/os-release ); echo "${NAME} ${VERSION}" )" elif [ -f "/etc/system-release" ]; then SYSTEM_SOFTWARE="$( cat /etc/system-release )" elif command -v lsb_release >/dev/null 2>&1; then SYSTEM_SOFTWARE=$(lsb_release -ds) elif command -v hostnamectl >/dev/null 2>&1; then SYSTEM_SOFTWARE="$( hostnamectl 2>/dev/null | sed -n '/Operating System/s/^\s*Operating System:\s*//p' )" elif command -v uname >/dev/null 2>&1; then SYSTEM_SOFTWARE="Linux $( uname -r)" fi [ -n "$SYSTEM_SOFTWARE" ] && echo "systemsoftware=${SYSTEM_SOFTWARE}" >> "$RUNTIME_JOB_DIAG" if [ "$RESULT" = '0' ] ; then # Changing to session directory HOME=$RUNTIME_JOB_DIR export HOME if ! cd "$RUNTIME_JOB_DIR"; then echo "Failed to switch to '$RUNTIME_JOB_DIR'" 1>&2 RESULT=1 fi if [ ! -z "$RESULT" ] && [ "$RESULT" != 0 ]; then exit $RESULT fi # Write nodename if not already written in LRMS-specific way if [ -z "$NODENAME_WRITTEN" ] ; then nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" fi echo "Processors=1" >> "$RUNTIME_JOB_DIAG" echo "Benchmark=HEPSPEC:1.0" >> "$RUNTIME_JOB_DIAG" executable='/bin/true' # Check if executable exists if [ ! -f "$executable" ]; then echo "Path \"$executable\" does not seem to exist" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1 fi # See if executable is a script, and extract the name of the interpreter line1=$(dd if="$executable" count=1 2>/dev/null | head -n 1 | tr -d '\0') shebang=`echo $line1 | sed -n 's/^#! *//p'` interpreter=`echo $shebang | awk '{print $1}'` if [ "$interpreter" = /usr/bin/env ]; then interpreter=`echo $shebang | awk '{print $2}'`; fi # If it's a script and the interpreter is not found ... 
[ "x$interpreter" = x ] || type "$interpreter" > /dev/null 2>&1 || { echo "Cannot run $executable: $interpreter: not found" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1; } if [ "x$JOB_ACCOUNTING" = "xgnutime" ]; then $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 else "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 fi RESULT=$? fi fi # Running RTE scripts (stage 2) runtimeenvironments= # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then find ./ -type l -exec rm -f "{}" ";" chmod -R u+w "./" find ./ -type f -perm /200 -exec rm -f "{}" ";" chmod -R u+w "./" fi if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then if [ ! -z "$RUNTIME_FRONTEND_SEES_NODE" ] ; then # just move it rm -rf "$RUNTIME_FRONTEND_JOB_DIR" destdir=`dirname "$RUNTIME_FRONTEND_JOB_DIR"` if ! mv "$RUNTIME_NODE_JOB_DIR" "$destdir"; then echo "Failed to move '$RUNTIME_NODE_JOB_DIR' to '$destdir'" 1>&2 RESULT=1 fi else # remove links rm -f "$RUNTIME_JOB_STDOUT" 2>/dev/null rm -f "$RUNTIME_JOB_STDERR" 2>/dev/null # move directory contents for f in "$RUNTIME_NODE_JOB_DIR"/.* "$RUNTIME_NODE_JOB_DIR"/*; do [ "$f" = "$RUNTIME_NODE_JOB_DIR/*" ] && continue # glob failed, no files [ "$f" = "$RUNTIME_NODE_JOB_DIR/." ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.." ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.diag" ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.comment" ] && continue [ -f "$f" ] || continue if ! mv "$f" "$RUNTIME_FRONTEND_JOB_DIR"; then echo "Failed to move '$f' to '$RUNTIME_FRONTEND_JOB_DIR'" 1>&2 RESULT=1 fi done rm -rf "$RUNTIME_NODE_JOB_DIR" fi fi # Handle cgroup measurements if [ "x$JOB_ACCOUNTING" = "xcgroup" ]; then # Max memory used (total) maxmemory=$( cat "${memory_cgroup}/memory.memsw.max_usage_in_bytes" ) maxmemory=$(( (maxmemory + 1023) / 1024 )) echo "maxtotalmemory=${maxmemory}kB" >> "$RUNTIME_JOB_DIAG" # Max memory used (RAM) maxram=$( cat "${memory_cgroup}/memory.max_usage_in_bytes" ) maxram=$(( (maxram + 1023) / 1024 )) echo "maxresidentmemory=${maxram}kB" >> "$RUNTIME_JOB_DIAG" # TODO: this is for compatibilty with current A-REX accounting code. Remove when A-REX will use max value instead. 
echo "averageresidentmemory=${maxram}kB" >> "$RUNTIME_JOB_DIAG" # User CPU time if [ -f "${cpuacct_cgroup}/cpuacct.usage_user" ]; then # cgroup values are in nanoseconds user_cputime=$( cat "${cpuacct_cgroup}/cpuacct.usage_user" ) user_cputime=$(( user_cputime / 1000000 )) elif [ -f "${cpuacct_cgroup}/cpuacct.stat" ]; then # older kernels have only cpuacct.stat that use USER_HZ units user_cputime=$( cat "${cpuacct_cgroup}/cpuacct.stat" | sed -n '/^user/s/user //p' ) user_hz=$( getconf CLK_TCK ) user_cputime=$(( user_cputime / user_hz )) fi [ -n "$user_cputime" ] && echo "usertime=${user_cputime}" >> "$RUNTIME_JOB_DIAG" # Kernel CPU time if [ -f "${cpuacct_cgroup}/cpuacct.usage_sys" ]; then # cgroup values are in nanoseconds kernel_cputime=$( cat "${cpuacct_cgroup}/cpuacct.usage_sys" ) kernel_cputime=$(( kernel_cputime / 1000000 )) elif [ -f "${cpuacct_cgroup}/cpuacct.stat" ]; then # older kernels have only cpuacct.stat that use USER_HZ units kernel_cputime=$( cat "${cpuacct_cgroup}/cpuacct.stat" | sed -n '/^system/s/system //p' ) [ -z "$user_hz" ] && user_hz=$( getconf CLK_TCK ) kernel_cputime=$(( kernel_cputime / user_hz )) fi [ -n "$kernel_cputime" ] && echo "kerneltime=${kernel_cputime}" >> "$RUNTIME_JOB_DIAG" # Remove nested job accouting cgroups arc-job-cgroup -m -d arc-job-cgroup -c -d fi # Record CPU benchmarking values for WN user by the job [ -n "${ACCOUNTING_BENCHMARK}" ] && echo "benchmark=${ACCOUNTING_BENCHMARK}" >> "$RUNTIME_JOB_DIAG" # Record WN instance tag if defined [ -n "${ACCOUNTING_WN_INSTANCE}" ] && echo "wninstance=${ACCOUNTING_WN_INSTANCE}" >> "$RUNTIME_JOB_DIAG" # Record execution clock times ACCOUNTING_ENDTIME=`date +"%s"` # Mds date format (YYYYMMDDHHMMSSZ) echo "LRMSStartTime=`date -d "1970-01-01 UTC ${ACCOUNTING_STARTTIME} seconds" +"%Y%m%d%H%M%SZ"`" >> "$RUNTIME_JOB_DIAG" echo "LRMSEndTime=`date -d "1970-01-01 UTC ${ACCOUNTING_ENDTIME} seconds" +"%Y%m%d%H%M%SZ"`" >> "$RUNTIME_JOB_DIAG" echo "walltime=$(( ACCOUNTING_ENDTIME - ACCOUNTING_STARTTIME ))" >> "$RUNTIME_JOB_DIAG" # Add exit code to the accounting information and exit the job script echo "exitcode=$RESULT" >> "$RUNTIME_JOB_DIAG" exit $RESULT nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/rte-test-rte_with_options.p0000644000000000000000000000013215067751327030730 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.272160826 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/rte-test-rte_with_options.patch0000644000175000002070000000466415067751327033504 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2019-07-22 17:45:53.993914586 +0200 +++ b/basic-script.sh 2019-07-22 17:45:53.995914541 +0200 @@ -9,6 +9,7 @@ #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH --mem-per-cpu=13500 # Overide umask of execution node (sometime values are really strange) umask 077 @@ -135,8 +139,34 @@ RESULT=0 if [ "$RESULT" = '0' ] ; then +# RunTimeEnvironment function for INCREASEMEMORY_1: +RTE_function_0 () { +joboption_memory=$(( $(echo ${joboption_memory} + ${2} | tr -d \") )) +} +# RunTimeEnvironment function for INCREASEMEMORY_2: +RTE_function_1 () { +joboption_memory=$(( $(echo ${joboption_memory} + ${2} + ${2} | tr -d \") )) +} # Running RTE scripts (stage 1) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}INCREASEMEMORY_1;" +# Calling INCREASEMEMORY_1 function: +RTE_function_0 1 "2000" "--test \"TESTING\\ TEST\"" +if [ $? -ne 0 ]; then + echo "Runtime INCREASEMEMORY_1 stage 1 execution failed." 
1>&2 + echo "Runtime INCREASEMEMORY_1 stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + +runtimeenvironments="${runtimeenvironments}INCREASEMEMORY_2;" +# Calling INCREASEMEMORY_2 function: +RTE_function_1 1 "5000" +if [ $? -ne 0 ]; then + echo "Runtime INCREASEMEMORY_2 stage 1 execution failed." 1>&2 + echo "Runtime INCREASEMEMORY_2 stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -205,6 +235,24 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}INCREASEMEMORY_1;" +# Calling INCREASEMEMORY_1 function: +RTE_function_0 2 "2000" "--test \"TESTING\\ TEST\"" +if [ $? -ne 0 ]; then + echo "Runtime INCREASEMEMORY_1 stage 2 execution failed." 1>&2 + echo "Runtime INCREASEMEMORY_1 stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + +runtimeenvironments="${runtimeenvironments}INCREASEMEMORY_2;" +# Calling INCREASEMEMORY_2 function: +RTE_function_1 2 "5000" +if [ $? -ne 0 ]; then + echo "Runtime INCREASEMEMORY_2 stage 2 execution failed." 1>&2 + echo "Runtime INCREASEMEMORY_2 stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/queue-test-queue_name.patch0000644000000000000000000000013215067751327030646 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.872493757 30 ctime=1759499030.261393119 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/queue-test-queue_name.patch0000644000175000002070000000046115067751327032551 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -4,6 +4,7 @@ #SBATCH -e /my/session/directory.comment #SBATCH -o /my/session/directory.comment +#SBATCH -p nordugrid #SBATCH --nice=50 #SBATCH -J 'gridjob' #SBATCH --get-user-env nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/rte-test-rte0_arg.patch0000644000000000000000000000013215067751327027673 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.263674505 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/rte-test-rte0_arg.patch0000644000175000002070000000522015067751327031574 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 10:35:19.780083853 +0100 +++ b/basic-script.sh 2014-01-28 10:35:19.772083853 +0100 @@ -99,8 +99,23 @@ RESULT=0 if [ "$RESULT" = '0' ] ; then +# RunTimeEnvironment function for ARG: +RTE_function_0 () { +joboption_arg_0="${joboption_arg_0}/echo" +export joboption_arg_1="Hello" +export joboption_arg_2="World" +} # Running RTE scripts (stage 1) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}ARG;" +# Calling ARG function: +RTE_function_0 1 +if [ $? -ne 0 ]; then + echo "Runtime ARG stage 1 execution failed." 1>&2 + echo "Runtime ARG stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! 
"X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -124,7 +139,7 @@ nodename=`/bin/hostname -f` echo "nodename=$nodename" >> "$RUNTIME_JOB_DIAG" echo "Processors=1" >> "$RUNTIME_JOB_DIAG" -executable='/bin/true' +executable='/bin' # Check if executable exists if [ ! -f "$executable" ]; then @@ -148,9 +163,9 @@ fi if [ -z "$GNU_TIME" ] ; then - "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + "/bin" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 else - $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 fi RESULT=$? @@ -159,6 +174,15 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}ARG;" +# Calling ARG function: +RTE_function_0 2 +if [ $? -ne 0 ]; then + echo "Runtime ARG stage 2 execution failed." 1>&2 + echo "Runtime ARG stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + if [ ! 
-z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then find ./ -type l -exec rm -f "{}" ";" chmod -R u+w "./" nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/memory-test-memory_cluster_0000644000000000000000000000031515067751327031023 xustar00115 path=nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_cluster_nodememory.patch 30 mtime=1759498967.769618065 30 atime=1759498967.872493757 30 ctime=1759499030.258499218 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test-memory_cluster_nodememory.0000644000175000002070000000044315067751327035100 0ustar00mockbuildmock00000000000000--- a/basic-script.sh 2014-01-28 14:13:39.296280413 +0100 +++ b/basic-script.sh 2014-01-28 14:13:39.288280412 +0100 @@ -9,6 +9,7 @@ #SBATCH --get-user-env #SBATCH -n 1 #SBATCH +#SBATCH --mem-per-cpu=300 # Overide umask of execution node (sometime values are really strange) umask 077 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/memory-test0000644000000000000000000000013215067751327025612 xustar0030 mtime=1759498967.769618065 30 atime=1759498967.871493742 30 ctime=1759499030.222717535 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/memory-test0000644000175000002070000000440615067751327027520 0ustar00mockbuildmock00000000000000TESTS="memory_basic memory_cluster_nodememory "\ "memory_queue_defaultmemory memory_cluster_and_queue_nodememory_1 "\ "memory_cluster_and_queue_nodememory_2" simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF read -r -d '' general_arc_test_configuration <$RUNTIME_JOB_STDOUT 2>&1 + $GNU_TIME -o "$RUNTIME_JOB_DIAG" -a -f 'WallTime=%es\nKernelTime=%Ss\nUserTime=%Us\nCPUUsage=%P\nMaxResidentMemory=%MkB\nAverageResidentMemory=%tkB\nAverageTotalMemory=%KkB\nAverageUnsharedMemory=%DkB\nAverageUnsharedStack=%pkB\nAverageSharedMemory=%XkB\nPageSize=%ZB\nMajorPageFaults=%F\nMinorPageFaults=%R\nSwaps=%W\nForcedSwitches=%c\nWaitSwitches=%w\nInputs=%I\nOutputs=%O\nSocketReceived=%r\nSocketSent=%s\nSignals=%k\n' "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR else - "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>&1 + "/bin/true" <$RUNTIME_JOB_STDIN 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR fi RESULT=$? nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/PaxHeaders/user-env-test0000644000000000000000000000013215067751327026046 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499030.231640727 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/submit/user-env-test0000644000175000002070000000113315067751327027746 0ustar00mockbuildmock00000000000000TESTS="user_env" simulate_cmds="sbatch rm" # Simulate rm in order not to get job script deleted read -r -d '' simulator_output <<'EOF' rargs="/sbatch .*/" output="Submitted batch job 1" rargs="/rm .*/" output="" EOF read -r -d '' general_arc_test_configuration <&2 + echo "Runtime INCREASEMEMORY stage 1 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + echo "runtimeenvironments=$runtimeenvironments" >> "$RUNTIME_JOB_DIAG" if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then @@ -205,6 +222,15 @@ fi # Running RTE scripts (stage 2) runtimeenvironments= +runtimeenvironments="${runtimeenvironments}INCREASEMEMORY;" +# Calling INCREASEMEMORY function: +RTE_function_0 2 +if [ $? 
-ne 0 ]; then + echo "Runtime INCREASEMEMORY stage 2 execution failed." 1>&2 + echo "Runtime INCREASEMEMORY stage 2 execution failed." 1>>"${RUNTIME_JOB_STDERR}" + exit 1 +fi + # Measuring used scratch space echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG" # Cleaning up extra files in the local scratch nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356024145 xustar0030 mtime=1759498990.617091054 29 atime=1759499018.91426934 30 ctime=1759499030.194791604 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/Makefile.in0000644000175000002070000006075315067751356026063 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/slurm/test ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 
$(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = 
@GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ 
SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SUBDIRS = submit scan all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/test/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/PaxHeaders/scan0000644000000000000000000000013215067751426022746 xustar0030 mtime=1759499030.306442444 30 atime=1759499034.764510185 30 ctime=1759499030.306442444 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/scan/0000755000175000002070000000000015067751426024725 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/scan/PaxHeaders/basic-test0000644000000000000000000000013215067751327025004 xustar0030 mtime=1759498967.769509423 30 atime=1759498967.871493742 30 ctime=1759499030.307219531 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/test/scan/basic-test0000644000175000002070000001057715067751327026720 0ustar00mockbuildmock00000000000000TESTS="basic" function test_basic() { read -r -d '' arc_test_configuration <&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/slurm/test/scan ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = 
$(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = 
@GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING 
= @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ TESTS = basic-test TESTS_ENVIRONMENT = \ PYTHONPATH=$(abs_top_srcdir)/src/utils/python/ \ ARC_LOCATION=$(abs_builddir) \ SRCDIR=$(srcdir) \ $(SHELL) check_scan_script.sh scan-SLURM-job SCRIPTSNEEDED = check_scan_script.sh \ scan-SLURM-job $(pkgdatasubdir)/configure-SLURM-env.sh \ lrms_common.sh $(pkgdatasubdir)/scan_common.sh \ $(pkglibexecsubdir)/arcconfig-parser \ command-simulator.sh $(pkglibexecsubdir)/gm-kick check_SCRIPTS = $(TESTS) $(SCRIPTSNEEDED) EXTRA_DIST = $(TESTS) CLEANFILES = $(SCRIPTSNEEDED) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd 
$(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/test/scan/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/slurm/test/scan/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ col="$$grn"; \ else \ col="$$red"; \ fi; \ echo "$${col}$$dashes$${std}"; \ echo "$${col}$$banner$${std}"; \ test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ test -z "$$report" || echo "$${col}$$report$${std}"; \ echo "$${col}$$dashes$${std}"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) 
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
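# Maintainer note (hand-added explanation, not automake output; a sketch of
# how the definitions near the top of this file fit together): `make check`
# first builds $(check_SCRIPTS) -- the local copies created by the cp rules
# at the bottom of this file -- and then runs each entry of $(TESTS) through
# $(TESTS_ENVIRONMENT), so the single basic-test is effectively invoked as,
# roughly:
#
#   PYTHONPATH=$(abs_top_srcdir)/src/utils/python/ \
#   ARC_LOCATION=$(abs_builddir) SRCDIR=$(srcdir) \
#   $(SHELL) check_scan_script.sh scan-SLURM-job basic-test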
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool cscopelist-am ctags-am distclean \ distclean-generic distclean-libtool distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am .PRECIOUS: Makefile check_scan_script.sh: $(builddir)/../../../test/check_scan_script.sh cp $< $@ scan-SLURM-job: $(builddir)/../../scan-SLURM-job cp $< $@ chmod +x $@ $(pkgdatasubdir)/configure-SLURM-env.sh: $(srcdir)/../../configure-SLURM-env.sh mkdir -p $(pkgdatasubdir) cp $< $@ $(pkgdatasubdir)/scan_common.sh: $(builddir)/../../../scan_common.sh mkdir -p $(pkgdatasubdir) cp $< $@ lrms_common.sh: $(builddir)/../../../lrms_common.sh cp $< $@ $(pkglibexecsubdir)/arcconfig-parser: $(top_builddir)/src/utils/python/arcconfig-parser mkdir -p $(pkglibexecsubdir) cp $< $@ command-simulator.sh: $(top_srcdir)/src/tests/lrms/command-simulator.sh cp $< $@ chmod +x $@ $(pkglibexecsubdir)/gm-kick: $(srcdir)/../../../test/test-gm-kick.sh mkdir -p $(pkglibexecsubdir) cp $< $@ chmod +x $@ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/submit-SLURM-job.in0000644000000000000000000000013215067751327024423 xustar0030 mtime=1759498967.768492177 30 atime=1759498967.871493742 30 ctime=1759499030.169151376 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/submit-SLURM-job.in0000644000175000002070000004020415067751327026325 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Based on globus submission script for pbs # # Submits job to SLURM. # Input: path to grami file (same as Globus). # # The temporary job script is created for the submission and then removed # at the end of this script. echo "----- starting submit_slurm_job -----" 1>&2 joboption_lrms="SLURM" lrms_options="slurm_requirements slurm_wakeupperiod slurm_use_sacct slurm_bin_path" queue_options="slurm_requirements" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? 
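# Illustrative invocation (a sketch only -- the paths below are hypothetical;
# in production A-REX supplies the real arc.conf and grami file locations):
#
#   submit-SLURM-job --config /etc/arc.conf /path/to/controldir/.../grami
#
# i.e. an optional --config pointing at arc.conf, followed by the grami file
# as the single positional argument, matching the argument parsing above.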
# run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init # perflog perflogfilesub="${perflogdir}/submission.perflog" if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi # check remote or local scratch is configured check_any_scratch ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript is_cluster=true ############################################################## # Start job script ############################################################## echo "#!/bin/bash -l" > $LRMS_JOB_SCRIPT echo "# SLURM batch job script built by arex" >> $LRMS_JOB_SCRIPT # rerun is handled by GM, do not let SLURM requeue jobs itself. echo "#SBATCH --no-requeue" >> $LRMS_JOB_SCRIPT # no environment exporting from ARC CE to WN echo "#SBATCH --export=NONE" >> $LRMS_JOB_SCRIPT # write SLURM output to 'comment' file echo "#SBATCH -e ${joboption_directory}.comment">> $LRMS_JOB_SCRIPT echo "#SBATCH -o ${joboption_directory}.comment">> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT # choose queue if [ ! -z "${joboption_queue}" ] ; then echo "#SBATCH -p $joboption_queue" >> $LRMS_JOB_SCRIPT fi ############################################################## # priority ############################################################## if [ ! -z "$joboption_priority" ]; then #Slurm priority is -10000 to 10000. Lower is better. #Default is 0, and only superusers can assign priorities #less than 0. #We set the priority to 100 - ARC priority. #This will have the desired effect for all grid jobs. #Local jobs will unfortunately have a default priority equal #to ARC priority 100, but there is no way around that. priority=$((100-joboption_priority)) echo "#SBATCH --nice=${priority}" >> $LRMS_JOB_SCRIPT else #If priority is not set we set it to 50 #to match the default in the documentation. priority=50 echo "#SBATCH --nice=${priority}" >> $LRMS_JOB_SCRIPT fi # project name for accounting if [ ! -z "${joboption_rsl_project}" ] ; then echo "#SBATCH -A $joboption_rsl_project" >> $LRMS_JOB_SCRIPT fi # job name for convenience if [ ! -z "${joboption_jobname}" ] ; then #TODO is this necessary? do parts of the infosys need these limitations? jobname=`echo "$joboption_jobname" | \ sed 's/^\([^[:alpha:]]\)/N\1/' | \ sed 's/[^[:alnum:]]/_/g' | \ sed 's/\(...............\).*/\1/'` echo "#SBATCH -J '$jobname'" >> $LRMS_JOB_SCRIPT else jobname="gridjob" echo "#SBATCH -J '$jobname'" >> $LRMS_JOB_SCRIPT fi echo "SLURM jobname: $jobname" 1>&2 # Set up the user's environment on the compute node where the script # is executed. echo "#SBATCH --get-user-env" >> $LRMS_JOB_SCRIPT ############################################################## # (non-)parallel jobs ############################################################## set_count nodes_string="#SBATCH -n ${joboption_count}" echo "$nodes_string" >> $LRMS_JOB_SCRIPT if [ ! -z $joboption_countpernode ] && [ $joboption_countpernode -gt 0 ] ; then echo "#SBATCH --ntasks-per-node $joboption_countpernode" >> $LRMS_JOB_SCRIPT fi nodes_string="#SBATCH " i=0 eval "var_is_set=\${joboption_nodeproperty_$i+yes}" while [ !
-z "${var_is_set}" ] ; do eval "var_value=\${joboption_nodeproperty_$i}" nodes_string="${nodes_string} ${var_value}" i=$(( $i + 1 )) eval "var_is_set=\${joboption_nodeproperty_$i+yes}" done echo "$nodes_string" >> $LRMS_JOB_SCRIPT if [ "$joboption_exclusivenode" = "true" ]; then echo "#SBATCH --exclusive" >> $LRMS_JOB_SCRIPT fi ############################################################## # Execution times (minutes) ############################################################## if [ ! -z "$joboption_cputime" ] ; then if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi # this is actually walltime deduced from cputime ! maxcputime=$(( $joboption_cputime / $joboption_count )) cputime_min=$(( $maxcputime / 60 )) cputime_sec=$(( $maxcputime - $cputime_min * 60 )) echo "#SBATCH -t ${cputime_min}:${cputime_sec}" >> $LRMS_JOB_SCRIPT fi if [ -z "$joboption_walltime" ] ; then if [ ! -z "$joboption_cputime" ] ; then # Set walltime for backward compatibility or incomplete requests joboption_walltime=$(( $maxcputime * $walltime_ratio )) fi fi if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" walltime_min=$(( $maxwalltime / 60 )) walltime_sec=$(( $maxwalltime - $walltime_min * 60 )) echo "#SBATCH -t ${walltime_min}:${walltime_sec}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Requested memory (mb) ############################################################## set_req_mem if [ ! -z "$joboption_memory" ] ; then echo "#SBATCH --mem-per-cpu=${joboption_memory}" >> $LRMS_JOB_SCRIPT fi ############################################################## # Additional SLURM requirements ############################################################## requirements_string="" i=0 eval "var_is_set=\${CONFIG_slurm_requirements_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${CONFIG_slurm_requirements_$i}" requirements_string="${requirements_string} ${var_value}" i=$(( $i + 1 )) eval "var_is_set=\${CONFIG_slurm_requirements_$i+yes}" done if [ ! -z "$requirements_string" ]; then echo "#SBATCH $requirements_string" >> $LRMS_JOB_SCRIPT fi if [ ! 
-z "$CONFIG_slurm_requirements" ] && [ "x$CONFIG_slurm_requirements" != "x__array__" ] ; then echo "#SBATCH $CONFIG_slurm_requirements" >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT echo " " >> $LRMS_JOB_SCRIPT sourcewithargs_jobscript ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, # there is no sense to check for executable if files are # downloaded directly to computing node ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "Submission: Job description error." exit 1 fi ###################################################################### # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes ###################################################################### if [ ! -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then setup_runtime_env else echo "RUNTIME_JOB_DIR=$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=$RUNTIME_LOCAL_SCRATCH_DIR/${joboption_gridid}.diag" >> $LRMS_JOB_SCRIPT RUNTIME_STDIN_REL=`echo "${joboption_stdin}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDOUT_REL=`echo "${joboption_stdout}" | sed "s#^${joboption_directory}/*##"` RUNTIME_STDERR_REL=`echo "${joboption_stderr}" | sed "s#^${joboption_directory}/*##"` if [ "$RUNTIME_STDIN_REL" = "${joboption_stdin}" ] ; then echo "RUNTIME_JOB_STDIN=\"${joboption_stdin}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDIN=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDIN_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDOUT_REL" = "${joboption_stdout}" ] ; then echo "RUNTIME_JOB_STDOUT=\"${joboption_stdout}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDOUT=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDOUT_REL\"" >> $LRMS_JOB_SCRIPT fi if [ "$RUNTIME_STDERR_REL" = "${joboption_stderr}" ] ; then echo "RUNTIME_JOB_STDERR=\"${joboption_stderr}\"" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_JOB_STDERR=\"$RUNTIME_LOCAL_SCRATCH_DIR/$joboption_gridid/$RUNTIME_STDERR_REL\"" >> $LRMS_JOB_SCRIPT fi fi ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Skip execution if something already failed ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration at computing node ############################################################## RTE_stage1 ############################################################## # Diagnostics ############################################################## echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! "X$SLURM_NODEFILE" = 'X' ] ; then if [ -r "$SLURM_NODEFILE" ] ; then cat "$SLURM_NODEFILE" | sed 's/\(.*\)/nodename=\1/' >> "$RUNTIME_JOB_DIAG" NODENAME_WRITTEN="1" else SLURM_NODEFILE= fi fi EOSCR ############################################################## # Accounting (WN OS Detection) ############################################################## detect_wn_systemsoftware ############################################################## # Check intermediate result again ############################################################## echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Execution ############################################################## cd_and_run ############################################################## # End of RESULT checks ############################################################## echo "fi" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## RTE_stage2 ############################################################## # Upload output files ############################################################## clean_local_scratch_dir_output ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR # !!!!!!!!!!!!!!!!!!! would be better to know the names of files !!!!!!!!!!! ############################################################## move_files_to_frontend ############################################################## # Finish accounting and exit job ############################################################## accounting_end ####################################### # Submit the job ####################################### echo "SLURM job script built" 1>&2 # Execute sbatch command cd "$joboption_directory" echo "SLURM script follows:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 SLURM_RESULT=1 SLURM_TRIES=0 # job creation finished if [ !
-z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-slurm-job, JobScriptCreation: $t" >> $perflogfilesub fi while [ "$SLURM_TRIES" -lt '10' ] ; do # Unset all environment variables before calling sbatch. Otherwise # SLURM will forward them to the job and leak information about arex. # Only unset lines with assignments. # Risks unsetting variables in sub assignments, but this is likely harmless. # TODO: Maybe we only should unset $ARC_*, $CONFIG_*, $GLOBUS_* etc? if [ ! -z "$perflogdir" ]; then start_ts=`date +%s.%N` fi (for i in $(env|grep '^[A-Za-z][A-Za-z0-9]*='|grep -v "LRMS_JOB_SCRIPT"|cut -d= -f1);do unset $i;done; \ ${sbatch} $LRMS_JOB_SCRIPT) 1>$LRMS_JOB_OUT 2>$LRMS_JOB_ERR SLURM_RESULT="$?" if [ ! -z "$perflogdir" ]; then stop_ts=`date +%s.%N` t=`awk "BEGIN { printf \"%.3f\", ${stop_ts}-${start_ts} }"` echo "[`date +%Y-%m-%d\ %T`] submit-slurm-job, JobSubmission: $t" >> $perflogfilesub fi if [ "$SLURM_RESULT" -eq '0' ] ; then break ; fi if [ "$SLURM_RESULT" -eq '198' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 SLURM_TRIES=0 continue fi grep 'maximum number of jobs' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 SLURM_TRIES=0 continue fi # A rare SLURM error, but may cause chaos in the information/accounting system grep 'unable to accept job' "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" if [ $? -eq '0' ] ; then echo "Waiting for queue to decrease" 1>&2 sleep 60 SLURM_TRIES=0 continue fi SLURM_TRIES=$(( $SLURM_TRIES + 1 )) sleep 2 done if [ $SLURM_RESULT -eq '0' ] ; then #TODO test what happens when the jobqueue is full or when the slurm ctld is not responding # SLURM 1.x and 2.2.x outputs the jobid into STDERR and STDOUT respectively. Concat them, # and let sed sort it out. From the exit code we know that the job was submitted, so this # is safe. Ulf Tigerstedt 1.5.2011 # This is unfortunately not safe. Cray's SLURM sbatch returns 0, when it fails to submit a job. 22.1.2015 job_id=`cat $LRMS_JOB_OUT $LRMS_JOB_ERR |sed -e 's/^\(sbatch: \)\{0,1\}Submitted batch job \([0-9]*\)$/\2/'` if expr match "${job_id}" '[0-9][0-9]*' >/dev/null; then echo "joboption_jobid=$job_id" >> $GRAMI_FILE echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 # Remove temporary job script file rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR echo "----- exiting submit_slurm_job -----" 1>&2 echo "" 1>&2 exit 0 else echo "job *NOT* submitted successfully!" 1>&2 echo "failed getting the slurm jobid for the job!" 1>&2 echo "Instead got: ${job_id}" 1>&2 echo "Submission: Local submission client behaved unexpectedly." fi else echo "job *NOT* submitted successfully!" 1>&2 echo "got error code from sbatch: $SLURM_RESULT !" 1>&2 echo "Submission: Local submission client failed." fi echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 echo "Error output is:" cat $LRMS_JOB_ERR 1>&2 rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" echo "----- exiting submit_slurm_job -----" 1>&2 echo "" 1>&2 exit 1 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/cancel-SLURM-job.in0000644000000000000000000000013015067751327024343 xustar0030 mtime=1759498967.768492177 30 atime=1759498967.871493742 28 ctime=1759499030.1668396 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/cancel-SLURM-job.in0000644000175000002070000000163615067751327026255 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in SLURM. 
# Input: grami file (same as Globus) echo "----- starting cancel_slurm_job -----" 1>&2 joboption_lrms="SLURM" lrms_options="slurm_wakeupperiod slurm_use_sacct slurm_bin_path" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # scancel verify_jobid $joboption_jobid || exit 1 echo "executing scancel with job id $joboption_jobid" 1>&2 "${scancel}" "${joboption_jobid}" if [ "$?" != "0" ];then echo "scancel failed" 1>&2 fi echo "----- exiting cancel_slurm_job -----" 1>&2 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/PaxHeaders/configure-SLURM-env.sh0000644000000000000000000000013115067751327025122 xustar0030 mtime=1759498967.768492177 30 atime=1759498967.871493742 29 ctime=1759499030.16455267 nordugrid-arc-7.1.1/src/services/a-rex/lrms/slurm/configure-SLURM-env.sh0000755000175000002070000000173515067751327027036 0ustar00mockbuildmock00000000000000# # set environment variables: # # Conditionally enable performance logging init_perflog # Path to slurm commands SLURM_BIN_PATH=${CONFIG_slurm_bin_path:-/usr/bin} if [ ! -d ${SLURM_BIN_PATH} ] ; then echo "Could not set SLURM_BIN_PATH." 1>&2 exit 1 fi # Paths to SLURM commands squeue="$SLURM_BIN_PATH/squeue" scontrol="$SLURM_BIN_PATH/scontrol" sinfo="$SLURM_BIN_PATH/sinfo" scancel="$SLURM_BIN_PATH/scancel" sbatch="$SLURM_BIN_PATH/sbatch" sacct="$SLURM_BIN_PATH/sacct" # Verifies that a SLURM jobid is set and is an integer verify_jobid () { joboption_jobid="$1" # Verify that the jobid is somewhat sane. if [ -z ${joboption_jobid} ];then echo "error: joboption_jobid is not set" 1>&2 return 1 fi # jobid in slurm is always an integer, so anything else is an error. if [ "x" != "x$(echo ${joboption_jobid} | sed s/[0-9]//g )" ];then echo "error: non-numeric characters in joboption_jobid: ${joboption_jobid}" 1>&2 return 1 fi return 0 } nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/fork0000644000000000000000000000013215067751426020642 xustar0030 mtime=1759499030.012437977 30 atime=1759499034.764510185 30 ctime=1759499030.012437977 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/0000755000175000002070000000000015067751426022621 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022753 xustar0030 mtime=1759498967.765899957 30 atime=1759498967.870493727 30 ctime=1759499030.006217174 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/Makefile.am0000644000175000002070000000015215067751327024653 0ustar00mockbuildmock00000000000000dist_pkgdata_DATA = configure-fork-env.sh pkgdata_SCRIPTS = scan-fork-job submit-fork-job cancel-fork-job nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/submit-fork-job.in0000644000000000000000000000013115067751327024260 xustar0030 mtime=1759498967.766520106 30 atime=1759498967.870493727 29 ctime=1759499030.01227137 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/submit-fork-job.in0000644000175000002070000001620415067751327026166 0ustar00mockbuildmock00000000000000#!/bin/bash # set -x # # Input: path to grami file (same as Globus). # This script creates a temporary job script and runs it.
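# Rough sketch of the temporary job script assembled below (illustrative
# only; the actual contents depend on the job description and RTEs):
#
#   #!/bin/sh
#   # Fork job script built by arex
#   ulimit -t <maxwalltime>    # only emitted if a time limit was requested
#   umask 077
#   RESULT=0
#   if [ "$RESULT" = '0' ] ; then
#     # RTE stage 1, cd to the session directory, run the executable
#   fi
#   # move files back to the session directory, finish accounting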
echo "----- starting submit_fork_job -----" 1>&2 joboption_lrms="fork" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common submit functions . "${pkgdatadir}/submit_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env # * set common variables common_init # always local RUNTIME_NODE_SEES_FRONTEND=yes ############################################################## # Zero stage of runtime environments ############################################################## RTE_stage0 ############################################################## # create job script ############################################################## mktempscript chmod u+x ${LRMS_JOB_SCRIPT} ############################################################## # Start job script ############################################################## echo '#!/bin/sh' > $LRMS_JOB_SCRIPT echo "# Fork job script built by arex" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # non-parallel jobs ############################################################## set_count ############################################################## # Execution times (obtained in seconds) ############################################################## if [ ! -z "$joboption_walltime" ] ; then if [ $joboption_walltime -lt 0 ] ; then echo 'WARNING: Less than 0 wall time requested: $joboption_walltime' 1>&2 joboption_walltime=0 echo 'WARNING: wall time set to 0' 1>&2 fi maxwalltime="$joboption_walltime" elif [ ! -z "$joboption_cputime" ] ; then if [ $joboption_cputime -lt 0 ] ; then echo 'WARNING: Less than 0 cpu time requested: $joboption_cputime' 1>&2 joboption_cputime=0 echo 'WARNING: cpu time set to 0' 1>&2 fi maxwalltime="$joboption_cputime" fi if [ ! -z "$maxwalltime" ] ; then echo "ulimit -t $maxwalltime" >> $LRMS_JOB_SCRIPT fi sourcewithargs_jobscript ############################################################## # Override umask ############################################################## echo "" >> $LRMS_JOB_SCRIPT echo "# Overide umask of execution node (sometime values are really strange)" >> $LRMS_JOB_SCRIPT echo "umask 077" >> $LRMS_JOB_SCRIPT ############################################################## # Init accounting ############################################################## accounting_init ############################################################## # Add environment variables ############################################################## add_user_env ############################################################## # Check for existance of executable, ############################################################## if [ -z "${joboption_arg_0}" ] ; then echo 'Executable is not specified' 1>&2 exit 1 fi setup_runtime_env ############################################################## # Add std... 
to job arguments ############################################################## include_std_streams ############################################################## # Move files to local working directory (job is done on node only) # RUNTIME_JOB_DIR -> RUNTIME_LOCAL_SCRATCH_DIR/job_id ############################################################## move_files_to_node echo "" >> $LRMS_JOB_SCRIPT echo "RESULT=0" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT echo "if [ \"\$RESULT\" = '0' ] ; then" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime configuration ############################################################## RTE_stage1 echo "echo \"runtimeenvironments=\$runtimeenvironments\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT ##################################################### # Accounting (WN OS Detection) ##################################################### detect_wn_systemsoftware ##################################################### # Go to working dir and start job ##################################################### # Set the nice value (20 to -20) based on priority (1 to 100) # Note: negative values are normally only settable by superusers priority=$joboption_priority if [ ! -z $priority ]; then if [ `id -u` = '0' ]; then nicevalue=$[ 20 - ($priority * 2 / 5) ] else nicevalue=$[ 20 - ($priority / 5) ] fi joboption_args="nice -n $nicevalue $joboption_args" fi cd_and_run echo "fi" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT ############################################################## # Runtime (post)configuration at computing node ############################################################## RTE_stage2 ############################################################## # Move files back to session directory (job is done on node only) # RUNTIME_LOCAL_SCRATCH_DIR/job_id -> RUNTIME_JOB_DIR ############################################################## move_files_to_frontend ############################################################## # Finish accounting and exit job ############################################################## accounting_end ####################################### # watcher process ####################################### JOB_ID= cleanup() { [ -n "$JOB_ID" ] && kill -9 $JOB_ID 2>/dev/null # remove temp files rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" } watcher() { "$1" > "$2" 2>&1 & rc=$? JOB_ID=$! export JOB_ID trap cleanup 0 1 2 3 4 5 6 7 8 10 12 15 if [ $rc -ne 0 ]; then echo "FAIL" > "$3" exit 1 else echo "OK" > "$3" wait $JOB_ID fi } ####################################### # Submit the job ####################################### echo "job script ${LRMS_JOB_SCRIPT} built:" 1>&2 echo "-------------------------------------------------------------------" 1>&2 cat "$LRMS_JOB_SCRIPT" 1>&2 echo "-------------------------------------------------------------------" 1>&2 echo "" 1>&2 # simple queuing system: make hard reference to the queue cd "$joboption_directory" 1>&2 || { echo "Could not cd to $joboption_directory, aborting" && exit 1; } # Bash (but not dash) needs the parentheses, otherwise 'trap' has no effect! ( watcher "$LRMS_JOB_SCRIPT" "${joboption_directory}.comment" "$LRMS_JOB_ERR"; ) & job_id=$! result= while [ -z "$result" ]; do sleep 1 result=`cat $LRMS_JOB_ERR` done case "$result" in OK) echo "job submitted successfully!" 1>&2 echo "local job id: $job_id" 1>&2 echo "joboption_jobid=$job_id" >> $GRAMI_FILE rc=0 ;; *) echo "job *NOT* submitted successfully!"
1>&2 echo "" 1>&2 echo "Output is:" 1>&2 cat $LRMS_JOB_OUT 1>&2 rm -f $LRMS_JOB_SCRIPT $LRMS_JOB_OUT $LRMS_JOB_ERR rc=1 ;; esac rm "$LRMS_JOB_ERR" echo "----- exiting submit_fork_job -----" 1>&2 echo "" 1>&2 exit $rc nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356022766 xustar0030 mtime=1759498990.390579559 30 atime=1759499018.318260283 30 ctime=1759499030.008574925 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/Makefile.in0000644000175000002070000005535015067751356024700 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/lrms/fork ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 
$(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = scan-fork-job submit-fork-job cancel-fork-job CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(dist_pkgdata_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/cancel-fork-job.in \ $(srcdir)/scan-fork-job.in $(srcdir)/submit-fork-job.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = 
@ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = 
@LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename 
= @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkgdata_DATA = configure-fork-env.sh pkgdata_SCRIPTS = scan-fork-job submit-fork-job cancel-fork-job all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/lrms/fork/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/lrms/fork/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): scan-fork-job: $(top_builddir)/config.status $(srcdir)/scan-fork-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ submit-fork-job: $(top_builddir)/config.status $(srcdir)/submit-fork-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ cancel-fork-job: $(top_builddir)/config.status $(srcdir)/cancel-fork-job.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files 
'$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dist_pkgdataDATA install-pkgdataSCRIPTS install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkgdataDATA uninstall-pkgdataSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataSCRIPTS install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/scan-fork-job.in0000644000000000000000000000013215067751327023702 xustar0030 mtime=1759498967.766520106 30 atime=1759498967.870493727 30 ctime=1759499030.011000625 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/scan-fork-job.in0000644000175000002070000001176615067751327025617 0ustar00mockbuildmock00000000000000#!/bin/bash # # Periodically monitor for jobs which has finished or failed but not # reported an exitcode # id=`id -u` #debug='eval echo >> @tmp_dir@/parse-fork-log.$id' debug=: $debug "run at `date`" $debug "options = $@" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi if [ -z "$1" ] ; then echo "Controldir argument missing" 1>&2 ; exit 1 ; fi # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # include common scan functions . "${pkgdatadir}/scan_common.sh" || exit $? 
# no arc.conf parsing is needed here, nor any LRMS-specific config #common_init GMKICK=${pkglibexecdir}/gm-kick # Prints the uid of the owner of the file given as argument # Perl is used because it's more portable than using the stat command printuid () { code='my @s = stat($ARGV[0]); print($s[4] || "")' /usr/bin/perl -we "$code" "$1" } control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } # # Attempts to switch to the uid passed as the first argument and then runs the # commands passed as the second argument in a shell. The remaining arguments # are passed as arguments to the shell. No warning is given in case switching # uid is not possible. # do_as_uid () { test $# -ge 2 || { log "do_as_uid requires 2 arguments"; return 1; } script='use English; my ($uid, @args) = @ARGV; if ( $UID == 0 && $uid ) { eval { $UID = $uid }; print STDERR "Cannot switch to uid($UID): $@\n" if $UID != $uid; } system("@posix_shell@","-c",@args); exit ($?>>8||128+($?&127)); ' /usr/bin/perl -we "$script" "$@" } # Append .comment (containing STDOUT & STDERR of the job wrapper) to .errors save_commentfile () { uid=$1 commentfile=$2 errorsfile=$3 action=" { echo '---------- Output of the job wrapper script -----------' cat '$commentfile' 2> /dev/null echo '------------------------- End of output -------------------------' } >> '$errorsfile' " do_as_uid "$uid" "$action" } for control_dir in "$@" ; do if [ ! -d "${control_dir}" ]; then echo "No control dir $control_dir" 1>&2 continue fi # iterate over all jobs known in the control directory find "${control_dir}/processing" -name '*.status' \ | xargs egrep -l "INLRMS|CANCELING" \ | sed -e 's/.*\/\([^\/]*\)$/\1/' -e 's/\.status$//' \ | while read job; do $debug "scanning job = $job" unset joboption_jobid unset joboption_directory lrms_file=$(control_path "${control_dir}" "${job}" "lrms_done") grami_file=$(control_path "${control_dir}" "${job}" "grami") local_file=$(control_path "${control_dir}" "${job}" "local") # this job was already completed, nothing remains to be done [ -f "${lrms_file}" ] && continue # a grami file exists for all jobs that GM thinks are running. # proceed to next job if this file is missing. if [ ! -f "${grami_file}" ]; then continue fi # extract process IDs from the grami file [ ! -f "${grami_file}" ] && continue . "${grami_file}" # process IDs could not be learned, proceed to the next job [ -z "$joboption_jobid" ] && continue $debug "local jobid = $joboption_jobid" # checking if process is still running if ps -ouser= -p$joboption_jobid > /dev/null; then $debug "ps returned $? - process $joboption_jobid of job $job is still running. Continuing to next" continue else $debug "ps returned $? - process $joboption_jobid of job $job has exited!"
fi uid=$(printuid "${local_file}") $debug "local user id = $uid" diagfile=${joboption_directory}.diag $debug "checking $diagfile" exitcode=$(do_as_uid "$uid" "cat '$diagfile'" | sed -n 's/^exitcode=\([0-9]*\).*/\1/p') $debug "exitcode = [$exitcode] extracted from $diagfile" fork_comment="" if [ -z "$joboption_arg_code" ] ; then joboption_arg_code='0' ; fi if [ -z "$exitcode" ]; then echo "Job $job with PID $joboption_jobid died unexpectedly" 1>&2 fork_comment="Job died unexpectedly" 1>&2 exitcode=-1 elif [ "$exitcode" -ne "$joboption_arg_code" ]; then fork_comment="Job finished with wrong exit code - $exitcode != $joboption_arg_code" 1>&2 fi $debug "got exitcode=$exitcode" errors_file=$(control_path "${control_dir}" "${job}" "errors") save_commentfile "$uid" "${joboption_directory}.comment" "${errors_file}" echo "$exitcode $fork_comment" > "${lrms_file}" "$GMKICK" -j "${job}" "${control_dir}" done done $debug "done, going to sleep" sleep 10 exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/cancel-fork-job.in0000644000000000000000000000013215067751327024203 xustar0030 mtime=1759498967.766520106 30 atime=1759498967.870493727 30 ctime=1759499030.009850703 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/cancel-fork-job.in0000644000175000002070000000564615067751327026120 0ustar00mockbuildmock00000000000000#!@posix_shell@ # set -x # # Cancel job running in FORK. # control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } echo "----- starting cancel_fork_job -----" 1>&2 trap 'echo "----- exiting cancel_fork_job -----" 1>&2; echo "" 1>&2' EXIT joboption_lrms="fork" # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi GRAMI_FILE=$1 # define paths and config parser basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? . "${basedir}/lrms_common.sh" # load common cancel functions . "${pkgdatadir}/cancel_common.sh" || exit $? # run common init # * parse grami # * parse config # * load LRMS-specific env common_init # fork cancel if [ -z "$joboption_controldir" ] ; then joboption_controldir=`dirname "$GRAMI_FILE"` joboption_controldir=`dirname "$joboption_controldir"` joboption_controldir=`dirname "$joboption_controldir"` joboption_controldir=`dirname "$joboption_controldir"` joboption_controldir=`dirname "$joboption_controldir"` if [ "$joboption_controldir" = '.' ] ; then joboption_controldir="$PWD" fi fi if [ -z "$joboption_gridid" ] ; then joboption_gridid=`echo "$GRAMI_FILE" | sed 's/.*\([^\/]*\)\/\([^\/]*\)\/\([^\/]*\)\/\([^\/]*\)\/grami$/\1\2\3\4/'` fi job_control_dir="$joboption_controldir" job_local_file=$(control_path "${joboption_controldir}" "${joboption_gridid}" "local") if [ ! -r "$job_local_file" ]; then echo "Local description of job ${joboption_gridid} not found at '$job_local_file'. Job was not killed, if running at all." 
1>&2 exit 1 fi if [ -z "$joboption_jobid" ] ; then joboption_jobid=`cat "$job_local_file" | grep '^localid=' | sed 's/^localid=//'` fi echo "Deleting job $joboption_gridid, local id $joboption_jobid" 1>&2 job_control_subdir= if [ -r "$job_control_dir/accepting/${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/accepting" elif [ -r "$job_control_dir/processing/${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/processing" elif [ -r "$job_control_dir/finished/${joboption_gridid}.status" ]; then job_control_subdir="$job_control_dir/finished" else echo "Status file of job ${joboption_gridid} not found in '$job_control_dir'. Job was not killed, if running at all." 1>&2 exit 1 fi case X`cat "$job_control_subdir/${joboption_gridid}.status"` in XINLRMS | XCANCELING) if [ -z "$joboption_jobid" ] ; then echo "Can't find local id of job" 1>&2 exit 1 fi kill -TERM $joboption_jobid sleep 5 kill -KILL $joboption_jobid ;; XFINISHED | XDELETED) echo "Job already died, won't do anything" 1>&2 ;; *) echo "Job is in an unkillable state" 1>&2 ;; esac exit 0 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/configure-fork-env.sh0000644000000000000000000000013215067751327024761 xustar0030 mtime=1759498967.766520106 30 atime=1759498967.870493727 30 ctime=1759499030.007457287 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/configure-fork-env.sh0000644000175000002070000000010115067751327026653 0ustar00mockbuildmock00000000000000# # set fork environment variables: # # Script returned ok true nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/PaxHeaders/README0000644000000000000000000000013215067751327021577 xustar0030 mtime=1759498967.766520106 30 atime=1759498967.870493727 30 ctime=1759499030.013634868 nordugrid-arc-7.1.1/src/services/a-rex/lrms/fork/README0000644000175000002070000000002615067751327023477 0ustar00mockbuildmock00000000000000Fork control scripts. nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/community_rtes.sh0000644000000000000000000000013215067751327023373 xustar0030 mtime=1759498967.765456607 30 atime=1759498967.869493711 30 ctime=1759499029.914007838 nordugrid-arc-7.1.1/src/services/a-rex/lrms/community_rtes.sh0000644000175000002070000000265715067751327025275 0ustar00mockbuildmock00000000000000COMMUNITY_RTES=1 COMMUNITY_RTES_SW_SUBDIR="_software" community_software_prepare () { # skip if this is not a community-defined RTE [ -e "${rte_params_path}.community" ] || return # source community deploy-time parameters source "${rte_params_path}.community" 1>&2 # check software directory is defined if [ -z "${SOFTWARE_DIR}" ]; then echo "ERROR: SOFTWARE_DIR is not defined for ${rte_name} community RTE. Failed to prepare software files." 1>&2 exit 1 fi # software location in the sessiondir RUNTIME_SOFTWARE_DIR="${joboption_directory}/${COMMUNITY_RTES_SW_SUBDIR}/${rte_name}" mkdir -p "${RUNTIME_SOFTWARE_DIR%/*}" # copy software if not shared, link if shared if [ "$SOFTWARE_SHARED" != "True" ]; then echo "Copying community software for RTE ${rte_name} into job directory." 1>&2 cp -rv "${SOFTWARE_DIR}" "${RUNTIME_SOFTWARE_DIR}" else echo "Linking community software for RTE ${rte_name} into job directory."
1>&2 ln -sf "${SOFTWARE_DIR}" "${RUNTIME_SOFTWARE_DIR}" fi # define ${RUNTIME_JOB_SWDIR} for RTE stage 0 RUNTIME_JOB_SWDIR="\${RUNTIME_JOB_DIR}/${COMMUNITY_RTES_SW_SUBDIR}/${rte_name}" } community_software_environment () { # skip if this is not a community-defined RTE [ -e "${rte_params_path}.community" ] || return # define RUNTIME_JOB_SWDIR for RTE stage 1/2 in the jobscript echo "RUNTIME_JOB_SWDIR=\"\${RUNTIME_JOB_DIR}/${COMMUNITY_RTES_SW_SUBDIR}/${rte_name}\"" } nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/lrms_common.sh.in0000644000000000000000000000013215067751327023244 xustar0030 mtime=1759498967.766756242 30 atime=1759498967.870493727 30 ctime=1759499029.916134972 nordugrid-arc-7.1.1/src/services/a-rex/lrms/lrms_common.sh.in0000644000175000002070000001256315067751327025155 0ustar00mockbuildmock00000000000000# # Common LRMS functions # # packaging-dependent paths pkglibexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@" pkgdatadir="${ARC_LOCATION:-@prefix@}/@pkgdatasubdir@" # posix shell POSIX_SHELL="@posix_shell@" # arc.conf location if [ -z "$ARC_CONFIG" ]; then prefix="@prefix@" ARC_CONFIG="@sysconfdir@/arc.conf" unset prefix fi # sanity checks (should always be set) if [ -z "$pkgdatadir" ]; then echo 'pkgdatadir must be set' 1>&2; exit 1; fi if [ -z "$pkglibexecdir" ]; then echo 'pkglibexecdir must be set' 1>&2; exit 1; fi parse_arc_conf () { # [common] used for performance logging only common_options="enable_perflog_reporting perflogdir hostname" # [arex] block needed for directories setup arex_options="controldir scratchdir shared_filesystem shared_scratch runtimedir" # [lrms] block for most of configuration lrms_options="gnu_time nodename defaultmemory benchmark movetool $lrms_options" # [queue] block is used to fetch per-queue memory limits, benchmarks and customizations queue_options="defaultmemory benchmark $queue_options" # define blocks to loop over blocks="common arex lrms" if [ -n "$joboption_queue" ]; then blocks="$blocks queue:$joboption_queue" fi for block in $blocks; do # construct options filter for block eval "block_options=\${${block%%:*}_options}" optfilter="" for opt in $block_options; do optfilter="$optfilter -f $opt" done # parse options (assumes runconfig comes from a-rex) eval $( $pkglibexecdir/arcconfig-parser --load -r ${ARC_CONFIG} --export bash -b $block $optfilter ) done # cleanup env unset opt optfilter block blocks arex_options common_options } parse_grami_file () { arg_file=$1 # some legacy sanity checks (TODO: consider removing) usage="usage: `basename $0` (|-h|--help)" if [ -z "$arg_file" ] ; then echo "Arguments file should be specified" 1>&2 echo "$usage" 1>&2 exit 1 fi if [ "--help" = "$1" -o "-h" = "$1" ]; then echo "$usage" 1>&2 cat <<EOHELP 1>&2 This script should not be executed directly but it is called from the A-REX. EOHELP exit 1 fi # check grami file exists if [ ! -f $arg_file ] ; then echo "No such arguments file at '$arg_file'" 1>&2 echo "$usage" 1>&2 exit 1 fi # source grami files . $arg_file # exit if sourcing is enough [ -n "$no_grami_extra_processing" ] && return 0 # or do extra post-processing of grami file content (needed for submit) if [ -z "$joboption_controldir" ] ; then joboption_controldir=`dirname "$arg_file"` if [ "$joboption_controldir" = '.'
] ; then joboption_controldir="$PWD" fi fi if [ -z "$joboption_gridid" ] ; then joboption_gridid=`basename "$arg_file" | sed 's/^job\.\(.*\)\.grami$/\1/'` fi # combine arguments to command - easier to use arg_i=0 joboption_args= eval "var_is_set=\${joboption_arg_$arg_i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_arg_$arg_i}" # Use -- to avoid echo eating arguments it understands var_value=`echo -- "$var_value" |cut -f2- -d' '| sed 's/\\\\/\\\\\\\\/g' | sed 's/"/\\\"/g'` joboption_args="$joboption_args \"${var_value}\"" arg_i=$(( $arg_i + 1 )) eval "var_is_set=\${joboption_arg_$arg_i+yes}" done unset arg_file usage arg_i var_is_set var_value } init_lrms_env () { # User runtime scripts location (now relevant for unit tests only) RUNTIME_CONFIG_DIR=$CONFIG_runtimedir export RUNTIME_CONFIG_DIR # Description of (cross-)mounted disc space on cluster RUNTIME_FRONTEND_SEES_NODE=$CONFIG_shared_scratch RUNTIME_NODE_SEES_FRONTEND=$CONFIG_shared_filesystem RUNTIME_LOCAL_SCRATCH_DIR=$CONFIG_scratchdir RUNTIME_LOCAL_SCRATCH_MOVE_TOOL=${CONFIG_movetool:-mv} #default is NFS if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then RUNTIME_NODE_SEES_FRONTEND=yes fi # locally empty means no if [ "${RUNTIME_NODE_SEES_FRONTEND}" = 'no' ] ; then RUNTIME_NODE_SEES_FRONTEND= fi # Only CPU time specified in job limits, rough limit for wall time walltime_ratio='1' # Use specified CPU time as soft limit, allow to run a bit longer before hard limit time_hardlimit_ratio='1/1' # Use specified memory requirement as soft limit, allow a bit more before hard limit memory_hardlimit_ratio='1/1' # Where to store temporary files TMPDIR=${TMPDIR:-@tmp_dir@} # Where GNU time utility is located on computing nodes (empty if does not exist) GNU_TIME=${CONFIG_gnu_time:-@gnu_time@} # Command to get name of executing node NODENAME=${CONFIG_nodename:-"@nodename@"} } # performance logging: if perflogdir or perflogfile is set, logging is turned on. So only set them when enable_perflog_reporting is "expert-debug-on". init_perflog () { unset perflogdir unset perflogfile if [ "x$CONFIG_enable_perflog_reporting" = "xexpert-debug-on" ]; then perflogdir=${CONFIG_perflogdir:-/var/log/arc/perfdata} perflogfile="${perflogdir}/backends.perflog" perflogfilesub="${perflogdir}/submission.perflog" fi } # returns the control path for the fragmented controldir control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } nordugrid-arc-7.1.1/src/services/a-rex/lrms/PaxHeaders/submit_common.sh0000644000000000000000000000013215067751327023165 xustar0030 mtime=1759498967.770492207 30 atime=1759498967.872493757 30 ctime=1759499029.911860761 nordugrid-arc-7.1.1/src/services/a-rex/lrms/submit_common.sh0000644000175000002070000007626015067751327025102 0ustar00mockbuildmock00000000000000###################################################### #Common functions for submit scripts ###################################################### # This script should not be executed directly but is sourced in # from various backend scripts that itself are called from the # grid manager. Its purpose is to prepare the runtime environments, # which is almost the same procedure invariant of the backend # used. sourcewithargs () { script=$1 shift . 
$script } # # Exits with 0 if the argument is all digits # is_number () { /usr/bin/perl -e 'exit 1 if $ARGV[0] !~ m/^\d+$/' "$1" } # # Initial parsing and environment setup for submission scripts # THIS FUNCTION USES FUNCTIONS DEFINED IN lrms_common.sh # common_init () { # parse grami file parse_grami_file $GRAMI_FILE # parse configuration parse_arc_conf # read LRMS-specific environment . ${pkgdatadir}/configure-${joboption_lrms}-env.sh || exit $? # init common LRMS environment variables init_lrms_env # optionally enable support for community RTEs [ -e "${pkgdatadir}/community_rtes.sh" ] && . "${pkgdatadir}/community_rtes.sh" } # defines failures_file define_failures_file () { failures_file=$(control_path "$joboption_controldir" "$joboption_gridid" "failed") } # checks that some scratch is defined (shared or local) check_any_scratch () { if [ -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then if [ -z "${RUNTIME_LOCAL_SCRATCH_DIR}" ] ; then echo "Need to know at which directory to run job: RUNTIME_LOCAL_SCRATCH_DIR must be set if RUNTIME_NODE_SEES_FRONTEND is empty" 1>&2 echo "Submission: Configuration error." exit 1 fi fi } # # Sets a default memory limit for jobs that don't have one # set_req_mem () { if ! is_number "$joboption_memory"; then echo "---------------------------------------------------------------------" 1>&2 echo "WARNING: The job description contains no explicit memory requirement." 1>&2 if is_number "$CONFIG_defaultmemory"; then joboption_memory=$CONFIG_defaultmemory echo " A default memory limit taken from 'defaultmemory' in " 1>&2 echo " arc.conf will apply. " 1>&2 echo " Limit is: $CONFIG_defaultmemory MB. " 1>&2 else echo " No 'defaultmemory' enforcement in arc.conf. " 1>&2 echo " JOB WILL BE PASSED TO BATCH SYSTEM WITHOUT MEMORY LIMIT !!! " 1>&2 fi echo "---------------------------------------------------------------------" 1>&2 fi } set_count () { if [ -z "$joboption_count" ] || [ "$joboption_count" -le 1 ] ; then joboption_count=1 joboption_countpernode=-1 joboption_numnodes=-1 fi } ############################################################## # create temp job script ############################################################## mktempscript () { # File name to be used for temporary job script LRMS_JOB_SCRIPT=`mktemp ${TMPDIR}/${joboption_lrms}_job_script.XXXXXX` echo "Created file $LRMS_JOB_SCRIPT" if [ -z "$LRMS_JOB_SCRIPT" ] ; then echo "Creation of temporary file failed" exit 1 fi LRMS_JOB_OUT="${LRMS_JOB_SCRIPT}.out" touch $LRMS_JOB_OUT LRMS_JOB_ERR="${LRMS_JOB_SCRIPT}.err" touch $LRMS_JOB_ERR if [ ! -f "$LRMS_JOB_SCRIPT" ] || [ ! -f "$LRMS_JOB_OUT" ] || [ ! -f "$LRMS_JOB_ERR" ] ; then echo "Something is wrong. Either somebody is deleting files or I cannot write to ${TMPDIR}" rm -f "$LRMS_JOB_SCRIPT" "$LRMS_JOB_OUT" "$LRMS_JOB_ERR" exit 1 fi } ############################################################## # Jobscript resource usage accounting ############################################################## accounting_init () { cat >> $LRMS_JOB_SCRIPT <&2 JOB_ACCOUNTING="" # Try to use cgroups first if command -v arc-job-cgroup >/dev/null 2>&1; then echo "Found arc-job-cgroup tool: trying to initialize accounting cgroups for the job." 1>&2 while true; do # memory cgroup memory_cgroup="\$( arc-job-cgroup -m -n $joboption_gridid )" if [ \$? -ne 0 -o -z "\$memory_cgroup" ]; then echo "Failed to initialize memory cgroup for accounting." 1>&2 break; fi # cpuacct cgroup cpuacct_cgroup="\$( arc-job-cgroup -c -n $joboption_gridid )" if [ \$?
-ne 0 -o -z "\$cpuacct_cgroup" ]; then echo "Failed to initialize cpuacct cgroup for accounting." 1>&2 break; fi echo "Using cgroups method for job accounting" 1>&2 JOB_ACCOUNTING="cgroup" break; done fi # Fallback to GNU_TIME if cgroups are not working if [ -z "\$JOB_ACCOUNTING" ]; then GNU_TIME='$GNU_TIME' echo "Looking for \$GNU_TIME tool for accounting measurements" 1>&2 if [ ! -z "\$GNU_TIME" ] && ! "\$GNU_TIME" --version >/dev/null 2>&1; then echo "GNU time is not found at: \$GNU_TIME" 1>&2 else echo "GNU time found and will be used for job accounting." 1>&2 JOB_ACCOUNTING="gnutime" fi fi # Nothing works: rely on LRMS only if [ -z "\$JOB_ACCOUNTING" ]; then echo "Failed to use both cgroups and GNU time for resource usage accounting. Accounting relies on LRMS information only." 1>&2 fi EOSCR if [ -n "$ACCOUNTING_WN_INSTANCE" ]; then echo "# Define accounting WN instance tag" >> $LRMS_JOB_SCRIPT echo "ACCOUNTING_WN_INSTANCE='$ACCOUNTING_WN_INSTANCE'" >> $LRMS_JOB_SCRIPT fi } accounting_end () { cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # Handle cgroup measurements if [ "x$JOB_ACCOUNTING" = "xcgroup" ]; then # Max memory used (total) maxmemory=$( cat "${memory_cgroup}/memory.memsw.max_usage_in_bytes" ) maxmemory=$(( (maxmemory + 1023) / 1024 )) echo "maxtotalmemory=${maxmemory}kB" >> "$RUNTIME_JOB_DIAG" # Max memory used (RAM) maxram=$( cat "${memory_cgroup}/memory.max_usage_in_bytes" ) maxram=$(( (maxram + 1023) / 1024 )) echo "maxresidentmemory=${maxram}kB" >> "$RUNTIME_JOB_DIAG" # TODO: this is for compatibilty with current A-REX accounting code. Remove when A-REX will use max value instead. echo "averageresidentmemory=${maxram}kB" >> "$RUNTIME_JOB_DIAG" # User CPU time if [ -f "${cpuacct_cgroup}/cpuacct.usage_user" ]; then # cgroup values are in nanoseconds user_cputime=$( cat "${cpuacct_cgroup}/cpuacct.usage_user" ) user_cputime=$(( user_cputime / 1000000 )) elif [ -f "${cpuacct_cgroup}/cpuacct.stat" ]; then # older kernels have only cpuacct.stat that use USER_HZ units user_cputime=$( cat "${cpuacct_cgroup}/cpuacct.stat" | sed -n '/^user/s/user //p' ) user_hz=$( getconf CLK_TCK ) user_cputime=$(( user_cputime / user_hz )) fi [ -n "$user_cputime" ] && echo "usertime=${user_cputime}" >> "$RUNTIME_JOB_DIAG" # Kernel CPU time if [ -f "${cpuacct_cgroup}/cpuacct.usage_sys" ]; then # cgroup values are in nanoseconds kernel_cputime=$( cat "${cpuacct_cgroup}/cpuacct.usage_sys" ) kernel_cputime=$(( kernel_cputime / 1000000 )) elif [ -f "${cpuacct_cgroup}/cpuacct.stat" ]; then # older kernels have only cpuacct.stat that use USER_HZ units kernel_cputime=$( cat "${cpuacct_cgroup}/cpuacct.stat" | sed -n '/^system/s/system //p' ) [ -z "$user_hz" ] && user_hz=$( getconf CLK_TCK ) kernel_cputime=$(( kernel_cputime / user_hz )) fi [ -n "$kernel_cputime" ] && echo "kerneltime=${kernel_cputime}" >> "$RUNTIME_JOB_DIAG" # Remove nested job accouting cgroups arc-job-cgroup -m -d arc-job-cgroup -c -d fi # Record CPU benchmarking values for WN user by the job [ -n "${ACCOUNTING_BENCHMARK}" ] && echo "benchmark=${ACCOUNTING_BENCHMARK}" >> "$RUNTIME_JOB_DIAG" # Record WN instance tag if defined [ -n "${ACCOUNTING_WN_INSTANCE}" ] && echo "wninstance=${ACCOUNTING_WN_INSTANCE}" >> "$RUNTIME_JOB_DIAG" # Record execution clock times ACCOUNTING_ENDTIME=`date +"%s"` # Mds date format (YYYYMMDDHHMMSSZ) echo "LRMSStartTime=`date -d "1970-01-01 UTC ${ACCOUNTING_STARTTIME} seconds" +"%Y%m%d%H%M%SZ"`" >> "$RUNTIME_JOB_DIAG" echo "LRMSEndTime=`date -d "1970-01-01 UTC ${ACCOUNTING_ENDTIME} seconds" +"%Y%m%d%H%M%SZ"`" >> 
"$RUNTIME_JOB_DIAG" echo "walltime=$(( ACCOUNTING_ENDTIME - ACCOUNTING_STARTTIME ))" >> "$RUNTIME_JOB_DIAG" # Add exit code to the accounting information and exit the job script echo "exitcode=$RESULT" >> "$RUNTIME_JOB_DIAG" exit $RESULT EOSCR } detect_wn_systemsoftware () { cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # Detecting WN operating system for accounting purposes if [ -f "/etc/os-release" ]; then SYSTEM_SOFTWARE="$( eval $( cat /etc/os-release ); echo "${NAME} ${VERSION}" )" elif [ -f "/etc/system-release" ]; then SYSTEM_SOFTWARE="$( cat /etc/system-release )" elif command -v lsb_release >/dev/null 2>&1; then SYSTEM_SOFTWARE=$(lsb_release -ds) elif command -v hostnamectl >/dev/null 2>&1; then SYSTEM_SOFTWARE="$( hostnamectl 2>/dev/null | sed -n '/Operating System/s/^\s*Operating System:\s*//p' )" elif command -v uname >/dev/null 2>&1; then SYSTEM_SOFTWARE="Linux $( uname -r)" fi [ -n "$SYSTEM_SOFTWARE" ] && echo "systemsoftware=${SYSTEM_SOFTWARE}" >> "$RUNTIME_JOB_DIAG" EOSCR } ############################################################## # Add environment variables ############################################################## add_user_env () { echo "# Setting environment variables as specified by user" >> $LRMS_JOB_SCRIPT has_gridglobalid='' i=0 eval "var_is_set=\${joboption_env_$i+yes}" while [ ! -z "${var_is_set}" ] ; do eval "var_value=\${joboption_env_$i}" if [ "$var_value" ] && [ -z "${var_value##GRID_GLOBAL_JOBID=*}" ]; then has_gridglobalid=yes fi var_escaped=`echo "$var_value" | sed "s/'/'\\\\\''/g"` echo "export '${var_escaped}'" >> $LRMS_JOB_SCRIPT i=$(( $i + 1 )) eval "var_is_set=\${joboption_env_$i+yes}" done # guess globalid in case not already provided if [ -z "$has_gridglobalid" ]; then hostname=`/usr/bin/perl -MSys::Hostname -we 'print hostname'` hostname=${CONFIG_hostname:-$hostname} # not configurable any longer gm_port=2811 gm_mount_point="/jobs" echo "export GRID_GLOBAL_JOBID='gsiftp://$hostname:$gm_port$gm_mount_point/$joboption_gridid'" >> $LRMS_JOB_SCRIPT fi echo "" >> $LRMS_JOB_SCRIPT } sourcewithargs_jobscript () { echo "# source with arguments for DASH shells" >> $LRMS_JOB_SCRIPT echo "sourcewithargs() {" >> $LRMS_JOB_SCRIPT echo "script=\$1" >> $LRMS_JOB_SCRIPT echo "shift" >> $LRMS_JOB_SCRIPT echo ". \$script" >> $LRMS_JOB_SCRIPT echo "}" >> $LRMS_JOB_SCRIPT } ############################################################## # RunTimeEnvironemt Functions ############################################################## RTE_include_default () { default_rte_dir="${CONFIG_controldir}/rte/default/" if [ -d "$default_rte_dir" ]; then # get default RTEs default_rtes=` find "$default_rte_dir" ! -type d -exec test -e {} \; -print | sed "s#^$default_rte_dir##" ` if [ -n "$default_rtes" ]; then # Find last RTE index defined rte_idx=0 eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" while [ -n "${is_rte}" ] ; do rte_idx=$(( rte_idx + 1 )) eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" done req_idx=$rte_idx # Add default RTEs to the list for rte_name in $default_rtes; do # check if already included into the list of requested RTEs check_idx=0 while [ $check_idx -lt $req_idx ]; do eval "check_rte=\${joboption_runtime_${check_idx}}" if [ "$rte_name" = "$check_rte" ]; then echo "$rte_name RTE is already requested: skipping the same default RTE injection." 
1>&2 continue 2 fi check_idx=$(( check_idx + 1 )) done eval "joboption_runtime_${rte_idx}=$rte_name" rte_idx=$(( rte_idx + 1 )) done fi fi unset default_rte_dir default_rtes is_rte rte_idx rte_name req_idx check_idx check_rte } RTE_path_set () { rte_params_path="${CONFIG_controldir}/rte/params/${rte_name}" rte_path="${CONFIG_controldir}/rte/enabled/${rte_name}" if [ ! -e "$rte_path" ]; then rte_path="${CONFIG_controldir}/rte/default/${rte_name}" if [ ! -e "$rte_path" ]; then echo "ERROR: Requested RunTimeEnvironment ${rte_name} is missing, broken or not enabled." 1>&2 exit 1 fi fi # check RTE is empty unset rte_empty [ -s "$rte_path" ] || rte_empty=1 } RTE_add_optional_args () { # for RTE defined by 'rte_idx' adds optional arguments to 'args_value' if any arg_idx=1 eval "is_arg=\${joboption_runtime_${rte_idx}_${arg_idx}+yes}" while [ -n "${is_arg}" ] ; do eval "arg_value=\${joboption_runtime_${rte_idx}_${arg_idx}}" # Use printf in order to handle backslashes correctly (bash vs dash) arg_value=` printf "%s" "$arg_value" | sed 's/"/\\\\\\\"/g' ` args_value="$args_value \"${arg_value}\"" arg_idx=$(( arg_idx + 1 )) eval "is_arg=\${joboption_runtime_${rte_idx}_${arg_idx}+yes}" done unset arg_idx arg_value is_arg } RTE_to_jobscript () { rte_idx=0 eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" while [ -n "${is_rte}" ] ; do eval "rte_name=\"\${joboption_runtime_${rte_idx}}\"" # define rte_path RTE_path_set # skip empty RTEs if [ -z "$rte_empty" ]; then # add RTE script content as a function into the job script echo "# RunTimeEnvironment function for ${rte_name}:" >> $LRMS_JOB_SCRIPT echo "RTE_function_${rte_idx} () {" >> $LRMS_JOB_SCRIPT # include parameters file (if exists) [ -e "${rte_params_path}" ] && cat "${rte_params_path}" >> $LRMS_JOB_SCRIPT # include community RTE environment [ -n "${COMMUNITY_RTES}" ] && community_software_environment >> $LRMS_JOB_SCRIPT # include RTE content itself cat "${rte_path}" >> $LRMS_JOB_SCRIPT echo "}" >> $LRMS_JOB_SCRIPT else # mark RTE as empty to skip further processing eval "joboption_runtime_${rte_idx}_empty=1" fi # next RTE rte_idx=$(( rte_idx + 1 )) eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" done unset is_rte rte_idx rte_name rte_path rte_empty } RTE_jobscript_call () { rte_stage=$1 if [ "$rte_stage" = '1' ]; then RTE_to_jobscript fi echo "# Running RTE scripts (stage ${rte_stage})" >> $LRMS_JOB_SCRIPT echo "runtimeenvironments=" >> $LRMS_JOB_SCRIPT rte_idx=0 eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" while [ -n "${is_rte}" ] ; do # RTE name is for admin-friendly logging only eval "rte_name=\"\${joboption_runtime_${rte_idx}}\"" echo "runtimeenvironments=\"\${runtimeenvironments}${rte_name};\"" >> $LRMS_JOB_SCRIPT # add call if RTE is not empty eval "is_empty=\${joboption_runtime_${rte_idx}_empty+yes}" if [ -z "${is_empty}" ]; then # compose arguments value for RTE function call args_value="${rte_stage} " RTE_add_optional_args # add function call to job script echo "# Calling ${rte_name} function: " >> $LRMS_JOB_SCRIPT # Use printf in order to handle backslashes correctly (bash vs dash) printf "RTE_function_${rte_idx} ${args_value}\n" >> $LRMS_JOB_SCRIPT echo "if [ \$? 
-ne 0 ]; then" >> $LRMS_JOB_SCRIPT echo " echo \"Runtime ${rte_name} stage ${rte_stage} execution failed.\" 1>&2" >> $LRMS_JOB_SCRIPT echo " echo \"Runtime ${rte_name} stage ${rte_stage} execution failed.\" 1>>\"\${RUNTIME_JOB_STDERR}\"" >> $LRMS_JOB_SCRIPT echo " exit 1" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT fi rte_idx=$(( rte_idx + 1 )) eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" done unset rte_idx is_rte is_empty rte_name args_value rte_stage } RTE_stage0 () { RTE_include_default rte_idx=0 eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" while [ -n "${is_rte}" ] ; do eval "rte_name=\"\${joboption_runtime_${rte_idx}}\"" # define rte_path RTE_path_set # skip empty RTEs if [ -z "$rte_empty" ]; then # define arguments args_value="0 " RTE_add_optional_args # run RTE stage 0 # WARNING!!! IN SOME CASES DUE TO DIRECT SOURCING OF RTE SCRIPT WITHOUT ANY SAFETY CHECKS # SPECIALLY CRAFTED RTES CAN BROKE CORRECT SUBMISSION (e.g. RTE redefine 'rte_idx' variable) [ -e "${rte_params_path}" ] && . "${rte_params_path}" 1>&2 # prepare RUNTIME_JOB_SWDIR for community-defined RTE software [ -n "${COMMUNITY_RTES}" ] && community_software_prepare # execute RTE script stage 0 sourcewithargs "$rte_path" $args_value 1>&2 rte0_exitcode=$? if [ $rte0_exitcode -ne 0 ] ; then echo "ERROR: Runtime script ${rte_name} stage 0 execution failed with exit code ${rte0_exitcode}" 1>&2 exit 1 fi fi rte_idx=$(( rte_idx + 1 )) eval "is_rte=\${joboption_runtime_${rte_idx}+yes}" done unset rte_idx is_rte rte_name rte_path rte_empty rte0_exitcode } RTE_stage1 () { RTE_jobscript_call 1 } RTE_stage2 () { RTE_jobscript_call 2 } ############################################################## # Add std... to job arguments ############################################################## include_std_streams () { input_redirect= output_redirect= if [ ! -z "$joboption_stdin" ] ; then input_redirect="<\$RUNTIME_JOB_STDIN" fi if [ ! -z "$joboption_stdout" ] ; then output_redirect="1>\$RUNTIME_JOB_STDOUT" fi if [ ! -z "$joboption_stderr" ] ; then if [ "$joboption_stderr" = "$joboption_stdout" ] ; then output_redirect="$output_redirect 2>&1" else output_redirect="$output_redirect 2>\$RUNTIME_JOB_STDERR" fi fi } ############################################################## # move files to node ############################################################## move_files_to_node () { if [ "$joboption_count" -eq 1 ] || [ ! -z "$RUNTIME_ENABLE_MULTICORE_SCRATCH" ] || [ "$joboption_count" -eq "$joboption_countpernode" ]; then echo "RUNTIME_LOCAL_SCRATCH_DIR=\${RUNTIME_LOCAL_SCRATCH_DIR:-$RUNTIME_LOCAL_SCRATCH_DIR}" >> $LRMS_JOB_SCRIPT else echo "RUNTIME_LOCAL_SCRATCH_DIR=\${RUNTIME_LOCAL_SCRATCH_DIR:-}" >> $LRMS_JOB_SCRIPT fi echo "RUNTIME_LOCAL_SCRATCH_MOVE_TOOL=\${RUNTIME_LOCAL_SCRATCH_MOVE_TOOL:-$RUNTIME_LOCAL_SCRATCH_MOVE_TOOL}" >> $LRMS_JOB_SCRIPT echo "RUNTIME_FRONTEND_SEES_NODE=\${RUNTIME_FRONTEND_SEES_NODE:-$RUNTIME_FRONTEND_SEES_NODE}" >> $LRMS_JOB_SCRIPT echo "RUNTIME_NODE_SEES_FRONTEND=\${RUNTIME_NODE_SEES_FRONTEND:-$RUNTIME_NODE_SEES_FRONTEND}" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! 
-z "$RUNTIME_NODE_SEES_FRONTEND" ]; then RUNTIME_NODE_JOB_DIR="$RUNTIME_LOCAL_SCRATCH_DIR"/`basename "$RUNTIME_JOB_DIR"` rm -rf "$RUNTIME_NODE_JOB_DIR" mkdir -p "$RUNTIME_NODE_JOB_DIR" # move directory contents for f in "$RUNTIME_JOB_DIR"/.* "$RUNTIME_JOB_DIR"/*; do [ "$f" = "$RUNTIME_JOB_DIR/*" ] && continue # glob failed, no files [ "$f" = "$RUNTIME_JOB_DIR/." ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.." ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.diag" ] && continue [ "$f" = "$RUNTIME_JOB_DIR/.comment" ] && continue [ -f "$f" ] || continue if ! $RUNTIME_LOCAL_SCRATCH_MOVE_TOOL "$f" "$RUNTIME_NODE_JOB_DIR"; then echo "Failed to '$RUNTIME_LOCAL_SCRATCH_MOVE_TOOL' '$f' to '$RUNTIME_NODE_JOB_DIR'" 1>&2 exit 1 fi done if [ ! -z "$RUNTIME_FRONTEND_SEES_NODE" ] ; then # creating link for whole directory ln -s "$RUNTIME_FRONTEND_SEES_NODE"/`basename "$RUNTIME_JOB_DIR"` "$RUNTIME_JOB_DIR" else # keep stdout, stderr and control directory on frontend # recreate job directory mkdir -p "$RUNTIME_JOB_DIR" # make those files mkdir -p `dirname "$RUNTIME_JOB_STDOUT"` mkdir -p `dirname "$RUNTIME_JOB_STDERR"` touch "$RUNTIME_JOB_STDOUT" touch "$RUNTIME_JOB_STDERR" RUNTIME_JOB_STDOUT__=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDERR__=`echo "$RUNTIME_JOB_STDERR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` rm "$RUNTIME_JOB_STDOUT__" 2>/dev/null rm "$RUNTIME_JOB_STDERR__" 2>/dev/null if [ ! -z "$RUNTIME_JOB_STDOUT__" ] && [ "$RUNTIME_JOB_STDOUT" != "$RUNTIME_JOB_STDOUT__" ]; then ln -s "$RUNTIME_JOB_STDOUT" "$RUNTIME_JOB_STDOUT__" fi if [ "$RUNTIME_JOB_STDOUT__" != "$RUNTIME_JOB_STDERR__" ] ; then if [ ! -z "$RUNTIME_JOB_STDERR__" ] && [ "$RUNTIME_JOB_STDERR" != "$RUNTIME_JOB_STDERR__" ]; then ln -s "$RUNTIME_JOB_STDERR" "$RUNTIME_JOB_STDERR__" fi fi if [ ! -z "$RUNTIME_CONTROL_DIR" ] ; then # move control directory back to frontend RUNTIME_CONTROL_DIR__=`echo "$RUNTIME_CONTROL_DIR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` mv "$RUNTIME_CONTROL_DIR__" "$RUNTIME_CONTROL_DIR" fi fi # adjust stdin,stdout & stderr pointers RUNTIME_JOB_STDIN=`echo "$RUNTIME_JOB_STDIN" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDOUT=`echo "$RUNTIME_JOB_STDOUT" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_JOB_STDERR=`echo "$RUNTIME_JOB_STDERR" | sed "s#^${RUNTIME_JOB_DIR}#${RUNTIME_NODE_JOB_DIR}#"` RUNTIME_FRONTEND_JOB_DIR="$RUNTIME_JOB_DIR" RUNTIME_JOB_DIR="$RUNTIME_NODE_JOB_DIR" fi if [ -z "$RUNTIME_NODE_SEES_FRONTEND" ] ; then mkdir -p "$RUNTIME_JOB_DIR" fi EOSCR } ############################################################## # Clean up output files in the local scratch dir ############################################################## clean_local_scratch_dir_output () { # "moveup" parameter will trigger output files moving to one level up if [ "x$1" = "xmoveup" ]; then move_files_up=1 fi # Calculate the scratch size at the end of execution echo '# Measuring used scratch space' >> $LRMS_JOB_SCRIPT echo 'echo "usedscratch=$( du -sb "$RUNTIME_JOB_DIR" | sed "s/\s.*$//" )" >> "$RUNTIME_JOB_DIAG"' >> $LRMS_JOB_SCRIPT # There is no sense to keep trash till GM runs uploader echo '# Cleaning up extra files in the local scratch' >> $LRMS_JOB_SCRIPT echo 'if [ ! 
-z "$RUNTIME_LOCAL_SCRATCH_DIR" ] ; then' >> $LRMS_JOB_SCRIPT # Delete all files except listed in job.#.output echo ' find ./ -type l -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' chmod -R u+w "./"' >> $LRMS_JOB_SCRIPT output_file=$(control_path "$joboption_controldir" "$joboption_gridid" "output") if [ -f "$output_file" ] ; then cat "$output_file" | \ # remove leading backslashes, if any sed 's/^\/*//' | \ # backslashes and spaces are escaped with a backslash in job.*.output. The # shell built-in read undoes this escaping. while read name rest; do # make it safe for shell by replacing single quotes with '\'' name=`printf "%s" "$name"|sed "s/'/'\\\\\\''/g"`; # protect from deleting output files including those in the dynamic list if [ "${name#@}" != "$name" ]; then # Does $name start with a @ ? dynlist=${name#@} echo " dynlist='$dynlist'" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' chmod -R u-w "./$dynlist" 2>/dev/null cat "./$dynlist" | while read name rest; do chmod -R u-w "./$name" 2>/dev/null done EOSCR else printf "%s\n" " chmod -R u-w \"\$RUNTIME_JOB_DIR\"/'$name' 2>/dev/null" >> $LRMS_JOB_SCRIPT if [ -n "$move_files_up" -a -z "${RUNTIME_NODE_SEES_FRONTEND}" ] ; then printf "%s\n" " mv \"\$RUNTIME_JOB_DIR\"/'$name' ../." >> $LRMS_JOB_SCRIPT fi fi done fi echo ' find ./ -type f -perm /200 -exec rm -f "{}" ";"' >> $LRMS_JOB_SCRIPT echo ' chmod -R u+w "./"' >> $LRMS_JOB_SCRIPT echo 'fi' >> $LRMS_JOB_SCRIPT echo "" >> $LRMS_JOB_SCRIPT } ############################################################## # move files back to frontend ############################################################## move_files_to_frontend () { cat >> $LRMS_JOB_SCRIPT <<'EOSCR' if [ ! -z "$RUNTIME_LOCAL_SCRATCH_DIR" ] && [ ! -z "$RUNTIME_NODE_SEES_FRONTEND" ]; then if [ ! -z "$RUNTIME_FRONTEND_SEES_NODE" ] ; then # just move it rm -rf "$RUNTIME_FRONTEND_JOB_DIR" destdir=`dirname "$RUNTIME_FRONTEND_JOB_DIR"` if ! mv "$RUNTIME_NODE_JOB_DIR" "$destdir"; then echo "Failed to move '$RUNTIME_NODE_JOB_DIR' to '$destdir'" 1>&2 RESULT=1 fi else # remove links rm -f "$RUNTIME_JOB_STDOUT" 2>/dev/null rm -f "$RUNTIME_JOB_STDERR" 2>/dev/null # move directory contents for f in "$RUNTIME_NODE_JOB_DIR"/.* "$RUNTIME_NODE_JOB_DIR"/*; do [ "$f" = "$RUNTIME_NODE_JOB_DIR/*" ] && continue # glob failed, no files [ "$f" = "$RUNTIME_NODE_JOB_DIR/." ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.." ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.diag" ] && continue [ "$f" = "$RUNTIME_NODE_JOB_DIR/.comment" ] && continue [ -f "$f" ] || continue if ! mv "$f" "$RUNTIME_FRONTEND_JOB_DIR"; then echo "Failed to move '$f' to '$RUNTIME_FRONTEND_JOB_DIR'" 1>&2 RESULT=1 fi done rm -rf "$RUNTIME_NODE_JOB_DIR" fi fi EOSCR } ############################################################## # copy runtime settings to jobscript ############################################################## setup_runtime_env () { echo "RUNTIME_JOB_DIR=$joboption_directory" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_STDIN=$joboption_stdin" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_STDOUT=$joboption_stdout" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_STDERR=$joboption_stderr" >> $LRMS_JOB_SCRIPT echo "RUNTIME_JOB_DIAG=${joboption_directory}.diag" >> $LRMS_JOB_SCRIPT # Adjust working directory for tweaky nodes # RUNTIME_GRIDAREA_DIR should be defined by external means on nodes echo "if [ ! 
-z \"\$RUNTIME_GRIDAREA_DIR\" ] ; then" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_DIR=\$RUNTIME_GRIDAREA_DIR/\`basename \$RUNTIME_JOB_DIR\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_STDIN=\`echo \"\$RUNTIME_JOB_STDIN\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_STDOUT=\`echo \"\$RUNTIME_JOB_STDOUT\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_STDERR=\`echo \"\$RUNTIME_JOB_STDERR\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_JOB_DIAG=\`echo \"\$RUNTIME_JOB_DIAG\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo " RUNTIME_CONTROL_DIR=\`echo \"\$RUNTIME_CONTROL_DIR\" | sed \"s#^\$RUNTIME_JOB_DIR#\$RUNTIME_GRIDAREA_DIR#\"\`" >> $LRMS_JOB_SCRIPT echo "fi" >> $LRMS_JOB_SCRIPT } ############################################################## # change to runtime dir and setup timed run ############################################################## cd_and_run () { cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # Changing to session directory HOME=$RUNTIME_JOB_DIR export HOME if ! cd "$RUNTIME_JOB_DIR"; then echo "Failed to switch to '$RUNTIME_JOB_DIR'" 1>&2 RESULT=1 fi if [ ! -z "$RESULT" ] && [ "$RESULT" != 0 ]; then exit $RESULT fi EOSCR if [ ! -z "$NODENAME" ] ; then cat >> $LRMS_JOB_SCRIPT <> "\$RUNTIME_JOB_DIAG" fi EOSCR fi # Add accounting information from frontend to RUNTIME_JOB_DIAG # Processors/Nodecount if [ -n "$joboption_count" ]; then echo "echo \"Processors=${joboption_count}\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT if [ -n "$joboption_numnodes" -a "$joboption_numnodes" != "-1" ]; then echo "echo \"Nodecount=$joboption_numnodes\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT fi fi # Benchmark values if [ -z "$joboption_benchmark" ]; then joboption_benchmark="HEPSPEC:1.0" if [ -n "$CONFIG_benchmark" ]; then if [ "$CONFIG_benchmark" = "__array__" ]; then joboption_benchmark=$(printf '%s' "${CONFIG_benchmark_0}" | tr ' ' ':') else joboption_benchmark=$(printf '%s' "${CONFIG_benchmark}" | tr ' ' ':') fi fi fi echo "echo \"Benchmark=$joboption_benchmark\" >> \"\$RUNTIME_JOB_DIAG\"" >> $LRMS_JOB_SCRIPT # add queue benchmark to frontend diag (for jobs that failed to reach/start in LRMS) diag_file=$(control_path "$joboption_controldir" "$joboption_gridid" "diag") echo "Benchmark=$joboption_benchmark" >> "${diag_file}" # Define executable and check it exists on the worker node echo "executable='$joboption_arg_0'" >> $LRMS_JOB_SCRIPT cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # Check if executable exists if [ ! -f "$executable" ]; then echo "Path \"$executable\" does not seem to exist" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1 fi EOSCR # In case the job executable is written in a scripting language and the # interpreter is not found, the error message printed by GNU_TIME is # misleading. This will print a more appropriate error message. cat >> $LRMS_JOB_SCRIPT <<'EOSCR' # See if executable is a script, and extract the name of the interpreter line1=$(dd if="$executable" count=1 2>/dev/null | head -n 1 | tr -d '\0') shebang=`echo $line1 | sed -n 's/^#! *//p'` interpreter=`echo $shebang | awk '{print $1}'` if [ "$interpreter" = /usr/bin/env ]; then interpreter=`echo $shebang | awk '{print $2}'`; fi # If it's a script and the interpreter is not found ... 
[ "x$interpreter" = x ] || type "$interpreter" > /dev/null 2>&1 || { echo "Cannot run $executable: $interpreter: not found" 1>$RUNTIME_JOB_STDOUT 2>$RUNTIME_JOB_STDERR 1>&2 exit 1; } EOSCR # Check gnutime wrap is used for accounting cat >> $LRMS_JOB_SCRIPT < #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "grid-manager/log/JobLog.h" #include "grid-manager/log/JobsMetrics.h" #include "grid-manager/log/HeartBeatMetrics.h" #include "grid-manager/log/SpaceMetrics.h" #include "grid-manager/jobs/ContinuationPlugins.h" #include "grid-manager/files/ControlFileHandling.h" #include "arex.h" namespace ARex { #define DEFAULT_INFOPROVIDER_WAKEUP_PERIOD (600) #define DEFAULT_INFOSYS_MAX_CLIENTS (1) #define DEFAULT_JOBCONTROL_MAX_CLIENTS (100) #define DEFAULT_DATATRANSFER_MAX_CLIENTS (100) static const std::string BES_ARC_NPREFIX("a-rex"); static const std::string BES_ARC_NAMESPACE("http://www.nordugrid.org/schemas/a-rex"); static const std::string DELEG_ARC_NPREFIX("arcdeleg"); static const std::string DELEG_ARC_NAMESPACE("http://www.nordugrid.org/schemas/delegation"); char const* ARexService::InfoPath = "*info"; char const* ARexService::LogsPath = "*logs"; char const* ARexService::NewPath = "*new"; char const* ARexService::DelegationPath = "*deleg"; char const* ARexService::CachePath = "cache"; char const* ARexService::RestPath = "rest"; #define AREX_POLICY_OPERATION_URN "http://www.nordugrid.org/schemas/policy-arc/types/a-rex/operation" #define AREX_POLICY_OPERATION_ADMIN "Admin" #define AREX_POLICY_OPERATION_INFO "Info" // Id: http://www.nordugrid.org/schemas/policy-arc/types/arex/joboperation // Value: // Create - creation of new job // Modify - modification of job paramaeters - change state, write data. // Read - accessing job information - get status information, read data. 
// Id: http://www.nordugrid.org/schemas/policy-arc/types/arex/operation // Value: // Admin - administrator level operation // Info - information about service class ARexSecAttr: public Arc::SecAttr { public: ARexSecAttr(const std::string& action); ARexSecAttr(const Arc::XMLNode op); virtual ~ARexSecAttr(void); virtual operator bool(void) const; virtual bool Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const; virtual std::string get(const std::string& id) const; virtual std::map< std::string, std::list > getAll() const; void SetResource(const std::string& service, const std::string& job, const std::string& action); protected: std::string action_; std::string id_; std::string service_; std::string job_; std::string file_; virtual bool equal(const Arc::SecAttr &b) const; }; ARexSecAttr::ARexSecAttr(const std::string& action) { id_=JOB_POLICY_OPERATION_URN; action_=action; } ARexSecAttr::ARexSecAttr(const Arc::XMLNode op) { if(MatchXMLNamespace(op,BES_ARC_NAMESPACE)) { if(MatchXMLName(op,"CacheCheck")) { id_=AREX_POLICY_OPERATION_URN; action_=AREX_POLICY_OPERATION_INFO; } } else if(MatchXMLNamespace(op,DELEG_ARC_NAMESPACE)) { if(MatchXMLName(op,"DelegateCredentialsInit")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_CREATE; } else if(MatchXMLName(op,"UpdateCredentials")) { id_=JOB_POLICY_OPERATION_URN; action_=JOB_POLICY_OPERATION_MODIFY; } } } void ARexSecAttr::SetResource(const std::string& service, const std::string& job, const std::string& action) { service_ = service; job_ = job; action_ = action; } ARexSecAttr::~ARexSecAttr(void) { } ARexSecAttr::operator bool(void) const { return !action_.empty(); } bool ARexSecAttr::equal(const SecAttr &b) const { try { const ARexSecAttr& a = (const ARexSecAttr&)b; return ((id_ == a.id_) && (action_ == a.action_)); } catch(std::exception&) { }; return false; } bool ARexSecAttr::Export(Arc::SecAttrFormat format,Arc::XMLNode &val) const { if(format == UNDEFINED) { } else if(format == ARCAuth) { Arc::NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; val.Namespaces(ns); val.Name("ra:Request"); Arc::XMLNode item = val.NewChild("ra:RequestItem"); if(!action_.empty()) { Arc::XMLNode action = item.NewChild("ra:Action"); action=action_; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")=id_; }; // TODO: add resource part return true; } else { }; return false; } std::string ARexSecAttr::get(const std::string& id) const { if(id == "ACTION") return action_; if(id == "NAMESPACE") return id_; if(id == "SERVICE") return service_; if(id == "JOB") return job_; if(id == "FILE") return file_; return ""; }; std::map< std::string, std::list > ARexSecAttr::getAll() const { std::map< std::string, std::list > all; all["ACTION"] = Arc::SecAttr::getAll("ACTION"); all["NAMESPACE"] = Arc::SecAttr::getAll("NAMESPACE"); all["SERVICE"] = Arc::SecAttr::getAll("SERVICE"); all["JOB"] = Arc::SecAttr::getAll("JOB"); all["FILE"] = Arc::SecAttr::getAll("FILE"); return all; } static bool match_lists(const std::list >& list1, const std::list& list2, std::string& matched) { for(std::list >::const_iterator l1 = list1.begin(); l1 != list1.end(); ++l1) { for(std::list::const_iterator l2 = list2.begin(); l2 != list2.end(); ++l2) { if((l1->second) == (*l2)) { matched = l1->second; return l1->first; }; }; }; return false; } static bool match_groups(std::list > const & groups, Arc::Message& inmsg) { std::string matched_group; if(!groups.empty()) { Arc::MessageAuth* auth = inmsg.Auth(); if(auth) { Arc::SecAttr* sattr = auth->get("ARCLEGACY"); 
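// The ARCLEGACY security attribute (filled in by the legacy authorization
// handler) exposes the names of matched authgroups via getAll("GROUP");
// these are compared against the groups configured for this service.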
if(sattr) { if(match_lists(groups, sattr->getAll("GROUP"), matched_group)) { return true; }; }; }; auth = inmsg.AuthContext(); if(auth) { Arc::SecAttr* sattr = auth->get("ARCLEGACY"); if(sattr) { if(match_lists(groups, sattr->getAll("GROUP"), matched_group)) { return true; }; }; }; }; return false; } //static Arc::LogStream logcerr(std::cerr); static Arc::Plugin* get_service(Arc::PluginArgument* arg) { Arc::ServicePluginArgument* srvarg = arg?dynamic_cast(arg):NULL; if(!srvarg) return NULL; Arc::PluginsFactory* factory = srvarg->get_factory();; Glib::Module* module = srvarg->get_module();; if(factory && module) factory->makePersistent(module); ARexService* arex = new ARexService((Arc::Config*)(*srvarg),arg); if(!*arex) { delete arex; arex=NULL; }; return arex; } void CountedResource::Acquire(void) { std::unique_lock lock(lock_); cond_.wait(lock, [this]() { return (limit_ < 0) || (count_ < limit_); }); ++count_; } void CountedResource::Release(void) { lock_.lock(); --count_; cond_.notify_one(); lock_.unlock(); } void CountedResource::MaxConsumers(int maxconsumers) { limit_ = maxconsumers; } CountedResource::CountedResource(int maxconsumers): limit_(maxconsumers),count_(0) { } CountedResource::~CountedResource(void) { } static std::string GetPath(std::string url){ std::string::size_type ds, ps; ds=url.find("//"); if (ds==std::string::npos) { ps=url.find("/"); } else { ps=url.find("/", ds+2); } if (ps==std::string::npos) return ""; return url.substr(ps); } Arc::MCC_Status ARexService::make_soap_fault(Arc::Message& outmsg, const char* resp) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns_,true); Arc::SOAPFault* fault = outpayload?outpayload->Fault():NULL; if(fault) { fault->Code(Arc::SOAPFault::Sender); if(!resp) { fault->Reason("Failed processing request"); } else { fault->Reason(resp); }; }; delete outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::extract_content(Arc::Message& inmsg,std::string& content,uint32_t size_limit) { // Identify payload Arc::MessagePayload* payload = inmsg.Payload(); if(!payload) { return Arc::MCC_Status(Arc::GENERIC_ERROR,"","Missing payload"); }; Arc::PayloadStreamInterface* stream = dynamic_cast(payload); Arc::PayloadRawInterface* buf = dynamic_cast(payload); if((!stream) && (!buf)) { return Arc::MCC_Status(Arc::GENERIC_ERROR,"","Error processing payload"); } // Fetch content content.clear(); if(stream) { std::string add_str; while(stream->Get(add_str)) { content.append(add_str); if((size_limit != 0) && (content.size() >= size_limit)) break; } } else { for(unsigned int n = 0;buf->Buffer(n);++n) { content.append(buf->Buffer(n),buf->BufferSize(n)); if((size_limit != 0) && (content.size() >= size_limit)) break; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::make_http_fault(Arc::Message& outmsg,int code,const char* resp) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE",Arc::tostring(code)); if(resp) outmsg.Attributes()->set("HTTP:REASON",resp); return Arc::MCC_Status(Arc::UNKNOWN_SERVICE_ERROR); } Arc::MCC_Status ARexService::make_fault(Arc::Message& /*outmsg*/) { // That will cause 500 Internal Error in HTTP return Arc::MCC_Status(); } Arc::MCC_Status ARexService::make_empty_response(Arc::Message& outmsg) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); return Arc::MCC_Status(Arc::STATUS_OK); } static std::string GetPath(Arc::Message &inmsg,std::string &base) { 
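// Derive the request path relative to the service mount point: behind the
// plexer the PLEXER:EXTENSION attribute holds the trailing path, which is
// cut off HTTP:ENDPOINT to obtain the base URL; for a standalone service
// the path is taken directly from the endpoint URL.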
base = inmsg.Attributes()->get("HTTP:ENDPOINT"); Arc::AttributeIterator iterator = inmsg.Attributes()->getAll("PLEXER:EXTENSION"); std::string path; if(iterator.hasMore()) { // Service is behind plexer path = *iterator; if(base.length() > path.length()) base.resize(base.length()-path.length()); } else { // Standalone service path=Arc::URL(base).Path(); base.resize(0); }; // Path is encoded in HTTP URLs path = Arc::uri_unencode(path); return path; } #define SOAP_NOT_SUPPORTED { \ logger_.msg(Arc::ERROR, "SOAP operation is not supported: %s", op.Name()); \ return make_soap_fault(outmsg,"Operation not supported"); \ } static void GetIdFromPath(std::string& subpath, std::string& id) { std::string::size_type subpath_pos = Arc::get_token(id, subpath, 0, "/"); subpath.erase(0,subpath_pos); while(subpath[0] == '/') subpath.erase(0,1); } Arc::MCC_Status ARexService::preProcessSecurity(Arc::Message& inmsg,Arc::Message& outmsg,Arc::SecAttr* sattr,bool is_soap,ARexConfigContext*& config,bool& passed) { passed = false; config = NULL; if(sattr) inmsg.Auth()->set("AREX",sattr); { Arc::MCC_Status sret = ProcessSecHandlers(inmsg,"incoming"); if(!sret) { logger_.msg(Arc::ERROR, "Security Handlers processing failed: %s", std::string(sret)); std::string fault_str = "Not authorized: " + std::string(sret); return is_soap ? make_soap_fault(outmsg, fault_str.c_str()) : make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, fault_str.c_str()); }; }; // Check main authorization rules std::list > const & groups = config_.MatchingGroups(); if(!groups.empty()) { if(match_groups(groups, inmsg)) { // Process grid-manager configuration if not done yet config = ARexConfigContext::GetRutimeConfiguration(inmsg, config_, uname_, endpoint_); }; }; if(!config) { // Service is not operational except public information. // But public information also has own authorization rules // Check additional authorization rules std::list > const & groups = config_.MatchingGroupsPublicInformation(); if(!groups.empty()) { if(!match_groups(groups, inmsg)) { logger_.msg(Arc::VERBOSE, "Can't obtain configuration. Public information is disallowed for this user."); char const* fault = "User can't be assigned configuration"; return is_soap ? make_soap_fault(outmsg, fault) : make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, fault); }; }; logger_.msg(Arc::VERBOSE, "Can't obtain configuration. Only public information is provided."); } else { config->ClearAuths(); config->AddAuth(inmsg.Auth()); config->AddAuth(inmsg.AuthContext()); } passed = true; return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::postProcessSecurity(Arc::Message& outmsg, bool& passed) { passed = true; Arc::MCC_Status sret = ProcessSecHandlers(outmsg,"outgoing"); if(!sret) { logger_.msg(Arc::ERROR, "Security Handlers processing failed: %s", std::string(sret)); delete outmsg.Payload(NULL); passed = false; }; return sret; } Arc::MCC_Status ARexService::process(Arc::Message& inmsg,Arc::Message& outmsg) { // Split request path into parts: service, job and file path. 
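// Illustration with a hypothetical request: for
// "GET https://host:443/arex/123456789abcdef/stdout" the endpoint becomes
// "https://host:443/arex", the id "123456789abcdef" and the subpath "stdout".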
// TODO: make it HTTP independent std::string endpoint; std::string method = inmsg.Attributes()->get("HTTP:METHOD"); std::string clientid = (inmsg.Attributes()->get("TCP:REMOTEHOST"))+":"+(inmsg.Attributes()->get("TCP:REMOTEPORT")); logger_.msg(Arc::INFO, "Connection from %s: %s", inmsg.Attributes()->get("TCP:REMOTEHOST"), inmsg.Attributes()->get("TLS:IDENTITYDN")); std::string subpath = GetPath(inmsg,endpoint); if((inmsg.Attributes()->get("PLEXER:PATTERN").empty()) && subpath.empty()) subpath=endpoint; logger_.msg(Arc::VERBOSE, "process: method: %s", method); logger_.msg(Arc::VERBOSE, "process: endpoint: %s", endpoint); std::string id; GetIdFromPath(subpath, id); // Make SubOpCache separate service enum { SubOpNone, SubOpInfo, SubOpLogs, SubOpNew, SubOpDelegation, SubOpCache, SubOpRest } sub_op = SubOpNone; // Sort out path if(id == InfoPath) { sub_op = SubOpInfo; id.erase(); } else if(id == LogsPath) { sub_op = SubOpLogs; GetIdFromPath(subpath, id); } else if(id == NewPath) { sub_op = SubOpNew; id.erase(); } else if(id == DelegationPath) { sub_op = SubOpDelegation; GetIdFromPath(subpath, id); } else if(id == CachePath) { sub_op = SubOpCache; id.erase(); } else if(id == RestPath) { sub_op = SubOpRest; id.erase(); }; logger_.msg(Arc::VERBOSE, "process: id: %s", id); logger_.msg(Arc::VERBOSE, "process: subop: %s", (sub_op==SubOpNone)?"none": ((sub_op==SubOpInfo)?InfoPath: ((sub_op==SubOpLogs)?LogsPath: ((sub_op==SubOpNew)?NewPath: ((sub_op==SubOpDelegation)?DelegationPath: ((sub_op==SubOpCache)?CachePath: ((sub_op==SubOpRest)?RestPath:"unknown"))))))); logger_.msg(Arc::VERBOSE, "process: subpath: %s", subpath); // Switch to REST ASAP if(sub_op == SubOpRest) { ARexSecAttr* sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_UNDEFINED)); if(sattr) sattr->SetResource(endpoint,id,subpath); ARexConfigContext* config(NULL); bool passed = false; Arc::MCC_Status sret = preProcessSecurity(inmsg,outmsg,sattr,false,config,passed); if(!passed) return sret; // config may be null here, but REST handles user configuration separately. 
sret = rest_.process(inmsg, outmsg); if(sret) { bool passed = false; sret = postProcessSecurity(outmsg,passed); } return sret; } // Sort out request and identify operation requested Arc::PayloadSOAP* inpayload = NULL; Arc::XMLNode op; ARexSecAttr* sattr = NULL; if(method == "POST") { logger_.msg(Arc::VERBOSE, "process: POST"); // Both input and output are supposed to be SOAP // Extracting payload try { inpayload = dynamic_cast(inmsg.Payload()); } catch(std::exception& e) { }; if(!inpayload) { logger_.msg(Arc::ERROR, "input is not SOAP"); return make_soap_fault(outmsg); }; if(logger_.getThreshold() <= Arc::VERBOSE) { std::string str; inpayload->GetDoc(str, true); logger_.msg(Arc::VERBOSE, "process: request=%s",str); }; // Analyzing request op = inpayload->Child(0); if(!op) { logger_.msg(Arc::ERROR, "input does not define operation"); return make_soap_fault(outmsg); }; logger_.msg(Arc::INFO, "process: operation: %s",op.Name()); // Adding A-REX attributes sattr = new ARexSecAttr(op); } else if(method == "GET") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_READ)); } else if(method == "HEAD") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_READ)); } else if(method == "PUT") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_MODIFY)); } else if(method == "DELETE") { sattr = new ARexSecAttr(std::string(JOB_POLICY_OPERATION_MODIFY)); } if(sattr) sattr->SetResource(endpoint,id,subpath); ARexConfigContext* config(NULL); bool passed = false; Arc::MCC_Status sret = preProcessSecurity(inmsg,outmsg,sattr,method=="POST",config,passed); if(!passed) return sret; // config may be null for anonymous requests // Identify which of served endpoints request is for. // Using simplified algorithm - POST for SOAP messages, // GET and PUT for data transfer if(method == "POST") { // Check if request is for top of tree (factory) or particular // job (listing activity) // It must be base URL in request if(sub_op != SubOpNone) { logger_.msg(Arc::ERROR, "POST request on special path is not supported"); return make_soap_fault(outmsg); }; if(id.empty()) { // Factory operations logger_.msg(Arc::VERBOSE, "process: factory endpoint"); if(config_.ARCInterfaceEnabled() && MatchXMLNamespace(op,BES_ARC_NAMESPACE)) { if(!config) return make_soap_fault(outmsg, "User can't be assigned configuration"); // Aplying known namespaces inpayload->Namespaces(ns_); if(MatchXMLName(op,"CacheCheck")) { Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns_); // Preparing known namespaces outpayload->Namespaces(ns_); CacheCheck(*config,*inpayload,*outpayload); outmsg.Payload(outpayload); } else { SOAP_NOT_SUPPORTED; } } else if(delegation_stores_.MatchNamespace(*inpayload)) { if(!config) return make_soap_fault(outmsg, "User can't be assigned configuration"); // Aplying known namespaces inpayload->Namespaces(ns_); Arc::PayloadSOAP* outpayload = new Arc::PayloadSOAP(ns_); // Preparing known namespaces outpayload->Namespaces(ns_); CountedResourceLock cl_lock(beslimit_); std::string credentials; if(!delegation_stores_.Process(config->GmConfig().DelegationDir(), *inpayload,*outpayload,config->GridName(),credentials)) { delete outpayload; return make_soap_fault(outmsg); }; if(!credentials.empty()) { // Credentials obtained as outcome of operation if(MatchXMLNamespace(op,DELEG_ARC_NAMESPACE)) { // ARC delegation is done per job but stored under // own id. So storing must be done outside processing code. 
UpdateCredentials(*config,op,outpayload->Child(),credentials); }; }; outmsg.Payload(outpayload); } else { SOAP_NOT_SUPPORTED; }; if(logger_.getThreshold() <= Arc::VERBOSE) { std::string str; Arc::PayloadSOAP* oupayload = dynamic_cast(outmsg.Payload()); if(oupayload) oupayload->GetDoc(str, true); logger_.msg(Arc::VERBOSE, "process: response=%s",str); }; } else { // Listing operations for session directories // TODO: proper failure like interface is not supported logger_.msg(Arc::ERROR, "Per-job POST/SOAP requests are not supported"); return make_soap_fault(outmsg,"Operation not supported"); }; bool passed = false; Arc::MCC_Status sret = postProcessSecurity(outmsg,passed); if(!passed) return sret; return Arc::MCC_Status(Arc::STATUS_OK); } else if(method == "GET") { // HTTP plugin either provides buffer or stream logger_.msg(Arc::VERBOSE, "process: GET"); logger_.msg(Arc::INFO, "GET: id %s path %s", id, subpath); if(!config && (sub_op != SubOpInfo)) return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User can't be assigned configuration"); Arc::MCC_Status ret; CountedResourceLock cl_lock(datalimit_); switch(sub_op) { case SubOpInfo: ret = GetInfo(inmsg,outmsg,*config,subpath); break; case SubOpNew: ret = GetNew(inmsg,outmsg,*config,subpath); break; case SubOpLogs: ret = GetLogs(inmsg,outmsg,*config,id,subpath); break; case SubOpDelegation: ret = GetDelegation(inmsg,outmsg,*config,id,subpath); break; case SubOpCache: ret = GetCache(inmsg,outmsg,*config,subpath); break; case SubOpNone: default: ret = GetJob(inmsg,outmsg,*config,id,subpath); break; }; if(ret) { bool passed = false; Arc::MCC_Status sret = postProcessSecurity(outmsg,passed); if(!passed) return sret; }; return ret; } else if(method == "HEAD") { logger_.msg(Arc::VERBOSE, "process: HEAD"); logger_.msg(Arc::INFO, "HEAD: id %s path %s", id, subpath); if(!config && (sub_op != SubOpInfo)) return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User can't be assigned configuration"); Arc::MCC_Status ret; CountedResourceLock cl_lock(datalimit_); switch(sub_op) { case SubOpInfo: ret = HeadInfo(inmsg,outmsg,*config,subpath); break; case SubOpNew: ret = HeadNew(inmsg,outmsg,*config,subpath); break; case SubOpLogs: ret = HeadLogs(inmsg,outmsg,*config,id,subpath); break; case SubOpDelegation: ret = HeadDelegation(inmsg,outmsg,*config,id,subpath); break; case SubOpCache: ret = HeadCache(inmsg,outmsg,*config,subpath); break; case SubOpNone: default: ret = HeadJob(inmsg,outmsg,*config,id,subpath); break; }; if(ret) { bool passed = false; Arc::MCC_Status sret = postProcessSecurity(outmsg,passed); if(!passed) return sret; }; return ret; } else if(method == "PUT") { logger_.msg(Arc::VERBOSE, "process: PUT"); if(!config) return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User can't be assigned configuration"); Arc::MCC_Status ret; CountedResourceLock cl_lock(datalimit_); switch(sub_op) { case SubOpInfo: ret = PutInfo(inmsg,outmsg,*config,subpath); break; case SubOpNew: ret = PutNew(inmsg,outmsg,*config,subpath); break; case SubOpLogs: ret = PutLogs(inmsg,outmsg,*config,id,subpath); break; case SubOpDelegation: ret = PutDelegation(inmsg,outmsg,*config,id,subpath); break; case SubOpCache: ret = PutCache(inmsg,outmsg,*config,subpath); break; case SubOpNone: default: ret = PutJob(inmsg,outmsg,*config,id,subpath); break; }; if(ret) { bool passed = false; Arc::MCC_Status sret = postProcessSecurity(outmsg,passed); if(!passed) return sret; }; return ret; } else if(method == "DELETE") { logger_.msg(Arc::VERBOSE, "process: DELETE"); if(!config) return 
make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User can't be assigned configuration"); Arc::MCC_Status ret; CountedResourceLock cl_lock(datalimit_); switch(sub_op) { case SubOpInfo: ret = DeleteInfo(inmsg,outmsg,*config,subpath); break; case SubOpNew: ret = DeleteNew(inmsg,outmsg,*config,subpath); break; case SubOpLogs: ret = DeleteLogs(inmsg,outmsg,*config,id,subpath); break; case SubOpDelegation: ret = DeleteDelegation(inmsg,outmsg,*config,id,subpath); break; case SubOpCache: ret = DeleteCache(inmsg,outmsg,*config,subpath); break; case SubOpNone: default: ret = DeleteJob(inmsg,outmsg,*config,id,subpath); break; }; if(ret) { bool passed = false; Arc::MCC_Status sret = postProcessSecurity(outmsg,passed); if(!passed) return sret; }; return ret; } else if(!method.empty()) { logger_.msg(Arc::VERBOSE, "process: method %s is not supported",method); return make_http_fault(outmsg,501,"Not Implemented"); } else { logger_.msg(Arc::VERBOSE, "process: method is not defined"); return Arc::MCC_Status(); }; return Arc::MCC_Status(); } Arc::MCC_Status ARexService::HeadDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { return make_http_fault(outmsg,405,"No probing on delegation interface"); } Arc::MCC_Status ARexService::GetDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!&config) { return make_http_fault(outmsg, HTTP_ERR_FORBIDDEN, "User is not identified"); }; if(!subpath.empty()) { return make_http_fault(outmsg,500,"No additional path expected"); }; std::string deleg_id = id; // Update request in case of non-empty id std::string deleg_request; if(!delegation_stores_.GetRequest(config.GmConfig().DelegationDir(), deleg_id,config.GridName(),deleg_request)) { return make_http_fault(outmsg,500,"Failed generating delegation request"); }; // Create positive HTTP response Arc::PayloadRaw* buf = new Arc::PayloadRaw; if(buf) buf->Insert(deleg_request.c_str(),0,deleg_request.length()); outmsg.Payload(buf); outmsg.Attributes()->set("HTTP:content-type","application/x-pem-file"); // ?? outmsg.Attributes()->set("HTTP:CODE",Arc::tostring(200)); outmsg.Attributes()->set("HTTP:REASON",deleg_id.c_str()); return Arc::MCC_Status(Arc::STATUS_OK); } Arc::MCC_Status ARexService::PutDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { if(!subpath.empty()) { return make_http_fault(outmsg,500,"No additional path expected"); }; if(id.empty()) { return make_http_fault(outmsg,500,"Delegation id expected"); }; // Fetch HTTP content std::string content; Arc::MCC_Status res = ARexService::extract_content(inmsg,content,1024*1024); // 1mb size limit is sane enough if(!res) return make_http_fault(outmsg,500,res.getExplanation().c_str()); if(content.empty()) return make_http_fault(outmsg,500,"Missing payload"); if(!delegation_stores_.PutDeleg(config.GmConfig().DelegationDir(), id,config.GridName(),content)) { return make_http_fault(outmsg,500,"Failed accepting delegation"); }; #if 1 // In case of update for compatibility during intermediate period store delegations in // per-job proxy file too. 
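// To make the flow around this interface concrete (a hypothetical exchange,
// not taken from documentation): a client first issues a GET on the
// DelegationPath endpoint; GetDelegation() above replies with a PEM-encoded
// certificate request (content type application/x-pem-file) and reports the
// delegation id in the HTTP reason phrase. The client signs the request and
// PUTs the resulting certificate chain back under that id, which is handled
// by PutDelegation() and stored via PutDeleg(). The compatibility loop below
// then also copies the fresh credentials into the per-job proxy file of
// every job currently locking this delegation id.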
DelegationStore& delegation_store(delegation_stores_[config.GmConfig().DelegationDir()]); std::list<std::string> job_ids; if(delegation_store.GetLocks(id,config.GridName(),job_ids)) { for(std::list<std::string>::iterator job_id = job_ids.begin(); job_id != job_ids.end(); ++job_id) { // check if that is main delegation for this job std::string delegationid; if(job_local_read_delegationid(*job_id,config.GmConfig(),delegationid)) { if(id == delegationid) { std::string credentials; if(delegation_store.GetCred(id,config.GridName(),credentials)) { if(!credentials.empty()) { GMJob job(*job_id,Arc::User(config.User().get_uid())); (void)job_proxy_write_file(job,config.GmConfig(),credentials); }; }; }; }; }; }; #endif return make_empty_response(outmsg); } Arc::MCC_Status ARexService::DeleteDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { return make_http_fault(outmsg,501,"Not Implemented"); } static void information_collector_starter(void* arg) { if(!arg) return; ((ARexService*)arg)->InformationCollector(); } void ARexService::gm_threads_starter(void* arg) { if(!arg) return; ARexService* arex = (ARexService*)arg; arex->gm_threads_starter(); } void ARexService::gm_threads_starter() { if (!endpoint_.empty()) { // no need to do this if no WS interface // Remove the WS log from the log destinations. // Here we assume the order is gm, ws, [stderr (arched -f)] std::list<Arc::LogDestination*> dests = Arc::Logger::getRootLogger().getDestinations(); if (dests.size() > 1) { std::list<Arc::LogDestination*>::iterator i = dests.begin(); ++i; dests.erase(i); Arc::Logger::getRootLogger().setThreadContext(); Arc::Logger::getRootLogger().removeDestinations(); Arc::Logger::getRootLogger().addDestinations(dests); } } // Run grid-manager in thread gm_ = new GridManager(config_); if (!(*gm_)) { logger_.msg(Arc::ERROR, "Failed to run Grid Manager thread"); delete gm_; gm_=NULL; return; } // Start info collector thread CreateThreadFunction(&information_collector_starter, this); } class ArexServiceNamespaces: public Arc::NS { public: ArexServiceNamespaces() { // Define supported namespaces Arc::NS& ns_(*this); ns_[BES_ARC_NPREFIX]=BES_ARC_NAMESPACE; ns_[DELEG_ARC_NPREFIX]=DELEG_ARC_NAMESPACE; ns_["wsa"]="http://www.w3.org/2005/08/addressing"; ns_["jsdl"]="http://schemas.ggf.org/jsdl/2005/11/jsdl"; ns_["wsrf-bf"]="http://docs.oasis-open.org/wsrf/bf-2"; ns_["wsrf-r"]="http://docs.oasis-open.org/wsrf/r-2"; ns_["wsrf-rw"]="http://docs.oasis-open.org/wsrf/rw-2"; }; }; Arc::NS ARexService::ns_ = ArexServiceNamespaces(); ARexService::ARexService(Arc::Config *cfg,Arc::PluginArgument *parg):Arc::Service(cfg,parg), logger_(Arc::Logger::rootLogger, "A-REX"), delegation_stores_(), infodoc_(true), infoprovider_wakeup_period_(0), all_jobs_count_(0), gm_(NULL), rest_(cfg, parg, config_, delegation_stores_, all_jobs_count_) { valid = false; config_.SetJobLog(new JobLog()); config_.SetJobsMetrics(new JobsMetrics()); config_.SetHeartBeatMetrics(new HeartBeatMetrics()); config_.SetSpaceMetrics(new SpaceMetrics()); config_.SetJobPerfLog(new Arc::JobPerfLog()); config_.SetContPlugins(new ContinuationPlugins()); // logger_.addDestination(logcerr); // Obtain information from configuration endpoint_=(std::string)((*cfg)["endpoint"]); uname_=(std::string)((*cfg)["usermap"]["defaultLocalName"]); std::string gmconfig=(std::string)((*cfg)["gmconfig"]); if (Arc::lower((std::string)((*cfg)["publishStaticInfo"])) == "yes") { publishstaticinfo_=true; } else { publishstaticinfo_=false; } config_.SetDelegations(&delegation_stores_);
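// For orientation, the XML service configuration consumed by this
// constructor might look roughly as follows (a sketch with invented values;
// only the element names are taken from the surrounding code):
//   <Service name="a-rex">
//     <endpoint>https://host:443/arex</endpoint>
//     <usermap><defaultLocalName>griduser</defaultLocalName></usermap>
//     <gmconfig>/etc/arc.conf</gmconfig>
//     <publishStaticInfo>no</publishStaticInfo>
//     <InfoproviderWakeupPeriod>600</InfoproviderWakeupPeriod>
//     <InfosysInterfaceMaxClients>10</InfosysInterfaceMaxClients>
//   </Service>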
config_.SetConfigFile(gmconfig); if (!config_.Load()) { logger_.msg(Arc::ERROR, "Failed to process configuration in %s", gmconfig); return; } // Check for mandatory commands in configuration if (config_.ControlDir().empty()) { logger.msg(Arc::ERROR, "No control directory set in configuration"); return; } if (config_.SessionRoots().empty()) { logger.msg(Arc::ERROR, "No session directory set in configuration"); return; } if (config_.DefaultLRMS().empty()) { logger.msg(Arc::ERROR, "No LRMS set in configuration"); return; } // Pass information about delegation db type { DelegationStore::DbType deleg_db_type = DelegationStore::DbBerkeley; switch(config_.DelegationDBType()) { case GMConfig::deleg_db_bdb: deleg_db_type = DelegationStore::DbBerkeley; break; case GMConfig::deleg_db_sqlite: deleg_db_type = DelegationStore::DbSQLite; break; }; delegation_stores_.SetDbType(deleg_db_type); }; // Set default queue if none given if(config_.DefaultQueue().empty() && (config_.Queues().size() == 1)) { config_.SetDefaultQueue(config_.Queues().front()); } gmrun_ = (std::string)((*cfg)["gmrun"]); common_name_ = (std::string)((*cfg)["commonName"]); long_description_ = (std::string)((*cfg)["longDescription"]); // TODO: check for enumeration values os_name_ = (std::string)((*cfg)["OperatingSystem"]); std::string debugLevel = (std::string)((*cfg)["debugLevel"]); if(!debugLevel.empty()) { logger_.setThreshold(Arc::istring_to_level(debugLevel)); }; int valuei; if ((!(*cfg)["InfoproviderWakeupPeriod"]) || (!Arc::stringto((std::string)((*cfg)["InfoproviderWakeupPeriod"]),infoprovider_wakeup_period_))) { infoprovider_wakeup_period_ = DEFAULT_INFOPROVIDER_WAKEUP_PERIOD; }; if ((!(*cfg)["InfosysInterfaceMaxClients"]) || (!Arc::stringto((std::string)((*cfg)["InfosysInterfaceMaxClients"]),valuei))) { valuei = DEFAULT_INFOSYS_MAX_CLIENTS; }; infolimit_.MaxConsumers(valuei); if ((!(*cfg)["JobControlInterfaceMaxClients"]) || (!Arc::stringto((std::string)((*cfg)["JobControlInterfaceMaxClients"]),valuei))) { valuei = DEFAULT_JOBCONTROL_MAX_CLIENTS; }; beslimit_.MaxConsumers(valuei); if ((!(*cfg)["DataTransferInterfaceMaxClients"]) || (!Arc::stringto((std::string)((*cfg)["DataTransferInterfaceMaxClients"]),valuei))) { valuei = DEFAULT_DATATRANSFER_MAX_CLIENTS; }; datalimit_.MaxConsumers(valuei); // If WS interface is enabled and multiple log files are configured then here // the log splits between WS interface operations and GM job processing. 
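// (The destination list is assumed to be ordered [gm log, ws log, stderr]:
// gm_threads_starter() above drops the second entry so the GM threads keep
// logging to the gm log, while the block further below drops the first entry
// so this WS thread keeps the ws log - hence the "potentially dangerous"
// remark next to it.)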
// Start separate thread to start GM and info collector threads so they can // log to GM log after we remove it in this thread if ((gmrun_.empty()) || (gmrun_ == "internal")) { // create or update control directory if not yet done if(!config_.CreateControlDirectory()) { logger_.msg(Arc::ERROR, "Failed to create control directory %s", config_.ControlDir()); return; } if(!config_.UpdateControlDirectory()) { logger_.msg(Arc::ERROR, "Failed to update control directory %s", config_.ControlDir()); return; } Arc::SimpleCounter counter; if (!CreateThreadFunction(&gm_threads_starter, this, &counter)) return; counter.wait(); if(!gm_) { logger_.msg(Arc::ERROR, "Failed to start GM threads"); return; // GM didn't start } } // If WS is used then remove gm log destination from this thread if (!endpoint_.empty()) { // Assume that gm log is first in list - potentially dangerous std::list dests = logger.getRootLogger().getDestinations(); if (dests.size() > 1) { dests.pop_front(); logger.getRootLogger().removeDestinations(); logger.getRootLogger().addDestinations(dests); } } { std::string base_path = config_.ControlDir()+"/tokenissuers"; DIR* dir = ::opendir(base_path.c_str()); if(dir) { for(;;) { struct dirent* d = ::readdir(dir); if(!d) break; if(strcmp(d->d_name,".") == 0) continue; if(strcmp(d->d_name,"..") == 0) continue; std::string issuer_path = base_path + "/" + d->d_name + "/issuer"; std::string metadata_path = base_path + "/" + d->d_name + "/metadata"; std::string keys_path = base_path + "/" + d->d_name + "/keys"; std::string issuer; std::string metadata; std::string keys; if(Arc::FileRead(issuer_path, issuer) && Arc::FileRead(metadata_path, metadata) && Arc::FileRead(keys_path, keys)) { if(!issuer.empty() && !metadata.empty() && !keys.empty()) { // Assign validity enough to the future if(Arc::JWSE::SetIssuerInfo(nullptr, true, issuer, metadata, keys, logger_)) { logger_.msg(Arc::INFO, "Created entry for JWT issuer %s", d->d_name); } else { logger_.msg(Arc::ERROR, "Failed to create entry for JWT issuer %s", d->d_name); } } else { logger_.msg(Arc::ERROR, "Empty data for JWT issuer %s", d->d_name); } } else { logger_.msg(Arc::ERROR, "Failed to read data for JWT issuer %s", d->d_name); } } closedir(dir); } } valid=true; } ARexService::~ARexService(void) { thread_count_.RequestCancel(); delete gm_; // This should stop all GM-related threads too thread_count_.WaitForExit(); // Here A-REX threads are waited for // There should be no more threads using resources - can proceed if(config_.ConfigIsTemp()) unlink(config_.ConfigFile().c_str()); delete config_.GetContPlugins(); delete config_.GetJobLog(); delete config_.GetJobPerfLog(); delete config_.GetJobsMetrics(); delete config_.GetHeartBeatMetrics(); delete config_.GetSpaceMetrics(); } } // namespace ARex extern Arc::PluginDescriptor const ARC_PLUGINS_TABLE_NAME[] = { { "a-rex", "HED:SERVICE", NULL, 0, &ARex::get_service }, { NULL, NULL, NULL, 0, NULL } }; nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/job.h0000644000000000000000000000013215067751327017724 xustar0030 mtime=1759498967.764492116 30 atime=1759498967.869493711 30 ctime=1759499029.347380678 nordugrid-arc-7.1.1/src/services/a-rex/job.h0000644000175000002070000002256415067751327021637 0ustar00mockbuildmock00000000000000#ifndef __ARC_AREX_JOB_H__ #define __ARC_AREX_JOB_H__ #include #include #include #include #include #include #include #include "grid-manager/files/ControlFileContent.h" #include "tools.h" namespace ARex { class GMConfig; #define JOB_POLICY_OPERATION_URN 
"http://www.nordugrid.org/schemas/policy-arc/types/a-rex/joboperation" #define JOB_POLICY_OPERATION_CREATE "Create" #define JOB_POLICY_OPERATION_MODIFY "Modify" #define JOB_POLICY_OPERATION_READ "Read" #define JOB_POLICY_OPERATION_UNDEFINED "Undefined" class ARexGMConfig { private: const GMConfig& config_; Arc::User user_; bool readonly_; std::string grid_name_; // temporary solution std::string service_endpoint_; // temporary solution std::list auths_; // Separate lists outside GMConfig as they can be substituted per user std::vector session_roots_; std::vector session_roots_non_draining_; protected: static Arc::Logger logger; public: ARexGMConfig(const GMConfig& config,const std::string& uname,const std::string& grid_name,const std::string& service_endpoint); operator bool(void) const { return (bool)user_; }; bool operator!(void) const { return !(bool)user_; }; const Arc::User& User(void) const { return user_; }; const GMConfig& GmConfig() const { return config_; }; bool ReadOnly(void) const { return readonly_; }; const std::string& GridName(void) const { return grid_name_; }; const std::string& Endpoint(void) const { return service_endpoint_; }; void AddAuth(Arc::MessageAuth* auth) { auths_.push_back(auth); }; void ClearAuths(void) { auths_.clear(); }; std::list::iterator beginAuth(void) { return auths_.begin(); }; std::list::iterator endAuth(void) { return auths_.end(); }; std::vector SessionRootsNonDraining(void) { return session_roots_non_draining_; }; std::vector SessionRoots(void) { return session_roots_; }; }; class ARexConfigContext:public Arc::MessageContextElement, public ARexGMConfig { public: ARexConfigContext(GMConfig& config,const std::string& uname,const std::string& grid_name,const std::string& service_endpoint): ARexGMConfig(config,uname,grid_name,service_endpoint) { }; virtual ~ARexConfigContext(void) { }; static ARexConfigContext* GetRutimeConfiguration(Arc::Message& inmsg, GMConfig& gmconfig, std::string const & default_uname, std::string const & default_endpoint); // Authorization methods enum OperationType { OperationServiceInfo, // information about service OperationJobInfo, // information about job OperationJobCreate, // creation of new job OperationJobCancel, // canceling existing job OperationJobDelete, // removing existing job OperationDataInfo, // getting information about file in session OperationDataWrite, // writing file to session OperationDataRead, // reading file from session }; static bool CheckOperationAllowed(OperationType op, ARexConfigContext* config, std::string& msg); }; typedef enum { ARexJobNoError, ARexJobInternalError, // Failed during some internal operation - like writing some file ARexJobConfigurationError, // Problem detected which can be fixed by adjusting configuration of service ARexJobDescriptionUnsupportedError, // Job asks for feature or combination not supported by service ARexJobDescriptionMissingError, // Job is missing optional but needed for this service element ARexJobDescriptionSyntaxError, // Job description is malformed - missing elements, wrong names, etc. ARexJobDescriptionLogicalError // Job request otherwise corect has some values out of scope of service } ARexJobFailure; /** This class represents convenience interface to manage jobs handled by Grid Manager. It works mostly through corresponding classes and functions of Grid Manager. 
*/ class ARexJob { private: std::string id_; std::string failure_; ARexJobFailure failure_type_; bool allowed_to_see_; bool allowed_to_maintain_; Arc::Logger& logger_; /** Returns true if job exists and authorization was checked without errors. Fills information about authorization in this instance. */ bool is_allowed(bool fast = false); ARexGMConfig& config_; uid_t uid_; /* local user id this job is mapped to - not always same as in config_.user_ */ gid_t gid_; JobLocalDescription job_; bool make_job_id(); static std::size_t make_job_id(ARexGMConfig& config_, Arc::Logger& logger_, std::vector& ids); bool delete_job_id(); static bool delete_job_id(ARexGMConfig& config_, Arc::User user_, std::string const& sessiondir, std::vector& ids, std::size_t offset = 0); bool update_credentials(const std::string& credentials); static void make_new_job(ARexGMConfig& config_, Arc::Logger& logger_, int& min_jobs, int& max_jobs, std::string const& job_desc_str, const std::string& delegid,const std::string& queue,const std::string& clientid,JobIDGenerator& idgenerator, std::vector& ids, JobLocalDescription& job_, ARexJobFailure& failure_type_, std::string& failure_); public: static bool Generate(Arc::XMLNode xmljobdesc,int& min_jobs,int& max_jobs,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator,std::vector& ids,std::string& failure); static bool Generate(std::string const& job_desc_str,int min_jobs,int max_jobs,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator,std::vector& ids,std::string& failure); /** Create instance which is an interface to existing job */ ARexJob(const std::string& id,ARexGMConfig& config,Arc::Logger& logger,bool fast_auth_check = false); /** Create new job with provided description */ ARexJob(Arc::XMLNode xmljobdesc,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator); /** Create new job with provided textual description */ ARexJob(std::string const& job_desc_str,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator); operator bool(void) { return !id_.empty(); }; bool operator!(void) { return id_.empty(); }; /** Returns textual description of failure of last operation */ std::string Failure(void) { std::string r=failure_; failure_=""; failure_type_=ARexJobNoError; return r; }; operator ARexJobFailure(void) { return failure_type_; }; /** Return ID assigned to job */ std::string ID(void) { return id_; }; /** Return local user id assigned to job */ uid_t UID(void) { return uid_; }; /** Return local user group assigned to job */ gid_t GID(void) { return gid_; }; /** Fills provided xml container with job description */ bool GetDescription(Arc::XMLNode& xmljobdesc); /** Cancel processing/execution of job */ bool Cancel(void); /** Remove job from local pool */ bool Clean(void); /** Resume execution of job after error */ bool Resume(void); /** Returns current state of job */ std::string State(void); /** Returns current state of job and sets job_pending to true if job is pending due to external limits */ std::string State(bool& job_pending); /** Returns true if job has failed */ bool Failed(void); /** Returns state at which job failed and sets cause to information what caused job failure: "internal" for server 
initiated and "client" for canceled on client request. */ std::string FailedState(std::string& cause); /** Returns time when job was created. */ Arc::Time Created(void); /** Returns time when job state was last modified. */ Arc::Time Modified(void); /** Returns path to session directory */ std::string SessionDir(void); /** Returns name of virtual log directory */ std::string LogDir(void); /** Return number of jobs associated with this configuration. TODO: total for all user configurations. */ static int TotalJobs(ARexGMConfig& config,Arc::Logger& logger); /** Returns list of user's jobs. Fine-grained ACL is ignored. */ static std::list Jobs(ARexGMConfig& config,Arc::Logger& logger); /** Creates file in job's session directory and returns handler */ Arc::FileAccess* CreateFile(const std::string& filename); /** Opens file in job's session directory and returns handler */ Arc::FileAccess* OpenFile(const std::string& filename,bool for_read,bool for_write); std::string GetFilePath(const std::string& filename); bool ReportFileComplete(const std::string& filename); bool ReportFilesComplete(); /** Opens log file in control directory */ int OpenLogFile(const std::string& name); std::string GetLogFilePath(const std::string& name); /** Opens directory inside session directory */ Arc::FileAccess* OpenDir(const std::string& dirname); /** Returns list of existing log files */ std::list LogFiles(void); /** Updates job credentials */ bool UpdateCredentials(const std::string& credentials); /** Select a session dir to use for this job */ static bool ChooseSessionDir(ARexGMConfig& config_,Arc::Logger& logger_,std::string& sessiondir); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/authop.cpp0000644000000000000000000000013215067751327021005 xustar0030 mtime=1759498967.750491903 30 atime=1759498967.862493605 30 ctime=1759499029.328333841 nordugrid-arc-7.1.1/src/services/a-rex/authop.cpp0000644000175000002070000000721015067751327022707 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "arex.h" namespace ARex { static std::string toString(std::list strings) { std::string res; for(std::list::iterator it = strings.begin(); it != strings.end(); ++it) { res.append(*it).append(" "); } return res; } bool ARexConfigContext::CheckOperationAllowed(OperationType op, ARexConfigContext* config, std::string& msg) { // TODO: very simplified code below. Proper way to identify how client was identified and // which authentication information matched authorization rules LegacySecAttr must be used. 
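// Worked example (all values hypothetical): if the configuration maps the
// "jobinfo" operation to the scope list {"computing.read"} and the client's
// OTokens attribute carries scopes {"computing.read","computing.create"},
// the std::find() match below authorizes OperationJobInfo; an operation with
// no configured scopes is allowed for any authenticated token.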
if(!config) { logger.msg(Arc::DEBUG, "CheckOperationAllowed: missing configuration"); msg = "User has no configuration assigned"; return false; } bool has_tls_identity = false; bool has_token_identity = false; std::list<std::string> scopes; for(std::list<Arc::MessageAuth*>::iterator a = config->beginAuth();a!=config->endAuth();++a) { if(*a) { Arc::SecAttr* sattr = NULL; if((sattr = (*a)->get("TLS"))) { has_tls_identity = !sattr->get("SUBJECT").empty(); } if((sattr = (*a)->get("OTOKENS"))) { scopes = sattr->getAll("scope"); has_token_identity = !sattr->get("iss").empty(); } } } if(has_token_identity) { std::list<std::string> const * allowed_scopes = NULL; switch(op) { case OperationServiceInfo: allowed_scopes = &(config->GmConfig().TokenScopes("info")); break; case OperationJobInfo: allowed_scopes = &(config->GmConfig().TokenScopes("jobinfo")); break; case OperationJobCreate: allowed_scopes = &(config->GmConfig().TokenScopes("jobcreate")); break; case OperationJobCancel: allowed_scopes = &(config->GmConfig().TokenScopes("jobcancel")); break; case OperationJobDelete: allowed_scopes = &(config->GmConfig().TokenScopes("jobdelete")); break; case OperationDataInfo: allowed_scopes = &(config->GmConfig().TokenScopes("datainfo")); break; case OperationDataWrite: allowed_scopes = &(config->GmConfig().TokenScopes("datawrite")); break; case OperationDataRead: allowed_scopes = &(config->GmConfig().TokenScopes("dataread")); break; default: break; } // No assigned scopes means no limitation if((!allowed_scopes) || (allowed_scopes->empty())) { logger.msg(Arc::DEBUG, "CheckOperationAllowed: allowed due to missing configuration scopes"); return true; } logger.msg(Arc::DEBUG, "CheckOperationAllowed: token scopes: %s", toString(scopes)); logger.msg(Arc::DEBUG, "CheckOperationAllowed: configuration scopes: %s", toString(*allowed_scopes)); for(std::list<std::string>::iterator scopeIt = scopes.begin(); scopeIt != scopes.end(); ++scopeIt) { if(std::find(allowed_scopes->begin(), allowed_scopes->end(), *scopeIt) != allowed_scopes->end()) { logger.msg(Arc::DEBUG, "CheckOperationAllowed: allowed due to matching scopes"); return true; } } logger.msg(Arc::ERROR, "CheckOperationAllowed: token scopes do not match required scopes"); msg = "Token scopes do not match required scopes"; return false; } if(has_tls_identity) { logger.msg(Arc::DEBUG, "CheckOperationAllowed: allowed for TLS connection"); return true; // X.509 authorization has no per-operation granularity } logger.msg(Arc::ERROR, "CheckOperationAllowed: no supported identity found"); msg = "No supported identity found"; return false; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arc-arex.in0000644000000000000000000000013115067751327021032 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.321092688 nordugrid-arc-7.1.1/src/services/a-rex/arc-arex.in0000644000175000002070000001221515067751327022736 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the A-REX service # # chkconfig: 2345 75 25 # description: NorduGrid A-REX # # config: /etc/sysconfig/globus # config: /etc/sysconfig/nordugrid # config: @prefix@/etc/arc.conf # config: /etc/arc.conf # # This startup script takes the ARC0 configuration file as # its input and generates an ARC1 arched configuration file # which contains commands to start the A-REX service. The service # is either run isolated or with the WS interface enabled.
# To enable WS interface ARC0 configuration file must # contain [arex/ws/jobs] section # and mandatory option in [arex/ws]: # wsurl="a_rex_url" ### BEGIN INIT INFO # Provides: arc-arex # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC grid manager # Description: The unit of the NorduGrid's ARC middleware to # accept and control jobs. ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi prog=arched # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-arex ]; then . /etc/sysconfig/arc-arex elif [ -r /etc/default/arc-arex ]; then . /etc/default/arc-arex fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # PID and lock file PID_FILE=`${ARC_LOCATION}/@pkgdatasubdir@/arc-arex-start --getpidfile` if [ $? -ne 0 ]; then # When --getpidfile fails it returns the error on stdout log_failure_msg "$PID_FILE" exit 1 fi if [ `id -u` = 0 ] ; then # Debian does not have /run/lock/subsys if [ -d /run/lock/subsys ]; then LOCKFILE=/run/lock/subsys/arex else LOCKFILE=/run/lock/arex fi else LOCKFILE=$HOME/arex.lock fi start() { echo -n "Starting $prog: " # Check if we are already running if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi ${ARC_LOCATION}/@pkgdatasubdir@/arc-arex-start RETVAL=$? if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then if [ "x$1" != "x" ]; then # kill whole process group on force-kill kill -TERM "-$pid" else kill "$pid" fi RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=300; # for stopping nicely if [ "x$1" != "x" ]; then timeout=1 # 1 second for force-kill fi while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." 
return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; force-kill) stop 1 ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart|force-kill}" exit 1 ;; esac exit $? nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/perferator.in0000644000000000000000000000013215067751327021502 xustar0030 mtime=1759498967.771747295 30 atime=1759498967.872493757 30 ctime=1759499029.323630916 nordugrid-arc-7.1.1/src/services/a-rex/perferator.in0000644000175000002070000000441115067751327023404 0ustar00mockbuildmock00000000000000#!/bin/bash # script to write static system data and upload to perflog.nordugrid.org # together with performance data taken by data, arex, infosys and backends # runs once a day, run by a-rex when configured through the helper option # in the grid-manager block in arc.conf. # e.g.: # [grid-manager] # helper=". /usr/share/arc/perferator" # Path to arc.conf can be given with --config option, default is /etc/arc.conf # TODO: Upload performance data to perflog.nordugrid.org command_exists () { type "$1" &> /dev/null ; } write_static_system_data () { outfile="$1" echo "=== Timestamp: ===" >> $outfile date >> $outfile echo "" >> $outfile echo "=== ARC version: ===" >> $outfile arched --version >> $outfile echo "" >> $outfile echo "=== fs types: ===" >> $outfile controldir_fstype=`df --output=fstype $CONFIG_controldir | grep -v Type` echo "controldir fstype: $controldir_fstype" >> $outfile sessiondir_fstype=`df --output=fstype $CONFIG_sessiondir | grep -v Type` echo "sessiondir fstype: $sessiondir_fstype" >> $outfile echo "" >> $outfile echo "=== CPU info: ===" >> $outfile echo "no. of CPUs: `getconf _NPROCESSORS_ONLN`" >> $outfile cat /proc/cpuinfo >> $outfile echo "" >> $outfile echo "=== Mem info: ===" >> $outfile cat /proc/meminfo >> $outfile echo "" >> $outfile echo "=== OS info: ===" >> $outfile uname -a >> $outfile cat /etc/*-release >> $outfile cat /proc/version >> $outfile echo "" >> $outfile } # ARC1 passes first the config file. if [ "$1" = "--config" ]; then shift; ARC_CONFIG=$1; shift; fi basedir=`dirname $0` basedir=`cd $basedir > /dev/null && pwd` || exit $? pkglibexecdir="${ARC_LOCATION:-@prefix@}/@pkglibexecsubdir@" pkgdatadir="$basedir" arcconfig_parser=${pkglibexecdir}/arcconfig-parser ARC_CONFIG=${ARC_CONFIG:-/etc/arc.conf} eval $(${arcconfig_parser} -c ${ARC_CONFIG} -b common -e bash) eval $(${arcconfig_parser} -c ${ARC_CONFIG} -b gridmanager -e bash) HOSTNAME=$CONFIG_hostname PERFDIR=${CONFIG_perflogdir:-/var/log/arc/perfdata} # sleep a bit, waiting for performance data to accumulate sleep 86400 # merge infosys files MERGEDATE=`date +%Y%m%d` write_static_system_data $PERFDIR/sysinfo.perflog # extract infosys data from nytprofd databases $pkgdatadir/PerfData.pl --config=$ARC_CONFIG || exit $? 
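# Note (illustrative): when invoked as an a-rex helper this script is run as
# "perferator --config /etc/arc.conf"; after the 24h sleep above the static
# system data ends up in $PERFDIR/sysinfo.perflog, i.e.
# /var/log/arc/perfdata/sysinfo.perflog unless perflogdir is set in arc.conf.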
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arc-arex.service.in0000644000000000000000000000013115067751327022471 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.322221309 nordugrid-arc-7.1.1/src/services/a-rex/arc-arex.service.in0000644000175000002070000000034215067751327024373 0ustar00mockbuildmock00000000000000[Unit] Description=ARC Resource-coupled EXecution service After=local-fs.target remote-fs.target [Service] Type=forking ExecStart=@prefix@/@pkgdatasubdir@/arc-arex-start NotifyAccess=all [Install] WantedBy=multi-user.target nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/FileChunks.cpp0000644000000000000000000000013015067751327021536 xustar0030 mtime=1759498967.750121423 29 atime=1759498967.86149359 29 ctime=1759499029.34083045 nordugrid-arc-7.1.1/src/services/a-rex/FileChunks.cpp0000644000175000002070000001114515067751327023444 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "FileChunks.h" namespace ARex { void FileChunks::Print(void) { //int n = 0; lock.lock(); for(chunks_t::iterator c = chunks.begin();c!=chunks.end();++c) { //Hopi::logger.msg(Arc::DEBUG, "Chunk %u: %u - %u",n,c->first,c->second); }; lock.unlock(); } void FileChunks::Size(off_t size) { lock.lock(); if(size > FileChunks::size) FileChunks::size = size; lock.unlock(); } FileChunks::FileChunks(FileChunksList& container): list(container),self(container.files.end()),size(0), last_accessed(time(NULL)),refcount(0) { } FileChunks::FileChunks(const FileChunks& obj): lock(),list(obj.list),self(obj.list.files.end()),chunks(obj.chunks), size(0),last_accessed(time(NULL)),refcount(0) { } FileChunks* FileChunksList::GetStuck(void) { if(((int)(time(NULL)-last_timeout)) < timeout) return NULL; lock.lock(); for(std::map::iterator f = files.begin(); f != files.end();++f) { f->second->lock.lock(); if((f->second->refcount <= 0) && (((int)(time(NULL) - f->second->last_accessed)) >= timeout )) { ++(f->second->refcount); f->second->lock.unlock(); lock.unlock(); return f->second; } f->second->lock.unlock(); } last_timeout=time(NULL); lock.unlock(); return NULL; } void FileChunksList::RemoveStuck(void) { std::list stuck; for(;;) { FileChunks* s = GetStuck(); if(!s) break; stuck.push_back(s); } for(std::list::iterator s = stuck.begin(); s!=stuck.end();++s) { (*s)->Remove(); } } /* FileChunks* FileChunksList::GetFirst(void) { lock.lock(); std::map::iterator f = files.begin(); if(f != files.end()) { f->second.lock.lock(); ++(f->second.refcount); f->second.lock.unlock(); lock.unlock(); return &(f->second); }; lock.unlock(); return NULL; } */ void FileChunks::Remove(void) { list.lock.lock(); lock.lock(); --refcount; if(refcount <= 0) { if(self != list.files.end()) { lock.unlock(); list.files.erase(self); list.lock.unlock(); delete this; return; } } lock.unlock(); list.lock.unlock(); } FileChunks& FileChunksList::Get(std::string path) { lock.lock(); std::map::iterator c = files.find(path); if(c == files.end()) { c=files.insert(std::pair(path,new FileChunks(*this))).first; c->second->lock.lock(); c->second->self=c; } else { c->second->lock.lock(); } ++(c->second->refcount); c->second->lock.unlock(); lock.unlock(); RemoveStuck(); return *(c->second); } void FileChunks::Release(void) { lock.lock(); if(chunks.empty()) { lock.unlock(); Remove(); } else { --refcount; lock.unlock(); } } void FileChunks::Add(off_t start,off_t csize) { off_t end = start+csize; lock.lock(); last_accessed=time(NULL); if(end > size) size=end; for(chunks_t::iterator chunk = 
chunks.begin();chunk!=chunks.end();++chunk) { if((start >= chunk->first) && (start <= chunk->second)) { // New chunk starts within existing chunk if(end > chunk->second) { // Extend chunk chunk->second=end; // Merge overlapping chunks chunks_t::iterator chunk_ = chunk; ++chunk_; for(;chunk_!=chunks.end();) { if(chunk->second < chunk_->first) break; // Merge two chunks if(chunk_->second > chunk->second) chunk->second=chunk_->second; chunk_=chunks.erase(chunk_); }; }; lock.unlock(); return; } else if((end >= chunk->first) && (end <= chunk->second)) { // New chunk ends within existing chunk if(start < chunk->first) { // Extend chunk chunk->first=start; }; lock.unlock(); return; } else if(end < chunk->first) { // New chunk is between existing chunks or first chunk chunks.insert(chunk,std::pair(start,end)); lock.unlock(); return; }; }; // New chunk is last chunk or there are no chunks currently chunks.insert(chunks.end(),std::pair(start,end)); lock.unlock(); } bool FileChunks::Complete(void) { lock.lock(); bool r = ((chunks.size() == 1) && (chunks.begin()->first == 0) && (chunks.begin()->second == size)); lock.unlock(); return r; } FileChunksList::FileChunksList(void):timeout(600), last_timeout(time(NULL)) { } FileChunksList::~FileChunksList(void) { lock.lock(); // Not sure lock.unlock(); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/infoproviders0000644000000000000000000000013015067751425021612 xustar0030 mtime=1759499029.845435439 28 atime=1759499034.7655102 30 ctime=1759499029.845435439 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/0000755000175000002070000000000015067751425023573 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/LRMSInfo.pm0000644000000000000000000000013215067751327023621 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.816100726 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/LRMSInfo.pm0000644000175000002070000002407515067751327025533 0ustar00mockbuildmock00000000000000package LRMSInfo; use Storable; use strict; use LogUtils; use InfoChecker; ############################################################################## # To include a new LRMS: ############################################################################## # # Each LRMS specific module needs to: # # 1. Provide subroutine get_lrms_info. The interfaces are documented in this # file. All variables required in lrms_info_schema should be defined in # LRMS modules. Returning empty variable "" is perfectly ok if variable # does not apply to a LRMS. # # 2. Provide subroutine get_lrms_options_schema. The return value must be a # schema describing the options that are recognized by the plugin. ############################################################################## # Public interface to LRMS modules ############################################################################## # # use LRMSInfo; # # my $collector = LRMSInfo->new(); # my $lrms_info = $collector->get_info($options); # # Arguments: # $options - a hash reference containing options. This module will check it # against $lrms_options_schema and the LRMS plugin's own schema # and then pass it on to the LRMS plugin. # # Returns: # $lrms_info - a hash reference containing all information collected from # the LRMS. 
This module will check it against # $lrms_info_schema (see below) ############################################################################## # Schemas ############################################################################## # # The usage of these schemas is described in InfoChecker.pm # # $lrms_options_schema - for checking $options hash. This is just a minimal # schema, LRMS plugins may use an extended version # $lrms_info_schema - for checking data returned by LRMS modules my $lrms_options_schema = { # C 10 new lrms block, more options are passed 'lrms' => '', # old name of the LRMS module 'defaultqueue' => '*', # default queue, optional 'queues' => { # queue names are keys in this hash '*' => { 'users' => [ '' ] # list of user IDs to query in the LRMS } }, 'jobs' => [ '' ], # list of jobs IDs to query in the LRMS 'controldir' => '*', # path to controldir, taken from main config 'loglevel' => '' # infoproviders loglevel }; my $lrms_info_schema = { 'cluster' => { 'lrms_type' => '', 'lrms_glue_type' => '*', # one of: bqs condor fork loadleveler lsf openpbs sungridengine torque torquemaui ... 'lrms_version' => '', 'schedpolicy' => '*', 'totalcpus' => '', 'queuedcpus' => '', 'usedcpus' => '', 'queuedjobs' => '', 'runningjobs' => '', 'cpudistribution' => '' }, 'queues' => { '*' => { 'status' => '', 'maxrunning' => '', # the max number of jobs allowed to run in this queue 'maxqueuable' => '*', # the max number of jobs allowed to be queued 'maxuserrun' => '*', # the max number of jobs that a single user can run 'maxcputime' => '*', # units: seconds (per-slot) 'maxtotalcputime' => '*', # units: seconds 'mincputime' => '*', # units: seconds 'defaultcput' => '*', # units: seconds 'maxwalltime' => '*', # units: seconds 'minwalltime' => '*', # units: seconds 'defaultwallt' => '*', # units: seconds 'running' => '', # the number of cpus being occupied by running jobs 'queued' => '', # the number of queued jobs 'suspended' => '*', # the number of suspended jobs 'total' => '*', # the total number of jobs in this queue 'totalcpus' => '', # the number of cpus dedicated to this queue 'preemption' => '*', 'acl_users' => [ '*' ], 'users' => { '*' => { 'freecpus' => { '*' => '' # key: # of cpus, value: time limit in minutes (0 for unlimited) }, 'queuelength' => '' } }, 'nodes' => [ '*' ] # list of hostnames or LRMS node id of nodes that belong to the queue } }, 'jobs' => { '*' => { 'status' => '', 'cpus' => '*', 'rank' => '*', 'mem' => '*', # units: kB 'walltime' => '*', # units: seconds 'cputime' => '*', # units: seconds 'reqwalltime' => '*', # units: seconds 'reqcputime' => '*', # units: seconds 'nodes' => [ '*' ], # names of nodes where the job runs 'comment' => [ '*' ] } }, 'nodes' => { '*' => { # key: hostname of the node (as known to the LRMS) 'isavailable' => '', # is available for running jobs 'isfree' => '', # is available and not yet fully used, can accept more jobs 'tags' => [ '*' ], # tags associated to nodes, i.e. 
node properties in PBS 'vmem' => '*', # virtual memory, units: kb 'pmem' => '*', # physical memory, units: kb 'slots' => '*', # job slots or virtual processors 'lcpus' => '*', # cpus visible to the os 'pcpus' => '*', # number of sockets 'sysname' => '*', # what would uname -s print on the node 'release' => '*', # what would uname -r print on the node 'machine' => '*', # what would uname -m print (if the node would run linux) } } }; our $log = LogUtils->getLogger("LRMSInfo"); sub collect($) { my ($options) = @_; my ($checker, @messages); #print Dumper($options); # these lines are meant to take the # default queue. Maybe should be done differently having the default # explicitly stored in the config. #my $lrmsstring = ($options->{lrms}); #my ($lrms_name, $share) = split / /, $lrmsstring; # C 10 new lrms block my $lrms_name = $options->{lrms}; my $share = $options->{defaultqueue}; my $loglevel = $options->{loglevel}; $log->error('lrms option is missing') unless $lrms_name; load_lrms($lrms_name, $loglevel); # merge schema exported by the LRMS plugin my $schema = { %$lrms_options_schema, %{get_lrms_options_schema()} }; $checker = InfoChecker->new($schema); @messages = $checker->verify($options); $log->warning("config key options->$_") foreach @messages; $log->fatal("Some required options are missing") if @messages; my $result = get_lrms_info($options); use Data::Dumper('Dumper'); my $custom_lrms_schema = customize_info_schema($lrms_info_schema, $options); $checker = InfoChecker->new($custom_lrms_schema); @messages = $checker->verify($result); $log->warning("return value lrmsinfo->$_") foreach @messages; # some backends leave extra spaces -- trim them $result->{cluster}{cpudistribution} =~ s/^\s+//; $result->{cluster}{cpudistribution} =~ s/\s+$//; # make sure nodes are unique for my $job (values %{$result->{jobs}}) { next unless $job->{nodes}; my %nodes; $nodes{$_} = 1 for @{$job->{nodes}}; $job->{nodes} = [ sort keys %nodes ]; } return $result; } # Loads the needed LRMS plugin at runtime # First try to load XYZmod.pm (implementing the native ARC1 interface) # otherwise try to load XYZ.pm (ARC0.6 plugin) # input: lrmsname, loglevel sub load_lrms($$) { my $lrms_name = uc(shift); my $loglevel = uc(shift); my $module = $lrms_name."mod"; eval { require "$module.pm" }; if ($@) { $log->debug("require for $module returned: $@"); $log->debug("Using ARC0.6 compatible $lrms_name module"); require ARC0mod; ARC0mod::load_lrms($lrms_name, $loglevel); $module = "ARC0mod"; } import $module qw(get_lrms_info get_lrms_options_schema); } # prepares a custom schema that has individual keys for each queue and each job # which is named in $options sub customize_info_schema($$) { my ($info_schema,$options) = @_; my $new_schema; # make a deep copy $new_schema = Storable::dclone($info_schema); # adjust schema for each job: Replace "*" with actual job id's for my $job (@{$options->{jobs}}) { $new_schema->{jobs}{$job} = $new_schema->{jobs}{"*"}; } delete $new_schema->{jobs}{"*"}; # adjust schema for each queue: Replace "*" with actual queue names for my $queue (keys %{$options->{queues}}) { $new_schema->{queues}{$queue} = $new_schema->{queues}{"*"}; } delete $new_schema->{queues}{"*"}; return $new_schema; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### my $opt1 = {lrms => 'fork', sge_root => '/opt/n1ge6', sge_cell => 'cello', sge_bin_path => '/opt/n1ge6/bin/lx24-x86', queues => {'shar' => {users => []}, 'loca' => {users => ['joe','pete'], maxjobs => '4 2'}}, jobs => [qw(7 101 5865)] }; 
my $opt2 = {lrms => 'sge', sge_root => '/opt/n1ge6', sge_cell => 'cello', sge_bin_path => '/opt/n1ge6/bin/lx24-amd64', queues => {'shar' => {users => []}, 'all.q' => {users => ['joe','pete']}}, jobs => [63, 36006] }; my $opt3 = {lrms => 'pbs', pbs_bin_path => '/opt/torque/bin', pbs_log_path => '/var/spool/torque/server_logs', queues => {'batch' => {users => ['joe','pete']}}, jobs => [63, 453] }; sub test { my $options = shift; LogUtils::level('VERBOSE'); require Data::Dumper; import Data::Dumper qw(Dumper); $log->debug("Options: " . Dumper($options)); my $results = LRMSInfo::collect($options); $log->debug("Results: " . Dumper($results)); } #test($opt3); 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023726 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.796625434 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/Makefile.am0000644000175000002070000000210215067751327025625 0ustar00mockbuildmock00000000000000pkgdata_SCRIPTS = CEinfo.pl PerfData.pl dist_pkgdata_DATA = ARC0mod.pm SGEmod.pm FORKmod.pm PBS.pm PBSPRO.pm \ LL.pm LSF.pm Condor.pm condor_env.pm SLURM.pm SLURMmod.pm Boinc.pm \ IniParser.pm LogUtils.pm Sysinfo.pm \ LRMSInfo.pm GMJobsInfo.pm HostInfo.pm RTEInfo.pm \ InfoChecker.pm \ ARC0ClusterInfo.pm ARC1ClusterInfo.pm \ SGE.pm Fork.pm \ XmlPrinter.pm GLUE2xmlPrinter.pm \ LdifPrinter.pm GLUE2ldifPrinter.pm \ NGldifPrinter.pm InfosysHelper.pm pkgdata_DATA = ConfigCentral.pm if LDAP_SERVICE_ENABLED arcldapschemadir = $(pkgdatadir)/ldap-schema dist_arcldapschema_DATA = schema/nordugrid.schema endif PERL = @PERL@ PERLSCRIPTS = $(dist_pkgdata_DATA) CEinfo.pl TESTS_ENVIRONMENT = \ $(PERL) -I$(srcdir) -Mstrict -wc TESTS = $(PERLSCRIPTS) check_SCRIPTS = $(PERLSCRIPTS) # Check if BDI module is available, if not exclude Boinc.pm from TESTS variable. Boinc.pm: FORCE $(eval TESTS := $(shell if `$(PERL) -e "use BDI; exit;" > /dev/null 2>&1`; then echo "$(TESTS)"; else echo "$(TESTS)" | sed 's/Boinc.pm//'; fi)) FORCE: DIST_SUBDIRS = test SUBDIRS = $(PERL_TEST_DIR) nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/condor_env.pm0000644000000000000000000000013115067751327024363 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.868493696 29 ctime=1759499029.80798092 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/condor_env.pm0000644000175000002070000000240615067751327026270 0ustar00mockbuildmock00000000000000package condor_env; use strict; use warnings; BEGIN { use base 'Exporter'; our @EXPORT = qw( configure_condor_env ); } # Initializes environment variables: CONDOR_BIN_PATH and CONDOR_CONFIG # Values defined in arc.conf take priority over previously set environment # variables. # Condor executables are located using the following cues: # 1. condor_bin_path option in arc.conf # 2. PATH environment variable # Synopsis: # # use IniParser; # use condor_env; # # my $parser = IniParser->new('/etc/arc.conf'); # my %config = $parser->get_section("common"); # configure_condor_env(%config) or die "Condor executables not found"; # Returns 1 if the Condor executables (and CONDOR_CONFIG) were found, 0 otherwise.
sub configure_condor_env(%) { my %config = @_; if ($config{condor_bin_path}) { $ENV{CONDOR_BIN_PATH} = $config{condor_bin_path}; } else { for (split ':', $ENV{PATH}) { $ENV{CONDOR_BIN_PATH} = $_ and last if -x "$_/condor_version"; } } return 0 unless -x "$ENV{CONDOR_BIN_PATH}/condor_version"; if ($config{condor_config}) { $ENV{CONDOR_CONFIG} = $config{condor_config}; } else { $ENV{CONDOR_CONFIG} = "/etc/condor/condor_config"; } return 0 unless -e "$ENV{CONDOR_CONFIG}"; return 1; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/LdifPrinter.pm0000644000000000000000000000013215067751327024452 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.830884134 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/LdifPrinter.pm0000644000175000002070000001336615067751327026365 0ustar00mockbuildmock00000000000000package LdifPrinter; use MIME::Base64; use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); sub new { my ($this, $handle) = @_; my $class = ref($this) || $this; # This would only affect comment lines, the rest is guaranteed to be ASCII binmode $handle, ':encoding(utf8)' or $log->error("binmode failed: $!"); #print $handle "# extended LDIF\n#\n# LDAPv3\n" # or $log->error("print failed: $!"); my $self = {fh => $handle, dn => undef, nick => undef, attrs => undef, disablecnt => 0}; return bless $self, $class; } sub disableOut { my ($self) = @_; $self->{disablecnt} = $self->{disablecnt} + 1; } sub enableOut { my ($self) = @_; $self->{disablecnt} = $self->{disablecnt} - 1; } sub begin { my ($self, $dnkey, $name) = @_; $self->_flush() if defined $self->{dn}; unshift @{$self->{dn}}, safe_dn("$dnkey=$name"); unshift @{$self->{nick}}, safe_comment("$name"); } sub attribute { my ($self, $attr, $value) = @_; if ($self->{disablecnt} == 0) { push @{$self->{attrs}}, [$attr, $value]; } } sub attributes { my ($self, $data, $prefix, @keys) = @_; if ($self->{disablecnt} == 0) { my $attrs = $self->{attrs} ||= []; push @$attrs, ["$prefix$_", $data->{$_}] for @keys; } } sub end { my ($self) = @_; $self->_flush(); shift @{$self->{dn}}; shift @{$self->{nick}}; } # # Prints an entry with the attributes added so far. # Prints nothing if there are no attributes. # sub _flush { my ($self) = @_; my $fh = $self->{fh}; my $attrs = $self->{attrs}; return unless defined $attrs; my $dn = join ",", @{$self->{dn}}; my $nick = join ", ", @{$self->{nick}}; print $fh "\n"; #print $fh "# $nick\n"; print $fh safe_attrval("dn", $dn)."\n" or $log->error("print failed: $!"); for my $pair (@$attrs) { my ($attr, $val) = @$pair; next unless defined $val; if (not ref $val) { print $fh safe_attrval($attr, $val)."\n" or $log->error("print failed: $!"); } elsif (ref $val eq 'ARRAY') { for (@$val) { print $fh safe_attrval($attr, $_)."\n" or $log->error("print failed: $!"); } } else { $log->error("Not an ARRAY reference in: $attr"); } } $self->{attrs} = undef; } # # Make a string safe to use as a Relative Distinguished Name, cf. RFC 2253 # sub safe_dn { my ($rdn) = @_; # Escape with \ the following characters ,;+"\<> Also escape # at the # beginning and space at the beginning and at the end of the string. $rdn =~ s/((?:^[#\s])|[,+"\\<>;]|(?:\s$))/\\$1/g; # Encode CR, LF and NUL characters (necessary except when the string # is further base64 encoded) $rdn =~ s/\x0D/\\0D/g; $rdn =~ s/\x0A/\\0A/g; $rdn =~ s/\x00/\\00/g; return $rdn; } # # Construct an attribute-value string safe to use in LDIF, fc. 
RFC 2849 # sub safe_attrval { my ($attr, $val) = @_; return "${attr}:: ".encode_base64($val,'') if $val =~ /^[\s,:<]/ or $val =~ /[\x0D\x0A\x00]/ or $val =~ /[^\x00-\x7F]/; return "${attr}: $val"; } # # Leave comments as they are, just encode CR, LF and NUL characters # sub safe_comment { my ($line) = @_; $line =~ s/\x0D/\\0D/g; $line =~ s/\x0A/\\0A/g; $line =~ s/\x00/\\00/g; return $line; } # # Fold long lines and add a final newline. Handles comments specially. # sub fold78 { my ($tail) = @_; my $is_comment = "#" eq substr($tail, 0, 1); my $contchar = $is_comment ? "# " : " "; my $output = ""; while (length $tail > 78) { $output .= substr($tail, 0, 78) . "\n"; $tail = $contchar . substr($tail, 78); } return "$output$tail\n"; } # # Higher level functions for recursive printing # # $collector - a func ref that upon evaluation returns a hash ref ($data) # $idkey - a key in %$data to be used to construct the relative DN component # $prefix - to be prepended to the relative DN # $attributes - a func ref that is meant to print attributes. Called with $data as input. # $subtree - yet another func ref that is meant to descend into the hierachy. Called # with $data as input. Optional. # # Prints a single entry sub Entry { my ($self, $collector, $prefix, $idkey, $attributes, $subtree) = @_; return unless $collector and my $data = &$collector(); if ($data->{NOPUBLISH}) { $self->disableOut(); } $self->begin("$prefix$idkey", $data->{$idkey}); &$attributes($self,$data); &$subtree($self, $data) if $subtree; $self->end(); if ($data->{NOPUBLISH}) { $self->enableOut(); } } # Prints entries for as long as $collector continues to evaluate to non-null sub Entries { my ($self, $collector, $prefix, $idkey, $attributes, $subtree) = @_; while ($collector and my $data = &$collector()) { if ($data->{NOPUBLISH}) { $self->disableOut(); } $self->begin("$prefix$idkey", $data->{$idkey}); &$attributes($self,$data); &$subtree($self, $data) if $subtree; $self->end(); if ($data->{NOPUBLISH}) { $self->enableOut(); } } } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $data; my $printer = LdifPrinter->new(*STDOUT); $printer->begin(o => "glue"); $data = { objectClass => "organization", o => "glue" }; $printer->attributes("", $data, qw(objectClass o)); $printer->begin(GLUE2GroupID => "grid"); $printer->attribute(objectClass => "GLUE2GroupID"); $data = { GLUE2GroupID => "grid" }; $printer->attributes("GLUE2", $data, qw( GroupID )); $printer->end(); $printer->end(); } #test; 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/Condor.pm0000644000000000000000000000013215067751327023454 xustar0030 mtime=1759498967.758492025 30 atime=1759498967.866493666 30 ctime=1759499029.806838318 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/Condor.pm0000644000175000002070000005221115067751327025357 0ustar00mockbuildmock00000000000000package Condor; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. 
######################################################################

use strict;
use POSIX;

our @ISA = ('Exporter');
our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info');

use LogUtils ( 'start_logging', 'error', 'warning', 'debug' );
use condor_env;

##########################################
# Saved private variables
##########################################

# contains the requirements string for the current queue.
# It is used by queue-aware functions
my $qdef = '';

my %config = ();
my $arcconf = $ENV{ARC_CONFIG} ? $ENV{ARC_CONFIG} : '/etc/arc.conf';

my %lrms_queue;
my $lrms_queue_initialized = 0;

my @allnodedata = ();
my $allnodedata_initialized = 0;

my %alljobdata = ();
my $alljobdata_initialized = 0;

my @queuenodes = ();
my $queuenodes_initialized = 0;

my @jobids_thisqueue = ();
my @jobids_otherqueue = ();

##########################################
# Private subs
##########################################

# Runs a command. Returns a list of three values:
#
# [0] String containing stdout.
# [1] String containing stderr.
# [2] Program exit code ($?) that was returned to the shell.

sub condor_run($) {
    my $command = shift;
    my $stderr_file = "/tmp/condor_run.$$";
    my $stdout = `$ENV{CONDOR_BIN_PATH}/$command 2>$stderr_file`;
    debug "===condor_run: $command";
    my $ret = $? >> 8;
    local (*ERROR, $/);
    open ERROR, "<$stderr_file";
    my $stderr = <ERROR>;
    close ERROR;
    unlink $stderr_file;
    return $stdout, $stderr, $ret;
}

# Returns LRMS type and version strings. ('UNKNOWN' in case of errors.)
sub type_and_version() {
    my ($out, $err, $ret) = condor_run('condor_version');
    return 'UNKNOWN' if $ret != 0;
    $out =~ /\$CondorVersion:\s+(\S+)/;
    my $version = $1 || 'UNKNOWN';
    my $type = 'Condor';
    return $type, $version;
}

#
# Helper function which collects all the information about condor nodes.
#
sub collect_node_data() {
    return if $allnodedata_initialized;
    $allnodedata_initialized = 1;
    my ($out, $err, $ret) = condor_run('condor_status -format "Name = %V\n" Name -format "Machine = %V\n" Machine -format "State = %V\n" State -format "Cpus = %V\n" Cpus -format "TotalCpus = %V\n" TotalCpus -format "SlotType = %V\n\n" SlotType');
    error("Failed collecting node information.") if $ret;
    for (split /\n\n+/, $out) {
        my %target = condor_digest_classad($_);
        next unless defined $target{machine};
        push @allnodedata, \%target;
    }
    debug "===collect_node_data: " . join " ", (map { $$_{machine} } @allnodedata);
}

#
# Helper function which collects all the information about condor jobs.
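#
# For reference, a sketch of the input parsed below (hypothetical values):
# the condor_q invocation prints one blank-line separated block of
# "Attribute = Value" pairs per job, e.g.
#
#   ClusterId = 1042
#   ProcId = 0
#   JobStatus = 2
#   RemoteHost = "slot1@node1.example.org"
#
# condor_digest_classad() (defined further down) turns each block into a
# hash with lowercased keys and surrounding quotes stripped.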
#
sub collect_job_data() {
    return if $alljobdata_initialized;
    $alljobdata_initialized = 1;
    $ENV{_condor_CONDOR_Q_ONLY_MY_JOBS} = 'false';
    my ($out, $err, $ret) = condor_run('condor_q -format "ClusterId = %V\n" ClusterId -format "ProcId = %V\n" ProcId -format "JobStatus = %V\n" JobStatus -format "CurrentHosts = %V\n" CurrentHosts -format "LastRemoteHost = %V\n" LastRemoteHost -format "RemoteHost = %V\n" RemoteHost -format "ImageSize = %V\n" ImageSize -format "RemoteWallClockTime = %V\n" RemoteWallClockTime -format "RemoteUserCpu = %V\n" RemoteUserCpu -format "RemoteSysCpu = %V\n" RemoteSysCpu -format "JobTimeLimit = %V\n" JobTimeLimit -format "JobCpuLimit = %V\n" JobCpuLimit -format "HoldReasonCode = %V\n\n" HoldReasonCode');
    return if $out =~ m/All queues are empty/;
    error("Failed collecting job information.") if $ret;
    for (split /\n\n+/, $out) {
        my %job = condor_digest_classad($_);
        next unless defined $job{clusterid};
        $job{procid} = "0" unless $job{procid};
        my $jobid = "$job{clusterid}.$job{procid}";
        $alljobdata{$jobid} = \%job;
    }
    debug "===collect_job_data: " . (join " ", keys %alljobdata);
}

#
# Scans grid-manager's controldir for jobs in LRMS state belonging to a
# queue. Returns a list of their Condor jobids: clusterid.0 (assumes
# procid=0).
#
sub collect_jobids($$) {
    my %pairs;
    my $qname = shift;
    my $controldir = shift;
    my $cmd = "find $controldir/processing -maxdepth 1 -name '?*.status'";
    $cmd .= ' | xargs grep -l INLRMS ';
    $cmd .= ' | sed \'s/^.*\/\([^\.]*\)\.status$/\1/\' ';
    $cmd .= ' | sed -e \'s#\(.\{3\}\)#\1/#3\' -e \'s#\(.\{3\}\)#\1/#2\' -e \'s#\(.\{3\}\)#\1/#1\' -e \'s#$#/local#\' ';
    $cmd .= " | sed 's\\^\\$controldir/jobs/\\' ";
    $cmd .= ' | xargs grep -H "^queue=\|^localid="';
    local *LOCAL;
    open(LOCAL, "$cmd |");
    while (<LOCAL>) {
        m#/job\.(\w{10,})\.local:queue=(\S+)# && ($pairs{$1}{queue} = $2);
        m#/job\.(\w{10,})\.local:localid=(\S+)# && ($pairs{$1}{id} = $2);
    }
    close LOCAL;
    foreach my $pair (values %pairs) {
        # get rid of .condor from localid.
        $$pair{id} =~ s/(\d+)\..*/$1.0/;
        if ( $$pair{queue} eq $qname ) {
            push @jobids_thisqueue, $$pair{id};
        } else {
            push @jobids_otherqueue, $$pair{id};
        }
    }
    debug "===collect_jobids: thisqueue: @jobids_thisqueue";
    debug "===collect_jobids: otherqueue: @jobids_otherqueue";
}

#
# Returns a job's rank (place) in the current queue, or False if job is not in
# current queue. Highest rank is 1. The rank is deduced from jobid, based on
# the assumption that jobs are started sequentially by Condor.
# Input: jobid (of the form: clusterid.0)
#
sub rank($) {
    my $id = shift;
    my $rank = 0;
    # only calculate rank for queued jobs
    return 0 unless exists $alljobdata{$id};
    return 0 unless $alljobdata{$id}{lc 'JobStatus'} == 1;
    foreach (@jobids_thisqueue) {
        # only include queued jobs in rank
        next unless exists $alljobdata{$_};
        next unless $alljobdata{$_}{lc 'JobStatus'} == 1;
        $rank++;
        last if $id eq $_;
    }
    #debug "===rank($id) = $rank";
    return $rank;
}

#
# Parses long output from condor_q -l and condor_status -l into a hash.
# OBS: Field names are lowercased!
# OBS: It removes quotes around strings
#
sub condor_digest_classad($) {
    my %classad;
    for (split /\n+/, shift) {
        next unless /^(\w+)\s*=\s*(.*\S|)\s*$/;
        my ($field, $val) = ($1, $2);
        $val =~ s/"(.*)"/$1/; # remove quotes, if any
        $classad{lc $field} = $val;
    }
    return %classad;
}

#
# Takes an optional constraint description string and returns the names of
# the nodes which satisfy this constraint.
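# (A hypothetical example of such a constraint, as it would come from the
# condor_requirements option in arc.conf: 'Opsys == "LINUX" && TotalCpus == 4'.)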
# If no constraint is given, returns all the nodes in the Condor pool.
#
sub condor_grep_nodes {
    my $req = shift;
    my $cmd = 'condor_status -format "%s\n" Machine';
    $cmd .= " -constraint '$req'" if $req;
    my ($out, $err, $ret) = condor_run($cmd);
    debug "===condor_grep_nodes: ". (join ', ', split /\n/, $out);
    return () if $ret;
    return split /\n/, $out;
}

#
# Takes one argument:
# 1. The LRMS job id as represented in the GM. (In Condor terms,
#    it's <ClusterId>.<ProcId>.)
#
# Returns the current status of the job by mapping Condor's JobStatus
# integer into corresponding one-letter codes used by ARC:
#
#   1 (Idle)      --> Q (job is queuing, waiting for a node, etc.)
#   2 (Running)   --> R (running on a host controlled by the LRMS)
#   2 (Suspended) --> S (an already running job in a suspended state)
#   3 (Removed)   --> E (finishing in the LRMS)
#   4 (Completed) --> E (finishing in the LRMS)
#   5 (Held)      --> H --> S (some jobs are stuck in the queue)
#                 --> H --> O if HoldReasonCode == 16 (jobs are datastaging)
#   6 (Transfer)  --> O (other, almost finished. Transferring output.)
#   7 (Suspended) --> S (newer Condor versions support suspended)
#
# If the job couldn't be found, E is returned since it is probably finished.
#
sub condor_get_job_status($) {
    my $id = shift;
    my %num2letter = qw(1 Q 2 R 3 E 4 E 5 H 6 O 7 S);
    return 'E' unless $alljobdata{$id};
    my $s = $alljobdata{$id}{jobstatus};
    return 'E' if !defined $s;
    $s = $num2letter{$s};
    if ($s eq 'R') {
        $s = 'S' if condor_job_suspended($id);
    }
    # Takes care of HOLD jobs
    if ($s eq 'H') {
        $s = condor_job_hold_isstaging($id) ? 'O' : 'S';
    }
    debug "===condor_get_job_status $id: $s";
    return $s;
}

#
# Returns the list of nodes belonging to the current queue
#
sub condor_queue_get_nodes() {
    return @queuenodes if $queuenodes_initialized;
    $queuenodes_initialized = 1;
    @queuenodes = condor_grep_nodes($qdef);
    debug "===condor_queue_get_nodes @queuenodes";
    return @queuenodes;
}

#
# Count queued (idle) jobs within the current queue.
#
sub condor_queue_get_queued() {
    my $gridqueued = 0;
    my $localqueued = 0;
    my $qfactor = 0;
    if (condor_cluster_totalcpus() != 0) {
        $qfactor = condor_queue_get_nodes() / condor_cluster_totalcpus();
    }
    for (values %alljobdata) {
        my %job = %$_;
        # only include jobs which are idle.
        # TODO: Held jobs (ID=5) removed upon WLCG request, maybe they need to be counted elsewhere
        next unless $job{jobstatus} == 1;
        my $clusterid = "$job{clusterid}.$job{procid}";
        if (grep { $_ eq $clusterid } @jobids_thisqueue) {
            $gridqueued += 1;
        } elsif (grep { $_ eq $clusterid } @jobids_otherqueue) {
            # this is a grid job, but in a different queue
        } else {
            $localqueued += 1;
        }
    }
    # for locally queued jobs, we don't know to which queue they belong;
    # try guessing the odds
    my $total = $gridqueued + int($localqueued * $qfactor);
    debug "===condor_queue_get_queued: $total = $gridqueued+int($localqueued*$qfactor)";
    return $total;
}

#
# Counts all queued cpus (idle) in the cluster.
# TODO: this counts jobs, not cpus.
# TODO: Held jobs (ID=5) removed upon WLCG request, maybe they need to be counted elsewhere
sub condor_cluster_get_queued_cpus() {
    my $sum = 0;
    do {$sum++ if $$_{jobstatus} == 1} for values %alljobdata;
    debug "===condor_cluster_get_queued_cpus: $sum";
    return $sum;
}

#
# Counts all queued jobs (idle) in the cluster.
#
sub condor_cluster_get_queued_jobs() {
    my $sum = 0;
    do {$sum++ if $$_{jobstatus} == 1} for values %alljobdata;
    debug "===condor_cluster_get_queued_jobs: $sum";
    return $sum;
}

# Counts all held jobs (ID=5) in the cluster.
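# A minimal usage sketch (illustrative; this particular counter is not
# referenced elsewhere in this file):
#
#   collect_job_data();                        # populates %alljobdata
#   my $held = condor_cluster_get_held_jobs(); # jobs with JobStatus == 5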
#
sub condor_cluster_get_held_jobs() {
    my $sum = 0;
    do {$sum++ if $$_{jobstatus} == 5} for values %alljobdata;
    debug "===condor_cluster_get_held_jobs: $sum";
    return $sum;
}

#
# Counts all running jobs in the cluster.
# TODO: apparently also counts suspended jobs. This is only the case in
#       earlier versions of Condor; newer versions have a separate
#       state (7) for suspended jobs.
sub condor_cluster_get_running_jobs() {
    my $sum = 0;
    do {$sum++ if $$_{jobstatus} == 2} for values %alljobdata;
    debug "===condor_cluster_get_running_jobs: $sum";
    return $sum;
}

#
# Counts cpus on nodes in the current queue with state other than 'Unclaimed'.
# Every running job is automatically included, plus nodes used
# interactively by their owners
#
sub condor_queue_get_running() {
    my $running = 0;
    my @qnod = condor_queue_get_nodes();
    for (@allnodedata) {
        my %node = %$_;
        next unless grep { $_ eq $node{machine} } @qnod;
        $running += $node{cpus}
            if ($node{slottype} !~ /^Partitionable/i
                && $node{state} !~ /^Unclaimed/i);
    }
    debug "===condor_queue_get_running: $running";
    return $running;
}

#
# Same as above, but for the whole cluster
#
sub condor_cluster_get_usedcpus() {
    my $used = 0;
    for (@allnodedata) {
        $used += $$_{cpus}
            if ($$_{slottype} !~ /^Partitionable/i
                && $$_{state} !~ /^Unclaimed/i);
    }
    debug "===condor_cluster_get_usedcpus: $used";
    return $used;
}

#
# returns the total number of CPUs in the current queue
#
sub condor_queue_totalcpus() {
    my @qnod = condor_queue_get_nodes();
    # List all machines in the pool. Create a hash specifying the TotalCpus
    # for each machine.
    my %machines;
    $machines{$$_{machine}} = $$_{totalcpus} for @allnodedata;
    my $totalcpus = 0;
    for (keys %machines) {
        my $machine = $_;
        next unless grep { $machine eq $_ } @qnod;
        $totalcpus += $machines{$_};
    }
    return $totalcpus;
}

#
# returns the total number of CPUs in the cluster
#
sub condor_cluster_totalcpus() {
    # List all machines in the pool. Create a hash specifying the TotalCpus
    # for each machine.
    my %machines;
    $machines{$$_{machine}} = $$_{totalcpus} for @allnodedata;
    my $totalcpus = 0;
    for (keys %machines) {
        $totalcpus += $machines{$_};
    }
    return $totalcpus;
}

#
# This function parses the condor log to see if the job has been suspended.
# (condor_q reports 'R' for running even when the job is suspended, so we need
# to parse the log to be sure that 'R' actually means running.)
#
# Argument: the condor job id
# Returns: true if the job is suspended, and false if it's running.
#
sub condor_job_suspended($) {
    my $id = shift;
    return 0 unless $alljobdata{$id};
    my $logfile = $alljobdata{$id}{lc 'UserLog'};
    return 0 unless $logfile;
    local *LOGFILE;
    open LOGFILE, "<$logfile" or return 0;
    my $suspended = 0;
    while (my $line = <LOGFILE>) {
        $suspended = 1 if $line =~ /Job was suspended\.$/;
        $suspended = 0 if $line =~ /Job was unsuspended\.$/;
    }
    close LOGFILE;
    return $suspended;
}

#
# This function checks the job's classad to see whether a job in HOLD state
# is kept on HOLD because HoldReasonCode == 16. In that case the job is
# staging data, so it should not be discarded.
#
# Argument: the condor job id
# Returns: true if the job is held for data staging (HoldReasonCode == 16),
#          false otherwise.
#
sub condor_job_hold_isstaging($) {
    my $id = shift;
    return 0 unless $alljobdata{$id};
    return 1 if ($alljobdata{$id}{lc 'HoldReasonCode'} == '16');
    # E state means the job will not exit the HOLD state.
    # O state means the job can be out of the HOLD state.
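    # (Illustrative example: a job held by a data-staging hook would carry
    # HoldReasonCode = 16 in its classad, making condor_get_job_status()
    # report 'O'; a job held for any other reason is reported as 'S'.)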
    # If HoldReasonCode == 16 --> staging --> O.
    # The check above already returned 1 in that case, so a job reaching
    # this point is held for some other reason.
    return 0;
}

#
# CPU distribution string (e.g., '1cpu:5 2cpu:1').
#
sub cpudistribution {
    # List all machines in the pool. Create a hash specifying the TotalCpus
    # for each machine.
    my %machines;
    $machines{$$_{machine}} = $$_{totalcpus} for @allnodedata;
    # Count number of machines with one CPU, number with two, etc.
    my %dist;
    for (keys %machines) {
        $dist{$machines{$_}}++;
    }
    # Generate CPU distribution string.
    my $diststr = '';
    for (sort { $a <=> $b } keys %dist) {
        $diststr .= ' ' unless $diststr eq '';
        $diststr .= "${_}cpu:$dist{$_}";
    }
    return $diststr;
}

#############################################
# Public subs
#############################################

sub cluster_info ($) {
    my $config = shift;
    my %lrms_cluster;
    configure_condor_env(%$config)
        or error("Condor executables (in condor_bin_path) or config file (condor_config) not found, check configuration. Exiting...");
    collect_node_data();
    collect_job_data();
    ( $lrms_cluster{lrms_type}, $lrms_cluster{lrms_version} ) = type_and_version();
    # not sure how Condor counts RemoteUserCpu and RemoteSysCpu but it should
    # not matter anyway since we don't support parallel jobs under Condor
    $lrms_cluster{has_total_cputime_limit} = 0;
    # Count used/free CPUs and queued jobs in the cluster
    # Note: SGE has the concept of "slots", which roughly corresponds to
    # concept of "cpus" in ARC (PBS) LRMS interface.
    $lrms_cluster{totalcpus} = condor_cluster_totalcpus();
    $lrms_cluster{cpudistribution} = cpudistribution();
    $lrms_cluster{usedcpus} = condor_cluster_get_usedcpus();
    #NOTE: counts jobs, not cpus.
    $lrms_cluster{queuedcpus} = condor_cluster_get_queued_cpus();
    $lrms_cluster{queuedjobs} = condor_cluster_get_queued_jobs();
    $lrms_cluster{runningjobs} = condor_cluster_get_running_jobs();
    # List LRMS queues.
    # This does not seem to be used in cluster.pl!
    @{$lrms_cluster{queue}} = ();
    return %lrms_cluster;
}

sub queue_info ($$) {
    return %lrms_queue if $lrms_queue_initialized;
    $lrms_queue_initialized = 1;
    my $config = shift;
    my $qname = shift;
    $qdef = join "", split /\[separator\]/, ($$config{condor_requirements} || '');
    warning("Option 'condor_requirements' is not defined for queue $qname")
        unless $qdef;
    debug("===Requirements for queue $qname: $qdef");
    configure_condor_env(%$config)
        or error("Condor executables (in condor_bin_path) or config file (condor_config) not found, check configuration. Exiting...");
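    # The collect_* helpers below cache their results in module-level
    # variables (guarded by the *_initialized flags near the top of this
    # file), so the repeated calls from cluster_info() and queue_info()
    # only gather data once.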
    collect_node_data();
    collect_job_data();
    collect_jobids($qname, $$config{controldir});

    # The number of available (free) cpus cannot be larger than the number
    # of free cpus in the whole cluster
    my $totalcpus = condor_queue_totalcpus();
    my $usedcpus = condor_queue_get_running();
    my $queuedcpus = condor_queue_get_queued();
    $lrms_queue{freecpus} = $totalcpus - $usedcpus;
    $lrms_queue{running} = $usedcpus;
    $lrms_queue{totalcpus} = $totalcpus;
    # In theory any job in some circumstances can consume all available slots
    $lrms_queue{MaxSlotsPerJob} = $totalcpus;
    $lrms_queue{queued} = $queuedcpus;
    # reserve negative numbers for error states
    if ($lrms_queue{freecpus} < 0) {
        warning("lrms_queue{freecpus} = $lrms_queue{freecpus}")
    }
    # nordugrid-queue-maxrunning
    # nordugrid-queue-maxqueuable
    # nordugrid-queue-maxuserrun
    # nordugrid-queue-mincputime
    # nordugrid-queue-defaultcputime
    $lrms_queue{maxrunning} = $totalcpus;
    $lrms_queue{maxqueuable} = 2 * $lrms_queue{maxrunning};
    $lrms_queue{maxuserrun} = $lrms_queue{maxrunning};
    $lrms_queue{maxwalltime} = '';
    $lrms_queue{minwalltime} = '';
    $lrms_queue{defaultwallt} = '';
    $lrms_queue{maxcputime} = '';
    $lrms_queue{mincputime} = '';
    $lrms_queue{defaultcput} = '';
    $lrms_queue{status} = 1;
    return %lrms_queue;
}

sub jobs_info ($$@) {
    my $config = shift;
    my $qname = shift;
    my $jids = shift;
    my %lrms_jobs;
    queue_info($config, $qname);
    foreach my $id ( @$jids ) {
        # submit-condor-job might return identifiers of the form ClusterId.condor.
        # Replace the suffix with .0. It is safe to assume that ProcId is 0
        # because we only submit one job at a time.
        my $id0 = $id;
        $id0 =~ s/(\d+)\..*/$1.0/;
        debug "===jobs_info: Mapping $id to $id0";
        if ( $alljobdata{$id0} ) {
            my %job = %{$alljobdata{$id0}};
            $lrms_jobs{$id}{status} = condor_get_job_status($id0);
            $lrms_jobs{$id}{mem} = $job{lc 'ImageSize'};
            $lrms_jobs{$id}{walltime} = floor($job{lc 'RemoteWallClockTime'} / 60);
            $lrms_jobs{$id}{cputime} = floor(($job{lc 'RemoteUserCpu'} + $job{lc 'RemoteSysCpu'}) / 60);
            $lrms_jobs{$id}{nodes} = [];
            $lrms_jobs{$id}{nodes} = [$job{lc 'LastRemoteHost'}] if ($job{lc 'LastRemoteHost'} ne "undefined");
            $lrms_jobs{$id}{nodes} = [$job{lc 'RemoteHost'}] if ($job{lc 'RemoteHost'} ne "undefined");
            if ($job{lc 'JobTimeLimit'} ne "undefined") {
                $lrms_jobs{$id}{reqwalltime} = floor($job{lc 'JobTimeLimit'} / 60); # caller knows these better
            }
            if ($job{lc 'JobCpuLimit'} ne "undefined") {
                $lrms_jobs{$id}{reqcputime} = floor($job{lc 'JobCpuLimit'} / 60); # caller knows these better
            }
            $lrms_jobs{$id}{rank} = rank($id0) ? rank($id0) : '';
            $lrms_jobs{$id}{comment} = []; # TODO
            $lrms_jobs{$id}{cpus} = $job{lc 'CurrentHosts'};
            # For queued jobs, unset meaningless values
            if ($lrms_jobs{$id}{status} eq 'Q') {
                $lrms_jobs{$id}{mem} = '';
                $lrms_jobs{$id}{walltime} = '';
                $lrms_jobs{$id}{cputime} = '';
                $lrms_jobs{$id}{nodes} = [];
            }
        } else {
            # Job probably already finished
            debug("===Condor job $id not found.
Probably it has finished"); $lrms_jobs{$id}{status} = ''; $lrms_jobs{$id}{mem} = ''; $lrms_jobs{$id}{walltime} = ''; $lrms_jobs{$id}{cputime} = ''; $lrms_jobs{$id}{reqwalltime} = ''; $lrms_jobs{$id}{reqcputime} = ''; $lrms_jobs{$id}{rank} = ''; $lrms_jobs{$id}{nodes} = []; $lrms_jobs{$id}{comment} = []; } } return %lrms_jobs; } sub users_info($$@) { my $config = shift; my $qname = shift; my $accts = shift; my %lrms_users; queue_info($config, $qname); foreach my $u ( @{$accts} ) { # all users are treated as equals # there is no maximum walltime/cputime limit in Condor $lrms_users{$u}{freecpus} = $lrms_queue{freecpus}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356023741 xustar0030 mtime=1759498990.163857541 30 atime=1759499018.076256606 30 ctime=1759499029.839572468 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/Makefile.in0000644000175000002070000010670615067751356025655 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/infoproviders ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am \ $(am__dist_arcldapschema_DATA_DIST) $(dist_pkgdata_DATA) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CEinfo.pl ConfigCentral.pm PerfData.pl CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed 
'$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(arcldapschemadir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(pkgdatadir)" SCRIPTS = $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__dist_arcldapschema_DATA_DIST = schema/nordugrid.schema DATA = $(dist_arcldapschema_DATA) $(dist_pkgdata_DATA) $(pkgdata_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__DIST_COMMON = $(srcdir)/CEinfo.pl.in $(srcdir)/ConfigCentral.pm.in \ $(srcdir)/Makefile.in $(srcdir)/PerfData.pl.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = 
@CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ 
PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ pkgdata_SCRIPTS = CEinfo.pl PerfData.pl dist_pkgdata_DATA = ARC0mod.pm SGEmod.pm FORKmod.pm PBS.pm PBSPRO.pm \ LL.pm 
LSF.pm Condor.pm condor_env.pm SLURM.pm SLURMmod.pm Boinc.pm \ IniParser.pm LogUtils.pm Sysinfo.pm \ LRMSInfo.pm GMJobsInfo.pm HostInfo.pm RTEInfo.pm \ InfoChecker.pm \ ARC0ClusterInfo.pm ARC1ClusterInfo.pm \ SGE.pm Fork.pm \ XmlPrinter.pm GLUE2xmlPrinter.pm \ LdifPrinter.pm GLUE2ldifPrinter.pm \ NGldifPrinter.pm InfosysHelper.pm pkgdata_DATA = ConfigCentral.pm @LDAP_SERVICE_ENABLED_TRUE@arcldapschemadir = $(pkgdatadir)/ldap-schema @LDAP_SERVICE_ENABLED_TRUE@dist_arcldapschema_DATA = schema/nordugrid.schema PERLSCRIPTS = $(dist_pkgdata_DATA) CEinfo.pl TESTS_ENVIRONMENT = \ $(PERL) -I$(srcdir) -Mstrict -wc TESTS = $(PERLSCRIPTS) check_SCRIPTS = $(PERLSCRIPTS) DIST_SUBDIRS = test SUBDIRS = $(PERL_TEST_DIR) all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/infoproviders/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/infoproviders/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): CEinfo.pl: $(top_builddir)/config.status $(srcdir)/CEinfo.pl.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ ConfigCentral.pm: $(top_builddir)/config.status $(srcdir)/ConfigCentral.pm.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ PerfData.pl: $(top_builddir)/config.status $(srcdir)/PerfData.pl.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; 
\ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-dist_arcldapschemaDATA: $(dist_arcldapschema_DATA) @$(NORMAL_INSTALL) @list='$(dist_arcldapschema_DATA)'; test -n "$(arcldapschemadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcldapschemadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcldapschemadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcldapschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcldapschemadir)" || exit $$?; \ done uninstall-dist_arcldapschemaDATA: @$(NORMAL_UNINSTALL) @list='$(dist_arcldapschema_DATA)'; test -n "$(arcldapschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcldapschemadir)'; $(am__uninstall_files_from_dir) install-dist_pkgdataDATA: $(dist_pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-dist_pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(dist_pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) install-pkgdataDATA: $(pkgdata_DATA) @$(NORMAL_INSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgdatadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgdatadir)" || exit $$?; \ done uninstall-pkgdataDATA: @$(NORMAL_UNINSTALL) @list='$(pkgdata_DATA)'; test -n "$(pkgdatadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ col="$$grn"; \ else \ col="$$red"; \ fi; \ echo "$${col}$$dashes$${std}"; \ echo "$${col}$$banner$${std}"; \ test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ test -z "$$report" || echo "$${col}$$report$${std}"; \ echo "$${col}$$dashes$${std}"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-recursive all-am: Makefile $(SCRIPTS) $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(arcldapschemadir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(pkgdatadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dist_arcldapschemaDATA \ install-dist_pkgdataDATA install-pkgdataDATA \ install-pkgdataSCRIPTS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_arcldapschemaDATA \ uninstall-dist_pkgdataDATA uninstall-pkgdataDATA \ uninstall-pkgdataSCRIPTS .MAKE: $(am__recursive_targets) check-am install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-TESTS check-am clean clean-generic clean-libtool \ cscopelist-am ctags ctags-am distclean distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_arcldapschemaDATA \ install-dist_pkgdataDATA install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-pkgdataDATA install-pkgdataSCRIPTS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-dist_arcldapschemaDATA \ uninstall-dist_pkgdataDATA uninstall-pkgdataDATA \ uninstall-pkgdataSCRIPTS .PRECIOUS: Makefile # Check if BDI module is available, if not exclude Boinc.pm from TESTS variable. Boinc.pm: FORCE $(eval TESTS := $(shell if `$(PERL) -e "use BDI; exit;" > /dev/null 2>&1`; then echo "$(TESTS)"; else echo "$(TESTS)" | sed 's/Boinc.pm//'; fi)) FORCE: # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/Fork.pm0000644000000000000000000000013015067751327023127 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.866493666 29 ctime=1759499029.82642814 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/Fork.pm0000644000175000002070000002610615067751327025040 0ustar00mockbuildmock00000000000000package Fork; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. 
######################################################################

use strict;
use POSIX qw(ceil floor);
use Sys::Hostname;

our @ISA = ('Exporter');
our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info');

use LogUtils ( 'start_logging', 'error', 'warning', 'debug' );

##########################################
# Saved private variables
##########################################

our (%lrms_queue);
our $running = undef; # total running jobs in a queue

# the queue passed in the latest call to queue_info, jobs_info or users_info
my $currentqueue = undef;

# Resets queue-specific global variables if
# the queue has changed since the last call
sub init_globals($) {
    my $qname = shift;
    if (not defined $currentqueue or $currentqueue ne $qname) {
        $currentqueue = $qname;
        %lrms_queue = ();
        $running = undef;
    }
}

##########################################
# Private subs
##########################################

sub cpu_threads_cores_sockets {
    my $nsockets; # total number of physical cpu sockets
    my $ncores;   # total number of cpu cores
    my $nthreads; # total number of hardware execution threads
    if (-f "/proc/cpuinfo") {
        # Linux variant
        my %sockets; # cpu socket IDs
        my %cores;   # cpu core IDs
        open (CPUINFO, "</proc/cpuinfo");
        while ( my $line = <CPUINFO> ) {
            if ($line=~/^processor\s*:\s+(\d+)$/) {
                ++$nthreads;
            } elsif ($line=~/^physical id\s*:\s+(\d+)$/) {
                ++$sockets{$1};
            } elsif ($line=~/^core id\s*:\s+(\d+)$/) {
                ++$cores{$1};
            }
        }
        close CPUINFO;
        # count total cpu cores and sockets
        $ncores = scalar keys %cores;
        $nsockets = scalar keys %sockets;
        if ($nthreads) {
            # if /proc/cpuinfo does not provide socket and core IDs,
            # assume every thread represents a separate cpu
            $ncores = $nthreads unless $ncores;
            $nsockets = $nthreads unless $nsockets;
        } else {
            warning("Failed parsing /proc/cpuinfo");
        }
    } elsif (-x "/usr/sbin/system_profiler") {
        # OS X
        my @lines = `system_profiler SPHardwareDataType`;
        warning("Failed running system_profiler: $!") if $?;
        for my $line ( @lines ) {
            if ($line =~ /Number Of Processors:\s*(.+)/) {
                $nsockets = $1;
            } elsif ($line =~ /Total Number Of Cores:\s*(.+)/) {
                $ncores = $1;
                $nthreads = $1; # Assume 1 execution thread per core
            }
        }
        unless ($nsockets and $ncores) {
            warning("Failed parsing output of system_profiler");
        }
    } elsif (-x "/usr/bin/kstat") {
        # Solaris
        my %chips;
        eval {
            require Sun::Solaris::Kstat;
            my $ks = Sun::Solaris::Kstat->new();
            my $cpuinfo = $ks->{cpu_info};
            die "key not found: cpu_info" unless defined $cpuinfo;
            for my $id (keys %$cpuinfo) {
                my $info = $cpuinfo->{$id}{"cpu_info$id"};
                die "key not found: cpu_info$id" unless defined $info;
                $chips{$info->{chip_id}}++;
                $nthreads++;
            }
        };
        if ($@) {
            error("Failed running module Sun::Solaris::Kstat: $@");
        }
        # assume each core is in a separate socket
        $nsockets = $ncores = scalar keys %chips;
    } else {
        warning("Cannot query CPU info: unsupported operating system");
    }
    return ($nthreads,$ncores,$nsockets);
}

# Produces stats for all processes on the system
sub process_info() {
    my $command = "ps -e -o ppid,pid,vsz,time,etime,user,comm";
    my @pslines = `$command`;
    if ($?
!= 0) { warning("Failed running (non-zero exit status): $command"); return (); } shift @pslines; # drop header line my @procinfo; for my $line (@pslines) { my ($ppid,$pid,$vsize,$cputime,$etime,$user,$comm) = split ' ', $line, 7; # matches time formats like: 21:29.44, 12:21:29, 3-12:21:29 if ($cputime =~ /^(?:(?:(\d+)-)?(\d+):)?(\d+):(\d\d(?:\.\d+)?)$/) { my ($days,$hours,$minutes,$seconds) = (($1||0), ($2||0), $3, $4); $cputime = $seconds + 60*($minutes + 60*($hours + 24*$days)); } else { warning("Invalid cputime: $cputime"); $cputime = 0; } # matches time formats like: 21:29.44, 12:21:29, 3-12:21:29 if ($etime =~ /^(?:(?:(\d+)-)?(\d+):)?(\d+):(\d\d(?:\.\d+)?)$/) { my ($days,$hours,$minutes,$seconds) = (($1||0), ($2||0), $3, $4); $etime = $seconds + 60*($minutes + 60*($hours + 24*$days)); } elsif ($etime eq '-') { $etime = 0; # a zombie ? } else { warning("Invalid etime: $etime"); $etime = 0; } my $pi = { ppid => $ppid, pid => $pid, vsize => $vsize, user => $user, cputime => $cputime, etime => $etime, comm => $comm }; push @procinfo, $pi; } return @procinfo; } ############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; my (%lrms_cluster); $lrms_cluster{lrms_type} = "fork"; $lrms_cluster{lrms_version} = "1"; # only enforcing per-process cputime limit $lrms_cluster{has_total_cputime_limit} = 0; my ($cputhreads) = cpu_threads_cores_sockets(); $lrms_cluster{totalcpus} = $cputhreads; # Since fork is a single-machine backend, there will only be one machine available $lrms_cluster{cpudistribution} = $lrms_cluster{totalcpus}."cpu:1"; # usedcpus on a fork machine is determined from the 1min cpu # loadaverage and cannot be larger than the totalcpus if ( `uptime` =~ /load averages?:\s+([.\d]+,?[.\d]+),?\s+([.\d]+,?[.\d]+),?\s+([.\d]+,?[.\d]+)/ ) { my $usedcpus = $1; $usedcpus =~ tr/,/./; $lrms_cluster{usedcpus} = ($usedcpus <= $lrms_cluster{totalcpus}) ?
floor(0.5+$usedcpus) : $lrms_cluster{totalcpus}; } else { error("Failed getting load averages"); $lrms_cluster{usedcpus} = 0; } #Fork does not support parallel jobs $lrms_cluster{runningjobs} = $lrms_cluster{usedcpus}; # no LRMS queuing jobs on a fork machine, fork has no queueing ability $lrms_cluster{queuedcpus} = 0; $lrms_cluster{queuedjobs} = 0; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { my ($config) = shift; my ($qname) = shift; init_globals($qname); if (defined $running) { # job_info was already called, we know exactly how many grid jobs # are running $lrms_queue{running} = $running; } else { # assuming that the submitted grid jobs are cpu hogs, approximate # the number of running jobs with the number of running processes $lrms_queue{running}= 0; unless (open PSCOMMAND, "ps axr |") { error("error in executing ps axr"); } while(my $line = <PSCOMMAND>) { chomp($line); next if ($line =~ m/PID TTY/); next if ($line =~ m/ps axr/); next if ($line =~ m/cluster-fork/); $lrms_queue{running}++; } close PSCOMMAND; } my ($cputhreads) = cpu_threads_cores_sockets(); $lrms_queue{totalcpus} = $cputhreads; $lrms_queue{status} = $lrms_queue{totalcpus}-$lrms_queue{running}; # reserve negative numbers for error states # Fork is not a real LRMS, and cannot be in an error state if ($lrms_queue{status}<0) { debug("lrms_queue{status} = $lrms_queue{status}"); $lrms_queue{status} = 0; } my $job_limit; $job_limit = 1; if ( $$config{maxjobs} ) { #extract lrms maxjobs from config option my @maxes = split(' ', $$config{maxjobs}); my $len=@maxes; if ($len > 1){ $job_limit = $maxes[1]; if ($job_limit eq "cpunumber") { $job_limit = $lrms_queue{totalcpus}; } } } $lrms_queue{maxrunning} = $job_limit; $lrms_queue{maxuserrun} = $job_limit; $lrms_queue{maxqueuable} = $job_limit; chomp( my $maxcputime = `ulimit "-t"` ); if ($maxcputime =~ /^\d+$/) { $lrms_queue{maxcputime} = $maxcputime; } elsif ($maxcputime eq 'unlimited') { $lrms_queue{maxcputime} = ""; } else { warning("Could not determine max cputime with ulimit -t"); $lrms_queue{maxcputime} = ""; } $lrms_queue{queued} = 0; $lrms_queue{mincputime} = ""; $lrms_queue{defaultcput} = ""; $lrms_queue{minwalltime} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; return %lrms_queue; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; init_globals($qname); my (%lrms_jobs); my @procinfo = process_info(); foreach my $id (@$jids){ $lrms_jobs{$id}{nodes} = [ hostname ]; my ($proc) = grep { $id == $_->{pid} } @procinfo; if ($proc) { # number of running jobs.
Will be used in queue_info ++$running; # sum cputime of all child processes my $cputime = $proc->{cputime}; $_->{ppid} == $id and $cputime += $_->{cputime} for @procinfo; $lrms_jobs{$id}{mem} = $proc->{vsize}; $lrms_jobs{$id}{walltime} = ceil $proc->{etime}/60; $lrms_jobs{$id}{cputime} = ceil $cputime/60; $lrms_jobs{$id}{status} = 'R'; $lrms_jobs{$id}{comment} = [ "LRMS: Running under fork" ]; } else { $lrms_jobs{$id}{mem} = ''; $lrms_jobs{$id}{walltime} = ''; $lrms_jobs{$id}{cputime} = ''; $lrms_jobs{$id}{status} = ''; # job is EXECUTED $lrms_jobs{$id}{comment} = [ "LRMS: no longer running" ]; } $lrms_jobs{$id}{reqwalltime} = ""; $lrms_jobs{$id}{reqcputime} = ""; $lrms_jobs{$id}{rank} = "0"; #Fork backend does not support parallel jobs $lrms_jobs{$id}{cpus} = "1"; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; init_globals($qname); my (%lrms_users); # freecpus # queue length if ( ! exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $lrms_queue{running}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/GLUE2ldifPrinter.pm0000644000000000000000000000013115067751327025250 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.866493666 30 ctime=1759499029.832576461 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/GLUE2ldifPrinter.pm0000644000175000002070000006525615067751327027161 0ustar00mockbuildmock00000000000000package GLUE2ldifPrinter; use base "LdifPrinter"; sub new { my ($this, $handle, $splitjobs) = @_; my $self = $this->SUPER::new($handle); $self->{splitjobs} = $splitjobs; return $self; } # bools come in lowercase, must be uppercased for LDAP # In the XML schema allowed values are: true, false, undefined # In the LDAP schema the above has been synced in arc 6.10, # previously only uppercase TRUE, FALSE were allowed sub lc_bools { my ($data, @keys) = @_; for (@keys) { my $val = $data->{$_}; next unless defined $val; $data->{$_} = $val = lc $val; delete $data->{$_} unless $val eq 'false' or $val eq 'true' or $val eq 'undefined'; } } # # Print attributes # sub beginGroup { my ($self, $name) = @_; $self->begin(GLUE2GroupID => $name); my $data = { objectClass => "GLUE2Group", GLUE2GroupID => $name}; $self->attributes($data, "", qw(objectClass GLUE2GroupID)); } sub EntityAttributes { my ($self, $data) = @_; $self->attributes($data, "GLUE2Entity", qw( CreationTime Validity Name OtherInfo )); } sub LocationAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Location"); $self->attributes($data, "GLUE2Location", qw( ID Address Place Country PostCode Latitude Longitude )); $self->attribute(GLUE2LocationServiceForeignKey => $data->{ServiceForeignKey}); } sub ContactAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Contact"); $self->attributes($data, "GLUE2Contact", qw( ID Detail Type )); $self->attribute(GLUE2ContactServiceForeignKey => $data->{ServiceForeignKey}); } sub DomainAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Domain"); $self->attributes($data, "GLUE2Domain", qw( ID Description WWW )); } sub AdminDomainAttributes { my ($self, $data) = @_; lc_bools($data, qw( Distributed )); $self->DomainAttributes($data); $self->attribute(objectClass =>
"GLUE2AdminDomain"); $self->attributes($data, "GLUE2AdminDomain", qw( Distributed Owner )); $self->attribute(GLUE2AdminDomainAdminDomainForeignKey => $data->{AdminDomainID}); } sub UserDomainAttributes { my ($self, $data) = @_; $self->DomainAttributes($data); $self->attribute(objectClass => "GLUE2UserDomain"); $self->attributes($data, "GLUE2UserDomain", qw( Level UserManager Member )); $self->attribute(GLUE2UserDomainUserDomainForeignKey => $data->{UserDomainID}); } sub ServiceAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Service"); $self->attributes($data, "GLUE2Service", qw( ID Capability Type QualityLevel StatusInfo Complexity )); $self->attribute(GLUE2ServiceAdminDomainForeignKey => $data->{AdminDomainID}); $self->attribute(GLUE2ServiceServiceForeignKey => $data->{ServiceID}); } sub EndpointAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Endpoint"); $self->attributes($data, "GLUE2Endpoint", qw( ID URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimfeStart DowntimeEnd DowntimeInfo )); $self->attribute(GLUE2EndpointServiceForeignKey => $data->{ServiceID}); } sub ShareAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Share"); $self->attributes($data, "GLUE2Share", qw( ID Description )); $self->attribute(GLUE2ShareServiceForeignKey => $data->{ServiceID}); $self->attribute(GLUE2ShareEndpointForeignKey => $data->{EndpointID}); $self->attribute(GLUE2ShareResourceForeignKey => $data->{ResourceID}); $self->attribute(GLUE2ShareShareForeignKey => $data->{ShareID}); } sub ManagerAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Manager"); $self->attributes($data, "GLUE2Manager", qw( ID ProductName ProductVersion )); $self->attribute(GLUE2ManagerServiceForeignKey => $data->{ServiceID}); } sub ResourceAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Resource"); $self->attributes($data, "GLUE2Resource", qw( ID )); $self->attribute(GLUE2ResourceManagerForeignKey => $data->{ManagerID}); } sub ActivityAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Activity"); $self->attributes($data, "GLUE2Activity", qw( ID )); $self->attribute(GLUE2ActivityUserDomainForeignKey => $data->{UserDomainID}); $self->attribute(GLUE2ActivityEndpointForeignKey => $data->{EndpointID}); $self->attribute(GLUE2ActivityShareForeignKey => $data->{ShareID}); $self->attribute(GLUE2ActivityResourceForeignKey => $data->{ResourceID}); $self->attribute(GLUE2ActivityActivityForeignKey => $data->{ActivityID}); } sub PolicyAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Policy"); $self->attributes($data, "GLUE2Policy", qw( ID Scheme Rule )); $self->attribute(GLUE2PolicyUserDomainForeignKey => $data->{UserDomainID}); } sub AccessPolicyAttributes { my ($self, $data) = @_; $self->PolicyAttributes($data); $self->attribute(objectClass => "GLUE2AccessPolicy"); $self->attribute(GLUE2AccessPolicyEndpointForeignKey => $data->{EndpointID}); } sub MappingPolicyAttributes { my ($self, $data) = @_; $self->PolicyAttributes($data); 
$self->attribute(objectClass => "GLUE2MappingPolicy"); $self->attribute(GLUE2MappingPolicyShareForeignKey => $data->{ShareID}); } sub ComputingServiceAttributes { my ($self, $data) = @_; $self->ServiceAttributes($data); $self->attribute(objectClass => "GLUE2ComputingService"); $self->attributes($data, "GLUE2ComputingService", qw( TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); } sub ComputingEndpointAttributes { my ($self, $data) = @_; $self->EndpointAttributes($data); # The LDAP schema requires both this and GLUE2ComputingEndpointComputingServiceForeignKey! $self->attribute(GLUE2EndpointServiceForeignKey => $data->{ComputingServiceID}); $self->attribute(objectClass => "GLUE2ComputingEndpoint"); $self->attributes($data, "GLUE2ComputingEndpoint", qw( Staging JobDescription TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); $self->attribute(GLUE2ComputingEndpointComputingServiceForeignKey => $data->{ComputingServiceID}); } sub ComputingShareAttributes { my ($self, $data) = @_; lc_bools($data, qw( Preemption )); $self->ShareAttributes($data); $self->attribute(objectClass => "GLUE2ComputingShare"); $self->attributes($data, "GLUE2ComputingShare", qw( MappingQueue MaxWallTime MaxMultiSlotWallTime MinWallTime DefaultWallTime MaxCPUTime MaxTotalCPUTime MinCPUTime DefaultCPUTime MaxTotalJobs MaxRunningJobs MaxWaitingJobs MaxPreLRMSWaitingJobs MaxUserRunningJobs MaxSlotsPerJob MaxStageInStreams MaxStageOutStreams SchedulingPolicy MaxMainMemory GuaranteedMainMemory MaxVirtualMemory GuaranteedVirtualMemory MaxDiskSpace DefaultStorageService Preemption ServingState TotalJobs RunningJobs LocalRunningJobs WaitingJobs LocalWaitingJobs SuspendedJobs LocalSuspendedJobs StagingJobs PreLRMSWaitingJobs EstimatedAverageWaitingTime EstimatedWorstWaitingTime FreeSlots FreeSlotsWithDuration UsedSlots RequestedSlots ReservationPolicy Tag )); $self->attribute(GLUE2ComputingShareComputingServiceForeignKey => $data->{ComputingServiceID}); # Mandatory by schema $self->attribute(GLUE2ComputingShareComputingEndpointForeignKey => $data->{ComputingEndpointID}); $self->attribute(GLUE2ComputingShareExecutionEnvironmentForeignKey => $data->{ExecutionEnvironmentID}); } sub ComputingManagerAttributes { my ($self, $data) = @_; lc_bools($data, qw( Reservation BulkSubmission Homogeneous WorkingAreaShared WorkingAreaGuaranteed )); $self->ManagerAttributes($data); $self->attribute(objectClass => "GLUE2ComputingManager"); $self->attributes($data, "GLUE2ComputingManager", qw( Reservation BulkSubmission TotalPhysicalCPUs TotalLogicalCPUs TotalSlots SlotsUsedByLocalJobs SlotsUsedByGridJobs Homogeneous NetworkInfo LogicalCPUDistribution WorkingAreaShared WorkingAreaGuaranteed WorkingAreaTotal WorkingAreaFree WorkingAreaLifeTime WorkingAreaMultiSlotTotal WorkingAreaMultiSlotFree WorkingAreaMultiSlotLifeTime CacheTotal CacheFree TmpDir ScratchDir ApplicationDir )); $self->attribute(GLUE2ComputingManagerComputingServiceForeignKey => $data->{ComputingServiceID}); } sub BenchmarkAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2Benchmark"); $self->attributes($data, "GLUE2Benchmark", qw( ID Type Value )); $self->attribute(GLUE2BenchmarkExecutionEnvironmentForeignKey => $data->{ExecutionEnvironmentID}); $self->attribute(GLUE2BenchmarkComputingManagerForeignKey => $data->{ComputingManagerID}); } sub ExecutionEnvironmentAttributes { my ($self, $data) = @_; lc_bools($data, qw( VirtualMachine ConnectivityIn ConnectivityOut
)); $self->ResourceAttributes($data); $self->attribute(objectClass => "GLUE2ExecutionEnvironment"); $self->attributes($data, "GLUE2ExecutionEnvironment", qw( Platform VirtualMachine TotalInstances UsedInstances UnavailableInstances PhysicalCPUs LogicalCPUs CPUMultiplicity CPUVendor CPUModel CPUVersion CPUClockSpeed CPUTimeScalingFactor WallTimeScalingFactor MainMemorySize VirtualMemorySize OSFamily OSName OSVersion ConnectivityIn ConnectivityOut NetworkInfo )); $self->attribute(GLUE2ExecutionEnvironmentComputingManagerForeignKey => $data->{ComputingManagerID}); } sub ApplicationEnvironmentAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2ApplicationEnvironment"); $self->attributes($data, "GLUE2ApplicationEnvironment", qw( ID AppName AppVersion State RemovalDate License Description BestBenchmark ParallelSupport MaxSlots MaxJobs MaxUserSeats FreeSlots FreeJobs FreeUserSeats )); $self->attribute(GLUE2ApplicationEnvironmentComputingManagerForeignKey => $data->{ComputingManagerID}); $self->attribute(GLUE2ApplicationEnvironmentExecutionEnvironmentForeignKey => $data->{ExecutionEnvironmentID}); } sub ApplicationHandleAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2ApplicationHandle"); $self->attributes($data, "GLUE2ApplicationHandle", qw( ID Type Value )); $self->attribute(GLUE2ApplicationHandleApplicationEnvironmentForeignKey => $data->{ApplicationEnvironmentID}); } sub ComputingActivityAttributes { my ($self, $data) = @_; $self->ActivityAttributes($data); $self->attribute(objectClass => "GLUE2ComputingActivity"); $self->attributes($data, "GLUE2ComputingActivity", qw( Type IDFromEndpoint LocalIDFromManager JobDescription State RestartState ExitCode ComputingManagerExitCode Error WaitingPosition UserDomain Owner LocalOwner RequestedTotalWallTime RequestedTotalCPUTime RequestedSlots RequestedApplicationEnvironment StdIn StdOut StdErr LogDir ExecutionNode Queue UsedTotalWallTime UsedTotalCPUTime UsedMainMemory SubmissionTime ComputingManagerSubmissionTime StartTime ComputingManagerEndTime EndTime WorkingAreaEraseTime ProxyExpirationTime SubmissionHost SubmissionClientName OtherMessages )); $self->attribute(GLUE2ActivityShareForeignKey => $data->{ComputingShareID}); $self->attribute(GLUE2ActivityResourceForeignKey => $data->{ExecutionEnvironmentID}); $self->attribute(GLUE2ActivityActivityForeignKey => $data->{ActivityID}); } sub ToStorageServiceAttributes { my ($self, $data) = @_; $self->EntityAttributes($data); $self->attribute(objectClass => "GLUE2ToStorageService"); $self->attributes($data, "GLUE2ToStorageService", qw( ID LocalPath RemotePath )); $self->attribute(GLUE2ToStorageServiceComputingServiceForeignKey => $data->{ComputingServiceID}); $self->attribute(GLUE2ToStorageServiceStorageServiceForeignKey => $data->{StorageServiceID}); } # # Follow hierarchy # sub Location { LdifPrinter::Entry(@_, 'GLUE2Location', 'ID', \&LocationAttributes); } sub Contacts { LdifPrinter::Entries(@_, 'GLUE2Contact', 'ID', \&ContactAttributes); } sub AdminDomain { LdifPrinter::Entry(@_, 'GLUE2Domain', 'ID', \&AdminDomainAttributes, sub { my ($self, $data) = @_; $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); #$self->ComputingService($data->{ComputingService}); }); } sub AccessPolicies { LdifPrinter::Entries(@_, 'GLUE2Policy', 'ID', \&AccessPolicyAttributes); } sub MappingPolicies { LdifPrinter::Entries(@_, 'GLUE2Policy', 'ID', \&MappingPolicyAttributes); } sub Services { 
LdifPrinter::Entries(@_, 'GLUE2Service', 'ID', \&ServiceAttributes, sub { my ($self, $data) = @_; $self->Endpoints($data->{Endpoints}); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); }); } sub ComputingService { LdifPrinter::Entry(@_, 'GLUE2Service', 'ID', \&ComputingServiceAttributes, sub { my ($self, $data) = @_; $self->ComputingEndpoints($data->{ComputingEndpoints}); $self->ComputingShares($data->{ComputingShares}); $self->ComputingManager($data->{ComputingManager}); $self->ToStorageServices($data->{ToStorageServices}); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); }); } sub Endpoint { LdifPrinter::Entry(@_, 'GLUE2Endpoint', 'ID', \&EndpointAttributes, sub { my ($self, $data) = @_; $self->AccessPolicies($data->{AccessPolicies}); }); } sub Endpoints { LdifPrinter::Entries(@_, 'GLUE2Endpoint', 'ID', \&EndpointAttributes, sub { my ($self, $data) = @_; $self->AccessPolicies($data->{AccessPolicies}); }); } sub ComputingEndpoint { LdifPrinter::Entry(@_, 'GLUE2Endpoint', 'ID', \&ComputingEndpointAttributes, sub { my ($self, $data) = @_; $self->AccessPolicies($data->{AccessPolicies}); }); } sub ComputingEndpoints { LdifPrinter::Entries(@_, 'GLUE2Endpoint', 'ID', \&ComputingEndpointAttributes, sub { my ($self, $data) = @_; if (!($self->{splitjobs}) && $data->{ComputingActivities}) { $self->beginGroup("ComputingActivities"); $self->ComputingActivities($data->{ComputingActivities}); $self->end(); } $self->AccessPolicies($data->{AccessPolicies}); }); } sub ComputingShares { LdifPrinter::Entries(@_, 'GLUE2Share', 'ID', \&ComputingShareAttributes, sub { my ($self, $data) = @_; $self->MappingPolicies($data->{MappingPolicies}); }); } sub ComputingManager { LdifPrinter::Entry(@_, 'GLUE2Manager', 'ID', \&ComputingManagerAttributes, sub { my ($self, $data) = @_; $self->Benchmarks($data->{Benchmarks}); $self->beginGroup("ExecutionEnvironments"); $self->ExecutionEnvironments($data->{ExecutionEnvironments}); $self->end(); $self->beginGroup("ApplicationEnvironments"); $self->ApplicationEnvironments($data->{ApplicationEnvironments}); $self->end(); }); } sub Benchmarks { LdifPrinter::Entries(@_, 'GLUE2Benchmark', 'ID', \&BenchmarkAttributes); } sub ExecutionEnvironments { LdifPrinter::Entries(@_, 'GLUE2Resource', 'ID', \&ExecutionEnvironmentAttributes, sub { my ($self, $data) = @_; $self->Benchmarks($data->{Benchmarks}); }); } sub ApplicationEnvironments { LdifPrinter::Entries(@_, 'GLUE2ApplicationEnvironment', 'ID', \&ApplicationEnvironmentAttributes, sub { my ($self, $data) = @_; $self->ApplicationHandles($data->{ApplicationHandles}); }); } sub ApplicationHandles { LdifPrinter::Entries(@_, 'GLUE2ApplicationHandle', 'ID', \&ApplicationHandleAttributes); } sub ComputingActivities { LdifPrinter::Entries(@_, 'GLUE2Activity', 'ID', \&ComputingActivityAttributes); } sub ToStorageServices { LdifPrinter::Entries(@_, 'GLUE2ToStorageService', 'ID', \&ToStorageServiceAttributes); } sub Top { my ($self, $data ) = @_; $self->begin(o => "glue"); #$self->attribute(objectClass => "top"); #$self->attribute(objectClass => "organization"); #$self->attribute(o => "glue"); # builds the grid subtree, with domain information #$self->beginGroup("grid"); $self->AdminDomain(&$data->{AdminDomain}); #$self->end; $self->beginGroup("services"); $self->Services(&$data->{Services}); $self->ComputingService(&$data->{ComputingService}); $self->end; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/ARC0mod.pm0000644000000000000000000000013215067751327023415 xustar0030 
mtime=1759498967.758384845 30 atime=1759498967.865493651 30 ctime=1759499029.798915998 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/ARC0mod.pm0000644000175000002070000001562415067751327025327 0ustar00mockbuildmock00000000000000package ARC0mod; ###################################################################### # DISCLAIMER ###################################################################### # This module is obsolete and deprecated starting from ARC 6.0 # and all the modules depending on it are candidates for deprecation. # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### # # Loads ARC0.6 LRMS modules for use with ARC1 # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # !!!! WARNING: DO *NOT* DEVELOP NEW MODULES BASED ON WHAT FOLLOWS !!! # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # THIS MODULE IS MAINTAINED TO RUN LEGACY CODE. # THE INFORMATION BELOW IS KEPT FOR MAINTENANCE REFERENCE. # THIS MODULE WILL BE DEPRECATED. # ALL THE MODULES BASED ON THIS WILL BE REMOVED IN THE FUTURE. # IF YOU PLAN TO DEVELOP A NEW PERL LRMS MODULE, # READ THE DISCLAIMER ABOVE. # # !!! LEGACY DO NOT USE FOR FUTURE DEVELOPMENTS - SEE DISCLAIMER !!!! # To include a new (ARC 0.6) LRMS plugin: # # 1. Each LRMS specific module needs to provide subroutines # cluster_info, queue_info, jobs_info, and users_info. # # 2. References to subroutines defined in new LRMS modules are added # to the select_lrms subroutine in this module, and the module reference # itself, naturally. # # NB: ARC0 modules use minutes for time units. ARC1 modules use seconds. # # !!! LEGACY DO NOT USE FOR FUTURE DEVELOPMENTS - SEE DISCLAIMER !!!! require Exporter; our @ISA = qw(Exporter); our @EXPORT_OK = qw(get_lrms_info get_lrms_options_schema); use LogUtils; use strict; our $log = LogUtils->getLogger(__PACKAGE__); our %modnames = ( PBS => "PBS", PBSPRO => "PBSPRO", SGE => "SGE", LL => "LL", LSF => "LSF", CONDOR => "Condor", SLURM => "SLURM", BOINC => "Boinc", FORK => "Fork" ); # Whether the module implements support for listing nodes.
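# An LRMS module may optionally also export nodes_info; if the import in
# load_lrms below fails, $has_nodes is reset to 0 and get_lrms_info skips
# the nodes branch. A minimal hypothetical skeleton (the per-node hash
# contents are illustrative only, no schema for them is defined here):
#
#   sub nodes_info($) {
#       my $config = shift;
#       # return a hash keyed on node name, e.g.
#       # return ( node1 => { ... }, node2 => { ... } );
#   }
#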
our $has_nodes = 1; # input: lrmsname, loglevel sub load_lrms($$) { my $lrms_name = uc(shift); my $loglevel = uc(shift); my $module = $modnames{$lrms_name}; $log->error("No ARC0 module for $lrms_name") unless $module; eval { require "$module.pm" }; $log->error("Failed to load LRMS module $module: $@") if $@; import $module qw(cluster_info queue_info jobs_info users_info); eval { import $module qw(nodes_info) }; if ($@) { $log->debug("LRMS module $module does not export 'nodes_info'"); $has_nodes=0; } $LogUtils::default_logger = LogUtils->getLogger($module); $LogUtils::loglevel = $loglevel; } # Just generic options, cannot assume anything LRMS specific here sub get_lrms_options_schema { return { 'lrms' => '', # name of the LRMS module 'queues' => { # queue names are keys in this hash '*' => { 'users' => [ '' ] # list of user IDs to query in the LRMS } }, 'jobs' => [ '' ], # list of jobs IDs to query in the LRMS 'controldir' => '*', # path to controldir, taken from main config 'loglevel' => '' # infoproviders loglevel } } sub get_lrms_info($) { my $options = shift; my %cluster_config = %$options; delete $cluster_config{queues}; delete $cluster_config{jobs}; my $lrms_info = {cluster => {}, queues => {}, jobs => {}}; my $cluster_info = { cluster_info(\%cluster_config) }; delete $cluster_info->{queue}; $lrms_info->{cluster} = delete_empty($cluster_info); $lrms_info->{nodes} = { nodes_info(\%cluster_config) } if $has_nodes; for my $qname ( keys %{$options->{queues}} ) { my %queue_config = (%cluster_config, %{$options->{queues}{$qname}}); delete $queue_config{users}; my $jids = $options->{jobs}; # TODO: interface change: jobs under each queue my $jobs_info = { jobs_info(\%queue_config, $qname, $jids) }; for my $job ( values %$jobs_info ) { $job->{status} ||= 'EXECUTED'; delete_empty($job); } $lrms_info->{jobs} = { %{$lrms_info->{jobs}}, %$jobs_info }; my $queue_info = { queue_info(\%queue_config, $qname) }; $lrms_info->{queues}{$qname} = delete_empty($queue_info); my $users = $options->{queues}{$qname}{users}; $queue_info->{users} = { users_info(\%queue_config, $qname, $users) }; for my $user ( values %{$queue_info->{users}} ) { my $freecpus = $user->{freecpus}; $user->{freecpus} = split_freecpus($freecpus) if defined $freecpus; delete_empty($user); } $queue_info->{acl_users} = $queue_config{acl_users} if defined $queue_config{acl_users}; } # ARC0 LRMS plugins use minutes. Convert to seconds here. for my $queue (values %{$lrms_info->{queues}}) { $queue->{minwalltime} = int 60*$queue->{minwalltime} if $queue->{minwalltime}; $queue->{mincputime} = int 60*$queue->{mincputime} if $queue->{mincputime}; $queue->{maxwalltime} = int 60*$queue->{maxwalltime} if $queue->{maxwalltime}; $queue->{maxcputime} = int 60*$queue->{maxcputime} if $queue->{maxcputime}; $queue->{defaultwallt} = int 60*$queue->{defaultwallt} if $queue->{defaultwallt}; $queue->{defaultcput} = int 60*$queue->{defaultcput} if $queue->{defaultcput}; } for my $job (values %{$lrms_info->{jobs}}) { $job->{reqcputime} = int 60*$job->{reqcputime} if $job->{reqcputime}; $job->{reqwalltime} = int 60*$job->{reqwalltime} if $job->{reqwalltime}; $job->{cputime} = int 60*$job->{cputime} if $job->{cputime}; $job->{walltime} = int 60*$job->{walltime} if $job->{walltime}; delete $job->{nodes} unless ( defined $job->{nodes} && @{$job->{nodes}} ) ; delete $job->{comment} unless (defined $job->{comment} && @{$job->{comment}} ); } return $lrms_info; } sub delete_empty($) { my $hashref = shift; foreach my $k ( keys %$hashref) { delete $hashref->{$k} if ! 
defined $hashref->{$k} || $hashref->{$k} eq ''; } return $hashref; } # Convert freecpus string into a hash. # Example: "6 11:2880 23:1440" --> { 6 => 0, 11 => 2880, 23 => 1440 } # OBS: Assuming the function cpu vs. time is monotone, this transformation is safe. sub split_freecpus($) { my $freecpus_string = shift; my $freecpus_hash = {}; for my $countsecs (split ' ', $freecpus_string) { if ($countsecs =~ /^(\d+)(?::(\d+))?$/) { $freecpus_hash->{$1} = $2 || 0; # 0 means unlimited } else { $log->warning("Bad freecpus string: $freecpus_string"); return {}; } } return $freecpus_hash; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/Sysinfo.pm0000644000000000000000000000013215067751327023662 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.868493696 30 ctime=1759499029.814874087 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/Sysinfo.pm0000644000175000002070000003215115067751327025566 0ustar00mockbuildmock00000000000000package Sysinfo; use strict; use POSIX; use Sys::Hostname; use Exporter; our @ISA = ('Exporter'); # Inherit from Exporter our @EXPORT_OK = qw(cpuinfo meminfo osinfo processid diskinfo diskspaces); use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); # Return PIDs of named commands owned by the current user # Only one pid is returned per command name sub processid (@) { my @procs = `ps -u $< -o pid,comm 2>/dev/null`; if ( $? != 0 ) { $log->info("Failed running: ps -u $< -o pid,comm"); $log->warning("Failed listing processes"); return {}; } shift @procs; # throw away header line # make hash of comm => pid my %all; /\s*(\d+)\s+(.+)/ and $all{$2} = $1 for @procs; my %pids; foreach my $name ( @_ ) { $pids{$name} = $all{$name} if $all{$name}; } return \%pids; } sub cpuinfo { my $info = {}; my $nsockets; # total number of physical cpu sockets my $ncores; # total number of cpu cores my $nthreads; # total number of hardware execution threads if (-f "/proc/cpuinfo") { # Linux variant my %sockets; # cpu socket IDs my %cores; # cpu core IDs open (CPUINFO, "</proc/cpuinfo") or $log->warning("Failed opening /proc/cpuinfo: $!"); while ( my $line = <CPUINFO> ) { if ($line=~/^model name\s*:\s+(.*)$/) { $info->{cpumodel} = $1; } elsif ($line=~/^vendor_id\s+:\s+(.*)$/) { $info->{cpuvendor} = $1; } elsif ($line=~/^cpu MHz\s+:\s+(.*)$/) { $info->{cpufreq} = int $1; } elsif ($line=~/^stepping\s+:\s+(.*)$/) { $info->{cpustepping} = int $1; } elsif ($line=~/^processor\s*:\s+(\d+)$/) { ++$nthreads; } elsif ($line=~/^physical id\s*:\s+(\d+)$/) { ++$sockets{$1}; } elsif ($line=~/^core id\s*:\s+(\d+)$/) { ++$cores{$1}; } } close CPUINFO; if ($info->{cpumodel} =~ m/^(.*?)\s+@\s+([.\d]+)GHz$/) { $info->{cpumodel} = $1; $info->{cpufreq} = int($2*1000); } elsif ($info->{cpumodel} =~ m/\s+([.\d]+)GHz$/) { $info->{cpufreq} = int($1*1000); } elsif ($info->{cpumodel} =~ m/\s+([.\d]+)MHz$/) { $info->{cpufreq} = int($1); } # count total cpu cores and sockets $ncores = scalar keys %cores; $nsockets = scalar keys %sockets; if ($nthreads) { # if /proc/cpuinfo does not provide socket and core IDs, # assume every thread represents a separate cpu $ncores = $nthreads unless $ncores; $nsockets = $nthreads unless $nsockets; } } elsif (-x "/usr/sbin/system_profiler") { # OS X my @lines = `/usr/sbin/system_profiler SPHardwareDataType`; $log->warning("Failed running system_profiler: $!") if $?; for my $line ( @lines ) { if ($line =~ /Processor Name:\s*(.*)/) { $info->{cpumodel} = $1; } elsif ($line =~ /Processor Speed:\s*([.\d]+) (\w+)/) { if ($2 eq "MHz") { $info->{cpufreq} = int $1; } elsif ($2 eq "GHz") { $info->{cpufreq} =
int 1000*$1; } } elsif ($line =~ /Number Of Processors:\s*(.+)/) { $nsockets = $1; } elsif ($line =~ /Total Number Of Cores:\s*(.+)/) { $ncores = $1; $nthreads = $1; # Assume 1 execution thread per core (Ouch!) } } } elsif (-x "/usr/bin/kstat" ) { # Solaris my %chips; eval { require Sun::Solaris::Kstat; my $ks = Sun::Solaris::Kstat->new(); my $cpuinfo = $ks->{cpu_info}; $log->error("kstat: key not found: cpu_info") unless defined $cpuinfo; for my $id (keys %$cpuinfo) { my $info = $cpuinfo->{$id}{"cpu_info$id"}; $log->error("kstat: key not found: cpu_info$id") unless defined $info; $chips{$info->{chip_id}}++; $nthreads++; } my $info = $cpuinfo->{0}{"cpu_info0"}; $log->error("kstat: key not found: cpu_info0") unless defined $info; # $info->{cpumodel} = $info->{cpu_type}; # like sparcv9 $info->{cpumodel} = $info->{implementation}; # like UltraSPARC-III+ $info->{cpufreq} = int $info->{clock_MHz}; }; if ($@) { $log->error("Failed running module Sun::Solaris::Kstat: $@"); } $nsockets = $ncores = scalar keys %chips; } else { $log->warning("Unsupported operating system"); } $info->{cputhreadcount} = $nthreads if $nthreads; $info->{cpucorecount} = $ncores if $ncores; $info->{cpusocketcount} = $nsockets if $nsockets; return $info; } sub meminfo { my ($memtotal, $swaptotal); if (-f "/proc/cpuinfo") { # Linux variant open (MEMINFO, "</proc/meminfo") or $log->warning("Failed opening /proc/meminfo: $!"); while ( my $line = <MEMINFO> ) { if ($line =~ /^MemTotal:\s+(.*) kB$/) { $memtotal = int ($1/1024); } elsif ($line =~ /^SwapTotal:\s+(.*) kB$/) { $swaptotal = int ($1/1024); } } } my $info = {}; $info->{pmem} = $memtotal if $memtotal; $info->{vmem} = $memtotal + $swaptotal if $memtotal and $swaptotal; return $info; } sub osinfo { my $info = {}; my ($sysname, $nodename, $release, $version, $machine) = POSIX::uname(); $info->{machine} = $machine; $info->{sysname} = $sysname; $info->{release} = $release; if ($sysname =~ /linux/i) { my ($msg, $id, $descr, $version); if (-x '/usr/bin/lsb_release' or -x '/bin/lsb_release') { if (open RELEASE, "lsb_release -a 2>&1 |") { while (<RELEASE>) { $msg = $1 if m/^(No.*)/; $id = lc $1 if m/^Distributor ID:\s+(.*)/; $descr = $1 if m/^Description:\s+(.*)/; $version = $1 if m/^Release:\s+([.\d]+)/; } } close RELEASE; } elsif (open RELEASE, '< /etc/lsb-release') { while (<RELEASE>) { $id = lc $1 if m/^DISTRIB_ID=(.*)/; $descr = $1 if m/^DISTRIB_DESCRIPTION=(.*)/; $version = $1 if m/^DISTRIB_RELEASE=([.\d]+)/; } close RELEASE; } elsif (open RELEASE, '< /etc/redhat-release') { ($descr, $version) = ($1,$2) if <RELEASE> =~ m/(.*) release ([.\d]+)/; close RELEASE; } elsif (open RELEASE, '< /etc/debian_version') { $version = $1 if <RELEASE> =~ m/^([.\d]+)$/; $id = 'debian'; close RELEASE; } elsif (open RELEASE, '< /etc/SuSE-release') { while (<RELEASE>) { $version = $1 if m/^VERSION\s*=\s*([.\d]+)/; } $id = 'suse'; close RELEASE; } elsif (open RELEASE, '< /etc/gentoo-release') { $version = $1 if <RELEASE> =~ m/.* version ([.\d]+)/; $id = 'gentoo'; close RELEASE; } # Try to stay within the predefined values for OSName_t from GLUE2 spec (GFD.147).
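    # For example (illustrative inputs): a Description of "CentOS Linux"
    # matches the explicit rule below and yields osname "centos", while an
    # unlisted distribution such as "Arch Linux" falls through to the
    # generic rule and becomes "archlinux" (lowercased, spaces removed).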
if ($descr) { $id = 'centos' if $descr =~ m/^CentOS/i; $id = 'fedoracore' if $descr =~ m/^Fedora/i; $id = 'scientificlinux' if $descr =~ m/^Scientific Linux/i; $id = 'scientificlinuxcern' if $descr =~ m/^Scientific Linux CERN/i; $id = 'redhatenterpriseas' if $descr =~ m/^Red Hat Enterprise/i; # If all the above fail, a generic solution that looks GLUE2 compliant if (not $id) { $id = lc($descr); $id = join '', split ' ', $id; } } $info->{osname} = $id if $id; $info->{osversion} = $version if $version; } elsif ($sysname eq 'Darwin') { my $version = `sw_vers -productVersion`; chomp $version; if ($version =~ m/10\.[\d.]+/) { my $name; $info->{osname} = 'panther' if $version =~ m/^10\.3/; $info->{osname} = 'tiger' if $version =~ m/^10\.4/; $info->{osname} = 'leopard' if $version =~ m/^10\.5/; $info->{osname} = 'snowleopard' if $version =~ m/^10\.6/; $info->{osversion} = $version; } } elsif ($sysname eq 'SunOS') { $release =~ s/^5\.//; # SunOS 5.10 == solaris 10 $info->{osname} = 'solaris'; $info->{osversion} = $release; } return $info; } # # Returns disk space (total and free) in MB on a filesystem # sub diskinfo ($) { my $path = shift; my ($filesystem, $diskfree, $disktotal, $mountpoint); if ( -d "$path") { # check if on afs if ($path =~ m#/afs/#) { my @dfstring =`fs listquota $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: fs listquota $path"); } elsif ($dfstring[-1] =~ /\s+(\d+)\s+(\d+)\s+\d+%\s+\d+%/) { $filesystem = 'afs'; $disktotal = int $1/1024; $diskfree = int(($1 - $2)/1024); $mountpoint = '/afs'; } else { $log->warning("Failed interpreting output of: fs listquota $path"); } # "ordinary" disk } else { my @dfstring =`df -k $path 2>/dev/null`; if ($? != 0) { $log->warning("Failed running: df -k $path"); # The first column may be printed on a separate line. # The relevant numbers are always on the last line. } elsif ($dfstring[-1] =~ /([\/\-.\:\w]+)\s+(\d+)\s+\d+\s+(\d+)\s+\d+%\s+(\/\S*)$/) { $filesystem = $1; $disktotal = int $2/1024; $diskfree = int $3/1024; $mountpoint = $4; } else { $log->warning("Failed interpreting output of: df -k $path"); } } } else { # Skip per-user sessiondirs if ($path eq '*') { $log->info("Found sessiondir=* in arc.conf: disk spaces per user home folders are not calculated."); } else { $log->warning("No such directory: $path"); } } return undef if not defined $disktotal; return {filesystem => $filesystem, megstotal => $disktotal, megsfree => $diskfree, mountpoint => $mountpoint}; } # Given a list of paths, it finds the set of mount points of the filesystems # containing the paths. It then returns a hash with these keys: # ndisks: number of distinct mount points # freesum: sum of free space on all mount points # freemin: minimum free space of any mount point # freemax: maximum free space of any mount point # totalsum: sum of total space on all mountpoints # totalmin: minimum total space of any mount point # totalmax: maximum total space of any mount point # errors: the number of non-existing paths sub diskspaces { my ($freesum, $freemin, $freemax); my ($totalsum, $totalmin, $totalmax); my $errors = 0; my %mounts; my %processedfilesystems = (); for my $path (@_) { my $di = diskinfo($path); if ($di) { my ($filesystem, $total, $free, $mount) = ($di->{filesystem},$di->{megstotal},$di->{megsfree},$di->{mountpoint}); # Do not double count space of paths belonging to the same filesystem if (!
defined $processedfilesystems{$di->{filesystem}}) { $mounts{$mount}{free} = $free; $mounts{$mount}{total} = $total; }; $processedfilesystems{$di->{filesystem}} = ''; } else { ++$errors; } } for my $stats (values %mounts) { if (defined $freesum) { $freesum += $stats->{free}; $freemin = $stats->{free} if $freemin > $stats->{free}; $freemax = $stats->{free} if $freemax < $stats->{free}; $totalsum += $stats->{total}; $totalmin = $stats->{total} if $totalmin > $stats->{total}; $totalmax = $stats->{total} if $totalmax < $stats->{total}; } else { $freesum = $freemin = $freemax = $stats->{free}; $totalsum = $totalmin = $totalmax = $stats->{total}; } } return ( ndisks => scalar keys %mounts, freesum => $freesum || 0, freemin => $freemin || 0, freemax => $freemax || 0, totalsum => $totalsum || 0, totalmin => $totalmin || 0, totalmax => $totalmax || 0, errors => $errors ); } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### # #sub test { # my @options = ( '/grid/session1','/tmp/session2', '/opt/session3', '/tmp', '/opt', '/boot', '/boot/grub/', '/boot/efi', '/bogus'); # require Data::Dumper; import Data::Dumper qw(Dumper); # LogUtils::level('DEBUG'); # #$log->debug("Options:\n" . Dumper(@options)); # print Dumper(@options); # my %results = Sysinfo::diskspaces(@options); # #$log->debug("Results:\n" . Dumper(%results)); # print Dumper(\%results); #} # #test; 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/PBS.pm0000644000000000000000000000013215067751327022654 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.802330543 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PBS.pm0000644000175000002070000010660215067751327024563 0ustar00mockbuildmock00000000000000package PBS; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### use strict; our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info', 'nodes_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'info', 'debug' ); ########################################## # Saved private variables ########################################## our(%lrms_queue); my (%user_jobs_running, %user_jobs_queued); # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # cache info returned by PBS commands my $pbsnodes; my $qstat_f; my $qstat_fQ; # PBS type and flavour my $lrms_type = undef; my $lrms_version = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); %user_jobs_running = (); %user_jobs_queued = (); } } # get PBS type and version sub get_pbs_version ($) { return unless not defined $lrms_type; # path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # determine the flavour and version of PBS my $qmgr_string=`$path/qmgr -c "list server"`; if ( $? 
!= 0 ) { warning("Can't run qmgr"); } if ($qmgr_string =~ /pbs_version = \b(\D+)_(\d\S+)\b/) { $lrms_type = $1; $lrms_version = $2; } else { $qmgr_string =~ /pbs_version = \b(\d\S+)\b/; $lrms_type = "torque"; $lrms_version = $1; } } ########################################## # Private subs ########################################## sub read_pbsnodes ($) { return %$pbsnodes if $pbsnodes; #processing the pbsnodes output by using a hash of hashes # %hoh_pbsnodes (referenced by $hashref) my ( $path ) = shift; my ( %hoh_pbsnodes); my ($nodeid,$node_var,$node_value); unless (open PBSNODESOUT, "$path/pbsnodes -a 2>/dev/null |") { error("error in executing pbsnodes"); } while (my $line= <PBSNODESOUT> ) { if ($line =~ /^$/) {next}; if ($line =~ /^([\w\-]+)/) { $nodeid= $1 ; next; } if ($line =~ / = /) { ($node_var,$node_value) = split (/ = /, $line, 2); $node_var =~ s/\s+//g; chop $node_value; } $hoh_pbsnodes{$nodeid}{$node_var} = $node_value; } close PBSNODESOUT; $pbsnodes = \%hoh_pbsnodes; return %hoh_pbsnodes; } sub read_qstat_fQ ($) { # return already parsed value return %$qstat_fQ if $qstat_fQ; #processing the qstat -fQ output by using a hash of hashes my ( $path ) = shift; my ( %hoh_qstat ); unless (open QSTATOUTPUT, "$path/qstat -Q -f 2>/dev/null |") { error("Error in executing qstat: $path/qstat -Q -f"); } my $current_queue = undef; my ($qstat_var,$qstat_value) = (); while (my $line= <QSTATOUTPUT> ) { chomp($line); if ($line =~ /^$/) {next}; if ($line =~ /^Queue: ([\w\-.]+)$/) { $current_queue = $1; next; } if ( ! defined $current_queue ) {next}; if ($line =~ m/ = /) { ($qstat_var,$qstat_value) = split("=", $line, 2); $qstat_var =~ s/^\s+|\s+$//g; $qstat_value =~ s/^\s+|\s+$//g; $hoh_qstat{$current_queue}{$qstat_var} = $qstat_value; next; } # older PBS versions have no '-1' support # a line starting with a tab is a continuation line if ( $line =~ m/^\t(.+)$/ ) { $qstat_value .= $1; $qstat_value =~ s/\s+$//g; $hoh_qstat{$current_queue}{$qstat_var} = $qstat_value; } } close QSTATOUTPUT; $qstat_fQ = \%hoh_qstat; return %hoh_qstat; } sub read_qstat_f ($) { # return already parsed value return %$qstat_f if $qstat_f; #processing the qstat -f output by using a hash of hashes my ( $path ) = shift; my ( %hoh_qstat ); unless (open QSTATOUTPUT, "$path/qstat -f 2>/dev/null |") { error("Error in executing qstat: $path/qstat -f"); } my $jobid = undef; my ($qstat_var,$qstat_value) = (); while (my $line= <QSTATOUTPUT> ) { chomp($line); if ($line =~ /^$/) {next}; if ($line =~ /^Job Id: (.+)$/) { $jobid = $1; next; } if ( ! defined $jobid ) {next}; if ($line =~ m/ = /) { ($qstat_var,$qstat_value) = split("=", $line, 2); $qstat_var =~ s/^\s+|\s+$//g; $qstat_value =~ s/^\s+|\s+$//g; $hoh_qstat{$jobid}{$qstat_var} = $qstat_value; } # older PBS versions have no '-1' support # a line starting with a tab is a continuation line if ( $line =~ m/^\t(.+)$/ ) { $qstat_value .= $1; $qstat_value =~ s/\s+$//g; $hoh_qstat{$jobid}{$qstat_var} = $qstat_value; } } close QSTATOUTPUT; $qstat_f = \%hoh_qstat; return %hoh_qstat; } # Splits up the value of the exec_host string. # Returns a list of node names, one for each used cpu.
# Should handle node specs of the form: # (1) node1/0+node0/1+node0/2+node2/1 (torque) # (2) hosta/J1+hostb/J2*P (according to the PBSPro manual) # (3) node1+node1+node2+node2 # (4) altix:ssinodes=2:mem=7974912kb:ncpus=4 (found on the web) # (5) grid-wn0749.desy.de/2 Resource_List.neednodes=1:ppn=8 (reported by Andreas Gellrich from Desy HH) sub split_hostlist { my ($exec_host_string) = @_; my @nodes; my $err; for my $nodespec (split '\+', $exec_host_string) { if ($nodespec =~ m{^([^/:]+)/\d+(?:\*(\d+))?$}) { # cases (1) and (2) my ($nodename, $multiplier) = ($1, $2 || 1); push @nodes, $nodename for 1..$multiplier; } elsif ($nodespec =~ m{^([^/:]+)(?::(.+))?$}) { # cases (3) and (4) my ($nodename, $resc) = ($1, $2 || ''); my $multiplier = get_ncpus($resc); push @nodes, $nodename for 1..$multiplier; } elsif ($nodespec =~ m{^([^/]+)/\d+ Resource_List\.neednodes=(\d+):ppn=(\d+)?$} ){ # case (5) my $nodename = $1; my $numnodes = $2 || 1; my $ppn = $3 || 1; my $multiplier = $ppn; #Not sure if this is the correct multiplier. Is there an entry for each node? or should multiplier be numnodes*ppn? push @nodes, $nodename for 1..$multiplier; } else { $err = $nodespec; } } warning("failed counting nodes in expression: $exec_host_string") if $err; return @nodes; } # Deduces the number of requested cpus from the values of these job properties: # Resource_List.select (PBSPro 8+) # Resource_List.nodes # Resource_List.ncpus sub set_cpucount { my ($job) = (@_); my $select = $job->{"Resource_List.select"}; my $nodes = $job->{"Resource_List.nodes"}; my $ncpus = $job->{"Resource_List.ncpus"}; $job->{cpus} = count_usedcpus($select, $nodes, $ncpus); delete $job->{"Resource_List.select"}; delete $job->{"Resource_List.nodes"}; delete $job->{"Resource_List.ncpus"}; } # Convert time from [[[DD:]HH:]MM:]SS to minutes sub count_time { my $pbs_time = shift; # split and reverse PBS time to start from seconds, then drop seconds my @t = reverse split /:/, $pbs_time; my $minutes = 0; if ( !
defined $t[1] ) { # PBS seconds only case $minutes = int( $t[0] / 60 ); } else { # drop seconds shift @t; $minutes = $t[0]; $minutes += $t[1]*60 if defined $t[1]; $minutes += $t[2]*60*24 if defined $t[2]; } return $minutes; } sub count_usedcpus { my ($select, $nodes, $ncpus) = @_; return sum_over_chunks(\&cpus_in_select_chunk, $select) if defined $select; return $ncpus || 1 if not defined $nodes or $nodes eq '1'; return sum_over_chunks(\&cpus_in_nodes_chunk, $nodes) if defined $nodes; return 1; } sub sum_over_chunks { my ($count_func, $string) = @_; my $totalcpus = 0; for my $chunk (split '\+', $string) { my $cpus = &$count_func($chunk); $totalcpus += $cpus; } return $totalcpus; } # counts cpus in chunk definitions of the forms found in Resource_List.nodes property # 4 # 2:ppn=2 # host1 # host1:prop1:prop2 # prop1:prop2:ppn=4 sub cpus_in_nodes_chunk { my ($chunk) = @_; my ($ncpus,$dummy) = split ':', $chunk; $ncpus = 1 if $ncpus =~ m/\D/; # chunk count omitted return $ncpus * get_ppn($chunk); } # counts cpus in chunk definitions of the forms found in Resource_List.select (PBSPro 8+): # 4 # 2:ncpus=1 # 1:ncpus=4:mpiprocs=4:host=hostA sub cpus_in_select_chunk { my ($chunk) = @_; return $1 if $chunk =~ m/^(\d+)$/; if ($chunk =~ m{^(\d+):(.*)$}) { my ($cpus, $resc) = ($1, $2); return $cpus * get_ncpus($resc); } return 0; # not a valid chunk } # extracts the value of ppn from a string like blah:ppn=N:blah sub get_ppn { my ($resc) = @_; for my $res (split ':', $resc) { return $1 if $res =~ m /^ppn=(\d+)$/; } return 1; } # extracts the value of ncpus from a string like blah:ncpus=N:blah sub get_ncpus { my ($resc) = @_; for my $res (split ':', $resc) { return $1 if $res =~ m /^ncpus=(\d+)$/; } return 1; } # gets information about each destination queue behind a # routing queue and copies it into the routing queue data structure. # at the moment it only copies data from the first queue # # input: $queue name of the current queue # $path to pbs binaries # $singledqueue that contains the only queue behind the routing one # %qstat{} for the current queue # output: the %dqueue hash containing info about destination queues # in %lrms_queue fashion sub process_dqueues($$%){ my $qname = shift; my $path = shift; my (%qstat) = %{$_[0]}; my $singledqueue; my %dqueues; # build DQs data structure my @dqnames; if (defined $qstat{'route_destinations'}) { @dqnames=split(",",$qstat{'route_destinations'}); @dqueues{@dqnames}=undef; my (%hoh_qstatfQ) = read_qstat_fQ($path); foreach my $dqname ( keys %dqueues ) { debug("Processing queues behind routing queue. Current queue is $dqname"); $dqueues{$dqname}=$hoh_qstatfQ{$dqname}; } # debug($dqueues{'verylong'}{'resources_max.walltime'}); } else { error("No route_destinations for routing queue $qname. Please check LRMS configuration."); } # take the first destination queue behind the RQ, copy its data to the RQ # this happens only if the RQ has no data defined on PBS # this should solve bug #859 $singledqueue=shift(@dqnames); debug('Just one queue behind routing queue is currently supported: '.$singledqueue); my @attributes=( 'max_running', 'max_user_run', 'max_queuable', 'resources_max.cput', 'resources_min.cput', 'resources_default.cput', 'resources_max.walltime', 'resources_min.walltime', 'resources_default.walltime', 'state_count' ); foreach my $rkey (@attributes) { # line to check queues under routing queue values. Undefined values generate noise in logs, # so it is commented out. # debug('with key '.$rkey.' qstat returns '.$qstat{$rkey}.' and the dest.
queue has '.$dqueues{$singledqueue}{$rkey} ); if (!defined $qstat{$rkey}) {${$_[0]}{$rkey}=$dqueues{$singledqueue}{$rkey};}; } return %dqueues; } ############################################ # Public subs ############################################# sub cluster_info ($) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Return data structure %lrms_cluster{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_cluster); # flavour and version of PBS get_pbs_version($config); $lrms_cluster{lrms_type} = $lrms_type; $lrms_cluster{lrms_glue_type}=lc($lrms_type); $lrms_cluster{lrms_version} = $lrms_version; # PBS treats cputime limit for parallel/multi-cpu jobs as job-total $lrms_cluster{has_total_cputime_limit} = 1; # processing the pbsnodes output by using a hash of hashes %hoh_pbsnodes my ( %hoh_pbsnodes ) = read_pbsnodes( $path ); error("The given flavour of PBS $lrms_cluster{lrms_type} is not supported") unless grep {$_ eq lc($lrms_cluster{lrms_type})} qw(openpbs spbs torque pbspro); $lrms_cluster{totalcpus} = 0; my ($number_of_running_jobs) = 0; $lrms_cluster{cpudistribution} = ""; my (@cpudist) = 0; my %available_nodes = (); # loop over all available nodes foreach my $node (keys %hoh_pbsnodes) { # skip nodes that do not match the dedicated_node_string filter if ( exists $$config{dedicated_node_string} && $$config{dedicated_node_string} ne "") { next unless ( $hoh_pbsnodes{$node}{"properties"} =~ m/^([^,]+,)*$$config{dedicated_node_string}(,[^,]+)*$/); } # add node to available_nodes hash $available_nodes{$node} = 1; # get node state and number of CPUs my ($nodestate) = $hoh_pbsnodes{$node}{"state"}; my $nodecpus; if ($hoh_pbsnodes{$node}{'np'}) { $nodecpus = $hoh_pbsnodes{$node}{'np'}; } elsif ($hoh_pbsnodes{$node}{'resources_available.ncpus'}) { $nodecpus = $hoh_pbsnodes{$node}{'resources_available.ncpus'}; } next if ($nodestate=~/down/ or $nodestate=~/offline/); if ($nodestate=~/(?:,|^)busy/) { $lrms_cluster{totalcpus} += $nodecpus; $cpudist[$nodecpus] +=1; $number_of_running_jobs += $nodecpus; next; } $lrms_cluster{totalcpus} += $nodecpus; $cpudist[$nodecpus] += 1; if ($hoh_pbsnodes{$node}{"jobs"}){ $number_of_running_jobs++; my ( @comma ) = ($hoh_pbsnodes{$node}{"jobs"}=~ /,/g); $number_of_running_jobs += @comma; } } # form LRMS cpudistribution string for (my $i=0; $i<=$#cpudist; $i++) { next unless ($cpudist[$i]); $lrms_cluster{cpudistribution} .= " ".$i."cpu:".$cpudist[$i]; } # read the qstat -f information about all jobs # queued cpus, total number of cpus in all jobs $lrms_cluster{usedcpus} = 0; $lrms_cluster{queuedcpus} = 0; $lrms_cluster{queuedjobs} = 0; $lrms_cluster{runningjobs} = 0; my %qstat_jobs = read_qstat_f($path); for my $key (keys %qstat_jobs) { # usercpus (running jobs) if ( $qstat_jobs{$key}{job_state} =~ /R/) { $lrms_cluster{runningjobs}++; my @nodes = split_hostlist($qstat_jobs{$key}{exec_host}); # filter using dedicated_node_string foreach my $node ( @nodes ) { next unless defined $available_nodes{$node}; $lrms_cluster{usedcpus}++; } } # if ( $qstat_jobs{$key}{job_state} =~ /(W|T|Q)/) { $lrms_cluster{queuedjobs}++; $lrms_cluster{queuedcpus}+=count_usedcpus($qstat_jobs{$key}{"Resource_List.select"}, $qstat_jobs{$key}{"Resource_List.nodes"}, $qstat_jobs{$key}{"Resource_List.ncpus"}); } } # Names of all LRMS queues @{$lrms_cluster{queue}} = (); my ( %hoh_qstat ) =
read_qstat_fQ($path); for my $qkey (keys %hoh_qstat) { push @{$lrms_cluster{queue}}, $qkey; } return %lrms_cluster; } sub queue_info ($$) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Name of the queue to query my ($qname) = shift; init_globals($qname); # The return data structure is %lrms_queue. # In this template it is defined as persistent module data structure, # because it is later used by jobs_info(), and we wish to avoid # re-construction of it. If it were not needed later, it would be defined # only in the scope of this subroutine, as %lrms_cluster previously. # Return data structure %lrms_queue{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. # read the queue information for the queue entry from the qstat my (%hoh_qstat) = read_qstat_fQ($path); my (%qstat) = %{$hoh_qstat{$qname}}; # this script contains a solution for a single queue behind the # routing one, the routing queue will inherit some of its # attributes. # this hash contains qstat records for queues - in this case just one my %dqueues; # this variable contains the single destination queue my $singledqueue; if ($qstat{queue_type} =~ /Route/) { %dqueues = process_dqueues($qname,$path,\%qstat); $singledqueue = ( keys %dqueues )[0]; } else { undef %dqueues; undef $singledqueue; } # publish queue limits parameters # general limits (publish as is) my (%keywords); my (%keywords_all) = ( 'max_running' => 'maxrunning', 'max_user_run' => 'maxuserrun', 'max_queuable' => 'maxqueuable' ); # TODO: MinSlots, etc. my (%keywords_torque) = ( 'resources_max.procct' => 'MaxSlotsPerJob' ); my (%keywords_pbspro) = ( 'resources_max.ncpus' => 'MaxSlotsPerJob' ); get_pbs_version($config); if ( $lrms_type eq lc "torque" ) { %keywords = (%keywords_all, %keywords_torque); } elsif ( $lrms_type eq lc "pbspro" ) { %keywords = (%keywords_all, %keywords_pbspro); } else { %keywords = %keywords_all; } foreach my $k (keys %keywords) { if (defined $qstat{$k} ) { $lrms_queue{$keywords{$k}} = $qstat{$k}; } else { $lrms_queue{$keywords{$k}} = ""; } } # queue time limits (convert to minutes) %keywords = ( 'resources_max.cput' => 'maxcputime', 'resources_min.cput' => 'mincputime', 'resources_default.cput' => 'defaultcput', 'resources_max.walltime' => 'maxwalltime', 'resources_min.walltime' => 'minwalltime', 'resources_default.walltime' => 'defaultwallt' ); foreach my $k (keys %keywords) { if ( defined $qstat{$k} ) { $lrms_queue{$keywords{$k}} = (&count_time($qstat{$k})+($k eq 'resources_min.cput'?1:0)); } else { $lrms_queue{$keywords{$k}} = ""; } } # determine the queue status from the LRMS # Used to be set to 'active' if the queue can accept jobs # Now lists the number of available processors, "0" if no free # cpus. Negative number signals some error state of PBS # (reserved for future use). $lrms_queue{status} = -1; $lrms_queue{running} = 0; $lrms_queue{queued} = 0; $lrms_queue{totalcpus} = 0; if ( ($qstat{"enabled"} =~ /True/) and ($qstat{"started"} =~ /True/)) { # refresh routing queue records, in case something changed on the # destination queues if ($qstat{queue_type} =~ /Route/) { debug("CPUs calculation pass. Queues are scanned a second time.
Current queue is: $qstat{queue_type}"); %dqueues = process_dqueues($qname,$path,\%qstat); # this variable contains the single destination queue $singledqueue = ( keys %dqueues )[0]; } else { undef %dqueues; undef $singledqueue; } # qstat does not return number of cpus, use pbsnodes instead. my ($torque_freecpus,$torque_totalcpus,$nodes_totalcpus,$nodes_freecpus)=(0,0,0,0); # processing the pbsnodes output by using a hash of hashes %hoh_pbsnodes my ( %hoh_pbsnodes ) = read_pbsnodes( $path ); foreach my $node (keys %hoh_pbsnodes) { my $cpus; next if $hoh_pbsnodes{$node}{'state'} =~ m/offline/; next if $hoh_pbsnodes{$node}{'state'} =~ m/down/; if ($hoh_pbsnodes{$node}{'np'}) { $cpus = $hoh_pbsnodes{$node}{'np'}; } elsif ($hoh_pbsnodes{$node}{'resources_available.ncpus'}) { $cpus = $hoh_pbsnodes{$node}{'resources_available.ncpus'}; } $nodes_totalcpus+=$cpus; if ($hoh_pbsnodes{$node}{'state'} =~ m/free/){ $nodes_freecpus+=$cpus; } # If pbsnodes have properties assigned to them # check if queuename or dedicated_node_string matches. # $singledqueue check has been added for routing queue support, # also the destination queue is checked to calculate totalcpus # also adds correct behaviour for queue_node_string if ( ( ! defined $hoh_pbsnodes{$node}{'properties'} ) || ( ( defined $qname && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$qname(,[^,]+)*$/ ) || ( defined $$config{pbs_dedicated_node_string} && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$$config{pbs_dedicated_node_string}(,[^,]+)*$/ ) || ( defined $$config{pbs_queue_node} && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$$config{pbs_queue_node}(,[^,]+)*$/ ) || ( defined $singledqueue && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$singledqueue(,[^,]+)*$/ ) ) ) { $torque_totalcpus+=$cpus; if ($hoh_pbsnodes{$node}{'state'} =~ m/free/){ $torque_freecpus+=$cpus; } } } if ($torque_totalcpus eq 0) { warning("Node properties are defined in PBS but nothing match the queue filters. Assigning counters for all nodes."); $torque_totalcpus = $nodes_totalcpus; $torque_freecpus = $nodes_freecpus; } $lrms_queue{totalcpus} = $torque_totalcpus; debug("Totalcpus for all queues are: $lrms_queue{totalcpus}"); if(defined $$config{totalcpus}){ if ($lrms_queue{totalcpus} eq "" or $$config{totalcpus} < $lrms_queue{totalcpus}) { $lrms_queue{totalcpus}=$$config{totalcpus}; } } $lrms_queue{status} = $torque_freecpus; $lrms_queue{status}=0 if $lrms_queue{status} < 0; if ( $qstat{state_count} =~ m/.*Running:([0-9]*).*/ ){ $lrms_queue{running}=$1; } else { $lrms_queue{running}=0; } # calculate running in case of a routing queue if ( $qstat{queue_type} =~ /Route/ ) { debug($dqueues{$singledqueue}{state_count}); if ( $dqueues{$singledqueue}{state_count} =~ m/.*Running:([0-9]*).*/ ) { $lrms_queue{running}=$1; } } # the above gets the number of nodes not the number of cores in use. If multi core jobs are running, "running" will be underestimated. # Instead use totalcpus - freecpus (This might overrepresent running. because pbsnodes count whole nodes in use.) 
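# Illustrative example with made-up numbers: on 4 nodes x 8 cores,
# torque_totalcpus = 32; if pbsnodes reports exactly one node 'free',
# torque_freecpus = 8 and the estimate below gives 32 - 8 = 24 running
# cores, even when some of those 24 cores are actually idle.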
# CUS (2015-02-09)
my $runningcores = $torque_totalcpus - $torque_freecpus;
$runningcores = 0 if $runningcores < 0;
$lrms_queue{running} = $runningcores if $runningcores > $lrms_queue{running};

if ($lrms_queue{totalcpus} eq 0) {
    warning("Can't determine number of cpus for queue $qname");
}

if ( $qstat{state_count} =~ m/.*Queued:([0-9]*).*/ ) {
    $lrms_queue{queued} = $1;
} else {
    $lrms_queue{queued} = 0;
}

# fallback for default values that are required for normal operation
$lrms_queue{MaxSlotsPerJob} = $lrms_queue{totalcpus} if $lrms_queue{MaxSlotsPerJob} eq "";

# calculate queued in case of a routing queue
# queued jobs is the sum of jobs queued in the routing queue
# plus jobs in the destination queue
if ( $qstat{queue_type} =~ /Route/ ) {
    debug($dqueues{$singledqueue}{state_count});
    if ( $dqueues{$singledqueue}{state_count} =~ m/.*Queued:([0-9]*).*/ ) {
        $lrms_queue{queued} = $lrms_queue{queued} + $1;
    }
}
}

return %lrms_queue;
}

sub jobs_info ($$@) {

    # Path to LRMS commands
    my ($config) = shift;
    my ($path) = $$config{pbs_bin_path};
    error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path;

    # Name of the queue to query
    my ($qname) = shift;

    init_globals($qname);

    # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status)
    my ($jids) = shift;

    # Return data structure %lrms_jobs{$lrms_local_job_id}{$keyword}
    #
    # All values should be defined, empty values "" are ok if field
    # does not apply to particular LRMS.
    my (%lrms_jobs);

    # edge case if @$jids is empty, return now
    return %lrms_jobs if ($#$jids < 0);

    # Fill %lrms_jobs here (da implementation)

    # rank is treated separately as it does not have an entry in
    # qstat output, comment because it is an array, and mem
    # because "kB" needs to be stripped from the value
    my (%skeywords) = ('job_state' => 'status');

    my (%tkeywords) = ( 'resources_used.walltime' => 'walltime',
                        'resources_used.cput' => 'cputime',
                        'Resource_List.walltime' => 'reqwalltime',
                        'Resource_List.cputime' => 'reqcputime');

    my (%nkeywords) = ( 'Resource_List.select' => 1,
                        'Resource_List.nodes' => 1,
                        'Resource_List.ncpus' => 1);

    my ($alljids) = join ' ', @{$jids};

    my ($rank) = 0;
    my %job_owner;

    my $handle_attr = sub {
        my ($jid, $k, $v) = @_;

        if ( defined $skeywords{$k} ) {
            $lrms_jobs{$jid}{$skeywords{$k}} = $v;
            if ($k eq "job_state") {
                if ( $v eq "U" ) {
                    $lrms_jobs{$jid}{status} = "S";
                } elsif ( $v eq "C" ) {
                    $lrms_jobs{$jid}{status} = ""; # No status means job has completed
                } elsif ( $v ne "R" and $v ne "Q" and $v ne "S" and $v ne "E" ) {
                    $lrms_jobs{$jid}{status} = "O";
                }
            }
        } elsif ( defined $tkeywords{$k} ) {
            $lrms_jobs{$jid}{$tkeywords{$k}} = &count_time($v);
        } elsif ( defined $nkeywords{$k} ) {
            $lrms_jobs{$jid}{$k} = $v;
        } elsif ( $k eq 'exec_host' ) {
            my @nodes = split_hostlist($v);
            $lrms_jobs{$jid}{nodes} = \@nodes;
            #$lrms_jobs{$jid}{cpus} = scalar @nodes;
        } elsif ( $k eq 'comment' ) {
            $lrms_jobs{$jid}{comment} = [] unless $lrms_jobs{$jid}{comment};
            push @{$lrms_jobs{$jid}{comment}}, "LRMS: $v";
        } elsif ($k eq 'resources_used.vmem') {
            $v =~ s/(\d+).*/$1/;
            $lrms_jobs{$jid}{mem} = $v;
        }

        if ( $k eq 'Job_Owner' ) {
            $v =~ /(\S+)@/;
            $job_owner{$jid} = $1;
        }

        if ( $k eq 'job_state' ) {
            if ($v eq 'R') {
                $lrms_jobs{$jid}{rank} = "";
            } elsif ($v eq 'C') {
                $lrms_jobs{$jid}{rank} = "";
            } else {
                $rank++;
                $lrms_jobs{$jid}{rank} = $rank;
                $jid =~ /^(\d+).+/;
            }
            if ($v eq 'R' or $v eq 'E') {
                ++$user_jobs_running{$job_owner{$jid}};
            }
            if ($v eq 'Q') {
                ++$user_jobs_queued{$job_owner{$jid}};
            }
        }
    };

    my ( %hoh_qstatf ) = read_qstat_f($path);

    # make two sorted indices
    my @qstatkeys = sort keys
%hoh_qstatf; my @sjids = sort (@$jids); my $jidindex = 0; foreach my $pbsjid (@qstatkeys) { # only jobids known by A-REX are processed my $jid = undef; while ($sjids[$jidindex] lt $pbsjid) { last if ($jidindex == $#sjids); $jidindex++; } if ( $pbsjid =~ /^$sjids[$jidindex]$/ ) { $jid = $sjids[$jidindex]; } next unless defined $jid; # handle qstat attributes of the jobs foreach my $k (keys %{$hoh_qstatf{$jid}} ) { my $v = $hoh_qstatf{$jid}{$k}; &$handle_attr($jid, $k, $v); } # count cpus for this jobs set_cpucount($lrms_jobs{$jid}); } my (@scalarkeywords) = ('status', 'rank', 'mem', 'walltime', 'cputime', 'reqwalltime', 'reqcputime'); foreach my $jid ( @$jids ) { foreach my $k ( @scalarkeywords ) { if ( ! defined $lrms_jobs{$jid}{$k} ) { $lrms_jobs{$jid}{$k} = ""; } } $lrms_jobs{$jid}{comment} = [] unless $lrms_jobs{$jid}{comment}; $lrms_jobs{$jid}{nodes} = [] unless $lrms_jobs{$jid}{nodes}; } return %lrms_jobs; } sub users_info($$@) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Name of the queue to query my ($qname) = shift; init_globals($qname); # Unix user names mapped to grid users my ($accts) = shift; # Return data structure %lrms_users{$unix_local_username}{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_users); # Check that users have access to the queue my ( %hoh_qstatfQ ) = read_qstat_fQ( $path ); my $acl_user_enable = 0; my @acl_users; # added for routing queue support my @dqueues; my $singledqueue; my $isrouting; foreach my $k (keys %{$hoh_qstatfQ{$qname}}) { my $v = $hoh_qstatfQ{$qname}{$k}; if ( $k eq "acl_user_enable" && $v eq "True") { $acl_user_enable = 1; } if ( $k eq "acl_users" ) { unless ( $v eq 'False' ) { # This condition is kept here in case the reason # for it being there in the first place was that some # version or flavour of PBS really has False as an alternative # to usernames to indicate the absence of user access control # A Corrallary: Dont name your users 'False' ... push @acl_users, split ',', $v; } } # added to support routing queues if ( !$acl_user_enable ) { if ($k eq "route_destinations" ) { @dqueues=split (',',$v); $singledqueue=shift(@dqueues); info('Routing queue did not have acl information. Local user acl taken from destination queue: '.$singledqueue); $isrouting = 1; } } } # if the acl_user_enable is not defined in the RQ, # it could be defined in the destination queues. # we proceed same way as before but on the first # destination queue to propagate the info to the routing one if ($isrouting){ debug("Getting acl from destination queue $singledqueue"); # Check that users have access to the queue $acl_user_enable = 0; foreach my $k (keys %{$hoh_qstatfQ{$singledqueue}}) { my $v = $hoh_qstatfQ{$singledqueue}{$k}; if ( $k eq "acl_user_enable" && $v eq "True") { $acl_user_enable = 1; } if ( $k eq "acl_users" ) { unless ( $v eq 'False' ) { push @acl_users, split ',', $v; } } } debug(@acl_users); } # acl_users is only in effect when acl_user_enable is true if ($acl_user_enable) { foreach my $a ( @{$accts} ) { if ( grep { $a eq $_ } @acl_users ) { # The acl_users list has to be sent back to the caller. # This trick works because the config hash is passed by # reference. push @{$$config{acl_users}}, $a; } else { warning("Local user $a does not ". 
"have access in queue $qname."); } } } else { delete $$config{acl_users}; } # Uses saved module data structure %lrms_queue, which # exists if queue_info is called before if ( ! exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $user_jobs_running{$u} = 0 unless $user_jobs_running{$u}; if ($lrms_queue{maxuserrun} and ($lrms_queue{maxuserrun} - $user_jobs_running{$u}) < $lrms_queue{status} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $user_jobs_running{$u}; } else { $lrms_users{$u}{freecpus} = $lrms_queue{status}; } $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; if ($lrms_users{$u}{freecpus} < 0) { $lrms_users{$u}{freecpus} = 0; } if ($lrms_queue{maxcputime} and $lrms_users{$u}{freecpus} > 0) { $lrms_users{$u}{freecpus} .= ':'.$lrms_queue{maxcputime}; } } return %lrms_users; } sub nodes_info($) { my $config = shift; my $path = $config->{pbs_bin_path}; my %hoh_pbsnodes = read_pbsnodes($path); my %nodes; for my $host (keys %hoh_pbsnodes) { my ($isfree, $isavailable) = (0,0); $isfree = 1 if $hoh_pbsnodes{$host}{state} =~ /free/; $isavailable = 1 unless $hoh_pbsnodes{$host}{state} =~ /down|offline|unknown/; $nodes{$host} = {isfree => $isfree, isavailable => $isavailable}; my $props = $hoh_pbsnodes{$host}{properties}; $nodes{$host}{tags} = [ split /,\s*/, $props ] if $props; my $np = $hoh_pbsnodes{$host}{np}; $nodes{$host}{slots} = int $np if $np; my $status = $hoh_pbsnodes{$host}{status}; if ($status) { for my $token (split ',', $status) { my ($opt, $val) = split '=', $token, 2; next unless defined $val; if ($opt eq 'totmem') { $nodes{$host}{vmem} = int($1/1024) if $val =~ m/^(\d+)kb/; } elsif ($opt eq 'physmem') { $nodes{$host}{pmem} = int($1/1024) if $val =~ m/^(\d+)kb/; } elsif ($opt eq 'ncpus') { $nodes{$host}{lcpus} = int $val; } elsif ($opt eq 'uname') { my @uname = split ' ', $val; $nodes{$host}{sysname} = $uname[0]; $nodes{$host}{release} = $uname[2] if @uname > 2; $nodes{$host}{machine} = $uname[-1] if $uname[-1]; } } } } return %nodes; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/test0000644000000000000000000000013015067751425022571 xustar0030 mtime=1759499029.880435971 28 atime=1759499034.7655102 30 ctime=1759499029.880435971 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/0000755000175000002070000000000015067751425024552 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024705 xustar0030 mtime=1759498967.763329241 30 atime=1759498967.868493696 30 ctime=1759499029.873763706 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/Makefile.am0000644000175000002070000000066415067751327026615 0ustar00mockbuildmock00000000000000TESTS_ENVIRONMENT = \ $(PERL) -I$(srcdir) -I$(srcdir)/.. 
-w -MTest::Harness -e '$$Test::Harness::verbose=0; runtests @ARGV;' TESTS = pbs.t slurm.t sge.t PERL = @PERL@ check_SCRIPTS = InfoproviderTestSuite.pm command-simulator.sh \ $(TESTS) EXTRA_DIST = InfoproviderTestSuite.pm command-simulator.sh pbs.t slurm.t sge.t command-simulator.sh: $(top_srcdir)/src/tests/lrms/command-simulator.sh cp $< $@ chmod +x $@ nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356024720 xustar0030 mtime=1759498990.195424405 30 atime=1759499018.160257882 30 ctime=1759499029.874909009 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/Makefile.in0000644000175000002070000005410615067751356026630 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/infoproviders/test ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ 
$(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ 
BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = 
@PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = 
@top_srcdir@ unitsdir = @unitsdir@ TESTS_ENVIRONMENT = \ $(PERL) -I$(srcdir) -I$(srcdir)/.. -w -MTest::Harness -e '$$Test::Harness::verbose=0; runtests @ARGV;' TESTS = pbs.t slurm.t sge.t check_SCRIPTS = InfoproviderTestSuite.pm command-simulator.sh \ $(TESTS) EXTRA_DIST = InfoproviderTestSuite.pm command-simulator.sh pbs.t slurm.t sge.t all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/infoproviders/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/infoproviders/test/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? 
-ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ col="$$grn"; \ else \ col="$$red"; \ fi; \ echo "$${col}$$dashes$${std}"; \ echo "$${col}$$banner$${std}"; \ test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ test -z "$$report" || echo "$${col}$$report$${std}"; \ echo "$${col}$$dashes$${std}"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_SCRIPTS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: all all-am check check-TESTS check-am clean clean-generic \ clean-libtool cscopelist-am ctags-am distclean \ distclean-generic distclean-libtool distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am .PRECIOUS: Makefile command-simulator.sh: $(top_srcdir)/src/tests/lrms/command-simulator.sh cp $< $@ chmod +x $@ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:

nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/InfoproviderTestSuite.pm:

package InfoproviderTestSuite;

use Cwd;
use File::Path;
use File::Temp;
use LRMSInfo;
use Test::More;

BEGIN {
    require Exporter;
    require DynaLoader;
    @ISA = qw(Exporter DynaLoader);
    @EXPORT = qw(ok is isnt like unlike cmp_ok can_ok new_ok is_deeply pass fail);
}

sub new {
    my $class = shift;
    my $self = { _lrms => shift,
                 _ntests => shift,
                 _current_test => "",
                 _current_name => "",
                 _current_testdir => "",
               };
    bless $self, $class;
    return $self;
}

sub test {
    my ( $self, $name, $test ) = @_;
    $self->{_current_name} = $name;
    $self->{_current_test} = $test;
    &$test;
}

sub setup {
    my ( $self, $progs, $simulator_output ) = @_;

    # Create directory for executing test.
    $self->{_current_testdir} = File::Temp::tempdir(getcwd()."/$self->{_lrms}_test_$self->{_current_name}_XXXXXX", CLEANUP => 1);
    mkdir "$self->{_current_testdir}/bin";

    # Create simulator outcome file
    open(my $fh, '>', "$self->{_current_testdir}/simulator-outcome.dat");
    print $fh "$simulator_output";
    close $fh;
    $ENV{SIMULATOR_OUTCOME_FILE} = "$self->{_current_testdir}/simulator-outcome.dat";
    $ENV{SIMULATOR_ERRORS_FILE} = "$self->{_current_testdir}/simulator-errors.dat";

    # Create soft links
    for my $prog ( @{$progs} ) {
        my $newname = "$self->{_current_testdir}/bin/$prog";
        symlink(getcwd()."/command-simulator.sh", $newname) or die("$0: Failed creating symlink: $newname");
    }
}

sub collect {
    my ( $self, $progs, $simulator_output, $cfg ) = @_;

    $self->setup(\@{$progs}, $simulator_output);

    foreach $key (keys %{$cfg}) {
        if (ref($$cfg{$key}) eq "") {
            $$cfg{$key} =~ s//$self->{_current_testdir}/g;
        }
    }
    $$cfg{lrms} = $self->{_lrms};

    my $lrms_info = LRMSInfo::collect($cfg);

    if (-e "$self->{_current_testdir}/simulator-errors.dat") {
        open FILE, "$self->{_current_testdir}/simulator-errors.dat";
        $simulator_errors = join("", <FILE>);
        close FILE;
        diag($simulator_errors);
        fail("command simulation");
    }

    return $lrms_info;
}

sub done_testing {
    Test::More::done_testing();
}

sub testing_done {
    Test::More::done_testing();
}

1;

nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/pbs.t:

#!/usr/bin/perl

use strict;
use InfoproviderTestSuite;

my $suite = new InfoproviderTestSuite('pbs');

$suite->test("basic", sub {
  my @progs = qw(qmgr qstat pbsnodes);
  my $simulator_output = < "/bin", pbs_log_path => "/bin", dedicated_node_string => '', # NB: it's set in order to avoid perl warnings queues => {}, jobs => [], loglevel => '5' };
  $cfg->{queues}{queue1} = {users => ['user1']};

  my $lrms_info = $suite->collect(\@progs, $simulator_output, $cfg);

  is(ref $lrms_info, 'HASH', 'result type');
  is(ref $lrms_info->{cluster}, 'HASH', 'has cluster');
  is($lrms_info->{cluster}{lrms_type}, 'torque', 'lrms_type');
  is($lrms_info->{cluster}{lrms_version}, '2.3.7', 'lrms_version');
  is($lrms_info->{cluster}{runningjobs}, 0, 'runningjobs');
  is($lrms_info->{cluster}{totalcpus}, 2, 'totalcpus');
is($lrms_info->{cluster}{queuedcpus}, 0, 'queuedcpus'); is($lrms_info->{cluster}{queuedjobs}, 0, 'queuedjobs'); is($lrms_info->{cluster}{usedcpus}, 0, 'usedcpus'); is($lrms_info->{cluster}{cpudistribution}, '2cpu:1', 'cpudistribution'); is(ref $lrms_info->{queues}, 'HASH', 'has queues'); is(ref $lrms_info->{queues}{queue1}, 'HASH', 'has queue1'); ok($lrms_info->{queues}{queue1}{status} >= 0, 'queue1->status'); is($lrms_info->{queues}{queue1}{totalcpus}, 2, 'queue1->totalcpus'); is($lrms_info->{queues}{queue1}{queued}, 0, 'queue1->queued'); is($lrms_info->{queues}{queue1}{running}, 0, 'queue1->running'); is($lrms_info->{queues}{queue1}{maxrunning}, 10, 'queue1->maxrunning'); is(ref $lrms_info->{queues}{queue1}{users}, 'HASH', 'has users'); is(ref $lrms_info->{queues}{queue1}{users}{user1}, 'HASH', 'has user1'); is($lrms_info->{queues}{queue1}{users}{user1}{queuelength}, 0, 'user1->queuelenght'); is_deeply($lrms_info->{queues}{queue1}{users}{user1}{freecpus}, { '2' => 0 }, 'user1->freecpus'); is(ref $lrms_info->{nodes}, 'HASH', 'has nodes'); is(ref $lrms_info->{nodes}{node1}, 'HASH', 'has node1'); is($lrms_info->{nodes}{node1}{isavailable}, 1, 'node1->isavailable'); is($lrms_info->{nodes}{node1}{slots}, 2, 'node1->slots'); }); $suite->testing_done(); nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/PaxHeaders/slurm.t0000644000000000000000000000013215067751327024200 xustar0030 mtime=1759498967.763329241 30 atime=1759498967.868493696 30 ctime=1759499029.880410591 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/slurm.t0000644000175000002070000002751415067751327026113 0ustar00mockbuildmock00000000000000#!/usr/bin/perl use strict; use InfoproviderTestSuite; my $suite = new InfoproviderTestSuite('slurm'); # TODO: Multiple queues, multiple users, multiple freecpus (if possible by slurm), multiple nodes, multiple comments?, multiple nodes. 
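# How the simulated commands work (a sketch; see command-simulator.sh and
# InfoproviderTestSuite::setup): sinfo, scontrol and squeue are symlinks to
# command-simulator.sh, which looks up its own command line among the
# args="..." (or regex rargs="...") keys in $simulator_output and prints the
# associated output="..." value or output=<<<TAG heredoc block; optional rc=
# and sleep= lines set the exit code and a delay. A command with no matching
# entry is logged to SIMULATOR_ERRORS_FILE, which makes collect() fail the
# test. For example, the entry
#     args="sinfo -a -h -o %C"
#     output="0/1/0/1"
# below makes the fake `sinfo -a -h -o %C` print "0/1/0/1".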
$suite->test("basic", sub { my @progs = qw(sinfo scontrol squeue); my $simulator_output = <<'ENDSIMULATOROUTPUT'; # Output from SLURM version 2.3.2 args="sinfo -a -h -o PartitionName=%P TotalCPUs=%C TotalNodes=%D MaxTime=%l DefTime=%L NodeNames=%N" output="PartitionName=queue1 TotalCPUs=0/1/0/1 TotalNodes=1 MaxTime=infinite DefTime=infinite NodeNames=test-machine" # Output from SLURM version 2.3.2 args="sinfo -a -h -o cpuinfo=%C" output="cpuinfo=0/1/0/1" args="sinfo -a -h -o %C" output="0/1/0/1" # Output from SLURM version 2.3.2 args="scontrol show node --oneliner" output="NodeName=test-machine Arch=i686 CoresPerSocket=1 CPUAlloc=0 CPUErr=0 CPUTot=1 Features=(null) Gres=(null) NodeAddr=test-machine NodeHostName=test-machine OS=Linux RealMemory=1 Sockets=1 State=IDLE ThreadsPerCore=1 TmpDisk=0 Weight=1 BootTime=2014-05-20T15:13:45 SlurmdStartTime=2014-05-20T15:13:50 Reason=(null)" # Output from SLURM version 2.3.2 args="squeue -a -h -t all -o JobId=%i TimeUsed=%M Partition=%P JobState=%T ReqNodes=%D ReqCPUs=%C TimeLimit=%l Name=%j NodeList=%N" output=<< "/bin", queues => { queue1 => { users => ['user1'] } }, jobs => ['1', '2'], loglevel => '5' }; my $lrms_info = $suite->collect(\@progs, $simulator_output, $cfg); is(ref $lrms_info, 'HASH', 'result type'); is(ref $lrms_info->{cluster}, 'HASH', 'has cluster'); is(lc($lrms_info->{cluster}{lrms_type}), 'slurm', 'lrms_type'); is($lrms_info->{cluster}{lrms_version}, '2.3.2', 'lrms_version'); is($lrms_info->{cluster}{schedpolicy}, undef, 'schedpolicy'); is($lrms_info->{cluster}{totalcpus}, 1, 'totalcpus'); is($lrms_info->{cluster}{queuedcpus}, 0, 'queuedcpus'); is($lrms_info->{cluster}{usedcpus}, 0, 'usedcpus'); is($lrms_info->{cluster}{queuedjobs}, 1, 'queuedjobs'); is($lrms_info->{cluster}{runningjobs}, 1, 'runningjobs'); is($lrms_info->{cluster}{cpudistribution}, '1cpu:1', 'cpudistribution'); is(ref $lrms_info->{queues}, 'HASH', 'has queues'); is(ref $lrms_info->{queues}{queue1}, 'HASH', 'has queue1'); is($lrms_info->{queues}{queue1}{status}, 10000, 'queue1->status'); is($lrms_info->{queues}{queue1}{maxrunning}, 10000, 'queue1->maxrunning'); is($lrms_info->{queues}{queue1}{maxqueuable}, 10000, 'queue1->maxqueuable'); is($lrms_info->{queues}{queue1}{maxuserrun}, 10000, 'queue1->maxuserrun'); is($lrms_info->{queues}{queue1}{maxcputime}, 2**31-1, 'queue1->maxcputime'); is($lrms_info->{queues}{queue1}{maxtotalcputime}, undef, 'queue1->maxtotalcputime'); is($lrms_info->{queues}{queue1}{mincputime}, 0, 'queue1->mincputime'); is($lrms_info->{queues}{queue1}{defaultcput}, 2**31-1, 'queue1->defaultcput'); is($lrms_info->{queues}{queue1}{maxwalltime}, 2**31-1, 'queue1->maxwalltime'); is($lrms_info->{queues}{queue1}{minwalltime}, 0, 'queue1->minwalltime'); is($lrms_info->{queues}{queue1}{defaultwallt}, 2**31-1, 'queue1->defaultwallt'); is($lrms_info->{queues}{queue1}{running}, 1, 'queue1->running'); is($lrms_info->{queues}{queue1}{queued}, 1, 'queue1->queued'); is($lrms_info->{queues}{queue1}{suspended}, undef, 'queue1->suspended'); is($lrms_info->{queues}{queue1}{total}, undef, 'queue1->total'); is($lrms_info->{queues}{queue1}{totalcpus}, 1, 'queue1->totalcpus'); is($lrms_info->{queues}{queue1}{preemption}, undef, 'queue1->preemption'); is($lrms_info->{queues}{queue1}{'acl_users'}, undef, 'queue1->acl_users'); is(ref $lrms_info->{queues}{queue1}{nodes}, 'ARRAY', 'queue1->nodes is ARRAY'); is(ref $lrms_info->{queues}{queue1}{users}, 'HASH', 'has users'); is(ref $lrms_info->{queues}{queue1}{users}{user1}, 'HASH', 'has user1'); 
#is_deeply($lrms_info->{queues}{queue1}{users}{user1}{freecpus}, { '1' => undef }, 'user1->freecpus'); is($lrms_info->{queues}{queue1}{users}{user1}{queuelength}, 0, 'user1->queuelength'); is(ref $lrms_info->{jobs}, 'HASH', 'has jobs'); is(ref $lrms_info->{jobs}{'1'}, 'HASH', 'has job id = 1'); is($lrms_info->{jobs}{'1'}{status}, 'R', 'job1->status'); is($lrms_info->{jobs}{'1'}{cpus}, 1, 'job1->cpus'); is($lrms_info->{jobs}{'1'}{rank}, 0, 'job1->rank'); is($lrms_info->{jobs}{'1'}{mem}, 1, 'job1->mem'); is($lrms_info->{jobs}{'1'}{walltime}, 83, 'job1->walltime'); is($lrms_info->{jobs}{'1'}{cputime}, 83, 'job1->cputime'); is($lrms_info->{jobs}{'1'}{reqwalltime}, 360, 'job1->reqwalltime'); is($lrms_info->{jobs}{'1'}{reqcputime}, 360, 'job1->reqcputime'); is(ref $lrms_info->{jobs}{'1'}{nodes}, 'ARRAY', 'job1->nodes is ARRAY'); is(scalar @{$lrms_info->{jobs}{'1'}{nodes}}, 1, 'job1->nodes size'); is($lrms_info->{jobs}{'1'}{nodes}[0], 'test-machine', 'job1->nodes->0'); is(ref $lrms_info->{jobs}{'1'}{comment}, 'ARRAY', 'job1->comment'); is(scalar @{$lrms_info->{jobs}{'1'}{comment}}, 1, 'job1->comment'); is($lrms_info->{jobs}{'1'}{comment}[0], 'test job', 'job1->comment->0'); is(ref $lrms_info->{jobs}{'2'}, 'HASH', 'has job id = 2'); is($lrms_info->{jobs}{'2'}{status}, 'Q', 'job2->status'); is($lrms_info->{jobs}{'2'}{cpus}, 1, 'job2->cpus'); is($lrms_info->{jobs}{'2'}{rank}, 0, 'job2->rank'); is($lrms_info->{jobs}{'2'}{mem}, undef, 'job2->mem'); is($lrms_info->{jobs}{'2'}{walltime}, 0, 'job2->walltime'); is($lrms_info->{jobs}{'2'}{cputime}, 0, 'job2->cputime'); is($lrms_info->{jobs}{'2'}{reqwalltime}, 360, 'job2->reqwalltime'); is($lrms_info->{jobs}{'2'}{reqcputime}, 360, 'job2->reqcputime'); is(ref $lrms_info->{jobs}{'2'}{nodes}, 'ARRAY', 'job2->nodes is ARRAY'); is(scalar @{$lrms_info->{jobs}{'2'}{nodes}}, 0, 'job2->nodes size'); is(ref $lrms_info->{jobs}{'2'}{comment}, 'ARRAY', 'job2->comment'); is(scalar @{$lrms_info->{jobs}{'2'}{comment}}, 1, 'job2->comment'); is($lrms_info->{jobs}{'2'}{comment}[0], 'test job', 'job2->comment->0'); is(ref $lrms_info->{nodes}, 'HASH', 'has nodes'); is(ref $lrms_info->{nodes}{"test-machine"}, 'HASH', 'has test-machine'); is($lrms_info->{nodes}{"test-machine"}{isavailable}, 1, 'test-machine->isavailable'); is($lrms_info->{nodes}{"test-machine"}{isfree}, 1, 'test-machine->isfree'); is($lrms_info->{nodes}{"test-machine"}{tags}, undef, 'test-machine->tags'); is($lrms_info->{nodes}{"test-machine"}{vmem}, undef, 'test-machine->vmem'); is($lrms_info->{nodes}{"test-machine"}{pmem}, 1, 'test-machine->pmem'); is($lrms_info->{nodes}{"test-machine"}{slots}, 1, 'test-machine->slots'); is($lrms_info->{nodes}{"test-machine"}{pcpus}, 1, 'test-machine->pcpus'); is($lrms_info->{nodes}{"test-machine"}{sysname}, 'Linux', 'test-machine->sysname'); is($lrms_info->{nodes}{"test-machine"}{release}, undef, 'test-machine->release'); is($lrms_info->{nodes}{"test-machine"}{machine}, 'i686', 'test-machine->machine'); }); $suite->testing_done(); nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/PaxHeaders/command-simulator.sh0000644000000000000000000000013215067751425026637 xustar0030 mtime=1759499029.865363058 30 atime=1759499029.864435728 30 ctime=1759499029.877696678 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/command-simulator.sh0000755000175000002070000000542215067751425030547 0ustar00mockbuildmock00000000000000#!/bin/bash # # Script to simulate a given command. Read 'instructions' from outcome file, and # acts accordingly. 
Each time this script is run, the outcome file is checked # for content. If no content exist, script outputs ${SUCCESS_OUTPUT} and exits # with code 0. Otherwise the first line from the outcome file is read, # 'eval'-ed and then removed from the file. # if test ! -f ${SIMULATOR_OUTCOME_FILE}; then SIMULATOR_OUTCOME_FILE="simulator-outcome.dat" fi if test -z ${SIMULATOR_ERRORS_FILE}; then SIMULATOR_ERRORS_FILE="simulator-errors.dat" fi cmd="$(basename ${0})" cmd_n_args="${cmd} ${*}" if test -f "${SIMULATOR_OUTCOME_FILE}"; then # Extract rargs from outcome file in order to be able to do reqular expresssion matching. # If rargs matches cmd_n_args, use rargs as key at lookup in outcom file. while read rargs; do echo "${cmd_n_args}" | sed -n -e "${rargs} q 100" if test ${?} == 100 && test "x${rargs}" != "x"; then # Use rargs as key. cmd_n_args="${rargs}" break fi done <<< "$(sed -n -e "/^[[:space:]]*rargs=\"[^\"]*\"/ s/^[[:space:]]*rargs=\"\([^\"]*\)\".*/\1/ p" "${SIMULATOR_OUTCOME_FILE}")" # Do not pipe output into while loop, because it creates a subshell, and then setting cmd_n_args has no effect. # Escape special characters so they are not interpretted by sed at lookup. cmd_n_args="$(echo "${cmd_n_args}" | sed -e 's/[][\/$*.^|]/\\&/g' -e 's/[(){}]/\\\\&/g')" # Lookup cmd_n_args in outcome file, and return corresponding options. outcome="$(sed -n -e ' /^[[:space:]]*r\?args="'"${cmd_n_args}"'"/ { :ARGS n /^[[:space:]]*\(#\|$\)/ b ARGS /^[[:space:]]*\(sleep\|rc\)=/ {p; b ARGS} /^[[:space:]]*output=<<<\([[:alnum:]]\+\)/ { :OUTPUT N /output=<<<\([[:alnum:]]\+\)\n.*\n\1$/ ! b OUTPUT s/output=<<<\([[:alnum:]]\+\)\(\n.*\n\)\1/read -r -d "" output <<"\1"\2\1/ p q 100 } /^[[:space:]]*output="[^\"]*"/ {p; q 100} q 50 }' ${SIMULATOR_OUTCOME_FILE})" sed_rc=${?} if test ${sed_rc} == 50; then printf "Syntax error in simulator outcome file ('%s') options for arguments '%s'\n" "${SIMULATOR_OUTCOME_FILE}" "${cmd_n_args}" >> ${SIMULATOR_ERRORS_FILE} exit 1 fi if test "${cmd}" == "sleep" && test ${sed_rc} != 100; then # Do not sleep - only if instructed to in SIMULATOR_OUTCOME_FILE exit 0 fi if test ${sed_rc} != 100; then echo "Command '${cmd} ${@}' was not expected to be executed." >> ${SIMULATOR_ERRORS_FILE} exit 1 fi eval "${outcome}" if test ! -z "${output+yes}"; then echo "${output}" else echo "${SUCCESS_OUTPUT}" fi if test ! -z "${sleep+yes}"; then /bin/sleep ${sleep} fi if test ! -z "${rc+yes}"; then exit ${rc} else exit 0 fi else echo "${SUCCESS_OUTPUT}" exit 0 fi nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/PaxHeaders/sge.t0000644000000000000000000000013215067751327023614 xustar0030 mtime=1759498967.763329241 30 atime=1759498967.868493696 30 ctime=1759499029.881753595 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/test/sge.t0000644000175000002070000002453515067751327025527 0ustar00mockbuildmock00000000000000#!/usr/bin/perl # TODO: This test is broken! 
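# Harness overview (a sketch based on InfoproviderTestSuite.pm): new()
# records the LRMS name, test() runs the closure below, and collect()
# creates a scratch directory, writes $simulator_output to
# simulator-outcome.dat, symlinks each name in @progs to
# command-simulator.sh, and finally calls LRMSInfo::collect($cfg),
# returning the info structure that the is()/ok() assertions inspect.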
use strict; use InfoproviderTestSuite; my $suite = new InfoproviderTestSuite('sge'); $suite->test('basic', sub { my @progs = qw(qsub qstat qconf qhost); my $simulator_output = <<'ENDSIMULATOROUTPUT'; # SGE test suite sge-1 # SGE version 6.2 # 1 nodes 1 cpus # 1 queue, no jobs # # Defines behaviour for: # qstat -help # qstat -u '*' -f (would neeed -F for older SGE) # qconf -sep # qconf -sconf global # qconf -sql # qconf -sq all.q # qstat -j ## Also these for the old module: # qstat -g c # qstat -u * -s rs # qstat -u '*' -s p args="qstat -help" output=<< ENDOUTPUT args="qstat -f" output="host=node1.site.org" args="qhost -F -h node1" output="host=node1.site.org" ENDSIMULATOROUTPUT my $cfg = { sge_bin_path => "/bin", sge_root => "/bin", queues => {}, jobs => [], loglevel => '5' }; $cfg->{queues}{'all.q'} = {users => ['user1']}; my $lrms_info = $suite->collect(\@progs, $simulator_output, $cfg); is(ref $lrms_info, 'HASH', 'result type'); is(ref $lrms_info->{cluster}, 'HASH', 'has cluster'); is($lrms_info->{cluster}{lrms_type}, 'SGE', 'lrms_type'); is($lrms_info->{cluster}{lrms_version}, '6.2', 'lrms_version'); is($lrms_info->{cluster}{runningjobs}, 0, 'runningjobs'); #is($lrms_info->{cluster}{totalcpus}, 1, 'totalcpus'); is($lrms_info->{cluster}{queuedcpus}, 0, 'queuedcpus'); is($lrms_info->{cluster}{queuedjobs}, 0, 'queuedjobs'); is($lrms_info->{cluster}{usedcpus}, 0, 'usedcpus'); #is($lrms_info->{cluster}{cpudistribution}, '1cpu:1', 'cpudistribution'); is(ref $lrms_info->{queues}, 'HASH', 'has queues'); is(ref $lrms_info->{queues}{'all.q'}, 'HASH', 'has queue all.q'); ok($lrms_info->{queues}{'all.q'}{status} >= 0, 'all.q->status'); is($lrms_info->{queues}{'all.q'}{totalcpus}, 1, 'all.q->totalcpus'); is($lrms_info->{queues}{'all.q'}{queued}, 0, 'all.q->queued'); is($lrms_info->{queues}{'all.q'}{running}, 0, 'all.q->running'); is($lrms_info->{queues}{'all.q'}{maxrunning}, 1, 'all.q->maxrunning'); is(ref $lrms_info->{queues}{'all.q'}{users}, 'HASH', 'has users'); is(ref $lrms_info->{queues}{'all.q'}{users}{user1}, 'HASH', 'has user1'); is($lrms_info->{queues}{'all.q'}{users}{user1}{queuelength}, 0, 'user1->queuelenght'); is_deeply($lrms_info->{queues}{'all.q'}{users}{user1}{freecpus}, { '1' => 0 }, 'user1->freecpus'); #is(ref $lrms_info->{nodes}, 'HASH', 'has nodes'); #is(ref $lrms_info->{nodes}{node1}, 'HASH', 'has node1'); #is($lrms_info->{nodes}{node1}{isavailable}, 1, 'node1->isavailable'); #is($lrms_info->{nodes}{node1}{slots}, 2, 'node1->slots'); }); $suite->testing_done(); nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/SGEmod.pm0000644000000000000000000000013215067751327023346 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.868493696 30 ctime=1759499029.800075836 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/SGEmod.pm0000644000175000002070000011227415067751327025257 0ustar00mockbuildmock00000000000000package SGEmod; require Exporter; our @ISA = qw(Exporter); our @EXPORT_OK = qw(get_lrms_info get_lrms_options_schema); use POSIX qw(floor ceil); # force locale LANG to POSIX see bug #3314 $ENV{LANG}="POSIX"; use LogUtils; use XML::Simple qw(:strict); use strict; our $path; our $options; our $lrms_info = {}; # status of nodes and queues our %node_stats = (); # all running jobs, indexed by job-ID and task-ID our %running_jobs = (); # all waiting jobs, indexed by job-ID our %waiting_jobs = (); # Switch to choose between codepaths for SGE 6.x (the default) and SGE 5.x our $compat_mode; our $sge_type; our $sge_version; our $cpudistribution; our @queue_names; our 
$queuedjobs = 0; our $queuedcpus = 0; our $max_jobs; our $max_u_jobs; our %user_total_jobs; our %user_waiting_jobs; our $log = LogUtils->getLogger(__PACKAGE__); ########################################## # Public interface ########################################## sub get_lrms_options_schema { return { 'sge_root' => '', 'sge_bin_path' => '', 'sge_cell' => '*', 'sge_qmaster_port' => '*', 'sge_execd_port' => '*', 'queues' => { '*' => { 'users' => [ '' ], 'sge_queues' => '*', 'sge_jobopts' => '*' } }, 'jobs' => [ '' ] }; } sub get_lrms_info($) { $options = shift; lrms_init(); type_and_version(); run_qconf(); run_qstat(); require Data::Dumper; import Data::Dumper qw(Dumper); #print STDERR Dumper(\%node_stats); #print STDERR Dumper(\%running_jobs); #print STDERR Dumper(\%waiting_jobs); cluster_info(); my %qconf = %{$options->{queues}}; for my $qname ( keys %qconf ) { queue_info($qname); } my $jids = $options->{jobs}; jobs_info($jids); for my $qname ( keys %qconf ) { my $users = $qconf{$qname}{users}; users_info($qname,$users); } # recycle memory %running_jobs = (); %waiting_jobs = (); nodes_info(); return $lrms_info } ########################################## # Private subs ########################################## # # Generic function to process the ouptut of an external program. The callback # function will be invoked with a file descriptor receiving the standard output # of the external program as it's first argument. # sub run_callback { my ($command, $callback, @extraargs) = @_; my ($executable) = split ' ', $command; $log->error("Not an executable: $executable") unless (-x "$executable"); local *QQ; $log->error("Failed creating pipe from: $command: $!") unless open QQ, "$command |"; &$callback(*QQ, @extraargs); close QQ; my $exitcode = $? >> 8; $log->info("Failed running command (exit code $exitcode): $command") if $?; return ! $?; } # # Generic function to process the ouptut of an external program. The callback # function will be invoked for each line of output from the external program. # sub loop_callback { my ($command, $callback) = @_; return run_callback($command, sub { my $fh = shift; my $line; chomp $line and &$callback($line) while defined ($line = <$fh>); }); } # # Determine SGE variant and version. # Set compatibility mode if necessary. 
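# The first line printed by `qstat -help` is expected to look like,
# e.g., "GE 6.2u5": the first word is taken as the SGE type and the
# second as the version; a version of 5.x or pre6.0 enables the
# SGE 5.x compatibility code path.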
# sub type_and_version { run_callback("$path/qstat -help", sub { my $fh = shift; my ($firstline) = <$fh>; ($sge_type, $sge_version) = split " ", $firstline; }); $compat_mode = 0; # version 6.x assumed if ($sge_type !~ /GE/ or not $sge_version) { $log->error("Cannot indentify SGE version from output of '$path/qstat -help': $sge_type $sge_version"); } elsif ($sge_version =~ /^5\./ or $sge_version =~ /^pre6.0/) { $compat_mode = 1; $log->info("Using SGE 5.x compatibility mode"); } } # # Processes an array task definition (i.e.: '3,4,6-8,10-20:2') # and returns the number of individual tasks # sub count_array_spec($) { my $count = 0; for my $spec (split ',', shift) { # handles expressions like '6-10:2' and '6-10' and '6' return 0 unless $spec =~ '^(\d+)(?:-(\d+)(?::(\d+))?)?$'; my ($lower,$upper,$step) = ($1,$2,$3); $upper = $lower unless defined $upper; $step = 1 unless $step; $count += 1 + floor(abs($upper-$lower)/$step); } return $count; } # # this block contains the functions used to parse the output of qstat # { # shared variables my $line; # used to keep the most recently line read from qstat my $currentjobid = undef; my $currentqueue = undef; my $currentnode = undef; #### Regular expression matching a queue line, like: # libero@compute-3-7.local BPC 0/8 4.03 lx24-amd64 S # all.q@hyper.uio.no BIP 0/0/1 0.00 lx24-x86 # all.q@compute-14-19.local BIPC 0/8 -NA- lx24-amd64 Adu # corvus.q BICP 0/16 99.99 solaris64 aAdu my $queue_regex = '^\s*(\S+)\s+\w+\s+(?:(\d+)/)?(\d+)/(\d+)\s+(\S+)\s+\S+(?:\s+(\w+))?\s*$'; #### Regular expression matching complex lines from qstat -F # hl:num_proc=1 # hl:mem_total=1009.523M # qf:qname=shar # qf:hostname=squark.uio.no my $complex_regex = '\s+(\w\w:\w+)=(.*)\s*'; #### Regular expressions matching jobs (for SGE version 6.x), like: # 4518 2.71756 brkhrt_5ch whe042 r 08/20/2008 10:50:23 4 # 1602 2.59942 runmain_4_ user1 r 08/13/2008 22:42:17 1 21 # 1872 2.59343 test_GG1 otherusr Eqw 08/05/2008 17:36:45 1 4,6-8:1 # 7539 7.86785 methane_i user11 qw 06/26/2008 11:16:52 4 #### Assume job name column is exactly 10 characters wide. my $jobid_prio_name_regex6 = '(\d+)\s+[.\d]+\s+\S.{9}'; my $user_state_date_regex6 = '(\S+)\s+(\w+)\s+(\d\d/\d\d/\d{4} \d\d:\d\d:\d\d)'; my $slots_tid_regex6 = '(\d+)(?:\s+(\d+))?'; # for running jobs my $slots_taskdef_regex6 = '(\d+)(?:\s+([:\-\d,]+))?'; # for queued jobs my $base_regex6 = '^\s*'.$jobid_prio_name_regex6.' '.$user_state_date_regex6; my $running_regex6 = $base_regex6.'\s+'.$slots_tid_regex6.'\s*$'; my $waiting_regex6 = $base_regex6.'\s+'.$slots_taskdef_regex6.'\s*$'; #### Regular expressions matching jobs (for SGE version 5.x), like: # 217 0 submit.tem lemlib r 07/21/2008 09:55:32 MASTER # 0 submit.tem lemlib r 07/21/2008 09:55:32 SLAVE # 27 0 exam.sh c01p01 r 02/03/2006 16:40:49 MASTER 2 # 0 exam.sh c01p01 r 02/03/2006 16:40:49 SLAVE 2 # 254 0 CPMD baki qw 08/14/2008 10:12:29 # 207 0 STDIN adi qw 08/15/2008 17:23:37 2-10:2 #### Assume job name column is exactly 10 characters wide. my $jobid_prio_name_regex5 = '(?:(\d+)\s+)?[.\d]+\s+\S.{9}'; my $user_state_date_regex5 = '(\S+)\s+(\w+)\s+(\d\d/\d\d/\d{4} \d\d:\d\d:\d\d)'; my $master_tid_regex5 = '(MASTER|SLAVE)(?:\s+(\d+))?'; # for running jobs my $taskdef_regex5 = '(?:\s+([:\-\d,]+))?'; # for queued jobs my $base_regex5 = '^\s*'.$jobid_prio_name_regex5.' 
'.$user_state_date_regex5; my $running_regex5 = $base_regex5.'\s+'.$master_tid_regex5.'\s*$'; my $waiting_regex5 = $base_regex5.$taskdef_regex5.'\s*$'; sub run_qstat { my $command = "$path/qstat -u '*'"; $command .= $compat_mode ? " -F" : " -f"; die unless run_callback($command, \&qstat_parser_callback); } sub qstat_parser_callback { my $fh = shift; # validate header line $line = <$fh>; return unless defined $line; # if there was no output my @hdr = split ' ',$line; $log->error("qstat header line not recognized") unless ($hdr[0] eq 'queuename'); $line = <$fh>; while (defined $line and $line =~ /^--------------/) { handle_queue($fh); handle_running_jobs($fh); } return unless defined $line; # if there are no waiting jobs $line = <$fh>; $log->error("Unexpected line from qstat") unless $line =~ /############/; $line = <$fh>; $log->error("Unexpected line from qstat") unless $line =~ /PENDING JOBS/; $line = <$fh>; $log->error("Unexpected line from qstat") unless $line =~ /############/; $line = <$fh>; handle_waiting_jobs($fh); # there should be no lines left $log->error("Cannot parse qstat output line: $line") if defined $line; } sub handle_queue { my $fh = shift; $line = <$fh>; if (defined $line and $line =~ /$queue_regex/) { my ($qname,$used,$total,$load,$flags) = ($1,$3,$4,$5,$6||''); $line = <$fh>; if (not $compat_mode) { ($currentqueue, $currentnode) = split '@',$qname,2; unless ($currentnode) { $log->error("Queue name of the form 'queue\@host' expected. Got: $qname"); } } else { $currentqueue = $qname; # parse complexes to extract hostname while (defined $line and $line =~ /$complex_regex/) { $currentnode = $2 if $1 eq 'qf:hostname'; $line = <$fh>; } $log->warning("Could not extract hostname for queue $qname") unless $currentnode; } if ($currentnode) { # Was this node not listed with qhost -xml ? if (not exists $node_stats{$currentnode} or not exists $node_stats{$currentnode}{totalcpus}) { # Node name may have been truncated by qstat -f if (length $qname >= 30) { # Try to match it with a node already listed by qhost -xml my @fullnames = grep { length($_) >= length($currentnode) and $_ =~ m/^\Q$currentnode\E/ } grep { exists $node_stats{$_}{totalcpus} } keys %node_stats; $currentnode = $fullnames[0] if @fullnames == 1; } # Node name may have been truncated by qhost -xml for (my $name = $currentnode; length $name >= 24; chop $name) { $currentnode = $name if exists $node_stats{$name} and exists $node_stats{$name}{totalcpus} } } if (not exists $node_stats{$currentnode} or not exists $node_stats{$currentnode}{totalcpus}) { $log->warning("Queue $currentqueue\@$currentnode cannot be matched" ." with a hostname listed by qhost -xml"); } $node_stats{$currentnode}{load} = $load unless $load eq '-NA-'; $node_stats{$currentnode}{runningslots} ||= 0; # will be counted later $node_stats{$currentnode}{queues}{$currentqueue} = {usedslots=>$used, totalslots=>$total, suspslots=>'0', flags=>$flags}; } } } # Running jobs in a queue instance sub handle_running_jobs { my $fh = shift; my $regex = $compat_mode ? 
$running_regex5 : $running_regex6; while (defined $line and $line =~ /$regex/) { if (not $compat_mode) { ### SGE v 6.x ### my ($jobid,$user,$slots,$taskid) = ($1,$2,$5,$6); $taskid = 0 unless $taskid; # 0 is an invalid task id # Index running jobs by job-ID and task-ID my $task = $running_jobs{$jobid}{$taskid} || {}; $running_jobs{$jobid}{$taskid} = $task; $user_total_jobs{$user}++; $task->{user} = $user; $task->{state} = $3; $task->{date} = $4; $task->{queue} = $currentqueue; $task->{nodes}{$currentnode} = $slots; $task->{slots} += $slots; if ($task->{state} =~ /[sST]/) { $node_stats{$currentnode}{queues}{$currentqueue}{suspslots} += $slots; } else { $node_stats{$currentnode}{runningslots} += $slots; } } else { ### SGE 5.x, pre 6.0 ### my ($jobid,$user,$role,$taskid) = ($1,$2,$5,$6); $taskid = 0 unless $taskid; # 0 is an invalid task id if ($role eq 'MASTER' and not defined $jobid) { $log->error("Cannot parse qstat output line: $line"); } elsif (not defined $jobid) { $jobid = $currentjobid; } else { $currentjobid = $jobid; } # Index running jobs by job-ID and task-ID my $task = $running_jobs{$jobid}{$taskid} || {}; $running_jobs{$jobid}{$taskid} = $task; if ($role eq 'MASTER') { # each job has one MASTER $user_total_jobs{$user}++; $task->{user} = $user; $task->{state} = $3; $task->{date} = $4; $task->{queue} = $currentqueue; $task->{slots}++; $task->{nodes}{$currentnode}++; $task->{is_parallel} = 0; if ($task->{state} =~ /[sST]/) { $node_stats{$currentnode}{queues}{$currentqueue}{suspslots}++; } else { $node_stats{$currentnode}{runningslots}++; } } elsif (not $task->{is_parallel}) { # First SLAVE following the MASTER $task->{is_parallel} = 1; # Don't count this SLAVE } else { # Other SLAVEs, resume counting $task->{slots}++; $task->{nodes}{$currentnode}++; if ($task->{state} =~ /[sST]/) { $node_stats{$currentnode}{queues}{$currentqueue}{suspslots}++; } else { $node_stats{$currentnode}{runningslots}++; } } } last unless defined ($line = <$fh>); } } sub handle_waiting_jobs { my $fh = shift; my $rank = 1; my $regex = $compat_mode ? $waiting_regex5 : $waiting_regex6; while (defined $line and $line =~ /$regex/) { if (not $compat_mode) { ### SGE v 6.x ### my ($jobid,$user,$taskdef) = ($1,$2,$6); my $ntasks = $taskdef ? count_array_spec($taskdef) : 1; unless ($ntasks) { $log->error("Failed parsing task definition: $taskdef"); } $waiting_jobs{$jobid}{user} = $user; $waiting_jobs{$jobid}{state} = $3; $waiting_jobs{$jobid}{date} = $4; $waiting_jobs{$jobid}{slots} = $5; $waiting_jobs{$jobid}{tasks} += $ntasks; $waiting_jobs{$jobid}{rank} = $rank; $user_total_jobs{$user} += $ntasks; $user_waiting_jobs{$user} += $ntasks; $rank += $ntasks; } else { ### SGE 5.x, pre 6.0 ### my ($jobid,$user,$taskdef) = ($1,$2,$5); my $ntasks = $taskdef ? count_array_spec($taskdef) : 1; unless ($ntasks) { $log->error("Failed parsing task definition: $taskdef"); } # The number of slots is not available from qstat output. $waiting_jobs{$jobid}{user} = $user; $waiting_jobs{$jobid}{state} = $3; $waiting_jobs{$jobid}{date} = $4; $waiting_jobs{$jobid}{tasks} += $ntasks; $waiting_jobs{$jobid}{rank} = $rank; # SGE 5.x does not list number of slots. Assuming 1 slot per job! 
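# (qstat on SGE 5.x prints no slots column for pending jobs, so the true
# slot count of a pending parallel job cannot be known at this point; 1 is
# used as a safe lower bound.)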
$waiting_jobs{$jobid}{slots} = 1; $user_total_jobs{$user} += $ntasks; $user_waiting_jobs{$user} += $ntasks; $rank += $ntasks; } last unless defined ($line = <$fh>); } } } # end of qstat block sub run_qconf { # cpu distribution $cpudistribution = ''; # qconf -sep is deprecated, therefore we use qhost -xml my $qhost_xml_output = `$path/qhost -xml` or $log->error("Failed listing licensed processors"); use XML::Simple qw(:strict); my $xml = XMLin($qhost_xml_output, KeyAttr => { host => 'name' }, ForceArray => [ 'host' ]); for my $h ( keys %{$xml->{host}} ) { next if $h eq "global"; $node_stats{$h}{arch} = $xml->{host}{$h}{"hostvalue"}[0]{content}; $node_stats{$h}{totalcpus} = $xml->{host}{$h}{"hostvalue"}[1]{content}; } my %cpuhash; $cpuhash{$_->{totalcpus}}++ for values %node_stats; while ( my ($cpus,$count) = each %cpuhash ) { $cpudistribution .= "${cpus}cpu:$count " if $cpus > 0; } chop $cpudistribution; # global limits loop_callback("$path/qconf -sconf global", sub { my $l = shift; $max_jobs = $1 if $l =~ /^max_jobs\s+(\d+)/; $max_u_jobs = $1 if $l =~ /^max_u_jobs\s+(\d+)/; }) or $log->error("Failed listing global configurations"); # list all queues loop_callback("$path/qconf -sql", sub { push @queue_names, shift }) or $log->error("Failed listing all queues"); chomp @queue_names; } sub req_limits ($) { my $line = shift; my ($reqcputime, $reqwalltime); while ($line =~ /[sh]_cpu=(\d+)/g) { $reqcputime = $1 if not $reqcputime or $reqcputime > $1; } while ($line =~ /[sh]_rt=(\d+)/g) { $reqwalltime = $1 if not $reqwalltime or $reqwalltime > $1; } return ($reqcputime, $reqwalltime); } sub lrms_init() { $ENV{SGE_ROOT} = $options->{sge_root} || $ENV{SGE_ROOT}; $log->error("could not determine SGE_ROOT") unless $ENV{SGE_ROOT}; $ENV{SGE_CELL} = $options->{sge_cell} || $ENV{SGE_CELL} || 'default'; $ENV{SGE_QMASTER_PORT} = $options->{sge_qmaster_port} if $options->{sge_qmaster_port}; $ENV{SGE_EXECD_PORT} = $options->{sge_execd_port} if $options->{sge_execd_port}; for (split ':', $ENV{PATH}) { $ENV{SGE_BIN_PATH} = $_ and last if -x "$_/qsub"; } $ENV{SGE_BIN_PATH} = $options->{sge_bin_path} || $ENV{SGE_BIN_PATH}; $log->error("SGE executables not found") unless -x "$ENV{SGE_BIN_PATH}/qsub"; $path = $ENV{SGE_BIN_PATH}; } sub cluster_info () { my $lrms_cluster = {}; # add this cluster to the info tree $lrms_info->{cluster} = $lrms_cluster; # Figure out SGE type and version $lrms_cluster->{lrms_glue_type} = "sungridengine"; $lrms_cluster->{lrms_type} = $sge_type; $lrms_cluster->{lrms_version} = $sge_version; $lrms_cluster->{cpudistribution} = $cpudistribution; $lrms_cluster->{totalcpus} += $_->{totalcpus} || 0 for values %node_stats; # Count used/free CPUs and queued jobs in the cluster # Note: SGE has the concept of "slots", which roughly corresponds to the # concept of "cpus" in ARC (PBS) LRMS interface. 
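# As a small worked example of the counting below (hypothetical numbers):
# a parallel task holding 4 slots on node1 and 4 slots on node2 adds 1 to
# $runningjobs and 8 to $usedcpus, while a suspended task (state s, S or T)
# still adds 1 to $runningjobs but contributes nothing to $usedcpus.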
my $usedcpus = 0; my $runningjobs = 0; for my $tasks (values %running_jobs) { for my $task (values %$tasks) { $runningjobs++; # Skip suspended jobs $usedcpus += $task->{slots} unless $task->{state} =~ /[sST]/; } } for my $job (values %waiting_jobs) { $queuedjobs += $job->{tasks}; $queuedcpus += $job->{tasks} * $job->{slots}; } $lrms_cluster->{usedcpus} = $usedcpus; $lrms_cluster->{queuedcpus} = $queuedcpus; $lrms_cluster->{queuedjobs} = $queuedjobs; $lrms_cluster->{runningjobs} = $runningjobs; # List LRMS queues #$lrms_cluster->{queue} = [ @queue_names ]; } sub queue_info ($) { my $qname = shift; my $lrms_queue = {}; # add this queue to the info tree $lrms_info->{queues}{$qname} = $lrms_queue; # multiple (even overlapping) queues are supported. my @qnames = ($qname); # This code prepares for a scenario where grid jobs in a ComputingShare are # submitted by a-rex without requesting a specific queue. Jobs can then end up # in any of several possible queues. This function should then try to aggregate # values over all the queues in a list. # OBS: more work is needed to make this work if ($options->{queues}{$qname}{sge_queues}) { @qnames = split ' ', $options->{queues}{$qname}{sge_queues}; } # NOTE: # In SGE the relation between CPUs and slots is quite elastic. Slots is # just one of the complexes that the SGE scheduler takes into account. It # is quite possible (if the configuration permits it) to have more slots # used by jobs than total CPUs on a node. On the other side, even if there # are unused slots in a queue, jobs might be prevented from starting because of # some other constraints. # queuestatus - will be negative only if all queue instances have a status # flag set other than 'a'. # queueused - sum of slots used by jobs, not including suspended jobs. # queuetotal - sum of slots limited by the number of cpus on each node. # queuefree - attempt to calculate free slots. my $queuestatus = -1; my $queuetotal = 0; my $queuefree = 0; my $queueused = 0; for my $nodename (keys %node_stats) { my $node = $node_stats{$nodename}; my $queues = $node->{queues}; next unless defined $queues; my $nodetotal = 0; # number of slots on this node in the selected queues my $nodemax = 0; # largest number of slots in any of the selected queues my $nodefree = 0; my $nodeused = 0; for my $name (keys %$queues) { next unless grep {$name eq $_} @qnames; my $q = $queues->{$name}; $nodetotal += $q->{totalslots}; $nodemax = $q->{totalslots} if $nodemax < $q->{totalslots}; $nodeused += $q->{usedslots} - $q->{suspslots}; # Any flag on the queue implies that the queue is not taking more jobs. $nodefree += $q->{totalslots} - $q->{usedslots} unless $q->{flags}; # The queue is healthy if at least one instance is in no state other # than normal or (a)larm. See man qstat for the meaning of the flags. $queuestatus = 1 unless $q->{flags} =~ /[dosuACDE]/; } # Cheating a bit here. SGE's scheduler would consider load averages # among other things to decide if there are free slots. if (defined $node->{totalcpus}) { my $maxslots = $node->{totalcpus}; if ($nodetotal > $maxslots) { $log->debug("Capping nodetotal ($nodename): $nodetotal > ".$maxslots); $nodetotal = $maxslots; } if ($nodefree > $maxslots - $node->{runningslots}) { $log->debug("Capping nodefree ($nodename): $nodefree > ".$maxslots." 
- ".$node->{runningslots}); $nodefree = $maxslots - $node->{runningslots}; $nodefree = 0 if $nodefree < 0; } } else { $log->info("Node not listed by qhost -xml: $nodename"); } $queuetotal += $nodetotal; $queuefree += $nodefree; $queueused += $nodeused; } $lrms_queue->{totalcpus} = $queuetotal; #$lrms_queue->{freecpus} = $queuefree; $lrms_queue->{running} = $queueused; $lrms_queue->{status} = $queuestatus; $lrms_queue->{MaxSlotsPerJob} = $queuetotal; # settings in the config file override my $qopts = $options->{queues}{$qname}; $lrms_queue->{totalcpus} = $qopts->{totalcpus} if $qopts->{totalcpus}; # reserve negative numbers for error states $log->warning("Negative status for queue $qname: $lrms_queue->{status}") if $lrms_queue->{status} < 0; # Grid Engine has hard and soft limits for both CPU time and # wall clock time. Nordugrid schema only has CPU time. # The lowest of the 2 limits is returned by this code. # This code breaks if there are some nodes with separate limits: # h_rt 48:00:00,[cpt.uio.no=24:00:00] my $command = "$path/qconf -sq @qnames"; loop_callback($command, sub { my $l = shift; if ($l =~ /^[sh]_rt\s+(\S+)/) { return if $1 eq 'INFINITY'; my $timelimit; if ($1 =~ /^(\d+):(\d+):(\d+)$/) { my ($h,$m,$s) = ($1,$2,$3); $timelimit = $s + 60 * ($m + 60 * $h); } else { $log->warning("Error extracting time limit from line: $l"); return; } if (not defined $lrms_queue->{maxwalltime} or $lrms_queue->{maxwalltime} > $timelimit) { $lrms_queue->{maxwalltime} = $timelimit; } } elsif ($l =~ /^[sh]_cpu\s+(\S+)/) { return if $1 eq 'INFINITY'; my $timelimit; if ($1 =~ /^(\d+):(\d+):(\d+)$/) { my ($h,$m,$s) = ($1,$2,$3); $timelimit = $s + 60 * ($m + 60 * $h); } else { $log->warning("Error extracting time limit from line: $l"); return; } if (not defined $lrms_queue->{maxcputime} or $lrms_queue->{maxcputime} > $timelimit) { $lrms_queue->{maxcputime} = $timelimit; } } }) or $log->error("Failed listing named queues"); # Grid Engine puts queueing jobs in single "PENDING" state pool, # so here we report the total number queueing jobs in the cluster. $lrms_queue->{queued} = $queuedjobs; # nordugrid-queue-maxrunning # nordugrid-queue-maxqueuable # nordugrid-queue-maxuserrun # The total max running jobs is the number of slots for this queue $lrms_queue->{maxrunning} = $lrms_queue->{totalcpus}; # SGE has a global limit on total number of jobs, but not per-queue limit. # This global limit gives an upper bound for maxqueuable and maxrunning if ($max_jobs) { $lrms_queue->{maxqueuable} = $max_jobs if $max_jobs; $lrms_queue->{maxrunning} = $max_jobs if $lrms_queue->{maxrunning} > $max_jobs; } if (defined $max_u_jobs and defined $lrms_queue->{maxuserrun} and $lrms_queue->{maxuserrun} > $max_u_jobs) { $lrms_queue->{maxuserrun} = $max_u_jobs; } } sub jobs_info ($) { # LRMS job IDs from Grid Manager my $jids = shift; my $lrms_jobs = {}; # add jobs to the info tree $lrms_info->{jobs} = $lrms_jobs; my ($job, @running, @queueing); # loop through all requested jobs for my $jid (@$jids) { if (defined $running_jobs{$jid} and not defined $running_jobs{$jid}{0}) { $log->warning("SGE job $jid is an array job. 
Unable to handle it"); } elsif (defined ($job = $running_jobs{$jid}{0})) { push @running, $jid; my $user = $job->{user}; $user_total_jobs{$user}++; # OBS: it's assumed that jobs in this loop are not part of array # jobs, which is true for grid jobs (non-array jobs have taskid 0) if ($job->{state} =~ /[rt]/) { # running or transfering $lrms_jobs->{$jid}{status} = 'R'; } elsif ($job->{state} =~ /[sST]/) { # suspended $lrms_jobs->{$jid}{status} = 'S'; } else { # Shouldn't happen $lrms_jobs->{$jid}{status} = 'O'; push @{$lrms_jobs->{$jid}{comment}}, "Unexpected SGE state: $job->{state}"; $log->warning("SGE job $jid is in an unexpected state: $job->{state}"); } # master node for parallel runs my ($cluster_queue, $exec_host) = split '@', $job->{queue}; $lrms_jobs->{$jid}{nodes} = [ $exec_host ] if $exec_host; $lrms_jobs->{$jid}{cpus} = $job->{slots}; } elsif (defined ($job = $waiting_jobs{$jid})) { push @queueing, $jid; $lrms_jobs->{$jid}{rank} = $job->{rank}; # Old SGE versions do not list the number of slots for queing jobs $lrms_jobs->{$jid}{cpus} = $job->{slots} if not $compat_mode; if ($job->{state} =~ /E/) { # DRMAA: SYSTEM_ON_HOLD ? # TODO: query qacct for error msg $lrms_jobs->{$jid}{status} = 'O'; } elsif ($job->{state} =~ /h/) { # Job is on hold $lrms_jobs->{$jid}{status} = 'H'; } elsif ($job->{state} =~ /w/ and $job->{state} =~ /q/) { # Normally queued $lrms_jobs->{$jid}{status} = 'Q'; } else { # Shouldn't happen $lrms_jobs->{$jid}{status} = 'O'; push @{$lrms_jobs->{$jid}{comment}}, "Unexpected SGE state: $job->{state}"; $log->warning("SGE job $jid is in an unexpected state: $job->{state}"); } } else { # The job has finished. # Querying accounting system is slow, so we skip it for now. # That will be done by scan-sge-jobs. $log->debug("SGE job $jid has finished"); $lrms_jobs->{$jid}{status} = 'EXECUTED'; $lrms_jobs->{$jid}{comment} = []; } } my $jid; # Running jobs $jid = undef; my ($jidstr) = join ',', @running; loop_callback("$path/qstat -j $jidstr", sub { my $l = shift; if ($l =~ /^job_number:\s+(\d+)/) { $jid=$1; } elsif ($l =~ /^usage/) { # OBS: array jobs have multiple 'usage' lines, one per runnig task # Memory usage in kB # SGE reports vmem and maxvmem. # maxvmem chosen here if ($l =~ /maxvmem=(\d+(?:\.\d+)?)\s*(\w)/) { my $mult = 1024; if ($2 eq "M") {$mult = 1024} if ($2 eq "G") {$mult = 1024*1024} $lrms_jobs->{$jid}{mem} = int($mult*$1); } # used cpu time in minutes if ($l =~ /cpu=(?:(\d+):)?(\d+):(\d\d):(\d\d)/) { my ($d,$h,$m,$s) = ($1||0,$2,$3,$4); my $cputime = $s + 60*($m + 60*($h + 24*$d)); $lrms_jobs->{$jid}{cputime} = $cputime; } } elsif ($l =~ /^hard resource_list/) { my ($reqcputime, $reqwalltime) = req_limits($l); $lrms_jobs->{$jid}{reqcputime} = $reqcputime if $reqcputime; $lrms_jobs->{$jid}{reqwalltime} = $reqwalltime if $reqwalltime; } }) or $log->warning("Failed listing named jobs"); # Waiting jobs $jidstr = join ',', @queueing; $jid = undef; loop_callback("$path/qstat -j $jidstr", sub { my $l = shift; if ($l =~ /^job_number:\s+(\d+)/) { $jid=$1; } elsif ($l =~ /^hard resource_list/) { my ($reqcputime, $reqwalltime) = req_limits($l); $lrms_jobs->{$jid}{reqcputime} = $reqcputime if $reqcputime; $lrms_jobs->{$jid}{reqwalltime} = $reqwalltime if $reqwalltime; } elsif ($l =~ /^\s*(cannot run because.*)/) { # Reason for being held in queue push @{$lrms_jobs->{$jid}{comment}}, "LRMS: $1"; } # Look for error messages, often jobs pending in error state 'Eqw' elsif ($l =~ /^error reason\s*\d*:\s*(.*)/) { # for SGE version 6.x. 
Examples: # error reason 1: can't get password entry for user "grid". Either the user does not exist or NIS error! # error reason 1: 08/20/2008 13:40:27 [113794:25468]: error: can't chdir to /some/dir: No such file or directory # error reason 1: fork failed: Cannot allocate memory # 1: fork failed: Cannot allocate memory push @{$lrms_jobs->{$jid}{comment}}, "SGE job state was Eqw. LRMS error message was: $1"; loop_callback("$path/qdel -fj $jidstr", sub {}) } elsif ($l =~ /(job is in error state)/) { # for SGE version 5.x. push @{$lrms_jobs->{$jid}{comment}}, "SGE job state was Eqw. LRMS error message was: $1"; loop_callback("$path/qdel -fj $jidstr", sub {}) # qstat is not informative. qacct would be a bit more helpful with # messages like: # failed 1 : assumedly before job # failed 28 : changing into working directory } }) or $log->warning("Failed listing named jobs"); } sub users_info($$) { my ($qname, $accts) = @_; my $lrms_users = {}; # add users to the info tree my $lrms_queue = $lrms_info->{queues}{$qname}; $lrms_queue->{users} = $lrms_users; # freecpus # queue length # # This is hard to implement correctly for a complex system such as SGE. # Using simple estimate. my $freecpus = 0; foreach my $u ( @{$accts} ) { if ($max_u_jobs) { $user_total_jobs{$u} = 0 unless $user_total_jobs{$u}; $freecpus = $max_u_jobs - $user_total_jobs{$u}; $freecpus = $lrms_queue->{status} if $lrms_queue->{status} < $freecpus; } else { $freecpus = $lrms_queue->{status}; } $lrms_queue->{minwalltime} = 0; $lrms_queue->{mincputime} = 0; $lrms_users->{$u}{queuelength} = $user_waiting_jobs{$u} || 0; $freecpus = 0 if $freecpus < 0; if ($lrms_queue->{maxwalltime}) { $lrms_users->{$u}{freecpus} = { $freecpus => $lrms_queue->{maxwalltime} }; } else { $lrms_users->{$u}{freecpus} = { $freecpus => 0 }; # unlimited } } } sub run_qhost { my ($host) = @_; my $result = {}; #require Data::Dumper; import Data::Dumper qw(Dumper); #print STDERR Dumper($host); loop_callback("$path/qhost -F -h `echo $host | cut -d . -f 1` | grep '='", sub { my $l = shift; my ($prefix, $value ) = split ":", $l; if ( $value =~ /^mem_total=(\d+(?:\.\d+)?)\s*(\w)/) { my $mult = 1; if ($2 eq "M") {$mult = 1024} if ($2 eq "G") {$mult = 1024*1024} $result->{$host}{pmem} = int($mult*$1); } elsif ( $value =~ /^virtual_total=(\d+(?:\.\d+)?)\s*(\w)/) { my ($mult) = 1; if ($2 eq "M") {$mult = 1024} if ($2 eq "G") {$mult = 1024*1024} $result->{$host}{vmem} = int($mult*$1); } elsif ( $value =~ /^m_socket=(\d+(?:\.\d+)?)/) { $result->{$host}{nsock} = int($1); } }) or $log->error("Failed listing host attributes"); return $result; } sub check_host_state_na { my ($host) = @_; my $result; loop_callback("$path/qstat -f | grep `echo $host | cut -d . 
-f 1`", sub { my $l = shift; if ( $l =~ /-NA-/) { $result=1; } else { $result=0; } }) or $log->error("Failed check host"); return $result; } sub nodes_info { #require Data::Dumper; import Data::Dumper qw(Dumper); my $lrms_nodes = {}; # add nodes to the info tree $lrms_info->{nodes} = $lrms_nodes; for my $host (keys %node_stats) { my $node = $node_stats{$host}; my $queues = $node->{queues}; next unless defined $queues; my $arc_queue = 0; for my $qname1 (keys %$queues) { for my $qname2 ( keys %{$options->{queues}}) { if ($qname1 =~ $qname2 ) {$arc_queue = 1;} } } if ($arc_queue == 0) {next;} $lrms_nodes->{$host}{lcpus} = $node_stats{$host}{totalcpus}; $lrms_nodes->{$host}{slots} = $node_stats{$host}{totalcpus}; my $pmem; my $vmem; my $nsock; if (check_host_state_na($host) != 1) { $pmem = run_qhost($host); $vmem = run_qhost($host); $nsock = run_qhost($host); $lrms_nodes->{$host}{pmem} = $pmem->{$host}{pmem}; $lrms_nodes->{$host}{vmem} = $vmem->{$host}{vmem}; $lrms_nodes->{$host}{nsock} = $nsock->{$host}{nsock}; $lrms_nodes->{$host}{isfree} = 1; $lrms_nodes->{$host}{isavailable} = 1; } else { $lrms_nodes->{$host}{pmem} = 0; $lrms_nodes->{$host}{vmem} = 0; $lrms_nodes->{$host}{nsock} = 0; $lrms_nodes->{$host}{lcpus} = 0; $lrms_nodes->{$host}{slots} = 0; $lrms_nodes->{$host}{isfree} = 0; $lrms_nodes->{$host}{isavailable} = 0; } # TODO # $lrms_nodes->{$host}{tags} = # $lrms_nodes->{$host}{release} = #my %system = qw(lx Linux sol SunOS darwin Darwin); #my %machine = qw(amd64 x86_64 x86 i686 ia64 ia64 ppc ppc sparc sparc sparc64 sparc64); #if ($node_stats{$host}{arch} =~ /^(lx|sol|darwin)-(amd64|x86|ia64|ppc|sparc|sparc64)$/) { # $lrms_nodes->{$host}{sysname} = $system{$1}; # $lrms_nodes->{$host}{machine} = $machine{$2}; #} } #print STDERR Dumper($lrms_nodes); #print STDERR Dumper(%{$options->{queues}}); } sub test { LogUtils::level("VERBOSE"); require Data::Dumper; import Data::Dumper qw(Dumper); $path = shift; (%running_jobs,%waiting_jobs) = (); get_lrms_info(""); print Dumper(\%node_stats,\%running_jobs,\%waiting_jobs); } #test('./test/6.0'); #test('./test/5.3'); #test($ARGV[0]); 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/FORKmod.pm0000644000000000000000000000013015067751327023467 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.866493666 29 ctime=1759499029.80123864 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/FORKmod.pm0000644000175000002070000001767215067751327025410 0ustar00mockbuildmock00000000000000package FORKmod; require Exporter; our @ISA = qw(Exporter); our @EXPORT_OK = qw(get_lrms_info get_lrms_options_schema); use POSIX qw(ceil floor); use Sys::Hostname; use LogUtils; use Sysinfo; # uncomment the following and install the perl concise package # for a dump of datastructures. 
#use Data::Dumper::Concise; # usage: put the following anywhere in the code where you need the dump #$log->warning(Dumper($variable_to_dump)); use strict; ########################################## # Saved private variables ########################################## our $lrms_info = {}; our $options; our $running = 0; our $hostname = hostname(); our $cputhreadcount; our $log = LogUtils->getLogger(__PACKAGE__); ############################################ # Public subs ############################################# sub get_lrms_options_schema { return { 'queues' => { '*' => { 'users' => [ '' ] } }, 'jobs' => [ '' ], 'maxjobs' => '*' }; } sub get_lrms_info(\%) { $options = shift; my ($sysname, $nodename, $release, $version, $machine) = POSIX::uname(); my $meminfo = Sysinfo::meminfo(); my $cpuinfo = Sysinfo::cpuinfo(); $cputhreadcount = $cpuinfo->{cputhreadcount}; cluster_info(); my $jids = $options->{jobs}; jobs_info($jids); $lrms_info->{queues} = {}; my $isfree; my %queues = %{$options->{queues}}; for my $qname ( keys %queues ) { queue_info($qname); $isfree = $lrms_info->{queues}{$qname}{status} ? 1 : 0; } for my $qname ( keys %queues ) { my $users = $queues{$qname}{users}; users_info($qname,$users); } my $node = $lrms_info->{nodes}{$hostname} = {}; $node->{isavailable} = 1; $node->{isfree} = $isfree; $node->{sysname} = $sysname; $node->{release} = $release; $node->{machine} = $machine; $node->{lcpus} = $cpuinfo->{cputhreadcount} if $cpuinfo->{cputhreadcount}; $node->{pcpus} = $cpuinfo->{cpusocketcount} if $cpuinfo->{cpusocketcount}; $node->{pmem} = $meminfo->{pmem} if $meminfo->{pmem}; $node->{vmem} = $meminfo->{vmem} if $meminfo->{vmem}; return $lrms_info } ########################################## # Private subs ########################################## # Produces stats for all processes on the system sub process_info() { my @pslines = `ps -e -o ppid,pid,vsz,time,etime,user,comm`; if ($? != 0) { $log->warning("Failed running ps -e -o ppid,pid..."); return (); } shift @pslines; # drop header line my @procinfo; for my $line (@pslines) { my ($ppid,$pid,$vsize,$cputime,$etime,$user,$comm) = split ' ', $line, 7; # matches time formats like: 21:29.44, 12:21:29, 3-12:21:29 if ($cputime =~ /^(?:(?:(\d+)-)?(\d+):)?(\d+):(\d\d(?:\.\d+)?)$/) { my ($days,$hours,$minutes,$seconds) = (($1||0), ($2||0), $3, $4); $cputime = $seconds + 60*($minutes + 60*($hours + 24*$days)); } else { $log->warning("Invalid cputime: $cputime"); $cputime = 0; } # matches time formats like: 21:29.44, 12:21:29, 3-12:21:29 if ($etime =~ /^(?:(?:(\d+)-)?(\d+):)?(\d+):(\d\d(?:\.\d+)?)$/) { my ($days,$hours,$minutes,$seconds) = (($1||0), ($2||0), $3, $4); $etime = $seconds + 60*($minutes + 60*($hours + 24*$days)); } elsif ($etime eq '-') { $etime = 0; # a zombie ? 
} else { $log->warning("Invalid etime: $etime"); $etime = 0; } my $pi = { ppid => $ppid, pid => $pid, vsize => $vsize, user => $user, cputime => $cputime, etime => $etime, comm => $comm }; push @procinfo, $pi, } return @procinfo; } sub cluster_info () { my $lrms_cluster = {}; $lrms_info->{cluster} = $lrms_cluster; $lrms_cluster->{lrms_type} = "fork"; $lrms_cluster->{lrms_version} = "0.9"; my $cpuinfo = Sysinfo::cpuinfo(); $lrms_cluster->{totalcpus} = $cputhreadcount; # Since fork is a single-machine backend, there will only be one machine available $lrms_cluster->{cpudistribution} = $lrms_cluster->{totalcpus}."cpu:1"; # usedcpus on a fork machine is determined from the 1min cpu # loadaverage and cannot be larger than the totalcpus if (`uptime` =~ /load averages?:\s+([.\d]+,?[.\d]+),?\s+([.\d]+,?[.\d]+),?\s+([.\d]+,?[.\d]+)/) { my $usedcpus = $1; $usedcpus =~ tr/,/./; $lrms_cluster->{usedcpus} = ($usedcpus <= $lrms_cluster->{totalcpus}) ? floor(0.5+$usedcpus) : $lrms_cluster->{totalcpus}; } else { $log->warning("Failed getting load averages"); $lrms_cluster->{usedcpus} = 0; } $lrms_cluster->{runningjobs} = $lrms_cluster->{usedcpus}; # no LRMS queuing jobs on a fork machine, fork has no queueing ability $lrms_cluster->{queuedjobs} = 0; $lrms_cluster->{queuedcpus} = 0; } sub queue_info ($) { my $qname = shift; my $qopts = $options->{queues}{$qname}; my $lrms_queue = {}; $lrms_info->{queues}{$qname} = $lrms_queue; my $cpuinfo = Sysinfo::cpuinfo(); $lrms_queue->{totalcpus} = $cputhreadcount; $lrms_queue->{running} = $running; $lrms_queue->{status} = $lrms_queue->{totalcpus} - $running; $lrms_queue->{status} = 0 if $lrms_queue->{status} < 0; my $job_limit; $job_limit = 1; if ( $options->{maxjobs} ) { #extract lrms maxjobs from config option my @maxes = split(' ', $options->{maxjobs}); my $len=@maxes; if ($len > 1){ $job_limit = $maxes[1]; #do we allow "cpunumber" special statement in maxjobs? if ($job_limit eq "cpunumber") { $job_limit = $lrms_queue->{totalcpus}; } } } $lrms_queue->{maxrunning} = $job_limit; $lrms_queue->{maxuserrun} = $job_limit; $lrms_queue->{maxqueuable} = $job_limit; chomp( my $maxcputime = `ulimit "-t"` ); if ($maxcputime =~ /^\d+$/) { $lrms_queue->{maxcputime} = $maxcputime; $lrms_queue->{maxwalltime} = $maxcputime; } elsif ($maxcputime ne 'unlimited') { $log->warning("Could not determine max cputime with ulimit -t"); } $lrms_queue->{queued} = 0; #$lrms_queue->{mincputime} = ""; #$lrms_queue->{defaultcput} = ""; #$lrms_queue->{minwalltime} = ""; #$lrms_queue->{defaultwallt} = ""; } sub jobs_info ($) { my $jids = shift; my $lrms_jobs = {}; $lrms_info->{jobs} = $lrms_jobs; my @procinfo = process_info(); foreach my $id (@$jids){ $lrms_jobs->{$id}{nodes} = [ $hostname ]; my ($proc) = grep { $id eq $_->{pid} } @procinfo; if ($proc) { # number of running jobs. 
Will be used in queue_info ++$running; # sum cputime of all child processes my $cputime = $proc->{cputime}; $_->{ppid} == $id and $cputime += $_->{cputime} for @procinfo; $lrms_jobs->{$id}{mem} = $proc->{vsize}; $lrms_jobs->{$id}{walltime} = $proc->{etime}; $lrms_jobs->{$id}{cputime} = $cputime; $lrms_jobs->{$id}{status} = 'R'; $lrms_jobs->{$id}{rank} = 0; $lrms_jobs->{$id}{cpus} = 1; #$lrms_jobs->{$id}{reqwalltime} = ""; #$lrms_jobs->{$id}{reqcputime} = ""; $lrms_jobs->{$id}{comment} = [ "LRMS: Running under fork" ]; } else { $lrms_jobs->{$id}{status} = 'EXECUTED'; } } } sub users_info($$) { my $qname = shift; my $accts = shift; # add users to the big tree my $lrms_users = {}; my $lrms_queue = $lrms_info->{queues}{$qname}; $lrms_queue->{users} = $lrms_users; # freecpus # queue length foreach my $u ( @{$accts} ) { my $freecpus = $lrms_queue->{maxuserrun} - $lrms_queue->{running}; $lrms_users->{$u}{freecpus} = { $freecpus => 0 }; $lrms_users->{$u}{queuelength} = $lrms_queue->{queued}; } } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/SGE.pm0000644000000000000000000000013215067751327022646 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.867493681 30 ctime=1759499029.825114643 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/SGE.pm0000644000175000002070000005122715067751327024557 0ustar00mockbuildmock00000000000000package SGE; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### use strict; use POSIX qw(floor ceil); our @ISA = ('Exporter'); our @EXPORT_OK = ('lrms_init', 'cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our %lrms_queue; our $config; our $path; our $maxwalltime; our $max_u_jobs; our %total_user_jobs; # Regular expression used for interpreting output from qstat. # The simpler approach of splitting the line will not work if a job's name # includes a space our $name_re = '.*\S'; # may contain space our $ts_re = '\d\d\/\d\d\/\d{4} \d\d:\d\d:\d\d'; our $queue_re = '\S*\D\S*'; # not only digits our $task_re = '[-,:\d]+'; # example: 4,6-8:1 our $job_re = '^\s*(\d+)\s+([.\d]+|nan)\s+('.$name_re.')\s+(\S+)\s+(\w+)' . 
'\s+('.$ts_re.')(?:\s+('.$queue_re.'))??\s+(\d+)(?:\s+('.$task_re.'))?\s*$'; ########################################## # Private subs ########################################## sub type_and_version () { my ($type, $version); my ($command) = "$path/qstat -help"; open QSTAT, "$command 2>/dev/null |"; my ($line) = <QSTAT>; ($type, $version) = split " ", $line; close QSTAT; error("Cannot identify SGE version from output of '$command': $line") unless $type =~ /GE/ and $version; warning("Unsupported SGE version: $version") unless $version =~ /^6/; return $type, $version; } sub queues () { my (@queue, $line); my ($command) = "$path/qconf -sql"; unless ( open QCONF, "$command 2>/dev/null |" ) { error("$command failed.");} while ( $line = <QCONF> ) { chomp $line; push @queue, $line; } close QCONF; return @queue; } # # Processes an array task definition (i.e.: '3,4,6-8,10-20:2') # and returns the number of individual tasks # sub count_array_spec($) { my $count = 0; for my $spec (split ',', shift) { # handles expressions like '6-10:2' and '6-10' and '6' return 0 unless $spec =~ '^(\d+)(?:-(\d+)(?::(\d+))?)?$'; my ($lower,$upper,$step) = ($1,$2,$3); $upper = $lower unless defined $upper; $step = 1 unless $step; $count += 1 + floor(abs($upper-$lower)/$step); } return $count; } sub cpudist (@) { my (@cpuarray) = @_; my (%cpuhash) = (); my ($cpudistribution) =""; while ( @cpuarray ) { $cpuhash{ pop(@cpuarray) }++; } while ( my ($cpus,$count) = each %cpuhash ) { if ( $cpus > 0 ) { $cpudistribution .= $cpus . 'cpu:' . $count . " "; } } chop $cpudistribution; return $cpudistribution; } sub slots () { my ($totalcpus, $distr, $usedslots, $queued); my ($foo, $line, @slotarray); my $queuetotalslots = {}; # Get version of SGE since API has changed # it is on the first line of output and is of the format # GE 6.2u2_1 # or # GE 6.0u8 my ($command) = "$path/qstat -help| sed -n '1,2 s/GE \\([0-9]\\.[0-9]\\).*/\\1/p'"; my ($version) = `$command`; # Number of slots in execution hosts $command = "$path/qconf -sep"; unless ( open QQ, "$command 2>/dev/null |" ) { error("$command failed."); } while ( $line = <QQ> ) { if ( $line =~ /^HOST/ || $line =~ /^=+/ ) { next; } if ( $line =~ /^SUM\s+(\d+)/ ) { $totalcpus = $1; next; } my ($name, $ncpu, $arch ) = split " ", $line; push @slotarray, $ncpu; } close QQ; $distr = cpudist(@slotarray); # Used slots in all queues $command = "$path/qstat -g c"; unless ( open QSTAT, "$command 2>/dev/null |" ) { error("$command failed.");} $usedslots = 0; if ($version < 6.2){ while ( $line = <QSTAT> ) { if ( $line =~ /^CLUSTER QUEUE/ || $line =~ /^-+/) { next; } my ($name, $cqload, $used, $avail, $total, $aoACDS, $cdsuE ) = split " ", $line; $usedslots += $used; $$queuetotalslots{$name} = $total; } } else { # Format changed in 6.2 while ( $line = <QSTAT> ) { if ( $line =~ /^CLUSTER QUEUE/ || $line =~ /^-+/) { next; } my ($name, $cqload, $used, $res, $avail, $total, $aoACDS, $cdsuE ) = split " ", $line; $usedslots += $used; $$queuetotalslots{$name} = $total; } } close QSTAT; # List all jobs $command = "$path/qstat -u '*'"; unless ( open QSTAT, "$command 2>/dev/null |" ) { error("$command failed.");} my $queuedcpus = 0; my $queuedjobs = 0; my $runningjobs = 0; while ( $line = <QSTAT> ) { # assume that all lines beginning with an integer are describing jobs next unless $line =~ /^\s*\d+\s+/; if ($line =~ /$job_re/) { my ($id,$state,$slots,$tasks) = ($1,$5,$8,$9); # for interpreting state codes: # http://gridengine.sunsource.net/nonav/source/browse/~checkout~/gridengine/source/libs/japi/jobstates.html if ($state =~ /[rtsSTR]/) { # This 
should match the jobs that would be listed by qstat -s rs # Job is either running, transferring or suspended for some reason. # It may also be waiting to be deleted while in the above states. # OBS: the hr (hold running) state is also counted here. # R is state for restarted job. $runningjobs++; } elsif ($state =~ /[hw]/) { # This should match the jobs that would be listed by qstat -s p # Job is pending in the queue. It's possibly being hold or it can be in an error state. my $ntasks = 1; $ntasks = count_array_spec($tasks) if defined $tasks; error("Cannot understand array job specification '$tasks' from qstat line: $line") unless $ntasks; $queuedjobs += $ntasks; $queuedcpus += $ntasks * $slots; } else { # should not happen warning("SGE job $id is in an unexpected state: $state"); } } else { error("cannot parse an output line from $command: $line"); } } close QSTAT; return ($totalcpus, $distr, $usedslots, $runningjobs, $queuedjobs, $queuedcpus, $queuetotalslots); } sub global_limits() { my ($command) = "$path/qconf -sconf global"; unless ( open QQ, "$command 2>/dev/null |" ) { error("$command failed."); } my $max_jobs = 0; while ( my $line = <QQ> ) { $max_jobs = $1 if $line =~ /^max_jobs\s+(\d+)/; $max_u_jobs = $1 if $line =~ /^max_u_jobs\s+(\d+)/; } close QQ; return ($max_jobs, $max_u_jobs); } sub req_limits ($) { my ($s) = shift; my ($reqcputime, $reqwalltime); # required cputime if ($s =~ /h_cpu=(\d+)/ ) { $reqcputime=ceil($1/60); } elsif ($s =~ /s_cpu=(\d+)/ ) { $reqcputime=ceil($1/60); } else { $reqcputime=""; } # required walltime if ($s =~ /h_rt=(\d+)/ ) { $reqwalltime=ceil($1/60); } elsif ($s =~ /s_rt=(\d+)/ ) { $reqwalltime=ceil($1/60); } else { $reqwalltime=""; } return ($reqcputime, $reqwalltime); } ############################################ # Public subs ############################################# sub configure_sge_env(%) { my %config = @_; $ENV{SGE_ROOT} = $config{sge_root} || $ENV{SGE_ROOT}; error("sge_root must be defined in arc.conf") unless $ENV{SGE_ROOT}; $ENV{SGE_CELL} = $config{sge_cell} || $ENV{SGE_CELL} || 'default'; $ENV{SGE_QMASTER_PORT} = $config{sge_qmaster_port} if $config{sge_qmaster_port}; $ENV{SGE_EXECD_PORT} = $config{sge_execd_port} if $config{sge_execd_port}; for (split ':', $ENV{PATH}) { $ENV{SGE_BIN_PATH} = $_ and last if -x "$_/qsub"; } $ENV{SGE_BIN_PATH} = $config{sge_bin_path} || $ENV{SGE_BIN_PATH}; error("SGE executables not found") unless -x "$ENV{SGE_BIN_PATH}/qsub"; $path = $ENV{SGE_BIN_PATH}; } sub cluster_info ($) { $config = shift; configure_sge_env(%$config); my (%lrms_cluster); # Figure out SGE type and version ( $lrms_cluster{lrms_type}, $lrms_cluster{lrms_version} ) = type_and_version(); # SGE has per-slot cputime limit $lrms_cluster{has_total_cputime_limit} = 0; # Count used/free CPUs and queued jobs in the cluster # Note: SGE has the concept of "slots", which roughly corresponds to the # concept of "cpus" in ARC (PBS) LRMS interface. 
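# For reference, slots() above returns, in this order: total slot count, a
# cpu distribution string (e.g. '4cpu:10 8cpu:2' - values here are only
# illustrative), used slots, number of running jobs, number of queued jobs,
# number of queued cpus, and a hash reference mapping each queue name to its
# total slots (that last element is not consumed in the assignment below).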
( $lrms_cluster{totalcpus}, $lrms_cluster{cpudistribution}, $lrms_cluster{usedcpus}, $lrms_cluster{runningjobs}, $lrms_cluster{queuedjobs}, $lrms_cluster{queuedcpus}) = slots (); # List LRMS queues @{$lrms_cluster{queue}} = queues(); return %lrms_cluster; } sub queue_info ($$) { $config = shift; configure_sge_env(%$config); my ($qname) = shift; # status # running # totalcpus my ($command) = "$path/qstat -g c"; unless ( open QSTAT, "$command 2> /dev/null |" ) { error("$command failed.");} my ($line, $used); my ($sub_running, $sub_status, $sub_totalcpus); while ( $line = <QSTAT> ) { if ( $line =~ /^(CLUSTER QUEUE)/ || $line =~ /^-+$/ ) {next} my (@a) = split " ", $line; if ( $a[0] eq $qname ) { # SGE 6.2 has an extra column between USED and AVAIL $lrms_queue{status} = $a[-4]; $lrms_queue{running} = $a[2]; $lrms_queue{totalcpus} = $a[-3]; } } close QSTAT; # settings in the config file override $lrms_queue{totalcpus} = $$config{totalcpus} if $$config{totalcpus}; # Number of available (free) cpus can not be larger than # free cpus in the whole cluster my ($totalcpus, $distr, $usedslots, $runningjobs, $queuedjobs, $queuedcpus, $queuetotalslots) = slots ( ); if ( $lrms_queue{status} > $totalcpus-$usedslots ) { $lrms_queue{status} = $totalcpus-$usedslots; $lrms_queue{status} = 0 if $lrms_queue{status} < 0; } # reserve negative numbers for error states if ($lrms_queue{status}<0) { warning("lrms_queue{status} = $lrms_queue{status}")} # Grid Engine has hard and soft limits for both CPU time and # wall clock time. Nordugrid schema only has CPU time. # The lowest of the 2 limits is returned by this code. my $qlist = $qname; $command = "$path/qconf -sq $qlist"; open QCONF, "$command 2>/dev/null |" or error("$command failed."); while ( $line = <QCONF> ) { if ($line =~ /^[sh]_rt\s+(.*)/) { next if $1 eq 'INFINITY'; my @a = split ":", $1; my $timelimit; if (@a == 3) { $timelimit = 60*$a[0]+$a[1]; } elsif (@a == 4) { $timelimit = 24*60*$a[0]+60*$a[1]+$a[2]; } elsif (@a == 1) { $timelimit = int($a[0]/60); } else { warning("Error parsing time info in output of $command"); next; } if (not defined $lrms_queue{maxwalltime} || $lrms_queue{maxwalltime} > $timelimit) { $lrms_queue{maxwalltime} = $timelimit; } } if ($line =~ /^[sh]_cpu\s+(.*)/) { next if $1 eq 'INFINITY'; my @a = split ":", $1; my $timelimit; if (@a == 3) { $timelimit = 60*$a[0]+$a[1]; } elsif (@a == 4) { $timelimit = 24*60*$a[0]+60*$a[1]+$a[2]; } elsif (@a == 1) { $timelimit = int($a[0]/60); } else { warning("Error parsing time info in output of $command"); next; } if (not defined $lrms_queue{maxcputime} || $lrms_queue{maxcputime} > $timelimit) { $lrms_queue{maxcputime} = $timelimit; } } } close QCONF; # Global variable; saved for later use in users_info() $maxwalltime = $lrms_queue{maxwalltime}; $lrms_queue{maxwalltime} = "" unless $lrms_queue{maxwalltime}; $lrms_queue{maxcputime} = "" unless $lrms_queue{maxcputime}; $lrms_queue{minwalltime} = ""; $lrms_queue{mincputime} = ""; $lrms_queue{defaultcput} = ""; $lrms_queue{defaultwallt} = ""; # Grid Engine puts queueing jobs in a single "PENDING" state pool, # so here we report the total number of queueing jobs in the cluster. $lrms_queue{queued} = $queuedjobs; # nordugrid-queue-maxqueuable # SGE has a global limit on the total number of jobs, but no per-queue limit. 
# This global limit can be used as an upper bound for nordugrid-queue-maxqueuable my ($max_jobs,$max_u_jobs) = global_limits(); $max_jobs = "" unless $max_jobs; $lrms_queue{maxqueuable} = $max_jobs; # nordugrid-queue-maxrunning # The total max running jobs is the number of slots for this queue $lrms_queue{maxrunning} = $$queuetotalslots{$qname} || ""; # nordugrid-queue-maxuserrun $lrms_queue{maxuserrun} = ""; return %lrms_queue; } sub jobs_info ($$@) { $config = shift; my ($qname) = shift; my ($jids) = shift; my $line; my (%lrms_jobs); # Running jobs my ($command) = "$path/qstat -u '*' -s rs"; unless ( open QSTAT, "$command 2>/dev/null |" ) { error("$command failed.");} while ( $line = <QSTAT> ) { # assume that all lines beginning with an integer are describing jobs next unless $line =~ /^\s*\d+\s+/; if ($line =~ /$job_re/) { my ($id,$user,$state,$queue,$slots) = ($1,$4,$5,$7,$8); $total_user_jobs{$user}++; if (grep { $id == $_ } @$jids) { # it's a grid job. # grid jobs are never array jobs so we don't worry about multiple lines for the same job id. if ($state =~ /^R?[rt]$/) { $lrms_jobs{$id}{status} = 'R'; } else { $lrms_jobs{$id}{status} = 'S'; } my ($cluster_queue, $exec_host) = split '@', $queue; $lrms_jobs{$id}{nodes} = [ $exec_host ]; # master node for parallel runs $lrms_jobs{$id}{cpus} = $slots; $lrms_jobs{$id}{walltime} = ""; $lrms_jobs{$id}{rank} = ""; } } else { error("cannot parse an output line from $command: $line"); } } close QSTAT; # lrms_jobs{$id}{status} # lrms_jobs{$id}{rank} # Pending (queued) jobs # NOTE: Counting rank based on all queues. $command = "$path/qstat -u '*' -s p"; unless ( open QSTAT, "$command 2>/dev/null |" ) { error("$command failed.");} my ($rank) = 1; while ( $line = <QSTAT> ) { # assume that all lines beginning with an integer are describing jobs next unless $line =~ /^\s*\d+\s+/; if ($line =~ /$job_re/) { my ($id,$user,$state,$slots,$tasks) = ($1,$4,$5,$8,$9); if (grep { $id == $_ } @$jids) { # it's a grid job. # grid jobs are never array jobs so we don't worry about multiple lines for the same job id. $lrms_jobs{$id}{rank} = $rank; if ( $state =~ /E/ ) { $lrms_jobs{$id}{status} = 'O'; } else { # Normally queued $lrms_jobs{$id}{status} = 'Q'; } } my $ntasks = 1; $ntasks = count_array_spec($tasks) if defined $tasks; error("Cannot understand array job specification '$tasks' from qstat line: $line") unless $ntasks; $total_user_jobs{$user} += $ntasks; $rank += $ntasks; } else { error("cannot parse an output line from $command: $line"); } } close QSTAT; # lrms_jobs{$id}{mem} # lrms_jobs{$id}{walltime} # lrms_jobs{$id}{cputime} # lrms_jobs{$id}{reqwalltime} # lrms_jobs{$id}{reqcputime} # lrms_jobs{$id}{comment} my (@running, @queueing, @otherlrms, @notinlrms); foreach my $jid ( @{$jids} ) { next unless exists $lrms_jobs{$jid}; if ($lrms_jobs{$jid}{status} eq 'R') { push @running, $jid; } elsif ($lrms_jobs{$jid}{status} eq 'S') { push @running, $jid; } elsif ($lrms_jobs{$jid}{status} eq 'Q') { push @queueing, $jid; } elsif ($lrms_jobs{$jid}{status} eq 'O') { push @otherlrms, $jid; } else { push @notinlrms, $jid; } } # If qstat does not match, job has finished already. 
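# (Such jobs get empty placeholder attributes in the loop below. Accounting
# data for them could in principle still be recovered, e.g. with
# `qacct -j <jobid>`, but this module does not query the accounting system.)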
foreach my $jid ( @{$jids} ) { next if exists $lrms_jobs{$jid}; debug("Job executed $jid"); $lrms_jobs{$jid}{status} = ''; $lrms_jobs{$jid}{mem} = ''; $lrms_jobs{$jid}{walltime} = ''; $lrms_jobs{$jid}{cputime} = ''; $lrms_jobs{$jid}{reqwalltime} = ''; $lrms_jobs{$jid}{reqcputime} = ''; $lrms_jobs{$jid}{rank} = ''; $lrms_jobs{$jid}{nodes} = []; $lrms_jobs{$jid}{comment} = []; } my ($jid); # Running jobs my ($jidstr) = join ',', @running; $command = "$path/qstat -j $jidstr"; $jid = ""; unless ( open QSTAT, "$command 2>/dev/null |" ) { debug("Command $command failed.")} while ( $line = <QSTAT> ) { if ( $line =~ /^job_number:\s+(\d+)/) { $jid=$1; $lrms_jobs{$jid}{comment} = [ ]; next; } if ( $line =~ /^usage/) { # Memory usage in kB # SGE reports mem, vmem and maxvmem # maxvmem chosen here $line =~ /maxvmem=(\d+)\.?(\d*)\s*(\w+)/; my ($mult) = 1024; if ($3 eq "M") {$mult = 1024} if ($3 eq "G") {$mult = 1024*1024} $lrms_jobs{$jid}{mem} = int($mult*$1 + $2*$mult/1000); # used cpu time $line =~ /cpu=((\d+:?)*)/; my (@a) = $1 =~ /(\d+):?/g; if ( @a == 4 ) { $lrms_jobs{$jid}{cputime} = 60 * ( $a[0]*24 + $a[1] ) + $a[2] ; } else { $lrms_jobs{$jid}{cputime} = 60 * $a[0] + $a[1];} next; } if ($line =~ /^hard resource_list/) { ( $lrms_jobs{$jid}{reqcputime}, $lrms_jobs{$jid}{reqwalltime} ) = req_limits($line); } next; } close QSTAT; # Normally queueing job $jidstr = join ',', @queueing; $command = "$path/qstat -j $jidstr"; $jid = ""; unless ( open QSTAT, "$command 2>/dev/null |" ) { debug("Command $command failed.")} while ( $line = <QSTAT> ) { if ( $line =~ /^job_number:\s+(\d+)/) { $jid=$1; next; } if ($line =~ /^hard resource_list/) { ( $lrms_jobs{$jid}{reqcputime}, $lrms_jobs{$jid}{reqwalltime} ) = req_limits($line); next; } # Reason for being held in queue if ( $line =~ /^\s*(cannot run because.*)/ ) { if ( exists $lrms_jobs{$jid}{comment} ) { push @{ $lrms_jobs{$jid}{comment} }, "LRMS: $1"; } else { $lrms_jobs{$jid}{comment} = [ "LRMS: $1" ]; } } next; } close QSTAT; # Other LRMS state, often jobs pending in error state 'Eqw' # Skip the rest if no jobs are in this state return %lrms_jobs unless @otherlrms; $jidstr = join ',', @otherlrms; $command = "$path/qstat -j $jidstr"; $jid = ""; unless ( open QSTAT, "$command 2>/dev/null |" ) { debug("Command $command failed.")} while ( $line = <QSTAT> ) { if ( $line =~ /^job_number:\s+(\d+)/) { $jid=$1; $lrms_jobs{$jid}{nodes} = []; $lrms_jobs{$jid}{mem} = ""; $lrms_jobs{$jid}{cputime} = ""; $lrms_jobs{$jid}{walltime} = ""; next; } if ($line =~ /^hard resource_list/) { ( $lrms_jobs{$jid}{reqcputime}, $lrms_jobs{$jid}{reqwalltime} ) = req_limits($line); next; } # Error reason no. 1 if ($line =~ /^error reason\s*\d*:\s*(.*)/ ) { if ( exists $lrms_jobs{$jid}{comment} ) { push @{$lrms_jobs{$jid}{comment}}, "LRMS: $1"; } else { $lrms_jobs{$jid}{comment} = [ "LRMS: $1" ]; } next; } # Let's say it once again ;-) if ($line =~ /(job is in error state)/ ) { if ( exists $lrms_jobs{$jid}{comment} ) { push @{ $lrms_jobs{$jid}{comment} }, "LRMS: $1"; } else { $lrms_jobs{$jid}{comment} = [ "LRMS: $1" ]; } } next; } return %lrms_jobs; } sub users_info($$@) { $config = shift; my ($qname) = shift; my ($accts) = shift; my (%lrms_users); # freecpus # queue length # # This is nearly impossible to implement generally for a # complex system such as grid engine. Using a simple # estimate. if ( ! 
exists $lrms_queue{status} ) { %lrms_queue = queue_info($config,$qname); } foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{status}; $lrms_users{$u}{freecpus} .= ":$maxwalltime" if $maxwalltime; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/ARC1ClusterInfo.pm0000644000000000000000000000013215067751327025074 xustar0030 mtime=1759498967.758492025 30 atime=1759498967.865493651 30 ctime=1759499029.823825519 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/ARC1ClusterInfo.pm0000644000175000002070000031641115067751327027004 0ustar00mockbuildmock00000000000000package ARC1ClusterInfo; # This information collector combines the output of the other information collectors # and prepares the GLUE2 information model of A-REX. use Storable; #use Data::Dumper; use FileHandle; use File::Temp; use POSIX qw(ceil); use strict; use Sysinfo qw(cpuinfo processid diskinfo diskspaces); use LogUtils; use InfoChecker; our $log = LogUtils->getLogger(__PACKAGE__); # the time now in ISO 8601 format sub timenow { my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = gmtime(time); return sprintf "%4d-%02d-%02dT%02d:%02d:%02d%1s", $year+1900, $mon+1, $mday,$hour,$min,$sec,"Z"; } # converts MDS-style time to ISO 8601 time ( 20081212151903Z --> 2008-12-12T15:19:03Z ) sub mdstoiso { return "$1-$2-$3T$4:$5:$6Z" if shift =~ /^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d(?:\.\d+)?)Z$/; return undef; } sub glue2bool { my $bool = shift; return 'undefined' unless defined $bool; return $bool ? "true" : "false"; } # Given the controldir path, the jobid and a suffix # returns a path to that job in the fragmented controldir. sub control_path { my ($controldir, $jobid, $suffix) = @_; my ($a,$b,$c,$d) = unpack("A3A3A3A3", $jobid); my $path = "$controldir/jobs/$a/$b/$c/$d/$suffix"; return $path; } sub local_state { ## Maps the gm_state, with detailed info from the lrms_state added, to the local state ## Added a terminal local state for gm_state KILLED - this will allow the job to be cleaned when calling arcclean #require Data::Dumper; import Data::Dumper qw(Dumper); # TODO: probably add $failure_state taken from somewhere my ($gm_state,$lrms_state,$failure_state) = @_; my $loc_state = { 'State' => '' }; if ($gm_state eq "ACCEPTED") { $loc_state->{State} = [ "accepted" ]; return $loc_state; } elsif ($gm_state eq "PREPARING") { $loc_state->{State} = [ "preparing" ]; return $loc_state; } elsif ($gm_state eq "SUBMIT") { $loc_state->{State} = [ "submit" ]; return $loc_state; } elsif ($gm_state eq "INLRMS") { if (not defined $lrms_state) { $loc_state->{State} = [ "inlrms" ]; return $loc_state; } elsif ($lrms_state eq 'Q') { $loc_state->{State} = [ "inlrms","q" ]; return $loc_state; } elsif ($lrms_state eq 'R') { $loc_state->{State} = [ "inlrms","r" ]; return $loc_state; } elsif ($lrms_state eq 'EXECUTED' or $lrms_state eq '') { $loc_state->{State} = [ "inlrms","e" ]; return $loc_state; } elsif ($lrms_state eq 'S') { $loc_state->{State} = [ "inlrms","s" ]; return $loc_state; } else { $loc_state->{State} = [ "running" ]; return $loc_state; } } elsif ($gm_state eq "FINISHING") { $loc_state->{State} = [ "finishing" ]; return $loc_state; } elsif ($gm_state eq "CANCELING") { $loc_state->{State} = [ "canceling" ]; return $loc_state; } elsif ($gm_state eq "KILLED") { $loc_state->{State} = [ "killed" ]; return $loc_state; } elsif ($gm_state eq "FAILED") { $loc_state->{State} = [ "failed" ]; if (! 
defined($failure_state)) { return $loc_state; } else { ## Not sure how these will be rendered - to-fix/to-check if ($failure_state eq "ACCEPTED") { $loc_state->{State} = [ "accepted","validation-failure" ]; return $loc_state; } elsif ($failure_state eq "PREPARING") { $loc_state->{State} = [ "preparing","cancel","failure" ]; return $loc_state; } elsif ($failure_state eq "SUBMIT") { $loc_state->{State} = [ "submit","cancel","failure" ]; return $loc_state; } elsif ($failure_state eq "INLRMS") { if ( $lrms_state eq "R" ) { $loc_state->{State} = [ "inlrms","cancel","app-failure" ]; return $loc_state; } else { $loc_state->{State} = [ "inlrms","cancel","processing-failure" ]; return $loc_state; } } elsif ($failure_state eq "FINISHING") { $loc_state->{State} = [ "finishing","cancel","failure" ]; return $loc_state; } elsif ($failure_state eq "FINISHED") { $loc_state->{State} = [ "finished","failure"]; return $loc_state; } elsif ($failure_state eq "DELETED") { $loc_state->{State} = [ "deleted","failure" ]; return $loc_state; } elsif ($failure_state eq "CANCELING") { $loc_state->{State} = ["canceling","failure"]; return $loc_state; } else { return $loc_state; } } } elsif ($gm_state eq "FINISHED") { $loc_state->{State} = [ "finished" ]; return $loc_state; } elsif ($gm_state eq "DELETED") { $loc_state->{State} = [ "deleted" ]; return $loc_state; } elsif ($gm_state) { # this is the "pending" case $loc_state->{State} = ["hold"]; return $loc_state; } else { return $loc_state; } } # TODO: verify: this sub evaluates also failure states and changes # rest attributes accordingly. sub rest_state { my ($gm_state,$lrms_state,$failure_state) = @_; my $state = [ "None" ]; my $is_pending = 0; if ($gm_state =~ /^PENDING:/) { $is_pending = 1; $gm_state = substr $gm_state, 8 } # REST State A-REX State # * ACCEPTING ACCEPTED # * ACCEPTED PENDING:ACCEPTED # * PREPARING PREPARING # * PREPARED PENDING:PREPARING # * SUBMITTING SUBMIT # * QUEUING INLRMS + LRMS queued # * RUNNING INLRMS + LRMS running # * HELD INLRMS + LRMS on hold # * EXITINGLRMS INLRMS + LRMS finished # * OTHER INLRMS + LRMS other # * EXECUTED PENDING:INLRMS # * FINISHING FINISHING # * KILLING CANCELLING # KILLING PREPARING + DTR cancel | FINISHING + DTR cancel # * FINISHED FINISHED + no errors & no cancel # * FAILED FINISHED + errors # * KILLED FINISHED + cancel # * WIPED DELETED if ($gm_state eq "ACCEPTED") { if ( $is_pending ) { $state = [ "ACCEPTED" ]; } else { $state = [ "ACCEPTING" ]; } } elsif ($gm_state eq "PREPARING") { if ( $is_pending ) { $state = [ "PREPARED" ]; } else { # KILLING $state = [ "PREPARING" ]; } } elsif ($gm_state eq "SUBMIT") { $state = [ "SUBMITTING" ]; } elsif ($gm_state eq "INLRMS") { if ( $is_pending ) { $state = [ "EXECUTED" ]; } else { if (not defined $lrms_state) { $state = [ "OTHER" ]; } elsif ($lrms_state eq 'Q') { $state = [ "QUEUING" ]; } elsif ($lrms_state eq 'R') { $state = [ "RUNNING" ]; } elsif ($lrms_state eq 'EXECUTED' or $lrms_state eq '') { $state = [ "EXITINGLRMS" ]; } elsif ($lrms_state eq 'S') { $state = [ "HELD" ]; } else { $state = [ "OTHER" ]; } } } elsif ($gm_state eq "FINISHING") { # KILLING $state = [ "FINISHING" ]; } elsif ($gm_state eq "CANCELING") { $state = [ "KILLING" ]; } elsif ($gm_state eq "KILLED") { $state = [ "KILLED" ]; } elsif ($gm_state eq "FAILED") { $state = [ "FAILED" ]; } elsif ($gm_state eq "FINISHED") { $state = [ "FINISHED" ]; } elsif ($gm_state eq "DELETED") { $state = [ "WIPED" ]; } elsif ($gm_state) { # this is the "pending" case $state = [ "None" ]; } else { # No idea } return 
$state; } # input is an array with (state, lrms_state, failure_state) sub glueState { my @ng_status = @_; return [ "UNDEFINEDVALUE" ] unless $ng_status[0]; my $status = [ "nordugrid:".join(':',@ng_status) ]; my $local_state = local_state(@ng_status); push @$status, "file:".@{$local_state->{State}}[0] if $local_state->{State};#try to fix so I have the full state here my $rest_state = rest_state(@ng_status); push @$status, "arcrest:".$rest_state->[0] if $rest_state; return $status; } sub getGMStatus { my ($controldir, $ID) = @_; foreach my $gmjob_status ("$controldir/accepting/$ID.status", "$controldir/processing/$ID.status", "$controldir/finished/$ID.status") { unless (open (GMJOB_STATUS, "<$gmjob_status")) { next; } else { my ($first_line) = <GMJOB_STATUS>; close GMJOB_STATUS; unless ($first_line) { $log->verbose("Job $ID: cannot get status from file $gmjob_status : Skipping job"); next; } chomp $first_line; return $first_line; } } return undef; } # Helper function that assists the GLUE2 XML renderer handle the 'splitjobs' option # $config - the config hash # $jobid - job id from GM # $gmjob - a job hash ref as returned by GMJobsInfo # $xmlGenerator - a function ref that returns a string (the job's GLUE2 XML description) # Returns undef on error, 0 if the XML file was already up to date, 1 if it was written sub jobXmlFileWriter { my ($config, $jobid, $gmjob, $xmlGenerator) = @_; $log->debug("XML writer for $jobid."); # If this is defined, then it's a job managed by local A-REX. my $gmuser = $gmjob->{gmuser}; # This below is to avoid any processing for remote grid managers, a removed feature return 0 unless defined $gmuser; my $controldir = $config->{arex}{controldir}; $log->debug("XML writer in $controldir."); my $xml_file = control_path($controldir, $jobid, "xml"); $log->debug("XML writer to $xml_file."); # Here goes simple optimisation - do not write new # XML if status has not changed while in "slow" states my $xml_time = (stat($xml_file))[9]; my $status_time = $gmjob->{statusmodified}; return 0 if defined $xml_time and defined $status_time and $status_time < $xml_time and $gmjob->{status} =~ /ACCEPTED|FINISHED|FAILED|KILLED|DELETED/; my $xmlstring = &$xmlGenerator(); return undef unless defined $xmlstring; # tempfile croaks on error my $jobdir = control_path($controldir, $jobid, ""); my ($fh, $tmpnam) = File::Temp::tempfile("xml.XXXXXXX", DIR => $jobdir); $log->debug("XML $tmpnam to $xml_file."); binmode $fh, ':encoding(utf8)'; print $fh $xmlstring and close $fh or $log->warning("Error writing to temporary file $tmpnam: $!") and close $fh and unlink $tmpnam and return undef; rename $tmpnam, $xml_file or $log->warning("Error moving $tmpnam to $xml_file: $!") and unlink $tmpnam and return undef; # Avoid .xml files created after job is deleted # Check if status file exists if(not defined getGMStatus($controldir,$jobid)) { unlink $xml_file; return undef; } # Set timestamp to the time when the status file was read in. # This is because the status file might have been updated by the time the # XML file gets written. This step ensures that the XML will be updated on # the next run of the infoprovider. my $status_read = $gmjob->{statusread}; return undef unless defined $status_read; utime(time(), $status_read, $xml_file) or $log->warning("Couldn't touch $xml_file: $!") and return undef; # *.xml file was updated return 1; }; # Intersection of two arrays that completes in linear time. The input arrays # are the keys of the two hashes passed as reference. 
# Intersection of two sets, computed in linear time. The input sets are
# given as the keys of the two hashes passed by reference; the intersection
# consists of the keys of the returned hash reference.
sub intersection {
    my ($a, $b) = @_;
    my (%union, %xor, %isec);
    for (keys %$a) { $union{$_} = 1; $xor{$_} = 1 if exists $b->{$_} }
    for (keys %$b) { $union{$_} = 1; $xor{$_} = 1 if exists $a->{$_} }
    for (keys %union) { $isec{$_} = 1 if exists $xor{$_} }
    return \%isec;
}

# Union of two arrays using a hash as a set. Returns an array.
# OBS: Perl's list semantics flatten both argument arrays into @a
# (@b is always empty), but the union is still computed correctly.
sub union {
    my (@a, @b) = @_;
    my %union;
    foreach (@a) {$union{$_} = 1;}
    foreach (@b) {$union{$_} = 1;}
    return keys %union;
}

# processes NodeSelection options and returns the matching nodes.
sub selectnodes {
    my ($nodes, %nscfg) = @_;
    return undef unless %$nodes and %nscfg;
    my @allnodes = keys %$nodes;
    my %selected = ();
    if ($nscfg{Regex}) {
        for my $re (@{$nscfg{Regex}}) {
            map { $selected{$_} = 1 if /$re/ } @allnodes;
        }
    }
    if ($nscfg{Tag}) {
        for my $tag (@{$nscfg{Tag}}) {
            for my $node (@allnodes) {
                my $tags = $nodes->{$node}{tags};
                next unless $tags;
                map { $selected{$node} = 1 if $tag eq $_ } @$tags;
            }
        }
    }
    if ($nscfg{Command}) { $log->verbose("Not implemented: NodeSelection: Command"); }
    delete $nscfg{Regex};
    delete $nscfg{Tag};
    delete $nscfg{Command};
    $log->verbose("Unknown NodeSelection option: @{[keys %nscfg]}") if %nscfg;
    $selected{$_} = $nodes->{$_} for keys %selected;
    return \%selected;
}

# Sums up ExecutionEnvironments attributes from the LRMS plugin
sub xestats {
    my ($xenv, $nodes) = @_;
    return undef unless %$nodes;
    my %continuous = (vmem => 'VirtualMemorySize', pmem => 'MainMemorySize');
    my %discrete = (lcpus => 'LogicalCPUs', pcpus => 'PhysicalCPUs', sysname => 'OSFamily', machine => 'Platform');
    my (%minval, %maxval);
    my (%minnod, %maxnod);
    my %distrib;
    my %stats = (total => 0, free => 0, available => 0);
    for my $host (keys %$nodes) {
        my %node = %{$nodes->{$host}};
        $stats{total}++;
        $stats{free}++ if $node{isfree};
        $stats{available}++ if $node{isavailable};
        # Also aggregate values across nodes, check consistency
        for my $prop (keys %discrete) {
            my $val = $node{$prop};
            next unless defined $val;
            push @{$distrib{$prop}{$val}}, $host;
        }
        for my $prop (keys %continuous) {
            my $val = $node{$prop};
            next unless defined $val;
            if (not defined $minval{$prop} or $minval{$prop} > $val) {
                $minval{$prop} = $val;
                $minnod{$prop} = $host;
            }
            if (not defined $maxval{$prop} or $maxval{$prop} < $val) {
                $maxval{$prop} = $val;
                $maxnod{$prop} = $host;
            }
        }
    }
    my $homogeneous = 1;
    while (my ($prop, $opt) = each %discrete) {
        my $values = $distrib{$prop};
        next unless $values;
        if (scalar keys %$values > 1) {
            my $msg = "ExecutionEnvironment $xenv is inhomogeneous regarding $opt:";
            while (my ($val, $hosts) = each %$values) {
                my $first = pop @$hosts;
                my $remaining = @$hosts;
                $val = defined $val ?
$val : 'undef'; $msg .= " $val($first"; $msg .= "+$remaining more" if $remaining; $msg .= ")"; } $log->info($msg); $homogeneous = 0; } else { my ($val) = keys %$values; $stats{$prop} = $val; } } if ($maxval{pmem}) { my $rdev = 2 * ($maxval{pmem} - $minval{pmem}) / ($maxval{pmem} + $minval{pmem}); if ($rdev > 0.1) { my $msg = "ExecutionEnvironment $xenv has variability larger than 10% regarding MainMemorySize:"; $msg .= " Min=$minval{pmem}($minnod{pmem}),"; $msg .= " Max=$maxval{pmem}($maxnod{pmem})"; $log->info($msg); $homogeneous = 0; } $stats{pmem} = $minval{pmem}; } if ($maxval{vmem}) { my $rdev = 2 * ($maxval{vmem} - $minval{vmem}) / ($maxval{vmem} + $minval{vmem}); if ($rdev > 0.5) { my $msg = "ExecutionEnvironment $xenv has variability larger than 50% regarding VirtualMemorySize:"; $msg .= " Min=$minval{vmem}($minnod{vmem}),"; $msg .= " Max=$maxval{vmem}($maxnod{vmem})"; $log->debug($msg); } $stats{vmem} = $minval{vmem}; } $stats{homogeneous} = $homogeneous; return \%stats; } # Combine info about ExecutionEnvironments from config options and the LRMS plugin sub xeinfos { my ($config, $nodes, $queues) = @_; my $infos = {}; my %nodemap = (); my @xenvs = keys %{$config->{xenvs}}; for my $xenv (@xenvs) { my $xecfg = $config->{xenvs}{$xenv}; my $info = $infos->{$xenv} = {}; my $nodelist = \@{$queues->{$xenv}{nodes}}; my $nscfg = $xecfg->{NodeSelection} || { 'Regex' => $nodelist }; if (ref $nodes eq 'HASH') { my $selected; if (not $nscfg) { $log->info("NodeSelection configuration missing for ExecutionEnvironment $xenv, implicitly assigning all nodes into it") unless keys %$nodes == 1 and @xenvs == 1; $selected = $nodes; } else { $selected = selectnodes($nodes, %$nscfg); } $nodemap{$xenv} = $selected; $log->debug("Nodes in ExecutionEnvironment $xenv: ".join ' ', keys %$selected); $log->info("No nodes matching NodeSelection for ExecutionEnvironment $xenv") unless %$selected; my $stats = xestats($xenv, $selected); if ($stats) { $info->{ntotal} = $stats->{total}; $info->{nbusy} = $stats->{available} - $stats->{free}; $info->{nunavailable} = $stats->{total} - $stats->{available}; $info->{pmem} = $stats->{pmem} if $stats->{pmem}; $info->{vmem} = $stats->{vmem} if $stats->{vmem}; $info->{pcpus} = $stats->{pcpus} if $stats->{pcpus}; $info->{lcpus} = $stats->{lcpus} if $stats->{lcpus}; $info->{slots} = $stats->{slots} if $stats->{slots}; $info->{sysname} = $stats->{sysname} if $stats->{sysname}; $info->{machine} = $stats->{machine} if $stats->{machine}; } } else { $log->info("The LRMS plugin has no support for NodeSelection options, ignoring them") if $nscfg; } $info->{pmem} = $xecfg->{MainMemorySize} if $xecfg->{MainMemorySize}; $info->{vmem} = $xecfg->{VirtualMemorySize} if $xecfg->{VirtualMemorySize}; $info->{pcpus} = $xecfg->{PhysicalCPUs} if $xecfg->{PhysicalCPUs}; $info->{lcpus} = $xecfg->{LogicalCPUs} if $xecfg->{LogicalCPUs}; $info->{sysname} = $xecfg->{OSFamily} if $xecfg->{OSFamily}; $info->{machine} = $xecfg->{Platform} if $xecfg->{Platform}; } # Check for overlap of nodes if (ref $nodes eq 'HASH') { for (my $i=0; $i<@xenvs; $i++) { my $nodes1 = $nodemap{$xenvs[$i]}; next unless $nodes1; for (my $j=0; $j<$i; $j++) { my $nodes2 = $nodemap{$xenvs[$j]}; next unless $nodes2; my $overlap = intersection($nodes1, $nodes2); $log->verbose("Overlap detected between ExecutionEnvironments $xenvs[$i] and $xenvs[$j]. 
" ."Use NodeSelection options to select correct nodes") if %$overlap; } } } return $infos; } # For each duration, find the largest available numer of slots of any user # Input: the users hash returned by thr LRMS module. sub max_userfreeslots { my ($users) = @_; my %timeslots; for my $uid (keys %$users) { my $uinfo = $users->{$uid}; next unless defined $uinfo->{freecpus}; for my $nfree ( keys %{$uinfo->{freecpus}} ) { my $seconds = 60 * $uinfo->{freecpus}{$nfree}; if ($timeslots{$seconds}) { $timeslots{$seconds} = $nfree > $timeslots{$seconds} ? $nfree : $timeslots{$seconds}; } else { $timeslots{$seconds} = $nfree; } } } return %timeslots; } # adds a prefix to a set of strings in an array. # input: the prefix string, an array. sub addprefix { my $prefix = shift @_; my @set = @_; my @prefixedset = @set; @prefixedset = map { $prefix.$_ } @prefixedset; return @prefixedset; } # sub to pick a value in order: first value preferred to others # can have as many parameters as one wants. sub prioritizedvalues { my @values = @_; my $numelements = scalar @values; while (@values) { my $current = shift @values; return $current if (((defined $current) and ($current ne '')) or ( $numelements == 1)); } # just in case all the above fails, log and return empty string $log->debug("No suitable value found in call to prioritizedvalues. Returning undefined"); return undef; } ############################################################################ # Combine info from all sources to prepare the final representation ############################################################################ sub collect($) { my ($data) = @_; # used for testing # print Dumper($data); my $config = $data->{config}; my $usermap = $data->{usermap}; my $host_info = $data->{host_info}; my $rte_info = $data->{rte_info}; my $gmjobs_info = $data->{gmjobs_info}; my $lrms_info = $data->{lrms_info}; my $nojobs = $data->{nojobs}; my $creation_time = timenow(); my $validity_ttl = $config->{infosys}{validity_ttl}; my $hostname = $config->{hostname} || $host_info->{hostname}; my $wsenabled = (defined $config->{arex}{ws}) ? 1 : 0; my $restenabled = $config->{arex}{ws}{jobs}{enabled}; my $wsendpoint = $config->{arex}{ws}{wsurl}; my @allxenvs = keys %{$config->{xenvs}}; my @allshares = keys %{$config->{shares}}; ## NOTE: this might be moved to ConfigCentral, but Share is a glue only concept... # GLUE2 shares differ from the configuration one. # the one to one mapping from a share to a queue is too strong. # the following datastructure reshuffles queues into proper # GLUE2 shares based on advertisedvo # This may require rethinking of parsing the configuration... my $GLUE2shares = {}; # If advertisedvo is present in arc.conf defined, # generate one additional share for each VO. # # TODO: refactorize this to apply to cluster and queue VOs # with a single subroutine, even better do everything in ConfigCentral.pm if possible # what is needed to make it possible? new schema in configcentral for policies? 
    ## for each share (queue)
    for my $currentshare (@allshares) {
        # always add a share with no mapping policy
        my $share_name = $currentshare;
        $GLUE2shares->{$share_name} = Storable::dclone($config->{shares}{$currentshare});
        # Create as many shares as the number of advertisedvo entries
        # in the [queue:queuename] block.
        # If there is any VO, generate new names.
        if (defined $config->{shares}{$currentshare}{AdvertisedVO}) {
            my ($queueadvertvos) = $config->{shares}{$currentshare}{AdvertisedVO};
            for my $queueadvertvo (@{$queueadvertvos}) {
                # generate an additional share with such advertisedVO
                my $share_vo = $currentshare.'_'.$queueadvertvo;
                $GLUE2shares->{$share_vo} = Storable::dclone($config->{shares}{$currentshare});
                # add the queue from configuration as MappingQueue
                $GLUE2shares->{$share_vo}{MappingQueue} = $currentshare;
                # replace the VO list of that share with this single VO
                $GLUE2shares->{$share_vo}{AdvertisedVO} = $queueadvertvo;
                # Add supported policies.
                # ARC5 could use XML config elements for this, but that
                # configuration is gone now, so just place a default here.
                $GLUE2shares->{$share_vo}{MappingPolicies} = { 'BasicMappingPolicy' => ''};
            }
        } else {
            # create as many shares as the advertisedvo entries in the
            # [infosys/cluster] block, but only if advertisedvo is not
            # defined in the queue block
            if (defined $config->{service}{AdvertisedVO}) {
                my ($clusteradvertvos) = $config->{service}{AdvertisedVO};
                for my $clusteradvertvo (@{$clusteradvertvos}) {
                    # generate an additional share with such advertisedVO
                    my $share_vo = $currentshare.'_'.$clusteradvertvo;
                    $GLUE2shares->{$share_vo} = Storable::dclone($config->{shares}{$currentshare});
                    # add the queue from configuration as MappingQueue
                    $GLUE2shares->{$share_vo}{MappingQueue} = $currentshare;
                    # replace the VO list of that share with this single VO
                    $GLUE2shares->{$share_vo}{AdvertisedVO} = $clusteradvertvo;
                    # ARC5 could use XML config elements for this, but that
                    # configuration is gone now, so just place a default here.
                    $GLUE2shares->{$share_vo}{MappingPolicies} = { 'BasicMappingPolicy' => '' };
                }
            }
        }
        # remove the VO array from the datastructure of the share with the same name as the queue
        delete $GLUE2shares->{$share_name}{AdvertisedVO};
        undef $share_name;
    }

    ## replace @allshares with the newly created shares
    #@allshares = keys %{$GLUE2shares};

    my $homogeneous = 1;
    $homogeneous = 0 if @allxenvs > 1;
    $homogeneous = 0 if @allshares > 1 and @allxenvs == 0;
    for my $xeconfig (values %{$config->{xenvs}}) {
        $homogeneous = 0 if defined $xeconfig->{Homogeneous} and not $xeconfig->{Homogeneous};
    }
    my $xeinfos = xeinfos($config, $lrms_info->{nodes}, $lrms_info->{queues});

    # Figure out total number of CPUs
    my ($totalpcpus, $totallcpus) = (0,0);

    # First, try to sum up cpus from all ExecutionEnvironments
    for my $xeinfo (values %$xeinfos) {
        unless (exists $xeinfo->{ntotal} and $xeinfo->{pcpus}) { $totalpcpus = 0; last }
        $totalpcpus += $xeinfo->{ntotal} * $xeinfo->{pcpus};
    }
    for my $xeinfo (values %$xeinfos) {
        unless (exists $xeinfo->{ntotal} and $xeinfo->{lcpus}) { $totallcpus = 0; last }
        $totallcpus += $xeinfo->{ntotal} * $xeinfo->{lcpus};
    }
    # Override totallcpus if defined in the cluster block
    $totallcpus = $config->{service}{totalcpus} if (defined $config->{service}{totalcpus});
    #$log->debug("Cannot determine total number of physical CPUs in all ExecutionEnvironments") unless $totalpcpus;
    $log->debug("Cannot determine total number of logical CPUs in all ExecutionEnvironments") unless $totallcpus;
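    # Illustrative arithmetic for the summation above (numbers are made up):
    # two ExecutionEnvironments, one with ntotal=10 nodes of pcpus=2 and one
    # with ntotal=5 nodes of pcpus=4, give
    #   $totalpcpus = 10*2 + 5*4 = 40
    # If any ExecutionEnvironment lacks ntotal or pcpus, the total is reset
    # to 0 and the LRMS fallback value below is used instead.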
    # Next, use the value returned by the LRMS in case the first try failed.
    # OBS: most LRMSes don't differentiate between Physical and Logical CPUs.
    $totalpcpus ||= $lrms_info->{cluster}{totalcpus};
    $totallcpus ||= $lrms_info->{cluster}{totalcpus};

    # my @advertisedvos = ();
    # if ($config->{service}{AdvertisedVO}) {
    #     @advertisedvos = @{$config->{service}{AdvertisedVO}};
    #     # add VO: suffix to each advertised VO
    #     @advertisedvos = map { "vo:".$_ } @advertisedvos;
    # }

    # # # # # # # # # # # # # # # # # # # # # #
    # Job statistics
    # # # # # # # # # # # # # # # # # # # # # #

    # total jobs in each GM state
    my %gmtotalcount;
    # jobs in each GM state, by share
    my %gmsharecount;
    # grid jobs in each lrms sub-state (queued, running, suspended), by share
    my %inlrmsjobs;
    # grid jobs in each lrms sub-state (queued, running, suspended)
    my %inlrmsjobstotal;
    # slots needed by grid jobs in each lrms sub-state (queued, running, suspended), by share
    my %inlrmsslots;
    # number of slots needed by all waiting jobs, per share
    my %requestedslots;
    # Jobs waiting to be prepared by GM (indexed by share)
    my %pending;
    # Jobs waiting to be prepared by GM
    my $pendingtotal;
    # Jobs being prepared by GM (indexed by share)
    my %share_prepping;
    # Jobs being prepared by GM (indexed by grid owner)
    my %user_prepping;
    # $user_prepping{$_} = 0 for keys %$usermap;

    # job ids divided per interface. This datastructure is a convenient way
    # to fill jobs per endpoint: each endpoint gets its own list of job ids.
    my $jobs_by_endpoint = {};

    # core count per internal A-REX state
    my %state_slots;

    # fills most of the above hashes
    for my $jobid (keys %$gmjobs_info) {

        my $job = $gmjobs_info->{$jobid};
        my $gridowner = $gmjobs_info->{$jobid}{subject};
        my $share = $job->{share};

        # take only the first VO for now.
        # TODO: problem. A job gets assigned to the default
        # queue that is not assigned to that VO. How to solve?
        # This can only be solved with a better job<->vo mapping definition.
        # So it boils down to what to do when $job->{vomsvo} is not defined.
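        # Illustrative note (not from the upstream code): with the naming
        # scheme used below, a job in share "main" whose first VOMS VO is
        # "atlas" is counted both under "main" and under the VO share
        # "main_atlas" ($sharevomsvo = $share.'_'.$vomsvo).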
my $vomsvo = $job->{vomsvo} if defined $job->{vomsvo}; my $sharevomsvo = $share.'_'.$vomsvo if defined $vomsvo; my $gmstatus = $job->{status} || ''; $gmtotalcount{totaljobs}++; $gmsharecount{$share}{totaljobs}++; # add info for VO dedicated shares $gmsharecount{$sharevomsvo}{totaljobs}++ if defined $vomsvo; # count GM states by category my %states = ( 'UNDEFINED' => [0, 'undefined'], 'ACCEPTING' => [1, 'accepted'], 'ACCEPTED' => [1, 'accepted'], 'PENDING:ACCEPTED' => [1, 'accepted'], 'PREPARING' => [2, 'preparing'], 'PENDING:PREPARING'=> [2, 'preparing'], 'SUBMIT' => [2, 'preparing'], 'SUBMITTING' => [2, 'preparing'], 'INLRMS' => [3, 'inlrms'], 'PENDING:INLRMS' => [4, 'finishing'], 'FINISHING' => [4, 'finishing'], 'CANCELING' => [4, 'finishing'], 'FAILED' => [5, 'finished'], 'KILLED' => [5, 'finished'], 'FINISHED' => [5, 'finished'], 'DELETED' => [6, 'deleted'] ); unless ($states{$gmstatus}) { $log->warning("Unexpected job status for job $jobid: $gmstatus"); $gmstatus = $job->{status} = 'UNDEFINED'; } my ($age, $category) = @{$states{$gmstatus}}; $gmtotalcount{$category}++; $gmsharecount{$share}{$category}++; $gmsharecount{$sharevomsvo}{$category}++ if defined $vomsvo; if ($age < 6) { $gmtotalcount{notdeleted}++; $gmsharecount{$share}{notdeleted}++; $gmsharecount{$sharevomsvo}{notdeleted}++ if defined $vomsvo; } if ($age < 5) { $gmtotalcount{notfinished}++; $gmsharecount{$share}{notfinished}++; $gmsharecount{$sharevomsvo}{notfinished}++ if defined $vomsvo; } if ($age < 3) { $gmtotalcount{notsubmitted}++; $gmsharecount{$share}{notsubmitted}++; $gmsharecount{$sharevomsvo}{notsubmitted}++ if defined $vomsvo; $requestedslots{$share} += $job->{count} || 1; $share_prepping{$share}++; if (defined $vomsvo) { $requestedslots{$sharevomsvo} += $job->{count} || 1; $share_prepping{$sharevomsvo}++; } # TODO: is this used anywhere? 
            $user_prepping{$gridowner}++ if $gridowner;
        }
        if ($age < 2) {
            $pending{$share}++;
            $pending{$sharevomsvo}++ if defined $vomsvo;
            $pendingtotal++;
        }

        # count grid jobs running and queued in LRMS for each share
        if ($gmstatus eq 'INLRMS') {
            my $lrmsid = $job->{localid} || 'IDNOTFOUND';
            my $lrmsjob = $lrms_info->{jobs}{$lrmsid};
            my $slots = $job->{count} || 1;
            if (defined $lrmsjob) {
                if ($lrmsjob->{status} ne 'EXECUTED') {
                    $inlrmsslots{$share}{running} ||= 0;
                    $inlrmsslots{$share}{suspended} ||= 0;
                    $inlrmsslots{$share}{queued} ||= 0;
                    $state_slots{$share}{"INLRMS:Q"} ||= 0;
                    $state_slots{$share}{"INLRMS:R"} ||= 0;
                    $state_slots{$share}{"INLRMS:O"} ||= 0;
                    $state_slots{$share}{"INLRMS:E"} ||= 0;
                    $state_slots{$share}{"INLRMS:S"} ||= 0;
                    if (defined $vomsvo) {
                        $inlrmsslots{$sharevomsvo}{running} ||= 0;
                        $inlrmsslots{$sharevomsvo}{suspended} ||= 0;
                        $inlrmsslots{$sharevomsvo}{queued} ||= 0;
                        $state_slots{$sharevomsvo}{"INLRMS:Q"} ||= 0;
                        $state_slots{$sharevomsvo}{"INLRMS:R"} ||= 0;
                        $state_slots{$sharevomsvo}{"INLRMS:O"} ||= 0;
                        $state_slots{$sharevomsvo}{"INLRMS:E"} ||= 0;
                        $state_slots{$sharevomsvo}{"INLRMS:S"} ||= 0;
                    }
                    if ($lrmsjob->{status} eq 'R') {
                        $inlrmsjobstotal{running}++;
                        $inlrmsjobs{$share}{running}++;
                        $inlrmsslots{$share}{running} += $slots;
                        $state_slots{$share}{"INLRMS:R"} += $slots;
                        if (defined $vomsvo) {
                            $inlrmsjobs{$sharevomsvo}{running}++;
                            $inlrmsslots{$sharevomsvo}{running} += $slots;
                            $state_slots{$sharevomsvo}{"INLRMS:R"} += $slots;
                        }
                    } elsif ($lrmsjob->{status} eq 'S') {
                        $inlrmsjobstotal{suspended}++;
                        $inlrmsjobs{$share}{suspended}++;
                        $inlrmsslots{$share}{suspended} += $slots;
                        $state_slots{$share}{"INLRMS:S"} += $slots;
                        if (defined $vomsvo) {
                            $inlrmsjobs{$sharevomsvo}{suspended}++;
                            $inlrmsslots{$sharevomsvo}{suspended} += $slots;
                            $state_slots{$sharevomsvo}{"INLRMS:S"} += $slots;
                        }
                    } elsif ($lrmsjob->{status} eq 'E') {
                        $state_slots{$share}{"INLRMS:E"} += $slots;
                        if (defined $vomsvo) {
                            $state_slots{$sharevomsvo}{"INLRMS:E"} += $slots;
                        }
                    } elsif ($lrmsjob->{status} eq 'O') {
                        $state_slots{$share}{"INLRMS:O"} += $slots;
                        if (defined $vomsvo) {
                            $state_slots{$sharevomsvo}{"INLRMS:O"} += $slots;
                        }
                    } else {
                        # Consider other states 'queued' for $inlrms*
                        $inlrmsjobstotal{queued}++;
                        $inlrmsjobs{$share}{queued}++;
                        $inlrmsslots{$share}{queued} += $slots;
                        $requestedslots{$share} += $slots;
                        $state_slots{$share}{"INLRMS:Q"} += $slots;
                        if (defined $vomsvo) {
                            $inlrmsjobs{$sharevomsvo}{queued}++;
                            $inlrmsslots{$sharevomsvo}{queued} += $slots;
                            $requestedslots{$sharevomsvo} += $slots;
                            $state_slots{$sharevomsvo}{"INLRMS:Q"} += $slots;
                        }
                    }
                }
            } else {
                $log->warning("Info missing about lrms job $lrmsid");
            }

        # Count cores in PREPARING and FINISHING states
        } elsif ($gmstatus eq 'PREPARING') {
            my $slots = $job->{count} || 1;
            $state_slots{$share}{PREPARING} += $slots;
            if (defined $vomsvo) {
                $state_slots{$sharevomsvo}{PREPARING} += $slots;
            }
        } elsif ($gmstatus eq 'FINISHING') {
            my $slots = $job->{count} || 1;
            $state_slots{$share}{FINISHING} += $slots;
            if (defined $vomsvo) {
                $state_slots{$sharevomsvo}{FINISHING} += $slots;
            }
        }

        # fills %jobs_by_endpoint efficiently; the interface defaults to arcrest
        my $jobinterface = $job->{interface} || 'org.nordugrid.arcrest';
        $jobs_by_endpoint->{$jobinterface}{$jobid} = {};
    }

    my $admindomain = $config->{admindomain}{Name};
    my $lrmsname = $config->{lrms}{lrms};
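    # Illustrative shape of %$jobs_by_endpoint after the loop above (ids made up):
    #   {
    #     'org.nordugrid.arcrest'  => { 'job1a2b' => {}, 'job3c4d' => {} },
    #     'org.nordugrid.internal' => { 'job5e6f' => {} },
    #   }
    # The per-endpoint ComputingActivity iterators further below walk these keys.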
    # Calculate endpoint URLs for A-REX and ARIS.
    # Check what is enabled in configuration.
    # Also calculate static data that can be triggered by endpoints,
    # such as known capabilities.
    my $csvendpointsnum = 0;
    my $csvcapabilities = {};
    my $epscapabilities = {};
    # defaults now set in ConfigCentral
    my $ldaphostport = ($config->{infosys}{ldap}{enabled}) ? "ldap://$hostname:$config->{infosys}{ldap}{port}/" : '';
    my $ldapngendpoint = '';
    my $ldapglue2endpoint = '';

    # data push/pull capabilities.
    # Is it still possible to scan the datalib path to search for .apd files to fill these?
    $epscapabilities->{'common'} = [
        'data.transfer.cepull.ftp',
        'data.transfer.cepull.http',
        'data.transfer.cepull.https',
        'data.transfer.cepull.httpg',
        'data.transfer.cepull.gridftp',
        'data.transfer.cepull.srm',
        'data.transfer.cepush.ftp',
        'data.transfer.cepush.http',
        'data.transfer.cepush.https',
        'data.transfer.cepush.httpg',
        'data.transfer.cepush.gridftp',
        'data.transfer.cepush.srm',
        'data.access.sessiondir.file',
        'data.access.stageindir.file',
        'data.access.stageoutdir.file'
    ];

    ## Endpoints initialization.
    # checks for defined paths and enabled features, sets GLUE2 capabilities.

    # REST capabilities
    my $resthostport = '';
    if ($restenabled) {
        $resthostport = $config->{arexhostport};
        $csvendpointsnum = $csvendpointsnum + 1;
        $epscapabilities->{'org.nordugrid.arcrest'} = [
            'executionmanagement.jobcreation',
            'executionmanagement.jobdescription',
            'executionmanagement.jobmanagement',
            'information.discovery.resource',
            'information.discovery.job',
            'information.lookup.job',
            'security.delegation'
        ];
    }

    # for the org.nordugrid.internal submission endpoint (files created directly in the controldir)
    # Currently not advertised, so we will not count it
    #$csvendpointsnum = $csvendpointsnum + 1;
    $epscapabilities->{'org.nordugrid.internal'} = [
        'executionmanagement.jobcreation',
        'executionmanagement.jobexecution',
        'executionmanagement.jobmanagement',
        'executionmanagement.jobdescription',
        'information.discovery.resource',
        'information.discovery.job',
        'information.lookup.job',
        'security.delegation'
    ];

    # ARIS LDAP endpoints
    # ldapng
    if ( $config->{infosys}{nordugrid}{enabled} ) {
        $csvendpointsnum++;
        $ldapngendpoint = $ldaphostport."Mds-Vo-Name=local,o=grid";
        $epscapabilities->{'org.nordugrid.ldapng'} = [ 'information.discovery.resource' ];
    }

    # ldapglue2
    if ( $config->{infosys}{glue2}{ldap}{enabled} ) {
        $csvendpointsnum++;
        $ldapglue2endpoint = $ldaphostport."o=glue";
        $epscapabilities->{'org.nordugrid.ldapglue2'} = [ 'information.discovery.resource' ];
    }

    # Calculate service capabilities as the union of the endpoint
    # capabilities, using a hash as a set.
    foreach my $key (keys %{$epscapabilities}) {
        foreach my $capability (@{$epscapabilities->{$key}}) {
            $csvcapabilities->{$capability} = '';
        }
    }

    # if all sessiondirs are in drain state, put the endpoints in
    # drain state too
    my $servingstate = 'draining';
    my ($sessiondirs) = ($config->{arex}{sessiondir});
    foreach my $sd (@$sessiondirs) {
        my @hasdrain = split(' ',$sd);
        if ($hasdrain[-1] ne 'drain') {
            $servingstate = 'production';
        }
    }

    # TODO: userdomain - maybe use mapping concepts. Not a prio.
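    # Note on the sessiondir drain scan above (illustrative, values made up):
    # an arc.conf entry like
    #     sessiondir=/scratch/grid drain
    # keeps 'drain' as its last token, so $servingstate stays 'draining'
    # only when every configured sessiondir carries that token.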
my $userdomain=''; # Global IDs # ARC choices are as follows: # my $adID = "urn:ad:$admindomain"; # AdminDomain ID my $udID = "urn:ud:$userdomain" ; # UserDomain ID; my $csvID = "urn:ogf:ComputingService:$hostname:arex"; # ComputingService ID my $cmgrID = "urn:ogf:ComputingManager:$hostname:$lrmsname"; # ComputingManager ID # Computing Endpoints IDs my $ARCRESTcepIDp; $ARCRESTcepIDp = "urn:ogf:ComputingEndpoint:$hostname:rest:$wsendpoint" if $restenabled; # ARCRESTComputingEndpoint ID my $NGLScepIDp = "urn:ogf:ComputingEndpoint:$hostname:ngls"; # NorduGridLocalSubmissionEndpoint ID # the following array is needed to publish in shares. Must be modified # if we support share-per-endpoint configurations. my @cepIDs = (); push(@cepIDs,$ARCRESTcepIDp) if ($restenabled); my $cactIDp = "urn:caid:$hostname"; # ComputingActivity ID prefix my $cshaIDp = "urn:ogf:ComputingShare:$hostname"; # ComputingShare ID prefix my $xenvIDp = "urn:ogf:ExecutionEnvironment:$hostname"; # ExecutionEnvironment ID prefix my $aenvIDp = "urn:ogf:ApplicationEnvironment:$hostname:rte"; # ApplicationEnvironment ID prefix # my $ahIDp = "urn:ogf:ApplicationHandle:$hostname:"; # ApplicationHandle ID prefix my $apolIDp = "urn:ogf:AccessPolicy:$hostname"; # AccessPolicy ID prefix my $mpolIDp = "urn:ogf:MappingPolicy:$hostname"; # MappingPolicy ID prefix my %cactIDs; # ComputingActivity IDs my %cshaIDs; # ComputingShare IDs my %aenvIDs; # ApplicationEnvironment IDs my %xenvIDs; # ExecutionEnvironment IDs my $tseID = "urn:ogf:ToStorageElement:$hostname:storageservice"; # ToStorageElement ID prefix my $ARISepIDp = "urn:ogf:Endpoint:$hostname"; # ARIS Endpoint ID kept for uniqueness # Generate ComputingShare IDs for my $share (keys %{$GLUE2shares}) { $cshaIDs{$share} = "$cshaIDp:$share"; } # Generate ApplicationEnvironment IDs my $aecount = 0; for my $rte (keys %$rte_info) { $aenvIDs{$rte} = "$aenvIDp:$aecount"; $aecount++; } # Generate ExecutionEnvironment IDs my $envcount = 0; $xenvIDs{$_} = "$xenvIDp:execenv".$envcount++ for @allxenvs; # generate ComputingActivity IDs unless ($nojobs) { for my $jobid (keys %$gmjobs_info) { my $share = $gmjobs_info->{$jobid}{share}; my $interface = $gmjobs_info->{$jobid}{'interface'}; $cactIDs{$share}{$jobid} = "$cactIDp:$interface:$jobid"; } } # TODO: in a first attempt, accesspolicies were expected to be in the XML # config. this is not yet the case, moreover it might not be possible to # do that. So the following is commented out for now. # unless (@{$config->{accesspolicies}}) { # $log->warning("No AccessPolicy configured"); # } # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # build information tree # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $callcount = 0; ### Authorized VOs: Policy stuff # calculate union of the advertisedvos in shares - a hash is used as a set # and add it to the cluster accepted advertisedvos # TODO: this code could be generalized and moved to configcentral. 
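    # Minimal sketch of the hash-as-set idiom used below (not upstream code):
    #   my %seen;
    #   $seen{$_} = '' for ('atlas', 'cms', 'atlas');
    #   my @vos = keys %seen;   # ('atlas', 'cms') -- duplicates collapse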
my @clusteradvertisedvos; if ($config->{service}{AdvertisedVO}) { @clusteradvertisedvos = @{$config->{service}{AdvertisedVO}}; } my $unionadvertisedvos; if (@clusteradvertisedvos) { foreach my $vo (@clusteradvertisedvos) { $unionadvertisedvos->{$vo}=''; } } # add the per-queue advertisedvo if any my $shares = Storable::dclone($GLUE2shares); for my $share ( keys %$shares ) { if ($GLUE2shares->{$share}{AdvertisedVO}) { my (@tempvos) = $GLUE2shares->{$share}{AdvertisedVO} if ($GLUE2shares->{$share}{AdvertisedVO}); foreach my $vo (@tempvos) { $unionadvertisedvos->{$vo}=''; } } } my @unionadvertisedvos; if ($unionadvertisedvos) { @unionadvertisedvos = keys %$unionadvertisedvos ; @unionadvertisedvos = addprefix('vo:',@unionadvertisedvos); undef $unionadvertisedvos; } # AccessPolicies implementation. Can be called for each endpoint. # the basic policy value is taken from the service AdvertisedVO. # The logic is similar to the endpoints: first # all the policies subroutines are created, then stored in $accesspolicies, # then every endpoint passes custom values to the getAccessPolicies sub. my $accesspolicies = {}; # Basic access policy: union of advertisedvos my $getBasicAccessPolicy = sub { my $apol = {}; my ($epID) = @_; $apol->{ID} = "$apolIDp:basic"; $apol->{CreationTime} = $creation_time; $apol->{Validity} = $validity_ttl; $apol->{Scheme} = "basic"; if (@unionadvertisedvos) { $apol->{Rule} = [ @unionadvertisedvos ]; }; # $apol->{UserDomainID} = $apconf->{UserDomainID}; $apol->{EndpointID} = $epID; return $apol; }; $accesspolicies->{BasicAccessPolicy} = $getBasicAccessPolicy if (@unionadvertisedvos); ## more accesspolicies can go here. ## subroutines structure to return accesspolicies my $getAccessPolicies = sub { return undef unless my ($accesspolicy, $sub) = each %$accesspolicies; my ($epID) = @_; return &{$sub}($epID); }; # MappingPolicies implementation. Can be called for each ShareID. # the basic policy value is taken from the service AdvertisedVO. # The logic is similar to the endpoints: first # all the policies subroutines are created, stored in mappingpolicies, # then every endpoint passes custom values to the getMappingPolicies sub. my $mappingpolicies = {}; # Basic mapping policy: it can only contain one vo. my $getBasicMappingPolicy = sub { my ($shareID, $sharename) = @_; my $mpol = {}; $mpol->{CreationTime} = $creation_time; $mpol->{Validity} = $validity_ttl; $mpol->{ID} = "$mpolIDp:basic:$GLUE2shares->{$sharename}{AdvertisedVO}"; $mpol->{Scheme} = "basic"; $mpol->{Rule} = [ "vo:$GLUE2shares->{$sharename}{AdvertisedVO}" ]; # $mpol->{UserDomainID} = $apconf->{UserDomainID}; $mpol->{ShareID} = $shareID; return $mpol; }; $mappingpolicies->{'BasicMappingPolicy'} = $getBasicMappingPolicy; ## more accesspolicies can go here. ## subroutines structure to return MappingPolicies # MappingPolicies are processed by using the share name and the # GLUE2shares datastructure that contains the MappingPolicies applied to this # share. my $getMappingPolicies = sub { my ($shareID, $sharename) = @_; return undef unless my ($policy) = each %{$GLUE2shares->{$sharename}{MappingPolicies}}; my $sub = $mappingpolicies->{$policy}; return &{$sub}($shareID, $sharename); }; # TODO: the above policies can be rewritten in an object oriented fashion # one single policy object that can be specialized # it's just about changing few strings # Only makes sense once we have other policies than Basic. 
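    # Illustrative note (not from the upstream code): the policy getters above
    # follow the module's iterator convention -- each call returns the next
    # item and undef ends the sequence, because 'each' keeps its position in
    # the hash between calls:
    #
    #   while (my $apol = &{$getAccessPolicies}($someEndpointID)) {
    #       # render one AccessPolicy per call
    #   }
    #
    # $someEndpointID here is a placeholder for a real Endpoint ID.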
# function that generates ComputingService data my $getComputingService = sub { $callcount++; my $csv = {}; $csv->{CreationTime} = $creation_time; $csv->{Validity} = $validity_ttl; $csv->{ID} = $csvID; $csv->{Capability} = [keys %$csvcapabilities]; $csv->{Name} = $config->{service}{ClusterName} if $config->{service}{ClusterName}; # scalar $csv->{OtherInfo} = $config->{service}{OtherInfo} if $config->{service}{OtherInfo}; # array $csv->{Type} = 'org.nordugrid.arex'; # OBS: Service QualityLevel used to state the purpose of the service. Can be set by sysadmins. # One of: development, testing, pre-production, production $csv->{QualityLevel} = $config->{service}{QualityLevel}; $csv->{StatusInfo} = $config->{service}{StatusInfo} if $config->{service}{StatusInfo}; # array my $nshares = keys %{$GLUE2shares}; $csv->{Complexity} = "endpoint=$csvendpointsnum,share=$nshares,resource=".(scalar @allxenvs); $csv->{AllJobs} = $gmtotalcount{totaljobs} || 0; # OBS: Finished/failed/deleted jobs are not counted $csv->{TotalJobs} = $gmtotalcount{notfinished} || 0; $csv->{RunningJobs} = $inlrmsjobstotal{running} || 0; $csv->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $csv->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $csv->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $csv->{PreLRMSWaitingJobs} = $pendingtotal || 0; # ComputingActivity sub. Will try to use as a general approach for each endpoint. my $getComputingActivities = sub { my ($interface) = @_; # $log->debug("interface is $interface"); my $joblist = $jobs_by_endpoint->{$interface}; return undef unless my ($jobid) = each %$joblist; my $gmjob = $gmjobs_info->{$jobid}; my $exited = undef; # whether the job has already run; my $cact = {}; $cact->{CreationTime} = $creation_time; $cact->{Validity} = $validity_ttl; my $share = $gmjob->{share}; $cact->{Type} = 'single'; # TODO: this is currently not universal $cact->{ID} = $cactIDs{$share}{$jobid}; $cact->{IDFromEndpoint} = "urn:idfe:$jobid" if $jobid; $cact->{Name} = $gmjob->{jobname} if $gmjob->{jobname}; # Set job specification language based on description if ($gmjob->{description}) { if ($gmjob->{description} eq 'adl') { $cact->{JobDescription} = 'emies:adl'; } else { $cact->{JobDescription} = 'nordugrid:xrsl'; } } else { $cact->{JobDescription} = 'UNDEFINEDVALUE'; } # TODO: understand this below $cact->{RestartState} = glueState($gmjob->{failedstate}) if $gmjob->{failedstate}; $cact->{ExitCode} = $gmjob->{exitcode} if defined $gmjob->{exitcode}; # TODO: modify scan-jobs to write it separately to .diag. All backends should do this. $cact->{ComputingManagerExitCode} = $gmjob->{lrmsexitcode} if $gmjob->{lrmsexitcode}; $cact->{Error} = [ @{$gmjob->{errors}} ] if $gmjob->{errors}; # TODO: VO info, like ATLAS/Prod; check whether this information is available to A-REX $cact->{Owner} = $gmjob->{subject} if $gmjob->{subject}; $cact->{LocalOwner} = $gmjob->{localowner} if $gmjob->{localowner}; # OBS: Times are in seconds. $cact->{RequestedTotalWallTime} = $gmjob->{reqwalltime} * ($gmjob->{count} || 1) if defined $gmjob->{reqwalltime}; $cact->{RequestedTotalCPUTime} = $gmjob->{reqcputime} if defined $gmjob->{reqcputime}; # OBS: Should include name and version. 
Exact format not specified $cact->{RequestedApplicationEnvironment} = $gmjob->{runtimeenvironments} if $gmjob->{runtimeenvironments}; $cact->{RequestedSlots} = $gmjob->{count} || 1; $cact->{StdIn} = $gmjob->{stdin} if $gmjob->{stdin}; $cact->{StdOut} = $gmjob->{stdout} if $gmjob->{stdout}; $cact->{StdErr} = $gmjob->{stderr} if $gmjob->{stderr}; $cact->{LogDir} = $gmjob->{gmlog} if $gmjob->{gmlog}; $cact->{ExecutionNode} = $gmjob->{nodenames} if $gmjob->{nodenames}; $cact->{Queue} = $gmjob->{queue} if $gmjob->{queue}; # Times for finished jobs $cact->{UsedTotalWallTime} = $gmjob->{WallTime} * ($gmjob->{count} || 1) if defined $gmjob->{WallTime}; $cact->{UsedTotalCPUTime} = $gmjob->{CpuTime} if defined $gmjob->{CpuTime}; $cact->{UsedMainMemory} = ceil($gmjob->{UsedMem}/1024) if defined $gmjob->{UsedMem}; # Submission Time to AREX $cact->{SubmissionTime} = mdstoiso($gmjob->{starttime}) if $gmjob->{starttime}; # TODO: change gm to save LRMSSubmissionTime - maybe take from accounting? #$cact->{ComputingManagerSubmissionTime} = 'NotImplemented'; # Start time in LRMS $cact->{StartTime} = mdstoiso($gmjob->{LRMSStartTime}) if $gmjob->{LRMSStartTime}; $cact->{ComputingManagerEndTime} = mdstoiso($gmjob->{LRMSEndTime}) if $gmjob->{LRMSEndTime}; $cact->{EndTime} = mdstoiso($gmjob->{completiontime}) if $gmjob->{completiontime}; $cact->{WorkingAreaEraseTime} = mdstoiso($gmjob->{cleanuptime}) if $gmjob->{cleanuptime}; $cact->{ProxyExpirationTime} = mdstoiso($gmjob->{delegexpiretime}) if $gmjob->{delegexpiretime}; if ($gmjob->{clientname}) { # OBS: address of client as seen by the server is used. my $dnschars = '-.A-Za-z0-9'; # RFC 1034,1035 my ($external_address, $port, $clienthost) = $gmjob->{clientname} =~ /^([$dnschars]+)(?::(\d+))?(?:;(.+))?$/; $cact->{SubmissionHost} = $external_address if $external_address; } # TODO: this in not fetched by GMJobsInfo at all. .local does not contain name. #$cact->{SubmissionClientName} = $gmjob->{clientsoftware} if $gmjob->{clientsoftware}; # Added for the client to know what was the original interface the job was submitted $cact->{OtherInfo} = ["SubmittedVia=$interface"]; # Computing Activity Associations # TODO: add link, is this even possible? needs share where the job is running. #$cact->{ExecutionEnvironmentID} = ; $cact->{ActivityID} = $gmjob->{activityid} if $gmjob->{activityid}; $cact->{ComputingShareID} = $cshaIDs{$share} || 'UNDEFINEDVALUE'; if ( $gmjob->{status} eq "INLRMS" ) { my $lrmsid = $gmjob->{localid}; if (not $lrmsid) { $log->warning("No local id for job $jobid") if $callcount == 1; next; } $cact->{LocalIDFromManager} = $lrmsid; my $lrmsjob = $lrms_info->{jobs}{$lrmsid}; if (not $lrmsjob) { $log->warning("No local job for $jobid") if $callcount == 1; next; } $cact->{State} = $gmjob->{failedstate} ? glueState("INLRMS", $lrmsjob->{status}, $gmjob->{failedstate}) : glueState("INLRMS", $lrmsjob->{status}); $cact->{WaitingPosition} = $lrmsjob->{rank} if defined $lrmsjob->{rank}; $cact->{ExecutionNode} = $lrmsjob->{nodes} if $lrmsjob->{nodes}; unshift @{$cact->{OtherMessages}}, $_ for @{$lrmsjob->{comment}}; # Times for running jobs $cact->{UsedTotalWallTime} = $lrmsjob->{walltime} * ($gmjob->{count} || 1) if defined $lrmsjob->{walltime}; $cact->{UsedTotalCPUTime} = $lrmsjob->{cputime} if defined $lrmsjob->{cputime}; $cact->{UsedMainMemory} = ceil($lrmsjob->{mem}/1024) if defined $lrmsjob->{mem}; } else { $cact->{State} = $gmjob->{failedstate} ? 
glueState($gmjob->{status},'',$gmjob->{failedstate}) : glueState($gmjob->{status}); } # TODO: UserDomain association, how to calculate it? $cact->{jobXmlFileWriter} = sub { jobXmlFileWriter($config, $jobid, $gmjob, @_) }; return $cact; }; # Computing Endpoints ######## # Here comes a list of endpoints we support. # TODO: verify: REST - org.nordugrid.arcrest # LDAP endpoints one per schema # these will contain only endpoints with URLs defined # Simple endpoints will be rendered as computingEndpoints # as GLUE2 does not admin simple Endpoints within a ComputingService. my $arexceps = {}; # arex computing endpoints. # my $arexeps = {}; # arex plain endpoints (i.e. former aris endpoints) # A-REX ComputingEndpoints # TODO: review that the content is consistent with GLUE2 # ARCREST my $getARCRESTComputingEndpoint = sub { # don't publish if no endpoint URL return undef unless $restenabled; my $cep = {}; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = "$ARCRESTcepIDp"; $cep->{Name} = "ARC REST"; # OBS: ideally HED should be asked for the URL $cep->{URL} = $wsendpoint; $cep->{Capability} = $epscapabilities->{'org.nordugrid.arcrest'}; $cep->{Technology} = 'rest'; $cep->{InterfaceName} = 'org.nordugrid.arcrest'; # REST interface versions that we currently support: #$cep->{InterfaceVersion} = [ '1.1', '1.0' ]; # Due to a bug in LDAP only the first entry will be shown or the Endpoint will not be published. # I could change the LDAP code, but better to be consistent between the two renderings instead. # The LDAP issue is related to the GLUE2 schema (SingleValue instead of MultiValue) and cannot be corrected without hassle. # DO NOT CHANGE THIS BELOW or the REST endpoint will not be published in LDAP. $cep->{InterfaceVersion} = [ '1.1' ]; #$cep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $cep->{Semantics} = [ "https://www.nordugrid.org/arc/arc7/tech/rest/rest.html" ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "production"; my %healthissues; if ($config->{x509_host_cert}) { if ( $host_info->{hostcert_expired}) { push @{$healthissues{critical}}, "Host credentials expired"; } elsif ($host_info->{issuerca_expired}) { push @{$healthissues{critical}}, "Host CA credentials expired"; } elsif (not $host_info->{hostcert_enddate}) { push @{$healthissues{critical}}, "Host credentials missing"; } elsif (not $host_info->{issuerca_enddate}) { push @{$healthissues{warning}}, "Host CA credentials missing"; } else { if ( $host_info->{hostcert_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host credentials will expire soon"; } elsif ($host_info->{issuerca_enddate} - time < 48*3600) { push @{$healthissues{warning}}, "Host CA credentials will expire soon"; } } } # check health status by using port probe in hostinfo my $arexport = $config->{arex}{port}; if (defined $host_info->{ports}{arched}{$arexport} and @{$host_info->{ports}{arched}{$arexport}}[0] ne 'ok') { push @{$healthissues{@{$host_info->{ports}{arched}{$arexport}}[0]}} , @{$host_info->{ports}{arched}{$arexport}}[1]; } if (%healthissues) { my @infos; for my $level (qw(critical warning other unknown)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } 
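# Worked example for the health aggregation above (values made up):
# with %healthissues = ( critical => ['Host credentials expired'],
#                        warning  => ['Host CA credentials will expire soon'] )
# the first level found in (critical, warning, other, unknown) wins, so
#   HealthState     = 'critical'
#   HealthStateInfo = 'Host credentials expired; Host CA credentials will expire soon'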
$cep->{ServingState} = $servingstate; # TODO: StartTime: get it from hed or from Sysinfo.pm processes $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? $cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'nordugrid:xrsl', 'emies:adl' ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; # No OtherInfo at the moment, but should be an array #$cep->{OtherInfo} = [] # array # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! $cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.nordugrid.arcrest'); }; } # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; # don't publish if no arex/ws/jobs configured $arexceps->{ARCRESTComputingEndpoint} = $getARCRESTComputingEndpoint if ($restenabled); # ## NorduGrid local submission # my $getNorduGridLocalSubmissionEndpoint = sub { # don't publish if no endpoint URL #return undef unless $emiesenabled; # To-decide: should really the local submission plugin be present in info.xml? It is not useful for the outside world. my $cep = {}; # Collected information not to be published $cep->{NOPUBLISH} = 1; $cep->{CreationTime} = $creation_time; $cep->{Validity} = $validity_ttl; $cep->{ID} = "$NGLScepIDp"; $cep->{Name} = "ARC CE Local Submission"; # OBS: ideally HED should be asked for the URL $cep->{URL} = $wsendpoint; $cep->{Technology} = 'direct'; $cep->{InterfaceName} = 'org.nordugrid.internal'; $cep->{InterfaceVersion} = [ '1.0' ]; $cep->{Capability} = [ @{$epscapabilities->{'org.nordugrid.internal'}}, @{$epscapabilities->{'common'}} ]; $cep->{Implementor} = "NorduGrid"; $cep->{ImplementationName} = "nordugrid-arc"; $cep->{ImplementationVersion} = $config->{arcversion}; $cep->{QualityLevel} = "testing"; my %healthissues; # Host certificate not required for INTERNAL submission interface. if ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { push @{$healthissues{warning}}, 'One or more grid managers are down'; } else { push @{$healthissues{critical}}, $config->{remotegmdirs} ? 'All grid managers are down' : 'Grid manager is down'; } } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $cep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $cep->{HealthStateInfo} = join "; ", @infos; } else { $cep->{HealthState} = 'ok'; } $cep->{ServingState} = $servingstate; # StartTime: get it from hed $cep->{IssuerCA} = $host_info->{issuerca}; # scalar $cep->{TrustedCA} = $host_info->{trustedcas}; # array # TODO: Downtime, is this necessary, and how should it work? 
$cep->{Staging} = 'staginginout'; $cep->{JobDescription} = [ 'nordugrid:xrsl', 'emies:adl' ]; $cep->{TotalJobs} = $gmtotalcount{notfinished} || 0; $cep->{RunningJobs} = $inlrmsjobstotal{running} || 0; $cep->{SuspendedJobs} = $inlrmsjobstotal{suspended} || 0; $cep->{WaitingJobs} = $inlrmsjobstotal{queued} || 0; $cep->{StagingJobs} = ( $gmtotalcount{preparing} || 0 ) + ( $gmtotalcount{finishing} || 0 ); $cep->{PreLRMSWaitingJobs} = $pendingtotal || 0; $cep->{AccessPolicies} = sub { &{$getAccessPolicies}($cep->{ID}) }; # ComputingActivities if ($nojobs) { $cep->{ComputingActivities} = undef; } else { # this complicated thing here creates a specialized getComputingActivities # version of sub with a builtin parameter! #TODO: change interfacename for jobs? $cep->{ComputingActivities} = sub { &{$getComputingActivities}('org.nordugrid.internal'); }; } # Associations $cep->{ComputingShareID} = [ values %cshaIDs ]; $cep->{ComputingServiceID} = $csvID; return $cep; }; $arexceps->{NorduGridLocalSubmissionEndpoint} = $getNorduGridLocalSubmissionEndpoint; ### ARIS endpoints are now part of the A-REX service. # TODO: verify: change ComputingService code in printers to scan for Endpoints - this might be no longer relevant - check live my $getArisLdapNGEndpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; $ep->{Name} = "ARC CE ARIS LDAP NorduGrid Schema Local Information System"; $ep->{URL} = $ldapngendpoint; $ep->{ID} = "$ARISepIDp:ldapng:$config->{infosys}{ldap}{port}"; $ep->{Capability} = $epscapabilities->{'org.nordugrid.ldapng'}; $ep->{Technology} = 'ldap'; $ep->{InterfaceName} = 'org.nordugrid.ldapng'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "production"; my %healthissues; # check health status by using port probe in hostinfo my $ldapport = $config->{infosys}{ldap}{port} if defined $config->{infosys}{ldap}{port}; if (defined $host_info->{ports}{slapd}{$ldapport} and @{$host_info->{ports}{slapd}{$ldapport}}[0] ne 'ok') { push @{$healthissues{@{$host_info->{ports}{slapd}{$ldapport}}[0]}} , @{$host_info->{ports}{slapd}{$ldapport}}[1]; } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? 
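# Illustrative endpoint URLs (assuming hostname example.org and the default
# LDAP port 2135 configured in arc.conf):
#   org.nordugrid.ldapng    -> ldap://example.org:2135/Mds-Vo-Name=local,o=grid
#   org.nordugrid.ldapglue2 -> ldap://example.org:2135/o=glue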
# AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{LDAPNGEndpoint} = $getArisLdapNGEndpoint if $ldapngendpoint ne ''; my $getArisLdapGlue2Endpoint = sub { my $ep = {}; $ep->{CreationTime} = $creation_time; $ep->{Validity} = $validity_ttl; $ep->{Name} = "ARC CE ARIS LDAP GLUE2 Schema Local Information System"; $ep->{URL} = $ldapglue2endpoint; $ep->{ID} = "$ARISepIDp:ldapglue2:$config->{infosys}{ldap}{port}"; $ep->{Capability} = $epscapabilities->{'org.nordugrid.ldapglue2'}; $ep->{Technology} = 'ldap'; $ep->{InterfaceName} = 'org.nordugrid.ldapglue2'; $ep->{InterfaceVersion} = [ '1.0' ]; # Wrong type, should be URI #$ep->{SupportedProfile} = [ "http://www.ws-i.org/Profiles/BasicProfile-1.0.html", # WS-I 1.0 # "http://schemas.ogf.org/hpcp/2007/01/bp" # HPC-BP # ]; $ep->{Semantics} = [ "http://www.nordugrid.org/documents/arc_infosys.pdf" ]; $ep->{Implementor} = "NorduGrid"; $ep->{ImplementationName} = "nordugrid-arc"; $ep->{ImplementationVersion} = $config->{arcversion}; $ep->{QualityLevel} = "production"; # How to calculate health for this interface? my %healthissues; # check health status by using port probe in hostinfo my $ldapport = $config->{infosys}{ldap}{port} if defined $config->{infosys}{ldap}{port}; if (defined $host_info->{ports}{slapd}{$ldapport} and @{$host_info->{ports}{slapd}{$ldapport}}[0] ne 'ok') { push @{$healthissues{@{$host_info->{ports}{slapd}{$ldapport}}[0]}}, @{$host_info->{ports}{slapd}{$ldapport}}[1]; } if (%healthissues) { my @infos; for my $level (qw(critical warning other)) { next unless $healthissues{$level}; $ep->{HealthState} ||= $level; push @infos, @{$healthissues{$level}}; } $ep->{HealthStateInfo} = join "; ", @infos; } else { $ep->{HealthState} = 'ok'; } $ep->{ServingState} = 'production'; # TODO: StartTime: get it from hed? # TODO: Downtime, is this necessary, and how should it work? # AccessPolicies $ep->{AccessPolicies} = sub { &{$getAccessPolicies}($ep->{ID}) }; $ep->{OtherInfo} = $host_info->{EMIversion} if ($host_info->{EMIversion}); # array # Associations $ep->{ComputingServiceID} = $csvID; return $ep; }; $arexceps->{LDAPGLUE2Endpoint} = $getArisLdapGlue2Endpoint if $ldapglue2endpoint ne ''; # Collect endpoints in the datastructure # return ComputingEndpoints in sequence my $getComputingEndpoints = sub { return undef unless my ($cep, $sub) = each %$arexceps; return &$sub; }; $csv->{ComputingEndpoints} = $getComputingEndpoints; # ComputingShares: multiple shares can share the same LRMS queue my @shares = keys %{$GLUE2shares}; my $getComputingShares = sub { return undef unless my ($share, $dummy) = each %{$GLUE2shares}; # Prepare flattened config hash for this share. my $sconfig = { %{$config->{service}}, %{$GLUE2shares->{$share}} }; # List of all shares submitting to the current queue, including the current share. 
        my $qname = $sconfig->{MappingQueue} || $share;
        # get lrms stats from the actual queues, not share names, as they might not match
        my $qinfo = $lrms_info->{queues}{$qname};
        # siblings for the main queue are just itself
        if ($qname ne $share) {
            my $siblings = $sconfig->{siblingshares} = [];
            # Do NOT use 'keys %{$GLUE2shares}' here because it would
            # reset the iterator of 'each' and cause this function to
            # always return the same result
            for my $sn (@shares) {
                my $s = $GLUE2shares->{$sn};
                my $qn = $s->{MappingQueue} || $sn;
                # main queue should not be among the siblings
                push @$siblings, $sn if (($qn eq $qname) && ($sn ne $qname));
            }
        } else {
            my $siblings = $sconfig->{siblingshares} = [$qname];
        }

        my $csha = {};

        $csha->{CreationTime} = $creation_time;
        $csha->{Validity} = $validity_ttl;
        $csha->{ID} = $cshaIDs{$share};
        $csha->{Name} = $share;
        $csha->{Description} = $sconfig->{Description} if $sconfig->{Description};
        $csha->{MappingQueue} = $qname if $qname;

        # use limits from LRMS
        $csha->{MaxCPUTime} = prioritizedvalues($sconfig->{maxcputime},$qinfo->{maxcputime});
        # TODO: implement in backends - has this been done?
        $csha->{MaxTotalCPUTime} = $qinfo->{maxtotalcputime} if defined $qinfo->{maxtotalcputime};
        $csha->{MinCPUTime} = prioritizedvalues($sconfig->{mincputime},$qinfo->{mincputime});
        $csha->{DefaultCPUTime} = $qinfo->{defaultcput} if defined $qinfo->{defaultcput};
        $csha->{MaxWallTime} = prioritizedvalues($sconfig->{maxwalltime},$qinfo->{maxwalltime});
        # TODO: MaxMultiSlotWallTime replaces MaxTotalWallTime, but has different meaning. Check that it's used correctly
        #$csha->{MaxMultiSlotWallTime} = $qinfo->{maxwalltime} if defined $qinfo->{maxwalltime};
        $csha->{MinWallTime} = prioritizedvalues($sconfig->{minwalltime},$qinfo->{minwalltime});
        $csha->{DefaultWallTime} = $qinfo->{defaultwallt} if defined $qinfo->{defaultwallt};

        my ($maxtotal, $maxlrms) = split ' ', ($config->{maxjobs} || '');
        $maxtotal = undef if defined $maxtotal and $maxtotal eq '-1';
        $maxlrms = undef if defined $maxlrms and $maxlrms eq '-1';

        # MaxTotalJobs: use the maxjobs config option
        # OBS: An upper limit is not really enforced by A-REX.
        # OBS: Currently A-REX only cares about totals, not per share limits!
        $csha->{MaxTotalJobs} = $maxtotal if defined $maxtotal;

        # MaxWaitingJobs, MaxRunningJobs:
        my ($maxrunning, $maxwaiting);
        # use values from lrms if available
        if (defined $qinfo->{maxrunning}) {
            $maxrunning = $qinfo->{maxrunning};
        }
        if (defined $qinfo->{maxqueuable}) {
            $maxwaiting = $qinfo->{maxqueuable};
        }
        # maxjobs config option sets upper limits
        if (defined $maxlrms) {
            $maxrunning = $maxlrms if not defined $maxrunning or $maxrunning > $maxlrms;
            $maxwaiting = $maxlrms if not defined $maxwaiting or $maxwaiting > $maxlrms;
        }
        $csha->{MaxRunningJobs} = $maxrunning if defined $maxrunning;
        $csha->{MaxWaitingJobs} = $maxwaiting if defined $maxwaiting;

        # MaxPreLRMSWaitingJobs: use GM's maxjobs option
        # OBS: Currently A-REX only cares about totals, not per share limits!
        # OBS: this formula is actually an upper limit on the sum of pre + post # lrms jobs.
A-REX does not have separate limit for pre lrms jobs $csha->{MaxPreLRMSWaitingJobs} = $maxtotal - $maxlrms if defined $maxtotal and defined $maxlrms; $csha->{MaxUserRunningJobs} = $qinfo->{maxuserrun} if defined $qinfo->{maxuserrun}; # TODO: eventually new return value from LRMS infocollector # not published if not in arc.conf or returned by infocollectors if ($sconfig->{MaxSlotsPerJob} || $qinfo->{MaxSlotsPerJob}) { $csha->{MaxSlotsPerJob} = $sconfig->{MaxSlotsPerJob} || $qinfo->{MaxSlotsPerJob}; } # Slots per share and state my @slot_entries; foreach my $state (keys %{$state_slots{$share}}) { my $value = $state_slots{$share}{$state}; if($value){ push @slot_entries, "CoreCount\=$state\=$value"; } } if(@slot_entries){ $csha->{OtherInfo} = \@slot_entries; } # MaxStageInStreams, MaxStageOutStreams # OBS: A-REX does not have separate limits for up and downloads. # OBS: A-REX only cares about totals, not per share limits! my ($maxloaders, $maxemergency, $maxthreads) = split ' ', ($config->{maxload} || ''); $maxloaders = undef if defined $maxloaders and $maxloaders eq '-1'; $maxthreads = undef if defined $maxthreads and $maxthreads eq '-1'; if ($maxloaders) { # default is 5 (see MAX_DOWNLOADS defined in a-rex/grid-manager/loaders/downloader.cpp) $maxthreads = 5 unless defined $maxthreads; $csha->{MaxStageInStreams} = $maxloaders * $maxthreads; $csha->{MaxStageOutStreams} = $maxloaders * $maxthreads; } # TODO: new return value schedpolicy from LRMS infocollector. my $schedpolicy = $lrms_info->{schedpolicy} || undef; if ($sconfig->{SchedulingPolicy} and not $schedpolicy) { $schedpolicy = 'fifo' if lc($sconfig->{SchedulingPolicy}) eq 'fifo'; } $csha->{SchedulingPolicy} = $schedpolicy if $schedpolicy; # GuaranteedVirtualMemory -- all nodes must be able to provide this # much memory per job. Some nodes might be able to afford more per job # (MaxVirtualMemory) # TODO: implement check at job accept time in a-rex # TODO: implement in LRMS plugin maxvmem and maxrss. $csha->{MaxVirtualMemory} = $sconfig->{MaxVirtualMemory} if $sconfig->{MaxVirtualMemory}; # MaxMainMemory -- usage not being tracked by most LRMSs # OBS: new config option (space measured in GB !?) # OBS: Disk usage of jobs is not being enforced. # This limit should correspond with the max local-scratch disk space on # clusters using local disks to run jobs. # TODO: implement check at job accept time in a-rex # TODO: see if any lrms can support this. Implement check in job wrapper $csha->{MaxDiskSpace} = $sconfig->{DiskSpace} if $sconfig->{DiskSpace}; # DefaultStorageService: # OBS: Should be ExtendedBoolean_t (one of 'true', 'false', 'undefined') $csha->{Preemption} = glue2bool($qinfo->{Preemption}) if defined $qinfo->{Preemption}; # ServingState: closed and queuing are not yet supported # OBS: this serving state should come from LRMS. $csha->{ServingState} = 'production'; # We can't guess which local job belongs to a certain VO, hence # we set LocalRunning/Waiting/Suspended to zero for shares related to # a VO. # The global share that represents the queue has also jobs not # managed by the ARC CE as it was in previous versions of ARC my $localrunning = ($qname eq $share) ? $qinfo->{running} : 0; my $localqueued = ($qname eq $share) ? $qinfo->{queued} : 0; my $localsuspended = ($qname eq $share) ? $qinfo->{suspended}||0 : 0; # TODO: [negative] This should avoid taking as local jobs # also those submitted without any VO # local jobs are per queue and not per share. 
$localrunning -= $inlrmsjobs{$qname}{running} || 0; if ( $localrunning < 0 ) { $localrunning = 0; } $localqueued -= $inlrmsjobs{$qname}{queued} || 0; if ( $localqueued < 0 ) { $localqueued = 0; } $localsuspended -= $inlrmsjobs{$qname}{suspended} || 0; if ( $localsuspended < 0 ) { $localsuspended = 0; } # OBS: Finished/failed/deleted jobs are not counted my $totaljobs = $gmsharecount{$share}{notfinished} || 0; $totaljobs += $localrunning + $localqueued + $localsuspended; $csha->{TotalJobs} = $totaljobs; $csha->{RunningJobs} = $localrunning + ( $inlrmsjobs{$share}{running} || 0 ); $csha->{WaitingJobs} = $localqueued + ( $inlrmsjobs{$share}{queued} || 0 ); $csha->{SuspendedJobs} = $localsuspended + ( $inlrmsjobs{$share}{suspended} || 0 ); # TODO: backends to count suspended jobs # fix localrunning when displaying the values if negative if ( $localrunning < 0 ) { $localrunning = 0; } $csha->{LocalRunningJobs} = $localrunning; $csha->{LocalWaitingJobs} = $localqueued; $csha->{LocalSuspendedJobs} = $localsuspended; $csha->{StagingJobs} = ( $gmsharecount{$share}{preparing} || 0 ) + ( $gmsharecount{$share}{finishing} || 0 ); $csha->{PreLRMSWaitingJobs} = $gmsharecount{$share}{notsubmitted} || 0; # TODO: investigate if it's possible to get these estimates from maui/torque $csha->{EstimatedAverageWaitingTime} = $qinfo->{averagewaitingtime} if defined $qinfo->{averagewaitingtime}; $csha->{EstimatedWorstWaitingTime} = $qinfo->{worstwaitingtime} if defined $qinfo->{worstwaitingtime}; # TODO: implement $qinfo->{freeslots} in LRMS plugins my $freeslots = 0; if (defined $qinfo->{freeslots}) { $freeslots = $qinfo->{freeslots}; } else { # TODO: to be removed after patch testing. Uncomment to check values # $log->debug("share name: $share, qname: $qname, totalcpus is $qinfo->{totalcpus}, running is $qinfo->{running}, ".Dumper($qinfo)); # TODO: still problems with this one, can be negative! Cpus are not enough. Cores must be counted, or logical cpus # in order, override with config values for queue or cluster or lrms module my $queuetotalcpus = $config->{shares}{$qname}{totalcpus} if (defined $config->{shares}{$qname}{totalcpus}); $queuetotalcpus ||= (defined $config->{service}{totalcpus}) ? $config->{service}{totalcpus} : $qinfo->{totalcpus}; $freeslots = $queuetotalcpus - $qinfo->{running}; } # This should not be needed, but the above case may trigger it $freeslots = 0 if $freeslots < 0; # Local users have individual restrictions # FreeSlots: find the maximum freecpus of any local user mapped in this # share and use that as an upper limit for $freeslots # FreeSlotsWithDuration: for each duration, find the maximum freecpus # of any local user mapped in this share # TODO: is this the correct way to do it? # TODO: currently shows negative numbers, check why # TODO: [negative] If more slots than the available are overbooked the number is negative # for example fork with parallel multicore, so duration should be set to 0 # OBS: this should be a string. extract the numeric part(s) and compare. Prevent type conversion. my @durations; # TODO: check the contents of this var my %timeslots = max_userfreeslots($qinfo->{users}); if (%timeslots) { # find maximum free slots regardless of duration my $maxuserslots = 0; for my $seconds ( keys %timeslots ) { my $nfree = $timeslots{$seconds}; $maxuserslots = $nfree if $nfree > $maxuserslots; } $freeslots = $maxuserslots < $freeslots ? 
$maxuserslots : $freeslots;

        # sort descending by duration, keeping 0 first (0 means unlimited)
        for my $seconds (sort { if ($a == 0) {1} elsif ($b == 0) {-1} else {$b <=> $a} } keys %timeslots) {
            my $nfree = $timeslots{$seconds} < $freeslots ? $timeslots{$seconds} : $freeslots;
            unshift @durations, $seconds ? "$nfree:$seconds" : $nfree;
        }
    }

    # This should be 0 if the queue is full; check the zeroing above.
    $csha->{FreeSlots} = $freeslots;
    my $freeslotswithduration = join(" ", @durations);
    # fall back to plain freeslots if @durations is empty
    if ( $freeslotswithduration eq "") {
        $freeslotswithduration = $freeslots;
    }
    $csha->{FreeSlotsWithDuration} = $freeslotswithduration;
    $csha->{UsedSlots} = $inlrmsslots{$share}{running};
    $csha->{RequestedSlots} = $requestedslots{$share} || 0;
    # TODO: detect reservationpolicy in the lrms
    $csha->{ReservationPolicy} = $qinfo->{reservationpolicy} if $qinfo->{reservationpolicy};

    # Florido's Mapping Policies
    $csha->{MappingPolicies} = sub { &{$getMappingPolicies}($csha->{ID},$csha->{Name})};

    # Tag: skip it for now

    # Associations
    my $xenvs = $sconfig->{ExecutionEnvironmentName} || [];
    push @{$csha->{ExecutionEnvironmentID}}, $xenvIDs{$_} for @$xenvs;

    ## check this association below. Which endpoint?
    $csha->{ComputingEndpointID} = \@cepIDs;
    $csha->{ServiceID} = $csvID;
    $csha->{ComputingServiceID} = $csvID;

    return $csha;
    };

    $csv->{ComputingShares} = $getComputingShares;

    # ComputingManager

    my $getComputingManager = sub {

        my $cmgr = {};

        $cmgr->{CreationTime} = $creation_time;
        $cmgr->{Validity} = $validity_ttl;
        $cmgr->{ID} = $cmgrID;

        my $cluster_info = $lrms_info->{cluster}; # hash

        # Name not needed

        $cmgr->{ProductName} = $cluster_info->{lrms_glue_type} || lc $cluster_info->{lrms_type};
        $cmgr->{ProductVersion} = $cluster_info->{lrms_version};
        # $cmgr->{Reservation} = "undefined";
        $cmgr->{BulkSubmission} = "false";

        #$cmgr->{TotalPhysicalCPUs} = $totalpcpus if $totalpcpus;
        $cmgr->{TotalLogicalCPUs} = $totallcpus if $totallcpus;

        # OBS: Assuming 1 slot per CPU
        # TODO: slots should be cores?
        $cmgr->{TotalSlots} = (defined $config->{service}{totalcpus}) ? $config->{service}{totalcpus} : $cluster_info->{totalcpus};

        # This number can be more than totalslots in case more
        # than the published cores can be used -- happens with fork
        my @queuenames = keys %{$lrms_info->{queues}};
        my $gridrunningslots = 0;
        for my $qname (@queuenames) {
            $gridrunningslots += $inlrmsslots{$qname}{running} if defined $inlrmsslots{$qname}{running};
        }
        my $localrunningslots = $cluster_info->{usedcpus} - $gridrunningslots;
        $cmgr->{SlotsUsedByLocalJobs} = ($localrunningslots < 0) ? 0 : $localrunningslots;
        $cmgr->{SlotsUsedByGridJobs} = $gridrunningslots;

        $cmgr->{Homogeneous} = $homogeneous ?
"true" : "false"; # NetworkInfo of all ExecutionEnvironments my %netinfo = (); for my $xeconfig (values %{$config->{xenvs}}) { $netinfo{$xeconfig->{NetworkInfo}} = 1 if $xeconfig->{NetworkInfo}; } $cmgr->{NetworkInfo} = [ keys %netinfo ] if %netinfo; # TODO: this could also be cross-checked with info from ExecEnvs my $cpuistribution = $cluster_info->{cpudistribution} || ''; $cpuistribution =~ s/cpu:/:/g; $cmgr->{LogicalCPUDistribution} = $cpuistribution if $cpuistribution; if (defined $host_info->{session_total}) { my $sharedsession = "true"; $sharedsession = "false" if lc($config->{arex}{shared_filesystem}) eq "no" or lc($config->{arex}{shared_filesystem}) eq "false"; $cmgr->{WorkingAreaShared} = $sharedsession; $cmgr->{WorkingAreaGuaranteed} = "false"; my $gigstotal = ceil($host_info->{session_total} / 1024); my $gigsfree = ceil($host_info->{session_free} / 1024); $cmgr->{WorkingAreaTotal} = $gigstotal; $cmgr->{WorkingAreaFree} = $gigsfree; # OBS: There is no special area for MPI jobs, no need to advertize anything #$cmgr->{WorkingAreaMPIShared} = $sharedsession; #$cmgr->{WorkingAreaMPITotal} = $gigstotal; #$cmgr->{WorkingAreaMPIFree} = $gigsfree; #$cmgr->{WorkingAreaMPILifeTime} = $sessionlifetime; } my ($sessionlifetime) = (split ' ', $config->{arex}{defaultttl}); $sessionlifetime ||= 7*24*60*60; $cmgr->{WorkingAreaLifeTime} = $sessionlifetime; if (defined $host_info->{cache_total}) { my $gigstotal = ceil($host_info->{cache_total} / 1024); my $gigsfree = ceil($host_info->{cache_free} / 1024); $cmgr->{CacheTotal} = $gigstotal; $cmgr->{CacheFree} = $gigsfree; } if ($config->{service}{Benchmark}) { my @bconfs = @{$config->{service}{Benchmark}}; $cmgr->{Benchmarks} = sub { return undef unless @bconfs; my ($type, $value) = split " ", shift @bconfs; my $bench = {}; $bench->{Type} = $type; $bench->{Value} = $value; $bench->{ID} = "urn:ogf:Benchmark:$hostname:$lrmsname:$type"; return $bench; }; } # Not publishing absolute paths #$cmgr->{TmpDir}; #$cmgr->{ScratchDir}; #$cmgr->{ApplicationDir}; # ExecutionEnvironments my $getExecutionEnvironments = sub { return undef unless my ($xenv, $dummy) = each %{$config->{xenvs}}; my $xeinfo = $xeinfos->{$xenv}; # Prepare flattened config hash for this xenv. my $xeconfig = { %{$config->{service}}, %{$config->{xenvs}{$xenv}} }; my $execenv = {}; my $execenvName = $1 if ( $xenvIDs{$xenv} =~ /(?:.*)\:(.*)$/ ); # $execenv->{Name} = $xenv; $execenv->{Name} = $execenvName; $execenv->{CreationTime} = $creation_time; $execenv->{Validity} = $validity_ttl; $execenv->{ID} = $xenvIDs{$xenv}; my $machine = $xeinfo->{machine}; if ($machine) { $machine =~ s/^x86_64/amd64/; $machine =~ s/^ia64/itanium/; $machine =~ s/^ppc/powerpc/; } my $sysname = $xeinfo->{sysname}; if ($sysname) { $sysname =~ s/^Linux/linux/; $sysname =~ s/^Darwin/macosx/; $sysname =~ s/^SunOS/solaris/; } elsif ($xeconfig->{OpSys}) { $sysname = 'linux' if grep /linux/i, @{$xeconfig->{OpSys}}; } $execenv->{Platform} = $machine ? 
$machine : 'UNDEFINEDVALUE'; # placeholder value
            $execenv->{TotalInstances} = $xeinfo->{ntotal} if defined $xeinfo->{ntotal};
            $execenv->{UsedInstances} = $xeinfo->{nbusy} if defined $xeinfo->{nbusy};
            $execenv->{UnavailableInstances} = $xeinfo->{nunavailable} if defined $xeinfo->{nunavailable};
            $execenv->{VirtualMachine} = glue2bool($xeconfig->{VirtualMachine}) if defined $xeconfig->{VirtualMachine};

            $execenv->{PhysicalCPUs} = $xeinfo->{pcpus} if $xeinfo->{pcpus};
            $execenv->{LogicalCPUs} = $xeinfo->{lcpus} if $xeinfo->{lcpus};
            if ($xeinfo->{pcpus} and $xeinfo->{lcpus}) {
                my $cpum  = ($xeinfo->{pcpus} > 1) ? 'multicpu' : 'singlecpu';
                my $corem = ($xeinfo->{lcpus} > $xeinfo->{pcpus}) ? 'multicore' : 'singlecore';
                $execenv->{CPUMultiplicity} = "$cpum-$corem";
            }
            $execenv->{CPUVendor} = $xeconfig->{CPUVendor} if $xeconfig->{CPUVendor};
            $execenv->{CPUModel} = $xeconfig->{CPUModel} if $xeconfig->{CPUModel};
            $execenv->{CPUVersion} = $xeconfig->{CPUVersion} if $xeconfig->{CPUVersion};
            $execenv->{CPUClockSpeed} = $xeconfig->{CPUClockSpeed} if $xeconfig->{CPUClockSpeed};
            $execenv->{CPUTimeScalingFactor} = $xeconfig->{CPUTimeScalingFactor} if $xeconfig->{CPUTimeScalingFactor};
            $execenv->{WallTimeScalingFactor} = $xeconfig->{WallTimeScalingFactor} if $xeconfig->{WallTimeScalingFactor};
            $execenv->{MainMemorySize} = $xeinfo->{pmem} || "999999999"; # placeholder value
            $execenv->{VirtualMemorySize} = $xeinfo->{vmem} if $xeinfo->{vmem};
            $execenv->{OSFamily} = $sysname || 'UNDEFINEDVALUE'; # placeholder value
            $execenv->{OSName} = $xeconfig->{OSName} if $xeconfig->{OSName};
            $execenv->{OSVersion} = $xeconfig->{OSVersion} if $xeconfig->{OSVersion};

            # If Connectivity* is not specified, assume false.
            # This was changed because the value is mandatory in the LDAP schema.
            $execenv->{ConnectivityIn} = glue2bool($xeconfig->{ConnectivityIn}) || 'FALSE'; # placeholder value
            $execenv->{ConnectivityOut} = glue2bool($xeconfig->{ConnectivityOut}) || 'FALSE'; # placeholder value
            $execenv->{NetworkInfo} = [ $xeconfig->{NetworkInfo} ] if $xeconfig->{NetworkInfo};

            if ($callcount == 1) {
                $log->info("MainMemorySize not set for ExecutionEnvironment $xenv, will default to 999999999")
                    unless $xeinfo->{pmem};
                $log->info("OSFamily not set for ExecutionEnvironment $xenv")
                    unless $sysname;
                $log->info("ConnectivityIn not set for ExecutionEnvironment $xenv, will default to false")
                    unless defined $xeconfig->{ConnectivityIn};
                $log->info("ConnectivityOut not set for ExecutionEnvironment $xenv, will default to false")
                    unless defined $xeconfig->{ConnectivityOut};
                my @missing;
                for (qw(Platform CPUVendor CPUModel CPUClockSpeed OSFamily OSName OSVersion)) {
                    push @missing, $_ unless defined $execenv->{$_};
                }
                $log->info("Missing attributes for ExecutionEnvironment $xenv: ".join ", ", @missing)
                    if @missing;
            }

            if ($xeconfig->{Benchmark}) {
                my @bconfs = @{$xeconfig->{Benchmark}};
                $execenv->{Benchmarks} = sub {
                    return undef unless @bconfs;
                    my ($type, $value) = split " ", shift @bconfs;
                    my $bench = {};
                    $bench->{Type} = $type;
                    $bench->{Value} = $value;
                    $bench->{ID} = "urn:ogf:Benchmark:$hostname:$execenvName:$type";
                    return $bench;
                };
            }

            # Associations
            for my $share (keys %{$GLUE2shares}) {
                my $sconfig = $GLUE2shares->{$share};
                next unless $sconfig->{ExecutionEnvironmentName};
                next unless grep { $xenv eq $_ } @{$sconfig->{ExecutionEnvironmentName}};
                push @{$execenv->{ComputingShareID}}, $cshaIDs{$share};
            }
            $execenv->{ManagerID} = $cmgrID;
            $execenv->{ComputingManagerID} = $cmgrID;

            return $execenv;
        };

        $cmgr->{ExecutionEnvironments} =
$getExecutionEnvironments; # ApplicationEnvironments my $getApplicationEnvironments = sub { return undef unless my ($rte, $rinfo) = each %$rte_info; my $appenv = {}; # name and version is separated at the first dash (-) which is followed by a digit my ($name,$version) = ($rte, undef); ($name,$version) = ($1, $2) if $rte =~ m{^(.*?)-([0-9].*)$}; $appenv->{AppName} = $name; $appenv->{AppVersion} = $version if defined $version; $appenv->{ID} = $aenvIDs{$rte}; $appenv->{State} = $rinfo->{state} if $rinfo->{state}; $appenv->{Description} = $rinfo->{description} if $rinfo->{description}; #$appenv->{ParallelSupport} = 'none'; # Associations $appenv->{ComputingManagerID} = $cmgrID; return $appenv; }; $cmgr->{ApplicationEnvironments} = $getApplicationEnvironments; # Associations $cmgr->{ServiceID} = $csvID; $cmgr->{ComputingServiceID} = $csvID; return $cmgr; }; $csv->{ComputingManager} = $getComputingManager; # Location and Contacts if (my $lconfig = $config->{location}) { my $count = 1; $csv->{Location} = sub { return undef if $count-- == 0; my $loc = {}; $loc->{ID} = "urn:ogf:Location:$hostname:ComputingService:arex"; for (qw(Name Address Place PostCode Country Latitude Longitude)) { $loc->{$_} = $lconfig->{$_} if defined $lconfig->{$_}; } $loc->{ServiceForeignKey} = $csvID; return $loc; } } if (my $cconfs = $config->{contacts}) { my $i = 0; $csv->{Contacts} = sub { return undef unless $i < scalar(@$cconfs); my $cconfig = $cconfs->[$i]; #my $detail = $cconfig->{Detail}; my $cont = {}; $cont->{ID} = "urn:ogf:Contact:$hostname:ComputingService:arex:con$i"; for (qw(Name Detail Type)) { $cont->{$_} = $cconfig->{$_} if $cconfig->{$_}; } $cont->{ServiceForeignKey} = $csvID; $i++; return $cont; }; } # Associations $csv->{AdminDomainID} = $adID; $csv->{ServiceID} = $csvID; return $csv; }; my $getAdminDomain = sub { my $dom = { ID => $adID, Name => $config->{admindomain}{Name}, OtherInfo => $config->{admindomain}{OtherInfo}, Description => $config->{admindomain}{Description}, WWW => $config->{admindomain}{WWW}, Owner => $config->{admindomain}{Owner}, CreationTime => $creation_time, Validity => $validity_ttl }; $dom->{Distributed} = glue2bool($config->{admindomain}{Distributed}); # TODO: Location and Contact for AdminDomain goes here. # Contacts can be multiple, don't know how to handle this # in configfile. # TODO: remember to sync ForeignKeys # Disabled for now, as it would only cause trouble. # if (my $lconfig = $config->{location}) { # my $count = 1; # $dom->{Location} = sub { # return undef if $count-- == 0; # my $loc = {}; # $loc->{ID} = "urn:ogf:Location:$hostname:AdminDomain:$admindomain"; # for (qw(Name Address Place PostCode Country Latitude Longitude)) { # $loc->{$_} = $lconfig->{$_} if defined $lconfig->{$_}; # } # return $loc; # } # } # if (my $cconfs = $config->{contacts}) { # my $i = 0; # $dom->{Contacts} = sub { # return undef unless $i < scalar(@$cconfs); # my $cconfig = $cconfs->[$i++]; # #my $detail = $cconfig->{Detail}; # my $cont = {}; # $cont->{ID} = "urn:ogf:Contact:$hostname:AdminDomain:$admindomain:$i"; # for (qw(Name Detail Type)) { # $cont->{$_} = $cconfig->{$_} if $cconfig->{$_}; # } # return $cont; # }; # } return $dom; }; # Other Services my $othersv = {}; #EndPoint here # aggregates services my $getServices = sub { return undef unless my ($service, $sub) = each %$othersv; # returns the hash for Entries. 
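    # Note on the convention used for every get* value in this tree: each
    # one is a lazy iterator, i.e. a sub that returns one entry per call
    # and undef when exhausted. A minimal sketch of how a consumer (the
    # GLUE2 renderer) is expected to drain such an iterator -- illustrative
    # only, 'render_entry' is a hypothetical callback, not part of this
    # module:
    #
    #   while (defined(my $entry = &$getServices)) {
    #       render_entry($entry);   # emit one GLUE2 object
    #   }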
# (each %$othersv yields a service name and a generator sub; calling the sub produces the entries.)
        return &$sub;
    };

    # TODO: UserDomain
    my $getUserDomain = sub {
        my $ud = {};
        $ud->{CreationTime} = $creation_time;
        $ud->{Validity} = $validity_ttl;
        $ud->{ID} = $udID;
        $ud->{Name} = "";
        $ud->{OtherInfo} = $config->{service}{OtherInfo} if $config->{service}{OtherInfo}; # array
        $ud->{Description} = '';
        # Number of hops to reach the root
        $ud->{Level} = 0;
        # Endpoint of some service, such as VOMS server
        $ud->{UserManager} = 'http://voms.nordugrid.org';
        # List of users
        $ud->{Member} = [ 'users here' ];
        # TODO: Calculate Policies, ContactID and LocationID
        # Associations
        $ud->{UserDomainID} = $udID;
        return $ud;
    };

    # TODO: ToStorageElement
    my $getToStorageElement = sub {
        my $tse = {};
        $tse->{CreationTime} = $creation_time;
        $tse->{Validity} = $validity_ttl;
        $tse->{ID} = $tseID;
        $tse->{Name} = "";
        $tse->{OtherInfo} = ''; # array
        # Local path on the machine to access storage, for example a NFS share
        $tse->{LocalPath} = 'String';
        # Remote path in the Storage Service associated with the local path above
        $tse->{RemotePath} = 'String';
        # Associations
        $tse->{ComputingService} = $csvID;
        $tse->{StorageService} = '';
        return $tse;
    };

    # returns the two branches for =grid and =services GroupName.
    # It's not optimal but it doesn't break recursion
    my $GLUE2InfoTreeRoot = sub {
        my $treeroot = { AdminDomain => $getAdminDomain,
                         UserDomain => $getUserDomain,
                         ComputingService => $getComputingService,
                         Services => $getServices };
        return $treeroot;
    };

    return $GLUE2InfoTreeRoot;
}

1;
nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/ARC0ClusterInfo.pm0000644000000000000000000000013115067751327025072 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 29 ctime=1759499029.82251864 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/ARC0ClusterInfo.pm0000644000175000002070000006500315067751327027001 0ustar00mockbuildmock00000000000000package ARC0ClusterInfo;

# This information collector combines the output of the other information collectors
# and prepares info modelled on the classic Nordugrid information schema (arc0).

use POSIX;
use Storable;
use strict;

use LogUtils;

our $log = LogUtils->getLogger(__PACKAGE__);

sub mds_date {
    my $seconds = shift;
    return strftime("%Y%m%d%H%M%SZ", gmtime($seconds));
}

# sub to pick a value in order: the first value is preferred over the others.
# Can take as many parameters as one wants.
sub prioritizedvalues {
    my @values = @_;
    my $numelements = scalar @values;
    while (@values) {
        my $current = shift @values;
        return $current if (((defined $current) and ($current ne '')) or ( $numelements == 1));
    }
    # just in case all the above fails, return undef
    $log->debug("No suitable value found in call to prioritizedvalues. Returning undefined");
    return undef;
}

# optimization for GDPR, hash of DN->sha512hash
my $dnhashes = {};

# sub to create a sha512 hash using coreutils' sha512sum
# input: a text string, usually a user DN, a hash of DN->hash
sub sha512sum {
    my ($text,$dnhashes) = @_;
    my $digestfromcmd = defined $dnhashes->{$text} ? $dnhashes->{$text} : '';
    if ( (!
defined $digestfromcmd) or ($digestfromcmd eq '') ) { my $digestcmd = "echo -n \'$text\' | sha512sum -t"; open(my $shasum, "-|", $digestcmd) // $log->warning("Fork failed while running $digestcmd, error: $!"); while (my $cmdout = <$shasum>) { chomp $cmdout; $digestfromcmd = substr($cmdout, 0, index($cmdout, " ")); } close($shasum); }; # should the encoding fail, we put a placeholder if ( $digestfromcmd eq '' or $digestfromcmd =~ /\s/) { $digestfromcmd = 'UNDEFINEDVALUE'; $log->warning("sha512sum failed in ".__PACKAGE__.".pm, using placeholder $digestfromcmd"); } $dnhashes->{$text} = $digestfromcmd; return $digestfromcmd; } ############################################################################ # Combine info from all sources to prepare the final representation ############################################################################ sub collect($) { my ($data) = @_; my $config = $data->{config}; my $usermap = $data->{usermap}; my $host_info = $data->{host_info}; my $rte_info = $data->{rte_info}; my $gmjobs_info = $data->{gmjobs_info}; my $lrms_info = $data->{lrms_info}; my $nojobs = $data->{nojobs}; my @allxenvs = keys %{$config->{xenvs}}; my @allshares = keys %{$config->{shares}}; # homogeneity of the cluster my $homogeneous; if (defined $config->{service}{Homogeneous}) { $homogeneous = $config->{service}{Homogeneous}; } else { # not homogeneous if there are multiple ExecEnvs $homogeneous = @allxenvs > 1 ? 0 : 1; # not homogeneous if one ExecEnv is not homogeneous for my $xeconfig (values %{$config->{xenvs}}) { $homogeneous = 0 if defined $xeconfig->{Homogeneous} and not $xeconfig->{Homogeneous}; } } # config overrides my $hostname = $config->{hostname} || $host_info->{hostname}; # count grid-manager jobs my %gmjobcount = (totaljobs => 0, accepted => 0, preparing => 0, submit => 0, inlrms => 0, canceling => 0, finishing => 0, finished => 0, deleted => 0); for my $job (values %{$gmjobs_info}) { $gmjobcount{totaljobs}++; if ( $job->{status} =~ /ACCEPTED/ ) { $gmjobcount{accepted}++ ; next; } if ( $job->{status} =~ /PREPARING/) { $gmjobcount{preparing}++; next; } if ( $job->{status} =~ /SUBMIT/ ) { $gmjobcount{submit}++ ; next; } if ( $job->{status} =~ /INLRMS/ ) { $gmjobcount{inlrms}++ ; next; } if ( $job->{status} =~ /CANCELING/) { $gmjobcount{canceling}++; next; } if ( $job->{status} =~ /FINISHING/) { $gmjobcount{finishing}++; next; } if ( $job->{status} =~ /FINISHED/ ) { $gmjobcount{finished}++ ; next; } if ( $job->{status} =~ /FAILED/ ) { $gmjobcount{finished}++ ; next; } if ( $job->{status} =~ /KILLED/ ) { $gmjobcount{finished}++ ; next; } if ( $job->{status} =~ /DELETED/ ) { $gmjobcount{deleted}++ ; next; } $log->warning("Unexpected job status: $job->{status}"); } # count grid jobs running and queued in LRMS for each queue my %gridrunning; my %gridqueued; for my $jobid (keys %{$gmjobs_info}) { my $job = $gmjobs_info->{$jobid}; my $share = $job->{share}; if ($job->{status} eq 'INLRMS') { my $lrmsid = $job->{localid}; unless (defined $lrmsid) { $log->warning("localid missing for INLRMS job $jobid"); next; } my $lrmsjob = $lrms_info->{jobs}{$lrmsid}; unless ((defined $lrmsjob) and $lrmsjob->{status}) { $log->warning("LRMS plugin returned no status for job $jobid (lrmsid: $lrmsid)"); next; } if ((defined $lrmsjob) and $lrmsjob->{status} ne 'EXECUTED') { if ($lrmsjob->{status} eq 'R' or $lrmsjob->{status} eq 'S') { $gridrunning{$share} += $lrmsjob->{cpus}; } else { $gridqueued{$share}++; } } } } my %prelrmsqueued; my %pendingprelrms; my %gm_queued; my @gmqueued_states = 
("ACCEPTED","PENDING:ACCEPTED","PREPARING","PENDING:PREPARING","SUBMIT"); my @gmpendingprelrms_states =("PENDING:ACCEPTED","PENDING:PREPARING" ); for my $job_gridowner (keys %$usermap) { $gm_queued{$job_gridowner} = 0; } for my $ID (keys %{$gmjobs_info}) { my $share = $gmjobs_info->{$ID}{share}; # set the job_gridowner of the job (read from the job.id.local) # which is used as the key of the %gm_queued my $job_gridowner = $gmjobs_info->{$ID}{subject}; # count the gm_queued jobs per grid users (SNs) and the total if ( grep /^$gmjobs_info->{$ID}{status}$/, @gmqueued_states ) { $gm_queued{$job_gridowner}++; $prelrmsqueued{$share}++; } # count the GM PRE-LRMS pending jobs if ( grep /^$gmjobs_info->{$ID}{status}$/, @gmpendingprelrms_states ) { $pendingprelrms{$share}++; } } # Grid Manager job state mappings to Infosys job states my %map_always = ( 'ACCEPTED' => 'ACCEPTING', 'PENDING:ACCEPTED' => 'ACCEPTED', 'PENDING:PREPARING' => 'PREPARED', 'PENDING:INLRMS' => 'EXECUTED', 'CANCELING' => 'KILLING'); my %map_if_gm_up = ( 'SUBMIT' => 'SUBMITTING'); my %map_if_gm_down = ( 'PREPARING' => 'ACCEPTED', 'FINISHING' => 'EXECUTED', 'SUBMIT' => 'PREPARED'); # Infosys is run by A-REX: Always assume GM is up $host_info->{processes}{'grid-manager'} = 1; for my $job (values %$gmjobs_info) { $job->{status} = $map_always{$job->{status}} if grep { $job->{status} eq $_ } keys %map_always; if ($host_info->{processes}{'grid-manager'}) { $job->{status} = $map_if_gm_up{$job->{status}} if grep { $job->{status} eq $_ } keys %map_if_gm_up; } else { $job->{status} = $map_if_gm_down{$job->{status}} if grep { $job->{status} eq $_ } keys %map_if_gm_down; } } my @supportmails; if ($config->{contacts}) { for (@{$config->{contacts}}) { push @supportmails, $1 if $_->{Detail} =~ m/^mailto:(.*)/; } } my @advertisedvos = (); if ($config->{service}{AdvertisedVO}) { @advertisedvos = @{$config->{service}{AdvertisedVO}}; # add VO: suffix to each advertised VO @advertisedvos = map { "VO:".$_ } @advertisedvos; } # Assume no connectivity unles explicitly configured otherwise on each # ExecutionEnvironment my ($inbound, $outbound) = (1,1); for my $xeconfig (values %{$config->{xenvs}}) { $inbound = 0 unless ($xeconfig->{connectivityIn} || 'false') eq 'true'; $outbound = 0 unless ($xeconfig->{connectivityOut} || 'false') eq 'true'; } $inbound = 1 if ($config->{service}{connectivityIn} || 'false') eq 'true'; $outbound = 1 if ($config->{service}{connectivityOut} || 'false') eq 'true'; # the earliest of hostcert and cacert enddates. my $credenddate; if ($host_info->{issuerca_enddate} and $host_info->{hostcert_enddate}) { $credenddate = ( $host_info->{hostcert_enddate} lt $host_info->{issuerca_enddate} ) ? $host_info->{hostcert_enddate} : $host_info->{issuerca_enddate}; } my $callcount = 0; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # build information tree # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $getCluster = sub { $callcount++; my $c = {}; $c->{name} = $hostname; $c->{aliasname} = $config->{service}{ClusterAlias} if $config->{service}{ClusterAlias}; $c->{comment} = $config->{service}{ClusterComment} if $config->{service}{ClusterComment}; # added to help client to match GLUE2 services on the same machine $c->{comment} = $c->{comment} ? 
$c->{comment}."; GLUE2ServiceID=urn:ogf:ComputingService:$hostname:arex" : "GLUE2ServiceID=urn:ogf:ComputingService:$hostname:arex"; # GLUE2ComputingService ID $c->{owner} = $config->{service}{ClusterOwner} if $config->{service}{ClusterOwner}; $c->{acl} = [ @advertisedvos ] if @advertisedvos; $c->{location} = $config->{location}{PostCode} if $config->{location}{PostCode}; $c->{issuerca} = $host_info->{issuerca} if $host_info->{issuerca}; $c->{'issuerca-hash'} = $host_info->{issuerca_hash} if $host_info->{issuerca_hash}; $c->{credentialexpirationtime} = mds_date($credenddate) if $credenddate; $c->{trustedca} = $host_info->{trustedcas} if $host_info->{trustedcas}; # Solution: use contactstring from REST? # $c->{contactstring} = "gsiftp://$hostname:".$config->{gridftpd}{port}.$config->{gridftpd}{mountpoint} if ($config->{gridftpd}{enabled}); $c->{contactstring} = $config->{arex}{ws}{wsurl}; # Removed from ConfigCentral. Left here in case it still makes sense. #$c->{'interactive-contactstring'} = $config->{service}{InteractiveContactstring} if $config->{service}{InteractiveContactstring}; $c->{support} = [ @supportmails ] if @supportmails; $c->{'lrms-type'} = $lrms_info->{cluster}{lrms_type}; $c->{'lrms-version'} = $lrms_info->{cluster}{lrms_version} if $lrms_info->{cluster}{lrms_version}; $c->{'lrms-config'} = $config->{service}{lrmsconfig} if $config->{service}{lrmsconfig}; # orphan $c->{architecture} = $config->{service}{Platform} if $config->{service}{Platform}; push @{$c->{opsys}}, @{$config->{service}{OpSys}} if $config->{service}{OpSys}; push @{$c->{opsys}}, $config->{service}{OSName}.'-'.$config->{service}{OSVersion} if $config->{service}{OSName} and $config->{service}{OSVersion}; $c->{benchmark} = [ map {join ' @ ', split /\s+/,$_,2 } @{$config->{service}{Benchmark}} ] if $config->{service}{Benchmark}; $c->{nodecpu} = $config->{service}{CPUModel}." @ ".$config->{service}{CPUClockSpeed}." MHz" if $config->{service}{CPUModel} and $config->{service}{CPUClockSpeed}; $c->{homogeneity} = $homogeneous ? 
'TRUE' : 'FALSE'; $c->{nodememory} = $config->{service}{MaxVirtualMemory} if ( $homogeneous && $config->{service}{MaxVirtualMemory} ); $c->{nodeaccess} = 'inbound' if $inbound; $c->{nodeaccess} = 'outbound' if $outbound; if ($config->{service}{totalcpus}) { $c->{totalcpus} = $config->{service}{totalcpus}; } else { $c->{totalcpus} = $lrms_info->{cluster}{totalcpus}; } $c->{usedcpus} = $lrms_info->{cluster}{usedcpus}; $c->{cpudistribution} = $lrms_info->{cluster}{cpudistribution}; $c->{prelrmsqueued} = ($gmjobcount{accepted} + $gmjobcount{preparing} + $gmjobcount{submit}); $c->{totaljobs} = ($gmjobcount{totaljobs} - $gmjobcount{finishing} - $gmjobcount{finished} - $gmjobcount{deleted} + $lrms_info->{cluster}{queuedcpus} + $lrms_info->{cluster}{usedcpus} - $gmjobcount{inlrms}); $c->{localse} = $config->{service}{LocalSE} if $config->{service}{LocalSE}; $c->{'sessiondir-free'} = $host_info->{session_free}; $c->{'sessiondir-total'} = $host_info->{session_total}; if ($config->{control}{'.'}{defaultttl}) { my ($sessionlifetime) = split ' ', $config->{control}{'.'}{defaultttl}; $c->{'sessiondir-lifetime'} = int $sessionlifetime/60 if $sessionlifetime; } $c->{'cache-free'} = $host_info->{cache_free}; $c->{'cache-total'} = $host_info->{cache_total}; $c->{runtimeenvironment} = [ sort keys %$rte_info ]; push @{$c->{middleware}}, "nordugrid-arc-".$config->{arcversion}; push @{$c->{middleware}}, "globus-$host_info->{globusversion}" if $host_info->{globusversion}; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $getQueues = sub { return undef unless my ($share, $dummy) = each %{$config->{shares}}; my $q = {}; my $qinfo = $lrms_info->{queues}{$share}; # merge cluster wide and queue-specific options my $sconfig = { %{$config->{service}}, %{$config->{shares}{$share}} }; my @queue_advertisedvos = (); if ($sconfig->{AdvertisedVO}) { @queue_advertisedvos = @{$sconfig->{AdvertisedVO}}; # add VO: suffix to each advertised VO @queue_advertisedvos = map { "VO:".$_ } @queue_advertisedvos; } $sconfig->{ExecutionEnvironmentName} ||= []; my @nxenvs = @{$sconfig->{ExecutionEnvironmentName}}; if (@nxenvs) { my $xeconfig = $config->{xenvs}{$nxenvs[0]}; $log->info("The Nordugrid InfoSchema is not compatible with multiple ExecutionEnvironments per share") if @nxenvs > 1; $sconfig = { %$sconfig, %$xeconfig }; } $q->{'name'} = $share; if ( defined $config->{arex}{ws}{jobs} and $config->{arex}{ws}{jobs}{enabled} == 1 and $config->{arex}{ws}{jobs}{allownew} == 0 ) { $q->{status} = 'inactive, a-rex does not accept new jobs'; } elsif ( $host_info->{gm_alive} ne 'all' ) { if ($host_info->{gm_alive} eq 'some') { $q->{status} = 'degraded, one or more grid-managers are down'; } else { $q->{status} = $config->{remotegmdirs} ? 
'inactive, all grid managers are down' : 'inactive, grid-manager is down'; } } elsif (not $host_info->{hostcert_enddate} or not $host_info->{issuerca_enddate}) { $q->{status} = 'inactive, host credentials missing'; } elsif ($host_info->{hostcert_expired} or $host_info->{issuerca_expired}) { $q->{status} = 'inactive, host credentials expired'; } elsif ( $qinfo->{status} < 0 ) { $q->{status} = 'inactive, LRMS interface returns negative status'; } else { $q->{status} = 'active'; } $q->{comment}=$sconfig->{Description} if $sconfig->{Description}; if ( defined $sconfig->{OtherInfo}) { my @sotherinfo = @{ $sconfig->{OtherInfo} }; $q->{comment} = "$q->{comment}, OtherInfo: @sotherinfo"; } $q->{schedulingpolicy} = $sconfig->{SchedulingPolicy} if $sconfig->{SchedulingPolicy}; if (defined $sconfig->{Homogeneous}) { $q->{homogeneity} = $sconfig->{Homogeneous} ? 'TRUE' : 'FALSE'; } else { $q->{homogeneity} = @nxenvs > 1 ? 'FALSE' : 'TRUE'; } $q->{nodecpu} = $sconfig->{CPUModel}." @ ".$sconfig->{CPUClockSpeed}." MHz" if $sconfig->{CPUModel} and $sconfig->{CPUClockSpeed}; $q->{nodememory} = $sconfig->{MaxVirtualMemory} if $sconfig->{MaxVirtualMemory}; $q->{architecture} = $sconfig->{Platform} if $sconfig->{Platform}; # override instead of merging if ($sconfig->{OSName} and $sconfig->{OSVersion}) { push @{$q->{opsys}}, $sconfig->{OSName}.'-'.$sconfig->{OSVersion} } else { push @{$q->{opsys}}, @{$sconfig->{OpSys}} if $sconfig->{OpSys}; } $q->{benchmark} = [ map {join ' @ ', split /\s+/,$_,2 } @{$sconfig->{Benchmark}} ] if $sconfig->{Benchmark}; $q->{maxrunning} = $qinfo->{maxrunning} if defined $qinfo->{maxrunning}; $q->{maxqueuable}= $qinfo->{maxqueuable}if defined $qinfo->{maxqueuable}; $q->{maxuserrun} = $qinfo->{maxuserrun} if defined $qinfo->{maxuserrun}; $q->{maxcputime} = prioritizedvalues($sconfig->{maxcputime},$qinfo->{maxcputime}); $q->{maxcputime} = defined $q->{maxcputime} ? int $q->{maxcputime}/60 : undef; $q->{mincputime} = prioritizedvalues($sconfig->{mincputime},$qinfo->{mincputime}); $q->{mincputime} = defined $q->{mincputime} ? int $q->{mincputime}/60 : undef; $q->{defaultcputime} = int $qinfo->{defaultcput}/60 if defined $qinfo->{defaultcput}; $q->{maxwalltime} = prioritizedvalues($sconfig->{maxwalltime},$qinfo->{maxwalltime}); $q->{maxwalltime} = defined $q->{maxwalltime} ? int $q->{maxwalltime}/60 : undef; $q->{minwalltime} = prioritizedvalues($sconfig->{minwalltime},$qinfo->{minwalltime}); $q->{minwalltime} = defined $q->{minwalltime} ? 
int $q->{minwalltime}/60 : undef;
        $q->{defaultwalltime} = int $qinfo->{defaultwallt}/60 if defined $qinfo->{defaultwallt};
        $q->{running} = $qinfo->{running} if defined $qinfo->{running};
        $q->{gridrunning} = $gridrunning{$share} || 0;
        $q->{gridqueued} = $gridqueued{$share} || 0;
        $q->{localqueued} = ($qinfo->{queued} - ( $gridqueued{$share} || 0 ));
        if ( $q->{localqueued} < 0 ) {
            $q->{localqueued} = 0;
        }
        $q->{prelrmsqueued} = $prelrmsqueued{$share} || 0;
        if ( $sconfig->{totalcpus} ) {
            $q->{totalcpus} = $sconfig->{totalcpus}; # orphan
        } elsif ( $qinfo->{totalcpus} ) {
            $q->{totalcpus} = $qinfo->{totalcpus};
        }
        $q->{acl} = [ @queue_advertisedvos ] if @queue_advertisedvos;

        keys %$gmjobs_info; # reset iterator of each()

        # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

        my $getJobs = sub {

            # find the next job that belongs to the current share
            my ($jobid, $gmjob);
            while (1) {
                return undef unless ($jobid, $gmjob) = each %$gmjobs_info;
                last if $gmjob->{share} eq $share;
            }

            my $j = {};

            $j->{name} = $jobid;
            # Old globalid code used the gridftp URL; it is left here in case
            # it is needed for backward compatibility
            #$j->{globalid} = $c->{contactstring}."/$jobid";
            # ARC7 contactstring for job: use GLUE2 IDFromEndpoint
            $j->{globalid} = "urn:idfe:$jobid";
            # Starting from ARC 6.10 we put a hash here for GDPR compliance.
            $j->{globalowner} = sha512sum($gmjob->{subject},$dnhashes) if $gmjob->{subject};
            $j->{jobname} = $gmjob->{jobname} if $gmjob->{jobname};
            $j->{submissiontime} = $gmjob->{starttime} if $gmjob->{starttime};
            $j->{execcluster} = $hostname if $hostname;
            $j->{execqueue} = [ $share ] if $share;
            $j->{cpucount} = [ $gmjob->{count} || 1 ];
            $j->{sessiondirerasetime} = [ $gmjob->{cleanuptime} ] if $gmjob->{cleanuptime};
            $j->{stdin}  = [ $gmjob->{stdin} ]  if $gmjob->{stdin};
            $j->{stdout} = [ $gmjob->{stdout} ] if $gmjob->{stdout};
            $j->{stderr} = [ $gmjob->{stderr} ] if $gmjob->{stderr};
            $j->{gmlog}  = [ $gmjob->{gmlog} ]  if $gmjob->{gmlog};
            $j->{runtimeenvironment} = $gmjob->{runtimeenvironments} if $gmjob->{runtimeenvironments};
            $j->{submissionui} = $gmjob->{clientname} if $gmjob->{clientname};
            $j->{clientsoftware} = $gmjob->{clientsoftware} if $gmjob->{clientsoftware};
            $j->{proxyexpirationtime} = $gmjob->{delegexpiretime} if $gmjob->{delegexpiretime};
            $j->{rerunable} = $gmjob->{failedstate} ?
$gmjob->{failedstate} : 'none' if $gmjob->{status} eq "FAILED"; $j->{comment} = [ $gmjob->{comment} ] if $gmjob->{comment}; # added to record which was the submission interface if ( $gmjob->{interface} ) { my $submittedstring = 'SubmittedVia='.$gmjob->{interface}; push(@{$j->{comment}}, $submittedstring); }; $j->{reqcputime} = int $gmjob->{reqcputime}/60 if $gmjob->{reqcputime}; $j->{reqwalltime} = int $gmjob->{reqwalltime}/60 if $gmjob->{reqwalltime}; if ($gmjob->{status} eq "INLRMS") { my $localid = $gmjob->{localid} or $log->warning("No local id for job $jobid") and next; my $lrmsjob = $lrms_info->{jobs}{$localid} or $log->warning("No local job for $jobid") and next; $j->{usedmem} = $lrmsjob->{mem} if defined $lrmsjob->{mem}; $j->{usedwalltime}= int $lrmsjob->{walltime}/60 if defined $lrmsjob->{walltime}; $j->{usedcputime} = int $lrmsjob->{cputime}/60 if defined $lrmsjob->{cputime}; $j->{reqwalltime} = int $lrmsjob->{reqwalltime}/60 if defined $lrmsjob->{reqwalltime}; $j->{reqcputime} = int $lrmsjob->{reqcputime}/60 if defined $lrmsjob->{reqcputime}; $j->{executionnodes} = $lrmsjob->{nodes} if $lrmsjob->{nodes}; if ($lrms_info->{cluster}{lrms_type} eq "boinc") { # BOINC allocates a dynamic number of cores to jobs so set here what is actually used # This is abusing the schema a bit since cpucount is really requested slots $j->{cpucount} = int $lrmsjob->{cpus} if defined $lrmsjob->{cpus}; } # LRMS-dependent attributes taken from LRMS when the job # is in state 'INLRMS' #nuj0:status # take care of the GM latency, check if the job is in LRMS # according to both GM and LRMS, GM might think the job # is still in LRMS while the job have already left LRMS if ($lrmsjob->{status} and $lrmsjob->{status} ne 'EXECUTED') { $j->{status} = "INLRMS:$lrmsjob->{status}"; } else { $j->{status} = 'EXECUTED'; } push @{$j->{comment}}, @{$lrmsjob->{comment}} if $lrmsjob->{comment}; $j->{queuerank} = $lrmsjob->{rank} if $lrmsjob->{rank}; } else { # LRMS-dependent attributes taken from GM when # the job has passed the 'INLRMS' state $j->{status} = $gmjob->{status}; $j->{usedwalltime} = int $gmjob->{WallTime}/60 if defined $gmjob->{WallTime}; $j->{usedcputime} = int $gmjob->{CpuTime}/60 if defined $gmjob->{CpuTime}; $j->{executionnodes} = $gmjob->{nodenames} if $gmjob->{nodenames}; $j->{usedmem} = $gmjob->{UsedMem} if $gmjob->{UsedMem}; $j->{completiontime} = $gmjob->{completiontime} if $gmjob->{completiontime}; $j->{errors} = join "; ", @{$gmjob->{errors}} if $gmjob->{errors}; $j->{exitcode} = $gmjob->{exitcode} if defined $gmjob->{exitcode}; } return $j; }; $q->{jobs} = $getJobs; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # my $usernumber = 0; keys %$usermap; # reset iterator of each() my $getUsers = sub { # find the next user that is authorized in this queue my ($sn, $localid, $lrms_user); while (1) { return undef unless ($sn, $localid) = each %$usermap; # skip users whose SNs need to be base64 encoded if ($sn =~ /^[\s,:<]/ or $sn =~ /[\x0D\x0A\x00]/ or $sn =~ /[^\x00-\x7F]/) { $log->warning("While collecting info for queue $q->{'name'}: user with sn $sn will not be published due to characters that require base64 encoding. Skipping"); next; } $lrms_user = $qinfo->{users}{$localid}; last if not exists $qinfo->{acl_users}; last if grep { $_ eq $localid } @{$qinfo->{acl_users}}; } my $u = {}; ++$usernumber; my $space = $host_info->{localusers}{$localid}; #name= CN from the SN + unique number my $cn = ($sn =~ m#/CN=([^/]+)(/Email)?#) ? 
$1 : $sn; $u->{name} = "${cn}...$usernumber"; $u->{sn} = $sn; $u->{diskspace} = $space->{diskfree} if defined $space->{diskfree}; my @freecpus; # sort by decreasing number of cpus for my $ncpu ( sort { $b <=> $a } keys %{$lrms_user->{freecpus}} ) { my $minutes = $lrms_user->{freecpus}{$ncpu}; push @freecpus, $minutes ? "$ncpu:$minutes" : $ncpu; } $u->{freecpus} = join(" ", @freecpus) || 0; $u->{queuelength} = $gm_queued{$sn} + $lrms_user->{queuelength}; return $u; }; $q->{users} = $getUsers; return $q; }; $c->{queues} = $getQueues; return $c; }; return $getCluster; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/ConfigCentral.pm.in0000644000000000000000000000013015067751327025351 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.866493666 29 ctime=1759499029.83817846 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/ConfigCentral.pm.in0000644000175000002070000010402015067751327027252 0ustar00mockbuildmock00000000000000package ConfigCentral; # Builds an intermediate config structure used by infoproviders. # OBS: the structure is NOT the same as arc.conf. use strict; use warnings; use File::Basename; use Sys::Hostname; # added to parse JSON stuff binmode STDOUT, ":utf8"; use utf8; use XML::Simple; use Data::Dumper qw(Dumper); use JSON::XS; #use Data::Dumper::Concise; use IniParser; use InfoChecker; use LogUtils; # while parsing, loglevel is WARNING (the default) our $log = LogUtils->getLogger(__PACKAGE__); ####################################################################### ## Block dependencies ####################################################################### my $blockdependencies = { 'infosys/cluster' => ['infosys'], 'infosys/ldap' => ['infosys'], 'infosys/nordugrid' => ['infosys/ldap'], 'infosys/glue2' => ['infosys'], 'infosys/glue2/ldap' => ['infosys/glue2','infosys/ldap'], }; ###################################################################### # Legacy Internal representation of configuration data after parsing # ###################################################################### my $lrms_options = { pbs_bin_path => '*', pbs_log_path => '*', dedicated_node_string => '*', condor_bin_path => '*', condor_config => '*', condor_rank => '*', sge_bin_path => '*', sge_root => '*', sge_cell => '*', sge_qmaster_port => '*', sge_execd_port => '*', lsf_bin_path => '*', lsf_profile_path => '*', ll_bin_path => '*', slurm_bin_path => '*', slurm_wakeupperiod => '*', boinc_db_host => '*', boinc_db_port => '*', boinc_db_name => '*', boinc_db_user => '*', boinc_db_pass => '*' }; my $lrms_share_options = { queue_node_string => '*', condor_requirements => '*', sge_jobopts => '*', lsf_architecture => '*', ll_consumable_resources => '*' }; my $xenv_options = { Platform => '*', Homogeneous => '*', PhysicalCPUs => '*', LogicalCPUs => '*', CPUVendor => '*', CPUModel => '*', CPUVersion => '*', CPUClockSpeed => '*', CPUTimeScalingFactor => '*', WallTimeScalingFactor => '*', MainMemorySize => '*', VirtualMemorySize => '*', OSFamily => '*', OSName => '*', OSVersion => '*', VirtualMachine => '*', NetworkInfo => '*', ConnectivityIn => '*', ConnectivityOut => '*', Benchmark => [ '*' ], OpSys => [ '*' ], nodecpu => '*', }; my $share_options = { MaxVirtualMemory => '*', MaxSlotsPerJob => '*', SchedulingPolicy => '*', Preemption => '*', totalcpus => '*', defaultmemory => '*', AdvertisedVO => [ '*' ], maxcputime => '*', maxwalltime => '*', mincputime => '*', minwalltime => '*' }; my $cache_options = { cachedir => [ '*' ], cachesize => '*' }; my $common_options = { arcversion => 
'', hostname => '*', x509_host_key => '*', x509_host_cert => '*', x509_cert_dir => '*', x509_cert_policy => '', enable_perflog_reporting => '*', perflogdir => '*', # TODO: Maxload is about max downloadable streams. Most likely this is now configured via datastaging. Used for MaxStageIn|OutStreams. #maxload => '*', }; my $sshcommon_options = { remote_user => '*', remote_host => '*', remote_sessiondir => '*', private_key => '*', }; my $ldap_infosys_options = { enabled => '', port => '*', infosys_ldap_run_dir => '*', validity_ttl => '*', # These values have been checked to be used by infoproviders. user => '*', bdii_run_dir => '*', bdii_log_dir => '*', bdii_tmp_dir => '*', bdii_var_dir => '*', bdii_update_pid_file => '*' ## TODO: Do the infosys need to know this number? check InfosysHelper. ## fix this elsewere, the value should be set according to the formula below ## not used directly by infoproviders, maybe by bdii config? ## this is probably done in the startup script, is called max_cycle ## bdii_read_timeout = number - Sets BDII_READ_TIMEOUT in bdii configuration file ## default: $bdii_provider_timeout + $infoproviders_timelimit + $wakeupperiod #bdii_read_timeout=300 }; # [arex] subblocks my $wsjobs_options = { allownew => '*', enabled => '' }; my $ws_options = { enabled => '', wsurl => '*', jobs => { %$wsjobs_options } }; my $admindomain_options = { Name => '*', Description => '*', WWW => '*', Distributed => '*', Owner => '*', OtherInfo => '*', }; my $glue2_options = { enabled => '', computingservice_qualitylevel => '*' }; my $arex_options = { enabled => '', user => '', defaultttl => '*', logfile => '*', loglevel => '*', infoproviders_timelimit => '*', port => '*', arexhostport => '', controldir => '', sessiondir => [ '' ], runtimedir => '*', shared_filesystem => '*', shared_scratch => '*', scratchdir => '*', maxjobs => '*', wakeupperiod => '*', }; # # # # # # # # # # # # # # my $config_schema = { ttl => '*', admindomain => { %$admindomain_options }, %$common_options, # TODO: what to do with these? add ssh block? %$sshcommon_options, # service is a mix of nordugrid "cluster" and glue2 concepts, taking info from different sources. 
# Translate/copy just what is needed from infosys/cluster service => { OtherInfo => [ '*' ], StatusInfo => [ '*' ], Downtime => '*', ClusterName => '*', ClusterAlias => '*', ClusterComment => '*', ClusterOwner => [ '*' ], AdvertisedVO => [ '*' ], LocalSE => [ '*' ], QualityLevel => '', # TODO: such hashes might make odd options bypass the checks, review InfoChecker # These below are repeated here to implement infosys/cluster level options that # apply to all xenvs and shares %$xenv_options, %$share_options }, location => { Name => '*', Address => '*', Place => '*', Country => '*', PostCode => '*', Latitude => '*', Longitude => '*', }, contacts => [ { Name => '*', OtherInfo => [ '*' ], Detail => '', Type => '', } ], accesspolicies => { '*' => { Rule => [ '' ], UserDomainID => [ '' ] } }, mappingpolicies => { '*' => { ShareName => [ '' ], Rule => [ '' ], UserDomainID => [ '' ], } }, xenvs => { '*' => { OtherInfo => [ '*' ], NodeSelection => { Regex => [ '*' ], Command => [ '*' ], Tag => [ '*' ], }, %$xenv_options } }, shares => { '*' => { Description => '*', OtherInfo => [ '*' ], MappingQueue => '', ExecutionEnvironmentName => [ '' ], %$share_options, %$lrms_share_options } }, lrms => { lrms => '', defaultqueue => '*', lrmsconfig => '*', %$lrms_options, %$lrms_share_options }, infosys => { enabled => '', allowaccess => { enabled => '*' }, nordugrid => { enabled => '*' }, logfile => '*', loglevel => '*', validity_ttl => '*', ldap => { %$ldap_infosys_options, }, glue2 => { %$glue2_options, ldap => { enabled => '', showactivities => '*' } } }, arex => { %$arex_options, ws => { %$ws_options }, cache => { %$cache_options } }, }; my $allbools = [ qw( Homogeneous VirtualMachine ConnectivityIn ConnectivityOut Preemption showactivities shared_filesystem enabled allownew Distributed) ]; ############################ Generic functions ########################### # walks a tree of hashes and arrays while applying a function to each hash. 
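# Illustrative usage sketch for hash_tree_apply below (not part of the
# module logic; assumes only that values are scalars, arrays or hashes):
# strip leading and trailing blanks from every scalar value in a nested
# config tree.
#
#   hash_tree_apply($config, sub {
#       my $h = shift;
#       for my $k (keys %$h) {
#           $h->{$k} =~ s/^\s+|\s+$//g unless ref $h->{$k};
#       }
#   });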
sub hash_tree_apply { my ($ref, $func) = @_; if (not ref($ref)) { return; } elsif (ref($ref) eq 'ARRAY') { map {hash_tree_apply($_,$func)} @$ref; return; } elsif (ref($ref) eq 'HASH') { &$func($ref); map {hash_tree_apply($_,$func)} values %$ref; return; } else { return; } } # Strips namespace prefixes from the keys of the hash passed by reference sub hash_strip_prefixes { my ($h) = @_; my %t; while (my ($k,$v) = each %$h) { next if $k =~ m/^xmlns/; $k =~ s/^\w+://; $t{$k} = $v; } %$h=%t; return; } # Verifies that a key is an HASH reference and returns that reference sub hash_get_hashref { my ($h, $key) = @_; my $r = ($h->{$key} ||= {}); $log->fatal("badly formed '$key' element in XML config") unless ref $r eq 'HASH'; return $r; } # Verifies that a key is an ARRAY reference and returns that reference sub hash_get_arrayref { my ($h, $key) = @_; my $r = ($h->{$key} ||= []); $log->fatal("badly formed '$key' element in XML config") unless ref $r eq 'ARRAY'; return $r; } # Set selected keys to either 'true' or 'false' sub fixbools { my ($h,$bools) = @_; for my $key (@$bools) { next unless exists $h->{$key}; my $val = $h->{$key}; if ($val eq '0' or lc $val eq 'false' or lc $val eq 'no' or lc $val eq 'disable') { $h->{$key} = '0'; } elsif ($val eq '1' or lc $val eq 'true' or lc $val eq 'yes' or lc $val eq 'enable' or lc $val eq 'expert-debug-on') { $h->{$key} = '1'; } else { $log->error("Invalid value for $key"); } } return $h; } sub move_keys { my ($h, $k, $names) = @_; for my $key (@$names) { next unless exists $h->{$key}; $k->{$key} = $h->{$key}; delete $h->{$key}; } } sub rename_keys { my ($h, $k, $names) = @_; for my $key (keys %$names) { next unless exists $h->{$key}; my $newkey = $names->{$key}; $k->{$newkey} = $h->{$key}; delete $h->{$key}; } } # Takes two hash references and merges values # the value of hash2 is taken if the value in # hash1 is not defined # usage: merge_hash_values(hash1,hash2) sub merge_hash_values { my ($hash1,$hash2) = @_; for my $key (keys %{$hash2}) { $hash1->{$key} = $hash2->{$key} if ((not defined $hash1->{$key}) || ($hash1->{$key} eq '')); # attempt to merge recursively # merge_hash_values($hash1->{$key},$hash2->{$key}) if (ref $key eq ref {}); } } ##################### Read config via arcconfig-parser ################ # execute parser and get json data sub read_json_config { my ($arcconf) = @_; # get the calling script basepath. Will be used to # find external scripts like arcconfig-parser. my $libexecpath = ($ENV{'ARC_LOCATION'} || '@prefix@') . '/@pkglibexecsubdir@'; my $jsonconfig=''; { local $/; # slurp mode open (my $jsonout, "$libexecpath/arcconfig-parser -e json --load -r $arcconf |") || $log->error("Python config parser error: $! at line: ".__LINE__." 
libexecpath: $libexecpath"); $jsonconfig = <$jsonout>; close $jsonout; } my $config = decode_json($jsonconfig); #print Dumper($config); return $config; } # # Removes spaces at beginning and end from all config values # and config names (such as queue:\s\squeuename\s*) # sub strip_spaces { my ($jsonconf) = @_; for my $key (keys %{$jsonconf}) { # recur if inner values if (ref($jsonconf->{$key}) eq 'HASH') { strip_spaces($jsonconf->{$key}); } elsif (ref($jsonconf->{$key}) eq 'ARRAY') { # strip spaces from array elements for my $item (@{$jsonconf->{$key}}) { $item =~ s/^\s+|\s+$//g; } } elsif ($jsonconf->{$key} =~ /^\s*(.*)\s*$/) { my $newvalue = $1; $newvalue =~ s/^\s+|\s+$//g; $jsonconf->{$key} = $newvalue; } # change key once back from recursion if it contains spaces if ($key =~ /\s*(queue|authgroup)\s*:\s*(.*)\s*/) { my $prefix = "$1:"; my $stripstring = $2; $stripstring =~ s/^\s+|\s+$//g; my $newkey="$prefix$stripstring"; $jsonconf->{$newkey} = $jsonconf->{$key}; # not needed as the reference is copied. But I don't trust it. #delete $jsonconf->{$key}; } } } # # Reads the json config file passed as the first argument and produces a config # hash conforming to $config_schema. # sub build_config_from_json { my ($file) = @_; my $jsonconf = read_json_config($file); strip_spaces($jsonconf); set_defaults($jsonconf); # Those values that are the same as in arc.conf will # be copied and checked. my $config ||= {}; # service is an aggregation of the contents of infosys/cluster and GLUE2 relevant info $config->{service} ||= {}; ## This below is mostly GLUE2, but we decided not to create a dedicated block. $config->{location} ||= {}; $config->{contacts} ||= []; $config->{accesspolicies} ||= {}; $config->{mappingpolicies} ||= {}; $config->{xenvs} ||= {}; $config->{shares} ||= {}; # start of restructured pieces of information $config->{infosys} ||= {}; $config->{arex} ||= {}; $config->{lrms} ||= {}; # end of restructured pieces of information # [common] options can be retrieved directly using $config->{optionname} my $common = $jsonconf->{'common'}; move_keys $common, $config, [keys %$common_options]; # TODO: not for arc7.0. Create a mapping hash eventually if we need # mapped users info. # new mapping block (just move to the top config as it was before) #my $mapping = $jsonconf->{'mapping'}; #move_keys $mapping, $config, [keys %$common_options]; my $lrms = $jsonconf->{'lrms'}; # some options in lrms, moved to $config for backward compatibility, # should be moved to {lrms} instead - 2024 is this done? 
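    # Illustrative example of the default queue handling below (values are
    # hypothetical): with
    #   [lrms]
    #   lrms = slurm batch
    # the whitespace split yields lrms => 'slurm' and
    # defaultqueue => 'batch'; with a bare 'lrms = slurm' the defaultqueue
    # key is simply left unset.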
#move_keys $lrms, $config, [keys %$lrms_options, keys %$lrms_share_options]; move_keys $lrms, $config->{'lrms'}, [keys %$lrms]; move_keys $lrms, $config->{'lrms'}, [keys %$lrms_options, keys %$lrms_share_options]; # Parsing for default queue my ($lrmsname, $defaultqueue) = split /\s+/, $config->{lrms}{lrms} || ''; $config->{'lrms'}{'lrms'} = $lrmsname; $config->{'lrms'}{'defaultqueue'} = $defaultqueue if defined $defaultqueue; my $arex = $jsonconf->{'arex'}; move_keys $arex, $config->{'arex'}, [keys %$arex]; my $ssh = $jsonconf->{'ssh'}; move_keys $ssh, $config, [keys %$sshcommon_options]; my $infosys = $jsonconf->{'infosys'}; move_keys $infosys, $config->{'infosys'}, [keys %$infosys]; rename_keys $infosys, $config, {port => 'SlapdPort'}; move_keys $infosys, $config, [keys %$ldap_infosys_options]; # check that the above generated required information $log->error("No control directory configured") unless $config->{arex}{controldir}; if (defined $jsonconf->{'arex/ws'}) { my $arexws = $jsonconf->{'arex/ws'}; $config->{arex}{ws} ||= {}; move_keys $arexws, $config->{arex}{ws}, [keys %$ws_options]; } # handle ws endpoint information for use inside infoproviders if (defined $config->{arex}{ws}{wsurl}) { $config->{arex}{ws}{wsurl} =~ m{^(https?)://([^:/]+)(?::(\d+))?(.*)}; my ($proto,$host,$port,$mountpoint) = ($1,$2,$3,$4); $port ||= 80 if $proto eq "http"; $port ||= 443 if $proto eq "https"; $config->{arex}{port} = $port; $config->{arex}{arexhostport} = "$host:$port"; } $config->{arex}{ws}{jobs} ||= {}; if (defined $jsonconf->{'arex/ws/jobs'}) { my $arexwsjobs = $jsonconf->{'arex/ws/jobs'}; move_keys $arexwsjobs, $config->{arex}{ws}{jobs}, [keys %$wsjobs_options]; $config->{arex}{ws}{jobs}{enabled} = 1; } else { $config->{arex}{ws}{jobs}{enabled} = 0; } $config->{arex}{cache} ||= {}; if (defined $jsonconf->{'arex/cache'}) { my $cacheopts = $jsonconf->{'arex/cache'}; move_keys $cacheopts, $config->{arex}{cache}, [keys %$cache_options]; } $config->{arex}{cache}{cleaner} ||= {}; if (defined $jsonconf->{'arex/cache/cleaner'}) { my $cacheopts = $jsonconf->{'arex/cache/cleaner'}; move_keys $cacheopts, $config->{arex}{cache}, [keys %$cache_options]; } # LDAP and BDII config if (defined $jsonconf->{'infosys/ldap'}) { $config->{infosys}{ldap} ||= {}; my $ldapconf = $jsonconf->{'infosys/ldap'}; move_keys $ldapconf, $config->{infosys}{ldap}, [keys %$ldap_infosys_options]; } else { $log->verbose("[infosys/ldap] block not found in arc.conf Disabling LDAP/LDIF information generation."); } # information schemas # NorduGRID if ($jsonconf->{'infosys/nordugrid'}{enabled}) { $config->{infosys}{nordugrid} ||= {}; move_keys $jsonconf->{'infosys/nordugrid'}, $config->{infosys}{nordugrid}, [keys %{$config_schema->{infosys}{nordugrid}}]; } ####### GLUE2 if ( $jsonconf->{'infosys/glue2'}{enabled} ) { $config->{infosys}{glue2} ||= {}; my $glue2conf = $jsonconf->{'infosys/glue2'}; move_keys $glue2conf, $config->{infosys}{glue2}, [keys %$glue2_options]; rename_keys $config->{infosys}{glue2}, $config->{service}, {computingservice_qualitylevel => 'QualityLevel'}; # GLUE2 ldap if ( $jsonconf->{'infosys/glue2/ldap'}{enabled} ) { $config->{infosys}{glue2}{ldap} ||= {}; my $glue2ldapconf = $jsonconf->{'infosys/glue2/ldap'}; move_keys $glue2ldapconf, $config->{infosys}{glue2}{ldap}, [keys %{$config_schema->{infosys}{glue2}{ldap}}]; } # AdminDomain $config->{admindomain} ||= {}; $log->warning('[infosys/glue2] section missing admindomain_name information. 
Default will be set to GLUE2 default UNDEFINEDVALUE.')
        if ($jsonconf->{'infosys/glue2'}{admindomain_name} eq 'UNDEFINEDVALUE' );
        my $admindomainconf = $jsonconf->{'infosys/glue2'};
        rename_keys $admindomainconf, $config->{'admindomain'}, {
            admindomain_name        => 'Name',
            admindomain_description => 'Description',
            admindomain_www         => 'WWW',
            admindomain_distributed => 'Distributed',
            admindomain_owner       => 'Owner',
            admindomain_otherinfo   => 'OtherInfo'
        }
    } else {
        $log->error('Infoproviders cannot continue without the [infosys/glue2] block. Please add it. Exiting...')
    }

    ### Process infosys/cluster

    my $cluster = $jsonconf->{'infosys/cluster'};
    if (%$cluster) {
        # lrmsconfig is ignored; cluster_location is mapped to the GLUE2 location PostCode
        rename_keys $cluster, $config->{location}, { cluster_location => 'PostCode' };
        rename_keys $cluster, $config->{service}, {
            interactive_contactstring => 'InteractiveContactstring',
            cluster_owner => 'ClusterOwner', localse => 'LocalSE',
            advertisedvo => 'AdvertisedVO', homogeneity => 'Homogeneous',
            architecture => 'Platform', opsys => 'OpSys', benchmark => 'Benchmark',
            nodememory => 'MaxVirtualMemory', middleware => 'Middleware',
            alias => 'ClusterAlias', comment => 'ClusterComment'};

        if ($cluster->{clustersupport} and $cluster->{clustersupport} =~ /(.*)@/) {
            my $contact = {};
            push @{$config->{contacts}}, $contact;
            $contact->{Name} = $1;
            $contact->{Detail} = "mailto:".$cluster->{clustersupport};
            $contact->{Type} = 'usersupport';
        }

        if (defined $cluster->{nodeaccess}) {
            $config->{service}{ConnectivityIn} = 0;
            $config->{service}{ConnectivityOut} = 0;
            for (split '\[separator\]', $cluster->{nodeaccess}) {
                $config->{service}{ConnectivityIn}  = 1 if lc $_ eq 'inbound';
                $config->{service}{ConnectivityOut} = 1 if lc $_ eq 'outbound';
            }
        }

        # TODO: this causes possibly unsupported values like OSName, OSVersion
        # to be moved and not renamed - fix with infochecker
        move_keys $cluster, $config->{service}, [keys %$share_options, keys %$xenv_options];
    }

    ## use hostname as cluster alias if not defined
    my $hostname = $config->{hostname};
    my @dns = split /\./, $hostname;
    my $shorthost = shift @dns;
    my $dnsdomain = join ".", @dns;

    unless (defined $config->{service}{ClusterAlias}) {
        $log->info("[infosys/cluster] alias= in arc.conf missing. Defaulting to $shorthost");
        chomp ($config->{service}{ClusterAlias} ||= $shorthost);
    }

    ## use cluster alias as cluster name. Currently we have no way to specify the cluster name directly.
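    # Illustrative example (hypothetical host): for hostname
    # 'ce01.example.org' the split above gives $shorthost = 'ce01' and
    # $dnsdomain = 'example.org', so an unset alias defaults to 'ce01'
    # and, below, ClusterName becomes 'ce01' as well.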
$config->{service}{ClusterName} = $config->{service}{ClusterAlias}; # remove useless objects if not set delete $config->{location} unless $config->{location} and %{$config->{location}}; delete $config->{contacts} unless $config->{contacts} and @{$config->{contacts}}; # Some checks about contacts if ($config->{contacts}) { for (@{$config->{contacts}}) { $log->warning("Contact is missing Type") and next unless $_->{Type}; $log->warning("Contact is missing Detail") and next unless $_->{Detail}; $log->warning("Contact Detail is not an URI: ".$_->{Detail}) and next unless $_->{Detail} =~ m/^\w+:/; } } # Generate initial shares and execution environments array, using configured queues my @qnames=(); for my $keyname (keys %{$jsonconf}) { push(@qnames,$1) if $keyname =~ /queue\:(.*)/; } for my $name (@qnames) { my $queue = $jsonconf->{"queue:$name"}; # at first every bare queue is a share my $sconf = $config->{shares}{$name} ||= {}; $config->{shares}{$name}{MappingQueue} = $name; my $xeconf = $config->{xenvs}{$name} ||= {}; push @{$sconf->{ExecutionEnvironmentName}}, $name; rename_keys $queue, $sconf, {nodememory => 'MaxVirtualMemory', comment => 'Description', advertisedvo => 'AdvertisedVO', maxslotsperjob => 'MaxSlotsPerJob'}; move_keys $queue, $sconf, [keys %$share_options, keys %$lrms_share_options]; # TODO: change opsys here if needed rename_keys $queue, $xeconf, {homogeneity => 'Homogeneous', architecture => 'Platform', opsys => 'OpSys', osname => 'OSName', osversion => 'OSVersion', osfamily => 'OSFamily', benchmark => 'Benchmark'}; move_keys $queue, $xeconf, [keys %$xenv_options]; # This unsupported option was meant to select a group of nodes for a queue # Since arc 6.6 this is autodiscovered from the batch system for some LRMS. # It may be useful in cloud context, so kept for now $xeconf->{NodeSelection} = {}; } # At least one queue must be defined. Maybe this can be relaxed. $log->error("No queue or ComputingShare configured") unless %{$config->{shares}}; $log->error("GLUE2: defaultqueue set to nonexistent ComputingShare") if $config->{lrms}{defaultqueue} and not $config->{shares}{$config->{lrms}{defaultqueue}}; # fire warning if GLUE2 Service Quality Level is not good if (defined $config->{service}{QualityLevel}) { my $qualitylevelstring = $config->{service}{QualityLevel}; my $closedenumeration = {'development' => '1', 'pre-production' => '1', 'production' => '1', 'testing' => '1' }; unless (defined $closedenumeration->{$config->{service}{QualityLevel}}) { my @enum = keys %$closedenumeration; $log->error("computingservice_qualitylevel contains \"$qualitylevelstring\" which is an invalid value. Allowed value is one of: @enum"); } } # This only happens when no queue is configured. At the moment every queue has its own execution env $log->error("No ExecutionEnvironment configured") unless %{$config->{xenvs}}; # ExecutionEnvironments are automatically discovered in some LRMS since arc6.6 # However manual selection could be useful in the future (for example cloud environments) so this code is kept. 
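# Illustrative example of the share/xenv generation above (hypothetical
# queue name): an arc.conf block
#   [queue:gridlong]
#   comment = long jobs
# yields $config->{shares}{gridlong} with Description => 'long jobs' and
# MappingQueue => 'gridlong', plus $config->{xenvs}{gridlong}, with the
# share pointing at it through ExecutionEnvironmentName => ['gridlong'].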
# Cross-check ExecutionEnvironment references for my $s (values %{$config->{shares}}) { next unless $s->{ExecutionEnvironmentName}; for my $group (@{$s->{ExecutionEnvironmentName}}) { $log->error("ComputingShare associated with non-existent ExecutionEnvironment: $group") unless $config->{xenvs}{$group}; } } for my $s (values %{$config->{xenvs}}) { delete $s->{NodeSelection} unless %{$s->{NodeSelection}}; } ## TODO: still valid in 2024/ARC7 - this should be done together with [mapping] user stuff if needed ## TODO: This code below is currently not used. Access/mapping policies are currently # generated in ARC1ClusterInfo.pm, but this information is mostly static and # can be generated already now, simplifying ARC1ClusterInfo.pm. However, the simplification would be minor. # Also, this is strictly GLUE2, so maybe it should be confined in ARC1ClusterInfo.pm after all... # TODO: create shares based on configured VOs in ConfigCentral? Is this possible/good? # Can these checks be done by InfoChecker? # Cross-check MappingPolicy references and move them to the share where they belong # Initialize policies data based on authorization information. # This might be extended in the future for more complex scenarios. For now # only VO based ones are built. #my %queuenamesset = map { $_ => '1' } (keys %{$config->{shares}}); # #if (defined $config->{service}{AuthorizedVO}) { # for my $policy (@{$config->{service}{AuthorizedVO}}) { # $config->{mappingpolicies}{$policy} ||= {}; # $config->{mappingpolicies}{$policy}{Scheme} = 'basic;'; # $config->{mappingpolicies}{$policy}{Rule} = [ "vo:$policy" ]; # $config->{mappingpolicies}{$policy}{queues} = Storable::dclone(\%queuenamesset); # # $config->{accesspolicies}{$policy} ||= {}; # $config->{accesspolicies}{$policy}{Scheme} = 'basic;'; # $config->{accesspolicies}{$policy}{Rule} = [ "vo:$policy" ]; # } #} #for my $queue (keys %queuenamesset) { # if (defined $config->{shares}{$queue}{authorizedvo}) { # for my $policy (@{$config->{shares}{$queue}{authorizedvo}}) { # #$log->debug("$policy".Dumper($config->{mappingpolicies})); # $config->{mappingpolicies}{$policy} ||= {}; # $config->{mappingpolicies}{$policy}{Scheme} = 'basic'; # $config->{mappingpolicies}{$policy}{Rule} = [ "vo:$policy" ]; # $config->{mappingpolicies}{$policy}{queues}{$queue} = '1'; # # $config->{accesspolicies}{$policy} ||= {}; # $config->{accesspolicies}{$policy}{Scheme} = 'basic'; # $config->{accesspolicies}{$policy}{Rule} = [ "vo:$policy" ]; # } # } #} # Disable this for now, but it should replace similar code in ARC1ClusterInfo.pm #for my $s (keys %{$config->{mappingpolicies}}) { ## generate new share names and their data #for my $queuename (keys %{$config->{mappingpolicies}{$s}{queues}}) { #my $newsharename = $queuename.'_'.$s; ## temporary until we fix mappingqueues properly #$config->{shares}{$newsharename} = Storable::dclone($config->{shares}{$queuename}); ## TODO: add this to the other shares too! #$config->{shares}{$newsharename}{MappingQueue} = $queuename; #}; #} ## Final cleanup of the config datastructure # Create a list with all multi-valued options based on $config_schema.
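## Worked example (hedged, values are hypothetical): the parser joins repeated
## options with the literal token '[separator]'. For a key listed in the
## @multival list built below,
# split '\[separator\]', 'val1[separator]val2';   # yields ('val1', 'val2')
## while a single-valued key keeps only the last occurrence, as done below.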
my @multival = (); hash_tree_apply $config_schema, sub { my $h = shift; for (keys %$h) { next if ref $h->{$_} ne 'ARRAY'; next if ref $h->{$_}[0]; # exclude deep structures push @multival, $_; } }; # Transform multi-valued options into arrays hash_tree_apply $config, sub { my $h = shift; while (my ($k,$v) = each %$h) { next if ref $v; # skip anything other than scalars $h->{$k} = [split '\[separator\]', $v]; unless (grep {$k eq $_} @multival) { $h->{$k} = pop @{$h->{$k}}; # single valued options, remember last defined value only } } }; hash_tree_apply $config, sub { fixbools shift, $allbools }; return $config; } # # Infoproviders config parser. It takes as input a JSON file which # represents arc.conf and manipulates it to prepare information. # sub parseConfig { my ($file,$arc_location) = @_; my $config; $config = build_config_from_json($file); #print Dumper($config); LogUtils::level($config->{infosys}{loglevel}) if $config->{infosys}{loglevel}; my $checker = InfoChecker->new($config_schema); my @messages = $checker->verify($config,1); $log->verbose("config key config->$_") foreach @messages; $log->verbose("Some required config options are missing or not used by infosys") if @messages; return $config; } ## subblock_check_required(config, block) # Checks if the required parent blocks are enabled and sets the enable flags accordingly sub subblock_check_required { my ($config, $block) = @_; my @dependencyarray = @{$blockdependencies->{$block}}; if ($config->{$block}{enabled}) { $config->{$block}{enabled} = 0; for my $requiredblock (@dependencyarray) { $log->debug("Checking $block, enabled=$config->{$block}{enabled}, $requiredblock, enabled=$config->{$requiredblock}{enabled}"); unless ($config->{$requiredblock}{enabled}) { $log->error("required [$requiredblock] block not found but [$block] defined. Please define [$requiredblock]. Exiting"); } else { $config->{$block}{enabled} = 1; } } } } ## The defaults come from the parser, but due to a Perl side effect when scanning # hashes, we add the enable/disable block information here. sub set_defaults { my ($config) = @_; $log->debug("Applying defaults"); # blocknames that are relevant for infosys and should be set to enable my @blocknames = ('arex', 'arex/ws', 'arex/ws/jobs', 'infosys', 'infosys/cluster', 'infosys/glue2', 'infosys/glue2/ldap', 'infosys/ldap', 'infosys/nordugrid' ); ## Fix for Perl problem with nested hashes: add an enabled item to all blocks that can be enabled and add 0 or 1 values for my $block (@blocknames) { $config->{$block}{enabled} = (defined $config->{$block}) ? 1 : 0; }; #$log->debug(Dumper($config)); # Check for dependencies for my $block (keys %$blockdependencies) { subblock_check_required($config,$block); } ## Other defaults not covered by arcconfig-parser. Might be done with a hash if more are missing. We keep it as a safety measure. #default ARC version taken from build. Artificially placed in common $config->{arcversion}='@VERSION@'; # force ldap defaults if ldap renderings are enabled in config $config->{infosys}{ldap}{port}='2135' if ($config->{'infosys/ldap'}{enabled}); # Define a-rex endpoint. Hardcoded after ARC6. Still valid for ARC7. if (defined $config->{'arex/ws'}) { $config->{'arex/ws'}{wsurl} ||= 'https://'.$config->{hostname}.':443/arex'; } else { $log->verbose("WS interface disabled.
Add [arex/ws] block to enable"); } ### end of default blocks creation } ## getValueOf: Cherry picks arc.conf values ## Perl wrapper for the python parser ## input: configfile,configblock, configoption ## TODO: maybe use read_json_config for security reasons? sub getValueOf ($$$){ my ($arcconf,$block,$option) = @_; # get the calling script basepath. Will be used to # find external scripts like arcconfig-parser. my $libexecpath = ($ENV{'ARC_LOCATION'} || '@prefix@') . '/@pkglibexecsubdir@'; my $value=''; { local $/; # slurp mode open (my $parserout, "$libexecpath/arcconfig-parser --load -r $arcconf -b $block -o $option |") || $log->error("Python config parser error: $! at line: ".__LINE__." libexecpath: $libexecpath"); $value = <$parserout>; close $parserout; } # remove blank spaces before and after $value =~ s/^\s+|\s+$//g; # strip trailing newline chomp $value; return $value; } ## isBlockPresent: returns true if block exists in config sub isBlockPresent ($$) { my ($arcconf,$block) = @_; my $jsonconf = read_json_config($arcconf); if (defined $jsonconf->{$block}) { return 1 } else { return 0 } ; } ## Todo: review, this doesn't work anymore sub dumpInternalDatastructure ($){ my ($configfile) = @_; my $config = parseConfig($configfile); print Dumper($config); } 1; __END__ nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/NGldifPrinter.pm0000644000000000000000000000013115067751327024736 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 29 ctime=1759499029.83429079 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/NGldifPrinter.pm0000644000175000002070000002035315067751327026644 0ustar00mockbuildmock00000000000000package NGldifPrinter; use strict; use base 'LdifPrinter'; use POSIX; sub new { my ($this, $handle, $ttl) = @_; my $self = $this->SUPER::new($handle); my $now = time; $self->{validfrom} = strftime("%Y%m%d%H%M%SZ", gmtime($now)); $self->{validto} = strftime("%Y%m%d%H%M%SZ", gmtime($now + $ttl)); return $self; } # # Print attributes # sub beginGroup { my ($self, $name) = @_; $self->begin('nordugrid-info-group-name' => $name); $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-info-group'); $self->attribute('nordugrid-info-group-name' => $name); } sub MdsAttributes { my ($self) = @_; $self->attribute(objectClass => 'Mds'); $self->attribute('Mds-validfrom' => $self->{validfrom}); $self->attribute('Mds-validto' => $self->{validto}); } sub clusterAttributes { my ($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-cluster'); $self->attributes($data, 'nordugrid-cluster-', qw( name aliasname contactstring support lrms-type lrms-version lrms-config architecture opsys homogeneity nodecpu nodememory totalcpus cpudistribution sessiondir-free sessiondir-total cache-free cache-total runtimeenvironment localse middleware totaljobs usedcpus queuedjobs location owner issuerca nodeaccess comment interactive-contactstring benchmark sessiondir-lifetime prelrmsqueued issuerca-hash trustedca acl credentialexpirationtime )); } sub queueAttributes { my ($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-queue'); $self->attributes($data, 'nordugrid-queue-', qw( name status running queued maxrunning maxqueuable maxuserrun maxcputime mincputime defaultcputime schedulingpolicy totalcpus nodecpu nodememory architecture opsys gridrunning gridqueued comment benchmark homogeneity prelrmsqueued localqueued maxwalltime minwalltime defaultwalltime maxtotalcputime acl )); } sub jobAttributes { my 
($self, $data) = @_; $self->MdsAttributes(); $self->attribute(objectClass => 'nordugrid-job'); $self->attributes($data, 'nordugrid-job-', qw( globalid globalowner execcluster execqueue stdout stderr stdin reqcputime status queuerank comment submissionui submissiontime usedcputime usedwalltime sessiondirerasetime usedmem errors jobname runtimeenvironment cpucount executionnodes gmlog clientsoftware proxyexpirationtime completiontime exitcode rerunable reqwalltime )); } # #sub userAttributes { # my ($self, $data) = @_; # $self->MdsAttributes(); # $self->attribute(objectClass => 'nordugrid-authuser'); # $self->attributes($data, 'nordugrid-authuser-', qw( name sn freecpus diskspace queuelength )); #} # # Follow hierarchy # sub jobs { LdifPrinter::Entries(@_, 'nordugrid-job-', 'globalid', \&jobAttributes); } #sub users { # LdifPrinter::Entries(@_, 'nordugrid-authuser-', 'name', \&userAttributes); #} sub queues { LdifPrinter::Entries(@_, 'nordugrid-queue-', 'name', \&queueAttributes, sub { my ($self, $data) = @_; $self->beginGroup('jobs'); $self->jobs($data->{jobs}); $self->end(); #$self->beginGroup('users'); #$self->users($data->{users}); #$self->end(); }); } sub cluster { LdifPrinter::Entry(@_, 'nordugrid-cluster-', 'name', \&clusterAttributes, sub { my ($self, $data) = @_; $self->queues($data->{queues}); }); } sub Top { my ($self, $data) = @_; $self->begin('o' => "grid"); $self->begin('Mds-Vo-name' => "local"); $self->cluster($data); } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/InfosysHelper.pm0000644000000000000000000000013215067751327025022 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.835690464 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/InfosysHelper.pm0000644000175000002070000003024715067751327026732 0ustar00mockbuildmock00000000000000package InfosysHelper; # Helper functions to be used for communication between the A-REX infoprovider and ldap-infosys # # * for A-REX infoprovider: # - createLdifScript: creates a script that prints the ldif from the infoprovider when executed, # - notifyInfosys: notifies ldap-infosys through a fifo file created by ldap-infosys. # * for ldap-infosys: # - waitForProvider: waits for A-REX infoprovider to give a life sign on the fifo it created # - ldifIsReady: calls waitForProvider and checks that ldif is fresh enough ## TODO: do NOT hardcode defaults here anymore. Take them from configuration. use POSIX; use Fcntl; use English; use File::Basename; use File::Temp qw(tempfile tempdir); use File::Path qw(mkpath); #use Data::Dumper::Concise; ## usage: # print Dumper($datastructure); use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); LogUtils::level("VERBOSE"); # # Given a pid file, returns the user id of the running process # sub uidFromPidfile { my ($pidfile) = @_; open(my $fh, "<", "$pidfile") or return undef; my @stat = stat $pidfile; my $pid = <$fh>; close $fh; $pid =~ m/^\s*(\d+)\s*$/ or return undef; my $uid = `ps -ouid= $pid`; $uid =~ m/^\s*(\d+)\s*$/ or return undef; return $1; } # # stat the file, get the uid, gid # sub uidGidFromFile { my ($file) = @_; my @stat = stat $file; return () unless @stat; return (@stat[4,5]); } # # switch effective user if possible. This is reversible. It switches back to root if the passed parameter # is 0.
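## Typical usage, as done by createLdifScript() further below (sketch):
# switchEffectiveUser($infosys_uid);   # drop privileges to the infosys user (if root)
# ... create files owned by the infosys user ...
# switchEffectiveUser($UID);           # revert to the original (root) user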
# sub switchEffectiveUser { my ($uid) = @_; if ($UID == 0 && $uid != 0) { my ($name, $pass, $uid, $gid) = getpwuid($uid); return unless defined $gid; # Switch to the target user's UID/GID eval { $EGID = $gid; $EUID = $uid; }; } else { # Switch back to original UID/GID eval { $EGID = $GID; $EUID = $UID; }; }; } # # Waits for a sign from the infoprovider. Implemented using a fifo file. # * creates a fifo (unlinks it first if it already exists) # * opens the fifo -- this blocks until the other end opens the fifo for writing # * returns false in case of error # sub waitForProvider { my ($runtime_dir) = @_; my $fifopath = "$runtime_dir/ldif-provider.fifo"; if (! -d $runtime_dir) { $log->warning("No such directory: $runtime_dir"); return undef; } if (-e $fifopath) { $log->info("Unlinking stale fifo file: $fifopath"); unlink $fifopath; } unless (POSIX::mkfifo $fifopath, 0600) { $log->warning("Mkfifo failed: $fifopath: $!"); return undef; } $log->verbose("New fifo created: $fifopath"); my $handle; # This might be a long wait. In case somebody kills us, be nice and clean up. $log->info("Start waiting for notification from A-REX's infoprovider"); my $ret; eval { local $SIG{TERM} = sub { die "terminated\n" }; unless ($ret = sysopen($handle, $fifopath, O_RDONLY)) { $log->warning("Failed to open: $fifopath: $!"); unlink $fifopath; } else { while(<$handle>){}; # not interested in contents } }; close $handle; unlink $fifopath; if ($@) { $log->error("Unexpected: $@") unless $@ eq "terminated\n"; $log->warning("SIGTERM caught while waiting for notification from A-REX's infoprovider"); return undef; } return undef unless $ret; $log->info("Notification received from A-REX's infoprovider"); return 1; } { my $cache = undef; # # Finds infosys' runtime directory and the infosys user's uid, gid # TODO: this is a bit complicated due to BDII4/BDII5 legacy. # Maybe it needs simplification, but that requires understanding # of what happens in BDII5 since they changed directory paths. # sub findInfosys { return @$cache if defined $cache; my ($config) = @_; my ($bdii_run_dir) = $config->{bdii_run_dir}; # remove trailing slashes $bdii_run_dir =~ s|/\z||; $log->debug("BDII run dir set to: $bdii_run_dir"); # TODO: remove this legacy BDII4 location from here and from grid-infosys my ($bdii_var_dir) = $config->{bdii_var_dir}; # remove trailing slashes $bdii_var_dir =~ s|/\z||; $log->debug("BDII var dir set to: $bdii_var_dir"); my ($bdii_update_pid_file) = $config->{bdii_update_pid_file}; $log->debug("BDII pid guessed location: $bdii_update_pid_file. Will search for it later"); my ($infosys_uid, $infosys_gid); my $infosys_ldap_run_dir = $config->{infosys_ldap_run_dir}; # remove trailing slashes $infosys_ldap_run_dir =~ s|/\z||; $log->debug("LDAP subsystem run dir set to $infosys_ldap_run_dir"); # search for bdii pid file: legacy bdii4 locations still here # TODO: remove bdii_var_dir from everywhere (also from grid-infosys) # if not specified with bdii_update_pid_file, it's likely here my $existsPidFile = 0; my $bdii5_pidfile = "$bdii_run_dir/bdii-update.pid"; my $bdii4_pidfile = "$bdii_var_dir/bdii-update.pid"; for my $pidfile ( $bdii_update_pid_file, $bdii5_pidfile, $bdii4_pidfile) { unless ( ($infosys_uid, $infosys_gid) = uidGidFromFile($pidfile) ) { $log->verbose("BDII pidfile not found at: $pidfile"); next; } $existsPidFile = 1; $log->verbose("BDII pidfile found at: $pidfile"); next unless (my $user = getpwuid($infosys_uid)); $log->verbose("BDII pidfile owned by: $user ($infosys_uid)"); last; } unless ($existsPidFile) { $log->warning("BDII pid file not found.
Check that nordugrid-arc-bdii is running, or that bdii_run_dir is set"); return @$cache = (); } unless (-d $infosys_ldap_run_dir) { $log->warning("LDAP information system runtime directory does not exist. Check that:"); $log->warning("1) The arc.conf parameter infosys_ldap_run_dir is correctly set if manually added."); $log->warning("2) nordugrid-arc-bdii is running"); return @$cache = (); } return @$cache = ($infosys_ldap_run_dir, $infosys_uid, $infosys_gid); } } # # # Notify Infosys that there is a new fresh ldif. Implemented using a fifo file. # * finds out whether there is a reader on the other end of the fifo # * opens the file and then closes it (thus waking up the listener on other end) # * returns false on error # sub notifyInfosys { my ($config) = @_; # my ($infosys_ldap_run_dir) = findInfosys($config); my $infosys_ldap_run_dir = $config->{infosys_ldap_run_dir}; return undef unless $infosys_ldap_run_dir; my $fifopath = "$infosys_ldap_run_dir/ldif-provider.fifo"; unless (-e $fifopath) { $log->info("LDAP subsystem has not yet created fifo file $fifopath"); return undef; } my $handle; # Open the fifo -- Normally it shouldn't block since the other end is # supposed to be listening. If it blocks nevertheless, something must have # happened to the reader and it's not worth waiting here. Set an alarm and # get out. my $ret; eval { local $SIG{ALRM} = sub { die "alarm\n" }; alarm 5; unless ($ret = sysopen($handle, $fifopath, O_WRONLY)) { $log->warning("Failed to open fifo (as user id $EUID): $fifopath: $!"); } alarm 0; }; if ($@) { $log->error("Unexpected: $@") unless $@ eq "alarm\n"; # timed out -- no reader $log->warning("Fifo file exists but LDAP information system is not listening"); return undef; } return undef unless $ret; close $handle; $log->info("LDAP information system notified on fifo: $fifopath"); return $handle; } # # To be called by the A-REX infoprovider # * Takes the ldif generated by calling &$print_ldif and creates an executable # script that when executed, outputs that ldif. # * If applicable: switches to the user running infosys and then switches back to root # * Returns false on error # sub createLdifScript { my ($config, $print_ldif) = @_; my ($infosys_ldap_run_dir, $infosys_uid, $infosys_gid) = findInfosys($config); return undef unless $infosys_ldap_run_dir; eval { mkpath($infosys_ldap_run_dir); }; if ($@) { $log->warning("Failed creating parent directory $infosys_ldap_run_dir: $@"); return undef; } unless (chown $infosys_uid, $infosys_gid, $infosys_ldap_run_dir) { $log->warning("Chown to uid($infosys_uid) gid($infosys_gid) failed on: $infosys_ldap_run_dir: $!"); return undef; } switchEffectiveUser($infosys_uid); my ($h, $tmpscript); eval { my $template = "ldif-provider.sh.XXXXXXX"; ($h, $tmpscript) = tempfile($template, DIR => $infosys_ldap_run_dir); }; if ($@) { $log->warning("Failed to create temporary file: $@"); switchEffectiveUser($UID); return undef; } # Hopefully this string is not in the ldif my $mark=substr rand(), 2; eval { local $SIG{TERM} = sub { die "terminated\n" }; die "file\n" unless print $h "#!/bin/sh\n\n"; die "file\n" unless print $h "# Autogenerated by A-REX's infoprovider\n\n"; die "file\n" unless print $h "cat <<'EOF_$mark'\n"; &$print_ldif($h); die "file\n" unless print $h "\nEOF_$mark\n"; die "file\n" unless close $h; }; if ($@) { my $msg = "An error occurred while creating ldif generator script: $@"; $msg = "An error occurred while writing to: $tmpscript: $!"
if $@ eq "file\n"; $msg = "SIGTERM caught while creating ldif generator script" if $@ eq "terminated\n"; close $h; unlink $tmpscript; $log->warning($msg); $log->verbose("Removing temporary ldif generator script"); switchEffectiveUser($UID); return undef; } unless (chmod 0700, $tmpscript) { $log->warning("Chmod failed: $tmpscript: $!"); unlink $tmpscript; switchEffectiveUser($UID); return undef; } my $finalscript = "$infosys_ldap_run_dir/ldif-provider.sh"; unless (rename $tmpscript, $finalscript) { $log->warning("Failed renaming temporary script to $finalscript: $!"); unlink $tmpscript; switchEffectiveUser($UID); return undef; } $log->verbose("Ldif generator script created: $finalscript"); switchEffectiveUser($UID); return 1; } # # To be called by ldap-infosys # * returns true if/when there is a fresh ldif # max_cycle is a number calculated as such by the init scripts: # max_cycle=$(( $bdii_provider_timeout + $infoproviders_timelimit + $wakeupperiod )) # where: # - $bdii_provider_timeout: max time for BDII infoproviders to run # - $infoproviders_timelimit: max time for PERL infoproviders to run # - $wakeupperiod: interval used by A-REX to restart CEinfo.pl # max_cycle is the time bdii-update will trust the content of any provider to be fresh enough sub ldifIsReady { my ($infosys_ldap_run_dir, $max_cycle) = @_; LogUtils::timestamps(1); # Check if ldif generator script exists and is fresh enough my $scriptpath = "$infosys_ldap_run_dir/ldif-provider.sh"; unless (-e $scriptpath) { $log->info("The ldif generator script was not found ($scriptpath)"); $log->info("This file should have been created by A-REX's infoprovider. Check that A-REX is running."); return undef; } my @stat = stat $scriptpath; $log->error("Cant't stat $scriptpath: $!") unless @stat; if (time() - $stat[9] > $max_cycle) { $log->info("The ldif generator script is too old ($scriptpath)"); $log->info("This file should have been refreshed by A-REX's infoprovider. Check that A-REX is running."); return undef; } # A-REX has started up... Wait for the next infoprovider cycle waitForProvider($infosys_ldap_run_dir) or $log->warning("Failed to receive notification from A-REX's infoprovider"); $log->verbose("Using ldif generator script: $scriptpath"); return 1; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/GLUE2xmlPrinter.pm0000644000000000000000000000013115067751327025132 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.866493666 30 ctime=1759499029.829735421 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/GLUE2xmlPrinter.pm0000644000175000002070000005525215067751327027046 0ustar00mockbuildmock00000000000000package GLUE2xmlPrinter; use base "XmlPrinter"; # the line below is useful for debugging. see http://perldoc.perl.org/Data/Dumper.html for usage # use Data::Dumper; sub new { my ($this, $handle, $splitjobs) = @_; my $self = $this->SUPER::new($handle); $self->{splitjobs} = $splitjobs; return $self; } sub beginEntity { my ($self, $data, $name, $basetype) = @_; return undef unless $name; $data->{BaseType} = $basetype; $self->begin($name, $data, qw( BaseType CreationTime Validity )); $self->properties($data, qw( ID Name OtherInfo )); } sub Element { my ($self, $collector, $name, $basetype, $filler) = @_; return unless $collector and my $data = &$collector(); if ($data->{NOPUBLISH}) { $self->disableOut(); } $self->beginEntity($data, $name, $basetype); &$filler($self, $data) if $filler; $self->end($name); if ($data->{NOPUBLISH}) { $self->enableOut(); } } # This function creates an open element. 
# ElementNoClose is used to work around problems with the way some container elements must stay open while nested content is printed; the caller closes them later with end(). sub ElementNoClose { my ($self, $collector, $name, $basetype, $filler) = @_; return unless $collector and my $data = &$collector(); $self->beginEntity($data, $name, $basetype); &$filler($self, $data) if $filler; } sub Elements { my ($self, $collector, $name, $basetype, $filler) = @_; while ($collector and my $data = &$collector()) { if ($data->{NOPUBLISH}) { $self->disableOut(); } $self->beginEntity($data, $name, $basetype); &$filler($self, $data) if $filler; $self->end($name); if ($data->{NOPUBLISH}) { $self->enableOut(); } } } sub Location { Element(@_, 'Location', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Address Place Country PostCode Latitude Longitude )); }); } sub Contacts { Elements(@_, 'Contact', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Detail Type )); }); } sub AdminDomain { # Warning: this element is NOT closed. # must be closed using the end function. ElementNoClose(@_, 'AdminDomain', 'Domain', sub { my ($self, $data) = @_; $self->properties($data, qw( Description WWW Distributed Owner)); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); }); } sub AccessPolicies { Elements(@_, 'AccessPolicy', 'Policy', sub { my ($self, $data) = @_; $self->properties($data, qw( Scheme Rule )); if ($data->{UserDomainID}) { $self->begin('Associations'); $self->properties($data, 'UserDomainID'); $self->end('Associations'); } }); } sub MappingPolicies { Elements(@_, 'MappingPolicy', 'Policy', sub { my ($self, $data) = @_; $self->properties($data, qw( Scheme Rule )); if ($data->{UserDomainID}) { $self->begin('Associations'); $self->properties($data, 'UserDomainID'); $self->end('Associations'); } }); } sub Endpoint { Element(@_, 'Endpoint', 'Endpoint', sub { my ($self, $data) = @_; $self->properties($data, qw( URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimeStart DowntimeEnd DowntimeInfo )); $self->AccessPolicies($data->{AccessPolicies}); #if ($data->{ShareID}) { # $self->begin('Associations'); # $self->properties($data, 'ShareID'); # $self->end('Associations'); #} #if ($data->{Activities}) { # $self->begin('Activities'); # $self->ComputingActivities($data->{Activities}); # $self->end('Activities'); #} }); } sub Endpoints { Elements(@_, 'Endpoint', 'Endpoint', sub { my ($self, $data) = @_; $self->properties($data, qw( URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimeStart DowntimeEnd DowntimeInfo )); $self->AccessPolicies($data->{AccessPolicies}); #if ($data->{ShareID}) { # $self->begin('Associations'); # $self->properties($data, 'ShareID'); # $self->end('Associations'); #} #if ($data->{Activities}) { # $self->begin('Activities'); # $self->ComputingActivities($data->{Activities}); # $self->end('Activities'); #} }); } sub Services { Elements(@_, 'Service', 'Service', sub { my ($self, $data) = @_; $self->properties($data, qw( Capability Type QualityLevel StatusInfo Complexity )); # XML validation is order-sensitive.
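## The child order required within a Service (sketch, per the call order used below):
# $self->Location(...);   # printed first
# $self->Contacts(...);   # then contacts
# $self->Endpoints(...);  # then endpoints, before shares and the manager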
$self->Location($data->{Location}); $self->Contacts($data->{Contacts}); $self->Endpoints($data->{Endpoints}); $self->ComputingShares($data->{ComputingShares}); $self->ComputingManager($data->{ComputingManager}); $self->ToStorageServices($data->{ToStorageServices}); if ($data->{ServiceID}) { $self->begin('Associations'); $self->properties($data, 'ServiceID'); $self->end('Associations'); } }); } sub ComputingService { Element(@_, 'ComputingService', 'Service', sub { my ($self, $data) = @_; $self->properties($data, qw( Capability Type QualityLevel StatusInfo Complexity )); $self->Location($data->{Location}); $self->Contacts($data->{Contacts}); $self->properties($data, qw( TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); $self->ComputingEndpoints($data->{ComputingEndpoints}); $self->ComputingShares($data->{ComputingShares}); $self->ComputingManager($data->{ComputingManager}); $self->ToStorageServices($data->{ToStorageServices}); if ($data->{ServiceID}) { $self->begin('Associations'); $self->properties($data, 'ServiceID'); $self->end('Associations'); } }); } sub ComputingEndpoints { Elements(@_, 'ComputingEndpoint', 'Endpoint', sub { my ($self, $data) = @_; $self->properties($data, qw( URL Capability Technology InterfaceName InterfaceVersion InterfaceExtension WSDL SupportedProfile Semantics Implementor ImplementationName ImplementationVersion QualityLevel HealthState HealthStateInfo ServingState StartTime IssuerCA TrustedCA DowntimeAnnounce DowntimeStart DowntimeEnd DowntimeInfo )); $self->AccessPolicies($data->{AccessPolicies}); $self->properties($data, qw( Staging JobDescription TotalJobs RunningJobs WaitingJobs StagingJobs SuspendedJobs PreLRMSWaitingJobs )); if ($data->{ComputingShareID}) { $self->begin('Associations'); $self->properties($data, 'ComputingShareID'); $self->end('Associations'); } if ($data->{ComputingActivities}) { $self->begin('ComputingActivities') unless ($self->{splitjobs}); $self->ComputingActivities($data->{ComputingActivities}); $self->end('ComputingActivities') unless ($self->{splitjobs}); } }); } sub ComputingShares { Elements(@_, 'ComputingShare', 'Share', sub { my ($self, $data) = @_; $self->properties($data, qw( Description )); $self->MappingPolicies($data->{MappingPolicies}); $self->properties($data, qw( MappingQueue MaxWallTime MaxMultiSlotWallTime MinWallTime DefaultWallTime MaxCPUTime MaxTotalCPUTime MinCPUTime DefaultCPUTime MaxTotalJobs MaxRunningJobs MaxWaitingJobs MaxPreLRMSWaitingJobs MaxUserRunningJobs MaxSlotsPerJob MaxStageInStreams MaxStageOutStreams SchedulingPolicy MaxMainMemory GuaranteedMainMemory MaxVirtualMemory GuaranteedVirtualMemory MaxDiskSpace DefaultStorageService Preemption ServingState TotalJobs RunningJobs LocalRunningJobs WaitingJobs LocalWaitingJobs SuspendedJobs LocalSuspendedJobs StagingJobs PreLRMSWaitingJobs EstimatedAverageWaitingTime EstimatedWorstWaitingTime FreeSlots FreeSlotsWithDuration UsedSlots RequestedSlots ReservationPolicy Tag )); $self->begin('Associations'); $self->properties($data, 'ComputingEndpointID'); $self->properties($data, 'ExecutionEnvironmentID'); $self->end('Associations'); }); } sub ComputingManager { Element(@_, 'ComputingManager', 'Manager', sub { my ($self, $data) = @_; $self->properties($data, qw( ProductName ProductVersion Reservation BulkSubmission TotalPhysicalCPUs TotalLogicalCPUs TotalSlots SlotsUsedByLocalJobs SlotsUsedByGridJobs Homogeneous NetworkInfo LogicalCPUDistribution WorkingAreaShared WorkingAreaGuaranteed WorkingAreaTotal WorkingAreaFree
WorkingAreaLifeTime WorkingAreaMultiSlotTotal WorkingAreaMultiSlotFree WorkingAreaMultiSlotLifeTime CacheTotal CacheFree TmpDir ScratchDir ApplicationDir )); $self->Benchmarks($data->{Benchmarks}); $self->begin('ExecutionEnvironments'); $self->ExecutionEnvironments($data->{ExecutionEnvironments}); $self->end('ExecutionEnvironments'); $self->begin('ApplicationEnvironments'); $self->ApplicationEnvironments($data->{ApplicationEnvironments}); $self->end('ApplicationEnvironments'); }); } sub Benchmarks { Elements(@_, 'Benchmark', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Type Value )); }); } sub ExecutionEnvironments { Elements(@_, 'ExecutionEnvironment', 'Resource', sub { my ($self, $data) = @_; $self->properties($data, qw( Platform VirtualMachine TotalInstances UsedInstances UnavailableInstances PhysicalCPUs LogicalCPUs CPUMultiplicity CPUVendor CPUModel CPUVersion CPUClockSpeed CPUTimeScalingFactor WallTimeScalingFactor MainMemorySize VirtualMemorySize OSFamily OSName OSVersion ConnectivityIn ConnectivityOut NetworkInfo )); $self->Benchmarks($data->{Benchmarks}); if ($data->{ComputingShareID} or $data->{ComputingActivityID} or $data->{ApplicationEnvironmentID}) { $self->begin('Associations'); $self->properties($data, 'ComputingShareID'); $self->properties($data, 'ComputingActivityID'); $self->properties($data, 'ApplicationEnvironmentID'); $self->end('Associations'); } }); } sub ApplicationEnvironments { Elements(@_, 'ApplicationEnvironment', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( AppName AppVersion State RemovalDate License Description BestBenchmark ParallelSupport MaxSlots MaxJobs MaxUserSeats FreeSlots FreeJobs FreeUserSeats )); $self->ApplicationHandles($data->{ApplicationHandles}); if ($data->{ExecutionEnvironmentID}) { $self->begin('Associations'); $self->properties($data, 'ExecutionEnvironmentID'); $self->end('Associations'); } }); } sub ApplicationHandles { Elements(@_, 'ApplicationHandle', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( Type Value )); }); } sub ComputingActivities { my $filler = sub { my ($self, $data) = @_; $self->properties($data, qw( Type IDFromEndpoint LocalIDFromManager JobDescription State RestartState ExitCode ComputingManagerExitCode Error WaitingPosition UserDomain Owner LocalOwner RequestedTotalWallTime RequestedTotalCPUTime RequestedSlots RequestedApplicationEnvironment StdIn StdOut StdErr LogDir ExecutionNode Queue UsedTotalWallTime UsedTotalCPUTime UsedMainMemory SubmissionTime ComputingManagerSubmissionTime StartTime ComputingManagerEndTime EndTime WorkingAreaEraseTime ProxyExpirationTime SubmissionHost SubmissionClientName OtherMessages )); $self->begin('Associations'); $self->properties($data, 'UserDomainID'); $self->properties($data, 'ComputingEndpointID'); $self->properties($data, 'ComputingShareID'); $self->properties($data, 'ExecutionEnvironmentID'); $self->properties($data, 'ActivityID'); $self->end('Associations'); }; my ($self, $collector) = @_; if (not $self->{splitjobs}) { Elements(@_, 'ComputingActivity', 'Activity', $filler); } else { while (my $data = &$collector()) { # Function that returns a string containing the ComputingActivity's XML my $xmlGenerator = sub { my ($memhandle, $xmlstring); open $memhandle, '>', \$xmlstring; return undef unless defined $memhandle; my $memprinter = XmlPrinter->new($memhandle); $data->{xmlns} = "http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"; # Todo: fix a-rex, client to handle correct namespace $data->{xmlns} = 
"http://schemas.ogf.org/glue/2009/03/spec_2.0_r1"; $data->{BaseType} = "Activity"; $memprinter->begin('ComputingActivity', $data, qw(xmlns BaseType CreationTime Validity )); $memprinter->properties($data, qw(ID Name OtherInfo)); &$filler($memprinter, $data); $memprinter->end('ComputingActivity'); close($memhandle); return $xmlstring; }; my $filewriter = $data->{jobXmlFileWriter}; &$filewriter($xmlGenerator); } } } sub ToStorageServices { Elements(@_, 'ToStorageService', undef, sub { my ($self, $data) = @_; $self->properties($data, qw( LocalPath RemotePath )); $self->begin('Associations'); $self->properties($data, 'StorageServiceID'); $self->end('Associations'); }); } sub Domains { my ($self, $data) = @_; my $attrs = { 'xmlns' => "http://schemas.ogf.org/glue/2009/03/spec_2.0_r1", 'xmlns:xsi' => "http://www.w3.org/2001/XMLSchema-instance", 'xsi:schemaLocation' => "https://raw.github.com/OGF-GLUE/XSD/master/schema/GLUE2.xsd" #might change in the future }; # Todo: fix a-rex, client to handle correct namespace # $attrs->{'xmlns'} = "http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01"; $self->begin('Domains', $attrs, qw( xmlns xmlns:xsi xsi:schemaLocation )); $self->AdminDomain(&$data->{AdminDomain}); $self->begin('Services'); $self->Services(&$data->{Services}); $self->ComputingService(&$data->{ComputingService}); $self->end('Services'); $self->end('AdminDomain'); $self->end('Domains'); } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/LSF.pm0000644000000000000000000000013215067751327022654 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.805720984 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/LSF.pm0000644000175000002070000003044715067751327024566 0ustar00mockbuildmock00000000000000package LSF; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### use strict; our @ISA = ('Exporter'); our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our $lsf_profile_path; our $lsf_profile; our $lshosts_command; our $bhosts_command; our $bqueues_command; our $bqueuesl_command; our $bjobs_command; our $lsid_command; our $total_cpus="0"; ########################################## # Private subs ########################################## sub lsf_env($$){ my ($path)=shift; error("lsf_bin_path not defined, cannot continue. Exiting...") unless defined $path; $lsf_profile_path=shift; error("lsf_profile_path not defined, cannot continue. 
Exiting...") unless defined $lsf_profile_path; $lsf_profile=`source $lsf_profile_path`; $lshosts_command="$path/lshosts -w"; $bhosts_command = "$path/bhosts -w"; $bqueues_command = "$path/bqueues -w"; $bqueuesl_command = "$path/bqueues -l"; $bjobs_command = "$path/bjobs -W -w"; $lsid_command="$path/lsid"; } sub totalcpus { my %lsfnodes; my $ncpus=0; if ( $total_cpus eq "0"){ read_lsfnodes(\%lsfnodes); #calculate totals foreach my $node (keys %lsfnodes){ if( ($lsfnodes{$node}{"node_status"} eq "ok") || ($lsfnodes{$node}{"node_status"} eq "closed_Full") || ($lsfnodes{$node}{"node_status"} eq "closed_Excl") || ($lsfnodes{$node}{"node_status"} eq "closed_Busy") || ($lsfnodes{$node}{"node_status"} eq "closed_Adm") ){ $ncpus += $lsfnodes{$node}{"node_ncpus"}; } } return $ncpus; } return $total_cpus; } sub read_lsfnodes ($){ my ($hashref) =shift; my ($node_count) = 0; my ($cpu_count) = 0; unless (open LSFHOSTSOUTPUT, "$lshosts_command |") { error("Error in executing lshosts command: $lshosts_command"); } while (my $line= ) { if (! ($line =~ '^HOST_NAME')) { chomp($line); my ($nodeid,$OStype,$model,$cpuf,$ncpus,$maxmem,$maxswp)= split(" ", $line); ${$hashref}{$nodeid}{"node_hostname"} = $nodeid; ${$hashref}{$nodeid}{"node_os_type"} = $OStype; ${$hashref}{$nodeid}{"node_model"} = $model; ${$hashref}{$nodeid}{"node_cpuf"} = $cpuf; ${$hashref}{$nodeid}{"node_maxmem"} = $maxmem; ${$hashref}{$nodeid}{"node_max_swap"} = $maxswp; if($ncpus != "-") { ${$hashref}{$nodeid}{"node_ncpus"} = $ncpus; } else { ${$hashref}{$nodeid}{"node_ncpus"} = 1; } } } close LSFHOSTSOUTPUT; unless (open LSFBHOSTSOUTPUT, "$bhosts_command |") { error("Error in executing bhosts command: $bhosts_command"); } while (my $line= ) { if (! ( ($line =~ '^HOST_NAME') || ($line =~ '^My cluster') || ($line =~ '^My master') ) ) { chomp($line); # HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV my ($nodeid,$status,$lju,$max,$njobs,$run,$ssusp,$ususp,$rsv) = split(" ", $line); ${$hashref}{$nodeid}{"node_used_slots"} = $njobs; ${$hashref}{$nodeid}{"node_running"} = $run; ${$hashref}{$nodeid}{"node_suspended"} = $ssusp + $ususp; ${$hashref}{$nodeid}{"node_reserved"} = $rsv; ${$hashref}{$nodeid}{"node_status"} = $status; } } close LSFBHOSTSOUTPUT; } sub type_and_version { my (@lsf_version) = `$lsid_command -V 2>&1`; my ($type) ="LSF"; my ($version)="0.0"; if($lsf_version[0] =~ /^Platform/) { my @s = split(/ +/,$lsf_version[0]); $type=$s[1]; $version=$s[2]; $version=~s/,$//; } my (@result) = [$type,$version]; return ($type,$version); } sub queue_info_user ($$$) { my ($path) = shift; my ($qname) = shift; my ($user) = shift; my (%lrms_queue); #calculate running cpus and queues available unless ($user eq ""){ $user = "-u " . $user; } unless (open BQOUTPUT, "$bqueues_command $user $qname|") { error("Error in executing bqueues command: $bqueues_command $user $qname"); } while (my $line= ) { if (! 
($line =~ '^QUEUE')) { chomp($line); my ($q_name,$q_priority,$q_status,$q_mjobs,$q_mslots,$q_mslots_proc,$q_mjob_slots_host,$q_num_jobs,$q_job_pending,$q_job_running,$q_job_suspended) = split(" ", $line); $lrms_queue{totalcpus} = "$q_mjobs"; $lrms_queue{maxrunning} = "$q_mjobs"; $lrms_queue{maxqueuable} = "$q_mjobs"; if ($q_mjobs eq "-") { $lrms_queue{totalcpus} = totalcpus(); $lrms_queue{maxrunning} = $lrms_queue{totalcpus}; $lrms_queue{maxqueuable} = $lrms_queue{totalcpus}; } $lrms_queue{maxuserrun} = "$q_mslots"; if ($q_mslots eq "-"){ $lrms_queue{maxuserrun} = $lrms_queue{totalcpus}; } $lrms_queue{running}= $q_job_running; $lrms_queue{status} = $q_status; $lrms_queue{queued} = $q_job_pending; } } close BQOUTPUT; $lrms_queue{defaultcput} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxcputime} = ""; $lrms_queue{maxwalltime} = ""; unless (open BQOUTPUT, "$bqueuesl_command $user $qname|") { error("Error in executing bqueues command: $bqueuesl_command $user $qname"); } my $lastline =""; while (my $line= <BQOUTPUT>) { if ( ($line =~ '^ CPULIMIT')) { my $line2=<BQOUTPUT>; chomp($line2); my (@mcput)= split(" ", $line2); #change from float to int. $mcput[0]=~ s/(\d+).*/$1/; if ($lastline =~ '^DEFAULT'){ $lrms_queue{defaultcput} = "$mcput[0]"; } else { $lrms_queue{maxcputime} = "$mcput[0]"; if ($lrms_queue{maxwalltime} == "") { $lrms_queue{maxwalltime} = "$mcput[0]"; } } } if ( ($line =~ '^ RUNLIMIT')) { my $line2=<BQOUTPUT>; chomp($line2); my (@mcput)= split(" ", $line2); #change from float to int. $mcput[0]=~ s/(\d+).*/$1/; if ($lastline =~ '^DEFAULT'){ $lrms_queue{defaultwallt} = "$mcput[0]"; } else { $lrms_queue{maxwalltime} = "$mcput[0]"; } } $lastline = $line; } close BQOUTPUT; $lrms_queue{mincputime} = "0"; $lrms_queue{minwalltime} = "0"; return %lrms_queue; } sub get_jobinfo($){ my $id = shift; my %job; unless (open BJOUTPUT, "$bjobs_command $id|") { error("Error in executing bjobs command: $bjobs_command $id"); } while (my $line= <BJOUTPUT>) { if (! ($line =~ '^JOBID')) { chomp($line); my ($j_id,$j_user,$j_stat,$j_queue,$j_fromh,$j_exech,$j_name,$j_submittime,$j_projname,$j_cput,$j_mem,$j_swap,$j_pids,$j_start,$j_finish) = split(" ", $line); $job{id} = $j_id; # Report one node per job. Needs improving for multi-CPU jobs. $job{nodes} = [ $j_exech ]; $job{cput} = $j_cput; $job{mem} = $j_mem; $job{start} = $j_start; $job{finish} = $j_finish; if ($j_stat eq "RUN"){ $job{status} = "R"; } if ($j_stat eq "PEND"){ $job{status} = "Q"; } if ($j_stat eq "PSUSP" || $j_stat eq "USUSP" || $j_stat eq "SSUSP"){ $job{status} = "S"; } if ($j_stat eq "DONE" || $j_stat eq "EXIT"){ $job{status} = "E"; } if ($j_stat eq "UNKWN" || $j_stat eq "WAIT" || $j_stat eq "ZOMBI"){ $job{status} = "O"; } } } close BJOUTPUT; return %job; } ############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); #init my %lrms_cluster; my %lsfnodes; $lrms_cluster{totalcpus} = 0; $lrms_cluster{usedcpus} = 0; $lrms_cluster{queuedcpus} = 0; $lrms_cluster{runningjobs} = 0; $lrms_cluster{queuedjobs} = 0; my @cpudist; $lrms_cluster{cpudistribution} = ""; $lrms_cluster{queue} = []; #lookup batch type and version ($lrms_cluster{lrms_type},$lrms_cluster{lrms_version}) = type_and_version(); # cputime limit for parallel/multi-cpu jobs is treated as job-total # OBS: Assuming LSB_JOB_CPULIMIT=y !
$lrms_cluster{has_total_cputime_limit} = 1; #get info on nodes in cluster read_lsfnodes(\%lsfnodes); #calculate totals foreach my $node (keys %lsfnodes){ if( ($lsfnodes{$node}{"node_status"} eq "ok") || ($lsfnodes{$node}{"node_status"} eq "closed_Full") || ($lsfnodes{$node}{"node_status"} eq "closed_Excl") || ($lsfnodes{$node}{"node_status"} eq "closed_Busy") || ($lsfnodes{$node}{"node_status"} eq "closed_Adm") ){ my $ncpus = $lsfnodes{$node}{"node_ncpus"}; # we use lshosts output, maybe we should use bhosts? $lrms_cluster{totalcpus} += $ncpus; $lrms_cluster{usedcpus} += $lsfnodes{$node}{"node_used_slots"}; $cpudist[$ncpus] += 1; } } #write cpu distribution string of the form: 1cpu:15 4cpu:4 for (my $i=0; $i<=$#cpudist; $i++) { next unless ($cpudist[$i]); $lrms_cluster{cpudistribution} .= " " . $i . "cpu:" . $cpudist[$i]; } #calculate queued cpus and queues available unless (open BQOUTPUT, "$bqueues_command|") { debug("Error in executing bqueues command: $bqueues_command "); die "Error in executing bqueues: $bqueues_command \n"; } my @queues; while (my $line= <BQOUTPUT>) { if (! ($line =~ '^QUEUE')) { chomp($line); my ($q_name,$q_priority,$q_status,$q_mjobs,$q_mslots,$q_mslots_proc,$q_mjob_slots_host,$q_num_jobs,$q_job_pending,$q_job_running,$q_job_suspended) = split(" ", $line); #TODO: total number of jobs in queue is not equal to queued cpus. $lrms_cluster{queuedcpus}+=$q_num_jobs; $lrms_cluster{runningjobs}+=$q_job_running; $lrms_cluster{queuedjobs}+=$q_job_pending; push @queues, $q_name; } } close BQOUTPUT; @{$lrms_cluster{queue}} = @queues; return %lrms_cluster; } sub queue_info($$){ my ($config) = shift; my ($qname) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); return queue_info_user($$config{lsf_bin_path},$qname,""); } # LSF time is in the format: 000:00:00.28 # output should be an integer in minutes, rounded up.
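## Worked example for the conversion below (illustrative input):
## "001:02:30.5" -> days=1, hours=2, rest="30.5" -> minutes=30, seconds=5;
## seconds > 0 rounds up to 31, so 1*24*60 + 2*60 + 31 = 1591 minutes.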
sub translate_time_lsf_to_minutes ($@) { my ($cputime) = shift; my ($days,$hours,$rest) = split(/:/,$cputime); my ($minutes, $seconds)=split(/\./,$rest); if ( $seconds > 0){ $minutes++; } $minutes=$days*24*60+$hours*60+$minutes; return $minutes; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); my (%lrms_jobs); my (%job); my (@s); foreach my $id (@$jids){ %job = get_jobinfo($id); $lrms_jobs{$id}{status}=$job{status}; $lrms_jobs{$id}{nodes}=$job{nodes}; $lrms_jobs{$id}{mem}=$job{mem}; $lrms_jobs{$id}{cputime}=translate_time_lsf_to_minutes($job{cput}); $lrms_jobs{$id}{walltime}=""; $lrms_jobs{$id}{reqwalltime}=""; $lrms_jobs{$id}{reqcputime}=""; $lrms_jobs{$id}{comment}=["job started: $job{start}"]; $lrms_jobs{$id}{rank}=""; #TODO fix to support parallel jobs $lrms_jobs{$id}{cpus}=1; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; lsf_env($$config{lsf_bin_path},$$config{lsf_profile_path}); my (%lrms_users); my (%queue); foreach my $u ( @{$accts} ) { %queue = queue_info_user( $$config{lsf_bin_path}, $qname, $u ); $lrms_users{$u}{freecpus} = $queue{maxrunning}-$queue{running}; $lrms_users{$u}{queuelength} = "$queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/CEinfo.pl.in0000644000000000000000000000013215067751327023777 xustar0030 mtime=1759498967.758492025 30 atime=1759498967.866493666 30 ctime=1759499029.836435303 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/CEinfo.pl.in0000644000175000002070000005301715067751327025707 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w package CEInfo; ######################################################## # Driver for information collection ######################################################## use File::Basename; use Getopt::Long; use Data::Dumper; use Cwd; ## enable this below to dump datastructures ## Note: Concise is not an ARC dependency! ## must be installed separately. #use Data::Dumper::Concise; ## usage: # print Dumper($datastructure); use strict; # Some optional features that may be unavailable in older Perl versions. # Should work with Perl v5.8.0 and up. BEGIN { # Makes sure the GLUE document is valid UTF8 eval {binmode(STDOUT, ":utf8")}; # Used for reading UTF8 encoded grid-mapfile eval {require Encode; import Encode "decode"}; # Fall back to whole-second precision if not available eval {require Time::HiRes; import Time::HiRes "time"}; } BEGIN { my $pkgdatadir = dirname($0); unshift @INC, $pkgdatadir; } # Attempt to recursively create directory # takes absolute filename (absolute path and file name) sub createdirs { my ($fullpathfilename, $log) = @_; my @paths; for (my $path = dirname $fullpathfilename; length $path > 1; $path = dirname $path) { push @paths, $path; } mkdir $_ for reverse @paths; $log->error("Failed to create directory $paths[0], error: $!") if @paths and not -d $paths[0]; } # minimal set of vars before loading the profiler our $configfile; our $NYTPROF_PRESENT = 0; # Profiler config and code moved here because we need # to get more data before main starts BEGIN { use ConfigCentral; use LogUtils; use Getopt::Long; LogUtils::level('WARNING'); LogUtils::indentoutput(1); my $log = LogUtils->getLogger(__PACKAGE__); my $dumpconfig = 0; my $nojobs; my $splitjobs; my $perffreq = 1800; my $print_help; # Make a copy of @ARGV and restore it after extracting options from it.
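## Why @ARGV is copied and restored (sketch): Getopt::Long consumes the options
## it recognizes, and main() below runs GetOptions again on the same arguments,
## so this BEGIN block must put back what it removed:
# my @options = @ARGV;
# GetOptions(...);     # strips recognized switches from @ARGV
# @ARGV = @options;    # the second parse in main() sees the full command line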
my @options = @ARGV; GetOptions("config:s" => \$configfile, "dumpconfig|d" => \$dumpconfig, "nojobs|n" => \$nojobs, "splitjobs|s" => \$splitjobs, "perffreq:i" => \$perffreq, "help|h" => \$print_help ); @ARGV = @options; unless ( $configfile ) { $log->warning("Performance code setup: No 'config' option, skipping performance configuration. See --help "); } else { my $perflogdir; my $perflognytprofdir; my $perflognytproffilepath; my $infosysloglevel = ConfigCentral::getValueOf($configfile,'infosys','loglevel'); $log->error("Can\'t determine loglevel. Probably the [infosys] block is missing. Exiting...") if ($infosysloglevel eq ''); LogUtils::level($infosysloglevel); my $enable_perflog_reporting = ConfigCentral::isBlockPresent($configfile,'monitoring/perflog'); if ($enable_perflog_reporting) { # The profiling tool might be missing in some distributions. # Default is to assume it is not present. $perflogdir = ConfigCentral::getValueOf($configfile,'monitoring/perflog','perflogdir'); $log->error("perflogdir parameter not defined or no default set in code, $perflogdir") if ((!defined $perflogdir) || $perflogdir eq ''); $perflognytprofdir = $perflogdir.'/perl_nytprof/'; $log->info("Performance reporting enabled. The database files will be stored in $perflognytprofdir"); # reduce performance reporting depending on interval by checking dir last modification time if ( -e $perflognytprofdir ) { my $t0 = time(); my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,$blksize,$blocks) = stat($perflognytprofdir); my $timediff = $t0 - $mtime; if ($timediff <= $perffreq) { $log->debug("$perflognytprofdir changed $timediff seconds ago, less than $perffreq. Skipping performance data collection"); undef $log; no ConfigCentral; no LogUtils; return 0; } } # append and create raw folder for NYTProf database files my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); my $timestamp=POSIX::strftime("%Y%m%d%H%M%S", $sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst); my $perflognytproffilename = 'infosys_'.$timestamp.'.perflog.raw'; $perflognytproffilepath = $perflognytprofdir.$perflognytproffilename; createdirs($perflognytproffilepath,$log); if ( -e $perflogdir ) { $ENV{NYTPROF} = "savesrc=0:start=no:file=".$perflognytproffilepath; $NYTPROF_PRESENT = eval { require Devel::NYTProf; 1; }; if ($NYTPROF_PRESENT) { DB::enable_profile($perflognytproffilepath); $log->info("Performance reporting enabled. The database file is $perflognytproffilepath"); } else { $log->warning("Devel::NYTProf PERL module not installed. Performance data cannot be collected."); } } else { $log->warning("Cannot access directory $perflogdir. Unable to open performance file $perflognytproffilepath.
Check arc.conf perflogdir option, directories and permissions"); } } undef $log; no ConfigCentral; no LogUtils; } use ConfigCentral; use LogUtils; use HostInfo; use RTEInfo; use GMJobsInfo; use LRMSInfo; use ARC0ClusterInfo; use ARC1ClusterInfo; use GLUE2xmlPrinter; use GLUE2ldifPrinter; use NGldifPrinter; use InfosysHelper; our $nojobs; our $log = LogUtils->getLogger(__PACKAGE__); sub timed { my ($fname, $func) = @_; my $t0 = time(); my $result = &$func(); my $dt = sprintf('%.3f', time() - $t0); $log->verbose("Time spent in $fname: ${dt}s"); return $result; } sub main { LogUtils::level('INFO'); LogUtils::indentoutput(1); # Parse command line options my $dumpconfig = 0; my $splitjobs; my $perffreq = 1800; my $print_help; GetOptions("config:s" => \$configfile, "dumpconfig|d" => \$dumpconfig, "nojobs|n" => \$nojobs, "splitjobs|s" => \$splitjobs, "perffreq:i" => \$perffreq, "help|h" => \$print_help ); if ($print_help) { print "Usage: $0 --config - location of arc.conf --dumpconfig - dumps internal representation of a-rex configuration and exits. --nojobs|n - don't include information about jobs --splitjobs|s - write job info in a separate XML file for each job in the controldir --perffreq|p - interval between performance collections, in seconds. Default is 1800 --help - this help\n"; exit 1; } unless ( $configfile ) { $log->error("a command line argument is missing, see --help "); } # setup output to logfile before parsing the complete config file my $infosysloglevel = ConfigCentral::getValueOf($configfile,'infosys','loglevel'); $log->error("Cannot determine loglevel. Probably the [infosys] block is missing. Exiting...") if ($infosysloglevel eq ''); # Change level for root logger (affects all loggers from now on) LogUtils::level($infosysloglevel); my $providerlog = ConfigCentral::getValueOf($configfile,'infosys','logfile'); $log->info("Redirecting further messages to $providerlog"); LogUtils::indentoutput(0); # Attempt to recursively create directory createdirs($providerlog,$log); open STDERR, ">>", $providerlog or $log->error("Failed to open $providerlog"); LogUtils::timestamps(1); $log->info("############## A-REX infoprovider started ##############"); # Read ARC configuration our $config = timed 'ConfigCentral', sub { ConfigCentral::parseConfig($configfile) }; # Dump config (uncomment if needed) #print Dumper($config); # Dump internal datastructure if required and exit if ($dumpconfig) { $log->info("Dumping configuration and exiting"); ConfigCentral::dumpInternalDatastructure($configfile); $log->info("############## A-REX infoprovider finished ##############"); exit 0; }; $log->info('Start data collection...'); my $data = timed 'all info collectors', sub { CEInfo::collect($config) }; $data->{nojobs} = $nojobs; # Print GLUE2 XML $log->info("Generating GLUE2 XML rendering"); my $glue2data = timed 'ARC1ClusterInfo', sub { ARC1ClusterInfo::collect($data) }; my $xmlPrinter = GLUE2xmlPrinter->new(*STDOUT, $splitjobs); $xmlPrinter->begin('InfoRoot'); timed 'GLUE2xml', sub { $xmlPrinter->Domains($glue2data) }; $xmlPrinter->end('InfoRoot'); # Generate ldif for infosys-ldap $log->info("Generating LDIF renderings"); my $ngdata; if ( defined $config->{infosys}{nordugrid} ) { $ngdata = timed 'ARC0ClusterInfo', sub { ARC0ClusterInfo::collect($data) } } # Write out LDIF document(s) my $print_ldif = sub { my ($fh) = @_; if ( defined $config->{infosys}{glue2}{ldap} ) { # introduced to find GLUE2 LDAP schema version my $glue2schemaversion; unless (open (GLUE2_SCHEMA, "debug("CEinfo.pl didn't find
GLUE20.schema in standard location. Will create LDIF in compatibility mode."); } else { my $linecnt=0; while (my $line = <GLUE2_SCHEMA>) { chomp $line; next unless $line; if ($line =~ m/# Schema Version: (\d).*/) { $glue2schemaversion = $1; $log->debug("GLUE2 schema version major identified. It is: $glue2schemaversion"); last } if ($linecnt > 25) { $log->debug("Can't identify GLUE2 schema version. Will create LDIF in compatibility mode"); last } $linecnt++; } close GLUE2_SCHEMA; } # override splitjobs for ldap if option specified in arc.conf if ( $config->{infosys}{glue2}{ldap}{showactivities} ) { $splitjobs = 0 }; $log->info("Generating GLUE2 LDIF rendering"); my $ldifPrinter = GLUE2ldifPrinter->new($fh, $splitjobs, $glue2schemaversion); timed 'GLUE2ldif', sub { $ldifPrinter->Top($glue2data) }; } if ($config->{infosys}{nordugrid}{enabled}) { $log->info("Generating NorduGrid LDIF rendering"); my $ldifPrinter = NGldifPrinter->new($fh, $config->{infosys}{validity_ttl}); timed 'NGldif', sub { $ldifPrinter->Top($ngdata) }; } }; # only notify ldap infosys if ldap is enabled! # C 141 use block to check if ldap is enabled if ( $config->{infosys}{ldap}{enabled} ) { # check that fifo is working and notify if so. if (InfosysHelper::createLdifScript($config->{infosys}{ldap}, $print_ldif)) { if (InfosysHelper::notifyInfosys($config->{infosys}{ldap})) { $log->verbose("LDAP information system notified"); } else { $log->warning("Failed to notify LDAP information system"); } } else { $log->warning("Failed to create ldif generator script for LDAP information system"); } } else { $log->verbose("LDAP/LDIF information generation is disabled. To enable it, enable [infosys/ldap] AND one of the blocks [infosys/nordugrid], [infosys/glue2/ldap]"); } $log->info("############## A-REX infoprovider finished ##############"); # close performance file if enabled DB::disable_profile() if ($NYTPROF_PRESENT && (defined $config->{enable_perflog_reporting})); } ################################################## # information collector ################################################## sub collect($) { my ($config) = @_; # uncomment for a dump of the whole config hash # print Dumper($config); #TODO: Everything works without the localusers block, but we may reuse it in the future, so keeping the code here. #TODO: @localusers parsed by: # - hostinfo to provide stats for storage usage # - lrms_info to ?? #TODO: maybe use a default user e.g. root? #TODO: no more per-user stats? Needs different code to handle new mapping # # get all local users from grid-map. Sort unique # $log->verbose("Reading grid-mapfiles. These are obsolete, one should use other methods, this message can be ignored."); # my @localusers; # my $usermap = {}; # if ($config->{gridmap}) { # my %saw = (); # $usermap = read_grid_mapfile($config->{gridmap}); # @localusers = grep !$saw{$_}++, values %$usermap; # } else { # $log->verbose("gridmap not configured. Gridmap files are obsolete, this message can be ignored"); # my $defaultuser = $config->{defaultLocalName}; # @localusers = ($defaultuser) if $defaultuser; # } # $log->verbose("Cannot determine local users from gridmapfile or config.
This approach is obsolete, this message can be discarded.") unless @localusers; #TODO: Placeholder for localusers, before removal of the above block my @localusers; $log->info("Fetching job information from control directory (GMJobsInfo.pm)"); my $gmjobs_info = get_gmjobs_info($config); $log->info("Updating job status information"); # build the list of all jobs in state INLRMS my @jobids; for my $job (values %$gmjobs_info) { next unless $job->{status} and $job->{status} eq 'INLRMS'; next unless defined $job->{localid} and length $job->{localid}; push @jobids, $job->{localid}; } # build hash with all the input necessary for the renderers my $data = {}; $data->{config} = $config; #TODO: this is not used, but may come handy if we implement mapping info. # Used in ARC0 and ARC1 ClusterInfo #$data->{usermap} = $usermap; $log->info("Updating frontend information (HostInfo.pm)"); # @localusers seems not needed, but might be if we go non-root, so keeping code here. $data->{host_info} = get_host_info($config,\@localusers); $log->info("Updating RTE information (RTEInfo.pm)"); $data->{rte_info} = get_rte_info($config); $data->{gmjobs_info} = $gmjobs_info; $log->info("Updating LRMS information (LRMSInfo.pm)"); $data->{lrms_info} = get_lrms_info($config,\@localusers,\@jobids); $log->info("Discovering adotf values"); fix_adotf($config->{service}, $data->{host_info}); fix_adotf($_, $data->{host_info}) for values %{$config->{xenvs}}; return $data; } ################################################## # Calling other information collectors ################################################## sub get_host_info($$) { my ($config,$localusers) = @_; my $host_opts = {}; $host_opts->{localusers} = $localusers; $host_opts->{processes} = ['arched', 'slapd']; $host_opts->{ports}{'arched'} = ['443']; $host_opts->{ports}{'slapd'} = [$config->{infosys}{ldap}{port}] if $config->{infosys}{ldap}{enabled}; $host_opts->{x509_host_cert} = $config->{x509_host_cert}; $host_opts->{x509_cert_dir} = $config->{x509_cert_dir}; $host_opts->{x509_cert_policy} = $config->{x509_cert_policy}; $host_opts->{wakeupperiod} = $config->{wakeupperiod}; $host_opts->{arex} = $config->{arex}; return timed 'HostInfo', sub { HostInfo::collect($host_opts) }; } sub get_rte_info($) { my ($config) = @_; my $rte_opts; $rte_opts->{controldir} = $config->{arex}{controldir} if $config->{arex}{controldir}; return timed 'RTEInfo', sub { RTEInfo::collect($rte_opts) }; } sub get_lrms_info($$$) { my ($config,$localusers,$jobids) = @_; # NOTE: possibly any options from config are needed, but they should be # moved into the lrms block if required. I see no point in moving all of them # if not needed. # this conforms to LRMSInfo.pm specification my $lrms_opts = Storable::dclone($config->{lrms}); # Additional info used by LRMS modules $lrms_opts->{controldir} = $config->{arex}{controldir}; $lrms_opts->{loglevel} = $config->{infosys}{loglevel}; # TODO: double check this cleanup, if needed delete $lrms_opts->{$_} for qw(xenvs shares); $lrms_opts->{jobs} = $jobids; ## TODO: review and implement this, is cleaner than what we have now ## This former implementation of shares might solve problems in ARC1ClusterInfo. ## nowadays it is not used. 
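    ## Added illustration (not part of the original code): with a hypothetical
    ## arc.conf share "batch" whose configuration carries MappingQueue "long",
    ## the loop below would hand LRMSInfo::collect roughly this structure:
    ##
    ##   $lrms_opts->{queues}{batch} = {
    ##       MappingQueue => 'long',     # copied from the share's config hash
    ##       users        => $localusers, # the mapped local accounts
    ##   };
    ##
    ## i.e. each configured share becomes one entry under $lrms_opts->{queues}.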
for my $share ( keys %{$config->{shares}} ) { $lrms_opts->{queues}{$share} = $config->{shares}{$share}; $lrms_opts->{queues}{$share}{users} = $localusers; } return timed 'LRMSInfo', sub { LRMSInfo::collect($lrms_opts) }; } sub get_gmjobs_info($) { my $config = shift; my $gmjobs_info = timed 'GMJobsInfo', sub { GMJobsInfo::collect($config->{arex}, $nojobs) }; return fix_jobs($config, $gmjobs_info); } ################################################## # ################################################## # Check validity and fill in missing 'share' and 'queue' attributes of jobs. sub fix_jobs { my ($config, $gmjobs_info) = @_; my $lrms = $config->{lrms}{lrms}; my $defaultshare = $config->{lrms}{defaultqueue} if defined $config->{lrms}{defaultqueue} || ''; for my $jobid (keys %$gmjobs_info) { my $job = $gmjobs_info->{$jobid}; my $share = $job->{share}; # If A-REX has not chosen a share for the job, default to one for data collection. # This changes once the shared is written in the controldir by submission scripts. if (not $share) { my $msg = "A-REX has not chosen a share for job $jobid"; if ($defaultshare) { $log->info($msg.". Assuming default: ".$defaultshare); $share = $defaultshare; } else { my @shares = keys %{$config->{shares}}; if (@shares == 1) { $log->info($msg.". Assuming: ".$shares[0]); $share = $shares[0]; } else { $log->warning($msg." and no default share is defined."); } } } # Set correct queue if ($share) { my $sconfig = $config->{shares}{$share}; if ($sconfig) { $job->{queue} = $sconfig->{MappingQueue} || $share; } else { $log->warning("Job $jobid belongs to an invalid share '$share'"); $share = undef; } } # Group jobs not belonging to any known share into a catch-all share named '' $job->{share} = $share || ''; } return $gmjobs_info; } # TODO: reuse for map_with_file if still needed. Should probably be moved to configcentral eventually. # reads grid-mapfile. Returns a ref to a DN => uid hash #sub read_grid_mapfile($) { # my $gridmapfile = shift; # my $usermap = {}; # # unless (open MAPFILE, "<$gridmapfile") { # $log->warning("can't open gridmapfile at $gridmapfile"); # return; # } # while (my $line = ) { # chomp($line); # if ( $line =~ m/\"([^\"]+)\"\s+(\S+)/ ) { # my $subject = $1; # eval { # $subject = decode("utf8", $subject, 1); # }; # $usermap->{$subject} = $2; # } # } # close MAPFILE; # # return $usermap; #} ## This routine belongs here as info from HostInfo.pm and LRMS modules is required # Replaces 'adotf' in config options with autodetected values sub fix_adotf { my ($h, $hostinfo) = @_; if ($h->{nodecpu}) { if ($h->{nodecpu} =~ m/(.*?)(?:\s+stepping\s+(\d+))?\s+@\s+([.\d]+)\s*(M|G)Hz$/i) { $h->{CPUModel} ||= $1; $h->{CPUVersion} ||= $2; $h->{CPUClockSpeed} ||= ($4 eq 'G') ? int($3 * 1000) : int($3); } elsif ($h->{nodecpu} eq 'adotf') { $h->{CPUVendor} ||= 'adotf'; $h->{CPUModel} ||= 'adotf'; $h->{CPUClockSpeed} ||= 'adotf'; } else { $log->warning("Invalid value for nodecpu option: ".$h->{nodecpu}); } delete $h->{nodecpu}; } if ($h->{OpSys} and grep {$_ eq 'adotf'} @{$h->{OpSys}}) { $h->{OpSys} = [ grep {$_ ne 'adotf'} @{$h->{OpSys}} ]; unless (defined($hostinfo->{osname})) { $log->warning("Failed to autodetect value for 'OSName'. 
Enter correct value in config file"); $h->{OSName} = 'unknown'; } $h->{OSName} ||= 'adotf'; $h->{OSVersion} ||= 'adotf'; $h->{OSFamily} ||= 'adotf'; } my %hostkey = (Platform => 'machine', PhysicalCPUs => 'cpusocketcount', LogicalCPUs => 'cputhreadcount', CPUVendor => 'cpuvendor', CPUModel => 'cpumodel', CPUClockSpeed => 'cpufreq', MainMemorySize => 'pmem', VirtualMemorySize => 'vmem', OSFamily => 'sysname', OSName => 'osname', OSVersion => 'osversion' ); for my $key (keys %hostkey) { if (exists $h->{$key} and $h->{$key} eq 'adotf') { $log->warning("Failed to autodetect value for '$key'. Enter correct value in config file") unless defined $hostinfo->{$hostkey{$key}}; $h->{$key} = $hostinfo->{$hostkey{$key}}; } } } main(); nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/LL.pm0000644000000000000000000000013215067751327022537 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.804625969 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/LL.pm0000644000175000002070000003653515067751327024455 0ustar00mockbuildmock00000000000000package LL; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### use strict; our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our(%lrms_queue); ########################################## # Private subs ########################################## # calculates the total cpus from a string like: " 1 3 5-9 " sub total_from_individual($){ my $str=shift; #trim string $str =~ s/^\s+//; $str =~ s/\s+$//; my @ids = split(' ', $str); my $total = 0; foreach my $id (@ids) { if ( $id =~ /([0-9]+)-([0-9]+)/ ){ $total += $2-$1 +1; }elsif( $id =~ /[0-9]+/ ){ $total++; } } return $total; } sub consumable_distribution ($$) { my ( $path ) = shift; my ( $consumable_type ) = shift; unless (open LLSTATUSOUT, "$path/llstatus -R|") { error("Error in executing llstatus"); } my @cons_dist = (); while () { if ( /[^# ]*(#*) *.*$consumable_type\*?\(([0-9]*),([0-9]*).*/ ) { #if displayed as total cpus # Check if node is down if ( $1 ne "#" ) { my @a = ($3 - $2,$3); push @cons_dist, [ @a ]; } } elsif ( /[^# ]*(#*) *.*$consumable_type<([^>]*)><([^>]*)>.*/ ){ #if displayed as individual cpu numbers if ( $1 ne "#" ) { my $availcpu=total_from_individual($2); my $alcpu=total_from_individual($3); my @a = ($alcpu - $availcpu,$alcpu); push @cons_dist, [ @a ]; } } } return @cons_dist; } sub consumable_total (@) { my @dist = @_; my ($cpus, $used, $max); foreach $cpus (@{dist}) { $used += ${$cpus}[0]; $max += ${$cpus}[1]; } return ($used,$max) } sub cpudist2str (@) { my @dist = @_; my @total_dist = (); my $str = ''; # Collect number of available cores my ($used,$max,$cpus); foreach $cpus (@dist) { ($used, $max) = @{$cpus}; $total_dist[$max]++; } # Turn it into a string my $n; $n = 0; foreach $cpus (@total_dist) { if ($cpus) { if ( $str ne '') { $str .= ' '; } $str .= $n . "cpu:" . 
$cpus; } $n++; } return $str; } sub get_cpu_distribution($) { my ( $path ) = shift; my $single_job_per_box = 1; if ($single_job_per_box == 1) { # Without hyperthreading unless (open LLSTATUSOUT, "$path/llstatus -f %sta|") { error("Error in executing llstatus"); } } else { # Use all available cpus/cores including hyperthreading: unless (open LLSTATUSOUT, "$path/llstatus -r %cpu %sta|") { error("Error in executing llstatus"); } } my %cpudist; while () { chomp; # We only want CPU lines (and there must be at least one) next if !/^[1-9]/; # An empty line denotes end of CPUs last if /^$/; my $cpus; my $startd; if ($single_job_per_box == 1) { ($startd) = split/\!/; $cpus = 1; } else { ($cpus, $startd) = split/\!/; } # Only count those machines which have startd running if ($startd ne "0") { $cpudist{$cpus}++; } } close LLSTATUSOUT; return %cpudist; } sub get_used_cpus($) { my ( $path ) = shift; unless (open LLSTATUSOUT, "$path/llstatus |") { error("Error in executing llstatus"); } my $cpus_used; while () { chomp; # We only want CPU lines (and there must be at least one) next if !/^Total Machines/; tr / //s; my @fields = split; $cpus_used = $fields[6]; last; } close LLSTATUSOUT; return ($cpus_used); } sub get_long_status($) { my ( $path ) = shift; unless (open LLSTATUSOUT, "$path/llstatus -l |") { error("Error in executing llstatus"); } my %cpudist; my $machine_name; my %machines; while () { # Discard trailing information separated by a newline if ( /^$/ ) { next; } chomp; my ($par, $val) = split/\s*=\s*/,$_,2; if ($par eq 'Name') { $machine_name=$val; next; } $machines{$machine_name}{$par} = $val; } close LLSTATUSOUT; return %machines; } sub get_long_queue_info($$) { my ( $path ) = shift; my ( $queue) = shift; unless (open LLCLASSOUT, "$path/llclass -l $queue |") { error("Error in executing llclass"); } my %queue_info; my $queue_name; while () { # Discard trailing information separated by a newline and header if ( /^$/ || /^==========/ ) { next; } # Info ends with a line of dashes last if /^----------/; s/^\s*//; chomp; my ($par, $val) = split/\s*:\s*/,$_,2; if ($par eq 'Name') { $queue_name=$val; next; } $queue_info{$queue_name}{$par} = $val; } close LLCLASSOUT; return %queue_info; } sub get_queues($) { my ( $path ) = shift; unless (open LLCLASSOUT, "$path/llclass |") { error("Error in executing llclass"); } # llclass outputs queues (classes) delimited by ---- markers my @queues; my $queue_sect; while () { # Now reading queues if ( /^----------/ && $queue_sect == 0) { if ($#queues == -1) { $queue_sect = 1; next; } } # Normal ending after reading final queue if ( /^----------/ && $queue_sect == 1) { $queue_sect = 0; return @queues; } if ( $queue_sect == 1 ) { chomp; s/ .*//; push @queues, $_; } } # We only end here if there were no queues return @queues; } sub get_short_job_info($$) { # Path to LRMS commands my ($path) = shift; # Name of the queue to query my ($queue) = shift; if ($queue ne "") { unless (open LLQOUT, "$path/llq -c $queue |") { error("Error in executing llq"); } } else { unless (open LLQOUT, "$path/llq |") { error("Error in executing llq"); } } my %jobstatus; while () { my ($total, $waiting, $pending, $running, $held, $preempted); $total = 0; $waiting = 0; $pending = 0; $running = 0; $held = 0; $preempted = 0; if (/(\d*) .* (\d*) waiting, (\d*) pending, (\d*) running, (\d*) held, (\d*) preempted/) { $total = $1; $waiting = $2; $pending = $3; $running = $4; $held = $5; $preempted = $6; } $jobstatus{total} = $total; $jobstatus{waiting} = $waiting; $jobstatus{pending} = $pending; 
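# Added note: the regex above matches llq's one-line summary, which on the
# LoadLeveler versions this module targets looks roughly like
#   5 job step(s) in queue, 1 waiting, 0 pending, 4 running, 0 held, 0 preempted
# (sample line is illustrative). The counters are reset to zero on every
# input line, so %jobstatus keeps only the values captured from the last
# line that matched.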
$jobstatus{running} = $running; $jobstatus{held} = $held; $jobstatus{preempted} = $preempted; } close LLQOUT; return %jobstatus; } sub get_long_job_info($$) { # Path to LRMS commands my ($path) = shift; # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($lrms_ids) = @_; my %jobinfo; if ( (@{$lrms_ids})==0){ return %jobinfo; } # can the list of ids become too long for the shell? my $lrmsidstr = join(" ", @{$lrms_ids}); unless (open LLQOUT, "$path/llq -l -x $lrmsidstr |") { error("Error in executing llq"); } my $jobid; my $skip=0; while () { # Discard trailing information separated by a newline if (/job step\(s\) in queue, /) { last; } # Discard header lines if (/^===/) { $skip=0; next; } # Skip all lines of extra info if (/^--------------------------------------------------------------------------------/) { $skip=!$skip; next; } if ($skip) { next; } chomp; # Create variables using text before colon, trimming whitespace on both sides and replacing white space with _ my ($par, $val) = split/: */,$_,2; $par =~ s/^\s+//; $par =~ s/\s+$//; $par =~ s/\s/_/g; # Assign variables if ($par eq 'Job_Step_Id') { $jobid = $val; next; } $jobinfo{$jobid}{$par} = $val; } close LLQOUT; return %jobinfo; } ############################################ # Public subs ############################################# sub cluster_info ($) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{ll_bin_path}; my (%lrms_cluster); # lrms_type $lrms_cluster{lrms_type} = "LoadLeveler"; # lrms_version my $status_string=`$path/llstatus -v`; if ( $? != 0 ) { warning("Can't run llstatus"); } $status_string =~ /^\S+\s+(\S+)/; $lrms_cluster{lrms_version} = $1; # LL tracks total cpu time but the cpu time limit that the user asked is # first scaled up with the number of requested job slots before enforcing # the cpu time limit. 
Effectively the cpu time limit is the maxmum average # per-slot cputime $lrms_cluster{has_total_cputime_limit} = 0; my ($ll_consumable_resources) = $$config{ll_consumable_resources}; if ($ll_consumable_resources ne "yes") { # totalcpus $lrms_cluster{totalcpus} = 0; $lrms_cluster{cpudistribution} = ""; my %cpudist = get_cpu_distribution($path); my $sep = ""; foreach my $key (keys %cpudist) { $lrms_cluster{cpudistribution} .= $sep.$key."cpu:".$cpudist{$key}; if (!$sep) { $sep = " "; } $lrms_cluster{totalcpus} += $key * $cpudist{$key}; } # Simple way to find used cpus (slots/cores) by reading the output of llstatus $lrms_cluster{usedcpus} = get_used_cpus($path); } else { # Find used / max CPUs from cconsumable resources my @dist = consumable_distribution($path,"ConsumableCpus"); my @cpu_total = consumable_total(@dist); $lrms_cluster{cpudistribution} = cpudist2str(@dist); $lrms_cluster{totalcpus} = $cpu_total[1]; $lrms_cluster{usedcpus} = $cpu_total[0]; } my %jobstatus = get_short_job_info($path,""); # Here waiting actually refers to jobsteps $lrms_cluster{queuedcpus} = $jobstatus{waiting}; # TODO: this is wrong, but we are not worse off than earlier # we should count jobs, not cpus $lrms_cluster{runningjobs} = $jobstatus{running}; $lrms_cluster{queuedjobs} = $jobstatus{waiting}; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{ll_bin_path}; # Name of the queue to query my ($queue) = shift; my %long_queue_info = get_long_queue_info($path,$queue); my %jobstatus = get_short_job_info($path,$queue); # Translate between LoadLeveler and ARC $lrms_queue{status} = $long_queue_info{$queue}{'Free_slots'}; # Max_total_tasks seems to give the right queue limit #$lrms_queue{maxrunning} = $long_queue_info{$queue}{'Max_total_tasks'}; # Maximum_slots is really the right parameter to use for queue limit $lrms_queue{maxrunning} = $long_queue_info{$queue}{'Maximum_slots'}; $lrms_queue{maxqueuable} = ""; $lrms_queue{maxuserrun} = $lrms_queue{maxrunning}; # Note we use Wall Clock! $_ = $long_queue_info{$queue}{'Wall_clock_limit'}; if (/\((.*) seconds,/) { $lrms_queue{maxcputime} = int($1 / 60); } $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; # There is no lower limit enforced $lrms_queue{mincputime} = 0; $lrms_queue{minwalltime} = 0; # LL v3 has Def_wall... and LL v5 has Default_wall... $_ = $long_queue_info{$queue}{'Def_wall_clock_limit'}; if (! 
defined $_ || $_ eq ""){ $_ = $long_queue_info{$queue}{'Default_wall_clock_limit'}; } if (/\((.*) seconds,/) { $lrms_queue{defaultcput} = int($1 / 60); } $lrms_queue{defaultwallt}= $lrms_queue{defaultcput}; $lrms_queue{running} = $jobstatus{running}; # + $jobstatus{held} + $jobstatus{preempted}; $lrms_queue{queued} = $jobstatus{waiting}; # $lrms_queue{totalcpus} = $long_queue_info{$queue}{'Max_processors'}; $lrms_queue{totalcpus} = $long_queue_info{$queue}{'Maximum_slots'}; return %lrms_queue; } sub jobs_info ($$$) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{ll_bin_path}; # Name of the queue to query my ($queue) = shift; # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($lrms_ids) = @_; my (%lrms_jobs); my %jobinfo = get_long_job_info($path,$lrms_ids); foreach my $id (keys %jobinfo) { $lrms_jobs{$id}{status} = "O"; if ( $jobinfo{$id}{Status} eq "Running" ) { $lrms_jobs{$id}{status} = "R"; } if ( ($jobinfo{$id}{Status} eq "Idle") || ($jobinfo{$id}{Status} eq "Deferred") ) { $lrms_jobs{$id}{status} = "Q"; } if ( ($jobinfo{$id}{Status} eq "Completed") || ($jobinfo{$id}{Status} eq "Canceled") || ($jobinfo{$id}{Status} eq "Removed") || ($jobinfo{$id}{Status} eq "Remove Pending") || ($jobinfo{$id}{Status} eq "Terminated") ) { $lrms_jobs{$id}{status} = "E"; } if ( ($jobinfo{$id}{Status} eq "System Hold") || ($jobinfo{$id}{Status} eq "User Hold") || ($jobinfo{$id}{Status} eq "User and System Hold") ) { $lrms_jobs{$id}{status} = "S"; } if ( ($jobinfo{$id}{Status} eq "Checkpointing") ) { $lrms_jobs{$id}{status} = "O"; } $lrms_jobs{$id}{mem} = -1; my $dispt = `date +%s -d "$jobinfo{$id}{Dispatch_Time}\n"`; chomp $dispt; $lrms_jobs{$id}{walltime} = POSIX::ceil((time() - $dispt) /60); # Setting cputime, should be converted to minutes $lrms_jobs{$id}{cputime} = 0; if (defined $jobinfo{$id}{Step_Total_Time}) { my (@cput) = split(/:/,$jobinfo{$id}{Step_Total_Time}); my (@cpudh) = split(/\+/,$cput[0]); if (@cpudh == 2){ $cput[0]= 24*$cpudh[0] + $cpudh[1]; } $lrms_jobs{$id}{cputime} = int($cput[0]*60 + $cput[1] + $cput[2]/60) if (@cput); } if ($jobinfo{$id}{Wall_Clk_Hard_Limit} =~ / \(([0-9]*) seconds\)/) { $lrms_jobs{$id}{reqwalltime} = int($1 / 60); } $lrms_jobs{$id}{reqcputime} = $lrms_jobs{$id}{reqwalltime}; $lrms_jobs{$id}{comment} = [ "LRMS: $jobinfo{$id}{Status}" ]; if (defined $jobinfo{$id}{Allocated_Host} && $jobinfo{$id}{Allocated_Host} ne "") { $lrms_jobs{$id}{nodes} = ["$jobinfo{$id}{Allocated_Host}"]; } elsif (defined $jobinfo{$id}{Allocated_Hosts} && $jobinfo{$id}{Allocated_Hosts} ne "") { $lrms_jobs{$id}{nodes} = ["$jobinfo{$id}{Allocated_Hosts}"]; } else { $lrms_jobs{$id}{nodes} = []; } $lrms_jobs{$id}{rank} = -1; $lrms_jobs{$id}{cpus} = 0; $lrms_jobs{$id}{cpus} = $jobinfo{$id}{Step_Cpus}; } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($path) = $$config{ll_bin_path}; my ($qname) = shift; my ($accts) = shift; my (%lrms_users); if ( ! exists $lrms_queue{status} ) { %lrms_queue = queue_info( $path, $qname ); } # Using simple estimate. Fair-share value is only known by Administrator. 
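    # Added sketch of that estimate (assuming $lrms_queue{status} holds the
    # queue's free slots, as set in queue_info): every mapped account simply
    # gets the queue-wide numbers, e.g. with 8 free slots and 3 queued jobs:
    #   $lrms_users{$u}{freecpus}    = 8;
    #   $lrms_users{$u}{queuelength} = "3";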
foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{status}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/PBSPRO.pm0000644000000000000000000000013215067751327023235 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.867493681 30 ctime=1759499029.803472041 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PBSPRO.pm0000644000175000002070000010042415067751327025140 0ustar00mockbuildmock00000000000000package PBSPRO; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### use strict; our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info', 'nodes_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our(%lrms_queue); my (%user_jobs_running, %user_jobs_queued); # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # cache info returned by PBS commands my $pbsnodes; my $qstat_f; my $qstat_fQ; # PBS type and flavour my $lrms_type = undef; my $lrms_version = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); %user_jobs_running = (); %user_jobs_queued = (); } } # get PBS type and version sub get_pbs_version ($) { return unless not defined $lrms_type; # path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # determine the flavour and version of PBS my $qmgr_string=`$path/qmgr -c "list server"`; if ( $? != 0 ) { error("Can't run qmgr"); } if ($qmgr_string =~ /pbs_version = \b(\D+)_(\d\S+)\b/) { $lrms_type = $1; $lrms_version = $2; } else { error("Cannot determine PBSPro version from qmrg. 
Exiting..."); } } ########################################## # Private subs ########################################## sub read_pbsnodes ($) { return %$pbsnodes if $pbsnodes; #processing the pbsnodes output by using a hash of hashes # %hoh_pbsnodes (referrenced by $hashref) my ( $path ) = shift; my ( %hoh_pbsnodes); my ($nodeid,$node_var,$node_value); unless (open PBSNODESOUT, "$path/pbsnodes -a 2>/dev/null |") { error("error in executing pbsnodes"); } while (my $line= ) { if ($line =~ /^$/) {next}; if ($line =~ /^([\w\-]+)/) { $nodeid= $1 ; next; } if ($line =~ / = /) { ($node_var,$node_value) = split (/ = /, $line, 2); $node_var =~ s/\s+//g; chop $node_value; } $hoh_pbsnodes{$nodeid}{$node_var} = $node_value; } close PBSNODESOUT; $pbsnodes = \%hoh_pbsnodes; return %hoh_pbsnodes; } sub read_qstat_fQ ($) { # return already parsed value return %$qstat_fQ if $qstat_fQ; #processing the qstat -fQ output by using a hash of hashes my ( $path ) = shift; my ( %hoh_qstat ); unless (open QSTATOUTPUT, "$path/qstat -Q -f 2>/dev/null |") { error("Error in executing qstat: $path/qstat -Q -f"); } my $current_queue = undef; my ($qstat_var,$qstat_value) = (); while (my $line= ) { chomp($line); if ($line =~ /^$/) {next}; if ($line =~ /^Queue: ([\w\-]+)$/) { $current_queue = $1; next; } if ( ! defined $current_queue ) {next}; if ($line =~ m/ = /) { ($qstat_var,$qstat_value) = split("=", $line, 2); $qstat_var =~ s/^\s+|\s+$//g; $qstat_value =~ s/^\s+|\s+$//g; $hoh_qstat{$current_queue}{$qstat_var} = $qstat_value; } # older PBS versions has no '-1' support # a line starting with a tab is a continuation line if ( $line =~ m/^\t(.+)$/ ) { $qstat_value .= $1; $qstat_value =~ s/\s+$//g; $hoh_qstat{$current_queue}{$qstat_var} = $qstat_value; } } close QSTATOUTPUT; $qstat_fQ = \%hoh_qstat; return %hoh_qstat; } sub read_qstat_f ($) { # return already parsed value return %$qstat_f if $qstat_f; #processing the qstat -f output by using a hash of hashes my ( $path ) = shift; my ( %hoh_qstat ); unless (open QSTATOUTPUT, "$path/qstat -f 2>/dev/null |") { error("Error in executing qstat: $path/qstat -f"); } my $jobid = undef; my ($qstat_var,$qstat_value) = (); while (my $line= ) { chomp($line); if ($line =~ /^$/) {next}; if ($line =~ /^Job Id: (.+)$/) { $jobid = $1; next; } if ( ! defined $jobid ) {next}; if ($line =~ m/ = /) { ($qstat_var,$qstat_value) = split("=", $line, 2); $qstat_var =~ s/^\s+|\s+$//g; $qstat_value =~ s/^\s+|\s+$//g; $hoh_qstat{$jobid}{$qstat_var} = $qstat_value; } # older PBS versions has no '-1' support # a line starting with a tab is a continuation line if ( $line =~ m/^\t(.+)$/ ) { $qstat_value .= $1; $qstat_value =~ s/\s+$//g; $hoh_qstat{$jobid}{$qstat_var} = $qstat_value; } } close QSTATOUTPUT; $qstat_f = \%hoh_qstat; return %hoh_qstat; } # Splits up the value of the exec_host string. # # According to PBSPro manual: # Format: exec_host=/* [+/ * ] # where index is task slot number starting at 0, on that host, # and CPUs is the number of CPUs assigned to the job, 1 if omitted. # # Returns a list of node names, one for each used cpu. 
sub split_hostlist { my ($exec_host_string) = @_; my @nodes; my $err; for my $nodespec (split '\+', $exec_host_string) { if ($nodespec =~ m{^([^/:]+)/\d+(?:\*(\d+))?$}) { my ($nodename, $multiplier) = ($1, $2 || 1); push @nodes, $nodename for 1..$multiplier; } else { $err = $nodespec; } } warning("failed counting nodes in expression: $exec_host_string") if $err; return @nodes; } # Deduces the number of requested cpus from the values of these job properties: sub set_cpucount { my ($job) = (@_); my $select = $job->{"Resource_List.select"}; my $totalcpus = 0; for my $chunk (split '\+', $select) { my $cpus = cpus_in_select_chunk($chunk); $totalcpus += $cpus; } $job->{cpus} = $totalcpus; delete $job->{"Resource_List.select"}; } # counts cpus in chunk definitions of the forms found in Resource_List.select (PBSPro 8+): # 4 # 2:ncpus=1 # 1:ncpus=4:mpiprocs=4:host=hostA sub cpus_in_select_chunk { my ($chunk) = @_; return $1 if $chunk =~ m/^(\d+)$/; if ($chunk =~ m{^(\d+):(.*)$}) { my ($cpus, $resc) = ($1, $2); return $cpus * get_ncpus($resc); } return 0; # not a valid chunk } # extracts the value of ncpus from a string like blah:ncpus=N:blah sub get_ncpus { my ($resc) = @_; for my $res (split ':', $resc) { return $1 if $res =~ m /^ncpus=(\d+)$/; } return 1; } # Convert time from [DD:[HH:[MM:]]]SS to minutes sub count_time { my $pbs_time = shift; # split and reverse PBS time to start from seconds, then drop seconds my @t = reverse split /:/, $pbs_time; my $minutes = 0; if ( ! defined $t[1] ) { # PBS seconds only case $minutes = int( $t[0] / 60 ); } else { # drop seconds shift @t; $minutes = $t[0]; $minutes += $t[1]*60 if defined $t[1]; $minutes += $t[2]*60*24 if defined $t[2]; } return $minutes; } # gets information about each destination queue behind a # routing queue and copies it into the routing queue data structure. # at the moment it only copies data from the first queue # # input: $queue name of the current queue # $path to pbs binaries # $singledqueue that contains the only queue behind the routing one # %qstat{} for the current queue # output: the %dqueue hash containing info about destination queues # in %lrms_queue fashion sub process_dqueues($$%){ my $qname = shift; my $path = shift; my (%qstat) = %{$_[0]}; my $singledqueue; my %dqueues; # build DQs data structure my @dqnames; if (defined $qstat{'route_destinations'}) { @dqnames=split(",",$qstat{'route_destinations'}); @dqueues{@dqnames}=undef; my (%hoh_qstatfQ) = read_qstat_fQ($path); foreach my $dqname ( keys %dqueues ) { debug("Processing queues behind routing queue. Current queue is $dqname"); $dqueues{$dqname}=$hoh_qstatfQ{$dqname}; } # debug($dqueues{'verylong'}{'resources_max.walltime'}); } else { error("No route_destinations for routing queue $qname. Please check LRMS configuration."); } # take the first destination queue behind the RQ, copy its data to the RQ # this happens only if the RQ has no data defined on PBS # this should solve bug #859 $singledqueue=shift(@dqnames); debug('Just one queue behind routing queue is currently supported: '.$singledqueue); my @attributes=( 'max_running', 'max_user_run', 'max_queuable', 'resources_max.cput', 'resources_min.cput', 'resources_default.cput', 'resources_max.walltime', 'resources_min.walltime', 'resources_default.walltime', 'state_count' ); foreach my $rkey (@attributes) { # line to check queues under routing queue values. Undefined values generate crap in logs, # so is commented out. # debug('with key '.$rkey.' qstat returns '.$qstat{$rkey}.' and the dest. 
queue has '.$dqueues{$singledqueue}{$rkey} ); if (!defined $qstat{$rkey}) {${$_[0]}{$rkey}=$dqueues{$singledqueue}{$rkey};}; } return %dqueues; } ############################################ # Public subs ############################################# sub cluster_info ($) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Return data structure %lrms_cluster{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_cluster); # flavour and version of PBS get_pbs_version($config); $lrms_cluster{lrms_type} = $lrms_type; $lrms_cluster{lrms_glue_type}=lc($lrms_type); $lrms_cluster{lrms_version} = $lrms_version; # PBS treats cputime limit for parallel/multi-cpu jobs as job-total $lrms_cluster{has_total_cputime_limit} = 1; # processing the pbsnodes output by using a hash of hashes %hoh_pbsnodes my ( %hoh_pbsnodes ) = read_pbsnodes( $path ); $lrms_cluster{totalcpus} = 0; my ($number_of_running_jobs) = 0; $lrms_cluster{cpudistribution} = ""; my (@cpudist) = 0; my %available_nodes = (); # loop over all available nodes foreach my $node (keys %hoh_pbsnodes) { # skip nodes that does not conform dedicated_node_string filter if ( exists $$config{dedicated_node_string} && $$config{dedicated_node_string} ne "") { next unless ( $hoh_pbsnodes{$node}{"properties"} =~ m/^([^,]+,)*$$config{dedicated_node_string}(,[^,]+)*$/); } # add node to available_nodes hash $available_nodes{$node} = 1; # get node state and number of CPUs my ($nodestate) = $hoh_pbsnodes{$node}{"state"}; my $nodecpus; if ($hoh_pbsnodes{$node}{'np'}) { $nodecpus = $hoh_pbsnodes{$node}{'np'}; } elsif ($hoh_pbsnodes{$node}{'resources_available.ncpus'}) { $nodecpus = $hoh_pbsnodes{$node}{'resources_available.ncpus'}; } next if ($nodestate=~/down/ or $nodestate=~/offline/); if ($nodestate=~/(?:,|^)busy/) { $lrms_cluster{totalcpus} += $nodecpus; $cpudist[$nodecpus] +=1; $number_of_running_jobs += $nodecpus; next; } $lrms_cluster{totalcpus} += $nodecpus; $cpudist[$nodecpus] += 1; if ($hoh_pbsnodes{$node}{"jobs"}){ $number_of_running_jobs++; my ( @comma ) = ($hoh_pbsnodes{$node}{"jobs"}=~ /,/g); $number_of_running_jobs += @comma; } } # form LRMS cpudistribution string for (my $i=0; $i<=$#cpudist; $i++) { next unless ($cpudist[$i]); $lrms_cluster{cpudistribution} .= " ".$i."cpu:".$cpudist[$i]; } # read the qstat -n information about all jobs # queued cpus, total number of cpus in all jobs $lrms_cluster{usedcpus} = 0; $lrms_cluster{queuedcpus} = 0; $lrms_cluster{queuedjobs} = 0; $lrms_cluster{runningjobs} = 0; my %qstat_jobs = read_qstat_f($path); for my $key (keys %qstat_jobs) { # usercpus (running jobs) if ( $qstat_jobs{$key}{job_state} =~ /R/) { $lrms_cluster{runningjobs}++; my @nodes = split_hostlist($qstat_jobs{$key}{exec_host}); # filter using dedicated_node_string foreach my $node ( @nodes ) { next unless defined $available_nodes{$node}; $lrms_cluster{usedcpus}++; } } # if ( $qstat_jobs{$key}{job_state} =~ /(W|T|Q)/) { $lrms_cluster{queuedjobs}++; $lrms_cluster{queuedcpus}+=count_usedcpus($qstat_jobs{$key}{"Resource_List.select"}, $qstat_jobs{$key}{"Resource_List.nodes"}, $qstat_jobs{$key}{"Resource_List.ncpus"}); } } # Names of all LRMS queues @{$lrms_cluster{queue}} = (); my ( %hoh_qstat ) = read_qstat_fQ($path); for my $qkey (keys %hoh_qstat) { push @{$lrms_cluster{queue}}, $qkey; } return %lrms_cluster; } sub queue_info ($$) { # Path to LRMS commands my 
($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Name of the queue to query my ($qname) = shift; init_globals($qname); # The return data structure is %lrms_queue. # In this template it is defined as persistent module data structure, # because it is later used by jobs_info(), and we wish to avoid # re-construction of it. If it were not needed later, it would be defined # only in the scope of this subroutine, as %lrms_cluster previously. # Return data structure %lrms_queue{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. # read the queue information for the queue entry from the qstat my (%hoh_qstat) = read_qstat_fQ($path); my (%qstat) = %{$hoh_qstat{$qname}}; # this script contain a solution for a single queue behind the # routing one, the routing queue will inherit some of its # attributes. # this hash contains qstat records for queues - in this case just one my %dqueues; # this variable contains the single destination queue my $singledqueue; if ($qstat{queue_type} =~ /Route/) { %dqueues = process_dqueues($qname,$path,\%qstat); $singledqueue = ( keys %dqueues )[0]; } else { undef %dqueues; undef $singledqueue; } # publish queue limits parameters # general limits (publish as is) my (%keywords) = ( 'max_running' => 'maxrunning', 'max_user_run' => 'maxuserrun', 'max_queuable' => 'maxqueuable', 'resources_max.ncpus' => 'MaxSlotsPerJob'); foreach my $k (keys %keywords) { if (defined $qstat{$k} ) { $lrms_queue{$keywords{$k}} = $qstat{$k}; } else { $lrms_queue{$keywords{$k}} = ""; } } # queue time limits (convert to minutes) %keywords = ( 'resources_max.cput' => 'maxcputime', 'resources_min.cput' => 'mincputime', 'resources_default.cput' => 'defaultcput', 'resources_max.walltime' => 'maxwalltime', 'resources_min.walltime' => 'minwalltime', 'resources_default.walltime' => 'defaultwallt' ); foreach my $k (keys %keywords) { if ( defined $qstat{$k} ) { $lrms_queue{$keywords{$k}} = (&count_time($qstat{$k})+($k eq 'resources_min.cput'?1:0)); } else { $lrms_queue{$keywords{$k}} = ""; } } # determine the queue status from the LRMS # Used to be set to 'active' if the queue can accept jobs # Now lists the number of available processors, "0" if no free # cpus. Negative number signals some error state of PBS # (reserved for future use). $lrms_queue{status} = -1; $lrms_queue{running} = 0; $lrms_queue{queued} = 0; $lrms_queue{totalcpus} = 0; if ( ($qstat{"enabled"} =~ /True/) and ($qstat{"started"} =~ /True/)) { # refresh routing queue records, in case something changed on the # destination queues if ($qstat{queue_type} =~ /Route/) { debug("CPUs calculation pass. Queues are scanned a second time. Current queue is: $qstat{queue_type}"); %dqueues = process_dqueues($qname,$path,\%qstat); # this variable contains the single destination queue $singledqueue = ( keys %dqueues )[0]; } else { undef %dqueues; undef $singledqueue; } # qstat does not return number of cpus, use pbsnodes instead. 
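        # Added note: two parallel sets of counters are filled from the
        # pbsnodes output below. nodes_totalcpus/nodes_freecpus count every
        # node that is not down or offline, while pbs_totalcpus/pbs_freecpus
        # count only nodes with no "properties" at all or whose properties
        # match this queue, the destination queue behind a routing queue, or
        # the configured dedicated/queue node strings; the node-wide totals
        # are the fallback used when nothing matches (see the warning below).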
my ($pbs_freecpus,$pbs_totalcpus,$nodes_totalcpus,$nodes_freecpus)=(0,0,0,0); # processing the pbsnodes output by using a hash of hashes %hoh_pbsnodes my ( %hoh_pbsnodes ) = read_pbsnodes( $path ); foreach my $node (keys %hoh_pbsnodes) { my $cpus; next if $hoh_pbsnodes{$node}{'state'} =~ m/offline/; next if $hoh_pbsnodes{$node}{'state'} =~ m/down/; if ($hoh_pbsnodes{$node}{'np'}) { $cpus = $hoh_pbsnodes{$node}{'np'}; } elsif ($hoh_pbsnodes{$node}{'resources_available.ncpus'}) { $cpus = $hoh_pbsnodes{$node}{'resources_available.ncpus'}; } $nodes_totalcpus+=$cpus; if ($hoh_pbsnodes{$node}{'state'} =~ m/free/){ $nodes_freecpus+=$cpus; } # If pbsnodes have properties assigned to them # check if queuename or dedicated_node_string matches. # $singledqueue check has been added for routing queue support, # also the destination queue is checked to calculate totalcpus # also adds correct behaviour for queue_node_string if ( ( ! defined $hoh_pbsnodes{$node}{'properties'} ) || ( ( defined $qname && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$qname(,[^,]+)*$/ ) || ( defined $$config{pbs_dedicated_node_string} && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$$config{pbs_dedicated_node_string}(,[^,]+)*$/ ) || ( defined $$config{pbs_queue_node} && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$$config{pbs_queue_node}(,[^,]+)*$/ ) || ( defined $singledqueue && $hoh_pbsnodes{$node}{'properties'} =~ m/^([^,]+,)*$singledqueue(,[^,]+)*$/ ) ) ) { $pbs_totalcpus+=$cpus; if ($hoh_pbsnodes{$node}{'state'} =~ m/free/){ $pbs_freecpus+=$cpus; } } } if ($pbs_totalcpus eq 0) { warning("Node properties are defined in PBS but nothing match the queue filters. Assigning counters for all nodes."); $pbs_totalcpus = $nodes_totalcpus; $pbs_freecpus = $nodes_freecpus; } $lrms_queue{totalcpus} = $pbs_totalcpus; debug("Totalcpus for all queues are: $lrms_queue{totalcpus}"); if(defined $$config{totalcpus}){ if ($lrms_queue{totalcpus} eq "" or $$config{totalcpus} < $lrms_queue{totalcpus}) { $lrms_queue{totalcpus}=$$config{totalcpus}; } } $lrms_queue{status} = $pbs_freecpus; $lrms_queue{status}=0 if $lrms_queue{status} < 0; if ( $qstat{state_count} =~ m/.*Running:([0-9]*).*/ ){ $lrms_queue{running}=$1; } else { $lrms_queue{running}=0; } # calculate running in case of a routing queue if ( $qstat{queue_type} =~ /Route/ ) { debug($dqueues{$singledqueue}{state_count}); if ( $dqueues{$singledqueue}{state_count} =~ m/.*Running:([0-9]*).*/ ) { $lrms_queue{running}=$1; } } # the above gets the number of nodes not the number of cores in use. If multi core jobs are running, "running" will be underestimated. # Instead use totalcpus - freecpus (This might overrepresent running. because pbsnodes count whole nodes in use.) 
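# Added worked example (illustrative numbers only): with pbs_totalcpus=64
# and pbs_freecpus=40, runningcores below becomes 24 and replaces the
# state_count figure whenever it is larger than it.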
# CUS (2015-02-09) my $runningcores = $pbs_totalcpus - $pbs_freecpus ; $runningcores = 0 if $runningcores < 0; $lrms_queue{running} = $runningcores if $runningcores > $lrms_queue{running}; if ($lrms_queue{totalcpus} eq 0) { warning("Can't determine number of cpus for queue $qname"); } if ( $qstat{state_count} =~ m/.*Queued:([0-9]*).*/ ){ $lrms_queue{queued}=$1; } else { $lrms_queue{queued}=0; } # fallback for defult values that are required for normal operation $lrms_queue{MaxSlotsPerJob} = $lrms_queue{totalcpus} if $lrms_queue{MaxSlotsPerJob} eq ""; # calculate queued in case of a routing queue # queued jobs is the sum of jobs queued in the routing queue # plus jobs in the destination queue if ( $qstat{queue_type} =~ /Route/ ) { debug($dqueues{$singledqueue}{state_count}); if ( $dqueues{$singledqueue}{state_count} =~ m/.*Queued:([0-9]*).*/ ) { $lrms_queue{queued}=$lrms_queue{queued}+$1; } } } return %lrms_queue; } sub jobs_info ($$@) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Name of the queue to query my ($qname) = shift; init_globals($qname); # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($jids) = shift; # Return data structure %lrms_jobs{$lrms_local_job_id}{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_jobs); # Fill %lrms_jobs here (da implementation) # rank is treated separately as it does not have an entry in # qstat output, comment because it is an array, and mem # because "kB" needs to be stripped from the value my (%skeywords) = ('job_state' => 'status'); my (%tkeywords) = ( 'resources_used.walltime' => 'walltime', 'resources_used.cput' => 'cputime', 'Resource_List.walltime' => 'reqwalltime', 'Resource_List.cputime' => 'reqcputime'); my (%nkeywords) = ( 'Resource_List.select' => 1, 'Resource_List.nodes' => 1, 'Resource_List.ncpus' => 1); my ($alljids) = join ' ', @{$jids}; my ($rank) = 0; my %job_owner; my $handle_attr = sub { my ($jid, $k, $v) = @_; if ( defined $skeywords{$k} ) { $lrms_jobs{$jid}{$skeywords{$k}} = $v; if($k eq "job_state") { if( $v eq "U" ) { $lrms_jobs{$jid}{status} = "S"; } elsif ( $v eq "C" ) { $lrms_jobs{$jid}{status} = ""; # No status means job has completed } elsif ( $v ne "R" and $v ne "Q" and $v ne "S" and $v ne "E" ) { $lrms_jobs{$jid}{status} = "O"; } } } elsif ( defined $tkeywords{$k} ) { $lrms_jobs{$jid}{$tkeywords{$k}} = &count_time($v); } elsif ( defined $nkeywords{$k} ) { $lrms_jobs{$jid}{$k} = $v; } elsif ( $k eq 'exec_host' ) { my @nodes = split_hostlist($v); $lrms_jobs{$jid}{nodes} = \@nodes; #$lrms_jobs{$jid}{cpus} = scalar @nodes; } elsif ( $k eq 'comment' ) { $lrms_jobs{$jid}{comment} = [] unless $lrms_jobs{$jid}{comment}; push @{$lrms_jobs{$jid}{comment}}, "LRMS: $v"; } elsif ($k eq 'resources_used.vmem') { $v =~ s/(\d+).*/$1/; $lrms_jobs{$jid}{mem} = $v; } if ( $k eq 'Job_Owner' ) { $v =~ /(\S+)@/; $job_owner{$jid} = $1; } if ( $k eq 'job_state' ) { if ($v eq 'R') { $lrms_jobs{$jid}{rank} = ""; } elsif ($v eq 'C') { $lrms_jobs{$jid}{rank} = ""; } else { $rank++; $lrms_jobs{$jid}{rank} = $rank; $jid=~/^(\d+).+/; } if ($v eq 'R' or 'E'){ ++$user_jobs_running{$job_owner{$jid}}; } if ($v eq 'Q'){ ++$user_jobs_queued{$job_owner{$jid}}; } } }; my ( %hoh_qstatf ) = read_qstat_f($path); foreach my $pbsjid (keys %hoh_qstatf) { # only jobids known by A-REX are processed my $jid = undef; foreach my $j (@$jids) { if ( $pbsjid =~ 
/^$j$/ ) { $jid = $j; last; } } next unless defined $jid; # handle qstat attributes of the jobs foreach my $k (keys %{$hoh_qstatf{$jid}} ) { my $v = $hoh_qstatf{$jid}{$k}; &$handle_attr($jid, $k, $v); } # count cpus for this jobs set_cpucount($lrms_jobs{$jid}); } my (@scalarkeywords) = ('status', 'rank', 'mem', 'walltime', 'cputime', 'reqwalltime', 'reqcputime'); foreach my $jid ( @$jids ) { foreach my $k ( @scalarkeywords ) { if ( ! defined $lrms_jobs{$jid}{$k} ) { $lrms_jobs{$jid}{$k} = ""; } } $lrms_jobs{$jid}{comment} = [] unless $lrms_jobs{$jid}{comment}; $lrms_jobs{$jid}{nodes} = [] unless $lrms_jobs{$jid}{nodes}; } return %lrms_jobs; } sub users_info($$@) { # Path to LRMS commands my ($config) = shift; my ($path) = $$config{pbs_bin_path}; error("pbs_bin_path not defined, cannot continue. Exiting...") unless defined $path; # Name of the queue to query my ($qname) = shift; init_globals($qname); # Unix user names mapped to grid users my ($accts) = shift; # Return data structure %lrms_users{$unix_local_username}{$keyword} # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_users); # Check that users have access to the queue my ( %hoh_qstatfQ ) = read_qstat_fQ( $path ); my $acl_user_enable = 0; my @acl_users; # added for routing queue support my @dqueues; my $singledqueue; my $isrouting; foreach my $k (keys %{$hoh_qstatfQ{$qname}}) { my $v = $hoh_qstatfQ{$qname}{$k}; if ( $k eq "acl_user_enable" && $v eq "True") { $acl_user_enable = 1; } if ( $k eq "acl_users" ) { unless ( $v eq 'False' ) { # This condition is kept here in case the reason # for it being there in the first place was that some # version or flavour of PBS really has False as an alternative # to usernames to indicate the absence of user access control # A Corrallary: Dont name your users 'False' ... push @acl_users, split ',', $v; } } # added to support routing queues if ( !$acl_user_enable ) { if ($k eq "route_destinations" ) { @dqueues=split (',',$v); $singledqueue=shift(@dqueues); info('Routing queue did not have acl information. Local user acl taken from destination queue: '.$singledqueue); $isrouting = 1; } } } # if the acl_user_enable is not defined in the RQ, # it could be defined in the destination queues. # we proceed same way as before but on the first # destination queue to propagate the info to the routing one if ($isrouting){ debug("Getting acl from destination queue $singledqueue"); # Check that users have access to the queue $acl_user_enable = 0; foreach my $k (keys %{$hoh_qstatfQ{$singledqueue}}) { my $v = $hoh_qstatfQ{$singledqueue}{$k}; if ( $k eq "acl_user_enable" && $v eq "True") { $acl_user_enable = 1; } if ( $k eq "acl_users" ) { unless ( $v eq 'False' ) { push @acl_users, split ',', $v; } } } debug(@acl_users); } # acl_users is only in effect when acl_user_enable is true if ($acl_user_enable) { foreach my $a ( @{$accts} ) { if ( grep { $a eq $_ } @acl_users ) { # The acl_users list has to be sent back to the caller. # This trick works because the config hash is passed by # reference. push @{$$config{acl_users}}, $a; } else { warning("Local user $a does not ". "have access in queue $qname."); } } } else { delete $$config{acl_users}; } # Uses saved module data structure %lrms_queue, which # exists if queue_info is called before if ( ! 
exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $user_jobs_running{$u} = 0 unless $user_jobs_running{$u}; if ($lrms_queue{maxuserrun} and ($lrms_queue{maxuserrun} - $user_jobs_running{$u}) < $lrms_queue{status} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $user_jobs_running{$u}; } else { $lrms_users{$u}{freecpus} = $lrms_queue{status}; } $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; if ($lrms_users{$u}{freecpus} < 0) { $lrms_users{$u}{freecpus} = 0; } if ($lrms_queue{maxcputime} and $lrms_users{$u}{freecpus} > 0) { $lrms_users{$u}{freecpus} .= ':'.$lrms_queue{maxcputime}; } } return %lrms_users; } sub nodes_info($) { my $config = shift; my $path = $config->{pbs_bin_path}; my %hoh_pbsnodes = read_pbsnodes($path); my %nodes; for my $host (keys %hoh_pbsnodes) { my ($isfree, $isavailable) = (0,0); $isfree = 1 if $hoh_pbsnodes{$host}{state} =~ /free/; $isavailable = 1 unless $hoh_pbsnodes{$host}{state} =~ /down|offline|unknown/; $nodes{$host} = {isfree => $isfree, isavailable => $isavailable}; my $props = $hoh_pbsnodes{$host}{properties}; $nodes{$host}{tags} = [ split /,\s*/, $props ] if $props; my $np = $hoh_pbsnodes{$host}{np}; $nodes{$host}{slots} = int $np if $np; my $status = $hoh_pbsnodes{$host}{status}; if ($status) { for my $token (split ',', $status) { my ($opt, $val) = split '=', $token, 2; next unless defined $val; if ($opt eq 'totmem') { $nodes{$host}{vmem} = int($1/1024) if $val =~ m/^(\d+)kb/; } elsif ($opt eq 'physmem') { $nodes{$host}{pmem} = int($1/1024) if $val =~ m/^(\d+)kb/; } elsif ($opt eq 'ncpus') { $nodes{$host}{lcpus} = int $val; } elsif ($opt eq 'uname') { my @uname = split ' ', $val; $nodes{$host}{sysname} = $uname[0]; $nodes{$host}{release} = $uname[2] if @uname > 2; $nodes{$host}{machine} = $uname[-1] if $uname[-1]; } } } } return %nodes; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/HostInfo.pm0000644000000000000000000000013115067751327023760 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.867493681 30 ctime=1759499029.818470361 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/HostInfo.pm0000644000175000002070000004047115067751327025671 0ustar00mockbuildmock00000000000000package HostInfo; use POSIX; use Sys::Hostname; use Time::Local; use strict; BEGIN { eval {require Time::HiRes; import Time::HiRes "time"}; } use Sysinfo; use LogUtils; use InfoChecker; our $host_options_schema = { x509_host_cert => '*', x509_cert_dir => '*', x509_cert_policy => '*', wakeupperiod => '*', processes => [ '' ], ports => { '*' => [ '*' ] #process name, ports }, localusers => [ '' ], # TODO: Test use case of multiple sessiondirs live. 
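    # Added reminder on the schema notation (as these schemas appear to be
    # interpreted by InfoChecker): '' marks a required scalar, '*' an
    # optional one, [ '' ] a list of scalars, and a '*' hash key stands for
    # arbitrary keys.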
arex => { controldir => '', sessiondir => [ '' ], cache => { cachedir => [ '*' ], cachesize => '*' }, }, }; our $host_info_schema = { hostname => '', osname => '*', # see OSName_t, GFD.147 osversion => '*', # see OSName_t, GFD.147 sysname => '', # uname -s release => '', # uname -r machine => '', # uname -m (what would it be on a linux machine) cpuvendor => '*', cpumodel => '*', cpufreq => '*', # unit: MHz cpustepping => '*', pmem => '*', # unit: MB vmem => '*', # unit: MB cputhreadcount=> '*', cpucorecount => '*', cpusocketcount=> '*', issuerca => '', issuerca_hash => '', issuerca_enddate => '*', issuerca_expired => '*', hostcert_enddate => '*', hostcert_expired => '*', trustedcas => [ '' ], session_free => '', # unit: MB session_total => '', # unit: MB cache_free => '', # unit: MB cache_total => '', # unit: MB globusversion => '*', processes => { '*' => '' }, ports => { '*' => { # process name '*' => [ '' ] # port -> [port status, error message ] } }, gm_alive => '', localusers => { '*' => { gridareas => [ '' ], diskfree => '' # unit: MB } }, }; our $log = LogUtils->getLogger(__PACKAGE__); { my ($t0, $descr); sub timer_start($) { $descr = shift; $t0 = time(); } sub timer_stop() { my $dt = sprintf("%.3f", time() - $t0); $log->debug("Time spent $descr: ${dt}s"); } } sub collect($) { my ($options) = @_; my ($checker, @messages); $checker = InfoChecker->new($host_options_schema); @messages = $checker->verify($options); $log->warning("config key options->$_") foreach @messages; $log->fatal("Some required options are missing") if @messages; my $result = get_host_info($options); $checker = InfoChecker->new($host_info_schema); @messages = $checker->verify($result); $log->debug("SelfCheck: result key hostinfo->$_") foreach @messages; return $result; } # private subroutines # Obtain the end date of a certificate (in seconds since the epoch) sub enddate { my ($openssl, $certfile) = @_; # assuming here that the file exists and is a well-formed certificate. my $stdout =`$openssl x509 -noout -enddate -in '$certfile' 2>&1`; if ($?) { $log->info("openssl error: $stdout"); return undef; } chomp ($stdout); my %mon = (Jan=>0,Feb=>1,Mar=>2,Apr=>3,May=>4,Jun=>5,Jul=>6,Aug=>7,Sep=>8,Oct=>9,Nov=>10,Dec=>11); if ($stdout =~ m/notAfter=(\w{3}) ?(\d\d?) (\d\d):(\d\d):(\d\d) (\d{4}) GMT/ and exists $mon{$1}) { return timegm($5,$4,$3,$2,$mon{$1},$6); } else { $log->warning("Unexpected -enddate from openssl for $certfile"); return undef; } } sub get_ports_info { my ($processes, $ports) = @_; my $portsstatus = {}; my $errormessage = ''; # Assume user is root my $userisroot = 1; if ($> != 0) { $userisroot = 0; $errormessage = "Checking if ARC ports are open: user ".getpwuid($>)." cannot access process names. Infosys will assume AREX interfaces are running properly;"; $log->verbose($errormessage); } my $netcommand = ''; my $stdout = ''; # check if to use either netstat or ss if ($userisroot) { for my $path (split ':', "$ENV{PATH}") { $netcommand = "$path/netstat" and last if -x "$path/netstat"; $netcommand = "$path/ss" and last if -x "$path/ss"; } if ($netcommand eq '') { $errormessage = $errormessage." Could not find neither netstat nor ss command, cannot probe open ports, assuming services are up;"; $log->verbose("Could not find neither netstat nor ss command, cannot probe open ports, assuming services are up"); } else { # run net command $stdout = `$netcommand -antup 2>&1`; if ($?) { $errormessage = $errormessage." 
$netcommand error: $stdout"; $log->info("$netcommand error: $stdout"); return undef; } } chomp ($stdout); } foreach my $process (@$processes) { my $procports = $ports->{$process}; foreach my $port (@$procports) { if ( $stdout =~ m/$port.*$process/ or $netcommand eq '' or $userisroot == 0 ) { $portsstatus->{$process}{$port} = ['ok', $errormessage ]; } else { my $porterrormessage = $errormessage. " $netcommand: process $process is not listening on port $port;"; $portsstatus->{$process}{$port} = ['critical', $porterrormessage ]; } } } return $portsstatus; } # Hostcert, issuer CA, trustedca, issuercahash, enddate ... sub get_cert_info { my ($options, $globusloc) = @_; my $host_info = {}; if (not $options->{x509_host_cert}) { $log->info("x509_host_cert not configured"); return $host_info; } # find an openssl my $openssl = ''; for my $path (split ':', "$ENV{PATH}:$globusloc/bin") { $openssl = "$path/openssl" and last if -x "$path/openssl"; } $log->error("Could not find openssl command") unless $openssl; # Inspect host certificate my $hostcert = $options->{x509_host_cert}; chomp (my $issuerca = `$openssl x509 -noout -issuer -nameopt oneline -in '$hostcert' 2>&1`); if ($?) { $log->warning("Failed processing host certificate file: $hostcert, openssl error: $issuerca") if $?; } else { $issuerca =~ s/, /\//g; $issuerca =~ s/ = /=/g; $issuerca =~ s/^[^=]*= */\//; $host_info->{issuerca} = $issuerca; $host_info->{hostcert_enddate} = enddate($openssl, $hostcert); system("$openssl x509 -noout -checkend 3600 -in '$hostcert' >/dev/null 2>&1"); $host_info->{hostcert_expired} = $? ? 1 : 0; $log->warning("Host certificate is expired in file: $hostcert") if $?; } if (not $options->{x509_cert_dir} or $options->{x509_cert_policy} eq 'system') { $log->info("x509_cert_dir not configured"); $host_info->{issuerca_enddate} = $host_info->{hostcert_enddate}; $host_info->{issuerca_expired} = 0; return $host_info; } # List certs and elliminate duplication in case 2 soft links point to the same file. my %certfiles; my $certdir = $options->{x509_cert_dir}; if (opendir(CERTDIR, $certdir)) { for (readdir CERTDIR) { next unless m/\.\d$/; my $file = $certdir."/".$_; my $link = -l $file ? readlink $file : $_; $certfiles{$link} = $file; } closedir CERTDIR; } else { $log->warning("Failed listing certificates directory $certdir: $!"); } my %trustedca; foreach my $cert ( sort values %certfiles ) { chomp (my $ca_sn = `$openssl x509 -checkend 3600 -noout -subject -nameopt oneline -in '$cert'`); my $is_expired = $?; $ca_sn = (split(/\n/, $ca_sn))[0]; $ca_sn =~ s/, /\//g; $ca_sn =~ s/ = /=/g; $ca_sn =~ s/^[^=]*= */\//; if ($ca_sn eq $issuerca) { chomp (my $issuerca_hash = `$openssl x509 -noout -hash -in '$cert'`); if ($?) { $log->warning("Failed processing issuer CA certificate file: $cert"); } else { $host_info->{issuerca_hash} = $issuerca_hash || undef; $host_info->{issuerca_enddate} = enddate($openssl, $cert); $host_info->{issuerca_expired} = $is_expired ? 
1 : 0; $log->warning("Issuer CA certificate is expired in file: $cert") if $is_expired; } } $log->warning("Certificate is expired for CA: $ca_sn") if $is_expired; $trustedca{$ca_sn} = 1 unless $is_expired; } $host_info->{trustedcas} = [ sort keys %trustedca ]; $log->warning("Issuer CA certificate file not found") unless exists $host_info->{issuerca_hash}; return $host_info; } # Returns 'all' if all grid-managers are up # 'some' if one or more grid-managers are down # 'none' if all grid-managers are down sub gm_alive { my ($timeout, @controldirs) = @_; my $up = 0; my $down = 0; for my $dir (@controldirs) { my @stat = stat("$dir/gm-heartbeat"); if (@stat and time() - $stat[9] < $timeout) { $up++; } else { $down++; } } return 'none' if not $up; return $down ? 'some' : 'all'; } sub get_host_info { my $options = shift; my $host_info = {}; $host_info->{hostname} = hostname(); my $osinfo = Sysinfo::osinfo() || {}; my $cpuinfo = Sysinfo::cpuinfo() || {}; my $meminfo = Sysinfo::meminfo() || {}; $log->error("Failed querying CPU info") unless %$cpuinfo; $log->error("Failed querying OS info") unless %$osinfo; # Globus location my $globusloc = $ENV{GLOBUS_LOCATION} || "/usr"; if ($ENV{GLOBUS_LOCATION}) { if ($ENV{LD_LIBRARY_PATH}) { $ENV{LD_LIBRARY_PATH} .= ":$ENV{GLOBUS_LOCATION}/lib"; } else { $ENV{LD_LIBRARY_PATH} = "$ENV{GLOBUS_LOCATION}/lib"; } } timer_start("collecting certificates info"); my $certinfo = get_cert_info($options, $globusloc); timer_stop(); $host_info = {%$host_info, %$osinfo, %$cpuinfo, %$meminfo, %$certinfo}; my @controldirs; my $control = $options->{arex}; # Leaving this array here in case we have still multiple controldirs of some kind push @controldirs, $control->{controldir}; # Considering only common session disk space (not including per-user session directoires) my (%commongridareas, $commonfree); if ($control) { $commongridareas{$_} = 1 for map { my ($path, $drain) = split /\s+/, $_; $path; } @{$control->{sessiondir}}; } # TODO: this can be removed. Commenting out for now. # Also include remote session directoires. #if (my $remotes = $options->{remotegmdirs}) { # for my $remote (@$remotes) { # my ($ctrldir, @sessions) = split ' ', $remote; # $commongridareas{$_} = 1 for grep { $_ ne 'drain' } @sessions; # push @controldirs, $ctrldir; # } #} if (%commongridareas) { my %res = Sysinfo::diskspaces(keys %commongridareas); if ($res{errors}) { $log->warning("Failed checking disk space available in session directories. The check is skipped if sessiondir=* present in arc.conf"); } else { $host_info->{session_free} = $commonfree = $res{freesum}; $host_info->{session_total} = $res{totalsum}; } } # TODO: this is broken since many years. Needs better handling in CEinfo.pl and ConfigCentral.pm # calculate free space on the sessionsirs of each "local user". my $user = $host_info->{localusers} = {}; foreach my $u (@{$options->{localusers}}) { # TODO: this can be reengineered for user-based sessiondirs ('*' parameter) # Are there grid-manager settings applying for this local user? 
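        # Added illustration (hypothetical user and path): an entry such as
        #   $control->{joe}{sessiondir} = [ '/scratch/joe drain' ];
        # is reduced below to the bare path '/scratch/joe', whose free space
        # is then published for that user; users without an entry of their
        # own inherit the shared session directories counted above.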
if ($control->{$u}) { my $sessiondirs = [ map { my ($path, $drain) = split /\s+/, $_; $path; } @{$control->{$u}{sessiondir}} ]; my %res = Sysinfo::diskspaces(@$sessiondirs); if ($res{errors}) { $log->warning("Failed checking disk space available in session directories of user $u") } else { $user->{$u}{gridareas} = $sessiondirs; $user->{$u}{diskfree} = $res{freesum}; } } elsif (defined $commonfree) { # default for other users $user->{$u}{gridareas} = [ keys %commongridareas ]; $user->{$u}{diskfree} = $commonfree; } } # Considering only common cache disk space (not including per-user caches) if ($control->{'cache'}) { my $cachedirs = $control->{'cache'}{cachedir} || []; my ($cachemax, $cachemin) = split " ", $control->{'cache'}{cachesize} if defined $control->{'cache'}{cachesize}; my @paths = map { my @pair = split " ", $_; $pair[0] } @$cachedirs; if (@paths) { # TODO: treat cache the same way as sessiondir, avoid double counting sizes in in same filesystem my %res = Sysinfo::diskspaces(@paths); if ($res{errors}) { $log->warning("Failed checking disk space available in common cache directories") } else { # What to publish as CacheFree if there are multiple cache disks? # HighWatermark is factored in # Only accurate if caches are on filesystems of their own $host_info->{cache_total} = (defined $cachemax) ? $res{totalsum}*$cachemax/100 : $res{totalsum}; $host_info->{cache_total} = int $host_info->{cache_total}; # Opting to publish the least free space on any of the cache # disks -- at least this has a simple meaning and is useful to # diagnose if a disk gets full -- but upper limit is # the max space usable calculated above, for consistency $host_info->{cache_free} = ($res{freemin} >= $host_info->{cache_total}) ? $host_info->{cache_total} : $res{freemin}; $host_info->{cache_free} = int $host_info->{cache_free}; } } } my $gm_timeout = $options->{wakeupperiod} ? $options->{wakeupperiod} * 10 : 1800; $host_info->{gm_alive} = gm_alive($gm_timeout, @controldirs); #Globus Toolkit version #globuslocation/share/doc/VERSION my $globusversion; if (-r "$globusloc/share/doc/VERSION" ) { chomp ( $globusversion = `cat $globusloc/share/doc/VERSION 2>/dev/null`); if ($?) { $log->warning("Failed reading the globus version file")} } #globuslocation/bin/globus-version elsif (-x "$globusloc/bin/globus-version" ) { chomp ( $globusversion = `$globusloc/bin/globus-version 2>/dev/null`); if ($?) { $log->warning("Failed running $globusloc/bin/globus-version command")} } $host_info->{globusversion} = $globusversion if $globusversion; $host_info->{processes} = Sysinfo::processid(@{$options->{processes}}); $host_info->{ports} = get_ports_info($options->{processes},$options->{ports}); return $host_info; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $options = { x509_host_cert => '/etc/grid-security/testCA-hostcert.pem', x509_cert_dir => '/etc/grid-security/certificates', arex => { sessiondir => [ '/home', '/boot', '*', ], cache => { cachedir => [ '/home' ], cachesize => '60 80', }, }, libexecdir => '/usr/libexec/arc', runtimedir => '/home/grid/runtime', processes => [ qw(bash ps init grid-manager bogous cupsd slapd) ], ports => { cupsd => ['631'], gridftpd => ['3811'], slapd => ['133','389','2135'] }, localusers => [ qw(root bin daemon) ] }; require Data::Dumper; import Data::Dumper qw(Dumper); LogUtils::level('DEBUG'); $log->debug("Options:\n" . Dumper($options)); my $results = HostInfo::collect($options); $log->debug("Results:\n" . 
Dumper($results)); } #test; 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/RTEInfo.pm0000644000000000000000000000013215067751327023476 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.867493681 30 ctime=1759499029.819885543 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/RTEInfo.pm0000755000175000002070000000272015067751327025404 0ustar00mockbuildmock00000000000000package RTEInfo; use warnings; use strict; use LogUtils; our $rte_options_schema = { controldir => '', }; our $rte_info_schema = { '*' => { state => '', description => '*', } }; our $log = LogUtils->getLogger(__PACKAGE__); sub collect($) { my ($options) = @_; my $rtes = {}; # enabled RTEs in controldir add_static_rtes("$options->{controldir}/rte/enabled/", $rtes) if $options->{controldir}; return $rtes; } sub add_static_rtes { my ($runtimedir, $rtes) = @_; unless (opendir DIR, $runtimedir) { $log->debug("Can't access runtimedir: $runtimedir: $!"); return; } closedir DIR; my $cmd = "find '$runtimedir' ! -type d ! -name '.*' ! -name '*~' -exec test -e {} \\; -print"; unless (open RTE, "$cmd |") { $log->warning("Failed to run: $cmd"); return; } while (my $dir = <RTE>) { chomp $dir; $dir =~ s#$runtimedir/*##; $rtes->{$dir} = { state => 'installednotverified' }; } } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $options = { controldir => '/var/spool/nordugrid', }; require Data::Dumper; import Data::Dumper qw(Dumper); LogUtils::level('VERBOSE'); $log->debug("Options:\n" . Dumper($options)); my $results = RTEInfo::collect($options); $log->debug("Results:\n" . Dumper($results)); } #test; 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/PerfData.pl.in0000644000000000000000000000013215067751327024322 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.867493681 30 ctime=1759499029.840906694 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PerfData.pl.in0000644000175000002070000004606415067751327026236 0ustar00mockbuildmock00000000000000#!/usr/bin/perl -w package PerfData; ######################################################## # Script for performance collection # Requires the NYTProf profiler # Requires files generated after the execution of # CEinfo.pl # Generates output as requested by the NorduGrid # collaboration at # https://wiki.nordugrid.org/wiki/Logging_of_CE_performance_numbers # # # 20160414 metrics: # - lrmscalltiming, lrmstype, lrms full command, durationinseconds # - controldirreadtiming, all, directoryname, durationinseconds # - controldirreadtiming, file, filename, durationinseconds # - controldirreadtiming, job, jobid, durationinseconds # this can be derived from the above numbers # # Author: Florido Paganelli florido.paganelli@hp.lu.se, NorduGrid Collaboration # ######################################################## use File::Basename; use Getopt::Long; use Sys::Hostname; use Cwd; #use Data::Dumper::Concise; #use Devel::NYTProf::Data; #use Symbol; # The profiling tool might be missing in some distributions. # Default is to assume it is not present. my $NYTPROF_PRESENT = 0; $ENV{NYTPROF} = "start=no:file=/tmp/arcnytproftmp"; $NYTPROF_PRESENT = eval { require Devel::NYTProf; 1; }; if ($NYTPROF_PRESENT) { # temporary workaround to reduce useless files written by # the profiler. Might be removed in the future # NOTE: comment lines out to profile/debug this script or profiling will # stop after the following line.
DB::disable_profile(); DB::finish_profile(); unlink '/tmp/arcnytproftmp'; }; use strict; BEGIN { my $pkgdatadir = dirname($0); unshift @INC, $pkgdatadir; } # used for code parsing my $pkgdatadir = dirname($0); use ConfigCentral; use LogUtils; our $log = LogUtils->getLogger(__PACKAGE__); my $debuglevel; # default is to delete parsed nytprof files my $keepnytproffiles = 0; our $configfile; my $controldirpath = ''; # default performance log filename as agreed in the NG wiki # https://wiki.nordugrid.org/wiki/Logging_of_CE_performance_numbers#details.2C_agreements my $perflogfilename = 'infosys.perflog'; ############################################################# # Datastructures to hold metrics to collect # To add new metrics it is enough to fill in these datastructures # with the needed information. ############################################################# # TODO: # - controldirreadtiming, job, jobid, durationinseconds # candidate fine grained: raw file date, job, N/A: can NYTPROF get this runtime info? , sum of the above for each jobid? # There is no way to obtain JOB IDS from NYTProf. # This can be achieved only by adding additional info in the code. # candidate coarse grained: raw file date, job, any , sum of the above for just one call or NYTPROF block: foreach my $ID (@gridmanager_jobs) { # This can be derived from other numbers. But it requires summing times of all lines inside the for loop above, which is quite time consuming # and it will not give a much better number than the time to run getgmjobs / number of calls for each file ## GMJobsInfo.pm ####################################### # subs for gmjobs # Implements: # - controldirreadtiming, all, directoryname, durationinseconds # candidate: raw file date, all, $controlsubdir? , sub get_gmjobs # Can't get specific controlsubdir. Just showing aggregated data for now. # # NYTProf datastructure: GMJobsInfo::get_gmjobs => [ ?, ?, ?, external time, internaltime, pointer ] # we always take externaltime # my $gmjobssubs = { 'GMJobsInfo::get_gmjobs' => "controldirreadtiming,all,controldir" }; # code patterns to get line info for gmjobsinfo my $gmjobsinfopatterns = { '.local' => 'my @local_allines = <GMJOB_LOCAL>;', '.status' => 'my \(\$first_line\) = <GMJOB_STATUS>;', '.failed' => 'read GMJOB_FAILED, \$chars, 1024;', '.grami' => 'while \(my \$line = <GMJOB_GRAMI>\) {', '.description' => 'while \(my \$line = <GMJOB_DESCRIPTION>\) {', '.diag' => 'unless \( open \(GMJOB_DIAG, "<\$gmjob_diag"\) \) {' }; # mapping between info sources and output strings # # Implements: # - controldirreadtiming, file, filename, durationinseconds, number of calls # filename is missing as it is currently impossible to get from the profiler. # my $gmjobsinfometrics = { 'subs' => { %$gmjobssubs }, 'codepatterns' => { 'subprefix' => 'controldirreadtiming,file', 'patterns' => { %$gmjobsinfopatterns }, 'lines' => {}, # will contain calculated line numbers in source code for the above patterns 'params' => 'incl' } }; # Coarse grained LRMSInfo information # Implements: # not very interesting: candidate coarse grained: raw file date, LRMSInfo.pm, nofullcommand, # timing of line my $result = get_lrms_info($options); <-- same info in infoprovider.log, but maybe good for comparison?
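# A minimal sketch (hypothetical module and sub names) of how a further
# metric would be declared and registered, following the same 'subs'
# convention used throughout this file: a fully qualified sub name mapped
# to the comma-separated output prefix, then added under $metrics->{'modules'}:
#
# my $mymodsubs = {
#     'MyLRMSmod::queue_info' => 'lrmscalltiming,mylrms,queue_info',
# };
# my $mymodmetrics = { 'subs' => { %$mymodsubs } };
# $metrics->{'modules'}{'MyLRMSmod.pm'} = { %$mymodmetrics };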
my $lrmsinfosubs = { 'LRMSInfo::collect' => 'lrmscalltiming,LRMSInfo.pm,collect', }; my $lrmsinfometrics = { 'subs' => { %$lrmsinfosubs }, }; # Module stuff for each LRMS, to be loaded depending on config # Implements: # - lrmscalltiming, lrmstype, lrms full command, durationinseconds # candidate fine grained: raw file date, lrmsmodulename, lrms full command?, sum of queueinfo and jobsinfo timing # # TODO: add lrms full command, must be extracted from code. Exact values will be missing ## fork my $forkmodsubs = { 'FORKmod::queue_info' => 'lrmscalltiming,fork,queue_info', 'FORKmod::jobs_info' => 'lrmscalltiming,fork,jobs_info' }; #my $forkmodpatterns = { # '.local' => 'my @local_allines = ;', #}; my $forkmodmetrics = { 'subs' => { %$forkmodsubs }, # 'codepatterns' => { # 'subprefix' => 'controldirreadtiming,file', # 'patterns' => { %$forkmodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## slurm my $slurmmodsubs = { 'SLURMmod::queue_info' => 'lrmscalltiming,slurm,queue_info', 'SLURMmod::jobs_info' => 'lrmscalltiming,slurm,jobs_info' }; #my $slurmmodpatterns = { # '.local' => 'my @local_allines = ;', #}; my $slurmmodmetrics = { 'subs' => { %$slurmmodsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,slurm,command', # 'patterns' => { %$slurmmodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## Condor my $condorsubs = { 'Condor::queue_info' => 'lrmscalltiming,condor,queue_info', 'Condor::jobs_info' => 'lrmscalltiming,condor,jobs_info' }; #my $condorpatterns = { # '.local' => 'my @local_allines = ;', #}; my $condormetrics = { 'subs' => { %$condorsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,condor,command', # 'patterns' => { %$condorpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## PBS my $pbsmodsubs = { 'PBS::queue_info' => 'lrmscalltiming,pbs,queue_info', 'PBS::jobs_info' => 'lrmscalltiming,pbs,jobs_info' }; #my $pbspatterns = { # '.local' => 'my @local_allines = ;', #}; my $pbsmodmetrics = { 'subs' => { %$pbsmodsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,pbs,command', # 'patterns' => { %$pbsmodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## PBSPRO my $pbspromodsubs = { 'PBSPRO::queue_info' => 'lrmscalltiming,pbs,queue_info', 'PBSPRO::jobs_info' => 'lrmscalltiming,pbs,jobs_info' }; my $pbspromodmetrics = { 'subs' => { %$pbspromodsubs }, }; ## SGE my $sgemodsubs = { 'SGEmod::queue_info' => 'lrmscalltiming,sge,queue_info', 'SGEmod::jobs_info' => 'lrmscalltiming,sge,jobs_info' }; #my $sgemodpatterns = { # '.local' => 'my @local_allines = ;', #}; my $sgemodmetrics = { 'subs' => { %$sgemodsubs }, # 'codepatterns' => { # 'subprefix' => 'lrmscalltiming,sge,command', # 'patterns' => { %$sgemodpatterns }, # 'lines' => {'*'}, # will contain calculated line numbers in source code for the above patterns # 'params' => 'incl' # } }; ## Metrics list ############################################ my $metrics = { 'modules' => { 'GMJobsInfo.pm' => { %$gmjobsinfometrics }, 'LRMSInfo.pm' => { %$lrmsinfometrics }, # These are loaded depending on config # 'FORKmod.pm' => { %$forkmodmetrics }, # 'SLURMmod.pm' => { %$slurmmodmetrics }, # 'Condor.pm' => { %$condormetrics }, # 'PBS.pm' => { %$pbsmodmetrics }, # 
'PBSPRO.pm' => { %$pbspromodmetrics }, # 'SGEmod.pm' => { %$sgemodmetrics }, }, }; ############################################################ # Subroutines ############################################################ # Scans the modules datastructure and writes out NYTProf data sub getdatabymodule { my ($arcmodulefilename,$prefixstring,$profile) = @_; my $arcmoduledata = $metrics->{'modules'}{$arcmodulefilename}; # print Data::Dumper::Dumper($arcmoduledata); # TODO: check that profile contains relevant data # get performance data for given modules if (defined $arcmoduledata->{subs}) { my $modulesubstimes = {}; $modulesubstimes = getsubroutinedata($modulesubstimes,$arcmoduledata->{subs},$profile); # output strings for my $subr (keys %{$arcmoduledata->{subs}}) { # print Data::Dumper::Dumper($arcmoduledata->{subs}); writeperf("$prefixstring,".$arcmoduledata->{subs}{$subr}.','.$modulesubstimes->{$subr}) if defined $modulesubstimes; } } # print lines information #print Data::Dumper::Dumper("lines before stats is: ".$arcmoduledata->{codepatterns}{lines}); if (defined $arcmoduledata->{codepatterns}) { printfiledata($arcmoduledata->{codepatterns}{lines},$arcmodulefilename,$profile,"$prefixstring,$arcmoduledata->{codepatterns}{subprefix}"); } } # adds subroutine times to the input hash sub getsubroutinedata { my ($modulesubstimes,$arcmodulesubs,$profile) = @_; my $subinfo; # get subroutine data for my $subroutine (keys %$arcmodulesubs) { # TODO: protect from missing data $subinfo = $profile->subinfo_of($subroutine); if (defined $subinfo) { # Suggested by Tim Bunce: $modulesubstimes->{$subroutine} = $subinfo->incl_time; } else { $modulesubstimes->{$subroutine} = '0,0'; } } return $modulesubstimes; } # get stats for selected lines of code. Prints file data per line # Fills the lines hash in the datastructure sub printfiledata { my ($linenumbers,$perlmodulefilename,$profile,$prefixstring) = @_; # get file data my $fileinfo = $profile->fileinfo_of($perlmodulefilename); my $linesinfo = $fileinfo->line_time_data; # [0] is the time for a call, [1] the number of calls for my $filetype (keys %{$linenumbers}) { my $line = $linenumbers->{$filetype}; if (defined @{$linesinfo}[$line]) { my $line_time = @{@{$linesinfo}[$line]}[0]; my $line_calls = @{@{$linesinfo}[$line]}[1]; writeperf("$prefixstring,$filetype,$line_time,$line_calls"); } } } # get code line numbers # taking them from the datastructure at the top sub getlinenumbers { # calculate line numbers for each pattern # this could be static, but allows the code to change independently foreach my $module (keys %{$metrics->{'modules'}}) { my $modulehash = $metrics->{'modules'}{$module}; if (defined $modulehash->{codepatterns}) { open ( SF , "$pkgdatadir/$module" ) or $log->error("$!"); while ( my $fileline = <SF> ) { for my $filetype (keys %{$modulehash->{codepatterns}{patterns}}) { my $pattern = $modulehash->{codepatterns}{patterns}{$filetype}; if ($fileline =~ /$pattern/ ) { $modulehash->{codepatterns}{lines}{$filetype} = $.; } } # TODO: cycle through lines to check that values have been defined, # otherwise there might be an error in the patterns #$log->warning("Pattern $pattern for module $module not found. Please recheck codepatterns datastructure in PerfData.pl") unless (defined $modulehash->{codepatterns}{lines}{$filetype}); }; close (SF); #print Data::Dumper::Dumper($metrics->{modules}{$module}{'codepatterns'}); }; }; } # Writes to file the performance information.
# structure is not checked here, currently it must be enforced by the # various functions creating the output message. sub writeperf { my ($msg) = @_; open ( my $filehandle, ">>", $perflogfilename) || $log->error("Cannot write to $perflogfilename, exiting"); print $filehandle "$msg\n"; close $filehandle; } # used to remove files in the nytprof db folder sub deletefiles { my ($dbfilefullpath) = @_; unless ($keepnytproffiles) { $log->verbose("deleting file $dbfilefullpath"); $log->warning("Cannot delete file $dbfilefullpath: $!") unless unlink $dbfilefullpath; } } ############################################################ # Main ############################################################ sub main { unless ($NYTPROF_PRESENT) { $log->error('Devel::NYTProf not present. Performance file generation cannot continue.'); exit 0; } # Parse command line options my $print_help; my $testfilename; GetOptions("config:s" => \$configfile, "testfilename|test|t:s" => \$testfilename, "debuglevel|d:s" => \$debuglevel, "keepnytproffiles|k!" => \$keepnytproffiles, "help|h" => \$print_help ); if ($print_help) { print " This script loads a set of NYTProf databases and extracts relevant data for ARC information system as specified in https://wiki.nordugrid.org/wiki/Logging_of_CE_performance_numbers . Usage: $0 Options: --config - full path to arc.conf --testfilename|test|t - filename to use for testing. If not specified all files in the performance folder will be scanned (default) --debuglevel|d - debug level as one of ARC {FATAL|ERROR|WARNING|INFO|VERBOSE|DEBUG}. Default is INFO --keepnytproffiles|k - if enabled, the script will not delete nytprof files in perfdata/perl_nytprof. Default is to wipe out the processed ones to save space. --help - this help\n"; exit 1; } $log->error("--config argument is missing, see --help ") unless ( $configfile ); # Read ARC configuration my $perflogdir = ConfigCentral::getValueOf($configfile,'monitoring/perflog','perflogdir'); $perflogdir ||= '/var/log/arc/perfdata'; my $hostname = ConfigCentral::getValueOf($configfile,'common','hostname'); my $arcversion = '@VERSION@'; $controldirpath = ConfigCentral::getValueOf($configfile,'arex','controldir'); # get lrms info and add relevant metrics to datastructure my $lrms = ConfigCentral::getValueOf($configfile,'lrms','lrms'); if ($lrms eq 'fork') { $metrics->{'modules'}{'FORKmod.pm'} = { %$forkmodmetrics }; } elsif ($lrms =~ /slurm/i) { $metrics->{'modules'}{'SLURMmod.pm'} = { %$slurmmodmetrics }; } elsif ($lrms =~ /pbspro/i) { $metrics->{'modules'}{'PBSPRO.pm'} = { %$pbspromodmetrics }; } elsif ($lrms =~ /pbs/i) { $metrics->{'modules'}{'PBS.pm'} = { %$pbsmodmetrics }; } elsif ($lrms =~ /condor/i) { $metrics->{'modules'}{'Condor.pm'} = { %$condormetrics }; }; $debuglevel ?
LogUtils::level($debuglevel) : LogUtils::level('INFO'); LogUtils::timestamps(1); $log->verbose('--keepnytproffiles option detected, db files will not be deleted') if ($keepnytproffiles); # calculate line numbers for each pattern # this could be static, but allows the code to change independently getlinenumbers(); #print Data::Dumper::Dumper($config); $log->info("Performance folder: ".$perflogdir); # set performance outputfile # timestamp not needed anymore but I will keep the code for now #my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); #my $timestamp=POSIX::strftime("%Y%m%d%H%M%S", $sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst); #$perflogfilename = 'infosys_'.$timestamp.'_'.$perflogfilename; $perflogfilename = $perflogdir.'/'.$perflogfilename; $log->info("Performance file will be created: ".$perflogfilename); # open nytprof database files in the folder and save their names my $nytprofperflogdir = $perflogdir.'/perl_nytprof'; $log->info("NYTProf databases folder: $nytprofperflogdir"); unless (opendir PERFDIR, $nytprofperflogdir ) { $log->error("Can't access the nytprof perfdata directory: $nytprofperflogdir"); } my @dbfiles = (); if (defined $testfilename) { push @dbfiles,$testfilename; } else { @dbfiles = grep /infosys\_\d{14}.perflog\.raw/, readdir PERFDIR; closedir PERFDIR; # remove last file as it is usually incomplete @dbfiles = sort @dbfiles; my $lastfile = pop @dbfiles; $log->debug("Skipping $nytprofperflogdir/$lastfile as it might be open by CEInfo.pl/NYTProf"); } # get some files to scan stats my $totalfilestoscan = @dbfiles; $log->info("Files to scan: $totalfilestoscan"); my $processedfiles = 0; # for each file extract relevant calls. foreach my $dbfile (@dbfiles) { #my $dbfile = 'infosys_20160704182917.perflog.raw'; $processedfiles++; my $dbfilefullpath = $nytprofperflogdir.'/'.$dbfile; $log->verbose("Processing: $dbfilefullpath , $processedfiles of $totalfilestoscan"); # Hack to solve NYTProf memory overflow. A circular reference in the # $profile datastructure prevents the garbage collector from cleaning up. # This way each file is processed in a child process, which # forces the garbage collector to clean up on exit. my $pid = fork(); $log->error('Cannot fork NYTProf scanning, exiting...') unless (defined $pid); ## child code if( $pid == 0 ){ $log->debug("Starting helper process for $dbfilefullpath"); my $profile = Devel::NYTProf::Data->new( { filename => $dbfilefullpath, quiet => 1 } ); #$profile->dump_profile_data(); # Prefix for performance strings. my $prefixstring = ''; my $rawtimestamp =''; if ( $dbfile =~ /infosys\_(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})\.perflog\.raw/ ) { # Format POSIX style $rawtimestamp = "$1-$2-$3".'T'."$4:$5:$6".'Z'; }; $prefixstring = "$rawtimestamp,$hostname,$arcversion,infosys"; # cycle through the datastructure for my $module (keys %{$metrics->{modules}}) { getdatabymodule($module,$prefixstring,$profile); } exit 0; # child exits here } # parent code # waits for child to exit waitpid ($pid, 0); # delete processed file deletefiles($dbfilefullpath); } $log->info("$processedfiles of $totalfilestoscan processed.
Results (if any) written to: $perflogfilename"); exit; } main; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/IniParser.pm0000644000000000000000000000013215067751327024124 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.812494652 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/IniParser.pm0000644000175000002070000002550715067751327026037 0ustar00mockbuildmock00000000000000package IniParser; use strict; use warnings; # Configuration parser classes for arc.conf ###### IniParser # # Synopsis: # # use IniParser; # # my $parser = IniParser->new("/etc/arc.conf") # or die 'Cannot parse config file'; # # my %common = $parser->get_section('common'); # get hash with all options in a section # my %queue = $parser->get_section('queue/atlas'); # # print $parser->list_subsections('gridftpd'); # list all subsections of 'gridftpd', but not # # the 'gridftpd' section itself # # my %gmopts = $parser->get_section('grid-manager'); # gm options which are not user-specific # my %useropts = $parser->get_section('grid-manager/.'); # gm options for the default user (this # # section is instantiated automatically # # if the controldir command was used) # # The [grid-manager] section is treated specially. Options that are # user-specific are put in separate pseudo-sections [grid-manager/<username>]. # <username> refers to the user that is initiated by a 'control' command. The # 'controldir' command initiates user '.'. Each pseudo-section has its own # 'controldir' option. Other user-specific options are: 'sessiondir', 'cachedir', # 'cachesize', 'cachelifetime', 'norootpower', 'maxrerun', # 'maxtransferfiles' and 'defaultttl'. No substitutions are made and user names # '.' and '*' are not handled specially. # ###### SubstitutingIniParser # # Synopsis: # # use IniParser; # # my $parser = SubstitutingIniParser->new("/etc/arc.conf") # or die 'Cannot parse config file'; # # This class is just like IniParser, but substitutions are made and sections # for user names like @filename are expanded into separate sections for each # individual user. # sub new($$) { my ($this,$arcconf) = @_; my $class = ref($this) || $this; open(my $fh, "< $arcconf") || return undef; my $self = { config => {} }; bless $self, $class; $self->{config} = _parse($fh); close($fh); return $self; } # Expects the filename of the arc.conf file. # Returns false if it cannot open the file. sub _parse($) { my ($fh) = @_; my $config = {}; # current section my $section = Section->new('common'); while (my $line =<$fh>) { # handle runaway LF in CRLF and LFCR $line =~ s/^\r//; $line =~ s/\r$//; # skip comments and empty lines next if $line =~/^\s*;/; next if $line =~/^\s*#/; next if $line =~/^\s*$/; # new section starts here if ($line =~ /^\s*\[([\w\-\.\/]+)\]\s*$/) { my $sname = $1; $section->register($config); if ($sname =~ m/^vo/) { $section = SelfNamingSection->new($sname,'id'); } elsif ($sname =~ m/^group/) { $section = SelfNamingSection->new($sname,'name'); } elsif ($sname =~ m/^queue/) { $section = SelfNamingSection->new($sname,'name'); } elsif ($sname eq 'grid-manager') { $section = GMSection->new($sname); } else { $section = Section->new($sname); } # single or double quotes can be used.
Quotes are removed from the values } elsif ($line =~ /^(\w+)\s*=\s*(["']?)(.*)(\2)\s*$/) { my ($opt,$val) = ($1,$3); $section->add($opt,$val); # bad line, ignore it for now } else { } } $section->register($config); delete $config->{common} unless %{$config->{common}}; return $config; } # Returns a hash with all options defined in a section. If the section does not # exist, it returns an empty hash sub get_section($$) { my ($self,$sname) = @_; return $self->{config}{$sname} ? %{$self->{config}{$sname}} : (); } # Returns the list of all sections sub list_sections($) { my ($self) = @_; return keys %{$self->{config}}; } sub has_section($$) { my ($self,$sname) = @_; return defined $self->{config}{$sname}; } # list all subsections of a section, but not the section section itself sub list_subsections($$) { my ($self,$sname) = @_; my %ssnames = (); for (keys %{$self->{config}}) { $ssnames{$1}='' if m|^$sname/(.+)|; } return keys %ssnames; } 1; ######################################################## package SubstitutingIniParser; our @ISA = ('IniParser'); sub new($$$) { my ($this,$arcconf,$arc_location) = @_; my $self = $this->SUPER::new($arcconf); return undef unless $self; _substitute($self, $arc_location); return $self; } sub _substitute { my ($self, $arc_location) = @_; my $config = $self->{config}; my $lrmsstring = $config->{'grid-manager'}{lrms} || $config->{common}{lrms}; my ($lrms, $defqueue) = split " ", $lrmsstring || ''; die 'Gridmap user list feature is not supported anymore. Please use @filename to specify user list.' if $config->{'grid-manager/*'}; # expand user sections whose user name is like @filename my @users = $self->list_subsections('grid-manager'); for my $user (@users) { my $section = "grid-manager/$user"; next unless $user =~ m/^\@(.*)$/; my $path = $1; my $fh; # read in user names from file if (open ($fh, "< $path")) { while (my $line = <$fh>) { chomp (my $newsection = "grid-manager/$line"); next if exists $config->{$newsection}; # Duplicate user!!!! $config->{$newsection} = { %{$config->{$section}} }; # shallow copy } close $fh; delete $config->{$section}; } else { die "Failed opening file to read user list from: $path: $!"; } } # substitute per-user options @users = $self->list_subsections('grid-manager'); for my $user (@users) { my @pw; my $home; if ($user ne '.') { @pw = getpwnam($user); die "getpwnam failed for user: $user: $!" 
unless @pw; $home = $pw[7]; } else { $home = "/tmp"; } my $opts = $config->{"grid-manager/$user"}; # Default for controldir, sessiondir if ($opts->{controldir} eq '*') { $opts->{controldir} = $pw[7]."/.jobstatus" if @pw; } if (not $opts->{sessiondir} or $opts->{sessiondir} eq '*') { $opts->{sessiondir} = "$home/.jobs"; } my $controldir = $opts->{controldir}; my @sessiondirs = split /\[separator\]/, $opts->{sessiondir}; my $substitute_opt = sub { my ($key) = @_; my $val = $opts->{$key}; return unless defined $val; # %R - session root $val =~ s/%R/$sessiondirs[0]/g if $val =~ m/%R/; # %C - control dir $val =~ s/%C/$controldir/g if $val =~ m/%C/; if (@pw) { # %U - username $val =~ s/%U/$user/g if $val =~ m/%U/; # %u - userid # %g - groupid # %H - home dir $val =~ s/%u/$pw[2]/g if $val =~ m/%u/; $val =~ s/%g/$pw[3]/g if $val =~ m/%g/; $val =~ s/%H/$home/g if $val =~ m/%H/; } # %L - default lrms # %Q - default queue $val =~ s/%L/$lrms/g if $val =~ m/%L/; $val =~ s/%Q/$defqueue/g if $val =~ m/%Q/; # %W - installation path $val =~ s/%W/$arc_location/g if $val =~ m/%W/; # %G - globus path my $G = $ENV{GLOBUS_LOCATION} || '/usr'; $val =~ s/%G/$G/g if $val =~ m/%G/; $opts->{$key} = $val; }; &$substitute_opt('controldir'); &$substitute_opt('sessiondir'); &$substitute_opt('cachedir'); } # authplugin, localcred, helper: not substituted } 1; ######################################################## package Section; sub new($$) { my ($this,$name) = @_; my $class = ref($this) || $this; my $self = { name => $name, data => {} }; bless $self, $class; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; my $data = $self->{data}; my $old = $data->{$opt}; $data->{$opt} = $old ? $old."[separator]".$val : $val; } sub register($$) { my ($self,$config) = @_; my $name = $self->{name}; my $orig = $config->{$name} || {}; my $new = $self->{data}; $config->{$name} = { %$orig, %$new }; } 1; ######################################################## package SelfNamingSection; use base "Section"; sub new($$$) { my ($this,$name,$nameopt) = @_; my $self = $this->SUPER::new($name); $self->{nameopt} = $nameopt; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; if ($opt eq $self->{nameopt}) { $self->{name} =~ s|(/[^/]+)?$|/$val|; } else { $self->SUPER::add($opt,$val); } } 1; ######################################################## package GMSection; use base "Section"; sub new($) { my ($this) = @_; my $self = $this->SUPER::new('grid-manager'); # OBS sessiondir is not treated $self->{muopts} = [qw(sessiondir cachedir)]; $self->{suopts} = [qw(cachesize cachelifetime norootpower maxrerun maxtransferfiles defaultttl)]; $self->{thisuser} = {}; $self->{allusers} = {}; $self->{controldir} = undef; return $self; } sub add($$$) { my ($self,$opt,$val) = @_; my $thisuser = $self->{thisuser}; if ($opt eq 'controldir') { $self->{controldir} = $val; } elsif ($opt eq 'control') { my ($dir, @usernames) = split /\s+/, $val; $thisuser->{controldir} = $dir; $self->{allusers}{$_} = $thisuser for @usernames; $thisuser = $self->{thisuser} = {%$thisuser}; # make copy delete $thisuser->{$_} for @{$self->{muopts}}; } elsif (grep {$opt eq $_} @{$self->{muopts}}) { my $old = $thisuser->{$opt}; $thisuser->{$opt} = $old ? 
$old."[separator]".$val : $val; } elsif (grep {$opt eq $_} @{$self->{suopts}}) { $thisuser->{$opt} = $val; } else { $self->SUPER::add($opt,$val); } } sub register($$) { my ($self,$config) = @_; my $dir = $self->{controldir}; if ($dir) { my $thisuser = $self->{thisuser}; $thisuser->{controldir} = $dir; $self->{allusers}{'.'} = $thisuser; } my $allusers = $self->{allusers}; $config->{"grid-manager/$_"} = $allusers->{$_} for keys %$allusers; $self->SUPER::register($config); } sub test { require Data::Dumper; import Data::Dumper qw(Dumper); my $parser = SubstitutingIniParser->new('/tmp/arc.conf','/usr') or die; print Dumper($parser); print "@{[$parser->list_subsections('gridftpd')]}\n"; print "@{[$parser->list_subsections('group')]}\n"; } #test(); 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/GMJobsInfo.pm0000644000000000000000000000013115067751327024164 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.866493666 30 ctime=1759499029.817331624 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/GMJobsInfo.pm0000644000175000002070000003757515067751327026110 0ustar00mockbuildmock00000000000000package GMJobsInfo; use POSIX qw(ceil); use English; use LogUtils; use strict; # The returned hash looks something like this: # TODO: review this schema, there is a lot of legacy stuff to fix. our $j = { 'jobID' => { gmuser => '*', # from .local lrms => '*', queue => '', localid => '*', subject => '', starttime => '*', # MDS time format jobname => '*', gmlog => '*', cleanuptime => '*', # MDS time format delegexpiretime => '*', # MDS time format clientname => '', clientsoftware => '*', activityid => [ '*' ], sessiondir => '', diskspace => '', failedstate => '*', fullaccess => '*', lifetime => '*', # seconds jobreport => '*', interface => '*', # added for GLUE2, the interface the job was submitted. If missing, gridftp voms => [ '*' ], #ref array, contains all the voms attributes in the user certificate that A-REX authorization accepted vomsvo => '*', # string, the first VO in the voms array without the slashes # from .description description => '', # rsl or xml # from .grami -- not kept when the job is deleted stdin => '*', stdout => '*', stderr => '*', count => '*', reqwalltime => '*', # units: s reqcputime => '*', # units: s runtimeenvironments=> [ '*' ], # from .status status => '', statusmodified => '', # seconds since epoch statusread => '', # seconds since epoch localowner => '', # from .failed errors => [ '*' ], lrmsexitcode => '*', # from .diag exitcode => '*', nodenames => [ '*' ], UsedMem => '*', # units: kB; summed over all execution threads CpuTime => '*', # units: s; summed over all execution threads WallTime => '*', # units: s; real-world time elapsed LRMSStartTime => '*', # units: MDS time format LRMSEndTime => '*', # units: MDS time format completiontime => '*' # MDS time format } }; our $log = LogUtils->getLogger(__PACKAGE__); # # switch effective user if possible. This is reversible. # sub switchEffectiveUser { my ($user) = @_; return unless $UID == 0; my ($name, $pass, $uid, $gid); if ($user eq '.') { ($uid, $gid) = (0, 0); } else { ($name, $pass, $uid, $gid) = getpwnam($user); return unless defined $gid; } eval { $EGID = $gid; $EUID = $uid; }; } sub collect { my ($arex, $nojobs) = @_; my $gmjobs = {}; my $user = $arex->{user}; # Disabling user switching for now, runs as the same user as a-rex #switchEffectiveUser($user); my $controldir = $arex->{controldir}; my $newjobs = get_gmjobs($controldir, $nojobs); # TODO: can this be removed? 
What is this one fetching exactly? $newjobs->{$_}{gmuser} = $user for keys %$newjobs; $gmjobs->{$_} = $newjobs->{$_} for keys %$newjobs; return $gmjobs; } # Given the controldir path, the jobid and a suffix # returns a path to that job in the fragmented controldir. sub control_path { my ($controldir, $jobid, $suffix) = @_; my ($a,$b,$c,$d) = unpack("A3A3A3A3", $jobid); my $path = "$controldir/jobs/$a/$b/$c/$d/$suffix"; return $path; } sub get_gmjobs { my ($controldir, $nojobs) = @_; my %gmjobs; my $jobstoscan = 0; my $jobsskipped = 0; # read the list of jobs from the jobdir and create the @gridmanager_jobs # the @gridmanager_jobs contains the IDs from the job.ID.status foreach my $controlsubdir ("$controldir/accepting", "$controldir/processing", "$controldir/finished") { unless (opendir JOBDIR, $controlsubdir ) { $log->warning("Can't access the job control directory: $controlsubdir") and return {}; } my @allfiles = grep /\.status/, readdir JOBDIR; closedir JOBDIR; my @gridmanager_jobs = map {$_=~m/(.+)\.status/; $_=$1;} @allfiles; # count job IDs to scan $jobstoscan = $jobstoscan + @gridmanager_jobs; $log->verbose("Found ". scalar @gridmanager_jobs. " jobs in $controlsubdir"); foreach my $ID (@gridmanager_jobs) { $log->debug("Scanning job $ID"); my $job = $gmjobs{$ID} = {}; my $gmjob_status = $controlsubdir."/".$ID.".status"; my $jobpath = control_path($controldir, $ID, ""); my $gmjob_local = "$jobpath"."local"; my $gmjob_failed = "$jobpath"."failed"; my $gmjob_description = "$jobpath"."description"; my $gmjob_grami = "$jobpath"."grami"; my $gmjob_diag = "$jobpath"."diag"; unless ( open (GMJOB_LOCAL, "<$gmjob_local") ) { $log->debug( "Job $ID: Can't read jobfile $gmjob_local, skipping job" ); delete $gmjobs{$ID}; $jobsskipped++; next; } my @local_allines = <GMJOB_LOCAL>; $job->{activityid} = []; # parse the content of the job.ID.local into the %gmjobs hash foreach my $line (@local_allines) { if ($line=~m/^(\w+)=(.+)$/) { # multiple activityid support. # Still in CPP codebase, see grid-manager/files/ControlFileContent.cpp, so keeping it here. if ($1 eq "activityid") { push @{$job->{activityid}}, $2; } else { # a job can belong to a user that has multiple voms roles # for completeness all of them are added to the datastructure # in an array if ($1 eq "voms") { push @{$job->{voms}}, $2; # vomsvo holds the selected vo, which I assume is the first in the list. # will be used to calculate vo statistics # must match advertised (i.e. slashes are removed) unless (defined $job->{vomsvo}) { my $vostring = $2; if ($vostring =~ /^\/+(\w+)/) { $vostring = $1; }; $job->{vomsvo} = $vostring; } } else { $job->{$1}=$2; } } } } close GMJOB_LOCAL; # Extract jobID URI if ($job->{globalid}) { $job->{globalid} =~ s/.*JobSessionDir>([^<]+)<.*/$1/; } else { $log->debug("Job $ID: 'globalid' missing from .local file"); } # Rename queue -> share if (exists $job->{queue}) { $job->{share} = $job->{queue}; delete $job->{queue}; } else { $log->debug("Job $ID: 'queue' missing from .local file"); } # check for interface field
if (! $job->{interface}) { $log->debug("Job $ID: 'interface' missing from .local file, reverting to org.nordugrid.gridftpjob"); $job->{interface} = 'org.nordugrid.gridftpjob'; } # read the job.ID.status into "status" unless (open (GMJOB_STATUS, "<$gmjob_status")) { $log->debug("Job $ID: Can't open status file $gmjob_status, skipping job"); delete $gmjobs{$ID}; $jobsskipped++; next; } else { my @file_stat = stat GMJOB_STATUS; my ($first_line) = <GMJOB_STATUS>; close GMJOB_STATUS; unless ($first_line) { $log->debug("Job $ID: Failed to read status from file $gmjob_status, skipping job"); delete $gmjobs{$ID}; $jobsskipped++; next; } chomp ($first_line); $job->{status} = $first_line; if (@file_stat) { # localowner my $uid = $file_stat[4]; my $user = (getpwuid($uid))[0]; if ($user) { $job->{localowner} = $user; } else { $log->debug("Job $ID: Cannot determine user name for owner (uid $uid)"); } $job->{"statusmodified"} = $file_stat[9]; $job->{"statusread"} = time(); } else { $log->debug("Job $ID: Cannot stat status file: $!"); } } # check for localid if (! $job->{localid}) { if ($job->{status} eq 'INLRMS') { $log->debug("Job $ID: has no local ID but is in INLRMS state, this should not happen"); } $job->{localid} = 'UNDEFINEDVALUE'; } # Here comes the splitting of the terminal job state # check for job failure, (job.ID.failed ) "errors" if (-e $gmjob_failed) { unless (open (GMJOB_FAILED, "<$gmjob_failed")) { $log->debug("Job $ID: Can't open $gmjob_failed"); } else { my $chars; read GMJOB_FAILED, $chars, 1024; my @allines = split "\n", $chars; close GMJOB_FAILED; $job->{errors} = \@allines; } } if ($job->{"status"} eq "FINISHED") { #terminal job state mapping if ( $job->{errors} ) { if (grep /Job is canceled by external request/, @{$job->{errors}}) { $job->{status} = "KILLED"; } elsif ( defined $job->{errors} ) { $job->{status} = "FAILED"; } } } # if jobs are not printed, it's sufficient to have jobid, status, # subject, queue and share. Can skip the rest. next if $nojobs; # read the job.ID.grami file unless ($job->{status} eq 'DELETED') { unless ( open (GMJOB_GRAMI, "<$gmjob_grami") ) { # this file is kept by A-REX during the whole existence of the # job. grid-manager from arc0, however, deletes it after the job # has finished.
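# (Illustrative sketch, not from the original source: judging from the
# regular expressions in the parsing loop below, a .grami file consists of
# lines such as
#   joboption_stdout='/scratch/job.1/stdout'
#   joboption_count=4
#   joboption_walltime=3600
# where quoted values are matched by the first pattern and bare word
# values by the second; the paths and numbers here are hypothetical.)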
$log->debug("Job $ID: Can't open $gmjob_grami"); } else { my $sessiondir = $job->{sessiondir} || ''; while (my $line = ) { if ($line =~ m/^joboption_(\w+)='(.*)'$/) { my ($param, $value) = ($1, $2); $param =~ s/'\\''/'/g; # unescape quotes # These parameters are quoted by A-REX if ($param eq "stdin") { $job->{stdin} = $value; $job->{stdin} =~ s/^\Q$sessiondir\E\/*//; } elsif ($param eq "stdout") { $job->{stdout} = $value; $job->{stdout} =~ s/^\Q$sessiondir\E\/*//; } elsif ($param eq "stderr") { $job->{stderr} = $value; $job->{stderr} =~ s/^\Q$sessiondir\E\/*//; } elsif ($param =~ m/^runtime_/) { push @{$job->{runtimeenvironments}}, $value; } } elsif ($line =~ m/^joboption_(\w+)=(\w+)$/) { my ($param, $value) = ($1, $2); # These parameters are not quoted by A-REX if ($param eq "count") { $job->{count} = int($value); } elsif ($param eq "walltime") { $job->{reqwalltime} = int($value); } elsif ($param eq "cputime") { $job->{reqcputime} = int($value); } elsif ($param eq "starttime") { $job->{starttime} = $value; } } } close GMJOB_GRAMI; } } #read the job.ID.description file unless ($job->{status} eq 'DELETED') { unless ( open (GMJOB_DESCRIPTION, "<$gmjob_description") ) { $log->debug("Job $ID: Can't open $gmjob_description"); } else { while (my $line = ) { chomp $line; next unless $line; if ($line =~ m/^\s*[&+|(]/) { $job->{description} = 'rsl'; last } if ($line =~ m/http\:\/\/www.eu-emi.eu\/es\/2010\/12\/adl/) { $job->{description} = 'adl'; last } my $nextline = ; if ($nextline =~ m/http\:\/\/www.eu-emi.eu\/es\/2010\/12\/adl/) { $job->{description} = 'adl'; last } $log->debug("Job $ID: Can't identify job description language"); last; } close GMJOB_DESCRIPTION; } } #read the job.ID.diag file if (-s $gmjob_diag) { unless ( open (GMJOB_DIAG, "<$gmjob_diag") ) { $log->debug("Job $ID: Can't open $gmjob_diag"); } else { my %nodenames; my ($kerneltime, $usertime); while (my $line = ) { $line=~m/^nodename=(\S+)/ and $nodenames{$1} = 1; $line=~m/^WallTime=(\d+)(\.\d*)?/ and $job->{WallTime} = ceil($1); $line=~m/^exitcode=(\d+)/ and $job->{exitcode} = $1; $line=~m/^AverageTotalMemory=(\d+)kB/ and $job->{UsedMem} = ceil($1); $line=~m/^KernelTime=(\d+)(\.\d*)?/ and $kerneltime=$1; $line=~m/^UserTime=(\d+)(\.\d*)?/ and $usertime=$1; $line=~m/^LRMSStartTime=(\d*Z)/ and $job->{LRMSStartTime}=$1; $line=~m/^LRMSEndTime=(\d*Z)/ and $job->{LRMSEndTime}=$1; } # Use completion time from diag instead of status, more reliable if ( ( $job->{status} eq 'FINISHED' ) ) { my @file_stat = stat GMJOB_DIAG; if (@file_stat) { my ($s,$m,$h,$D,$M,$Y) = gmtime($file_stat[9]); my $ts = sprintf("%4d%02d%02d%02d%02d%02d%1s",$Y+1900,$M+1,$D,$h,$m,$s,"Z"); $job->{"completiontime"} = $ts; } else { $log->debug("Job $ID: Cannot stat diag file: $!"); } } close GMJOB_DIAG; $job->{nodenames} = [ sort keys %nodenames ] if %nodenames; $job->{CpuTime}= ceil($kerneltime + $usertime) if defined $kerneltime and defined $usertime; } } } # job ID loop } # controlsubdir loop $log->verbose("Number of jobs to scan: $jobstoscan ; Number of jobs skipped: $jobsskipped"); return \%gmjobs; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test() { require Data::Dumper; LogUtils::level('VERBOSE'); my $controldirs = {'*' => { 'controldir' => '/tmp/jobstatus' } }; my $results = GMJobsInfo::collect(\%$controldirs); print Data::Dumper::Dumper($results); } #test; 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/Boinc.pm0000644000000000000000000000013215067751327023262 xustar0030 mtime=1759498967.758492025 
30 atime=1759498967.866493666 30 ctime=1759499029.811350578 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/Boinc.pm0000644000175000002070000002107115067751327025165 0ustar00mockbuildmock00000000000000package Boinc; ###################################################################### # DISCLAIMER ###################################################################### # This module depends on ARC0mod.pm which is obsolete and deprecated # starting from ARC 6.0 # Please DO NOT build new LRMS modules based on this one but follow # the indications in # LRMSInfo.pm # instead. ###################################################################### use strict; use DBI; use POSIX qw(ceil floor); use Sys::Hostname; our @ISA = ('Exporter'); our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## our (%lrms_queue); our $running = undef; # total running jobs in a queue # the queue passed in the latest call to queue_info, jobs_info or users_info my $currentqueue = undef; # Resets queue-specific global variables if # the queue has changed since the last call sub init_globals($) { my $qname = shift; if (not defined $currentqueue or $currentqueue ne $qname) { $currentqueue = $qname; %lrms_queue = (); $running = undef; } } ########################################## # Private subs ########################################## sub db_conn($){ my $config=shift; my $DB_HOST=$$config{boinc_db_host}; my $DB_PORT=$$config{boinc_db_port}; my $DB_NAME=$$config{boinc_db_name}; my $DB_USER=$$config{boinc_db_user}; my $DB_PASS=$$config{boinc_db_pass}; my $dbh = DBI->connect("DBI:mysql:$DB_NAME;host=$DB_HOST:$DB_PORT","$DB_USER","$DB_PASS",{RaiseError=>1}); return $dbh; } sub get_total_cpus($){ # Total number of hosts that finished jobs recently my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(distinct hostid) from result where server_state=4'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){ # Return a reasonable number to allow bootstrapping projects if($result < 100) {$result = 100;} return $result; } return 0; } sub get_max_cpus($){ # Total number of hosts with running and finished jobs my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(distinct hostid) from result where server_state=4'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){return $result;} else{ return 0;} } sub get_jobs_in_que($){ # Unsent jobs on BOINC server my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(*) from result where server_state in (1,2)'.$where); $sth->execute(); my $result = $sth->fetchrow_array(); if(defined($result)){return $result;} else{ return 0;} } sub get_jobs_in_run($){ # Jobs in progress my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('select count(*) from result where server_state=4'.$where); $sth->execute(); my $result = 
$sth->fetchrow_array(); if(defined($result)){return $result;} else{ return 0;} } sub get_jobs_status($){ # Convert BOINC status into ARC LRMS state for each job my $config=shift; my $dbh = db_conn($config); my $where = ""; if (defined($$config{boinc_app_id})) { $where = " and appid=$$config{boinc_app_id}"; } my $sth = $dbh->prepare('SELECT result.server_state, result.name, user.name FROM result LEFT JOIN user ON result.userid=user.id WHERE server_state in (1,2,4)'.$where); $sth->execute(); my (%jobs_status, @result,$job_status, $job_state, $job_name, $user_name); while(($job_state, $job_name, $user_name) = $sth->fetchrow_array()) { $job_status="Q"; my @tmp=split(/_/,$job_name); $job_name=$tmp[0]; if($job_state==4){$job_status="R";} # Strip out non-ascii $user_name =~ s/[^[:ascii:]]/_/g if $user_name; $jobs_status{$job_name}=[$job_status, $user_name]; } return \%jobs_status; } ############################################ # Public subs ############################################# sub cluster_info ($) { my ($config) = shift; my (%lrms_cluster); $lrms_cluster{lrms_type} = "boinc"; $lrms_cluster{lrms_version} = "1"; # only enforcing per-process cputime limit $lrms_cluster{has_total_cputime_limit} = 0; my ($total_cpus) = get_total_cpus($config); my ($max_cpus) = get_max_cpus($config); $lrms_cluster{totalcpus} = $total_cpus; $lrms_cluster{cpudistribution} = $lrms_cluster{totalcpus}."cpu:1"; my $que_jobs = get_jobs_in_que($config); my $run_jobs = get_jobs_in_run($config); $lrms_cluster{usedcpus} = $run_jobs; $lrms_cluster{runningjobs} = $lrms_cluster{usedcpus}; $lrms_cluster{queuedcpus} = $max_cpus-$total_cpus; $lrms_cluster{queuedjobs} = $que_jobs; $lrms_cluster{queue} = [ ]; return %lrms_cluster; } sub queue_info ($$) { my ($config) = shift; my ($qname) = shift; init_globals($qname); my ($total_cpus) = get_total_cpus($config); my ($max_cpus) = get_max_cpus($config); my $que_jobs = get_jobs_in_que($config); my $running = get_jobs_in_run($config); if (defined $running) { # job_info was already called, we know exactly how many grid jobs # are running $lrms_queue{running} = $running; } else { # assuming that the submitted grid jobs are cpu hogs, approximate # the number of running jobs with the number of running processes $lrms_queue{running}= 0; } $lrms_queue{totalcpus} = $total_cpus; $lrms_queue{status} = $lrms_queue{totalcpus}-$lrms_queue{running}; # reserve negative numbers for error states # Fork is not real LRMS, and cannot be in error state if ($lrms_queue{status}<0) { debug("lrms_queue{status} = $lrms_queue{status}"); $lrms_queue{status} = 0; } my $job_limit; $job_limit = 1000; $lrms_queue{maxrunning} = $job_limit; $lrms_queue{maxuserrun} = $job_limit; $lrms_queue{maxqueuable} = $job_limit; $lrms_queue{maxcputime} = ""; $lrms_queue{queued} = $que_jobs; $lrms_queue{mincputime} = ""; $lrms_queue{defaultcput} = ""; $lrms_queue{minwalltime} = ""; $lrms_queue{defaultwallt} = ""; $lrms_queue{maxwalltime} = $lrms_queue{maxcputime}; return %lrms_queue; } sub jobs_info ($$@) { my ($config) = shift; my ($qname) = shift; my ($jids) = shift; init_globals($qname); my (%lrms_jobs,$jstatus); $jstatus=get_jobs_status($config); foreach my $id (@$jids){ # Real hostname will be published when job finishes my $host = 'unknown@unknown'; if ($$jstatus{$id} and $$jstatus{$id}[1]) { $host = $$jstatus{$id}[1].'@unknown'; } $lrms_jobs{$id}{nodes} = [ $host ]; $lrms_jobs{$id}{mem} = 2000000000; $lrms_jobs{$id}{walltime} = ""; $lrms_jobs{$id}{cputime} = ""; $lrms_jobs{$id}{comment} = [ "LRMS: Running under boinc" ]; 
$lrms_jobs{$id}{reqwalltime} = ""; $lrms_jobs{$id}{reqcputime} = ""; $lrms_jobs{$id}{rank} = "0"; # Fix cores to 1 since volunteers download 1 task per core $lrms_jobs{$id}{cpus} = "1"; if(! exists $$jstatus{$id}) { $lrms_jobs{$id}{status} = "O"; } elsif($$jstatus{$id}[0] eq "R") { $lrms_jobs{$id}{status} = "R"; } elsif($$jstatus{$id}[0] eq "Q") { $lrms_jobs{$id}{status} = "Q"; } else { $lrms_jobs{$id}{status} = "O"; } } return %lrms_jobs; } sub users_info($$@) { my ($config) = shift; my ($qname) = shift; my ($accts) = shift; init_globals($qname); my (%lrms_users); # freecpus # queue length if ( ! exists $lrms_queue{status} ) { %lrms_queue = queue_info( $config, $qname ); } foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $lrms_queue{maxuserrun} - $lrms_queue{running}; $lrms_users{$u}{queuelength} = "$lrms_queue{queued}"; } return %lrms_users; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/XmlPrinter.pm0000644000000000000000000000013215067751327024334 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.868493696 30 ctime=1759499029.828385756 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/XmlPrinter.pm0000644000175000002070000000567215067751327026250 0ustar00mockbuildmock00000000000000package XmlPrinter; sub new { my ($this, $handle) = @_; my $class = ref($this) || $this; binmode $handle, ':encoding(utf8)'; #print $handle '<?xml version="1.0" encoding="UTF-8"?>'."\n"; my $self = {fh => $handle, indent => '', disablecnt => 0}; return bless $self, $class; } sub disableOut { my ($self) = @_; $self->{disablecnt} = $self->{disablecnt} + 1; } sub enableOut { my ($self) = @_; $self->{disablecnt} = $self->{disablecnt} - 1; } sub escape { my ($chars) = @_; $chars =~ s/&/&amp;/g; $chars =~ s/>/&gt;/g; $chars =~ s/</&lt;/g; return $chars; } sub header { my ($self) = @_; my $fh = $self->{fh}; print $fh '<?xml version="1.0" encoding="UTF-8"?>'."\n"; } sub begin { my ($self, $name, $data, @attributes) = @_; if ($self->{disablecnt} == 0) { my $fh = $self->{fh}; if (not @attributes) { print $fh $self->{indent}."<$name>\n"; } else { die "$name: Not a HASH reference" unless ref $data eq 'HASH'; print $fh $self->{indent}."<$name"; for my $attr (@attributes) { my $val = $data->{$attr}; print $fh " $attr=\"$val\"" if defined $val; } print $fh ">\n"; } } $self->{indent} .= '  '; } sub end { my ($self, $name) = @_; chop $self->{indent}; chop $self->{indent}; if ($self->{disablecnt} == 0) { my $fh = $self->{fh}; print $fh $self->{indent}."</$name>\n"; } } sub property { my ($self, $prop, $val) = @_; if ($self->{disablecnt} == 0) { my $indent = $self->{indent}; my $fh = $self->{fh}; return unless defined $val; if (not ref $val) { print $fh "$indent<$prop>".escape($val)."</$prop>\n"; } elsif (ref $val eq 'ARRAY') { print $fh "$indent<$prop>".escape($_)."</$prop>\n" for @$val; } else { die "$prop: Not an ARRAY reference"; } } } sub properties { my ($self, $data, @props) = @_; if ($self->{disablecnt} == 0) { my $indent = $self->{indent}; my $fh = $self->{fh}; for my $prop (@props) { my $val = $data->{$prop}; next unless defined $val; if (not ref $val) { print $fh "$indent<$prop>".escape($val)."</$prop>\n"; } elsif (ref $val eq 'ARRAY') { print $fh "$indent<$prop>".escape($_)."</$prop>\n" for @$val; } else { die "$prop: Not an ARRAY reference"; } } } } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test { my $printer = XmlPrinter->new(*STDOUT); my $data = { xmlns => "blah/blah", date => "today" }; $printer->header(); $printer->begin("Persons", $data, qw( date )); $data = { id => "1", name => "James", nick => "Jimmy" }; $printer->begin("Person", $data, "id"); $printer->properties($data, qw( name nick )); $printer->end("Person"); $printer->end("Persons"); } #test; 1;
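# For reference, test() above should emit roughly the following document
# (two-space indentation per nesting level, as built up by begin/end;
# a sketch, assuming the escape/header behaviour shown above):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <Persons date="today">
#     <Person id="1">
#       <name>James</name>
#       <nick>Jimmy</nick>
#     </Person>
#   </Persons>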
nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/InfoChecker.pm0000644000000000000000000000013115067751327024407 xustar0029 mtime=1759498967.75949204 30 atime=1759498967.867493681 30 ctime=1759499029.821182452 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/InfoChecker.pm0000644000175000002070000001476415067751327026326 0ustar00mockbuildmock00000000000000package InfoChecker; use base 'Exporter'; use strict; # Class to check that a data structure conforms to a schema. Data and schema # are both nested perl structures consisting of hashes and arrays nested to any # depth. This function will check that data and schema have the same nesting # structure. For hashes, all required keys in the schema must also be defined # in the data. A "*" value in the schema marks that key optional. A "*" key in # the schema matches all unmatched keys in the data (if any). Arrays in the # schema should have exactly one element, and this element will be matched # against all elements in the corresponding array in the data. # To make a named hash reference optional, but still write all the hash # keys in the schema, use: # enable => '*' means the whole hash is optional # Constructor # # Arguments: # $schema - reference to the schema structure sub new($$) { my ($this,$schema) = @_; my $class = ref($this) || $this; die "Schema not a reference" unless ref($schema); my $self = {schema => $schema}; bless $self, $class; return $self; } # # Arguments: # $data - reference to a data structure that should be checked # $strict - (optional) if true, extra hash keys in data will be reported. # Otherwise only missing keys are reported. # # Returns: # @errors - list of error messages, one for each mismatch found during # checking sub verify($$;$) { my ($self,$data,$strict) = @_; $self->{strict} = $strict; $self->{errors} = []; $self->_verify_part("",$data,$self->{schema}); return @{$self->{errors}}; } sub _verify_part($$$$); # prototype it, because it's a recursive function sub _verify_part($$$$) { my ($self,$subject,$data,$schema) = @_; unless (defined $data) { push @{$self->{errors}}, "$subject is undefined"; return 1; # tell caller this entry can be deleted } unless ( ref($data) eq ref($schema) ) { my $type = ref($schema) ? 
ref($schema) : "SCALAR"; push @{$self->{errors}}, "$subject has wrong type, $type expected"; return 1; # tell caller this entry can be deleted } # process a hash reference if ( ref($schema) eq "HASH" ) { # deal with hash keys other than '*' my @templkeys = grep { $_ ne "*" } keys %$schema; for my $key ( sort @templkeys ) { my $subj = $subject."{$key}"; if ( defined $data->{$key} ) { # check that existing key value is valid my $can_delete = $self->_verify_part($subj, $data->{$key}, $schema->{$key}); # delete it if it's not valid if ($can_delete and $self->{strict}) { push @{$self->{errors}}, "$subj deleting it"; delete $data->{$key}; } } elsif ($schema->{$key} eq "*") { # do nothing: # this missing key is optional } elsif (ref($schema->{$key}) eq "ARRAY" and $schema->{$key}[0] eq "*") { # do nothing: # this missing key is optional, it points to optional array } elsif (ref($schema->{$key}) eq "HASH") { if ( ( keys(%{$schema->{$key}}) == 1 and exists $schema->{$key}{'*'} ) or ( exists $schema->{$key}{'enabled'} and $schema->{$key}{'enabled'} eq "*" ) ) { # do nothing: # this missing key is optional, it points to optional hash } } else { push @{$self->{errors}}, "$subj is missing"; } } # deal with '*' hash key in schema if ( grep { $_ eq "*" } keys %$schema ) { for my $datakey ( sort keys %$data ) { # skip keys that have been checked already next if grep { $datakey eq $_ } @templkeys; my $subj = $subject."{$datakey}"; # check that the key's value is valid my $can_delete = $self->_verify_part($subj, $data->{$datakey}, $schema->{"*"}); # delete it if it's not valid if ($can_delete and $self->{strict}) { push @{$self->{errors}}, "$subj deleting it"; delete $data->{$datakey}; } } # no '*' key in schema, reverse checking may be performed } elsif ($self->{strict}) { for my $datakey ( sort keys %$data) { my $subj = $subject."{$datakey}"; unless (exists $schema->{$datakey}) { push @{$self->{errors}}, "$subj is not recognized or not used by infoproviders"; push @{$self->{errors}}, "$subj deleting it"; delete $data->{$datakey}; } } } # process an array reference } elsif ( ref($schema) eq "ARRAY" ) { for ( my $i=0; $i < @$data; $i++ ) { my $subj = $subject."[$i]"; # check that data element is valid my $can_delete = $self->_verify_part($subj, $data->[$i], $schema->[0]); # delete it if it's not valid if ($can_delete and $self->{strict}) { push @{$self->{errors}}, "$subj deleting it"; splice @$data, $i, 1; --$i; } } # process a scalar: nothing to do here } elsif ( not ref($data)) { # nothing else than scalars and HASH and ARRAY references are allowed in # the schema } else { my $type = ref($schema); push @{$self->{errors}}, "$subject bad value in schema, ref($type) not allowed"; } return 0; } #### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST #### sub test() { my $schema = { totalcpus => '', freecpus => '', jobs => { '*' => { owner => '' } }, users => [ { dn => '' } ] }; my $data = { freecpus => undef, jobs => { id1 => { owner => 'val' }, id2 => 'something else' }, users => [{dn => 'joe', extra => 'dummy'}, 'bad user', { }] }; require Data::Dumper; import Data::Dumper; print "Before: ",Dumper($data); print "Checker: options->$_\n" foreach InfoChecker->new($schema)->verify($data,1); print "After: ",Dumper($data); } #test; 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/schema0000644000000000000000000000013015067751425023052 xustar0030 mtime=1759499029.796625434 28 atime=1759499034.7655102 30 ctime=1759499029.796625434 
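For orientation, a minimal sketch of how InfoChecker.pm (above) can be used; the schema and data here are illustrative, not the real infoprovider options schema:

    use InfoChecker;
    # '*' as a value marks a key optional; a '*' key matches any remaining data
    # keys; an array in the schema matches every element of the data array.
    my $schema = { running => '', queued => '*',
                   jobs  => { '*' => { status => '' } },
                   nodes => [ { name => '' } ] };
    my $data = { running => 5,
                 jobs  => { job1 => { status => 'R' }, job2 => {} },
                 nodes => [ { name => 'n1', speed => 1000 } ] };
    # Second argument enables strict mode: extra keys are reported and deleted.
    print "Checker: options->$_\n" for InfoChecker->new($schema)->verify($data, 1);
    # reports, e.g.:
    #   Checker: options->{jobs}{job2}{status} is missing
    #   Checker: options->{nodes}[0]{speed} is not recognized or not used by infoproviders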
nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/schema/0000755000175000002070000000000015067751425025033 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/schema/PaxHeaders/nordugrid.schema0000644000000000000000000000013215067751327026311 xustar0030 mtime=1759498967.762492086 30 atime=1759498967.868493696 30 ctime=1759499029.797765349 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/schema/nordugrid.schema0000644000175000002070000013300315067751327030213 0ustar00mockbuildmock00000000000000#--------------------------------------------------------- # These classes and attributes are imported from globus mds # only slightly modified attributetype ( 1.3.6.1.4.1.11604.2.1.8.1.4.1.0.1 NAME 'Mds-Vo-Op-name' DESC 'Locally unique Op name' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.8.1.4.1 NAME 'MdsVoOp' SUP 'Mds' STRUCTURAL MUST Mds-Vo-Op-name ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.1 NAME 'Mds-Service-type' DESC 'Service protocol' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.2 NAME 'Mds-Service-protocol' DESC 'Service protocol OID' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.3 NAME 'Mds-Service-port' DESC 'Service TCP port' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.4 NAME 'Mds-Service-hn' DESC 'Service FQDN hostname' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.0.5 NAME 'Mds-Service-url' DESC 'Service URL' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.8.2.7.1 NAME 'MdsService' SUP 'Mds' MUST ( Mds-Service-type $ Mds-Service-port $ Mds-Service-hn ) MAY ( Mds-Service-protocol $ Mds-Service-url ) ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.1 NAME 'Mds-Service-Ldap-suffix' DESC 'DN suffix of service' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.2 NAME 'Mds-Service-Ldap-timeout' DESC 'suggested timeout' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.3 NAME 'Mds-Service-Ldap-sizelimit' DESC 'suggested sizelimit' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.4 NAME 'Mds-Service-Ldap-cachettl' DESC 'suggested cacheability' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.5 NAME 'Mds-Service-Ldap-ttl' DESC 'suggested ttl' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR 
caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.10 NAME 'Mds-Reg-status' DESC 'VALID/INVALID/PURGED' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1.0.11 NAME 'Mds-Bind-Method-servers' DESC 'AUTHC-ONLY/AUTHC-FIRST/ANONYM-ONLY' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.8.2.7.1.1 NAME 'MdsServiceLdap' SUP 'MdsService' MUST Mds-Service-Ldap-suffix MAY ( Mds-Service-Ldap-timeout $ Mds-Service-Ldap-sizelimit $ Mds-Service-Ldap-cachettl $ Mds-Service-Ldap-ttl $ Mds-Reg-status $ Mds-Bind-Method-servers ) ) # attributes for the nordugrid-cluster objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.1.1 NAME 'nordugrid-cluster-name' DESC 'The name of the cluster specified as the domain name of the frontend' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.2 NAME 'nordugrid-cluster-aliasname' DESC 'The alias name of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) attributetype ( 1.3.6.1.4.1.11604.2.1.1.3 NAME 'nordugrid-cluster-contactstring' DESC 'The URL of the job submission service running on the cluster frontend' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.4 NAME 'nordugrid-cluster-support' DESC 'RFC822 email address of support' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256}) attributetype ( 1.3.6.1.4.1.11604.2.1.1.5 NAME 'nordugrid-cluster-lrms-type' DESC 'The type of the Local Resource Management System' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.6 NAME 'nordugrid-cluster-lrms-version' DESC 'The version of the Local Resource Management System' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.7 NAME 'nordugrid-cluster-lrms-config' DESC 'Additional remark on the LRMS config' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.8 NAME 'nordugrid-cluster-architecture' DESC 'The architecture of the cluster nodes' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.9 NAME 'nordugrid-cluster-opsys' DESC 'The operating system of the machines of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15) attributetype ( 1.3.6.1.4.1.11604.2.1.1.10 NAME 'nordugrid-cluster-homogeneity' DESC 'A logical flag indicating the homogeneity of the cluster nodes' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.11 NAME 'nordugrid-cluster-nodecpu' DESC 'The cpu 
type of the nodes expressed in a fixed form (model name + MHz)' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.12 NAME 'nordugrid-cluster-nodememory' DESC 'The amount of memory which can be guaranteed to be available on the node in MB' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.13 NAME 'nordugrid-cluster-totalcpus' DESC 'The total number of cpus in the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.14 NAME 'nordugrid-cluster-cpudistribution' DESC 'The cpu distribution of the nodes given in the form of 1cpu:3 2cpu:4 4cpu:1' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.15 NAME 'nordugrid-cluster-sessiondir-free' DESC 'Free diskspace in MB of the sessiondirectory on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.16 NAME 'nordugrid-cluster-sessiondir-total' DESC 'Total diskspace in MB of the sessiondirectory on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.17 NAME 'nordugrid-cluster-cache-free' DESC 'Free diskspace in MB of the cache area on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.18 NAME 'nordugrid-cluster-cache-total' DESC 'Total diskspace in MB of the cache area on the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.19 NAME 'nordugrid-cluster-runtimeenvironment' DESC 'preinstalled software packages of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.20 NAME 'nordugrid-cluster-localse' DESC 'The URL of a storage element considered to be local to the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.21 NAME 'nordugrid-cluster-middleware' DESC 'The middleware packages on the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.22 NAME 'nordugrid-cluster-totaljobs' DESC 'The total number of jobs (Grid + non-Grid) in the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.23 NAME 'nordugrid-cluster-usedcpus' DESC 'The total number of occupied cpus in the cluster' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.24 NAME 
'nordugrid-cluster-queuedjobs' DESC 'The total number of jobs (grid and non-grid) not-yet running: preparing or waiting to run on the cluster, either in the grid-manager or in the LRMS. The attribute is TO BE DEPRECATED' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.25 NAME 'nordugrid-cluster-location' DESC 'The geographical location of the cluster expressed in terms of a Postal ZIP code' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.26 NAME 'nordugrid-cluster-owner' DESC 'The owner of the resource' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.27 NAME 'nordugrid-cluster-issuerca' DESC 'The DN of the Certificate Authority which issued the certificate of the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.28 NAME 'nordugrid-cluster-nodeaccess' DESC 'The inbound/outbound network accessibility of the nodes' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.29 NAME 'nordugrid-cluster-comment' DESC 'Free form comment' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.30 NAME 'nordugrid-cluster-interactive-contactstring' DESC 'The URL for interactive login' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.31 NAME 'nordugrid-cluster-benchmark' DESC '@ separated benchmark_name, benchmark_value pair characterizing the cluster nodes' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.32 NAME 'nordugrid-cluster-sessiondir-lifetime' DESC 'The lifetime of the sessiondir after the job has completed (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.33 NAME 'nordugrid-cluster-prelrmsqueued' DESC 'The total number of grid jobs not-yet reached the LRMS: preparing or queuing in the grid-layer' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.34 NAME 'nordugrid-cluster-issuerca-hash' DESC 'The HASH of the Certificate Authority which issued the certificate for the cluster' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.35 NAME 'nordugrid-cluster-trustedca' DESC 'The DN of a Certificate Authority trusted by the cluster' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.36 NAME 'nordugrid-cluster-acl' DESC 'Cluster authorization information' EQUALITY caseExactMatch ORDERING 
caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.1.37 NAME 'nordugrid-cluster-credentialexpirationtime' DESC 'The expiration date of the shortest living credential affecting the cluster\27s x509 environment in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.1 NAME 'nordugrid-cluster' DESC 'Description of a Nordugrid cluster' SUP 'Mds' STRUCTURAL MUST ( nordugrid-cluster-name $ nordugrid-cluster-contactstring ) MAY ( nordugrid-cluster-aliasname $ nordugrid-cluster-support $ nordugrid-cluster-lrms-type $ nordugrid-cluster-lrms-version $ nordugrid-cluster-lrms-config $ nordugrid-cluster-architecture $ nordugrid-cluster-opsys $ nordugrid-cluster-homogeneity $ nordugrid-cluster-nodecpu $ nordugrid-cluster-nodememory $ nordugrid-cluster-totalcpus $ nordugrid-cluster-cpudistribution $ nordugrid-cluster-sessiondir-free $ nordugrid-cluster-sessiondir-total $ nordugrid-cluster-cache-free $ nordugrid-cluster-cache-total $ nordugrid-cluster-runtimeenvironment $ nordugrid-cluster-localse $ nordugrid-cluster-middleware $ nordugrid-cluster-totaljobs $ nordugrid-cluster-usedcpus $ nordugrid-cluster-queuedjobs $ nordugrid-cluster-location $ nordugrid-cluster-owner $ nordugrid-cluster-issuerca $ nordugrid-cluster-nodeaccess $ nordugrid-cluster-comment $ nordugrid-cluster-interactive-contactstring $ nordugrid-cluster-benchmark $ nordugrid-cluster-sessiondir-lifetime $ nordugrid-cluster-prelrmsqueued $ nordugrid-cluster-issuerca-hash $ nordugrid-cluster-trustedca $ nordugrid-cluster-acl $ nordugrid-cluster-credentialexpirationtime )) #----------------------------------------------------------------- # attributes for the nordugrid-info-group objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.2.1 NAME 'nordugrid-info-group-name' DESC 'Locally unique info group name' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) objectclass ( 1.3.6.1.4.1.11604.2.1.2 NAME 'nordugrid-info-group' DESC 'A general entry for grouping together MDS entries' SUP 'Mds' STRUCTURAL MUST ( nordugrid-info-group-name )) #----------------------------------------------------------------- # attributes for the nordugrid-queue objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.3.1 NAME 'nordugrid-queue-name' DESC 'The queue name' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.2 NAME 'nordugrid-queue-status' DESC 'The queue status' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.3 NAME 'nordugrid-queue-running' DESC 'Number of running jobs (Grid + non-Grid) in the queue with multi-node jobs multiplicity' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.4 NAME 'nordugrid-queue-queued' DESC 'The number of jobs (Grid + non-Grid) waiting in the queue.
The attribute is TO BE DEPRECATED' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.5 NAME 'nordugrid-queue-maxrunning' DESC 'The maximum number of jobs allowed to run from this queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.6 NAME 'nordugrid-queue-maxqueuable' DESC 'The maximum number of jobs allowed to reside in the queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.7 NAME 'nordugrid-queue-maxuserrun' DESC 'Maximum number of jobs a user can run at the same time' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.8 NAME 'nordugrid-queue-maxcputime' DESC 'The maximum cputime allowed in this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.9 NAME 'nordugrid-queue-mincputime' DESC 'The minimum possible cputime of this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.10 NAME 'nordugrid-queue-defaultcputime' DESC 'The default cputime assigned to this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.11 NAME 'nordugrid-queue-schedulingpolicy' DESC 'The scheduling policy of the queue (i.e. 
FIFO)' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.12 NAME 'nordugrid-queue-totalcpus' DESC 'Total number of cpus assigned to the queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.13 NAME 'nordugrid-queue-nodecpu' DESC 'The cpu type of the nodes assigned to the queue (model name + MHz)' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.14 NAME 'nordugrid-queue-nodememory' DESC 'The installed memory of a node assigned to the queue in MB' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.15 NAME 'nordugrid-queue-architecture' DESC 'The architecture of the machines in the queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.16 NAME 'nordugrid-queue-opsys' DESC 'The operating system of the nodes in the queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15) attributetype ( 1.3.6.1.4.1.11604.2.1.3.17 NAME 'nordugrid-queue-gridrunning' DESC 'Number of running Grid jobs in the queue with multi-node jobs multiplicity' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.18 NAME 'nordugrid-queue-gridqueued' DESC 'The number of Grid jobs waiting in the LRMS queue' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.19 NAME 'nordugrid-queue-comment' DESC 'Free form comment' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.20 NAME 'nordugrid-queue-benchmark' DESC 'Colon separated benchmark_name, benchmark_value pair characterizing the queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.21 NAME 'nordugrid-queue-homogeneity' DESC 'A logical flag indicating the homogeneity of the queue nodes' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.22 NAME 'nordugrid-queue-prelrmsqueued' DESC 'The number of Grid jobs belonging to this queue being processed or waiting in the Grid-layer before the LRMS submission.' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.23 NAME 'nordugrid-queue-localqueued' DESC 'The number of non-Grid jobs waiting in the LRMS queue.'
EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.24 NAME 'nordugrid-queue-maxwalltime' DESC 'The maximum walltime allowed in this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.25 NAME 'nordugrid-queue-minwalltime' DESC 'The minimum possible walltime of this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.26 NAME 'nordugrid-queue-defaultwalltime' DESC 'The default walltime assigned to this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.27 NAME 'nordugrid-queue-maxtotalcputime' DESC 'The maximum total cputime allowed in this queue (in minutes)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.3.28 NAME 'nordugrid-queue-acl' DESC 'Queue authorization information' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.3 NAME 'nordugrid-queue' DESC 'An LRMS queue' SUP 'Mds' STRUCTURAL MUST ( nordugrid-queue-name $ nordugrid-queue-status) MAY ( nordugrid-queue-running $ nordugrid-queue-queued $ nordugrid-queue-maxrunning $ nordugrid-queue-maxqueuable$ nordugrid-queue-maxuserrun $ nordugrid-queue-maxcputime $ nordugrid-queue-mincputime $ nordugrid-queue-defaultcputime $ nordugrid-queue-schedulingpolicy $ nordugrid-queue-totalcpus $ nordugrid-queue-nodecpu $ nordugrid-queue-nodememory $ nordugrid-queue-opsys $ nordugrid-queue-architecture $ nordugrid-queue-gridrunning $ nordugrid-queue-gridqueued $ nordugrid-queue-comment $ nordugrid-queue-benchmark $ nordugrid-queue-homogeneity $ nordugrid-queue-prelrmsqueued $ nordugrid-queue-localqueued $ nordugrid-queue-maxwalltime $ nordugrid-queue-minwalltime $ nordugrid-queue-defaultwalltime $ nordugrid-queue-maxtotalcputime $ nordugrid-queue-acl)) #----------------------------------------------------------------- #attributes for the nordugrid-job objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.4.1 NAME 'nordugrid-job-globalid' DESC 'The global job identifier string' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.2 NAME 'nordugrid-job-globalowner' DESC 'An identifier of the job owner' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.3 NAME 'nordugrid-job-execcluster' DESC 'The name of the execution cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) attributetype ( 1.3.6.1.4.1.11604.2.1.4.4 NAME 'nordugrid-job-execqueue' DESC 'The name of the execution queue' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 
1.3.6.1.4.1.11604.2.1.4.5 NAME 'nordugrid-job-stdout' DESC 'The name of the file which contains the stdout' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.6 NAME 'nordugrid-job-stderr' DESC 'The name of the file which contains the stderr' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.7 NAME 'nordugrid-job-stdin' DESC 'The name of the file which contains the stdin' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.8 NAME 'nordugrid-job-reqcputime' DESC 'The cputime requested by the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.9 NAME 'nordugrid-job-status' DESC 'The status of the grid job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.10 NAME 'nordugrid-job-queuerank' DESC 'The queue position of the job' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.11 NAME 'nordugrid-job-comment' DESC 'Free form comment about the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.12 NAME 'nordugrid-job-submissionui' DESC 'The name of the UI from where the job was submitted' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.13 NAME 'nordugrid-job-submissiontime' DESC 'The submission time of the job in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.14 NAME 'nordugrid-job-usedcputime' DESC 'The consumed cputime of the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.15 NAME 'nordugrid-job-usedwalltime' DESC 'The consumed walltime of the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.16 NAME 'nordugrid-job-sessiondirerasetime' DESC 'The date when the session dir will be deleted in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.17 NAME 'nordugrid-job-usedmem' DESC 'The memory usage of the job (in KB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.18 NAME 'nordugrid-job-errors' DESC 'Error messages from the cluster' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX
1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.19 NAME 'nordugrid-job-jobname' DESC 'The jobname specified by the user with the jobname RSL attribute' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.20 NAME 'nordugrid-job-runtimeenvironment' DESC 'The runtimeenvironment requested by the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.21 NAME 'nordugrid-job-cpucount' DESC 'The number of CPUs requested by the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.22 NAME 'nordugrid-job-executionnodes' DESC 'The list of nodenames where the job is running' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.23 NAME 'nordugrid-job-gmlog' DESC 'The name of the directory which contains the grid session related logs' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.24 NAME 'nordugrid-job-clientsoftware' DESC 'The client software which submitted the job' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15) attributetype ( 1.3.6.1.4.1.11604.2.1.4.25 NAME 'nordugrid-job-proxyexpirationtime' DESC 'The expiration time of the proxy of the job in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.26 NAME 'nordugrid-job-completiontime' DESC 'The completion time of the grid job in GMT' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.27 NAME 'nordugrid-job-exitcode' DESC 'The exit code of the executable of the job obtained from the LRMS' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.28 NAME 'nordugrid-job-rerunable' DESC 'Rerunability of the FAILED grid jobs' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.4.29 NAME 'nordugrid-job-reqwalltime' DESC 'The requested wallclock time of the job in minutes' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.4 NAME 'nordugrid-job' DESC 'A Grid job' SUP 'Mds' STRUCTURAL MUST ( nordugrid-job-globalid $ nordugrid-job-globalowner $ nordugrid-job-status ) MAY ( nordugrid-job-queuerank $ nordugrid-job-submissionui $ nordugrid-job-submissiontime $ nordugrid-job-usedcputime $ nordugrid-job-usedwalltime $ nordugrid-job-usedmem $ nordugrid-job-comment $ nordugrid-job-execcluster $ nordugrid-job-execqueue $ nordugrid-job-stdout $ nordugrid-job-stderr $
nordugrid-job-stdin $ nordugrid-job-sessiondirerasetime $ nordugrid-job-reqcputime $ nordugrid-job-errors $ nordugrid-job-jobname $ nordugrid-job-runtimeenvironment $ nordugrid-job-cpucount $ nordugrid-job-executionnodes $ nordugrid-job-gmlog $ nordugrid-job-clientsoftware $ nordugrid-job-proxyexpirationtime $ nordugrid-job-completiontime $ nordugrid-job-exitcode $ nordugrid-job-rerunable $ nordugrid-job-reqwalltime)) #---------------------------------------------------------------- # attributes for the nordugrid-authuser objectclass # attributetype ( 1.3.6.1.4.1.11604.2.1.5.1 NAME 'nordugrid-authuser-name' DESC 'The Common Name of the authorized user plus a local unique number' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.2 NAME 'nordugrid-authuser-sn' DESC 'The SubjectName of the authorized user' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.3 NAME 'nordugrid-authuser-freecpus' DESC 'The number of freely available cpus with their timelimits in minutes for a user in the queue. Given in the form ncpus:min, min is optional (example: 2 4:25 5:180)' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.4 NAME 'nordugrid-authuser-diskspace' DESC 'The free diskspace available for the job (in MB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.5.5 NAME 'nordugrid-authuser-queuelength' DESC 'The number of queuing jobs of a particular user, both queuing in the LRMS and in the Grid-layer' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) objectclass ( 1.3.6.1.4.1.11604.2.1.5 NAME 'nordugrid-authuser' DESC 'An authorised user of a NorduGrid cluster' SUP 'Mds' STRUCTURAL MUST ( nordugrid-authuser-name $ nordugrid-authuser-sn ) MAY ( nordugrid-authuser-queuelength $ nordugrid-authuser-diskspace $ nordugrid-authuser-freecpus )) #---------------------------------------------------------------- # # nordugrid-se attributetype ( 1.3.6.1.4.1.11604.2.1.6.1 NAME 'nordugrid-se-name' DESC 'The name of the Storage Element' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.2 NAME 'nordugrid-se-aliasname' DESC 'The alias name of the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.3 NAME 'nordugrid-se-type' DESC 'The type of the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.4 NAME 'nordugrid-se-freespace' DESC 'The free space available in the SE (in MB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.5 NAME 'nordugrid-se-url' DESC 'The URL to contact the Storage Element' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) 
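As a consumer-side aside (not part of the schema file itself): entries published under these objectclasses can be queried with the perl-ldap distribution. A minimal sketch, assuming an ARC LDAP infosys listening on the conventional port 2135 with the classic Mds-Vo-name=local,o=grid base; the host name is a placeholder:

    use Net::LDAP;
    my $ldap = Net::LDAP->new('ldap://arc.example.org:2135') or die "$@";
    $ldap->bind;    # the infosys typically permits anonymous binds
    my $mesg = $ldap->search(
        base   => 'Mds-Vo-name=local,o=grid',
        filter => '(objectClass=nordugrid-queue)',
        attrs  => [ qw( nordugrid-queue-name nordugrid-queue-status
                        nordugrid-queue-gridrunning nordugrid-queue-gridqueued ) ],
    );
    die $mesg->error if $mesg->code;
    for my $entry ($mesg->entries) {
        # print one summary line per published queue entry
        printf "%s: %s (gridrunning=%s gridqueued=%s)\n",
               map { $entry->get_value($_) // '-' }
                   qw( nordugrid-queue-name nordugrid-queue-status
                       nordugrid-queue-gridrunning nordugrid-queue-gridqueued );
    }
    $ldap->unbind;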
attributetype ( 1.3.6.1.4.1.11604.2.1.6.6 NAME 'nordugrid-se-authuser' DESC 'The DN of an authorized user' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.7 NAME 'nordugrid-se-location' DESC 'The geographical location of the SE expressed in terms of a Postal ZIP code: SE-22363' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.8 NAME 'nordugrid-se-owner' DESC 'The owner of the resource' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.9 NAME 'nordugrid-se-issuerca' DESC 'The DN of the Certificate Authority which issued the certificate of the SE' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.10 NAME 'nordugrid-se-totalspace' DESC 'The total capacity of the SE (in MB)' EQUALITY integerMatch ORDERING integerOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.11 NAME 'nordugrid-se-middleware' DESC 'The middleware packages on the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.12 NAME 'nordugrid-se-comment' DESC 'Free form comment' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.13 NAME 'nordugrid-se-accesscontrol' DESC 'The access control framework of the SE' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.14 NAME 'nordugrid-se-issuerca-hash' DESC 'The HASH of the Certificate Authority which issued the certificate for the SE' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.15 NAME 'nordugrid-se-trustedca' DESC 'The DN of a Certificate Authority trusted by the SE' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.6.16 NAME 'nordugrid-se-acl' DESC 'Storage Element authorization information' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.6 NAME 'nordugrid-se' DESC 'A storage element in the Nordugrid' SUP 'Mds' STRUCTURAL MUST ( nordugrid-se-name $ nordugrid-se-url) MAY ( nordugrid-se-aliasname $ nordugrid-se-type $ nordugrid-se-freespace $ nordugrid-se-authuser $ nordugrid-se-location $ nordugrid-se-owner $ nordugrid-se-issuerca $ nordugrid-se-totalspace $ nordugrid-se-middleware $ nordugrid-se-comment $ nordugrid-se-accesscontrol $ nordugrid-se-issuerca-hash $ nordugrid-se-trustedca $ nordugrid-se-acl )) #-------------------------------------------------------------------- # nordugrid-rc # attributetype ( 1.3.6.1.4.1.11604.2.1.7.1 NAME 'nordugrid-rc-name' DESC 'The domain name of the machine 
hosting the Replica Catalog' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.2 NAME 'nordugrid-rc-aliasname' DESC 'The alias name of the rc' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.3 NAME 'nordugrid-rc-baseurl' DESC 'The URL of the Replica Catalog' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.4 NAME 'nordugrid-rc-authuser' DESC 'An authorized user of the replica catalog' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.5 NAME 'nordugrid-rc-location' DESC 'The geographical location of the RC expressed in terms of a Postal ZIP code' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.6 NAME 'nordugrid-rc-owner' DESC 'The owner of the resource' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) attributetype ( 1.3.6.1.4.1.11604.2.1.7.7 NAME 'nordugrid-rc-issuerca' DESC 'The DN of the Certificate Authority which issued the certificate of the RC' EQUALITY caseExactMatch ORDERING caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) objectclass ( 1.3.6.1.4.1.11604.2.1.7 NAME 'nordugrid-rc' DESC 'A replica catalogue in the Nordugrid' SUP 'Mds' STRUCTURAL MUST ( nordugrid-rc-name $ nordugrid-rc-baseurl ) MAY ( nordugrid-rc-aliasname $ nordugrid-rc-authuser $ nordugrid-rc-location $ nordugrid-rc-owner $ nordugrid-rc-issuerca ))
nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/LogUtils.pm0000644000000000000000000000013215067751327023772 xustar0030 mtime=1759498967.760492055 30 atime=1759498967.867493681 30 ctime=1759499029.813618735 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/LogUtils.pm0000644000175000002070000001073015067751327025675 0ustar00mockbuildmock00000000000000package LogUtils; # Object-oriented usage example: # # LogUtils::level("VERBOSE"); # $log = LogUtils->getLogger("MyProg.MyClass"); # $log->warning("Oops!"); # $log->error("Can't go on!"); # Procedural usage example: # # start_logging('INFO'); # warning("Oops!"); # error("Can't go on!"); use strict; use warnings; use POSIX; use FileHandle; use File::Basename; use Exporter; our @ISA = ('Exporter'); # Inherit from Exporter our @EXPORT_OK = qw(start_logging error warning info verbose debug); our %names = (FATAL => 0, ERROR => 1, WARNING => 2, INFO => 3, VERBOSE => 4, DEBUG => 5); our $loglevel = 2; # default level is WARNING our $ts_enabled = 0; # by default do not print timestamps our $indented = ""; # do not indent by default our $default_logger = LogUtils->getLogger(basename($0)); # redirect perl warnings to ARC logging format, # and attempt to limit number of warnings if not verbose my %WARNS; $SIG{__WARN__} = sub { my $message = shift; chomp($message); if ( $loglevel < 4 ) { if (exists($WARNS{$message})) { if ($WARNS{$message} == 1) { $default_logger->warning("\'PERL: $message\' repeated more than once, skipping...
set loglevel to VERBOSE to see all messages."); $WARNS{$message} = 2; return; } } else { $default_logger->warning("PERL: $message"); $WARNS{$message} = 1; return; } } else { $default_logger->warning("PERL: $message"); return; } }; # For backwards compatibility sub start_logging($) { level(shift); } # set loglevel for all loggers sub level { return $loglevel unless @_; my $level = shift; if (defined $names{$level}) { $loglevel = $names{$level}; } elsif ($level =~ m/^\d+$/ and $level < keys %names) { $loglevel = $level; } else { fatal("No such loglevel '$level'"); } return $loglevel; } # enable/disable printing of timestamps sub timestamps { return $ts_enabled unless @_; return $ts_enabled = shift() ? 1 : 0; } sub indentoutput { my ($indent) = @_; if ($indent) { $indented = "\t"; } else { $indented = ""; } } # constructor sub getLogger { my $class = shift; my $self = {name => (shift || '')}; bless $self, $class; return $self; } sub debug { return unless $loglevel > 4; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('DEBUG',$msg); } sub verbose { return unless $loglevel > 3; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('VERBOSE',$msg); } sub info { return unless $loglevel > 2; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('INFO',$msg); } sub warning { return unless $loglevel > 1; unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('WARNING',$msg); } # Causes program termination sub error { unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('ERROR',$msg); exit 1; } # Causes program termination sub fatal { unshift(@_, $default_logger) unless ref($_[0]) eq __PACKAGE__; my ($self, $msg) = @_; $self->_log('FATAL',$msg); exit 2; } sub _log { my ($self,$severity,$msg) = @_; my $name = $self->{name}; $name = $name ? "$name" : ""; # strip any newline in the message, substitute with ' \\ ', for nicer error reporting $msg =~ s/[\r\n]/ \\\\ /g; print STDERR $indented.($ts_enabled ? "[".timestamp()."] " : "")."[$name] [$severity] [$$] $msg\n"; } sub timestamp { my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(); return POSIX::strftime("%Y-%m-%d %H:%M:%S", $sec,$min,$hour,$mday, $mon,$year,$wday,$yday,$isdst); } sub test { LogUtils::level('INFO'); LogUtils::timestamps(1); my $log = LogUtils->getLogger(); $log->warning("Hi"); $log = LogUtils->getLogger("main"); $log->warning("Hi"); $log = LogUtils->getLogger("main.sub"); $log->warning("Hi"); $log = LogUtils->getLogger("main.sub.one"); $log->warning("Hi"); LogUtils->getLogger("main.sub.too")->info("Boo"); LogUtils->getLogger("main.sub.too")->debug("Hoo"); } sub test2 { start_logging('VERBOSE'); debug('geee'); info('beee'); warning('meee'); error('mooo'); } #test(); #test2(); 1;
nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/README0000644000000000000000000000013215067751327022552 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.867493681 30 ctime=1759499029.842370064 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/README0000644000175000002070000000503415067751327024456 0ustar00mockbuildmock00000000000000Information providers for A-REX. Currently all information collection for A-REX is done by these scripts. Collected data is presented in 2 formats (classic NG schema and GLUE2 schema) rendered in XML.
CEinfo.pl - driver for information collection.
It calls all other information collectors and prints results in XML.
InfoChecker.pm - used to validate options and results against a simple 'schema' (not XML Schema)
GMJobsInfo.pm - collects information about jobs from grid manager status files
HostInfo.pm - collects other information that can be collected on the front end (hostname, software version, disk space for users, installed certificates, Runtime environments ...)
LRMSInfo.pm - collects information that is LRMS specific (queues, jobs, local user limits ...)
XXXXmod.pm - plugins for LRMSInfo implementing the new LRMS module interface, such as:
FORKmod.pm - Fork information module
SGEmod.pm - SGE information module
SLURMmod.pm - SLURM information module
ARC0mod.pm - special module that loads ARC0 information modules and does the conversion between the old interface and the new one. NOTE: New development should NOT be done against these implementations. They are obsolete and deprecated.
PBS.pm, LL.pm, LSF.pm, Condor.pm, SLURM.pm - ARC0 information modules. NOTE: New development should NOT be done against these implementations. They are obsolete and deprecated.
ARC0ClusterInfo.pm - combines all information about A-REX and produces information structured according to the classic NG schema.
ARC1ClusterInfo.pm - combines all information about A-REX and produces information structured according to the GLUE2 schema.
Extended information: CEinfo.pl uses *Info.pm to read all information. LRMSInfo in turn uses ${LRMS}mod.pm and ${LRMS}.pm to get information. That information is aggregated by CEinfo.pl. CEinfo.pl then gives that data to ARC*ClusterInfo.pm which transforms the output to the appropriate format. Lastly, CEinfo.pl uses *Printer.pm to generate ldif/xml.
Loading structure:
CEinfo.pl loads: ARC*ClusterInfo.pm, GLUE2*Printer.pm, NGldifPrinter.pm, {Host,RTE,GMJobsInfo,LRMS}Info.pm, InfosysHelper.pm
LRMSInfo loads: ${LRMS}mod.pm, ARC0mod.pm
ARC0mod.pm loads: ${LRMS}.pm
# TODO: update description of the call chain when ARC 6.0 config is in place. Call-chain: To be described
nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/SLURM.pm0000644000000000000000000000013215067751327023132 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.868493696 30 ctime=1759499029.809086486 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/SLURM.pm0000644000175000002070000004473215067751327025042 0ustar00mockbuildmock00000000000000package SLURM;
######################################################################
# DISCLAIMER
######################################################################
# This module depends on ARC0mod.pm which is obsolete and deprecated
# starting from ARC 6.0
# Please DO NOT build new LRMS modules based on this one but follow
# the indications in LRMSInfo.pm instead.
###################################################################### use strict; use POSIX qw(ceil floor); our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('cluster_info', 'queue_info', 'jobs_info', 'users_info', 'nodes_info'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Saved private variables ########################################## #our(%lrms_queue,%lrms_users); our(%scont_config, %scont_part, %scont_jobs, %scont_nodes, %sinfo_cpuinfo); ########################################## # Private subs ########################################## sub slurm_read_config($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM config, store dictionary in scont_config my %scont_config; checkbin("$path/scontrol"); open (SCPIPE,"$path/scontrol show config| grep -Ev \"primary|Configuration|^\$\"|"); while(<SCPIPE>){ chomp; my @mrr = split(" = ", $_, 2); $mrr[0]=~s/\s+$//; $scont_config{$mrr[0]} = $mrr[1]; } close(SCPIPE); return %scont_config; } sub get_variable($$){ my $match = shift; my $string = shift; $string =~ m/(\w\s)*?$match=((\w|\s|\/|,|.|:|;|\[|\]|\(|\)|-)*?)($| \w+=.*)/ ; my $var = $2; return $var; } sub slurm_read_jobs($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM jobs, store dictionary in scont_jobs my %scont_jobs; checkbin("$path/squeue"); open (SCPIPE,"$path/squeue -a -h -t all -o \"JobId=%i TimeUsed=%M Partition=%P JobState=%T ReqNodes=%D ReqCPUs=%C TimeLimit=%l Name=%j NodeList=%N\"|"); while(<SCPIPE>){ my %job; my $string = $_; #Fetching of data from squeue output my $JobId = get_variable("JobId",$string); $job{JobId} = get_variable("JobId",$string); $job{TimeUsed} = get_variable("TimeUsed",$string); $job{Partition} = get_variable("Partition",$string); $job{JobState} = get_variable("JobState",$string); $job{ReqNodes} = get_variable("ReqNodes",$string); $job{ReqCPUs} = get_variable("ReqCPUs",$string); $job{TimeLimit} = get_variable("TimeLimit",$string); $job{Name} = get_variable("Name",$string); $job{NodeList} = get_variable("NodeList",$string); #Translation of data $job{TimeUsed} = slurm_to_arc_time($job{TimeUsed}); $job{TimeLimit} = slurm_to_arc_time($job{TimeLimit}); $scont_jobs{$JobId} = \%job; } close(SCPIPE); return %scont_jobs; } sub slurm_read_partitions($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM partitions, store dictionary in scont_part my %scont_part; checkbin("$path/sinfo"); open (SCPIPE,"$path/sinfo -a -h -o \"PartitionName=%P TotalCPUs=%C TotalNodes=%D MaxTime=%l DefTime=%L\"|"); while(<SCPIPE>){ my %part; my $string = $_; my $PartitionName = get_variable("PartitionName",$string); $PartitionName =~ s/\*$//; #Fetch data from sinfo $part{PartitionName} = $PartitionName; my $totalcpus = get_variable("TotalCPUs",$string); $part{TotalNodes} = get_variable("TotalNodes",$string); $part{MaxTime} = get_variable("MaxTime",$string); $part{DefTime} = get_variable("DefTime",$string); #Translation of data $part{MaxTime} = slurm_to_arc_time($part{MaxTime}); $part{DefTime} = slurm_to_arc_time($part{DefTime}); # Format of "%C" is: Number of CPUs by state in the format "allocated/idle/other/total" # We only care about total: ###### ($part{AllocatedCPUs},$part{IdleCPUs},$part{OtherCPUs},$part{TotalCPUs}) = split('/',$totalcpus); # Neither of these fields probably need this in SLURM
1.3, but it doesn't hurt. $part{AllocatedCPUs} = slurm_parse_number($part{AllocatedCPUs}); $part{IdleCPUs} = slurm_parse_number($part{IdleCPUs}); $part{OtherCPUs} = slurm_parse_number($part{OtherCPUs}); $part{TotalCPUs} = slurm_parse_number($part{TotalCPUs}); $part{TotalNodes} = slurm_parse_number($part{TotalNodes}); $scont_part{$PartitionName} = \%part; } close(SCPIPE); return %scont_part; } sub slurm_read_cpuinfo($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM partitions, store dictionary in scont_part my %sinfo_cpuinfo; my $cpuinfo; checkbin("$path/sinfo"); open (SCPIPE,"$path/sinfo -a -h -o \"cpuinfo=%C\"|"); while(){ my $string = $_; $cpuinfo = get_variable("cpuinfo",$string); } close(SCPIPE); ($sinfo_cpuinfo{AllocatedCPUs},$sinfo_cpuinfo{IdleCPUs},$sinfo_cpuinfo{OtherCPUs},$sinfo_cpuinfo{TotalCPUs}) = split('/',$cpuinfo); $sinfo_cpuinfo{AllocatedCPUs} = slurm_parse_number($sinfo_cpuinfo{AllocatedCPUs}); $sinfo_cpuinfo{IdleCPUs} = slurm_parse_number($sinfo_cpuinfo{IdleCPUs}); $sinfo_cpuinfo{OtherCPUs} = slurm_parse_number($sinfo_cpuinfo{OtherCPUs}); $sinfo_cpuinfo{TotalCPUs} = slurm_parse_number($sinfo_cpuinfo{TotalCPUs}); return %sinfo_cpuinfo; } sub slurm_read_nodes($){ my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # get SLURM nodes, store dictionary in scont_nodes my %scont_nodes; checkbin("$path/scontrol"); open (SCPIPE,"$path/scontrol show node --oneliner|"); while(){ my %record; my $string = $_; my $node = get_variable("NodeName",$string); # We have to keep CPUs key name for not to break other # functions that use this key $record{CPUs} = get_variable("CPUTot",$string); $record{RealMemory} = get_variable("RealMemory",$string); my $StateName = get_variable("State",$string); # Node status can be followed by different symbols # according to it being unresponsive, powersaving, etc. 
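# (for example a trailing "*" marks a node that is not responding,
# and "~" one that is in power-save mode)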
# Get rid of them $StateName =~ s/[\*~#\+]$//; $record{State} = $StateName; $record{Sockets} = get_variable("Sockets",$string); $record{SysName} = get_variable("OS",$string); $record{Arch} = get_variable("Arch",$string); $scont_nodes{$node} = \%record; } close(SCPIPE); return %scont_nodes; } #Function for retrieving used and queued cpus from slurm sub slurm_get_jobs { my $queue = shift; my $queuedjobs=0, my $usedcpus=0, my $nocpus=0, my $jqueue=0; my $runningjobs=0; foreach my $i (keys %scont_jobs){ $jqueue = $scont_jobs{$i}{"Partition"}; next if (defined($queue) && !($jqueue =~ /$queue/)); if ($scont_jobs{$i}{"JobState"} =~ /^PENDING$/){ $queuedjobs++; } if (($scont_jobs{$i}{"JobState"} =~ /^RUNNING$/) || ($scont_jobs{$i}{"JobState"} =~ /^COMPLETING$/)){ $runningjobs++; } } return ($queuedjobs, $runningjobs); } sub slurm_get_data($){ my $config = shift; %scont_config = slurm_read_config($config); %scont_part = slurm_read_partitions($config); %scont_jobs = slurm_read_jobs($config); %scont_nodes = slurm_read_nodes($config); %sinfo_cpuinfo = slurm_read_cpuinfo($config); } sub slurm_to_arc_time($){ my $timeslurm = shift; my $timearc = 0; # $timeslurm can be "infinite" or "UNLIMITED" if (($timeslurm =~ "UNLIMITED") or ($timeslurm =~ "infinite")) { #Max number allowed by ldap $timearc = 2**31-1; } # days-hours:minutes:seconds elsif ( $timeslurm =~ /(\d+)-(\d+):(\d+):(\d+)/ ) { $timearc = $1*24*60*60 + $2*60*60 + $3*60 + $4; } # hours:minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+):(\d+)/ ) { $timearc = $1*60*60 + $2*60 + $3; } # minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+)/ ) { $timearc = $1*60 + $2; } # ARC infosys uses minutes as the smallest allowed value. $timearc = floor($timearc/60); return $timearc; } # SLURM outputs some values as 12.3K where K is 1024. Include T, G, M # as well in case they become necessary in the future. sub slurm_parse_number($){ my $value = shift; if ( $value =~ /(\d+\.?\d*)K$/ ){ $value = floor($1 * 1024); } if ( $value =~ /(\d+\.?\d*)M$/ ){ $value = floor($1 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)G$/ ){ $value = floor($1 * 1024 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)T$/ ){ $value = floor($1 * 1024 * 1024 * 1024 * 1024); } return $value; } sub slurm_get_first_node($){ my $nodes = shift; my @enodes = split(",", slurm_expand_nodes($nodes)); return " NoNode " if ! @enodes; return $enodes[0]; } #translates a list like n[1-2,5],n23,n[54-55] to n1,n2,n5,n23,n54,n55 sub slurm_expand_nodes($){ my $nodes = shift; my $enodes = ""; $nodes =~ s/,([a-zA-Z])/ $1/g; foreach my $node (split(" ",$nodes)){ if( $node =~ m/([a-zA-Z0-9-_]*)\[([0-9\-,]*)\]/ ){ my $name = $1; my $list = $2; foreach my $element (split(",",$list)){ if($element =~ /(\d*)-(\d*)/){ my $start=$1; my $end=$2; my $l = length($start); for (my $i=$start;$i<=$end;$i++){ # Preserve leading zeroes in sequence, if needed $enodes .= sprintf("%s%0*d,", $name, $l, $i); } } else { $enodes .= $name.$element.","; } } } else { $enodes .= $node . ","; } } chop $enodes; return $enodes; } # check the existence of a passed binary, full path. sub checkbin ($) { my $apbin = shift; error("Can't find $apbin , check slurm_bin_path. 
Exiting...") unless -f $apbin; } ############################################ # Public subs ############################################# sub cluster_info ($) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # Get Data needed by this function, stored in the global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); # Return data structure %lrms_cluster{$keyword} # # lrms_type LRMS type (eg. LoadLeveler) # lrms_version LRMS version # totalcpus Total number of cpus in the system # queuedjobs Number of queueing jobs in LRMS # runningjobs Number of running jobs in LRMS # usedcpus Used cpus in the system # cpudistribution CPU distribution string # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. my (%lrms_cluster); #determine the version of SLURM $lrms_cluster{lrms_type} = "SLURM"; $lrms_cluster{lrms_version} = $scont_config{"SLURM_VERSION"}; # SLURM has no cputime limit at all $lrms_cluster{has_total_cputime_limit} = 0; #determine number of processors my $totalcpus=0; foreach my $i (keys %scont_nodes){ $totalcpus += $scont_nodes{$i}{"CPUs"}; } $lrms_cluster{totalcpus} = $totalcpus; $lrms_cluster{usedcpus} = $sinfo_cpuinfo{AllocatedCPUs}; # TODO: investigate if this can be calculated for SLURM # this is a quick and dirty fix for a warning, might be fixed somewhere else $lrms_cluster{queuedcpus} = 0; ($lrms_cluster{queuedjobs}, $lrms_cluster{runningjobs}) = slurm_get_jobs(); #NOTE: should be on the form "8cpu:800 2cpu:40" my @queue=(); foreach my $i (keys %scont_part){ unshift (@queue,$i); } my %cpudistribution; $lrms_cluster{cpudistribution} = ""; foreach my $key (keys %scont_nodes){ if(exists $cpudistribution{$scont_nodes{$key}{CPUs}}){ $cpudistribution{$scont_nodes{$key}{CPUs}} +=1; } else{ $cpudistribution{$scont_nodes{$key}{CPUs}} = 1; } } foreach my $key (keys %cpudistribution){ $lrms_cluster{cpudistribution}.= $key ."cpu:" . $cpudistribution{$key} . " "; } $lrms_cluster{queue} = [@queue]; return %lrms_cluster; } sub queue_info ($$) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # Name of the queue to query my ($queue) = shift; # Get data needed by this function, stored in global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); # The return data structure is %lrms_queue. my (%lrms_queue); # Return data structure %lrms_queue{$keyword} # # status available slots in the queue, negative number signals # some kind of LRMS error state for the queue # maxrunning queue limit for number of running jobs # maxqueuable queue limit for number of queueing jobs # maxuserrun queue limit for number of running jobs per user # maxcputime queue limit for max cpu time for a job # mincputime queue limit for min cpu time for a job # defaultcput queue default for cputime # maxwalltime queue limit for max wall time for a job # minwalltime queue limit for min wall time for a job # defaultwalltime queue default for walltime # running number of procs used by running jobs in the queue # queued number of procs requested by queueing jobs in the queue # totalcpus number of procs in the queue # # All values should be defined, empty values "" are ok if field # does not apply to particular LRMS. #TODO available slots, not max jobs. 
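# MaxJobCount is slurmctld's global limit on the total number of jobs in
# the system, so below it only serves as a rough stand-in for the
# per-queue limits.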
$lrms_queue{status} = $scont_config{"MaxJobCount"}; $lrms_queue{maxrunning} = $scont_config{"MaxJobCount"}; $lrms_queue{maxqueuable} = $scont_config{"MaxJobCount"}; $lrms_queue{maxuserrun} = $scont_config{"MaxJobCount"}; my $maxtime = $scont_part{$queue}{"MaxTime"}; my $deftime = $scont_part{$queue}{"DefTime"}; $lrms_queue{maxcputime} = $maxtime; $lrms_queue{mincputime} = 0; $lrms_queue{defaultcput} = $deftime; $lrms_queue{maxwalltime} = $maxtime; $lrms_queue{minwalltime} = 0; $lrms_queue{defaultwallt} = $deftime; ($lrms_queue{queued}, $lrms_queue{running}) = slurm_get_jobs($queue); $lrms_queue{totalcpus} = $scont_part{$queue}{TotalCPUs}; return %lrms_queue; } sub jobs_info ($$$) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # Name of the queue to query my ($queue) = shift; # LRMS job IDs from Grid Manager (jobs with "INLRMS" GM status) my ($jids) = shift; #Get data needed by this function, stored in global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); # status Status of the job: Running 'R', Queued'Q', # Suspended 'S', Exiting 'E', Other 'O' # rank Position in the queue # mem Used (virtual) memory # walltime Used walltime # cputime Used cpu-time # reqwalltime Walltime requested from LRMS # reqcputime Cpu-time requested from LRMS # nodes List of execution hosts. # comment Comment about the job in LRMS, if any # cpus Number of cpus requested/used by the job my (%lrms_jobs); #$queue is not used to keep jobs from different queues separate #jobs can't have overlapping job-ids between queues in SLURM foreach my $jid (@{$jids}){ if ($scont_jobs{$jid}{"JobState"} eq "RUNNING") { $lrms_jobs{$jid}{status} = "R"; } elsif ($scont_jobs{$jid}{"JobState"} eq "COMPLETED") { $lrms_jobs{$jid}{status} = "E"; } elsif ($scont_jobs{$jid}{"JobState"} eq "CANCELLED") { $lrms_jobs{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "FAILED") { $lrms_jobs{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "PENDING") { $lrms_jobs{$jid}{status} = "Q"; } elsif ($scont_jobs{$jid}{"JobState"} eq "TIMEOUT") { $lrms_jobs{$jid}{status} = "O"; } else { $lrms_jobs{$jid}{status} = "O"; } #TODO: calculate rank? Probably not possible. $lrms_jobs{$jid}{rank} = 0; #TODO: This gets the memory from the first node in a job #allocation which will not be correct on a heterogenous #cluster my $node = slurm_get_first_node($scont_jobs{$jid}{"NodeList"}); $lrms_jobs{$jid}{mem} = $scont_nodes{$node}{"RealMemory"}; my $walltime = $scont_jobs{$jid}{"TimeUsed"}; my $count = $scont_jobs{$jid}{ReqCPUs}; $lrms_jobs{$jid}{walltime} = $walltime; # TODO: multiply walltime by number of cores to get cputime? $lrms_jobs{$jid}{cputime} = $walltime*$count; $lrms_jobs{$jid}{reqwalltime} = $scont_jobs{$jid}{"TimeLimit"}; # TODO: cputime/walltime confusion again... 
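# reqcputime is approximated as the walltime limit multiplied by the
# number of requested CPUs, mirroring the cputime estimate above.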
$lrms_jobs{$jid}{reqcputime} = $scont_jobs{$jid}{"TimeLimit"}*$count; $lrms_jobs{$jid}{nodes} = [ split(",",slurm_expand_nodes($scont_jobs{$jid}{"NodeList"}) ) ]; $lrms_jobs{$jid}{comment} = [$scont_jobs{$jid}{"Name"}]; $lrms_jobs{$jid}{cpus} = $scont_jobs{$jid}{ReqCPUs}; } return %lrms_jobs; } sub users_info($$@) { # config array my ($config) = shift; my ($path) = ($$config{slurm_bin_path} or $$config{SLURM_bin_path} or "/usr/bin"); # name of queue to query my ($queue) = shift; # user accounts my ($accts) = shift; #Get data needed by this function, stored in global variables # scont_nodes, scont_part, scont_jobs slurm_get_data($config); my (%lrms_users); # freecpus for given account # queue length for given account # foreach my $u ( @{$accts} ) { $lrms_users{$u}{freecpus} = $sinfo_cpuinfo{IdleCPUs}; $lrms_users{$u}{queuelength} = 0; } return %lrms_users; } sub nodes_info($) { my $config = shift; my %hoh_slurmnodes = slurm_read_nodes($config); my %nodes; for my $host (keys %hoh_slurmnodes) { my ($isfree, $isavailable) = (0,0); $isavailable = 1 unless $hoh_slurmnodes{$host}{State} =~ /DOWN|DRAIN|FAIL|MAINT|UNK/; $isfree = 1 if $hoh_slurmnodes{$host}{State} =~ /IDLE|MIXED/; $nodes{$host} = {isfree => $isfree, isavailable => $isavailable}; my $np = $hoh_slurmnodes{$host}{CPUs}; my $nsock = $hoh_slurmnodes{$host}{Sockets}; my $rmem = $hoh_slurmnodes{$host}{RealMemory}; $nodes{$host}{lcpus} = int $np if $np; $nodes{$host}{slots} = int $np if $np; $nodes{$host}{pmem} = int $rmem if $rmem; $nodes{$host}{pcpus} = int $nsock if $nsock; $nodes{$host}{sysname} = $hoh_slurmnodes{$host}{SysName}; $nodes{$host}{machine} = $hoh_slurmnodes{$host}{Arch}; } return %nodes; } 1; nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/PaxHeaders/SLURMmod.pm0000644000000000000000000000013215067751327023632 xustar0030 mtime=1759498967.761492071 30 atime=1759498967.868493696 30 ctime=1759499029.810239174 nordugrid-arc-7.1.1/src/services/a-rex/infoproviders/SLURMmod.pm0000644000175000002070000003700315067751327025537 0ustar00mockbuildmock00000000000000package SLURMmod; use strict; use POSIX qw(ceil floor); our @ISA = ('Exporter'); # Module implements these subroutines for the LRMS interface our @EXPORT_OK = ('get_lrms_info', 'get_lrms_options_schema'); use LogUtils ( 'start_logging', 'error', 'warning', 'debug' ); ########################################## # Public variables ########################################## our $options; our $lrms_info; ########################################## # Saved private variables ########################################## our $path; our(%scont_config, %scont_part, %scont_jobs, %scont_nodes, %sinfo_cpuinfo); our $log = LogUtils->getLogger("SLURMmod"); sub get_lrms_options_schema { return { 'slurm_bin_path' => '*', 'queues' => { '*' => { 'users' => [ '' ], } }, 'jobs' => [ '' ] }; } sub get_lrms_info($) { $options = shift; $path = ($options->{slurm_bin_path} or "/usr/bin"); slurm_init_check($path); slurm_get_data(); cluster_info(); my %qconf = %{$options->{queues}}; for my $qname ( keys %qconf ) { queue_info($qname); } my $jids = $options->{jobs}; jobs_info($jids); for my $qname ( keys %qconf ) { my $users = $qconf{$qname}{users}; users_info($qname,$users); } nodes_info(); return $lrms_info; } ########################################## # Private subs ########################################## # checks existence of slurm commands sub slurm_init_check($) { my $path = shift; $log->info("Verifying slurm commands..."); my @slurm_commands = ('scontrol','squeue','sinfo'); foreach my 
$slurmcmd (@slurm_commands) { $log->error("$path/$slurmcmd command not found. Check slurm_bin_path in configuration. Exiting...") unless (-f "$path/$slurmcmd") ; } } sub nodes_info() { my $lrms_nodes = {}; # add this cluster to the info tree $lrms_info->{nodes} = $lrms_nodes; for my $host (keys %scont_nodes) { my ($isfree, $isavailable) = (0,0); $isavailable = 1 unless $scont_nodes{$host}{State} =~ /DOWN|DRAIN|FAIL|MAINT|UNK/; $isfree = 1 if $scont_nodes{$host}{State} =~ /IDLE|MIXED/; $lrms_nodes->{$host} = {isfree => $isfree, isavailable => $isavailable}; my $np = $scont_nodes{$host}{CPUTot}; my $nsock = $scont_nodes{$host}{Sockets}; my $rmem = $scont_nodes{$host}{RealMemory}; $lrms_nodes->{$host}{lcpus} = int $np if $np; $lrms_nodes->{$host}{slots} = int $np if $np; $lrms_nodes->{$host}{pmem} = int $rmem if $rmem; $lrms_nodes->{$host}{pcpus} = int $nsock if $nsock; $lrms_nodes->{$host}{sysname} = $scont_nodes{$host}{SysName}; $lrms_nodes->{$host}{machine} = $scont_nodes{$host}{Arch}; } } sub users_info($@) { # name of queue to query my ($queue) = shift; # user accounts my ($accts) = shift; my $lrms_users = {}; # add users to the info tree my $lrms_queue = $lrms_info->{queues}{$queue}; $lrms_queue->{users} = $lrms_users; # freecpus for given account # queue length for given account # foreach my $u ( @{$accts} ) { $lrms_users->{$u}{freecpus} = { $sinfo_cpuinfo{IdleCPUs} => 0 }; $lrms_users->{$u}{queuelength} = 0; } } sub slurm_get_first_node($){ my $nodes = shift; my @enodes = split(",", slurm_expand_nodes($nodes)); return " NoNode " if ! @enodes; return $enodes[0]; } #translates a list like n[1-2,5],n23,n[54-55] to n1,n2,n5,n23,n54,n55 sub slurm_expand_nodes($){ my $nodes = shift; my $enodes = ""; $nodes =~ s/,([a-zA-Z])/ $1/g; foreach my $node (split(" ",$nodes)){ if( $node =~ m/([a-zA-Z0-9-_]*)\[([0-9\-,]*)\]/ ){ my $name = $1; my $list = $2; foreach my $element (split(",",$list)){ if($element =~ /(\d*)-(\d*)/){ my $start=$1; my $end=$2; my $l = length($start); for (my $i=$start;$i<=$end;$i++){ # Preserve leading zeroes in sequence, if needed $enodes .= sprintf("%s%0*d,", $name, $l, $i); } } else { $enodes .= $name.$element.","; } } } else { $enodes .= $node . ","; } } chop $enodes; return $enodes; } sub jobs_info ($) { my $jids = shift; my $lrms_jobs = {}; # add jobs to the info tree $lrms_info->{jobs} = $lrms_jobs; #jobs can't have overlapping job-ids between queues in SLURM foreach my $jid (@{$jids}){ if ($scont_jobs{$jid}{"JobState"} eq "RUNNING") { $lrms_jobs->{$jid}{status} = "R"; } elsif ($scont_jobs{$jid}{"JobState"} eq "COMPLETED") { $lrms_jobs->{$jid}{status} = "E"; } elsif ($scont_jobs{$jid}{"JobState"} eq "CANCELLED") { $lrms_jobs->{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "FAILED") { $lrms_jobs->{$jid}{status} = "O"; } elsif ($scont_jobs{$jid}{"JobState"} eq "PENDING") { $lrms_jobs->{$jid}{status} = "Q"; } elsif ($scont_jobs{$jid}{"JobState"} eq "TIMEOUT") { $lrms_jobs->{$jid}{status} = "O"; } else { $lrms_jobs->{$jid}{status} = "O"; } #TODO: calculate rank? Probably not possible. 
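# squeue exposes a priority but no explicit queue position, so a
# constant 0 is reported instead.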
$lrms_jobs->{$jid}{rank} = 0; $lrms_jobs->{$jid}{cpus} = $scont_jobs{$jid}{ReqCPUs}; #TODO: This gets the memory from the first node in a job #allocation which will not be correct on a heterogenous #cluster my $node = slurm_get_first_node($scont_jobs{$jid}{"NodeList"}); # Only jobs that got the nodes can report the memory of # their nodes if($node ne " NoNode "){ $lrms_jobs->{$jid}{mem} = $scont_nodes{$node}{"RealMemory"}; } my $walltime = $scont_jobs{$jid}{"TimeUsed"}; my $count = $scont_jobs{$jid}{ReqCPUs}; $lrms_jobs->{$jid}{walltime} = $walltime; # TODO: multiply walltime by number of cores to get cputime? $lrms_jobs->{$jid}{cputime} = $walltime*$count; $lrms_jobs->{$jid}{reqwalltime} = $scont_jobs{$jid}{"TimeLimit"}; # TODO: cputime/walltime confusion again... $lrms_jobs->{$jid}{reqcputime} = $scont_jobs{$jid}{"TimeLimit"}*$count; $lrms_jobs->{$jid}{nodes} = [ split(",",slurm_expand_nodes($scont_jobs{$jid}{"NodeList"}) ) ]; $lrms_jobs->{$jid}{comment} = [$scont_jobs{$jid}{"Name"}]; } } sub queue_info ($) { # Name of the queue to query my ($queue) = shift; my $lrms_queue = {}; # add this queue to the info tree $lrms_info->{queues}{$queue} = $lrms_queue; $lrms_queue->{status} = $scont_config{"MaxJobCount"}; $lrms_queue->{maxrunning} = $scont_config{"MaxJobCount"}; $lrms_queue->{maxqueuable} = $scont_config{"MaxJobCount"}; $lrms_queue->{maxuserrun} = $scont_config{"MaxJobCount"}; my $maxtime = $scont_part{$queue}{"MaxTime"}; my $deftime = $scont_part{$queue}{"DefTime"}; $lrms_queue->{maxcputime} = $maxtime; $lrms_queue->{mincputime} = 0; $lrms_queue->{defaultcput} = $deftime; $lrms_queue->{maxwalltime} = $maxtime; $lrms_queue->{minwalltime} = 0; $lrms_queue->{defaultwallt} = $deftime; ($lrms_queue->{queued}, $lrms_queue->{running}) = slurm_get_jobs($queue); $lrms_queue->{totalcpus} = $scont_part{$queue}{TotalCPUs}; $lrms_queue->{freeslots} = $scont_part{$queue}{IdleCPUs}; $lrms_queue->{nodes} = $scont_part{$queue}{NodeNames}; } #Function for retrieving running and queued jobs from slurm sub slurm_get_jobs { my $queue = shift; my $queuedjobs=0, my $usedcpus=0, my $nocpus=0, my $jqueue=0; my $runningjobs=0; foreach my $i (keys %scont_jobs){ $jqueue = $scont_jobs{$i}{"Partition"}; next if (defined($queue) && !($jqueue =~ /$queue/)); if ($scont_jobs{$i}{"JobState"} =~ /^PENDING$/){ $queuedjobs++; } if (($scont_jobs{$i}{"JobState"} =~ /^RUNNING$/) || ($scont_jobs{$i}{"JobState"} =~ /^COMPLETING$/)){ $runningjobs++; } } return ($queuedjobs, $runningjobs); } sub cluster_info () { my $lrms_cluster = {}; # add this cluster to the info tree $lrms_info->{cluster} = $lrms_cluster; #determine the version of SLURM $lrms_cluster->{lrms_type} = "SLURM"; $lrms_cluster->{lrms_version} = $scont_config{"SLURM_VERSION"}; #determine number of processors my $totalcpus=0; foreach my $i (keys %scont_nodes){ $totalcpus += $scont_nodes{$i}{CPUTot}; } $lrms_cluster->{totalcpus} = $totalcpus; # TODO: investigate if this can be calculated for SLURM # this is a quick and dirty fix for a warning, might be fixed somewhere else $lrms_cluster->{queuedcpus} = 0; $lrms_cluster->{usedcpus} = $sinfo_cpuinfo{AllocatedCPUs}; ($lrms_cluster->{queuedjobs}, $lrms_cluster->{runningjobs}) = slurm_get_jobs(); #NOTE: should be on the form "8cpu:800 2cpu:40" my %cpudistribution; $lrms_cluster->{cpudistribution} = ""; foreach my $key (keys %scont_nodes){ if(exists $cpudistribution{$scont_nodes{$key}{CPUTot}}){ $cpudistribution{$scont_nodes{$key}{CPUTot}} +=1; } else{ $cpudistribution{$scont_nodes{$key}{CPUTot}} = 1; } } foreach my $key 
(keys %cpudistribution){ $lrms_cluster->{cpudistribution}.= $key ."cpu:" . $cpudistribution{$key} . " "; } } sub slurm_get_data(){ %scont_config = slurm_read_config(); %scont_part = slurm_read_partitions(); %scont_jobs = slurm_read_jobs(); %scont_nodes = slurm_read_nodes(); %sinfo_cpuinfo = slurm_read_cpuinfo(); } sub slurm_read_config(){ # get SLURM config, store dictionary in scont_config my %scont_config; open (SCPIPE,"$path/scontrol show config| grep -Ev \"primary|Configuration|^\$\"|"); while(){ chomp; my @mrr = split(" = ", $_, 2); $mrr[0]=~s/\s+$//; $scont_config{$mrr[0]} = $mrr[1]; } close(SCPIPE); return %scont_config; } sub get_variable($$){ my $match = shift; my $string = shift; $string =~ m/(\w\s)*?$match=((\w|\s|\/|,|.|:|;|\[|\]|\(|\)|-)*?)($| \w+=.*)/ ; my $var = $2; return $var; } sub slurm_to_arc_time($){ my $timeslurm = shift; my $timearc = 0; # $timeslurm can be "infinite" or "UNLIMITED" if (($timeslurm =~ "UNLIMITED") or ($timeslurm =~ "infinite")) { #Max number allowed by ldap $timearc = 2**31-1; } # days-hours:minutes:seconds elsif ( $timeslurm =~ /(\d+)-(\d+):(\d+):(\d+)/ ) { $timearc = $1*24*60*60 + $2*60*60 + $3*60 + $4; } # hours:minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+):(\d+)/ ) { $timearc = $1*60*60 + $2*60 + $3; } # minutes:seconds elsif ( $timeslurm =~ /(\d+):(\d+)/ ) { $timearc = $1*60 + $2; } return $timearc; } # SLURM outputs some values as 12.3K where K is 1024. Include T, G, M # as well in case they become necessary in the future. sub slurm_parse_number($){ my $value = shift; if ( $value =~ /(\d+\.?\d*)K$/ ){ $value = floor($1 * 1024); } if ( $value =~ /(\d+\.?\d*)M$/ ){ $value = floor($1 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)G$/ ){ $value = floor($1 * 1024 * 1024 * 1024); } if ( $value =~ /(\d+\.?\d*)T$/ ){ $value = floor($1 * 1024 * 1024 * 1024 * 1024); } return $value; } sub slurm_read_partitions(){ # get SLURM partitions, store dictionary in scont_part my %scont_part; open (SCPIPE,"$path/sinfo -a -h -o \"PartitionName=%P TotalCPUs=%C TotalNodes=%D MaxTime=%l DefTime=%L NodeNames=%N\"|"); while(){ my %part; my $string = $_; my $PartitionName = get_variable("PartitionName",$string); $PartitionName =~ s/\*$//; #Fetch data from sinfo $part{PartitionName} = $PartitionName; my $totalcpus = get_variable("TotalCPUs",$string); $part{TotalNodes} = get_variable("TotalNodes",$string); $part{MaxTime} = get_variable("MaxTime",$string); $part{DefTime} = get_variable("DefTime",$string); #Translation of data $part{MaxTime} = slurm_to_arc_time($part{MaxTime}); $part{DefTime} = slurm_to_arc_time($part{DefTime}); # Format of "%C" is: Number of CPUs by state in the format "allocated/idle/other/total" # We only care about total: ###### ($part{AllocatedCPUs},$part{IdleCPUs},$part{OtherCPUs},$part{TotalCPUs}) = split('/',$totalcpus); # Neither of these fields probably need this in SLURM 1.3, but it doesn't hurt. 
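# slurm_parse_number() below folds suffixed counts into plain integers,
# e.g. "12.3K" becomes floor(12.3 * 1024) = 12595.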
$part{AllocatedCPUs} = slurm_parse_number($part{AllocatedCPUs}); $part{IdleCPUs} = slurm_parse_number($part{IdleCPUs}); $part{OtherCPUs} = slurm_parse_number($part{OtherCPUs}); $part{TotalCPUs} = slurm_parse_number($part{TotalCPUs}); $part{TotalNodes} = slurm_parse_number($part{TotalNodes}); $part{NodeNames} = [ split(",", slurm_expand_nodes( get_variable("NodeNames",$string ) ) ) ]; $scont_part{$PartitionName} = \%part; } close(SCPIPE); return %scont_part; } sub slurm_read_jobs($){ # get SLURM jobs, store dictionary in scont_jobs my %scont_jobs; open (SCPIPE,"$path/squeue -a -h -t all -o \"JobId=%i TimeUsed=%M Partition=%P JobState=%T ReqNodes=%D ReqCPUs=%C TimeLimit=%l Name=%j NodeList=%N\"|"); while(){ my %job; my $string = $_; #Fetching of data from squeue output my $JobId = get_variable("JobId",$string); $job{JobId} = get_variable("JobId",$string); $job{TimeUsed} = get_variable("TimeUsed",$string); $job{Partition} = get_variable("Partition",$string); $job{JobState} = get_variable("JobState",$string); $job{ReqNodes} = get_variable("ReqNodes",$string); $job{ReqCPUs} = get_variable("ReqCPUs",$string); $job{TimeLimit} = get_variable("TimeLimit",$string); $job{Name} = get_variable("Name",$string); $job{NodeList} = get_variable("NodeList",$string); #Translation of data $job{TimeUsed} = slurm_to_arc_time($job{TimeUsed}); $job{TimeLimit} = slurm_to_arc_time($job{TimeLimit}); $scont_jobs{$JobId} = \%job; } close(SCPIPE); return %scont_jobs; } sub slurm_read_nodes($){ # get SLURM nodes, store dictionary in scont_nodes my %scont_nodes; open (SCPIPE,"$path/scontrol show node --oneliner|"); while(){ my %record; my $string = $_; my $node = get_variable("NodeName",$string); # We have to keep CPUs key name for not to break other # functions that use this key $record{CPUTot} = get_variable("CPUTot",$string); $record{RealMemory} = get_variable("RealMemory",$string); my $StateName = get_variable("State",$string); # Node status can be followed by different symbols # according to it being unresponsive, powersaving, etc. 
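# (single trailing characters such as "*" or "~" appended by scontrol)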
# Get rid of them $StateName =~ s/[\*~#\+]$//; $record{State} = $StateName; $record{Sockets} = get_variable("Sockets",$string); $record{SysName} = get_variable("OS",$string); $record{Arch} = get_variable("Arch",$string); $scont_nodes{$node} = \%record; } close(SCPIPE); return %scont_nodes; } sub slurm_read_cpuinfo($){ my %sinfo_cpuinfo; my $cpuinfo; open (SCPIPE,"$path/sinfo -a -h -o \"cpuinfo=%C\"|"); while(){ my $string = $_; $cpuinfo = get_variable("cpuinfo",$string); } close(SCPIPE); ($sinfo_cpuinfo{AllocatedCPUs},$sinfo_cpuinfo{IdleCPUs},$sinfo_cpuinfo{OtherCPUs},$sinfo_cpuinfo{TotalCPUs}) = split('/',$cpuinfo); $sinfo_cpuinfo{AllocatedCPUs} = slurm_parse_number($sinfo_cpuinfo{AllocatedCPUs}); $sinfo_cpuinfo{IdleCPUs} = slurm_parse_number($sinfo_cpuinfo{IdleCPUs}); $sinfo_cpuinfo{OtherCPUs} = slurm_parse_number($sinfo_cpuinfo{OtherCPUs}); $sinfo_cpuinfo{TotalCPUs} = slurm_parse_number($sinfo_cpuinfo{TotalCPUs}); return %sinfo_cpuinfo; } 1; nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/update-controldir.in0000644000000000000000000000013215067751327022770 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499029.324727181 nordugrid-arc-7.1.1/src/services/a-rex/update-controldir.in0000644000175000002070000000641315067751327024676 0ustar00mockbuildmock00000000000000#!/bin/bash log_failure_msg() { echo $@ } control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo "$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } update_control_dir() { controldir="$1" # convert files stored directly in control dir find "$controldir" -maxdepth 1 -regex '.*/job\.[0-9a-zA-Z]*\.[^.]*' -printf '%f\n' | \ while true; do read file if [ $? -ne 0 ]; then break; fi id=`echo "$file" | sed -e 's/^job\.//' -e 's/\.[^.]*$//'` suffix=`echo "$file" | sed -e 's/^job\.//' -e 's/^[^.]*\.//'` newfile=$(control_path "$controldir" "$id" "$suffix") newpath=`dirname "$newfile"` mkdir -p "$newpath" if [ $? -ne 0 ]; then return 1; fi mv "$controldir/$file" "$newfile" if [ $? -ne 0 ]; then return 1; fi done # convert status files for subdir in accepting finished processing restarting; do find "$controldir/$subdir" -maxdepth 1 -regex '.*/job\.[0-9a-zA-Z]*\.[^.]*' -printf '%f\n' | \ while true; do read file if [ $? -ne 0 ]; then break; fi newfile=`echo "$file" | sed -e 's/^job\.//'` mv "$controldir/$subdir/$file" "$controldir/$subdir/$newfile" if [ $? -ne 0 ]; then return 1; fi done done } readconfigvar() { fname="$1" optname="$2" blocks="" while [ ! -z "$3" ] ; do blocks="$blocks -b $3" shift done value=`$ARC_LOCATION/@pkglibexecsubdir@/arcconfig-parser --runconfig "$fname" --load $blocks -o "$optname" 2>/dev/null` if [ $? -eq 0 ] ; then echo "$value" exit 0 else exit 1 fi } if [ "$1" = '-h' ]; then echo "Usage: update-controldir [path to controldir]" echo " Used env. variables: ARC_LOCATION, ARC_CONFIG." exit 0 fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! 
-d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi export ARC_LOCATION if [ -z "$1" ]; then # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi fi if [ -z "$ARC_CONFIG" ] ; then log_failure_msg "Missing A-REX configuration file" exit 1 fi CONTROLDIR=`readconfigvar "$ARC_CONFIG" controldir arex` if [ -z "$CONTROLDIR" ] ; then log_failure_msg "Missing controldir in A-REX configuration" exit 1 fi else CONTROLDIR="$1" fi if [ ! -d "$CONTROLDIR" ]; then log_failure_msg "The controldir '$CONTROLDIR' does not exist or is not accessible." exit 1 fi # check special mark indicating new version of control dir if [ ! -f "$CONTROLDIR/version2" ] ; then echo "Updating A-REX control dir..." update_control_dir "$CONTROLDIR" if [ $? -ne 0 ]; then log_failure_msg "Failed to update A-REX control dir" exit 1 fi touch "$CONTROLDIR/version2" else echo "A-REX control dir '$CONTROLDIR' is already updated." echo "Remove file named 'version2' from controldir to force update." fi exit 0 nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/FileChunks.h0000644000000000000000000000013115067751327021204 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.350520762 nordugrid-arc-7.1.1/src/services/a-rex/FileChunks.h0000644000175000002070000000542015067751327023110 0ustar00mockbuildmock00000000000000#include #include #include #include #include namespace ARex { class FileChunksList; /// Representation of delivered file chunks class FileChunks { friend class FileChunksList; private: std::mutex lock; FileChunksList& list; std::map::iterator self; typedef std::list > chunks_t; chunks_t chunks; off_t size; time_t last_accessed; int refcount; FileChunks(FileChunksList& container); public: FileChunks(const FileChunks& obj); /// Returns assigned file path (id of file) std::string Path(void) { return self->first; }; /// Assign file size void Size(off_t size); /// Returns assigned file size off_t Size(void) { return size; }; /// Report one more delivered chunk void Add(off_t start,off_t csize); /// Returns true if all chunks were delivered. bool Complete(void); /// Prints chunks delivered so far. For debuging purposes. void Print(void); /// Release reference obtained through FileChunksList::Get() method. /// This operation may lead to destruction of FileChunk instance /// hence previously obtained refrence must not be used. void Release(void); /// Relases reference obtained through Get() method and destroys its instance. /// Normally this method to be called instead of Release() after whole /// file is delivered in order to free resources associated with /// FileChunks instance. void Remove(void); }; /// Container for FileChunks instances class FileChunksList { friend class FileChunks; private: std::mutex lock; typedef std::map files_t; files_t files; int timeout; time_t last_timeout; /// Returns pointer to first stuck file. /// File is considred stuck if its Add method was last called more /// timeout seconds ago. FileChunks* GetStuck(void); void RemoveStuck(void); public: FileChunksList(void); ~FileChunksList(void); /// Returns previously created FileChunks object with associated path. /// If such instance does not exist new one is created. /// Obtained reference may be used for other operations. /// Obtained reference must be Release()ed after it is not longer needed. 
FileChunks& Get(std::string path); /// Assign timeout value (seconds) for file transfers void Timeout(int t) { timeout=t; }; /// Returns pointer to the first FileChunks instance created in the list. //FileChunks* GetFirst(void); }; class FileChunksRef { private: FileChunks& obj; FileChunksRef(); FileChunksRef(FileChunksRef const&); FileChunksRef& operator=(FileChunksRef const&); public: FileChunksRef(FileChunks& o):obj(o) { }; ~FileChunksRef(void) { obj.Release(); }; FileChunks* operator->(void) { return &obj; }; FileChunks& operator*(void) { return obj; }; }; } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arc-arex-ws.service.in0000644000000000000000000000013115067751327023120 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.319608721 nordugrid-arc-7.1.1/src/services/a-rex/arc-arex-ws.service.in0000644000175000002070000000036215067751327025024 0ustar00mockbuildmock00000000000000[Unit] Description=ARC Resource-coupled EXecution WS interface service After=local-fs.target remote-fs.target [Service] Type=forking ExecStart=@prefix@/@pkgdatasubdir@/arc-arex-ws-start NotifyAccess=all [Install] WantedBy=multi-user.target nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/change_activity_status.cpp0000644000000000000000000000013215067751327024251 xustar0030 mtime=1759498967.750491903 30 atime=1759498967.862493605 30 ctime=1759499029.332461694 nordugrid-arc-7.1.1/src/services/a-rex/change_activity_status.cpp0000644000175000002070000000646215067751327026153 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::PutLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { // Nothing can be put into root endpoint if(id.empty()) return make_http_fault(outmsg, 500, "No job specified"); // Acquire job ARexJob job(id,config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "%s: there is no such job: %s", job.ID(), job.Failure()); return make_http_fault(outmsg, 500, "Job does not exist"); }; if(subpath == "status") { // Request to change job state // Fetch content Arc::MessagePayload* payload = inmsg.Payload(); if(!payload) { logger_.msg(Arc::ERROR, "%s: put log %s: there is no payload", id, subpath); return make_http_fault(outmsg,500,"Missing payload"); }; Arc::PayloadStreamInterface* stream = dynamic_cast<Arc::PayloadStreamInterface*>(payload); Arc::PayloadRawInterface* buf = dynamic_cast<Arc::PayloadRawInterface*>(payload); if((!stream) && (!buf)) { logger_.msg(Arc::ERROR, "%s: put log %s: unrecognized payload", id, subpath); return make_http_fault(outmsg, 500, "Error processing payload"); } std::string new_state; static const int new_state_max_size = 256; if(stream) { std::string new_state_add_str; while(stream->Get(new_state_add_str)) { new_state.append(new_state_add_str); if(new_state.size() > new_state_max_size) break; } } else { for(unsigned int n = 0;buf->Buffer(n);++n) { new_state.append(buf->Buffer(n),buf->BufferSize(n)); if(new_state.size() > new_state_max_size) break; }; }; new_state = Arc::upper(new_state); std::string gm_state = job.State(); // Check for allowed combinations if(new_state == "FINISHED") { // Request to cancel job if((gm_state != "FINISHED") && (gm_state != "CANCELING") && (gm_state != "DELETED")) { job.Cancel(); }; } else if(new_state == "DELETED") { // Request to clean job if((gm_state != "FINISHED") && (gm_state != "CANCELING") && (gm_state != "DELETED")) {
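// Job is still active, so request cancellation before it is marked
// for cleaning below.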
job.Cancel(); }; job.Clean(); } else if((new_state == "PREPARING") || (new_state == "SUBMIT") || (new_state == "INLRMS") || (new_state == "FINISHING")) { // Request to resume job if(!job.Resume()) { logger_.msg(Arc::ERROR, "A-REX REST: Failed to resume job"); return Arc::MCC_Status(Arc::STATUS_OK); }; } else { logger_.msg(Arc::ERROR, "A-REX REST: State change not allowed: from %s to %s", gm_state, new_state); return make_http_fault(outmsg, 500, "Impossible job state change request"); }; return make_http_fault(outmsg,200,"Done"); } return make_http_fault(outmsg,500,"Requested operation is not possible"); } Arc::MCC_Status ARexService::DeleteLogs(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& id,std::string const& subpath) { return make_http_fault(outmsg,501,"Not Implemented"); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/arc-arex-ws.in0000644000000000000000000000013115067751327021461 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.318255077 nordugrid-arc-7.1.1/src/services/a-rex/arc-arex-ws.in0000644000175000002070000001230515067751327023365 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the A-REX WS interface service # # chkconfig: 2345 75 25 # description: NorduGrid A-REX WS interface # # config: /etc/sysconfig/globus # config: /etc/sysconfig/nordugrid # config: @prefix@/etc/arc.conf # config: /etc/arc.conf # # This startup script takes ARC0 configuration file as # its input and generates ARC1 arched configuration file # which contains commands to start A-REX service. Service # is either run isolated or with WS interface enabled. # To enable WS interface ARC0 configuration file must # contain [arex/ws/jobs] section # and mandatory option in [arex/ws]: # wsurl="a_rex_url" ### BEGIN INIT INFO # Provides: arc-arex-ws # Required-Start: $local_fs $remote_fs # Required-Stop: $local_fs $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: ARC WS interface # Description: The unit of the NorduGrid's ARC middleware to # accept and control jobs. ### END INIT INFO # source function library if [ -f /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -f /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source neither init.d nor lsb functions" exit 1 fi prog=arched # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/arc-arex-ws ]; then . /etc/sysconfig/arc-arex-ws elif [ -r /etc/default/arc-arex-ws ]; then . /etc/default/arc-arex-ws fi # ARC_LOCATION ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # PID and lock file PID_FILE=`${ARC_LOCATION}/@pkgdatasubdir@/arc-arex-ws-start --getpidfile` if [ $? 
-ne 0 ]; then # When --getpidfile fails it returns the error on stdout log_failure_msg "$PID_FILE" exit 1 fi if [ `id -u` = 0 ] ; then # Debian does not have /run/lock/subsys if [ -d /run/lock/subsys ]; then LOCKFILE=/run/lock/subsys/arex-ws else LOCKFILE=/run/lock/arex-ws fi else LOCKFILE=$HOME/arex-ws.lock fi start() { echo -n "Starting $prog: " # Check if we are already running if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "x$pid" != "x" ]; then ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null if [ $? -eq 0 ] ; then log_success_msg "already running (pid $pid)" return 0 fi fi rm -f "$PID_FILE" "$LOCKFILE" fi ${ARC_LOCATION}/@pkgdatasubdir@/arc-arex-ws-start RETVAL=$? if [ $RETVAL -eq 0 ]; then touch $LOCKFILE log_success_msg else log_failure_msg fi return $RETVAL } stop() { echo -n "Stopping $prog: " if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ ! -z "$pid" ] ; then if [ "x$1" != "x" ]; then # kill whole process group on force-kill kill -TERM "-$pid" else kill "$pid" fi RETVAL=$? if [ $RETVAL -eq 0 ]; then log_success_msg else log_failure_msg fi timeout=300; # for stopping nicely if [ "x$1" != "x" ]; then timeout=1 # 1 second for force-kill fi while ( ps -p "$pid" -o comm 2>/dev/null | grep "^$prog$" 1>/dev/null 2>/dev/null ) && [ $timeout -ge 1 ] ; do sleep 1 timeout=$(($timeout - 1)) done [ $timeout -lt 1 ] && kill -9 "$pid" 1>/dev/null 2>&1 rm -f "$PID_FILE" "$LOCKFILE" else RETVAL=1 log_failure_msg "$prog shutdown - pidfile is empty" fi else RETVAL=0 log_success_msg "$prog shutdown - already stopped" fi return $RETVAL } status() { if [ -f "$PID_FILE" ]; then read pid < "$PID_FILE" if [ "$pid" != "" ]; then if ps -p "$pid" > /dev/null; then echo "$1 (pid $pid) is running..." return 0 fi echo "$1 stopped but pid file exists" return 1 fi fi if [ -f $LOCKFILE ]; then echo "$1 stopped but lockfile exists" return 2 fi echo "$1 is stopped" return 3 } restart() { stop start } case "$1" in start) start ;; stop) stop ;; status) status $prog ;; restart | force-reload) restart ;; reload) ;; condrestart | try-restart) [ -f $LOCKFILE ] && restart || : ;; force-kill) stop 1 ;; *) echo "Usage: $0 {start|stop|status|restart|force-reload|reload|condrestart|try-restart|force-kill}" exit 1 ;; esac exit $? 
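# A minimal usage sketch (hypothetical session, assuming the script is
# installed as /etc/init.d/arc-arex-ws; actual output depends on the host):
#
#   /etc/init.d/arc-arex-ws start       # starts arched, creates pid and lock files
#   /etc/init.d/arc-arex-ws status      # e.g. "arched (pid 1234) is running..."
#   /etc/init.d/arc-arex-ws force-kill  # TERMs the whole process group, KILL after 1s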
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/update_credentials.cpp0000644000000000000000000000013215067751327023344 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499029.333929819 nordugrid-arc-7.1.1/src/services/a-rex/update_credentials.cpp0000644000175000002070000000633015067751327025250 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::UpdateCredentials(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& credentials) { /* UpdateCredentials (deleg) DelegatedToken Reference (multiple) UpdateCredentialsResponse (deleg) NotAuthorizedFault InvalidRequestMessageFault InvalidActivityIdentifierFault */ { std::string s; in.GetXML(s); logger_.msg(Arc::VERBOSE, "UpdateCredentials: request = \n%s", s); }; // Extract job id from references Arc::XMLNode refnode = in["DelegatedToken"]["Reference"]; if(!refnode) { // Must refer to job logger_.msg(Arc::ERROR, "UpdateCredentials: missing Reference"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Must have Activity specified in Reference"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong multiplicity"); out.Destroy(); return Arc::MCC_Status(); } if((bool)(refnode[1])) { // Only one job can be updated per operation (profile) logger_.msg(Arc::ERROR, "UpdateCredentials: wrong number of Reference"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can update credentials only for single Activity"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong multiplicity"); out.Destroy(); return Arc::MCC_Status(); }; if(refnode.Size() != 1) { // Expecting single job EPR in Reference logger_.msg(Arc::ERROR, "UpdateCredentials: wrong number of elements inside Reference"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can update credentials only for single Activity"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong content"); out.Destroy(); return Arc::MCC_Status(); } std::string jobid = Arc::WSAEndpointReference(refnode.Child()).ReferenceParameters()["a-rex:JobID"]; if(jobid.empty()) { // EPR is wrongly formatted or not an A-REX EPR logger_.msg(Arc::ERROR, "UpdateCredentials: EPR contains no JobID"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobID element in ActivityIdentifier"); InvalidRequestMessageFault(fault,"arcdeleg:Reference","Wrong content"); out.Destroy(); return Arc::MCC_Status(); }; ARexJob job(jobid,config,logger_); if(!job) { // There is no such job std::string failure = job.Failure(); logger_.msg(Arc::ERROR, "UpdateCredentials: no job found: %s",failure); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find requested Activity"); UnknownActivityIdentifierFault(fault,"No corresponding Activity found"); out.Destroy(); return Arc::MCC_Status(); }; if(!job.UpdateCredentials(credentials)) { logger_.msg(Arc::ERROR, "UpdateCredentials: failed to update credentials"); Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Internal error: Failed to update credentials"); out.Destroy(); return Arc::MCC_Status(); }; { std::string s; out.GetXML(s); logger_.msg(Arc::VERBOSE, "UpdateCredentials: response = \n%s", s); }; return Arc::MCC_Status(Arc::STATUS_OK); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/grid-manager0000644000000000000000000000013015067751425021256 xustar0030 mtime=1759499029.731433707 28 atime=1759499034.7655102 30 
ctime=1759499029.731433707 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/0000755000175000002070000000000015067751425023237 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/gm_jobs.cpp0000644000000000000000000000013115067751327023461 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 30 ctime=1759499029.436580381 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/gm_jobs.cpp0000644000175000002070000004416515067751327025376 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "conf/GMConfig.h" #include "conf/StagingConfig.h" #include "files/ControlFileHandling.h" #include "jobs/CommFIFO.h" #include "jobs/JobsList.h" #include "../delegation/DelegationStore.h" using namespace ARex; static Arc::Logger logger(Arc::Logger::getRootLogger(), "gm-jobs"); /** Fill maps with shares taken from data staging states log */ static bool get_data_staging_shares(const GMConfig& config, std::map& share_preparing, std::map& share_preparing_pending, std::map& share_finishing, std::map& share_finishing_pending) { // get DTR configuration StagingConfig staging_conf(config); if (!staging_conf) { logger.msg(Arc::ERROR, "Could not read data staging configuration from %s", config.ConfigFile()); return false; } std::string dtr_log = staging_conf.get_dtr_log(); // read DTR state info std::list data; if (!Arc::FileRead(dtr_log, data)) { logger.msg(Arc::ERROR, "Can't read transfer states from %s. Perhaps A-REX is not running?", dtr_log); return false; } // format DTR_ID state priority share [destination] // any state but TRANSFERRING is a pending state for (std::list::iterator line = data.begin(); line != data.end(); ++line) { std::vector entries; Arc::tokenize(*line, entries, " "); if (entries.size() < 4 || entries.size() > 6) continue; std::string state = entries[1]; std::string share = entries[3]; bool preparing = (share.find("-download") == share.size()-9); if (state == "TRANSFERRING") { preparing ? share_preparing[share]++ : share_finishing[share]++; } else { preparing ? 
share_preparing_pending[share]++ : share_finishing_pending[share]++; } } return true; } class counters_t { public: unsigned int jobs_num[JOB_STATE_NUM]; const static unsigned int jobs_pending; unsigned int& operator[](int n) { return jobs_num[n]; }; }; const unsigned int counters_t::jobs_pending = 0; static bool match_list(const std::string& arg, std::list& args, bool erase = false) { for(std::list::const_iterator a = args.begin(); a != args.end(); ++a) { if(*a == arg) { //if(erase) args.erase(a); return true; } } return false; } /** * Print info to stdout on users' jobs */ int main(int argc, char* argv[]) { // stderr destination for error messages Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::LongFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); Arc::OptionParser options(" ", istring("gm-jobs displays information on " "current jobs in the system.")); bool long_list = false; options.AddOption('l', "longlist", istring("display more information on each job"), long_list); std::string conf_file; options.AddOption('c', "conffile", istring("use specified configuration file"), istring("file"), conf_file); std::string control_dir; options.AddOption('d', "controldir", istring("read information from specified control directory"), istring("dir"), control_dir); bool show_share = false; options.AddOption('s', "showshares", istring("print summary of jobs in each transfer share"), show_share); bool notshow_jobs = false; options.AddOption('J', "notshowjobs", istring("do not print list of jobs"), notshow_jobs); bool notshow_states = false; options.AddOption('S', "notshowstates", istring("do not print number of jobs in each state"), notshow_states); bool show_service = false; options.AddOption('w', "showservice", istring("print state of the service"), show_service); std::list filter_users; options.AddOption('f', "filteruser", istring("show only jobs of user(s) with specified subject name(s)"), istring("dn"), filter_users); std::list cancel_jobs; options.AddOption('k', "killjob", istring("request to cancel job(s) with specified ID(s)"), istring("id"), cancel_jobs); std::list cancel_users; options.AddOption('K', "killuser", istring("request to cancel jobs belonging to user(s) with specified subject name(s)"), istring("dn"), cancel_users); std::list clean_jobs; options.AddOption('r', "remjob", istring("request to clean job(s) with specified ID(s)"), istring("id"), clean_jobs); std::list clean_users; options.AddOption('R', "remuser", istring("request to clean jobs belonging to user(s) with specified subject name(s)"), istring("dn"), clean_users); std::list filter_jobs; options.AddOption('j', "filterjob", istring("show only jobs with specified ID(s)"), istring("id"), filter_jobs); bool show_delegs = false; options.AddOption('e', "listdelegs", istring("print list of available delegation IDs"), show_delegs); std::list show_deleg_ids; options.AddOption('E', "showdeleg", istring("print delegation token of specified ID(s)"), istring("id"), show_deleg_ids); std::list show_deleg_jobs; options.AddOption('D', "showdelegjob", istring("print main delegation token of specified Job ID(s)"), istring("job id"), show_deleg_jobs); std::string output_file; options.AddOption('o', "output", istring("output requested elements (jobs list, delegation ids and tokens) to file"), istring("file name"), output_file); std::string debug; options.AddOption('x', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); 
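// All recognised options are registered above; Parse() consumes argv and
// returns the remaining positional arguments, which gm-jobs does not use.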
std::list params = options.Parse(argc, argv); // If debug is specified as argument, it should be set as soon as possible if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); if(show_share) { // Why? notshow_jobs=true; notshow_states=true; } GMConfig config; if (!conf_file.empty()) config.SetConfigFile(conf_file); logger.msg(Arc::VERBOSE, "Using configuration at %s", config.ConfigFile()); if(!config.Load()) exit(1); if (!control_dir.empty()) config.SetControlDir(control_dir); config.Print(); DelegationStore::DbType deleg_db_type = DelegationStore::DbSQLite; switch(config.DelegationDBType()) { case GMConfig::deleg_db_bdb: deleg_db_type = DelegationStore::DbBerkeley; break; case GMConfig::deleg_db_sqlite: deleg_db_type = DelegationStore::DbSQLite; break; }; int exit_code = 0; std::ostream* outs = &std::cout; std::ofstream outf; if(!output_file.empty()) { outf.open(output_file.c_str()); if(!outf.is_open()) { logger.msg(Arc::ERROR, "Failed to open output file '%s'", output_file); return -1; }; outs = &outf; } if((!notshow_jobs) || (!notshow_states) || (show_share) || (cancel_users.size() > 0) || (clean_users.size() > 0) || (cancel_jobs.size() > 0) || (clean_jobs.size() > 0)) { logger.msg(Arc::VERBOSE, "Looking for current jobs"); } bool service_alive = false; counters_t counters; counters_t counters_pending; for(int i=0; i cancel_jobs_list; std::list clean_jobs_list; std::list alljobs; if((!notshow_jobs) || (!notshow_states) || (cancel_users.size() > 0) || (clean_users.size() > 0) || (cancel_jobs.size() > 0) || (clean_jobs.size() > 0)) { if(filter_jobs.size() > 0) { for(std::list::iterator id = filter_jobs.begin(); id != filter_jobs.end(); ++id) { GMJobRef jref = JobsList::GetJob(config,*id); if(jref) alljobs.push_back(jref); } } else { if(!JobsList::GetAllJobs(config,alljobs)) exit_code |= 1; } for (std::list::iterator ji=alljobs.begin(); ji!=alljobs.end(); ++ji) { // Collecting information bool pending; GMJobRef i = *ji; if(!i) continue; job_state_t new_state = job_state_read_file(i->get_id(), config, pending); if (new_state == JOB_STATE_UNDEFINED) { logger.msg(Arc::ERROR, "Job: %s : ERROR : Unrecognizable state", i->get_id()); exit_code |= 2; continue; } Arc::Time job_time(job_state_time(i->get_id(), config)); jobs_total++; counters[new_state]++; if (pending) counters_pending[new_state]++; if (i->GetLocalDescription(config) == NULL) { logger.msg(Arc::ERROR, "Job: %s : ERROR : No local information.", i->get_id()); exit_code |= 4; continue; } JobLocalDescription& job_desc = *(i->GetLocalDescription(config)); if(match_list(job_desc.DN,cancel_users)) { cancel_jobs_list.push_back(&(*i)); } if(match_list(i->get_id(),cancel_jobs)) { cancel_jobs_list.push_back(&(*i)); } if(match_list(job_desc.DN,clean_users)) { clean_jobs_list.push_back(&(*i)); } if(match_list(i->get_id(),clean_jobs)) { clean_jobs_list.push_back(&(*i)); } // Printing information if((filter_users.size() > 0) && (!match_list(job_desc.DN,filter_users))) continue; if((!show_share) && (!notshow_jobs)) *outs << "Job: "<get_id(); if(!notshow_jobs) { if (!long_list) { *outs<<" : "< share_preparing; std::map share_preparing_pending; std::map share_finishing; std::map share_finishing_pending; if(!get_data_staging_shares(config, share_preparing, share_preparing_pending, share_finishing, share_finishing_pending)) { exit_code |= 8; } else { *outs<<"\n Preparing/Pending files\tTransfer share"<::iterator i = share_preparing.begin(); i != share_preparing.end(); i++) { *outs<<" 
"<second<<"/"<first]<<"\t\t\t"<first<::iterator i = share_preparing_pending.begin(); i != share_preparing_pending.end(); i++) { if (share_preparing[i->first] == 0) *outs<<" 0/"<first]<<"\t\t\t"<first<::iterator i = share_finishing.begin(); i != share_finishing.end(); i++) { *outs<<" "<second<<"/"<first]<<"\t\t\t"<first<::iterator i = share_finishing_pending.begin(); i != share_finishing_pending.end(); i++) { if (share_finishing[i->first] == 0) *outs<<" 0/"<first]<<"\t\t\t"<first<(i))<<": "< 0)) { ARex::DelegationStore dstore(config.DelegationDir(), deleg_db_type, false); if(!dstore) { exit_code |= 16; } else { std::list > creds = dstore.ListCredIDs(); for(std::list >::iterator cred = creds.begin(); cred != creds.end(); ++cred) { if((filter_users.size() > 0) && (!match_list(cred->second,filter_users))) continue; if(show_delegs) { *outs<<"Delegation: "<first<second< lock_ids; if(dstore.GetLocks(cred->first, cred->second, lock_ids)) { for(std::list::iterator lock = lock_ids.begin(); lock != lock_ids.end(); ++lock) { *outs<<"\tJob: "<<*lock< 0) { // TODO: optimize to avoid full scanning. if(match_list(cred->first,show_deleg_ids)) { std::string tokenpath = dstore.FindCred(cred->first,cred->second); if(!tokenpath.empty()) { std::string token; if(Arc::FileRead(tokenpath,token) && (!token.empty())) { *outs<<"Delegation: "<first<<", "<second< 0) { ARex::DelegationStore dstore(config.DelegationDir(), deleg_db_type, false); if(!dstore) { exit_code |= 32; } else { for(std::list::iterator jobid = show_deleg_jobs.begin(); jobid != show_deleg_jobs.end(); ++jobid) { // Read job's local file to extract delegation id JobLocalDescription job_desc; if(!job_local_read_file(*jobid,config,job_desc)) { exit_code |= 64; } else { std::string token; if(!job_desc.delegationid.empty()) { std::string tokenpath = dstore.FindCred(job_desc.delegationid,job_desc.DN); if(!tokenpath.empty()) { (void)Arc::FileRead(tokenpath,token); } } if(token.empty()) { // fall back to public only part (void)job_proxy_read_file(*jobid,config,token); job_desc.delegationid = "public"; } if(!token.empty()) { *outs<<"Job: "<<*jobid< 0) { for(std::list::iterator job = cancel_jobs_list.begin(); job != cancel_jobs_list.end(); ++job) { if(!job_cancel_mark_put(**job, config)) { logger.msg(Arc::ERROR, "Job: %s : ERROR : Failed to put cancel mark", (*job)->get_id()); exit_code |= 128; } else { if(!ARex::CommFIFO::Signal(config.ControlDir(),(*job)->get_id())) { logger.msg(Arc::WARNING, "Job: %s : Cancel request put but failed to communicate to service", (*job)->get_id()); } else { logger.msg(Arc::INFO, "Job: %s : Cancel request put and communicated to service", (*job)->get_id()); } } } } if(clean_jobs_list.size() > 0) { for(std::list::iterator job = clean_jobs_list.begin(); job != clean_jobs_list.end(); ++job) { // Do not clean job directly because it may have delegations locked. // Instead put clean mark and let A-REX do cleaning properly. 
      if(!job_clean_mark_put(**job, config)) {
        logger.msg(Arc::ERROR, "Job: %s : ERROR : Failed to put clean mark", (*job)->get_id());
        exit_code |= 256;
      } else {
        if(!ARex::CommFIFO::Signal(config.ControlDir(),(*job)->get_id())) {
          logger.msg(Arc::WARNING, "Job: %s : Clean request put but failed to communicate to service", (*job)->get_id());
        } else {
          logger.msg(Arc::INFO, "Job: %s : Clean request put and communicated to service", (*job)->get_id());
        }
      }
    }
  }
  // Cleanly destroy references to avoid error messages
  for (std::list<GMJobRef>::iterator ji=alljobs.begin(); ji!=alljobs.end(); ++ji) ji->Destroy();
  return exit_code;
}
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023372 xustar0030 mtime=1759498967.752606304 30 atime=1759498967.862493605 30 ctime=1759499029.428602368 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/Makefile.am0000644000175000002070000000356315067751327025301 0ustar00mockbuildmock00000000000000
SUBDIRS = accounting jobs run conf misc log mail files

noinst_LTLIBRARIES = libgridmanager.la
pkglibexec_PROGRAMS = gm-kick gm-jobs inputcheck arc-blahp-logger
noinst_PROGRAMS = test_write_grami_file
man_MANS = arc-blahp-logger.8 gm-jobs.8

libgridmanager_la_SOURCES = GridManager.cpp GridManager.h
libgridmanager_la_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
libgridmanager_la_LIBADD = \
	jobs/libjobs.la conf/libconf.la log/liblog.la files/libfiles.la \
	run/librun.la misc/libmisc.la mail/libmail.la \
	$(GLIBMM_LIBS) $(LIBXML2_LIBS) -lpthread

gm_kick_SOURCES = gm_kick.cpp
gm_kick_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
gm_kick_LDADD = libgridmanager.la

gm_jobs_SOURCES = gm_jobs.cpp
gm_jobs_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
gm_jobs_LDADD = libgridmanager.la ../delegation/libdelegation.la

inputcheck_SOURCES = inputcheck.cpp
inputcheck_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
inputcheck_LDADD = libgridmanager.la ../delegation/libdelegation.la \
	$(top_builddir)/src/hed/libs/data/libarcdata.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la

arc_blahp_logger_SOURCES = arc_blahp_logger.cpp
arc_blahp_logger_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
arc_blahp_logger_LDADD = $(top_builddir)/src/hed/libs/credential/libarccredential.la \
	$(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(GLIBMM_LIBS)

test_write_grami_file_SOURCES = test_write_grami_file.cpp
test_write_grami_file_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS)
test_write_grami_file_LDADD = libgridmanager.la ../delegation/libdelegation.la
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/conf0000644000000000000000000000013015067751425022203 xustar0030 mtime=1759499029.605431792 28 atime=1759499034.7655102 30 ctime=1759499029.605431792 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/0000755000175000002070000000000015067751425024164 5ustar00mockbuildmock00000000000000
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327024316 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.590569359 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/Makefile.am0000644000175000002070000000050215067751327026216 0ustar00mockbuildmock00000000000000
noinst_LTLIBRARIES = libconf.la

libconf_la_SOURCES = \
	GMConfig.h GMConfig.cpp \
	CoreConfig.cpp CoreConfig.h \
	UrlMapConfig.cpp UrlMapConfig.h \
	CacheConfig.cpp CacheConfig.h \
	StagingConfig.cpp StagingConfig.h
libconf_la_CXXFLAGS = -I$(top_srcdir)/include \
	$(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS)
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/StagingConfig.h0000644000000000000000000000013115067751327025155 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.606417906 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/StagingConfig.h0000644000175000002070000000756715067751327027073 0ustar00mockbuildmock00000000000000
#ifndef GM_CONF_STAGING_H_
#define GM_CONF_STAGING_H_

#include <string>
#include <vector>
#include <map>

#include <arc/Logger.h>
#include <arc/JobPerfLog.h>

#include "GMConfig.h"

namespace ARex {

class DTRGenerator;

/// Represents configuration of DTR data staging
class StagingConfig {
  friend class DTRGenerator;
public:
  /// Load config from [arex/data-staging] section of configuration file
  StagingConfig(const GMConfig& config);

  operator bool() const { return valid; };
  bool operator!() const { return !valid; };

  int get_max_delivery() const { return max_delivery; };
  int get_max_processor() const { return max_processor; };
  int get_max_emergency() const { return max_emergency; };
  int get_max_prepared() const { return max_prepared; };
  unsigned long long int get_min_speed() const { return min_speed; };
  time_t get_min_speed_time() const { return min_speed_time; };
  unsigned long long int get_min_average_speed() const { return min_average_speed; };
  time_t get_max_inactivity_time() const { return max_inactivity_time; };
  int get_max_retries() const { return max_retries; };
  bool get_passive() const { return passive; };
  bool get_httpgetpartial() const { return httpgetpartial; };
  std::string get_preferred_pattern() const { return preferred_pattern; };
  std::vector<std::string> get_delivery_services() const { return delivery_services; };
  unsigned long long int get_remote_size_limit() const { return remote_size_limit; };
  std::string get_share_type() const { return share_type; };
  std::map<std::string, int> get_defined_shares() const { return defined_shares; };
  bool get_use_host_cert_for_remote_delivery() const { return use_host_cert_for_remote_delivery; };
  Arc::LogLevel get_log_level() const { return log_level; };
  std::string get_dtr_log() const { return dtr_log; };
  std::string get_dtr_central_log() const { return dtr_central_log; };
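  // (Illustrative use, not from the original header: given an already-loaded
  //  GMConfig, the staging limits can be read as
  //
  //    ARex::StagingConfig staging(config);   // parses [arex/data-staging]
  //    if(!staging) { /* configuration was invalid */ }
  //    int delivery_slots = staging.get_max_delivery();
  //  )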
private:
  /// Max transfers in delivery
  int max_delivery;
  /// Max number of pre- and post-processor slots per state
  int max_processor;
  /// Max number of emergency slots
  int max_emergency;
  /// Number of files per share to keep prepared
  int max_prepared;
  /// Minimum speed for transfer over min_speed_time seconds
  unsigned long long int min_speed;
  /// Time over which to calculate min_speed
  time_t min_speed_time;
  /// Minimum average speed for entire transfer
  unsigned long long int min_average_speed;
  /// Maximum time with no transfer activity
  time_t max_inactivity_time;
  /// Max retries for failed transfers that can be retried
  int max_retries;
  /// Whether or not to use passive transfer
  bool passive;
  /// Whether to use partial HTTP GET transfers
  bool httpgetpartial;
  /// Pattern for choosing preferred replicas
  std::string preferred_pattern;
  /// Endpoints of delivery services
  std::vector<std::string> delivery_services;
  /// File size limit (in bytes) below which local transfer should be used
  unsigned long long int remote_size_limit;
  /// Criterion on which to split transfers into shares
  std::string share_type;
  /// The list of shares with defined priorities
  std::map<std::string, int> defined_shares;
  /// Whether to use the host certificate for remote delivery
  bool use_host_cert_for_remote_delivery;
  /// Log level for DTR transfer log in job.id.errors file
  Arc::LogLevel log_level;
  /// where to log DTR state information
  std::string dtr_log;
  /// Log for performance metrics
  Arc::JobPerfLog perf_log;
  /// Central log file for all DTR messages
  std::string dtr_central_log;
  /// Validity of configuration
  bool valid;
  /// Logger object
  static Arc::Logger logger;

  /// Read in params from ini config
  bool readStagingConf(Arc::ConfigFile& cfile);
  /// Convert parameter to integer with minimum value of -1
  bool paramToInt(const std::string& param, int& value);

  StagingConfig();
};

} // namespace ARex

#endif /* GM_CONF_STAGING_H_ */
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355024331 xustar0030 mtime=1759498989.810713477 30 atime=1759499017.920254236 30 ctime=1759499029.591665401 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/Makefile.in0000644000175000002070000007717315067751355026242 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/conf ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libconf_la_LIBADD = am_libconf_la_OBJECTS = libconf_la-GMConfig.lo \ libconf_la-CoreConfig.lo libconf_la-UrlMapConfig.lo \ libconf_la-CacheConfig.lo libconf_la-StagingConfig.lo libconf_la_OBJECTS = $(am_libconf_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libconf_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libconf_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles 
am__depfiles_remade = ./$(DEPDIR)/libconf_la-CacheConfig.Plo \ ./$(DEPDIR)/libconf_la-CoreConfig.Plo \ ./$(DEPDIR)/libconf_la-GMConfig.Plo \ ./$(DEPDIR)/libconf_la-StagingConfig.Plo \ ./$(DEPDIR)/libconf_la-UrlMapConfig.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libconf_la_SOURCES) DIST_SOURCES = $(libconf_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS 
= @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = libconf.la libconf_la_SOURCES = \ GMConfig.h GMConfig.cpp \ CoreConfig.cpp CoreConfig.h \ UrlMapConfig.cpp UrlMapConfig.h \ CacheConfig.cpp CacheConfig.h \ StagingConfig.cpp StagingConfig.h libconf_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/conf/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/conf/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libconf.la: $(libconf_la_OBJECTS) $(libconf_la_DEPENDENCIES) $(EXTRA_libconf_la_DEPENDENCIES) $(AM_V_CXXLD)$(libconf_la_LINK) $(libconf_la_OBJECTS) $(libconf_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-CacheConfig.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-CoreConfig.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-GMConfig.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-StagingConfig.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libconf_la-UrlMapConfig.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libconf_la-GMConfig.lo: GMConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-GMConfig.lo -MD -MP -MF 
$(DEPDIR)/libconf_la-GMConfig.Tpo -c -o libconf_la-GMConfig.lo `test -f 'GMConfig.cpp' || echo '$(srcdir)/'`GMConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libconf_la-GMConfig.Tpo $(DEPDIR)/libconf_la-GMConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='GMConfig.cpp' object='libconf_la-GMConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-GMConfig.lo `test -f 'GMConfig.cpp' || echo '$(srcdir)/'`GMConfig.cpp libconf_la-CoreConfig.lo: CoreConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-CoreConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-CoreConfig.Tpo -c -o libconf_la-CoreConfig.lo `test -f 'CoreConfig.cpp' || echo '$(srcdir)/'`CoreConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libconf_la-CoreConfig.Tpo $(DEPDIR)/libconf_la-CoreConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='CoreConfig.cpp' object='libconf_la-CoreConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-CoreConfig.lo `test -f 'CoreConfig.cpp' || echo '$(srcdir)/'`CoreConfig.cpp libconf_la-UrlMapConfig.lo: UrlMapConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-UrlMapConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-UrlMapConfig.Tpo -c -o libconf_la-UrlMapConfig.lo `test -f 'UrlMapConfig.cpp' || echo '$(srcdir)/'`UrlMapConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libconf_la-UrlMapConfig.Tpo $(DEPDIR)/libconf_la-UrlMapConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='UrlMapConfig.cpp' object='libconf_la-UrlMapConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-UrlMapConfig.lo `test -f 'UrlMapConfig.cpp' || echo '$(srcdir)/'`UrlMapConfig.cpp libconf_la-CacheConfig.lo: CacheConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-CacheConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-CacheConfig.Tpo -c -o libconf_la-CacheConfig.lo `test -f 'CacheConfig.cpp' || echo '$(srcdir)/'`CacheConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libconf_la-CacheConfig.Tpo 
$(DEPDIR)/libconf_la-CacheConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='CacheConfig.cpp' object='libconf_la-CacheConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-CacheConfig.lo `test -f 'CacheConfig.cpp' || echo '$(srcdir)/'`CacheConfig.cpp libconf_la-StagingConfig.lo: StagingConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -MT libconf_la-StagingConfig.lo -MD -MP -MF $(DEPDIR)/libconf_la-StagingConfig.Tpo -c -o libconf_la-StagingConfig.lo `test -f 'StagingConfig.cpp' || echo '$(srcdir)/'`StagingConfig.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libconf_la-StagingConfig.Tpo $(DEPDIR)/libconf_la-StagingConfig.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='StagingConfig.cpp' object='libconf_la-StagingConfig.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libconf_la_CXXFLAGS) $(CXXFLAGS) -c -o libconf_la-StagingConfig.lo `test -f 'StagingConfig.cpp' || echo '$(srcdir)/'`StagingConfig.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ 
esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libconf_la-CacheConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-CoreConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-GMConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-StagingConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-UrlMapConfig.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libconf_la-CacheConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-CoreConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-GMConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-StagingConfig.Plo -rm -f ./$(DEPDIR)/libconf_la-UrlMapConfig.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am 
install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/CacheConfig.cpp0000644000000000000000000000013115067751327025117 xustar0030 mtime=1759498967.752828852 29 atime=1759498967.86349362 30 ctime=1759499029.602242017 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/CacheConfig.cpp0000644000175000002070000001744315067751327027031 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <arc/StringConv.h>
#include <arc/ArcConfigFile.h>
#include <arc/ArcConfigIni.h>

#include "GMConfig.h"
#include "CacheConfig.h"

namespace ARex {

CacheConfig::CacheConfig(const GMConfig& config):
    _cache_max(100),
    _cache_min(100),
    _cleaning_enabled(false),
    _log_file("/var/log/arc/cache-clean.log"),
    _log_level("INFO") ,
    _lifetime("0"),
    _cache_shared(false),
    _clean_timeout(0) {
  // Load conf file
  Arc::ConfigFile cfile;
  if(!cfile.open(config.ConfigFile())) throw CacheConfigException("Can't open configuration file");
  /* detect type of file */
  if (cfile.detect() != Arc::ConfigFile::file_INI) {
    cfile.close();
    throw CacheConfigException("Can't recognize type of configuration file");
  }
  Arc::ConfigIni cf(cfile);
  try {
    parseINIConf(cf);
  } catch (CacheConfigException& e) {
    cfile.close();
    throw;
  }
  cfile.close();
}

void CacheConfig::parseINIConf(Arc::ConfigIni& cf) {
  cf.SetSectionIndicator(".");
  cf.AddSection("arex/cache/cleaner"); // 0
  cf.AddSection("arex/cache");         // 1
  cf.AddSection("arex/ws/cache");      // 2
  for(;;) {
    std::string rest;
    std::string command;
    cf.ReadNext(command,rest);
    if(command.length() == 0) break; // EOF
    if (cf.SectionNum() == 0) { // arex/cache/cleaner
      if (cf.SubSection()[0] == '\0') {
        // The presence of this sub-block enables cleaning
        _cleaning_enabled = true;
        if(command == "cachesize") {
          std::string max_s = Arc::ConfigIni::NextArg(rest);
          if(max_s.length() == 0) continue;
          std::string min_s = Arc::ConfigIni::NextArg(rest);
          if(min_s.length() == 0) throw CacheConfigException("Not enough parameters in cachesize parameter");
          float max_i;
          if(!Arc::stringto(max_s,max_i)) throw CacheConfigException("bad number in cachesize parameter");
          if (max_i > 100 || max_i < 0) throw CacheConfigException("max cache size must be between 0 and 100");
          _cache_max = max_i;
          float min_i;
          if(!Arc::stringto(min_s,min_i)) throw CacheConfigException("bad number in cachesize parameter");
          if (min_i > 100 || min_i < 0) throw CacheConfigException("min cache size must be between 0 and 100");
          if (min_i >= max_i) throw CacheConfigException("max cache size must be greater than min size");
          _cache_min = min_i;
        }
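        // (Example of the [arex/cache/cleaner] options handled in this chain;
        //  the values are illustrative only:
        //
        //    [arex/cache/cleaner]   # presence of the block enables cleaning
        //    cachesize = 80 70      # clean when 80% full, down to 70%
        //    loglevel = 3           # mapped to INFO by the switch below
        //  )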
"ERROR"; }; break; case 2: { _log_level = "WARNING"; }; break; case 3: { _log_level = "INFO"; }; break; case 4: { _log_level = "VERBOSE"; }; break; case 5: { _log_level = "DEBUG"; }; break; default: { _log_level = "INFO"; }; break; } } else if(command == "cachelifetime") { std::string lifetime = Arc::ConfigIni::NextArg(rest); if(lifetime.length() != 0) { _lifetime = lifetime; } } else if(command == "calculatesize") { std::string cache_shared = Arc::ConfigIni::NextArg(rest); if (cache_shared == "cachedir") { _cache_shared = true; } else if (cache_shared != "filesystem") { throw CacheConfigException("Bad value in cacheshared parameter: only 'cachedir' or 'filesystem' allowed"); } } else if (command == "cachespacetool") { _cache_space_tool = rest; } else if (command == "cachecleantimeout") { std::string timeout = Arc::ConfigIni::NextArg(rest); if(timeout.length() == 0) continue; if(!Arc::stringto(timeout, _clean_timeout)) throw CacheConfigException("bad number in cachecleantimeout parameter"); } } } else if (cf.SectionNum() == 1) { // arex/cache if (cf.SubSection()[0] == '\0') { if(command == "cachedir") { std::string cache_dir = Arc::ConfigIni::NextArg(rest); if(cache_dir.length() == 0) continue; // cache is disabled std::string cache_link_dir = Arc::ConfigIni::NextArg(rest); // validation of paths while (cache_dir.length() > 1 && cache_dir.rfind("/") == cache_dir.length()-1) cache_dir = cache_dir.substr(0, cache_dir.length()-1); if (cache_dir[0] != '/') throw CacheConfigException("Cache path must start with '/'"); if (cache_dir.find("..") != std::string::npos) throw CacheConfigException("Cache path cannot contain '..'"); if (!cache_link_dir.empty() && cache_link_dir != "." && cache_link_dir != "drain" && cache_link_dir != "readonly") { while (cache_link_dir.rfind("/") == cache_link_dir.length()-1) cache_link_dir = cache_link_dir.substr(0, cache_link_dir.length()-1); if (cache_link_dir[0] != '/') throw CacheConfigException("Cache link path must start with '/'"); if (cache_link_dir.find("..") != std::string::npos) throw CacheConfigException("Cache link path cannot contain '..'"); } // check if the cache dir needs to be drained or is read-only if (cache_link_dir == "drain") { _draining_cache_dirs.push_back(cache_dir); } else if (cache_link_dir == "readonly") { _readonly_cache_dirs.push_back(cache_dir); } else { if (!cache_link_dir.empty()) { cache_dir += " "+cache_link_dir; } _cache_dirs.push_back(cache_dir); } } } } else if (cf.SectionNum() == 2) { // arex/ws/cache if (cf.SubSection()[0] == '\0') { if (command == "cacheaccess") { Arc::RegularExpression regexp(Arc::ConfigIni::NextArg(rest)); if (!regexp.isOk()) throw CacheConfigException("Bad regexp " + regexp.getPattern() + " in cacheaccess"); std::string cred_type(Arc::ConfigIni::NextArg(rest)); if (cred_type.empty()) throw CacheConfigException("Missing credential type in cacheaccess"); Arc::RegularExpression cred_value(rest); if (!cred_value.isOk()) throw CacheConfigException("Missing credential value in cacheaccess"); struct CacheAccess ca; ca.regexp = regexp; ca.cred_type = cred_type; ca.cred_value = cred_value; _cache_access.push_back(ca); } } } } } void CacheConfig::substitute(const GMConfig& config, const Arc::User& user) { for (std::vector::iterator i = _cache_dirs.begin(); i != _cache_dirs.end(); ++i) { config.Substitute(*i, user); } for (std::vector::iterator i = _draining_cache_dirs.begin(); i != _draining_cache_dirs.end(); ++i) { config.Substitute(*i, user); } for (std::vector::iterator i = _readonly_cache_dirs.begin(); i != 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/GMConfig.h0000644000000000000000000000013115067751327024064 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.594432898 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/GMConfig.h0000644000175000002070000003713315067751327025776 0ustar00mockbuildmock00000000000000
#ifndef GMCONFIG_H_
#define GMCONFIG_H_

#include <string>
#include <list>

#include <arc/User.h>
#include <arc/Logger.h>
#include <arc/JobPerfLog.h>

#include "CacheConfig.h"

namespace ARex {

// Forward declarations for classes for which this is just a container
class JobLog;
class JobsMetrics;
class HeartBeatMetrics;
class SpaceMetrics;
class ContinuationPlugins;
class DelegationStores;
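// (Illustrative sketch, not from the original header, of the substitution
//  flow described in the class comment below; the path and the %U token are
//  example values:
//
//    std::string sessiondir = "/scratch/sessions/%U"; // raw value from arc.conf
//    config.Substitute(sessiondir, mapped_user);      // expand per-user tokens
//
//  Accessors of this class return the raw, unsubstituted values.)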
/// Configuration information related to the grid manager part of A-REX.
/**
 * This class contains all configuration variables related to grid-manager. It
 * also acts as a container for objects which are used in different parts of
 * A-REX. Therefore, since this class contains pointers to complex objects, it
 * cannot be copied and hence the copy constructor and assignment operator are
 * private. Those pointers should be managed outside this class. GMConfig
 * should be instantiated once when the grid-manager is initialised and only
 * destroyed when the GM has finished. Ideally this would be a singleton but
 * that would prevent running multiple A-REXes in the same container.
 *
 * Substitutions are not done while parsing the configuration, as
 * substitution variables can change depending on the job. Therefore paths
 * are stored in their raw format, unsubstituted. The exceptions are the
 * control directory, which cannot change and is substituted during parsing,
 * and helper options. Substitution of other variables should be done as
 * necessary using Substitute().
 */
class GMConfig {
  // Configuration parser which sets values for members of this class
  friend class CoreConfig;
  // Parser of data-staging configuration which uses this class' values as default
  friend class StagingConfig;
public:
  /// Different options for fixing directories
  enum fixdir_t { fixdir_always, fixdir_missing, fixdir_never };
  enum deleg_db_t { deleg_db_bdb, deleg_db_sqlite };
  /// Returns configuration file as guessed.
  /**
   * Guessing uses $ARC_CONFIG, $ARC_LOCATION/etc/arc.conf or the default
   * location /etc/arc.conf.
   */
  static std::string GuessConfigFile();
  /// Use given (or guessed if not given) configuration file.
  /**
   * Load() should then be used to parse the configuration and fill member
   * variables.
   * @param conffile Path to configuration file, will be guessed if empty
   */
  GMConfig(const std::string& conffile="");
  /// Load configuration from file into members of this object.
  /// Returns false if errors are found during parsing.
  bool Load();
  /// Print a summary of configuration to stderr
  void Print() const;
  /// Get path to configuration file
  const std::string& ConfigFile() const { return conffile; }
  /// Set path to configuration file
  void SetConfigFile(const std::string& file) { conffile = file; }
  /// Returns true if configuration file is temporary
  bool ConfigIsTemp() const { return conffile_is_temp; }
  /// Sets whether configuration file is temporary
  void SetConfigIsTemp(bool temp) { conffile_is_temp = temp; }
  /// Create control structure with permissions depending on fixdir_t value.
  /// Typically called at A-REX service creation.
  bool CreateControlDirectory() const;
  /// Update control structure from older version to current one.
  bool UpdateControlDirectory() const;
  /// Create session directory with correct permissions. Typically called when
  /// a new job is created and after all substitutions have been done. Creates
  /// session root if it does not already exist.
  bool CreateSessionDirectory(const std::string& dir, const Arc::User& user) const;
  /// Substitute characters in param specified by % with real values. An
  /// optional User can be specified for the user-related substitutions.
  bool Substitute(std::string& param, bool& userSubs, bool& otherSubs, const Arc::User& user=Arc::User()) const;
  bool Substitute(std::string& param, const Arc::User& user=Arc::User()) const {
    bool userSubs; bool otherSubs;
    return Substitute(param, userSubs, otherSubs, user);
  }
  /// Set control directory
  void SetControlDir(const std::string &dir);
  /// Set session root dir
  void SetSessionRoot(const std::string &dir);
  /// Set multiple session root dirs
  void SetSessionRoot(const std::vector<std::string> &dirs);
  /// Set uid and gids used by other processes sharing information with A-REX
  void SetShareID(const Arc::User& share_user);
  /// Set default queue
  void SetDefaultQueue(const std::string& queue) { default_queue = queue; }
  /// Certificates directory location
  const std::string& CertDir() const { return cert_dir; }
  /// VOMS lsc files root directory location
  const std::string& VomsDir() const { return voms_dir; }
  /// Location of RTE setup scripts
  const std::string& RTEDir() const { return rte_dir; }
  /// Directory storing delegations
  std::string DelegationDir() const;
  /// Database type to use for delegation storage
  deleg_db_t DelegationDBType() const;
  /// Helper(s) log file path
  const std::string& HelperLog() const { return helper_log; }
  /// email address of person responsible for this ARC installation
  const std::string& SupportMailAddress() const { return support_email_address; }
  /// Set JobLog object
  void SetJobLog(JobLog* log) { job_log = log; }
  /// Set JobPerfLog object
  void SetJobPerfLog(Arc::JobPerfLog* log) { job_perf_log = log; }
  /// Set JobsMetrics object
  void SetJobsMetrics(JobsMetrics* metrics) { jobs_metrics = metrics; }
  /// Set HeartBeatMetrics object
  void SetHeartBeatMetrics(HeartBeatMetrics* metrics) { heartbeat_metrics = metrics; }
  /// Set SpaceMetrics object
  void SetSpaceMetrics(SpaceMetrics* metrics) { space_metrics = metrics; }
  /// Set ContinuationPlugins (plugins run at state transitions)
  void SetContPlugins(ContinuationPlugins* plugins) { cont_plugins = plugins; }
  /// Set DelegationStores object
  void SetDelegations(ARex::DelegationStores* stores) { delegations = stores; }
  /// JobLog object
  JobLog* GetJobLog() const { return job_log; }
  /// JobsMetrics object
  JobsMetrics* GetJobsMetrics() const { return jobs_metrics; }
  /// HeartBeatMetrics object
  HeartBeatMetrics* GetHeartBeatMetrics() const { return heartbeat_metrics; }
  /// SpaceMetrics object
  SpaceMetrics* GetSpaceMetrics() const { return space_metrics; }
  /// JobPerfLog object
  Arc::JobPerfLog* GetJobPerfLog() const { return job_perf_log; }
  /// Plugins run at state transitions
  ContinuationPlugins* GetContPlugins() const { return cont_plugins; }
  /// DelegationsStores object
  ARex::DelegationStores* GetDelegations() const { return delegations; }
  /// Control directory
  const std::string & ControlDir() const { return control_dir; }
  /// Session root directory corresponding to given job ID. If the session
  /// dir corresponding to job_id is not found an empty string is returned.
  std::string SessionRoot(const std::string& job_id) const;
  /// Session directories
  const std::vector<std::string> & SessionRoots() const { return session_roots; }
  /// Session directories that can be used for new jobs
  const std::vector<std::string> & SessionRootsNonDraining() const { return session_roots_non_draining; }
  /// Base scratch directory for job execution on node
  const std::string & ScratchDir() const { return scratch_dir; }
  /// Whether access to session dir must be performed under mapped uid
  bool StrictSession() const { return strict_session; }
  /// Path to information file in control dir
  std::string InformationFile(void) const { return ControlDir()+G_DIR_SEPARATOR_S+"info.xml"; }
  /// Cache configuration
  const CacheConfig & CacheParams() const { return cache_params; }
  /// URL of cluster's headnode
  const std::string & HeadNode() const { return headnode; }
  /// Whether ARC (BES) WS-interface is enabled
  bool ARCInterfaceEnabled() const { return enable_arc_interface; }
  /// Whether EMI-ES interface is enabled
  bool EMIESInterfaceEnabled() const { return enable_emies_interface; }
  /// GridFTP job interface endpoint
  const std::string & GridFTPEndpoint() const { return gridftp_endpoint; }
  /// A-REX WS-interface job submission endpoint
  const std::string & AREXEndpoint() const { return arex_endpoint; }
  /// Default LRMS
  const std::string & DefaultLRMS() const { return default_lrms; }
  /// Default queue
  const std::string & DefaultQueue() const { return default_queue; }
  /// Default benchmark
  const std::string & DefaultBenchmark() const { return default_benchmark; }
  /// All configured queues
  const std::list<std::string>& Queues() const { return queues; }
  /// Username of user running A-REX
  const std::string & UnixName() const { return gm_user.Name(); }
  /// Returns true if submission of new jobs is allowed
  bool AllowNew() const { return allow_new; }
  /// Groups allowed to submit when general job submission is disabled
  const std::list<std::string> & AllowSubmit() const { return allow_submit; }
  /// Length of time to keep session dir after job finishes
  time_t KeepFinished() const { return keep_finished; }
  /// Length of time to keep control information after job finishes
  time_t KeepDeleted() const { return keep_deleted; }
  /// Maximum number of job re-runs allowed
  int Reruns() const { return reruns; }
  /// Maximum size for job description
  int MaxJobDescSize() const { return maxjobdesc; }
  /// Strategy for fixing directories
  fixdir_t FixDirectories() const { return fixdir; }
  /// Maximum time for A-REX to wait between job processing loops
  unsigned int WakeupPeriod() const { return wakeup_period; }
  const std::list<std::string> & Helpers() const { return helpers; }
  /// Max jobs being processed (from PREPARING to FINISHING)
  int MaxJobs() const { return max_jobs; };
  /// Max jobs in the LRMS
  int MaxRunning() const { return max_jobs_running; }
  /// Max jobs being processed per-DN
  int MaxPerDN() const { return max_jobs_per_dn; }
  /// Max total jobs in the system
  int MaxTotal() const { return max_jobs_total; }
  /// Max submit/cancel scripts
  int MaxScripts() const { return max_scripts; }
  /// Returns true if the shared uid matches the given uid
  bool MatchShareUid(uid_t suid) const { return ((share_uid==0) || (share_uid==suid)); };
  /// Returns true if any of the shared gids matches the given gid
  bool MatchShareGid(gid_t sgid) const;
  /// Returns true if id token's claims from the WLCG profile are to be treated as VOMS attributes
  bool WLCGtoVOMS() const;
  /// Returns forced VOMS attributes for users which have none.
  /// If queue is not specified value for server is returned.
  const std::string & ForcedVOMS(const char * queue = "") const;
  /// Returns list of authorized VOs for specified queue
  const std::list<std::string> & AuthorizedVOs(const char * queue) const;
  /// Returns list of authorization groups for specified queue.
  /// If queue is not specified value for server is returned.
  const std::list<std::pair<bool,std::string> > & MatchingGroups(const char * queue = "") const;
  /// Returns list of authorization groups for public information.
  const std::list<std::pair<bool,std::string> > & MatchingGroupsPublicInformation() const;
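  // (Illustrative, not from the original header: each returned entry pairs an
  //  allow/deny mark with a group name, so queue-level allowaccess/denyaccess
  //  rules surface roughly as {(true,"students"), (false,"blocked")}; the
  //  group names are example values.)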
/// Returns forced VOMS attributes for users which have none.
  /// If queue is not specified value for server is returned.
  const std::string & ForcedVOMS(const char * queue = "") const;
  /// Returns list of authorized VOs for specified queue
  const std::list<std::string> & AuthorizedVOs(const char * queue) const;
  /// Returns list of authorization groups for specified queue.
  /// If queue is not specified value for server is returned.
  const std::list<std::pair<bool,std::string> > & MatchingGroups(const char * queue = "") const;
  /// Returns list of authorization groups for public information.
  const std::list<std::pair<bool,std::string> > & MatchingGroupsPublicInformation() const;
  /// Returns configured scopes for specified action.
  const std::list<std::string> & TokenScopes(const char* action) const;
  /// Whether session, runtime and cache directories are accessed over sshfs
  bool UseSSH() const { return sshfs_mounts_enabled; }
  /// Check if remote directory is mounted
  bool SSHFS_OK(const std::string& mount_point) const;
 private:
  /// Configuration file
  std::string conffile;
  /// Whether configuration file is temporary
  bool conffile_is_temp;
  /// For logging job information to external logging service
  JobLog* job_log;
  /// For reporting jobs metric to ganglia
  JobsMetrics* jobs_metrics;
  /// For reporting heartbeat metric to ganglia
  HeartBeatMetrics* heartbeat_metrics;
  /// For reporting free space metric to ganglia
  SpaceMetrics* space_metrics;
  /// For logging performance/profiling information
  Arc::JobPerfLog* job_perf_log;
  /// Plugins run at certain state changes
  ContinuationPlugins* cont_plugins;
  /// Delegated credentials stored by A-REX
  // TODO: this should go away after proper locking in DelegationStore is implemented
  ARex::DelegationStores* delegations;
  /// Certificates directory
  std::string cert_dir;
  /// VOMS LSC files directory
  std::string voms_dir;
  /// RTE directory
  std::string rte_dir;
  /// email address for support
  std::string support_email_address;
  /// helper(s) log path
  std::string helper_log;
  /// Scratch directory
  std::string scratch_dir;
  /// Directory where files describing jobs are stored
  std::string control_dir;
  /// Root directories under which per-job session directories are created
  std::vector<std::string> session_roots;
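  // Editor's note (illustration, not part of the original header): when
  // several session roots are configured, GMConfig::SessionRoot(job_id) in
  // GMConfig.cpp below picks the root that already holds a directory named
  // after the job, roughly:
  //   struct stat st;
  //   for (std::vector<std::string>::const_iterator i = session_roots.begin();
  //        i != session_roots.end(); ++i)
  //     if (stat((*i + '/' + job_id).c_str(), &st) == 0 && S_ISDIR(st.st_mode))
  //       return *i;
  //   return "";   // not found -> empty string
  // session_roots_non_draining (next member) is the subset still accepting
  // new jobs, i.e. sessiondir entries not marked "drain" in arc.conf.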
/// Session directories allowed for new jobs (i.e. not draining)
  std::vector<std::string> session_roots_non_draining;
  /// Cache information
  CacheConfig cache_params;
  /// URL of the cluster's headnode
  std::string headnode;
  /// Default LRMS and queue to use
  std::string default_lrms;
  std::string default_queue;
  /// Default benchmark to store in AAR
  std::string default_benchmark;
  /// All configured queues
  std::list<std::string> queues;
  /// User running A-REX
  Arc::User gm_user;
  /// uid and gid(s) running other ARC processes that share files with A-REX
  uid_t share_uid;
  std::list<gid_t> share_gids;
  /// How long jobs are kept after job finished (keep_finished) or deleted (keep_deleted)
  time_t keep_finished;
  time_t keep_deleted;
  /// Whether session must always be accessed under mapped user's uid
  bool strict_session;
  /// Strategy for fixing directories
  fixdir_t fixdir;
  /// Maximal number of times a job is allowed to be rerun
  int reruns;
  /// Maximal size of job description
  int maxjobdesc;
  /// Whether submission of new jobs is allowed
  bool allow_new;
  /// Maximum time for A-REX to wait between job processing loops
  unsigned int wakeup_period;
  /// Groups allowed to submit while job submission is disabled
  std::list<std::string> allow_submit;
  /// List of associated external processes
  std::list<std::string> helpers;
  /// Maximum jobs in the LRMS
  int max_jobs_running;
  /// Maximum total jobs in the system, including FINISHED and DELETED
  int max_jobs_total;
  /// Maximum number of jobs being processed (between PREPARING and FINISHING)
  int max_jobs;
  /// Maximum jobs running per DN
  int max_jobs_per_dn;
  /// Maximum submit/cancel scripts running
  int max_scripts;
  /// Whether WS-interface is enabled
  bool enable_arc_interface;
  /// Whether EMI-ES interface is enabled
  bool enable_emies_interface;
  /// GridFTP job endpoint
  std::string gridftp_endpoint;
  /// WS-interface endpoint
  std::string arex_endpoint;
  /// Delegation db type
  deleg_db_t deleg_db;
  /// Whether ID token claims from the WLCG profile are to be treated as VOMS attributes
  bool wlcg_to_voms;
  /// Forced VOMS attribute for non-VOMS credentials per queue
  std::map<std::string, std::string> forced_voms;
  /// VOs authorized per queue
  std::map<std::string, std::list<std::string> > authorized_vos;
  /// groups allowed per queue with allow/deny mark (true/false)
  std::map<std::string, std::list<std::pair<bool,std::string> > > matching_groups;
  /// groups allowed to access public information with allow/deny mark (true/false)
  std::list<std::pair<bool,std::string> > matching_groups_publicinfo;
  /// Token scopes required per action
  std::map<std::string, std::list<std::string> > token_scopes;
  /// Indicates whether session, runtime and cache dirs are mounted through sshfs (only supported by Python backends)
  bool sshfs_mounts_enabled;
  /// Logger object
  static Arc::Logger logger;
  /// Set defaults for all configuration parameters. Called by constructors.
  void SetDefaults();
  /// Assignment operator and copy constructor are private to prevent copying.
GMConfig& operator=(const GMConfig& conf); GMConfig(const GMConfig& conf); }; } // namespace ARex #endif /* GMCONFIG_H_ */ nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/CacheConfig.h0000644000000000000000000000013115067751327024564 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.603629918 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/CacheConfig.h0000644000175000002070000000667515067751327026505 0ustar00mockbuildmock00000000000000#ifndef __GM_CONFIG_CACHE_H__ #define __GM_CONFIG_CACHE_H__ #include #include #include namespace ARex { class GMConfig; /** * Exception thrown by constructor caused by bad cache params in conf file */ class CacheConfigException : public std::exception { private: std::string _desc; public: CacheConfigException(std::string desc = ""): _desc(desc) {}; ~CacheConfigException() throw() {}; virtual const char* what() const throw() {return _desc.c_str();}; }; /** * Reads conf file and provides methods to obtain cache info from it. * Methods of this class may throw CacheConfigException. */ class CacheConfig { public: /// A struct defining a URL pattern and credentials which can access it struct CacheAccess { Arc::RegularExpression regexp; std::string cred_type; Arc::RegularExpression cred_value; }; private: /** * List of (cache dir [link dir]) */ std::vector _cache_dirs; float _cache_max; float _cache_min; /** * Whether automatic cleaning is enabled */ bool _cleaning_enabled; /** * Cache directories that are needed to be drained **/ std::vector _draining_cache_dirs; /** * Cache directories that are read-only **/ std::vector _readonly_cache_dirs; /** * Logfile for cache cleaning messages */ std::string _log_file; /** * cache-clean log level */ std::string _log_level; /** * Lifetime of files in cache */ std::string _lifetime; /** * Whether the cache is shared with other data on the file system */ bool _cache_shared; /** * User-specified tool for getting space information for cleaning tool */ std::string _cache_space_tool; /** * Timeout for cleaning process */ int _clean_timeout; /** * List of CacheAccess structs describing who can access what URLs in cache */ std::list _cache_access; /** * Parsers for the two different conf styles */ void parseINIConf(Arc::ConfigIni& cf); public: /** * Create a new CacheConfig instance. Read the config file and fill in * private member variables with cache parameters. 
*/ CacheConfig(const GMConfig& config); /** * Empty CacheConfig */ CacheConfig(): _cache_max(0), _cache_min(0), _cleaning_enabled(false), _cache_shared(false), _clean_timeout(0) {}; std::vector getCacheDirs() const { return _cache_dirs; }; std::vector getDrainingCacheDirs() const { return _draining_cache_dirs; }; std::vector getReadOnlyCacheDirs() const { return _readonly_cache_dirs; }; /// Substitute all cache paths, with information given in user if necessary void substitute(const GMConfig& config, const Arc::User& user); float getCacheMax() const { return _cache_max; }; float getCacheMin() const { return _cache_min; }; bool cleanCache() const { return _cleaning_enabled; }; std::string getLogFile() const { return _log_file; }; std::string getLogLevel() const { return _log_level; }; std::string getLifeTime() const { return _lifetime; }; bool getCacheShared() const { return _cache_shared; }; std::string getCacheSpaceTool() const { return _cache_space_tool; }; int getCleanTimeout() const { return _clean_timeout; }; const std::list& getCacheAccess() const { return _cache_access; }; }; } // namespace ARex #endif /*__GM_CONFIG_CACHE_H__*/ nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/StagingConfig.cpp0000644000000000000000000000013115067751327025510 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.604431777 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/StagingConfig.cpp0000644000175000002070000001353115067751327027416 0ustar00mockbuildmock00000000000000#include #include #include #include "StagingConfig.h" namespace ARex { Arc::Logger StagingConfig::logger(Arc::Logger::getRootLogger(), "StagingConfig"); StagingConfig::StagingConfig(const GMConfig& config): max_delivery(10), max_processor(10), max_emergency(1), max_prepared(200), min_speed(0), min_speed_time(300), min_average_speed(0), max_inactivity_time(300), max_retries(10), passive(true), httpgetpartial(false), remote_size_limit(0), use_host_cert_for_remote_delivery(false), log_level(Arc::Logger::getRootLogger().getThreshold()), dtr_log(config.ControlDir()+"/dtr.state"), valid(true) { perf_log.SetOutput("/var/log/arc/perfdata/data.perflog"); Arc::ConfigFile cfile; if (!cfile.open(config.ConfigFile())) { logger.msg(Arc::ERROR, "Can't read configuration file"); valid = false; return; } // check type of file if (cfile.detect() != Arc::ConfigFile::file_INI) { logger.msg(Arc::ERROR, "Can't recognize type of configuration file"); valid = false; cfile.close(); return; } if (!readStagingConf(cfile)) { logger.msg(Arc::ERROR, "Configuration error"); valid = false; } cfile.close(); } bool StagingConfig::readStagingConf(Arc::ConfigFile& cfile) { Arc::ConfigIni cf(cfile); static const int common_perflog_secnum = 0; cf.AddSection("common/perflog"); cf.AddSection("arex/data-staging"); for(;;) { std::string rest; std::string command; cf.ReadNext(command,rest); if (command.empty()) break; // eof if (cf.SectionNum() == common_perflog_secnum) { // common/perflog if (cf.SubSection()[0] == '\0') { perf_log.SetEnabled(true); if (command == "perflogdir") { perf_log.SetOutput(rest + "/data.perflog"); } } continue; } if (command == "maxdelivery") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_delivery)) { logger.msg(Arc::ERROR, "Bad number in maxdelivery"); return false; } } else if (command == "maxemergency") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_emergency)) { logger.msg(Arc::ERROR, "Bad number in maxemergency"); return false; } } else if (command == "maxprocessor") { if 
(!paramToInt(Arc::ConfigIni::NextArg(rest), max_processor)) { logger.msg(Arc::ERROR, "Bad number in maxprocessor"); return false; } } else if (command == "maxprepared") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_prepared) || max_prepared <= 0) { logger.msg(Arc::ERROR, "Bad number in maxprepared"); return false; } } else if (command == "maxtransfertries") { if (!paramToInt(Arc::ConfigIni::NextArg(rest), max_retries)) { logger.msg(Arc::ERROR, "Bad number in maxtransfertries"); return false; } } else if (command == "speedcontrol") { if (rest.empty()) { min_speed = min_speed_time = min_average_speed = max_inactivity_time = 0; } else if (!Arc::stringto(Arc::ConfigIni::NextArg(rest), min_speed) || !Arc::stringto(Arc::ConfigIni::NextArg(rest), min_speed_time) || !Arc::stringto(Arc::ConfigIni::NextArg(rest), min_average_speed) || !Arc::stringto(Arc::ConfigIni::NextArg(rest), max_inactivity_time)) { logger.msg(Arc::ERROR, "Bad number in speedcontrol"); return false; } } else if (command == "sharepolicy") { share_type = Arc::ConfigIni::NextArg(rest); } else if (command == "sharepriority") { std::string share = Arc::ConfigIni::NextArg(rest); int priority = 0; if (!paramToInt(Arc::ConfigIni::NextArg(rest), priority) || priority <= 0) { logger.msg(Arc::ERROR, "Bad number in definedshare %s", share); return false; } defined_shares[share] = priority; } else if (command == "deliveryservice") { std::string url = rest; Arc::URL u(url); if (!u) { logger.msg(Arc::ERROR, "Bad URL in deliveryservice: %s", url); return false; } delivery_services.push_back(u); } else if (command == "localdelivery") { std::string use_local = Arc::ConfigIni::NextArg(rest); if (use_local == "yes") delivery_services.push_back(Arc::URL("file:/local")); } else if (command == "remotesizelimit") { if (!Arc::stringto(Arc::ConfigIni::NextArg(rest), remote_size_limit)) { logger.msg(Arc::ERROR, "Bad number in remotesizelimit"); return false; } } else if (command == "passivetransfer") { std::string pasv = Arc::ConfigIni::NextArg(rest); if (pasv == "yes") passive = true; else passive = false; } else if (command == "httpgetpartial") { std::string partial = Arc::ConfigIni::NextArg(rest); if (partial == "yes") httpgetpartial = true; else httpgetpartial = false; } else if (command == "preferredpattern") { preferred_pattern = rest; } else if (command == "usehostcert") { std::string use_host_cert = Arc::ConfigIni::NextArg(rest); if (use_host_cert == "yes") use_host_cert_for_remote_delivery = true; else use_host_cert_for_remote_delivery = false; } else if (command == "loglevel") { unsigned int level; if (!Arc::strtoint(Arc::ConfigIni::NextArg(rest), level)) { logger.msg(Arc::ERROR, "Bad value for loglevel"); return false; } log_level = Arc::old_level_to_level(level); } else if (command == "statefile") { dtr_log = rest; } else if (command == "logfile") { dtr_central_log = rest; } } return true; } bool StagingConfig::paramToInt(const std::string& param, int& value) { int i; if (!Arc::stringto(param, i)) return false; if (i < 0) i=-1; value = i; return true; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/UrlMapConfig.h0000644000000000000000000000013115067751327024761 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.601150309 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/UrlMapConfig.h0000644000175000002070000000065615067751327026673 0ustar00mockbuildmock00000000000000#ifndef __GM_CONFIG_MAP_H__ #define __GM_CONFIG_MAP_H__ #include #include "GMConfig.h" 
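// Editor's note (illustration, not part of the original header): UrlMapConfig
// feeds Arc::URLMap from the [arex/data-staging] block of arc.conf. Lines like
//   copyurl = gsiftp://example.org/data /mnt/data
//   linkurl = gsiftp://example.org/soft /opt/soft
// (example values only) end up as URLMap::add() calls, roughly:
//   map.add("gsiftp://example.org/data", "/mnt/data");              // copy locally
//   map.add("gsiftp://example.org/soft", "/opt/soft", "/opt/soft"); // link; third arg is access path
// as can be seen in UrlMapConfig.cpp further below.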
namespace ARex {
/* See URLMap.h for functionality. This object automatically reads the
   configuration file and fills the list of mappings for URLMap. */
class UrlMapConfig: public Arc::URLMap {
 public:
  UrlMapConfig(const GMConfig& config);
  ~UrlMapConfig(void);
};
} // namespace ARex
#endif // __GM_CONFIG_MAP_H__
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/CoreConfig.h0000644000000000000000000000013015067751327024450 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 29 ctime=1759499029.59838941 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/CoreConfig.h0000644000175000002070000000142415067751327026355 0ustar00mockbuildmock00000000000000#ifndef __GM_CORE_CONFIG_H__
#define __GM_CORE_CONFIG_H__
#include
#include
namespace ARex {
class GMConfig;
/// Parses configuration and fills GMConfig with information
class CoreConfig {
 public:
  /// Parse config
  static bool ParseConf(GMConfig& config);
 private:
  /// Parse ini-style config from stream cfile
  static bool ParseConfINI(GMConfig& config, Arc::ConfigFile& cfile);
  /// Function to check that LRMS scripts are available
  static void CheckLRMSBackends(const std::string& default_lrms);
  /// Function to handle yes/no config commands
  static bool CheckYesNoCommand(bool& config_param, const std::string& name, std::string& rest);
  /// Logger
  static Arc::Logger logger;
};
} // namespace ARex
#endif // __GM_CORE_CONFIG_H__
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/UrlMapConfig.cpp0000644000000000000000000000013115067751327025314 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.599824092 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/UrlMapConfig.cpp0000644000175000002070000000327415067751327027225 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H
#include
#endif
#include
#include
#include
#include
#include
#include
#include "UrlMapConfig.h"
namespace ARex {
static Arc::Logger& glogger = Arc::Logger::getRootLogger();
UrlMapConfig::UrlMapConfig(const GMConfig& config) {
  Arc::ConfigFile cfile;
  if (!cfile.open(config.ConfigFile())) {
    glogger.msg(Arc::ERROR,"Can't open configuration file");
    return;
  }
  if (cfile.detect() != Arc::ConfigFile::file_INI) {
    glogger.msg(Arc::ERROR,"Can't recognize type of configuration file");
    cfile.close();
    return;
  }
  Arc::ConfigIni cf(cfile);
  cf.AddSection("arex/data-staging");
  for (;;) {
    std::string rest;
    std::string command;
    cf.ReadNext(command, rest);
    if (command.empty()) break;
    if (command == "copyurl") {
      std::string initial = Arc::ConfigIni::NextArg(rest);
      std::string replacement = rest;
      if ((initial.length() == 0) || (replacement.length() == 0)) {
        glogger.msg(Arc::ERROR,"Not enough parameters in copyurl");
        continue;
      }
      add(initial,replacement);
    } else if (command == "linkurl") {
      std::string initial = Arc::ConfigIni::NextArg(rest);
      std::string replacement = Arc::ConfigIni::NextArg(rest);
      if ((initial.length() == 0) || (replacement.length() == 0)) {
        glogger.msg(Arc::ERROR,"Not enough parameters in linkurl");
        continue;
      }
      std::string access = rest;
      if (access.length() == 0) access = replacement;
      add(initial,replacement,access);
    }
  }
  cfile.close();
}
UrlMapConfig::~UrlMapConfig(void) {
}
} // namespace ARex
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/GMConfig.cpp0000644000000000000000000000013115067751327024417 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.595734039
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/GMConfig.cpp0000644000175000002070000003645015067751327026332 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include // ::SSHFS_OK, check device #ifdef _MACOSX #include #include #else #include // ::SSHFS_OK, check file system #endif #include #include #include #include #include "CoreConfig.h" #include "GMConfig.h" namespace ARex { // Defaults // default job ttl after finished - 1 week #define DEFAULT_KEEP_FINISHED (7*24*60*60) // default job ttr after deleted - 1 month #define DEFAULT_KEEP_DELETED (30*24*60*60) // default maximal allowed amount of reruns #define DEFAULT_JOB_RERUNS (5) // default maximal size of job description #define DEFAULT_MAX_JOB_DESC (5*1024*1024) // default wake up period for main job loop #define DEFAULT_WAKE_UP (600) Arc::Logger GMConfig::logger(Arc::Logger::getRootLogger(), "GMConfig"); static std::string empty_string(""); static std::list empty_string_list; static std::list > empty_group_list; std::string GMConfig::GuessConfigFile() { struct stat st; std::string file = Arc::GetEnv("ARC_CONFIG"); if(!file.empty()) { return file; // enforced location } file = Arc::ArcLocation::Get() + "/etc/arc.conf"; if (Arc::FileStat(file, &st, true)) { return file; } file = "/etc/arc.conf"; if (Arc::FileStat(file, &st, true)) { return file; } return ""; } GMConfig::GMConfig(const std::string& conf): conffile(conf) { SetDefaults(); // If no config file was given, guess it. The order to try is // $ARC_CONFIG, $ARC_LOCATION/etc/arc.conf, /etc/arc.conf if (conffile.empty()) { conffile = GuessConfigFile(); } } void GMConfig::SetDefaults() { conffile_is_temp = false; job_log = NULL; jobs_metrics = NULL; heartbeat_metrics = NULL; space_metrics = NULL; job_perf_log = NULL; cont_plugins = NULL; delegations = NULL; share_uid = 0; keep_finished = DEFAULT_KEEP_FINISHED; keep_deleted = DEFAULT_KEEP_DELETED; strict_session = false; fixdir = fixdir_always; reruns = DEFAULT_JOB_RERUNS; maxjobdesc = DEFAULT_MAX_JOB_DESC; wakeup_period = DEFAULT_WAKE_UP; allow_new = true; max_jobs_running = -1; max_jobs_total = -1; max_jobs = -1; max_jobs_per_dn = -1; max_scripts = -1; deleg_db = deleg_db_sqlite; enable_arc_interface = false; enable_emies_interface = false; cert_dir = Arc::GetEnv("X509_CERT_DIR"); voms_dir = Arc::GetEnv("X509_VOMS_DIR"); sshfs_mounts_enabled = false; wlcg_to_voms = false; } bool GMConfig::Load() { // Call CoreConfig (CoreConfig.h) to fill values in this object return CoreConfig::ParseConf(*this); } void GMConfig::Print() const { for(std::vector::const_iterator i = session_roots.begin(); i != session_roots.end(); ++i) logger.msg(Arc::INFO, "\tSession root dir : %s", *i); logger.msg(Arc::INFO, "\tControl dir : %s", control_dir); logger.msg(Arc::INFO, "\tdefault LRMS : %s", default_lrms); logger.msg(Arc::INFO, "\tdefault queue : %s", default_queue); logger.msg(Arc::INFO, "\tdefault ttl : %u", keep_finished); std::vector conf_caches = cache_params.getCacheDirs(); std::vector readonly_caches = cache_params.getReadOnlyCacheDirs(); if(conf_caches.empty() && readonly_caches.empty()) { logger.msg(Arc::INFO,"No valid caches found in configuration, caching is disabled"); return; } // list each cache for (std::vector::iterator i = conf_caches.begin(); i != conf_caches.end(); i++) { logger.msg(Arc::INFO, "\tCache : %s", (*i).substr(0, (*i).find(" "))); if ((*i).find(" ") != std::string::npos) logger.msg(Arc::INFO, "\tCache link dir : %s", (*i).substr((*i).find_last_of(" ")+1, 
(*i).length()-(*i).find_last_of(" ")+1)); } for (std::vector::iterator i = readonly_caches.begin(); i != readonly_caches.end(); i++) { logger.msg(Arc::INFO, "\tCache (read-only): %s", *i); } if (cache_params.cleanCache()) logger.msg(Arc::INFO, "\tCache cleaning enabled"); else logger.msg(Arc::INFO, "\tCache cleaning disabled"); } void GMConfig::SetControlDir(const std::string &dir) { if (dir.empty()) control_dir = gm_user.Home() + "/.jobstatus"; else control_dir = dir; } void GMConfig::SetSessionRoot(const std::string &dir) { session_roots.clear(); if (dir.empty() || dir == "*") session_roots.push_back(gm_user.Home() + "/.jobs"); else session_roots.push_back(dir); } void GMConfig::SetSessionRoot(const std::vector &dirs) { session_roots.clear(); if (dirs.empty()) { std::string dir; SetSessionRoot(dir); } else { for (std::vector::const_iterator i = dirs.begin(); i != dirs.end(); i++) { if (*i == "*") session_roots.push_back(gm_user.Home() + "/.jobs"); else session_roots.push_back(*i); } } } std::string GMConfig::SessionRoot(const std::string& job_id) const { if (session_roots.empty()) return empty_string; if (session_roots.size() == 1 || job_id.empty()) return session_roots[0]; // search for this jobid's session dir struct stat st; for (std::vector::const_iterator i = session_roots.begin(); i != session_roots.end(); i++) { std::string sessiondir(*i + '/' + job_id); if (stat(sessiondir.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) return *i; } return empty_string; // not found } static bool fix_directory(const std::string& path, GMConfig::fixdir_t fixmode, mode_t mode, uid_t uid, gid_t gid) { if (fixmode == GMConfig::fixdir_never) { struct stat st; if (!Arc::FileStat(path, &st, true)) return false; if (!S_ISDIR(st.st_mode)) return false; return true; } else if(fixmode == GMConfig::fixdir_missing) { struct stat st; if (Arc::FileStat(path, &st, true)) { if (!S_ISDIR(st.st_mode)) return false; return true; } } // GMConfig::fixdir_always if (!Arc::DirCreate(path, mode, true)) return false; // Only can switch owner if running as root if (getuid() == 0) if (chown(path.c_str(), uid, gid) != 0) return false; if (chmod(path.c_str(), mode) != 0) return false; return true; } bool GMConfig::CreateControlDirectory() const { bool res = true; if (!control_dir.empty()) { mode_t mode = 0; if (gm_user.get_uid() == 0) { // This control dir serves multiple users and running // as root (hence really can serve multiple users) mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; } else { mode = S_IRWXU; } if (!fix_directory(control_dir, fixdir, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; // Structure inside control dir is important - *always* create it // Directories containing logs and job states may need access from // information system, etc. So allowing them to be more open. // Delegation is only accessed by service itself. 
if (!fix_directory(control_dir+"/logs", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/accepting", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/restarting", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/processing", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; if (!fix_directory(control_dir+"/finished", fixdir_always, mode, gm_user.get_uid(), gm_user.get_gid())) res = false; std::string deleg_dir = DelegationDir(); if (!fix_directory(deleg_dir, fixdir_always, S_IRWXU, gm_user.get_uid(), gm_user.get_gid())) res = false; } return res; } class LogData: public Arc::Run::Data { public: LogData(Arc::Logger& logger, Arc::LogLevel level): logger(logger), level(level) {} virtual ~LogData() {}; virtual void Append(char const* data, unsigned int size) { if(data && size) { logger.msg(level, "%s", std::string(data,size)); } }; virtual void Remove(unsigned int size) {}; virtual char const* Get() const { return nullptr; }; virtual unsigned int Size() const { return 0; }; private: Arc::Logger& logger; Arc::LogLevel level; }; bool GMConfig::UpdateControlDirectory() const { bool res = true; if (!control_dir.empty()) { // We have dedicated external tool for updating controldir std::list args; args.push_back(Arc::ArcLocation::GetDataDir()+"/update-controldir"); args.push_back(control_dir); LogData run_stdout(logger, Arc::INFO); LogData run_stderr(logger, Arc::ERROR); Arc::Run run(args); run.AssignStdout(run_stdout); run.AssignStderr(run_stderr); logger.msg(Arc::INFO, "Starting controldir update tool."); if(!run.Start()) { logger.msg(Arc::ERROR, "Failed to start controldir update tool."); res = false; } else if(!run.Wait()) { logger.msg(Arc::ERROR, "Failed to run controldir update tool. 
Exit code: %i", run.Result()); res = false; } } return res; } bool GMConfig::CreateSessionDirectory(const std::string& dir, const Arc::User& user) const { // First just try to create per-job dir, assuming session root already exists if (gm_user.get_uid() != 0) { if (Arc::DirCreate(dir, S_IRWXU, false)) return true; } else if (strict_session) { if (Arc::DirCreate(dir, user.get_uid(), user.get_gid(), S_IRWXU, false)) return true; } else { if (Arc::DirCreate(dir, S_IRWXU, false)) return (chown(dir.c_str(), user.get_uid(), user.get_gid()) == 0); } // Creation failed so try to create session root and try again std::string session_root(dir.substr(0, dir.rfind('/'))); if (session_root.empty()) return false; mode_t mode = 0; if (gm_user.get_uid() == 0) { if (strict_session) { // For multiple users creating immediate subdirs using own account // dangerous permissions, but there is no other option mode = S_IRWXU | S_IRWXG | S_IRWXO | S_ISVTX; } else { // For multiple users not creating immediate subdirs using own account mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; } } else { // For single user mode = S_IRWXU; } if (!fix_directory(session_root, fixdir, mode, gm_user.get_uid(), gm_user.get_gid())) return false; // Try per-job dir again if (gm_user.get_uid() != 0) { return Arc::DirCreate(dir, S_IRWXU, false); } else if (strict_session) { return Arc::DirCreate(dir, user.get_uid(), user.get_gid(), S_IRWXU, false); } else { if (!Arc::DirCreate(dir, S_IRWXU, false)) return false; return (chown(dir.c_str(), user.get_uid(), user.get_gid()) == 0); } } std::string GMConfig::DelegationDir() const { std::string deleg_dir = control_dir+"/delegations"; uid_t u = gm_user.get_uid(); if (u == 0) return deleg_dir; struct passwd pwbuf; char buf[4096]; struct passwd* pw; if (::getpwuid_r(u, &pwbuf, buf, sizeof(buf), &pw) == 0) { if (pw && pw->pw_name) { deleg_dir+="."; deleg_dir+=pw->pw_name; } } return deleg_dir; } GMConfig::deleg_db_t GMConfig::DelegationDBType() const { return deleg_db; } bool GMConfig::WLCGtoVOMS() const { return wlcg_to_voms; } const std::string & GMConfig::ForcedVOMS(const char * queue) const { std::map::const_iterator pos = forced_voms.find(queue); return (pos == forced_voms.end()) ? empty_string : pos->second; } const std::list & GMConfig::AuthorizedVOs(const char * queue) const { std::map >::const_iterator pos = authorized_vos.find(queue); return (pos == authorized_vos.end()) ? empty_string_list : pos->second; } const std::list > & GMConfig::MatchingGroups(const char * queue) const { std::map > >::const_iterator pos = matching_groups.find(queue); return (pos == matching_groups.end()) ? 
empty_group_list : pos->second;
}
const std::list<std::pair<bool,std::string> > & GMConfig::MatchingGroupsPublicInformation() const {
  return matching_groups_publicinfo;
}
const std::list<std::string> & GMConfig::TokenScopes(const char * action) const {
  if(!action) return empty_string_list;
  std::map<std::string, std::list<std::string> >::const_iterator it = token_scopes.find(action);
  if(it == token_scopes.end()) return empty_string_list;
  return it->second;
}
bool GMConfig::Substitute(std::string& param, bool& userSubs, bool& otherSubs, const Arc::User& user) const {
  std::string::size_type curpos = 0;
  userSubs = false; otherSubs = false;
  for (;;) {
    if (curpos >= param.length()) break;
    std::string::size_type pos = param.find('%', curpos);
    if (pos == std::string::npos) break;
    pos++; if (pos >= param.length()) break;
    if (param[pos] == '%') { curpos=pos+1; continue; };
    std::string to_put;
    switch (param[pos]) {
      case 'R': to_put = SessionRoot(""); otherSubs = true; break; // First session dir will be used if there are multiple
      case 'C': to_put = ControlDir(); otherSubs = true; break;
      case 'U': to_put = user.Name(); userSubs = true; break;
      case 'H': to_put = user.Home(); userSubs = true; break;
      case 'Q': to_put = DefaultQueue(); otherSubs = true; break;
      case 'L': to_put = DefaultLRMS(); otherSubs = true; break;
      case 'u': to_put = Arc::tostring(user.get_uid()); userSubs = true; break;
      case 'g': to_put = Arc::tostring(user.get_gid()); userSubs = true; break;
      case 'W': to_put = Arc::ArcLocation::Get(); otherSubs = true; break;
      case 'F': to_put = conffile; otherSubs = true; break;
      case 'G':
        logger.msg(Arc::ERROR, "Globus location variable substitution is not supported anymore. Please specify path directly.");
        break;
      default: to_put = param.substr(pos-1, 2); break;
    }
    curpos = pos+1+(to_put.length() - 2);
    param.replace(pos-1, 2, to_put);
  }
  return true;
}
void GMConfig::SetShareID(const Arc::User& share_user) {
  share_uid = share_user.get_uid();
  share_gids.clear();
  if (share_uid <= 0) return;
  struct passwd pwd_buf;
  struct passwd* pwd = NULL;
#ifdef _SC_GETPW_R_SIZE_MAX
  int buflen = sysconf(_SC_GETPW_R_SIZE_MAX);
  if (buflen <= 0) buflen = 16384;
#else
  int buflen = 16384;
#endif
  char* buf = (char*)malloc(buflen);
  if (!buf) return;
  if (getpwuid_r(share_uid, &pwd_buf, buf, buflen, &pwd) == 0) {
    if (pwd) {
#ifdef HAVE_GETGROUPLIST
#ifdef _MACOSX
      int groups[100];
#else
      gid_t groups[100];
#endif
      int ngroups = 100;
      if (getgrouplist(pwd->pw_name, pwd->pw_gid, groups, &ngroups) >= 0) {
        for (int n = 0; n < ngroups; ++n) share_gids.push_back((gid_t)(groups[n]));
      }
#endif
      share_gids.push_back(pwd->pw_gid);
    }
  }
  free(buf);
}
bool GMConfig::MatchShareGid(gid_t sgid) const {
  for (std::list<gid_t>::const_iterator i = share_gids.begin(); i != share_gids.end(); ++i) {
    if (sgid == *i) return true;
  }
  return false;
}
bool GMConfig::SSHFS_OK(const std::string& mount_point) const {
  struct stat st;
  struct stat st_root;
  stat(mount_point.c_str(), &st);
  stat(mount_point.substr(0, mount_point.rfind('/')).c_str(), &st_root);
  // rootdir and dir on different devices?
  if (st.st_dev != st_root.st_dev) {
    struct statfs stfs;
    statfs(mount_point.c_str(), &stfs);
    // dir is also a fuse fs? (0x65735546 is the FUSE filesystem magic, FUSE_SUPER_MAGIC)
    return stfs.f_type == 0x65735546;
  }
  return false;
}
} // namespace ARex
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/README0000644000000000000000000000013115067751327023142 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.592805073 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/README0000644000175000002070000000003215067751327025040 0ustar00mockbuildmock00000000000000configuration processing.
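The CoreConfig parser in the next file is built on the Arc::ConfigIni scanner. Below is a minimal, self-contained sketch of that pattern (editor's example, not part of the archive): the ConfigFile/ConfigIni calls and the [lrms] "lrms" option are taken from the sources in this listing, while the include paths and the helper function itself are assumptions for illustration.

#include <string>
#include <arc/ArcConfigFile.h>
#include <arc/ArcConfigIni.h>

// Register sections of interest, then pull (command, rest) pairs until EOF;
// SectionNum() reports which registered section the current command came from.
static bool read_default_lrms(const std::string& conffile, std::string& lrms) {
  Arc::ConfigFile cfile;
  if (!cfile.open(conffile)) return false;
  Arc::ConfigIni cf(cfile);
  cf.AddSection("lrms");                        // section number 0
  for (;;) {
    std::string command, rest;
    cf.ReadNext(command, rest);
    if (command.empty()) break;                 // end of file
    if ((cf.SectionNum() == 0) && (command == "lrms"))
      lrms = Arc::ConfigIni::NextArg(rest);     // first token of the value
  }
  cfile.close();
  return !lrms.empty();
}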
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/PaxHeaders/CoreConfig.cpp0000644000000000000000000000013115067751327025004 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.597062653 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/conf/CoreConfig.cpp0000644000175000002070000006451615067751327026723 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "../jobs/ContinuationPlugins.h" #include "../log/JobLog.h" #include "../log/JobsMetrics.h" #include "../log/HeartBeatMetrics.h" #include "../log/SpaceMetrics.h" #include "../jobs/JobsList.h" #include "CacheConfig.h" #include "GMConfig.h" #include "CoreConfig.h" #include "glibmm-compat.h" namespace ARex { Arc::Logger CoreConfig::logger(Arc::Logger::getRootLogger(), "CoreConfig"); void CoreConfig::CheckLRMSBackends(const std::string& default_lrms) { std::string tool_path; tool_path=Arc::ArcLocation::GetDataDir()+"/cancel-"+default_lrms+"-job"; if(!Glib::file_test(tool_path,Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::WARNING,"Missing cancel-%s-job - job cancellation may not work",default_lrms); } tool_path=Arc::ArcLocation::GetDataDir()+"/submit-"+default_lrms+"-job"; if(!Glib::file_test(tool_path,Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::WARNING,"Missing submit-%s-job - job submission to LRMS may not work",default_lrms); } tool_path=Arc::ArcLocation::GetDataDir()+"/scan-"+default_lrms+"-job"; if(!Glib::file_test(tool_path,Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::WARNING,"Missing scan-%s-job - may miss when job finished executing",default_lrms); } } bool CoreConfig::CheckYesNoCommand(bool& config_param, const std::string& name, std::string& rest) { std::string s = Arc::ConfigIni::NextArg(rest); if (s == "yes") { config_param = true; } else if(s == "no") { config_param = false; } else { logger.msg(Arc::ERROR, "Wrong option in %s", name); return false; } return true; } bool CoreConfig::ParseConf(GMConfig& config) { if (!config.conffile.empty()) { Arc::ConfigFile cfile; if (!cfile.open(config.conffile)) { logger.msg(Arc::ERROR, "Can't read configuration file at %s", config.conffile); return false; } // detect type of file Arc::ConfigFile::file_type type = cfile.detect(); if (type == Arc::ConfigFile::file_INI) { bool result = ParseConfINI(config, cfile); cfile.close(); return result; } logger.msg(Arc::ERROR, "Can't recognize type of configuration file at %s", config.conffile); return false; } logger.msg(Arc::ERROR, "Could not determine configuration type or configuration is empty"); return false; } bool CoreConfig::ParseConfINI(GMConfig& config, Arc::ConfigFile& cfile) { // List of helper commands that will be substituted after all configuration is read std::list helpers; std::string jobreport_publisher; bool helper_log_is_set = false; bool job_log_log_is_set = false; Arc::ConfigIni cf(cfile); cf.SetSectionIndicator("."); static const int perflog_secnum = 0; cf.AddSection("common/perflog"); static const int common_secnum = 1; cf.AddSection("common"); static const int ganglia_secnum = 2; cf.AddSection("arex/ganglia"); static const int emies_secnum = 3; cf.AddSection("arex/ws/jobs"); static const int publicinfo_secnum = 4; cf.AddSection("infosys/accesscontrol"); static const int ws_secnum = 5; cf.AddSection("arex/ws"); static const int jura_secnum = 6; cf.AddSection("arex/jura"); static const int gm_secnum = 7; cf.AddSection("arex"); static const int cluster_secnum = 8; 
cf.AddSection("infosys/cluster"); static const int infosys_secnum = 9; cf.AddSection("infosys"); static const int queue_secnum = 10; cf.AddSection("queue"); static const int ssh_secnum = 11; cf.AddSection("lrms/ssh"); static const int lrms_secnum = 12; cf.AddSection("lrms"); if (config.job_perf_log) { config.job_perf_log->SetEnabled(false); config.job_perf_log->SetOutput("/var/log/arc/perfdata/arex.perflog"); } // process configuration information here for(;;) { std::string rest; std::string command; cf.ReadNext(command, rest); if (command.empty()) { // EOF break; } if (cf.SectionNum() == common_secnum) { // common if (cf.SubSection()[0] == '\0') { if(command == "x509_voms_dir") { config.voms_dir = rest; }; }; continue; }; if (cf.SectionNum() == perflog_secnum) { // common/perflog if (cf.SubSection()[0] == '\0') { if(config.job_perf_log) config.job_perf_log->SetEnabled(true); if (command == "perflogdir") { // if (!config.job_perf_log) continue; std::string fname = rest; // empty is allowed too if(!fname.empty()) fname += "/arex.perflog"; config.job_perf_log->SetOutput(fname.c_str()); } }; continue; }; if (cf.SectionNum() == lrms_secnum) { // lrms if (cf.SubSection()[0] == '\0') { if (command == "lrms") { // default lrms type and queue (optional) std::string default_lrms = Arc::ConfigIni::NextArg(rest); if (default_lrms.empty()) { logger.msg(Arc::ERROR, "lrms is empty"); return false; } if (default_lrms == "slurm") { // allow lower case slurm in config default_lrms = "SLURM"; } config.default_lrms = default_lrms; std::string default_queue = Arc::ConfigIni::NextArg(rest); if (!default_queue.empty()) { config.default_queue = default_queue; } CheckLRMSBackends(default_lrms); } else if (command == "benchmark") { std::string default_benchmark = rest; if (!default_benchmark.empty()) { config.default_benchmark = default_benchmark; } }; }; continue; }; if (cf.SectionNum() == gm_secnum) { // arex if (cf.SubSection()[0] == '\0') { if (command == "user") { config.SetShareID(Arc::User(rest)); } else if (command == "runtimedir") { config.rte_dir = rest; } else if (command == "maxjobs") { // maximum number of the jobs to support std::string max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) { logger.msg(Arc::ERROR, "Missing number in maxjobs"); return false; } if (!Arc::stringto(max_jobs_s, config.max_jobs)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs < 0) config.max_jobs = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) { logger.msg(Arc::ERROR, "Missing number in maxjobs"); return false; } if (!Arc::stringto(max_jobs_s, config.max_jobs_running)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs_running < 0) config.max_jobs_running = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) { logger.msg(Arc::ERROR, "Missing number in maxjobs"); return false; } if (!Arc::stringto(max_jobs_s, config.max_jobs_per_dn)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs_per_dn < 0) config.max_jobs_per_dn = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if (max_jobs_s.empty()) { logger.msg(Arc::ERROR, "Missing number in maxjobs"); return false; } if (!Arc::stringto(max_jobs_s, config.max_jobs_total)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_jobs_total < 0) config.max_jobs_total = -1; max_jobs_s = Arc::ConfigIni::NextArg(rest); if 
(max_jobs_s.empty()) { logger.msg(Arc::ERROR, "Missing number in maxjobs"); return false; } if (!Arc::stringto(max_jobs_s, config.max_scripts)) { logger.msg(Arc::ERROR, "Wrong number in maxjobs: %s", max_jobs_s); return false; } if (config.max_scripts < 0) config.max_scripts = -1; } else if(command == "norootpower") { if (!CheckYesNoCommand(config.strict_session, command, rest)) return false; } else if (command == "wakeupperiod") { std::string wakeup_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(wakeup_s, config.wakeup_period)) { logger.msg(Arc::ERROR,"Wrong number in wakeupperiod: %s",wakeup_s); return false; } } else if (command == "mail") { // internal address from which to send mail config.support_email_address = rest; if (config.support_email_address.empty()) { logger.msg(Arc::ERROR, "mail parameter is empty"); return false; } } else if (command == "defaultttl") { // time to keep job after finished std::string default_ttl_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(default_ttl_s, config.keep_finished)) { logger.msg(Arc::ERROR, "Wrong number in defaultttl command"); return false; } default_ttl_s = Arc::ConfigIni::NextArg(rest); if (!default_ttl_s.empty() && !Arc::stringto(default_ttl_s, config.keep_deleted)) { logger.msg(Arc::ERROR, "Wrong number in defaultttl command"); return false; } } else if (command == "maxrerun") { // number of retries allowed std::string default_reruns_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(default_reruns_s, config.reruns)) { logger.msg(Arc::ERROR, "Wrong number in maxrerun command"); return false; } } else if (command == "statecallout") { // set plugin to be called on state changes if (!config.cont_plugins) continue; std::string state_name = Arc::ConfigIni::NextArg(rest); if (state_name.empty()) { logger.msg(Arc::ERROR, "State name for plugin is missing"); return false; } std::string options_s = Arc::ConfigIni::NextArg(rest); if (options_s.empty()) { logger.msg(Arc::ERROR, "Options for plugin are missing"); return false; } if (!config.cont_plugins->add(state_name.c_str(), options_s.c_str(), rest.c_str())) { logger.msg(Arc::ERROR, "Failed to register plugin for state %s", state_name); return false; } } else if (command == "sessiondir") { // set session root directory std::string session_root = Arc::ConfigIni::NextArg(rest); if (session_root.empty()) { logger.msg(Arc::ERROR, "Session root directory is missing"); return false; } if (rest.length() != 0 && rest != "drain") { logger.msg(Arc::ERROR, "Junk in sessiondir command"); return false; } if (session_root == "*") { // special value which uses each user's home area session_root = "%H/.jobs"; } config.session_roots.push_back(session_root); if (rest != "drain") config.session_roots_non_draining.push_back(session_root); } else if (command == "controldir") { std::string control_dir = rest; if (control_dir.empty()) { logger.msg(Arc::ERROR, "Missing directory in controldir command"); return false; } config.control_dir = control_dir; } else if (command == "control") { logger.msg(Arc::WARNING, "'control' configuration option is no longer supported, please use 'controldir' instead"); } else if (command == "helper") { std::string helper_user = Arc::ConfigIni::NextArg(rest); if (helper_user.empty()) { logger.msg(Arc::ERROR, "User for helper program is missing"); return false; } if (helper_user != ".") { logger.msg(Arc::ERROR, "Only user '.' 
for helper program is supported"); return false; } if (rest.empty()) { logger.msg(Arc::ERROR, "Helper program is missing"); return false; } helpers.push_back(rest); } else if (command == "helperlog") { config.helper_log = rest; // empty is allowed helper_log_is_set = true; } else if (command == "fixdirectories") { std::string s = Arc::ConfigIni::NextArg(rest); if (s == "yes") { config.fixdir = GMConfig::fixdir_always; } else if (s == "missing") { config.fixdir = GMConfig::fixdir_missing; } else if (s == "no") { config.fixdir = GMConfig::fixdir_never; } else { logger.msg(Arc::ERROR, "Wrong option in fixdirectories"); return false; } } else if (command == "scratchdir") { std::string scratch = rest; // don't set if already set by shared_scratch if (config.scratch_dir.empty()) config.scratch_dir = scratch; } else if (command == "shared_scratch") { std::string scratch = rest; config.scratch_dir = scratch; } else if (command == "joblog") { // where to write job information if (config.job_log) { std::string fname = rest; // empty is allowed too config.job_log->SetOutput(fname.c_str()); } } else if (command == "delegationdb") { std::string s = Arc::ConfigIni::NextArg(rest); if (s == "bdb") { config.deleg_db = GMConfig::deleg_db_bdb; } else if (s == "sqlite") { config.deleg_db = GMConfig::deleg_db_sqlite; } else { logger.msg(Arc::ERROR, "Wrong option in delegationdb"); return false; }; } else if (command == "usetokenforvoms") { if (!CheckYesNoCommand(config.wlcg_to_voms, command, rest)) return false; } else if (command == "forcedefaultvoms") { std::string str = rest; if (str.empty()) { logger.msg(Arc::ERROR, "forcedefaultvoms parameter is empty"); return false; } config.forced_voms[""] = str; } else if (command == "tokenscopes") { std::string str = rest; std::list pairs; Arc::tokenize(str,pairs,","); for(std::list::iterator pair = pairs.begin(); pair != pairs.end(); ++pair) { std::string::size_type pos = pair->find('='); if(pos != std::string::npos) { // action=scope config.token_scopes[Arc::trim(pair->substr(0,pos))].push_back(Arc::trim(pair->substr(pos+1))); } else { // shortcut std::string shortcut = Arc::trim(*pair); if(shortcut == "wlcg") { config.token_scopes["jobinfo"].push_back("compute.read"); config.token_scopes["jobcreate"].push_back("compute.create"); config.token_scopes["jobcancel"].push_back("compute.cancel"); config.token_scopes["jobdelete"].push_back("compute.cancel"); config.token_scopes["datainfo"].push_back("compute.read"); config.token_scopes["datawrite"].push_back("compute.modify"); config.token_scopes["dataread"].push_back("compute.read"); } } } } else if (command == "authtokenmap") { if (config.job_log) { std::list pairs; Arc::tokenize(rest, pairs, ","); for(std::list::iterator pair = pairs.begin(); pair != pairs.end(); ++pair) { std::string::size_type seppos = pair->find(':'); if(seppos != std::string::npos) { config.job_log->AddTokenMap(pair->substr(0,seppos),pair->substr(seppos+1)); } } } } /* #infoproviders_timelimit Currently information provider timeout is not implemented, hence no need to read this option. 
*/ }; continue; }; if (cf.SectionNum() == ganglia_secnum) { // arex/ganglia if (cf.SubSection()[0] == '\0') { if (!config.jobs_metrics) continue; if (command == "gmetric_bin_path") { std::string fname = rest; // empty is not allowed, if not filled in arc.conf default value is used config.jobs_metrics->SetGmetricPath(fname.c_str()); config.heartbeat_metrics->SetGmetricPath(fname.c_str()); config.space_metrics->SetGmetricPath(fname.c_str()); } else if (command == "metrics") { std::list metrics; Arc::tokenize(rest, metrics, ","); for(std::list::iterator m = metrics.begin(); m != metrics.end(); ++m) { std::string metric = Arc::trim(*m); if((metric == "jobstates") || (metric == "all")) { config.jobs_metrics->SetEnabled(true); }; if((metric == "heartbeat") || (metric == "all")){ config.heartbeat_metrics->SetEnabled(true); }; if((metric == "cache") || (metric == "all")){ config.space_metrics->SetEnabled(true); }; }; }; }; continue; }; if (cf.SectionNum() == ws_secnum) { // arex/ws if (cf.SubSection()[0] == '\0') { if(command == "wsurl") { config.arex_endpoint = rest; }; }; continue; }; if (cf.SectionNum() == emies_secnum) { // arex/ws/jobs if (cf.SubSection()[0] == '\0') { config.enable_emies_interface = true; config.enable_arc_interface = true; // so far if (command == "allownew") { bool enable = false; if (!CheckYesNoCommand(enable, command, rest)) return false; config.allow_new = enable; } else if (command == "allownew_override") { while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.allow_submit.push_back(str); }; }; } else if (command == "maxjobdesc") { std::string maxjobdesc_s = Arc::ConfigIni::NextArg(rest); if (!Arc::stringto(maxjobdesc_s, config.maxjobdesc)) { logger.msg(Arc::ERROR, "Wrong number in maxjobdesc command"); return false; } } else if (command == "allowaccess") { while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.matching_groups[""].push_back(std::pair(true,str)); }; }; } else if (command == "denyaccess") { while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.matching_groups[""].push_back(std::pair(false,str)); }; }; }; }; continue; }; if (cf.SectionNum() == publicinfo_secnum) { // arex/ws/publicinfo if (cf.SubSection()[0] == '\0') { if (command == "allowaccess") { while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.matching_groups_publicinfo.push_back(std::pair(true,str)); }; }; } else if (command == "denyaccess") { while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.matching_groups_publicinfo.push_back(std::pair(false,str)); }; }; }; }; continue; }; if (cf.SectionNum() == jura_secnum) { // arex/jura if (cf.SubSection()[0] == '\0') { jobreport_publisher = "jura-ng"; if (command == "logfile") { if (config.job_log) { std::string logfile = rest; if (logfile.empty()) { logger.msg(Arc::ERROR, "Missing file name in [arex/jura] logfile"); return false; }; config.job_log->SetReporterLogFile(logfile.c_str()); job_log_log_is_set = true; }; } else if (command == "urdelivery_frequency") { if (config.job_log) { std::string period_s = Arc::ConfigIni::NextArg(rest); unsigned int period = 0; if (!Arc::stringto(period_s, period)) { logger.msg(Arc::ERROR, "Wrong number in urdelivery_frequency: %s", period_s); return false; } config.job_log->SetReporterPeriod(period); } } else if (command == "x509_host_key") { if (config.job_log) { std::string jobreport_key = rest; 
config.job_log->SetCredentials(jobreport_key, "", ""); } } else if (command == "x509_host_cert") { if (config.job_log) { std::string jobreport_cert = rest; config.job_log->SetCredentials("", jobreport_cert, ""); } } else if (command == "x509_cert_dir") { if (config.job_log) { std::string jobreport_cadir = rest; config.job_log->SetCredentials("", "", jobreport_cadir); } } else if (command == "vomsless_vo") { if (config.job_log) { std::string vomsless_vo = rest; std::string authgroup; std::string::size_type seppos = vomsless_vo.find(' '); if(seppos != std::string::npos) { authgroup = vomsless_vo.substr(0, seppos); vomsless_vo.erase(0, seppos+1); } config.job_log->AddVomslessVo(authgroup, vomsless_vo); } } }; continue; }; if (cf.SectionNum() == infosys_secnum) { // infosys - looking for user name to get share uid /* if (cf.SubSection()[0] == '\0') { if (command == "user") { config.SetShareID(Arc::User(rest)); }; }; */ continue; } if (cf.SectionNum() == queue_secnum) { // queue if (cf.SubSection()[0] == '\0') { if (cf.SectionNew()) { std::string name = cf.SectionIdentifier(); if (name.empty()) { logger.msg(Arc::ERROR, "No queue name given in queue block name"); return false; } config.queues.push_back(name); } if (command == "forcedefaultvoms") { std::string str = rest; if (str.empty()) { logger.msg(Arc::ERROR, "forcedefaultvoms parameter is empty"); return false; } if (!config.queues.empty()) { std::string queue_name = *(--config.queues.end()); config.forced_voms[queue_name] = str; } } else if (command == "advertisedvo") { std::string str = rest; if (str.empty()) { logger.msg(Arc::ERROR, "advertisedvo parameter is empty"); return false; } if (!config.queues.empty()) { std::string queue_name = *(--config.queues.end()); config.authorized_vos[queue_name].push_back(str); } } else if (command == "allowaccess") { if (!config.queues.empty()) { std::string queue_name = *(--config.queues.end()); while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.matching_groups[queue_name].push_back(std::pair(true,str)); } } } } else if (command == "denyaccess") { if (!config.queues.empty()) { std::string queue_name = *(--config.queues.end()); while(!rest.empty()) { std::string str = Arc::ConfigIni::NextArg(rest); if(!str.empty()) { config.matching_groups[queue_name].push_back(std::pair(false,str)); } } } } } continue; } if (cf.SectionNum() == cluster_secnum) { // cluster if (cf.SubSection()[0] == '\0') { if (command == "advertisedvo") { std::string str = rest; if (str.empty()) { logger.msg(Arc::ERROR, "advertisedvo parameter is empty"); return false; } config.authorized_vos[""].push_back(str); }; }; continue; } if (cf.SectionNum() == ssh_secnum) { // ssh if (cf.SubSection()[0] == '\0') { config.sshfs_mounts_enabled = true; } continue; } }; // End of parsing conf commands // Define accounting reporter and database manager if configured if(config.job_log) { if(!jobreport_publisher.empty()) { config.job_log->SetReporter(jobreport_publisher.c_str()); if(!job_log_log_is_set) config.job_log->SetReporterLogFile("/var/log/arc/jura.log"); } } if(!helper_log_is_set) { // Assign default value config.helper_log = "/var/log/arc/job.helper.errors"; } if (config.default_benchmark.empty()){ // Assign default benchmark value with no CPUTime normalization config.default_benchmark = "HEPSPEC:1.0"; } // Do substitution of control dir and helpers here now we have all the // configuration. 
These are special because they do not change per-user config.Substitute(config.control_dir); for (std::list::iterator helper = helpers.begin(); helper != helpers.end(); ++helper) { config.Substitute(*helper); config.helpers.push_back(*helper); } // Add helper to poll for finished LRMS jobs if (!config.default_lrms.empty() && !config.control_dir.empty()) { std::string cmd = Arc::ArcLocation::GetDataDir() + "/scan-"+config.default_lrms+"-job"; cmd = Arc::escape_chars(cmd, " \\", '\\', false); if (!config.conffile.empty()) cmd += " --config " + config.conffile; cmd += " " + config.control_dir; config.helpers.push_back(cmd); } // Get cache parameters try { CacheConfig cache_config = CacheConfig(config); config.cache_params = cache_config; } catch (CacheConfigException& e) { logger.msg(Arc::ERROR, "Error with cache configuration: %s", e.what()); return false; } return true; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355023404 xustar0030 mtime=1759498989.696825379 30 atime=1759499017.880253628 30 ctime=1759499029.429741184 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/Makefile.in0000644000175000002070000015245015067751355025315 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
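Before the generated build files that close this directory listing, a capstone sketch (editor's example, not part of the archive) of how the configuration pieces above fit together; it assumes GMConfig's constructor defaults to an empty path so that GuessConfigFile() applies:

#include "GMConfig.h"

// Minimal driver: an empty path makes GMConfig guess arc.conf from
// $ARC_CONFIG, $ARC_LOCATION/etc/arc.conf or /etc/arc.conf; Load() then runs
// CoreConfig::ParseConf() and Print() logs the resulting setup at INFO level.
int main() {
  ARex::GMConfig config("");                    // guess the configuration file
  if (!config.Load()) return 1;                 // parse failure already logged
  config.Print();                               // session/control/cache summary
  if (!config.CreateControlDirectory()) return 1;
  return 0;
}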
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pkglibexec_PROGRAMS = gm-kick$(EXEEXT) gm-jobs$(EXEEXT) \ inputcheck$(EXEEXT) arc-blahp-logger$(EXEEXT) noinst_PROGRAMS = test_write_grami_file$(EXEEXT) subdir = src/services/a-rex/grid-manager ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-blahp-logger.8 gm-jobs.8 CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man8dir)" PROGRAMS = $(noinst_PROGRAMS) $(pkglibexec_PROGRAMS) LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libgridmanager_la_DEPENDENCIES = jobs/libjobs.la conf/libconf.la \ log/liblog.la files/libfiles.la run/librun.la misc/libmisc.la \ mail/libmail.la $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_libgridmanager_la_OBJECTS = libgridmanager_la-GridManager.lo libgridmanager_la_OBJECTS = $(am_libgridmanager_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libgridmanager_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libgridmanager_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ 
am_arc_blahp_logger_OBJECTS = \ arc_blahp_logger-arc_blahp_logger.$(OBJEXT) arc_blahp_logger_OBJECTS = $(am_arc_blahp_logger_OBJECTS) arc_blahp_logger_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) arc_blahp_logger_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_gm_jobs_OBJECTS = gm_jobs-gm_jobs.$(OBJEXT) gm_jobs_OBJECTS = $(am_gm_jobs_OBJECTS) gm_jobs_DEPENDENCIES = libgridmanager.la \ ../delegation/libdelegation.la gm_jobs_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(gm_jobs_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_gm_kick_OBJECTS = gm_kick-gm_kick.$(OBJEXT) gm_kick_OBJECTS = $(am_gm_kick_OBJECTS) gm_kick_DEPENDENCIES = libgridmanager.la gm_kick_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(gm_kick_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_inputcheck_OBJECTS = inputcheck-inputcheck.$(OBJEXT) inputcheck_OBJECTS = $(am_inputcheck_OBJECTS) inputcheck_DEPENDENCIES = libgridmanager.la \ ../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la inputcheck_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(inputcheck_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_test_write_grami_file_OBJECTS = \ test_write_grami_file-test_write_grami_file.$(OBJEXT) test_write_grami_file_OBJECTS = $(am_test_write_grami_file_OBJECTS) test_write_grami_file_DEPENDENCIES = libgridmanager.la \ ../delegation/libdelegation.la test_write_grami_file_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(test_write_grami_file_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ ./$(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po \ ./$(DEPDIR)/gm_jobs-gm_jobs.Po ./$(DEPDIR)/gm_kick-gm_kick.Po \ ./$(DEPDIR)/inputcheck-inputcheck.Po \ ./$(DEPDIR)/libgridmanager_la-GridManager.Plo \ ./$(DEPDIR)/test_write_grami_file-test_write_grami_file.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libgridmanager_la_SOURCES) $(arc_blahp_logger_SOURCES) \ $(gm_jobs_SOURCES) $(gm_kick_SOURCES) $(inputcheck_SOURCES) \ $(test_write_grami_file_SOURCES) DIST_SOURCES = $(libgridmanager_la_SOURCES) \ $(arc_blahp_logger_SOURCES) $(gm_jobs_SOURCES) \ $(gm_kick_SOURCES) $(inputcheck_SOURCES) \ $(test_write_grami_file_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man8dir = $(mandir)/man8 NROFF = nroff MANS = $(man_MANS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. 
This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in \ $(srcdir)/arc-blahp-logger.8.in $(srcdir)/gm-jobs.8.in \ $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = 
@DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = 
@PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SUBDIRS = accounting jobs run conf misc log mail files noinst_LTLIBRARIES = libgridmanager.la man_MANS = arc-blahp-logger.8 gm-jobs.8 libgridmanager_la_SOURCES = GridManager.cpp GridManager.h libgridmanager_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libgridmanager_la_LIBADD = \ jobs/libjobs.la conf/libconf.la log/liblog.la files/libfiles.la \ run/librun.la misc/libmisc.la 
mail/libmail.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) -lpthread gm_kick_SOURCES = gm_kick.cpp gm_kick_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) gm_kick_LDADD = libgridmanager.la gm_jobs_SOURCES = gm_jobs.cpp gm_jobs_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) gm_jobs_LDADD = libgridmanager.la ../delegation/libdelegation.la inputcheck_SOURCES = inputcheck.cpp inputcheck_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) inputcheck_LDADD = libgridmanager.la ../delegation/libdelegation.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arc_blahp_logger_SOURCES = arc_blahp_logger.cpp arc_blahp_logger_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arc_blahp_logger_LDADD = $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) test_write_grami_file_SOURCES = test_write_grami_file.cpp test_write_grami_file_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(AM_CXXFLAGS) test_write_grami_file_LDADD = libgridmanager.la ../delegation/libdelegation.la all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): arc-blahp-logger.8: $(top_builddir)/config.status $(srcdir)/arc-blahp-logger.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ gm-jobs.8: $(top_builddir)/config.status $(srcdir)/gm-jobs.8.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibexecdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libgridmanager.la: $(libgridmanager_la_OBJECTS) $(libgridmanager_la_DEPENDENCIES) 
$(EXTRA_libgridmanager_la_DEPENDENCIES) $(AM_V_CXXLD)$(libgridmanager_la_LINK) $(libgridmanager_la_OBJECTS) $(libgridmanager_la_LIBADD) $(LIBS) arc-blahp-logger$(EXEEXT): $(arc_blahp_logger_OBJECTS) $(arc_blahp_logger_DEPENDENCIES) $(EXTRA_arc_blahp_logger_DEPENDENCIES) @rm -f arc-blahp-logger$(EXEEXT) $(AM_V_CXXLD)$(arc_blahp_logger_LINK) $(arc_blahp_logger_OBJECTS) $(arc_blahp_logger_LDADD) $(LIBS) gm-jobs$(EXEEXT): $(gm_jobs_OBJECTS) $(gm_jobs_DEPENDENCIES) $(EXTRA_gm_jobs_DEPENDENCIES) @rm -f gm-jobs$(EXEEXT) $(AM_V_CXXLD)$(gm_jobs_LINK) $(gm_jobs_OBJECTS) $(gm_jobs_LDADD) $(LIBS) gm-kick$(EXEEXT): $(gm_kick_OBJECTS) $(gm_kick_DEPENDENCIES) $(EXTRA_gm_kick_DEPENDENCIES) @rm -f gm-kick$(EXEEXT) $(AM_V_CXXLD)$(gm_kick_LINK) $(gm_kick_OBJECTS) $(gm_kick_LDADD) $(LIBS) inputcheck$(EXEEXT): $(inputcheck_OBJECTS) $(inputcheck_DEPENDENCIES) $(EXTRA_inputcheck_DEPENDENCIES) @rm -f inputcheck$(EXEEXT) $(AM_V_CXXLD)$(inputcheck_LINK) $(inputcheck_OBJECTS) $(inputcheck_LDADD) $(LIBS) test_write_grami_file$(EXEEXT): $(test_write_grami_file_OBJECTS) $(test_write_grami_file_DEPENDENCIES) $(EXTRA_test_write_grami_file_DEPENDENCIES) @rm -f test_write_grami_file$(EXEEXT) $(AM_V_CXXLD)$(test_write_grami_file_LINK) $(test_write_grami_file_OBJECTS) $(test_write_grami_file_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gm_jobs-gm_jobs.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gm_kick-gm_kick.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/inputcheck-inputcheck.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgridmanager_la-GridManager.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_write_grami_file-test_write_grami_file.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libgridmanager_la-GridManager.lo: 
GridManager.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridmanager_la_CXXFLAGS) $(CXXFLAGS) -MT libgridmanager_la-GridManager.lo -MD -MP -MF $(DEPDIR)/libgridmanager_la-GridManager.Tpo -c -o libgridmanager_la-GridManager.lo `test -f 'GridManager.cpp' || echo '$(srcdir)/'`GridManager.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgridmanager_la-GridManager.Tpo $(DEPDIR)/libgridmanager_la-GridManager.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='GridManager.cpp' object='libgridmanager_la-GridManager.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libgridmanager_la_CXXFLAGS) $(CXXFLAGS) -c -o libgridmanager_la-GridManager.lo `test -f 'GridManager.cpp' || echo '$(srcdir)/'`GridManager.cpp arc_blahp_logger-arc_blahp_logger.o: arc_blahp_logger.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -MT arc_blahp_logger-arc_blahp_logger.o -MD -MP -MF $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo -c -o arc_blahp_logger-arc_blahp_logger.o `test -f 'arc_blahp_logger.cpp' || echo '$(srcdir)/'`arc_blahp_logger.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arc_blahp_logger.cpp' object='arc_blahp_logger-arc_blahp_logger.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -c -o arc_blahp_logger-arc_blahp_logger.o `test -f 'arc_blahp_logger.cpp' || echo '$(srcdir)/'`arc_blahp_logger.cpp arc_blahp_logger-arc_blahp_logger.obj: arc_blahp_logger.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -MT arc_blahp_logger-arc_blahp_logger.obj -MD -MP -MF $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo -c -o arc_blahp_logger-arc_blahp_logger.obj `if test -f 'arc_blahp_logger.cpp'; then $(CYGPATH_W) 'arc_blahp_logger.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_blahp_logger.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Tpo $(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arc_blahp_logger.cpp' object='arc_blahp_logger-arc_blahp_logger.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arc_blahp_logger_CXXFLAGS) $(CXXFLAGS) -c -o arc_blahp_logger-arc_blahp_logger.obj `if test -f 'arc_blahp_logger.cpp'; then $(CYGPATH_W) 'arc_blahp_logger.cpp'; else $(CYGPATH_W) '$(srcdir)/arc_blahp_logger.cpp'; fi` gm_jobs-gm_jobs.o: gm_jobs.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -MT gm_jobs-gm_jobs.o -MD -MP -MF $(DEPDIR)/gm_jobs-gm_jobs.Tpo -c -o gm_jobs-gm_jobs.o `test -f 'gm_jobs.cpp' || echo '$(srcdir)/'`gm_jobs.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/gm_jobs-gm_jobs.Tpo $(DEPDIR)/gm_jobs-gm_jobs.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='gm_jobs.cpp' object='gm_jobs-gm_jobs.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -c -o gm_jobs-gm_jobs.o `test -f 'gm_jobs.cpp' || echo '$(srcdir)/'`gm_jobs.cpp gm_jobs-gm_jobs.obj: gm_jobs.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -MT gm_jobs-gm_jobs.obj -MD -MP -MF $(DEPDIR)/gm_jobs-gm_jobs.Tpo -c -o gm_jobs-gm_jobs.obj `if test -f 'gm_jobs.cpp'; then $(CYGPATH_W) 'gm_jobs.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_jobs.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/gm_jobs-gm_jobs.Tpo $(DEPDIR)/gm_jobs-gm_jobs.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='gm_jobs.cpp' object='gm_jobs-gm_jobs.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_jobs_CXXFLAGS) $(CXXFLAGS) -c -o gm_jobs-gm_jobs.obj `if test -f 'gm_jobs.cpp'; then $(CYGPATH_W) 'gm_jobs.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_jobs.cpp'; fi` gm_kick-gm_kick.o: gm_kick.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -MT gm_kick-gm_kick.o -MD -MP -MF $(DEPDIR)/gm_kick-gm_kick.Tpo -c -o gm_kick-gm_kick.o `test -f 'gm_kick.cpp' || echo '$(srcdir)/'`gm_kick.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/gm_kick-gm_kick.Tpo $(DEPDIR)/gm_kick-gm_kick.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='gm_kick.cpp' object='gm_kick-gm_kick.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -c -o gm_kick-gm_kick.o `test -f 'gm_kick.cpp' || echo '$(srcdir)/'`gm_kick.cpp gm_kick-gm_kick.obj: gm_kick.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -MT gm_kick-gm_kick.obj -MD -MP -MF $(DEPDIR)/gm_kick-gm_kick.Tpo -c -o gm_kick-gm_kick.obj `if test -f 'gm_kick.cpp'; then $(CYGPATH_W) 'gm_kick.cpp'; else $(CYGPATH_W) '$(srcdir)/gm_kick.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/gm_kick-gm_kick.Tpo $(DEPDIR)/gm_kick-gm_kick.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='gm_kick.cpp' object='gm_kick-gm_kick.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(gm_kick_CXXFLAGS) $(CXXFLAGS) -c -o gm_kick-gm_kick.obj `if test -f 'gm_kick.cpp'; then $(CYGPATH_W) 'gm_kick.cpp'; else $(CYGPATH_W) 
'$(srcdir)/gm_kick.cpp'; fi` inputcheck-inputcheck.o: inputcheck.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -MT inputcheck-inputcheck.o -MD -MP -MF $(DEPDIR)/inputcheck-inputcheck.Tpo -c -o inputcheck-inputcheck.o `test -f 'inputcheck.cpp' || echo '$(srcdir)/'`inputcheck.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/inputcheck-inputcheck.Tpo $(DEPDIR)/inputcheck-inputcheck.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='inputcheck.cpp' object='inputcheck-inputcheck.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -c -o inputcheck-inputcheck.o `test -f 'inputcheck.cpp' || echo '$(srcdir)/'`inputcheck.cpp inputcheck-inputcheck.obj: inputcheck.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -MT inputcheck-inputcheck.obj -MD -MP -MF $(DEPDIR)/inputcheck-inputcheck.Tpo -c -o inputcheck-inputcheck.obj `if test -f 'inputcheck.cpp'; then $(CYGPATH_W) 'inputcheck.cpp'; else $(CYGPATH_W) '$(srcdir)/inputcheck.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/inputcheck-inputcheck.Tpo $(DEPDIR)/inputcheck-inputcheck.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='inputcheck.cpp' object='inputcheck-inputcheck.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(inputcheck_CXXFLAGS) $(CXXFLAGS) -c -o inputcheck-inputcheck.obj `if test -f 'inputcheck.cpp'; then $(CYGPATH_W) 'inputcheck.cpp'; else $(CYGPATH_W) '$(srcdir)/inputcheck.cpp'; fi` test_write_grami_file-test_write_grami_file.o: test_write_grami_file.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_write_grami_file_CXXFLAGS) $(CXXFLAGS) -MT test_write_grami_file-test_write_grami_file.o -MD -MP -MF $(DEPDIR)/test_write_grami_file-test_write_grami_file.Tpo -c -o test_write_grami_file-test_write_grami_file.o `test -f 'test_write_grami_file.cpp' || echo '$(srcdir)/'`test_write_grami_file.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_write_grami_file-test_write_grami_file.Tpo $(DEPDIR)/test_write_grami_file-test_write_grami_file.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='test_write_grami_file.cpp' object='test_write_grami_file-test_write_grami_file.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_write_grami_file_CXXFLAGS) $(CXXFLAGS) -c -o test_write_grami_file-test_write_grami_file.o `test -f 'test_write_grami_file.cpp' || echo '$(srcdir)/'`test_write_grami_file.cpp test_write_grami_file-test_write_grami_file.obj: test_write_grami_file.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_write_grami_file_CXXFLAGS) $(CXXFLAGS) -MT test_write_grami_file-test_write_grami_file.obj -MD -MP -MF 
$(DEPDIR)/test_write_grami_file-test_write_grami_file.Tpo -c -o test_write_grami_file-test_write_grami_file.obj `if test -f 'test_write_grami_file.cpp'; then $(CYGPATH_W) 'test_write_grami_file.cpp'; else $(CYGPATH_W) '$(srcdir)/test_write_grami_file.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_write_grami_file-test_write_grami_file.Tpo $(DEPDIR)/test_write_grami_file-test_write_grami_file.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='test_write_grami_file.cpp' object='test_write_grami_file-test_write_grami_file.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_write_grami_file_CXXFLAGS) $(CXXFLAGS) -c -o test_write_grami_file-test_write_grami_file.obj `if test -f 'test_write_grami_file.cpp'; then $(CYGPATH_W) 'test_write_grami_file.cpp'; else $(CYGPATH_W) '$(srcdir)/test_write_grami_file.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man8: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man8dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man8dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man8dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.8[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ done; } uninstall-man8: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man8dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.8[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man8dir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
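# The single driver rule below handles every *-recursive target: it strips
# the "-recursive" suffix to recover the real target, walks $(SUBDIRS)
# (or $(DIST_SUBDIRS) for the distclean-*/maintainer-clean-* family)
# re-invoking $(MAKE) in each subdirectory, and finally runs the "-am"
# variant in the current directory; $(am__make_keepgoing) keeps failures
# non-fatal under "make -k".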
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(MANS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(man8dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
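# The cleaning targets below nest in the usual GNU/automake order:
# mostlyclean < clean < distclean < maintainer-clean, each level recursing
# into $(SUBDIRS); distclean and maintainer-clean additionally remove the
# per-object dependency files under ./$(DEPDIR) and this generated
# Makefile itself.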
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-noinstPROGRAMS clean-pkglibexecPROGRAMS mostlyclean-am distclean: distclean-recursive -rm -f ./$(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po -rm -f ./$(DEPDIR)/gm_jobs-gm_jobs.Po -rm -f ./$(DEPDIR)/gm_kick-gm_kick.Po -rm -f ./$(DEPDIR)/inputcheck-inputcheck.Po -rm -f ./$(DEPDIR)/libgridmanager_la-GridManager.Plo -rm -f ./$(DEPDIR)/test_write_grami_file-test_write_grami_file.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-man install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-pkglibexecPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-man8 install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/arc_blahp_logger-arc_blahp_logger.Po -rm -f ./$(DEPDIR)/gm_jobs-gm_jobs.Po -rm -f ./$(DEPDIR)/gm_kick-gm_kick.Po -rm -f ./$(DEPDIR)/inputcheck-inputcheck.Po -rm -f ./$(DEPDIR)/libgridmanager_la-GridManager.Plo -rm -f ./$(DEPDIR)/test_write_grami_file-test_write_grami_file.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-man uninstall-pkglibexecPROGRAMS uninstall-man: uninstall-man8 .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--depfiles check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES clean-noinstPROGRAMS \ clean-pkglibexecPROGRAMS cscopelist-am ctags ctags-am \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-man8 install-pdf \ install-pdf-am install-pkglibexecPROGRAMS install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-man \ uninstall-man8 uninstall-pkglibexecPROGRAMS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/GridManager.h0000644000000000000000000000013115067751327023666 xustar0030 mtime=1759498967.752606304 30 atime=1759498967.862493605 29 ctime=1759499029.43436244 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/GridManager.h0000644000175000002070000000107015067751327025567 0ustar00mockbuildmock00000000000000
#ifndef GRID_MANAGER_H
#define GRID_MANAGER_H

namespace ARex {

class JobsList;
class DTRGenerator;
class GMConfig;

class GridManager {
 private:
  Arc::SimpleCounter active_;   // counts running grid-manager threads
  bool tostop_;                 // set to request shutdown
  GMConfig& config_;
  JobsList* jobs_;
  GridManager();
  GridManager(const GridManager&);
  static void grid_manager(void* arg);
  bool thread(void);
 public:
  GridManager(GMConfig& config);
  ~GridManager(void);
  // True while the grid-manager thread is active
  operator bool(void) { return (active_.get()>0); };
  void RequestJobAttention(const std::string& job_id);
};

} // namespace ARex

#endif // GRID_MANAGER_H
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/mail0000644000000000000000000000013015067751425022200 xustar0030 mtime=1759499029.728780947 28 atime=1759499034.7655102 30 ctime=1759499029.728780947 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/0000755000175000002070000000000015067751425024161 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327024313 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 29 ctime=1759499029.72268032 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/Makefile.am0000644000175000002070000000047215067751327026221 0ustar00mockbuildmock00000000000000
pkglibexec_PROGRAMS = smtp-send
dist_pkglibexec_SCRIPTS = smtp-send.sh
noinst_LTLIBRARIES = libmail.la

smtp_send_SOURCES = smtp-send.c
smtp_send_LDADD = $(LIBRESOLV)

libmail_la_SOURCES = send_mail.cpp send_mail.h
libmail_la_CXXFLAGS = -I$(top_srcdir)/include \
	$(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS)
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/PaxHeaders/send_mail.cpp0000644000000000000000000000013215067751327024717 xustar0030 mtime=1759498967.757381403 30 atime=1759498967.865493651 30 ctime=1759499029.727548306 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/send_mail.cpp0000644000175000002070000000555215067751327026630 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <arc/ArcLocation.h>
#include <arc/Logger.h>
#include <arc/Run.h>

#include "../files/ControlFileContent.h"
#include "../files/ControlFileHandling.h"
#include "../run/RunParallel.h"
#include "../conf/GMConfig.h"
#include "send_mail.h"

namespace ARex {

static Arc::Logger& logger = Arc::Logger::getRootLogger();

/* check if have to send mail and initiate sending */
bool send_mail(GMJob &job,const GMConfig& config) {
  char flag = GMJob::get_state_mail_flag(job.get_state());
  if(flag == ' ') return true;
  std::string notify = "";
  std::string jobname = "";
  JobLocalDescription *job_desc = job.GetLocalDescription(config);
  if(job_desc != NULL) {
    jobname=job_desc->jobname;
    notify=job_desc->notify;
  } else {
    logger.msg(Arc::ERROR,"Failed reading local information");
  };
  // job_local_read_notify(job.get_id(),user,notify);
  if(notify.length() == 0) return true; /* save some time */
  Arc::Run* child = NULL;
  std::string failure_reason=job.GetFailure(config);
  if(job_failed_mark_check(job.get_id(),config)) {
    if(failure_reason.length() == 0) failure_reason="";
  };
  /* collapse multi-line failure reason to a single quoted argument */
  for(std::string::size_type n=0;;) {
    n=failure_reason.find('\n',n);
    if(n == std::string::npos) break;
    failure_reason[n]='.';
  };
  failure_reason = '"' + failure_reason + '"';
  std::string cmd(Arc::ArcLocation::GetToolsDir()+"/smtp-send.sh");
  cmd += " " + std::string(job.get_state_name());
  cmd += " " + job.get_id();
  cmd += " " + config.ControlDir();
  cmd += " " + config.SupportMailAddress();
  cmd += " \"" + jobname + "\"";
  cmd += " " + failure_reason;
  /* go through mail addresses and flags; notify is a space-separated
     list in which a word without '@' is a set of state-flag letters
     applying to the addresses that follow it */
  std::string::size_type pos=0;
  std::string::size_type pos_s=0;
  /* max 3 mail addresses */
  std::string mails[3];
  int mail_n=0;
  bool right_flag = false;
  /* by default mail is sent when job enters states PREPARING and FINISHED */
  if((flag == 'b') || (flag == 'e')) right_flag=true;
  for(;;) {
    if(pos_s >= notify.length()) break;
    if((pos = notify.find(' ',pos_s)) == std::string::npos) pos=notify.length();
    if(pos==pos_s) { pos++; pos_s++; continue; };
    std::string word(notify.substr(pos_s,pos-pos_s));
    if(word.find('@') == std::string::npos) { /* flags */
      if(word.find(flag) == std::string::npos) {
        right_flag=false;
      } else {
        right_flag=true;
      };
      pos_s=pos+1;
      continue;
    };
    if(right_flag) { mails[mail_n]=word; mail_n++; };
    if(mail_n >= 3) break;
    pos_s=pos+1;
  };
  if(mail_n == 0) return true; /* not sending to anyone */
  for(mail_n--;mail_n>=0;mail_n--) {
    cmd += " " + mails[mail_n];
  };
  logger.msg(Arc::DEBUG, "Running mailer command (%s)", cmd);
  if(!RunParallel::run(config,job,NULL,cmd,&child)) {
    logger.msg(Arc::ERROR,"Failed running mailer");
    return false;
  };
  child->Abandon();
  delete child;
  return true;
}

} // namespace ARex
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356024327 xustar0030 mtime=1759498990.025421627 30 atime=1759499017.995255375 30 ctime=1759499029.725027881 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/Makefile.in0000644000175000002070000010251115067751356026231 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ pkglibexec_PROGRAMS = smtp-send$(EXEEXT) subdir = src/services/a-rex/grid-manager/mail ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(dist_pkglibexec_SCRIPTS) \ $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(pkglibexecdir)" \ "$(DESTDIR)$(pkglibexecdir)" PROGRAMS = $(pkglibexec_PROGRAMS) LTLIBRARIES = $(noinst_LTLIBRARIES) libmail_la_LIBADD = am_libmail_la_OBJECTS = libmail_la-send_mail.lo libmail_la_OBJECTS = $(am_libmail_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libmail_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmail_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_smtp_send_OBJECTS = smtp-send.$(OBJEXT) smtp_send_OBJECTS = $(am_smtp_send_OBJECTS) am__DEPENDENCIES_1 = smtp_send_DEPENDENCIES = $(am__DEPENDENCIES_1) am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 
's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } SCRIPTS = $(dist_pkglibexec_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libmail_la-send_mail.Plo \ ./$(DEPDIR)/smtp-send.Po am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libmail_la_SOURCES) $(smtp_send_SOURCES) DIST_SOURCES = $(libmail_la_SOURCES) $(smtp_send_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. 
am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = 
@GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = 
@XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ dist_pkglibexec_SCRIPTS = smtp-send.sh noinst_LTLIBRARIES = libmail.la smtp_send_SOURCES = smtp-send.c smtp_send_LDADD = $(LIBRESOLV) libmail_la_SOURCES = send_mail.cpp send_mail.h libmail_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/mail/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/mail/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-pkglibexecPROGRAMS: $(pkglibexec_PROGRAMS) @$(NORMAL_INSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibexecdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkglibexecdir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-pkglibexecPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(pkglibexec_PROGRAMS)'; test -n "$(pkglibexecdir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(pkglibexecdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(pkglibexecdir)" && rm -f $$files clean-pkglibexecPROGRAMS: @list='$(pkglibexec_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libmail.la: $(libmail_la_OBJECTS) $(libmail_la_DEPENDENCIES) $(EXTRA_libmail_la_DEPENDENCIES) $(AM_V_CXXLD)$(libmail_la_LINK) $(libmail_la_OBJECTS) $(libmail_la_LIBADD) $(LIBS) smtp-send$(EXEEXT): $(smtp_send_OBJECTS) $(smtp_send_DEPENDENCIES) $(EXTRA_smtp_send_DEPENDENCIES) @rm -f smtp-send$(EXEEXT) $(AM_V_CCLD)$(LINK) $(smtp_send_OBJECTS) $(smtp_send_LDADD) $(LIBS) install-dist_pkglibexecSCRIPTS: $(dist_pkglibexec_SCRIPTS) @$(NORMAL_INSTALL) @list='$(dist_pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkglibexecdir)'"; \ $(MKDIR_P) 
"$(DESTDIR)$(pkglibexecdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkglibexecdir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkglibexecdir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_pkglibexecSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_pkglibexec_SCRIPTS)'; test -n "$(pkglibexecdir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkglibexecdir)'; $(am__uninstall_files_from_dir) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmail_la-send_mail.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/smtp-send.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po 
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libmail_la-send_mail.lo: send_mail.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmail_la_CXXFLAGS) $(CXXFLAGS) -MT libmail_la-send_mail.lo -MD -MP -MF $(DEPDIR)/libmail_la-send_mail.Tpo -c -o libmail_la-send_mail.lo `test -f 'send_mail.cpp' || echo '$(srcdir)/'`send_mail.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libmail_la-send_mail.Tpo $(DEPDIR)/libmail_la-send_mail.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='send_mail.cpp' object='libmail_la-send_mail.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmail_la_CXXFLAGS) $(CXXFLAGS) -c -o libmail_la-send_mail.lo `test -f 'send_mail.cpp' || echo '$(srcdir)/'`send_mail.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo 
"$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(SCRIPTS) installdirs: for dir in "$(DESTDIR)$(pkglibexecdir)" "$(DESTDIR)$(pkglibexecdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-pkglibexecPROGRAMS mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libmail_la-send_mail.Plo -rm -f ./$(DEPDIR)/smtp-send.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-dist_pkglibexecSCRIPTS \ install-pkglibexecPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libmail_la-send_mail.Plo -rm -f ./$(DEPDIR)/smtp-send.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_pkglibexecSCRIPTS \ uninstall-pkglibexecPROGRAMS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-pkglibexecPROGRAMS cscopelist-am ctags ctags-am \ distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dist_pkglibexecSCRIPTS install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-pkglibexecPROGRAMS \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am \ uninstall-dist_pkglibexecSCRIPTS uninstall-pkglibexecPROGRAMS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/PaxHeaders/send_mail.h0000644000000000000000000000013215067751327024364 xustar0030 mtime=1759498967.757381403 30 atime=1759498967.865493651 30 ctime=1759499029.728780947 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/send_mail.h0000644000175000002070000000040615067751327026266 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_SEND_MAIL_H__ #define __ARC_GM_SEND_MAIL_H__ namespace ARex { /* Starts external process smtp-send.sh to send mail to user about changes in job's status. */ bool send_mail(GMJob &job, const GMConfig& config); } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/PaxHeaders/smtp-send.c0000644000000000000000000000013015067751327024334 xustar0030 mtime=1759498967.757381403 30 atime=1759498967.865493651 28 ctime=1759499029.7299389 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/smtp-send.c0000644000175000002070000001326515067751327026247 0ustar00mockbuildmock00000000000000/* Simple program to mail information piped to stdin to address 'to' from address 'from'. It tries to connect directly to SMTP server responsible for destination address. 
*/ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/nameser.h> #include <netdb.h> #include <resolv.h> #define SMTP_PORT 25 typedef union { HEADER hdr; unsigned char buf[8192]; } answer_t; void usage(void) { fprintf(stdout,"smtp-send from to\n"); exit(1); } int send_mail(char* mail_server ,char* mail_from,char* mail_to) { char buf[256]; int s,i; FILE* S; int err_code; char my_hostname[256]; struct addrinfo *res = NULL; struct addrinfo *r = NULL; memset(my_hostname,0,256); gethostname(my_hostname,255); if(getaddrinfo(mail_server,NULL,NULL,&res) != 0) return 2; if(res == NULL) return 2; /* pick the first TCP-capable address and set the SMTP port */ for(r=res;r;r=r->ai_next) { if(r->ai_addr == NULL) continue; if(r->ai_socktype != SOCK_STREAM) continue; if(r->ai_protocol != IPPROTO_TCP) continue; if(r->ai_family == AF_INET) { ((struct sockaddr_in*)(r->ai_addr))->sin_port=htons(SMTP_PORT); break; }; if(r->ai_family == AF_INET6) { ((struct sockaddr_in6*)(r->ai_addr))->sin6_port=htons(SMTP_PORT); break; }; }; if(!r) { freeaddrinfo(res); return 2; }; s=socket(r->ai_family,r->ai_socktype,r->ai_protocol); if(s==-1) { freeaddrinfo(res); return 2; }; if(connect(s,r->ai_addr,r->ai_addrlen)==-1) { freeaddrinfo(res); close(s); return 2; }; freeaddrinfo(res); if((S=fdopen(s,"r+")) == NULL) { close(s); return 2; }; /* SMTP dialogue: expect the 220 greeting, then HELO, MAIL FROM, RCPT TO and DATA in turn */ if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 220 ) { fclose(S); return 2; }; fprintf(S,"HELO %s\r\n",my_hostname); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 2; }; fprintf(S,"MAIL FROM: <%s>\r\n",mail_from); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 2; }; fprintf(S,"RCPT TO: <%s>\r\n",mail_to); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 2; }; fprintf(S,"DATA\r\n"); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 2; }; fgetc(S); if( err_code != 354 ) { fclose(S); return 2; }; /* read from stdin and send to socket */ for(;;) { buf[0]=0; if((i=fscanf(stdin,"%255[^\n]",buf)) == EOF) break; if(fscanf(stdin,"%*[^\n]") > 0) {}; fgetc(stdin); if(!strcmp(".",buf)) { /* a lone '.' would end DATA early - escape it with a leading space */ fputc(' ',S); }; fprintf(S,"%s\r\n",buf); fflush(S); }; fprintf(S,".\r\n"); fflush(S); if(fscanf(S,"%i%*[^\n]",&err_code) != 1) { fclose(S); return 1; }; fgetc(S); if( err_code != 250 ) { fclose(S); return 1; }; fprintf(S,"QUIT\r\n"); fflush(S); fclose(S); return 0; } int connect_mail(char* domain,char* mail_from,char* mail_to) { char mxbuf[1024]; unsigned short mxtype; unsigned short mxpref; answer_t answer; int l,na,nq; unsigned char *sp; unsigned char *cp; unsigned char *ep; HEADER *hp; int err_code = 2; fprintf(stdout,"Searching for domain %s\n",domain); /* query DNS for the MX records of the destination domain */ if((l=res_search(domain,C_IN,T_MX,answer.buf,sizeof(answer))) == -1) { fprintf(stderr,"Query failed\n"); return 2; }; hp = &(answer.hdr); sp = answer.buf; cp = answer.buf + HFIXEDSZ; ep = answer.buf + l; nq=ntohs(hp->qdcount); for(;nq>0;nq--) { if((l=dn_skipname(cp,ep)) == -1) { fprintf(stderr,"skipname failed\n"); return 2; }; cp+=l+QFIXEDSZ; }; na=ntohs(hp->ancount); for(;(na>0) && (cp&2 exit 1 fi # arguments status=$1 shift job_id=$1 shift control_dir=$1 shift local_mail=$1 shift job_name=$1 shift failure_reason=$1 shift if [ -z "$local_mail" ] ; then echo "Empty local mail address" 1>&2 exit 1 fi control_path () { # job_id=`echo "$2" | sed 's/\(.\{9\}\)/\1\//g' | sed 's/\/$//'` job_id=`echo 
"$2" | sed -e 's#\(.\{3\}\)#\1/#3' -e 's#\(.\{3\}\)#\1/#2' -e 's#\(.\{3\}\)#\1/#1' -e 's#$#/#'` path="$1/jobs/${job_id}/$3" echo "$path" } #host_name=`hostname -f` cur_time=`date -R` cluster_name=`hostname --fqdn` while true ; do if [ $# -lt 1 ] ; then break ; fi mail_addr=$1 if [ -z "$mail_addr" ] ; then break; fi ( # job_name=`cat $control_dir/job.$job_id.local 2>/dev/null | \ # sed --quiet 's/^jobname=\(.*\)/\1/;t print;s/.*//;t;:print;p'` # if [ -z "$job_name" ] ; then # job_name='' # fi echo "From: $local_mail" echo "To: $mail_addr" if [ -z "$job_name" ] ; then echo "Subject: Message from job $job_id" else echo "Subject: Message from job $job_name ($job_id)" fi echo "Date: $cur_time" echo if [ ! -z "$job_name" ] ; then job_name="\"$job_name\" " fi job_name="${job_name}(${job_id})" if [ ! -z "$cluster_name" ] ; then job_name="${job_name} at ${cluster_name}" fi if [ ! -z "$failure_reason" ] ; then echo "Job $job_name state is $status. Job FAILED with reason:" echo "$failure_reason" if [ "$status" = FINISHED ] ; then if [ -r "$control_dir/job.$job_id.diag" ] ; then grep -i '^WallTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^KernelTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^UserTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^MaxResidentMemory' "$control_dir/job.$job_id.diag" 2>/dev/null fi # Oxana requested more information. Race conditions are possible here if [ -r "$control_dir/job.$job_id.local" ] ; then grep -i '^queue' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^starttime' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^cleanuptime' "$control_dir/job.$job_id.local" 2>/dev/null fi fi errors_file=$(control_path "$control_dir" "$job_id" "errors") if [ -f "$errors_file" ] ; then echo echo 'Following is the log of job processing:' echo '-------------------------------------------------' cat "$errors_file" 2>/dev/null echo '-------------------------------------------------' echo fi else echo "Job $job_name current state is $status." if [ "$status" = FINISHED ] ; then if [ -r "$control_dir/job.$job_id.diag" ] ; then grep -i '^WallTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^KernelTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^UserTime' "$control_dir/job.$job_id.diag" 2>/dev/null grep -i '^MaxResidentMemory' "$control_dir/job.$job_id.diag" 2>/dev/null fi if [ -r "$control_dir/job.$job_id.local" ] ; then grep -i '^queue' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^starttime' "$control_dir/job.$job_id.local" 2>/dev/null grep -i '^cleanuptime' "$control_dir/job.$job_id.local" 2>/dev/null fi fi fi ) | \ $basedir/smtp-send "$local_mail" "$mail_addr" shift done nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/PaxHeaders/README0000644000000000000000000000013215067751327023140 xustar0030 mtime=1759498967.757381403 30 atime=1759498967.865493651 30 ctime=1759499029.726364596 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/mail/README0000644000175000002070000000004615067751327025042 0ustar00mockbuildmock00000000000000Utility and function to send E-mails. 
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/misc0000644000000000000000000000013015067751425022211 xustar0030 mtime=1759499029.639432309 28 atime=1759499034.7655102 30 ctime=1759499029.639432309 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/0000755000175000002070000000000015067751425024172 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024325 xustar0030 mtime=1759498967.757381403 30 atime=1759498967.865493651 30 ctime=1759499029.635765871 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/Makefile.am0000644000175000002070000000062615067751327026233 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libmisc.la libmisc_la_SOURCES = \ proxy.cpp \ proxy.h libmisc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libmisc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356024340 xustar0030 mtime=1759498990.073316428 30 atime=1759499018.015255679 30 ctime=1759499029.636837967 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/Makefile.in0000644000175000002070000006456515067751356026262 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/misc ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libmisc_la_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libmisc_la_OBJECTS = libmisc_la-proxy.lo libmisc_la_OBJECTS = $(am_libmisc_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libmisc_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libmisc_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = 
$(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libmisc_la-proxy.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libmisc_la_SOURCES) DIST_SOURCES = $(libmisc_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS 
= @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = libmisc.la libmisc_la_SOURCES = \ proxy.cpp \ proxy.h libmisc_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libmisc_la_LIBADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/misc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/misc/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libmisc.la: $(libmisc_la_OBJECTS) $(libmisc_la_DEPENDENCIES) $(EXTRA_libmisc_la_DEPENDENCIES) $(AM_V_CXXLD)$(libmisc_la_LINK) $(libmisc_la_OBJECTS) $(libmisc_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libmisc_la-proxy.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libmisc_la-proxy.lo: proxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -MT libmisc_la-proxy.lo -MD -MP -MF $(DEPDIR)/libmisc_la-proxy.Tpo -c -o libmisc_la-proxy.lo `test -f 'proxy.cpp' || echo '$(srcdir)/'`proxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libmisc_la-proxy.Tpo $(DEPDIR)/libmisc_la-proxy.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='proxy.cpp' object='libmisc_la-proxy.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libmisc_la_CXXFLAGS) $(CXXFLAGS) -c -o libmisc_la-proxy.lo `test -f 'proxy.cpp' || echo '$(srcdir)/'`proxy.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libmisc_la-proxy.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libmisc_la-proxy.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/PaxHeaders/proxy.h0000644000000000000000000000013215067751327023623 xustar0030 mtime=1759498967.757668791 30 atime=1759498967.865493651 30 ctime=1759499029.640229017 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/proxy.h0000644000175000002070000000025615067751327025530 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_PROXY_H__ #define __ARC_GM_PROXY_H__ namespace ARex { int prepare_proxy(void); int remove_proxy(void); } // namespace ARex #endif // __ARC_GM_PROXY_H__ nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/PaxHeaders/proxy.cpp0000644000000000000000000000013215067751327024156 xustar0030 mtime=1759498967.757668791 30 atime=1759498967.865493651 30 ctime=1759499029.639132355 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/misc/proxy.cpp0000644000175000002070000000331115067751327026056 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include #include #include #include #include "proxy.h" #include #include #include #include #include #include namespace ARex { int prepare_proxy(void) { int h = -1; off_t len; char* buf = NULL; off_t l,ll; int res=-1; if(getuid() == 0) { /* create temporary proxy */ std::string proxy_file=Arc::GetEnv("X509_USER_PROXY"); if(proxy_file.empty()) goto exit; h=::open(proxy_file.c_str(),O_RDONLY); if(h==-1) goto exit; if((len=lseek(h,0,SEEK_END))==-1) goto exit; if(lseek(h,0,SEEK_SET) != 0) goto exit; buf=(char*)malloc(len); if(buf==NULL) goto exit; for(l=0;l -I <jobID> -U <user> -P <user proxy> -L <job status file> [-c <ceid prefix>] [-p <log prefix>] [-d <loglevel>] [-i] .SH OPTIONS .IP "\fB-I\fR \fIjobID\fR" A-REX job identifier .IP "\fB-U\fR \fIuser\fR" local account that owns the job files and processes .IP "\fB-P\fR \fIuser proxy\fR" path to the user proxy certificate file used to get VOMS membership info .IP "\fB-L\fR \fIjob status file\fR" A-REX jobstatus .local file containing more info about the job (like the LRMS id) .IP "\fB-c\fR \fIceid prefix\fR" prefix used to generate the CE ID in accordance with GLUE1.3 publishing .IP "\fB-p\fR \fIlog prefix\fR" log location and filename prefix. Default is \fB/var/log/arc/accounting/blahp.log\fR .IP "\fB-d\fR \fIloglevel\fR" logging level from 0 (ERROR) to 5 (DEBUG) .IP "\fB-i\fR" Ignore failed jobs. Default is to publish them. .SH ENABLING PLUGIN IN A-REX CONFIGURATION You need to add the BLAH logger plugin as a handler for the FINISHED state, e.g.: .B authplugin="FINISHED timeout=10,onfailure=pass /usr/libexec/arc/arc-blahp-logger -I %I -U %u -L %C/job.%I.local -P %C/job.%I.proxy" .SH CONFIGURATION There is no particular plugin configuration beyond passing the correct options. By default the BLAH log is written to .B /var/log/arc/accounting/blahp.log-YYYYMMDD. The log prefix (without \fB-YYYYMMDD\fR) can be redefined with the optional \fB-p\fR option. The CE ID is generated automatically and has the format .B host.fqdn:2811/nordugrid-torque-queue in accordance with GLUE1.3 publishing. The queue is appended at runtime anyway, but the prefix can be redefined with the \fB-c\fR option. This option should be set when the LRMS is not Torque. To debug logger execution you can add the \fB-d 5\fR option and see all ARC logger output from all subsystems used.
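.SH EXAMPLE
For troubleshooting, the handler command shown above can also be run by hand with full debug output. This is only an illustrative sketch: the job ID, account and control directory paths are placeholders to be replaced with real values.
.B arc-blahp-logger -I <jobID> -U <user> -L <control_dir>/job.<jobID>.local -P <control_dir>/job.<jobID>.proxy -d 5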
.SH AUTHOR Andrii Salnikov nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/GridManager.cpp0000644000000000000000000000013215067751327024222 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.433150751 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/GridManager.cpp0000644000175000002070000003304715067751327026133 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "jobs/JobsList.h" #include "jobs/CommFIFO.h" #include "log/JobLog.h" #include "log/JobsMetrics.h" #include "log/HeartBeatMetrics.h" #include "log/SpaceMetrics.h" #include "run/RunRedirected.h" #include "files/ControlFileHandling.h" #include "../delegation/DelegationStore.h" #include "../delegation/DelegationStores.h" #include "GridManager.h" namespace ARex { /* do job cleaning every 2 hours */ #define HARD_JOB_PERIOD 7200 /* cache cleaning every 5 minutes */ #define CACHE_CLEAN_PERIOD 300 /* cache cleaning default timeout */ #define CACHE_CLEAN_TIMEOUT 3600 static Arc::Logger logger(Arc::Logger::getRootLogger(),"A-REX"); class cache_st { public: Arc::SimpleCounter counter; Arc::SimpleCondition to_exit; const GMConfig* config; cache_st(GMConfig* config_):config(config_) { }; ~cache_st(void) { to_exit.signal(); counter.wait(); }; }; static void cache_func(void* arg) { const GMConfig* config = ((cache_st*)arg)->config; Arc::SimpleCondition& to_exit = ((cache_st*)arg)->to_exit; CacheConfig cache_info(config->CacheParams()); if (!cache_info.cleanCache()) return; // Note: per-user substitutions do not work here. If they are used // cache-clean must be run manually eg via cron cache_info.substitute(*config, Arc::User()); // get the cache dirs std::vector cache_info_dirs = cache_info.getCacheDirs(); if (cache_info_dirs.empty()) return; std::string maxusedspace = Arc::tostring(cache_info.getCacheMax()); std::string minusedspace = Arc::tostring(cache_info.getCacheMin()); std::string cachelifetime = cache_info.getLifeTime(); std::string logfile = cache_info.getLogFile(); bool cacheshared = cache_info.getCacheShared(); std::string cachespacetool = cache_info.getCacheSpaceTool(); // do cache-clean -h for explanation of options std::string cmd = Arc::ArcLocation::GetToolsDir() + "/cache-clean"; cmd += " -m " + minusedspace; cmd += " -M " + maxusedspace; if (!cachelifetime.empty()) cmd += " -E " + cachelifetime; if (cacheshared) cmd += " -S "; if (!cachespacetool.empty()) cmd += " -f \"" + cachespacetool + "\" "; cmd += " -D " + cache_info.getLogLevel(); for (std::vector::iterator i = cache_info_dirs.begin(); i != cache_info_dirs.end(); i++) { cmd += " " + (i->substr(0, i->find(" "))); } // use large timeout, as disk scan can take a long time // blocks until command finishes or timeout int clean_timeout = cache_info.getCleanTimeout(); if (clean_timeout == 0) clean_timeout = CACHE_CLEAN_TIMEOUT; // run cache cleaning periodically forever for(;;) { int h = open(logfile.c_str(), O_WRONLY | O_APPEND); if (h == -1) { std::string dirname(logfile.substr(0, logfile.rfind('/'))); if (!dirname.empty() && !Arc::DirCreate(dirname, S_IRWXU | S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH, true)) { logger.msg(Arc::WARNING, "Cannot create directories for log file %s." " Messages will be logged to this log", logfile); } else { h = open(logfile.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if (h == -1) { logger.msg(Arc::WARNING, "Cannot open cache log file %s: %s. 
Cache cleaning" " messages will be logged to this log", logfile, Arc::StrError(errno)); } } } logger.msg(Arc::DEBUG, "Running command: %s", cmd); int result = RunRedirected::run(Arc::User(), "cache-clean", -1, h, h, cmd.c_str(), clean_timeout); if(h != -1) close(h); if (result != 0) { if (result == -1) logger.msg(Arc::ERROR, "Failed to start cache clean script"); else logger.msg(Arc::ERROR, "Cache cleaning script failed"); } if (to_exit.wait(CACHE_CLEAN_PERIOD*1000)) { break; } } } /*!! class sleep_st { public: Arc::SimpleCondition* sleep_cond; CommFIFO* timeout; std::string control_dir; bool to_exit; // tells thread to exit bool exited; // set by thread while exiting sleep_st(const std::string& control):sleep_cond(NULL),timeout(NULL),control_dir(control),to_exit(false),exited(false) { }; ~sleep_st(void) { to_exit = true; CommFIFO::Signal(control_dir); while(!exited) sleep(1); }; }; */ class WakeupInterface: protected Arc::Thread, public CommFIFO { public: WakeupInterface(JobsList& jobs); ~WakeupInterface(); bool start(); protected: void thread(); JobsList& jobs_; bool to_exit; // tells thread to exit bool exited; // set by thread while exiting }; WakeupInterface::WakeupInterface(JobsList& jobs): jobs_(jobs), to_exit(false), exited(true) { } WakeupInterface::~WakeupInterface() { to_exit = true; CommFIFO::kick(); while(!exited) { sleep(1); CommFIFO::kick(); } } bool WakeupInterface::start() { // No need to do locking because this method is // always called from single thread if(!exited) return false; exited = !Arc::Thread::start(); return !exited; } void WakeupInterface::thread() { for(;;) { if(to_exit) break; // request to stop std::string event; bool has_event = CommFIFO::wait(event); if(to_exit) break; // request to stop if(has_event) { // Event arrived if(!event.empty()) { // job id provided logger.msg(Arc::DEBUG, "External request for attention %s", event); jobs_.RequestAttention(event); } else { // generic kick jobs_.RequestAttention(); }; } else { // timeout - use as timer jobs_.RequestAttention(); }; }; exited = true; } void touch_heartbeat(const std::string& dir, const std::string& file) { std::string gm_heartbeat(dir + "/" + file); int r = ::open(gm_heartbeat.c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR); if (r == -1) { logger.msg(Arc::WARNING, "Failed to open heartbeat file %s", gm_heartbeat); } else { ::close(r); r = -1; }; } void GridManager::grid_manager(void* arg) { GridManager* gm = (GridManager*)arg; if(!arg) { ::kill(::getpid(),SIGTERM); return; } if(!gm->thread()) { // thread exited because of internal error // that means whole server must be stopped ::kill(::getpid(),SIGTERM); } } bool GridManager::thread() { logger.msg(Arc::INFO,"Starting jobs processing thread"); logger.msg(Arc::INFO,"Used configuration file %s",config_.ConfigFile()); config_.Print(); // Preparing various structures, dirs, etc. ARex::DelegationStores* delegs = config_.GetDelegations(); if(delegs) { ARex::DelegationStore& deleg = (*delegs)[config_.DelegationDir()]; if(!deleg) { logger.msg(Arc::FATAL,"Error initiating delegation database in %s. " "Maybe permissions are not suitable. 
Returned error is: %s.", config_.DelegationDir(),deleg.Error()); return false; }; }; /* start timer thread - wake up every 2 minutes */ // TODO: use timed wait instead of dedicated thread // check if cache cleaning is enabled, if so activate cleaning thread cache_st cache_h(&config_); if (!config_.CacheParams().getCacheDirs().empty() && config_.CacheParams().cleanCache()) { if(!Arc::CreateThreadFunction(cache_func,&cache_h,&cache_h.counter)) { logger.msg(Arc::INFO,"Failed to start new thread: cache won't be cleaned"); } } // Start new job list JobsList jobs(config_); if(!jobs) { logger.msg(Arc::ERROR, "Failed to activate Jobs Processing object, exiting Grid Manager thread"); return false; } // Setup listening for job attention requests WakeupInterface wakeup_interface_(jobs); CommFIFO::add_result r = wakeup_interface_.add(config_.ControlDir()); if(r != CommFIFO::add_success) { if(r == CommFIFO::add_busy) { logger.msg(Arc::FATAL,"Error adding communication interface in %s. " "Maybe another instance of A-REX is already running.",config_.ControlDir()); } else { logger.msg(Arc::FATAL,"Error adding communication interface in %s. " "Maybe permissions are not suitable.",config_.ControlDir()); }; return false; }; wakeup_interface_.timeout(config_.WakeupPeriod()); if(!wakeup_interface_.start()) { logger.msg(Arc::ERROR,"Failed to start new thread for monitoring job requests"); return false; }; // Start jobs processing jobs_ = &jobs; logger.msg(Arc::INFO,"Picking up left jobs"); jobs.RestartJobs(); logger.msg(Arc::INFO, "Starting data staging threads"); std::string heartbeat_file("gm-heartbeat"); Arc::WatchdogChannel wd(config_.WakeupPeriod()*3+300); /* main loop - forever */ logger.msg(Arc::INFO,"Starting jobs' monitoring"); time_t poll_job_time = time(NULL); // run once immediately + config_.WakeupPeriod(); for(;;) { if(tostop_) break; // TODO: make processing of SSH async or remove SSH from GridManager completely if (config_.UseSSH()) { // TODO: can there be more than one session root? while (!config_.SSHFS_OK(config_.SessionRoots().front())) { logger.msg(Arc::WARNING, "SSHFS mount point of session directory (%s) is broken - waiting for reconnect ...", config_.SessionRoots().front()); active_.wait(10000); } while(!config_.SSHFS_OK(config_.RTEDir())) { logger.msg(Arc::WARNING, "SSHFS mount point of runtime directory (%s) is broken - waiting for reconnect ...", config_.RTEDir()); active_.wait(10000); } // TODO: can there be more than one cache dir? while(!config_.SSHFS_OK(config_.CacheParams().getCacheDirs().front())) { logger.msg(Arc::WARNING, "SSHFS mount point of cache directory (%s) is broken - waiting for reconnect ...", config_.CacheParams().getCacheDirs().front()); active_.wait(10000); } } // TODO: check conditions for following calls JobLog* joblog = config_.GetJobLog(); if(joblog) { // run jura reporter if enabled if (joblog->ReporterEnabled()){ joblog->RunReporter(config_); } } // TODO: review metrics calls to reduce frequency of calling gmetrics tool. 
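// Shape of the loop body that follows: every pass syncs metrics and serves
// jobs queued for attention (ActJobsAttention()), while the poll_job_time
// branch runs the periodic housekeeping at most once per WakeupPeriod:
// touching the heartbeat file, kicking the watchdog, scanning for new jobs
// and marks, and expiring old delegations.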
JobsMetrics* metrics = config_.GetJobsMetrics(); if(metrics) metrics->Sync(); // Process jobs which need attention ASAP jobs.ActJobsAttention(); if(((int)(time(NULL) - poll_job_time)) >= 0) { // Polling time poll_job_time = time(NULL) + config_.WakeupPeriod(); // touch heartbeat file touch_heartbeat(config_.ControlDir(), heartbeat_file); // touch temporary configuration so /tmp cleaner does not erase it if(config_.ConfigIsTemp()) ::utimes(config_.ConfigFile().c_str(), NULL); // Tell watchdog we are alive wd.Kick(); /* check for new marks and activate related jobs - TODO: remove */ jobs.ScanNewMarks(); /* look for new jobs - TODO: remove */ jobs.ScanNewJobs(); /* process jobs which do not get attention calls in their current state */ jobs.ActJobsPolling(); //jobs.ActJobs(); // Clean old delegations ARex::DelegationStores* delegs = config_.GetDelegations(); if(delegs) { ARex::DelegationStore& deleg = (*delegs)[config_.DelegationDir()]; deleg.Expiration(24*60*60); deleg.CheckTimeout(60); // During this time delegation database will be locked. So it must not be too long. deleg.PeriodicCheckConsumers(); // once in a while check for delegations which are locked by non-exiting jobs std::list lock_ids; if(deleg.GetLocks(lock_ids)) { for(std::list::iterator lock_id = lock_ids.begin(); lock_id != lock_ids.end(); ++lock_id) { time_t t = job_state_time(*lock_id,config_); // Returns zero if file is not present if(t == 0) { logger.msg(Arc::ERROR,"Orphan delegation lock detected (%s) - cleaning", *lock_id); deleg.ReleaseCred(*lock_id); // not forcing credential removal - PeriodicCheckConsumers will do it with time control }; }; } else { logger.msg(Arc::ERROR,"Failed to obtain delegation locks for cleaning orphaned locks"); }; }; }; //Is this the right place to call ReportHeartBeatChange? 
HeartBeatMetrics* heartbeat_metrics = config_.GetHeartBeatMetrics(); if(heartbeat_metrics) heartbeat_metrics->ReportHeartBeatChange(config_); SpaceMetrics* space_metrics = config_.GetSpaceMetrics(); if(space_metrics) space_metrics->ReportSpaceChange(config_); jobs.WaitAttention(); logger.msg(Arc::DEBUG,"Waking up"); }; // Waiting for children to finish logger.msg(Arc::INFO,"Stopping jobs processing thread"); jobs.PrepareToDestroy(); logger.msg(Arc::INFO,"Exiting jobs processing thread"); jobs_ = NULL; return true; } void GridManager::RequestJobAttention(const std::string& job_id) { if(jobs_) { // TODO: a bit of race condition here against destructor jobs_->RequestAttention(job_id); }; } GridManager::GridManager(GMConfig& config):tostop_(false), config_(config) { jobs_ = NULL; if(!Arc::CreateThreadFunction(&grid_manager,(void*)this,&active_)) { }; } GridManager::~GridManager(void) { if(!jobs_) return; // Not initialized at all logger.msg(Arc::INFO, "Requesting to stop job processing"); // Tell main thread to stop tostop_ = true; // Wait for main thread while(true) { if(jobs_) // Race condition again jobs_->RequestAttention(); // Kick jobs processor to release control if(active_.wait(1000)) break; logger.msg(Arc::VERBOSE, "Waiting for main job processing thread to exit"); } logger.msg(Arc::INFO, "Stopped job processing"); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/gm_kick.cpp0000644000000000000000000000013215067751327023446 xustar0030 mtime=1759498967.754640094 30 atime=1759498967.864493635 30 ctime=1759499029.437691249 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/gm_kick.cpp0000644000175000002070000000416015067751327025351 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "conf/GMConfig.h" #include "jobs/CommFIFO.h" int main(int argc,char* argv[]) { Arc::OptionParser options("[control_dir]", istring("gm-kick wakes up the A-REX corresponding to the given " "control directory. 
If no directory is given it uses the control directory " "found in the configuration file.")); std::string conf_file; options.AddOption('c', "conffile", istring("use specified configuration file"), istring("file"), conf_file); std::list<std::string> job_ids; options.AddOption('j', "jobid", istring("inform about changes in a particular job (can be used multiple times)"), istring("id"), job_ids); std::list<std::string> params = options.Parse(argc, argv); std::string control_dir; if (params.empty()) { // Read from config ARex::GMConfig config(conf_file); if (!config.Load()) { std::cerr << "Could not load configuration from " << config.ConfigFile() << std::endl; return 1; } if (config.ControlDir().empty()) { std::cerr << "No control dir found in configuration file " << config.ConfigFile() << std::endl; return 1; } control_dir = config.ControlDir(); } else { control_dir = params.front(); if (control_dir[0] != '/') { char buf[1024]; if (getcwd(buf, 1024) != NULL) control_dir = std::string(buf) + "/" + control_dir; } } bool success = true; if(job_ids.empty()) { // general kick success = ARex::CommFIFO::Signal(control_dir); } else { for(std::list<std::string>::iterator id = job_ids.begin(); id != job_ids.end(); ++id) { if(!ARex::CommFIFO::Signal(control_dir,*id)) { success = false; }; }; }; if(!success) { std::cerr<<"Failed reporting changes to A-REX"< #endif #include #include #include #include #include #include "../files/ControlFileHandling.h" #include "../run/RunParallel.h" #include "../mail/send_mail.h" #include "../log/JobLog.h" #include "../log/JobsMetrics.h" #include "../misc/proxy.h" #include "../../delegation/DelegationStores.h" #include "../../delegation/DelegationStore.h" #include "../conf/GMConfig.h" #include "ContinuationPlugins.h" #include "DTRGenerator.h" #include "JobsList.h" namespace ARex { /* max time to run submit-*-job/cancel-*-job before starting to look for an alternative way to detect the result. Only for protecting against a lost child. */ #define CHILD_RUN_TIME_SUSPICIOUS (10*60) /* max time to run submit-*-job/cancel-*-job before deciding that it is gone. Only for protecting against a lost child. */ #define CHILD_RUN_TIME_TOO_LONG (60*60) static Arc::Logger& logger = Arc::Logger::getRootLogger(); JobsList::ExternalHelpers::ExternalHelpers(const std::list& commands, JobsList const& jobs): Arc::Thread(), jobs_list(jobs), stop_request(false) { for (std::list::const_iterator command = commands.begin(); command != commands.end(); ++command) { helpers.push_back(*command); } } void JobsList::ExternalHelpers::start() { if(!helpers.empty()) Arc::Thread::start(&stop_cond); } JobsList::ExternalHelpers::~ExternalHelpers() { stop_request = true; stop_cond.wait(); } void JobsList::ExternalHelpers::thread(void) { while(!stop_request) { for (std::list::iterator i = helpers.begin(); i != helpers.end(); ++i) { i->run(jobs_list); // Wait enough to avoid a failing helper consuming 100% CPU sleep(10); // This will delay the destructor. And it is not very nice that the value is hardcoded.
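// When stop_request is set by the destructor, the surrounding while loop
// ends and the cleanup pass further down calls stop() on every helper
// before the thread exits.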
} } for (std::list::iterator i = helpers.begin(); i != helpers.end(); ++i) { i->stop(); } } JobsList::JobsList(const GMConfig& gmconfig) : valid(false), jobs_processing(ProcessingQueuePriority, "processing"), jobs_attention(AttentionQueuePriority, "attention"), jobs_polling(0, "polling"), jobs_wait_for_running(WaitQueuePriority, "wait for running"), config(gmconfig), staging_config(gmconfig), dtr_generator(config, *this), job_desc_handler(config), jobs_pending(0), helpers(config.Helpers(), *this) { job_slow_polling_last = time(NULL); job_slow_polling_dir = NULL; for(int n = 0;n lock(jobs_lock); std::map::iterator ji = jobs.find(id); if(ji == jobs.end()) return GMJobRef(); return ji->second; } bool JobsList::HasJob(const JobId &id) const { std::unique_lock lock(jobs_lock); std::map::const_iterator ji = jobs.find(id); return (ji != jobs.end()); } void JobsList::UpdateJobCredentials(GMJobRef i) { if(i) { if(GetLocalDescription(i)) { std::string delegation_id = i->local->delegationid; if(!delegation_id.empty()) { ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) { std::string cred; if((*delegs)[config.DelegationDir()].GetCred(delegation_id,i->local->DN,cred)) { (void)job_proxy_write_file(*i,config,cred); }; }; }; }; }; } void JobsList::SetJobState(GMJobRef i, job_state_t new_state, const char* reason) { if(i) { if((i->job_state != new_state) || (i->job_pending)) { JobsMetrics* metrics = config.GetJobsMetrics(); if(metrics) metrics->ReportJobStateChange(config, i, i->job_state, new_state); std::string msg = Arc::Time().str(Arc::UTCTime); msg += " Job state change "; msg += i->get_state_name(); msg += " -> "; msg += GMJob::get_state_name(new_state); if(reason) { msg += " Reason: "; msg += reason; }; msg += "\n"; i->job_state = new_state; i->job_pending = false; job_errors_mark_add(*i,config,msg); // During intermediate period job.proxy file must contain full delegated proxy. // To ensure its content is up to date even if proxy was updated in store here // we update content of that file on every active job state change. 
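// The check below skips DELETED and UNDEFINED: such jobs are on their way
// out of the system, so there is presumably nothing useful left to refresh
// in job.proxy for them.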
if((new_state != JOB_STATE_DELETED) && (new_state != JOB_STATE_UNDEFINED)) UpdateJobCredentials(i); }; }; } void JobsList::SetJobPending(GMJobRef i, const char* reason) { if(i) { if(!i->job_pending) { std::string msg = Arc::Time().str(Arc::UTCTime); msg += " Job state change "; msg += i->get_state_name(); msg += " -> "; msg += i->get_state_name(); msg += "(PENDING)"; if(reason) { msg += " Reason: "; msg += reason; }; msg += "\n"; i->job_pending = true; job_errors_mark_add(*i,config,msg); }; }; } bool JobsList::AddJob(const JobId &id,uid_t uid,gid_t gid,job_state_t state,const char* reason){ GMJobRef i(new GMJob(id,Arc::User(uid))); i->keep_finished=config.KeepFinished(); i->keep_deleted=config.KeepDeleted(); i->job_state = state; i->job_pending = false; if (!GetLocalDescription(i)) { // safest thing to do is add failure and move to FINISHED i->AddFailure("Internal error"); SetJobState(i, JOB_STATE_FINISHED, "Internal failure"); FailedJob(i, false); if(!job_state_write_file(*i,config,i->job_state,i->job_pending)) { logger.msg(Arc::ERROR, "%s: Failed reading .local and changing state, job and " "A-REX may be left in an inconsistent state", id); } std::unique_lock lock(jobs_lock); if(jobs.find(id) != jobs.end()) { logger.msg(Arc::ERROR, "%s: unexpected failed job add request: %s", i->job_id, reason?reason:""); } else { jobs[id] = i; RequestReprocess(i); // To make the job be properly thrown out of the system } return false; } i->session_dir = i->local->sessiondir; if (i->session_dir.empty()) i->session_dir = config.SessionRoot(id)+'/'+id; std::unique_lock lock(jobs_lock); if(jobs.find(id) != jobs.end()) { logger.msg(Arc::ERROR, "%s: unexpected job add request: %s", i->job_id, reason?reason:""); } else { jobs[id] = i; RequestAttention(i); } return true; } int JobsList::AcceptedJobs() const { return jobs_num[JOB_STATE_ACCEPTED] + jobs_num[JOB_STATE_PREPARING] + jobs_num[JOB_STATE_SUBMITTING] + jobs_num[JOB_STATE_INLRMS] + jobs_num[JOB_STATE_FINISHING] + jobs_pending; } bool JobsList::RunningJobsLimitReached() const { if(config.MaxRunning()==-1) return false; int num = jobs_num[JOB_STATE_SUBMITTING] + jobs_num[JOB_STATE_INLRMS]; return num >= config.MaxRunning(); } void JobsList::PrepareToDestroy(void) { std::unique_lock lock(jobs_lock); for(std::map::iterator i=jobs.begin();i!=jobs.end();++i) { i->second->PrepareToDestroy(); } } bool JobsList::RequestAttention(const JobId& id) { GMJobRef i = FindJob(id); if(!i) { // Must be new job arriving or finished job which got user request or whatever if(!ScanNewJob(id) && !ScanOldJob(id)) return false; // If scanned successfully the job is immediately requested for attention. } else { if(!RequestAttention(i)) { // Request by id most probably means there is something external pending. // And because we do not want an external cancel request to get lost, check // immediately and inform the DTR generator (so it does not hold the job). if(job_cancel_mark_check(i->job_id,config)) dtr_generator.cancelJob(i); // Please note job is not canceled here. But cancel mark will be picked up later.
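// cancelJob() above only tells the data staging (DTR) side to stop holding
// the job; the actual cancellation still happens when the on-disk cancel
// mark is processed later.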
return false; }; }; return true; } bool JobsList::RequestAttention(GMJobRef i) { if(i) { logger.msg(Arc::DEBUG, "%s: job for attention", i->job_id); if(jobs_attention.Push(i)) { jobs_attention_cond.signal(); return true; }; }; return false; } void JobsList::RequestAttention(void) { logger.msg(Arc::DEBUG, "all for attention"); jobs_attention_cond.signal(); } bool JobsList::ScanOldJobs(void) { if(job_slow_polling_dir) { // continue already started scanning std::string file = job_slow_polling_dir->read_name(); if(file.empty()) { delete job_slow_polling_dir; job_slow_polling_dir = NULL; } // extract job id and cause its processing // todo: implement fetching a few jobs before passing them to attention // job id must contain at least one character int l=file.length(); if(l>7 && file.substr(l-7) == ".status") { JobId id(file.substr(0, l-7)); logger.msg(Arc::DEBUG, "%s: job found while scanning", id); RequestAttention(id); }; } else { // Check if it is time for next scanning if((time(NULL) - job_slow_polling_last) >= job_slow_polling_period) { job_slow_polling_dir = new Glib::Dir(config.ControlDir()+"/"+subdir_old); if(job_slow_polling_dir) job_slow_polling_last = time(NULL); }; }; if(!job_slow_polling_dir) return false; return true; } void JobsList::WaitAttention(void) { // Check if condition signaled while(!jobs_attention_cond.wait(0)) { // Use spare time to process slow polling queue if(!ScanOldJobs()) { // If there is no scanning going on then simply wait and exit jobs_attention_cond.wait(); return; }; }; // while !jobs_attention_cond } bool JobsList::RequestWaitForRunning(GMJobRef i) { if(i) { logger.msg(Arc::DEBUG, "%s: job will wait for external process", i->job_id); jobs_wait_for_running.Push(i); return true; }; return false; } bool JobsList::RequestPolling(GMJobRef i) { if(i) { jobs_polling.Push(i); return true; }; return false; } bool JobsList::RequestSlowPolling(GMJobRef i) { if(i) { logger.msg(Arc::DEBUG, "%s: job assigned for slow polling", i->job_id); return true; }; return false; } bool JobsList::RequestReprocess(GMJobRef i) { if(i) { jobs_processing.Unpop(i); return true; }; return false; } bool JobsList::ActJobsProcessing(void) { while(true) { GMJobRef i = jobs_processing.Pop(); if(!i) break; logger.msg(Arc::DEBUG, "%s: job being processed", i->job_id); ActJob(i); }; // Check limit on number of running jobs and activate some of them if possible if(!RunningJobsLimitReached()) { GMJobRef i = jobs_wait_for_running.Pop(); if(i) RequestAttention(i); }; return true; } bool JobsList::ActJobsAttention(void) { { while(true) { GMJobRef i = jobs_attention.Pop(); if(!i) break; jobs_processing.Push(i); }; }; ActJobsProcessing(); return true; } bool JobsList::ActJobsPolling(void) { { while(true) { GMJobRef i = jobs_polling.Pop(); if(!i) break; jobs_processing.Push(i); }; }; ActJobsProcessing(); // debug info on jobs per DN { std::unique_lock lock(jobs_lock); logger.msg(Arc::VERBOSE, "Current jobs in system (PREPARING to FINISHING) per-DN (%i entries)", jobs_dn.size()); for (std::map::iterator it = jobs_dn.begin(); it != jobs_dn.end(); ++it) logger.msg(Arc::VERBOSE, "%s: %i", it->first, (unsigned int)(it->second)); }; return true; } bool JobsList::FailedJob(GMJobRef i,bool cancel) { bool r = true; // add failure mark if(job_failed_mark_add(*i,config,i->failure_reason)) { i->failure_reason = ""; } else { logger.msg(Arc::ERROR,"%s: Failed storing failure reason: %s",i->job_id,Arc::StrError(errno)); r = false; } if(GetLocalDescription(i)) { i->local->uploads=0; } else { logger.msg(Arc::ERROR,"%s:
Failed reading job description: %s",i->job_id,Arc::StrError(errno)); r=false; } // If the job failed during FINISHING then DTR deals with .output if (i->get_state() == JOB_STATE_FINISHING) { if (i->local) job_local_write_file(*i,config,*(i->local)); return r; } // adjust output files to failure state // Not good looking code JobLocalDescription job_desc; if(job_desc_handler.parse_job_req(i->get_id(),job_desc) != JobReqSuccess) { logger.msg(Arc::ERROR,"%s: Failed parsing job request.",i->job_id); r = false; } // Convert delegation ids to credential paths. ARex::DelegationStores* delegs = config.GetDelegations(); std::string default_cred = job_proxy_filename(i->get_id(), config); // TODO: drop job.proxy as source of delegation std::string default_cred_type; if(!job_desc.delegationid.empty()) { if(delegs) { std::list meta; DelegationStore& deleg = delegs->operator[](config.DelegationDir()); std::string fname = deleg.FindCred(job_desc.delegationid, job_desc.DN, meta); if(!fname.empty()) { default_cred = fname; default_cred_type = (!meta.empty())?meta.front():""; }; }; }; for(std::list::iterator f = job_desc.outputdata.begin(); f != job_desc.outputdata.end(); ++f) { if(f->has_lfn()) { if(f->cred.empty()) { f->cred = default_cred; f->cred_type = default_cred_type; } else { std::string path; std::list meta; if(delegs && i->local) path = (*delegs)[config.DelegationDir()].FindCred(f->cred,i->local->DN,meta); f->cred = path; f->cred_type = (!meta.empty())?meta.front():""; } if(i->local) ++(i->local->uploads); } } // Add user-uploaded input files so that they are not deleted during // FINISHING and so resume will work. Credentials are not necessary for // these files. The real output list will be recreated from the job // description if the job is restarted. if (!cancel && job_desc.reruns > 0) { for(std::list::iterator f = job_desc.inputdata.begin(); f != job_desc.inputdata.end(); ++f) { if (f->lfn.find(':') == std::string::npos) { FileData fd(f->pfn, ""); fd.iffailure = true; // make sure to keep file job_desc.outputdata.push_back(fd); } } } if(!job_output_write_file(*i,config,job_desc.outputdata,cancel?job_output_cancel:job_output_failure)) { r=false; logger.msg(Arc::ERROR,"%s: Failed writing list of output files: %s",i->job_id,Arc::StrError(errno)); } if(i->local) job_local_write_file(*i,config,*(i->local)); return r; } bool JobsList::GetLocalDescription(GMJobRef i) const { if(!i->GetLocalDescription(config)) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); return false; } return true; } void JobsList::CleanChildProcess(GMJobRef i) { if(i->child) { delete i->child; i->child=NULL; if((i->job_state == JOB_STATE_SUBMITTING) || (i->job_state == JOB_STATE_CANCELING)) --jobs_scripts; } } bool JobsList::state_submitting_success(GMJobRef i,bool &state_changed,std::string local_id) { CleanChildProcess(i); if(local_id.empty()) { local_id=job_desc_handler.get_local_id(i->job_id); if(local_id.empty()) { logger.msg(Arc::ERROR,"%s: Failed obtaining lrms id",i->job_id); i->AddFailure("Failed extracting LRMS ID due to some internal error"); JobFailStateRemember(i,JOB_STATE_SUBMITTING); return false; } } // put id into local information file if(!GetLocalDescription(i)) { i->AddFailure("Internal error"); return false; } i->local->localid=local_id; if(!job_local_write_file(*i,config,*(i->local))) { i->AddFailure("Internal error"); logger.msg(Arc::ERROR,"%s: Failed writing local information: %s",i->job_id,Arc::StrError(errno)); return false; } // move to next state state_changed=true; return 
true; } bool JobsList::state_submitting(GMJobRef i,bool &state_changed) { if(i->child == NULL) { // no child was running yet, or recovering from fault if((config.MaxScripts()!=-1) && (jobs_scripts>=config.MaxScripts())) { //logger.msg(Arc::WARNING,"%s: Too many LRMS scripts running - limit is %u", // i->job_id,config.MaxScripts()); // returning true but not advancing to next state should cause retry return true; } // Just in case we are recovering from restart or failure check if we already have // LRMS id (previously run submission script succeeded). std::string local_id=job_desc_handler.get_local_id(i->job_id); if(!local_id.empty()) { // Have local id - skip running submission script return state_submitting_success(i,state_changed,local_id); } // write grami file for submit-X-job if(!(i->GetLocalDescription(config))) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); i->AddFailure("Internal error: can't read local file"); return false; }; JobLocalDescription* job_desc = i->local; if(!job_desc_handler.write_grami(*i)) { logger.msg(Arc::ERROR,"%s: Failed creating grami file",i->job_id); return false; } if(!job_desc_handler.set_execs(*i)) { logger.msg(Arc::ERROR,"%s: Failed setting executable permissions",i->job_id); return false; } // precreate file to store diagnostics from lrms job_diagnostics_mark_put(*i,config); job_lrmsoutput_mark_put(*i,config); // submit job to LRMS using submit-X-job std::string cmd = Arc::ArcLocation::GetDataDir()+"/submit-"+job_desc->lrms+"-job"; logger.msg(Arc::INFO,"%s: state SUBMIT: starting child: %s",i->job_id,cmd); std::string grami = job_control_path(config.ControlDir(),(*i).job_id,sfx_grami); cmd += " --config " + config.ConfigFile() + " " + grami; job_errors_mark_put(*i,config); i->child_output.clear(); if(!RunParallel::run(config,*i,*this,&(i->child_output),cmd,&(i->child))) { i->AddFailure("Failed initiating job submission to LRMS"); logger.msg(Arc::ERROR,"%s: Failed running submission process",i->job_id); return false; } ++jobs_scripts; if((config.MaxScripts()!=-1) && (jobs_scripts>=config.MaxScripts())) { logger.msg(Arc::WARNING,"%s: LRMS scripts limit of %u is reached - suspending submit/cancel", i->job_id,config.MaxScripts()); } return true; } // child was run - check if exited and then exit code if(i->child->Running()) { // child is running - come later // For an unknown reason sometimes the child exit event is lost. // As a workaround check if the child has been running for too long. If // it has, then check the grami file for a generated local id // or in case of cancel just assume the child exited. if((Arc::Time() - i->child->RunTime()) > Arc::Period(CHILD_RUN_TIME_SUSPICIOUS)) { // Check if local id is already obtained std::string local_id=job_desc_handler.get_local_id(i->job_id); if(!local_id.empty()) { logger.msg(Arc::ERROR,"%s: Job submission to LRMS takes too long, but ID is already obtained. Pretending submission is done.",i->job_id); return state_submitting_success(i,state_changed,local_id); } } if((Arc::Time() - i->child->RunTime()) > Arc::Period(CHILD_RUN_TIME_TOO_LONG)) { // In any case it is way too long. Job must fail. Otherwise it will hang forever. CleanChildProcess(i); logger.msg(Arc::ERROR,"%s: Job submission to LRMS takes too long. Failing.",i->job_id); JobFailStateRemember(i,JOB_STATE_SUBMITTING); i->AddFailure("Job submission to LRMS failed"); // It would be nice to cancel if job finally submits. But we do not know id.
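// Returning false from this timeout path surfaces as JobFailed in
// ActJobSubmitting() below, with the failure reason recorded by the
// AddFailure() call above.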
return false; } return true; } // real processing logger.msg(Arc::INFO,"%s: state SUBMIT: child exited with code %i",i->job_id,i->child->Result()); // Another workaround in Run class may also detect lost child. // It then sets exit code to -1. This value is also set in // case child was killed. So it is worth to check grami anyway. if((i->child->Result() != 0) && (i->child->Result() != -1)) { logger.msg(Arc::ERROR,"%s: Job submission to LRMS failed",i->job_id); JobFailStateRemember(i,JOB_STATE_SUBMITTING); CleanChildProcess(i); if(i->child_output.empty()) { i->AddFailure("Job submission to LRMS failed"); } else { i->AddFailure(i->child_output); } return false; } // success code - process LRMS job id return state_submitting_success(i,state_changed,""); } bool JobsList::state_canceling_success(GMJobRef i,bool &state_changed) { // job diagnostics collection done in background (scan-*-job script) if(!job_lrms_mark_check(i->job_id,config)) { // job diag not yet collected - come later if((i->child->ExitTime() != Arc::Time::UNDEFINED) && ((Arc::Time() - i->child->ExitTime()) > Arc::Period(Arc::Time::HOUR))) { // it takes too long logger.msg(Arc::ERROR,"%s: state CANCELING: timeout waiting for cancellation",i->job_id); CleanChildProcess(i); return false; } return true; } else { logger.msg(Arc::INFO,"%s: state CANCELING: job diagnostics collected",i->job_id); CleanChildProcess(i); job_diagnostics_mark_move(*i,config); } // move to next state state_changed=true; return true; } bool JobsList::state_canceling(GMJobRef i,bool &state_changed) { if(i->child == NULL) { // no child was running yet, or recovering from fault if((config.MaxScripts()!=-1) && (jobs_scripts>=config.MaxScripts())) { //logger.msg(Arc::WARNING,"%s: Too many LRMS scripts running - limit is %u", // i->job_id,config.MaxScripts()); // returning true but not advancing to next state should cause retry return true; } // write grami file for cancel-X-job if(!(i->GetLocalDescription(config))) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); return false; }; JobLocalDescription* job_desc = i->local; // cancel job to LRMS using cancel-X-job std::string cmd; cmd=Arc::ArcLocation::GetDataDir()+"/cancel-"+job_desc->lrms+"-job"; if(!job_lrms_mark_check(i->job_id,config)) { logger.msg(Arc::INFO,"%s: state CANCELING: starting child: %s",i->job_id,cmd); } else { logger.msg(Arc::INFO,"%s: Job has completed already. No action taken to cancel",i->job_id); state_changed=true; return true; } std::string grami = job_control_path(config.ControlDir(),(*i).job_id,sfx_grami); cmd += " --config " + config.ConfigFile() + " " + grami; job_errors_mark_put(*i,config); if(!RunParallel::run(config,*i,*this,NULL,cmd,&(i->child))) { logger.msg(Arc::ERROR,"%s: Failed running cancellation process",i->job_id); return false; } ++jobs_scripts; if((config.MaxScripts()!=-1) && (jobs_scripts>=config.MaxScripts())) { logger.msg(Arc::WARNING,"%s: LRMS scripts limit of %u is reached - suspending submit/cancel", i->job_id,config.MaxScripts()); } return true; } // child was run - check if exited if(i->child->Running()) { // child is running - come later // Due to unknown reason sometimes child exit event is lost. // As workaround check if child is running for too long. // In case of cancel just assume child exited. 
if((Arc::Time() - i->child->RunTime()) > Arc::Period(CHILD_RUN_TIME_SUSPICIOUS)) { // Check if diagnostics collection is done if(job_lrms_mark_check(i->job_id,config)) { logger.msg(Arc::ERROR,"%s: Job cancellation takes too long, but diagnostic collection seems to be done. Pretending cancellation succeeded.",i->job_id); return state_canceling_success(i,state_changed); } } if((Arc::Time() - i->child->RunTime()) > Arc::Period(CHILD_RUN_TIME_TOO_LONG)) { // In any case it is way too long. Job must fail. Otherwise it will hang forever. logger.msg(Arc::ERROR,"%s: Job cancellation takes too long. Failing.",i->job_id); CleanChildProcess(i); return false; } return true; } // real processing if((i->child->ExitTime() != Arc::Time::UNDEFINED) && ((Arc::Time() - i->child->ExitTime()) < (config.WakeupPeriod()*2))) { // not ideal solution logger.msg(Arc::INFO,"%s: state CANCELING: child exited with code %i",i->job_id,i->child->Result()); } // Another workaround in Run class may also detect lost child. // It then sets exit code to -1. This value is also set in // case child was killed. So it is worth to check grami anyway. if((i->child->Result() != 0) && (i->child->Result() != -1)) { logger.msg(Arc::ERROR,"%s: Failed to cancel running job",i->job_id); CleanChildProcess(i); return false; } return state_canceling_success(i,state_changed); } bool JobsList::state_loading(GMJobRef i,bool &state_changed,bool up) { // first check if job is already in the system if (!dtr_generator.hasJob(i)) { return dtr_generator.receiveJob(i); } // if job has already failed then do not set failed state again if DTR failed bool already_failed = i->CheckFailure(config); // queryJobFinished() calls i->AddFailure() if any DTR failed if (dtr_generator.queryJobFinished(i)) { // DTR part already finished. Do other checks if needed. logger.msg(Arc::VERBOSE, "%s: State: %s: data staging finished", i->job_id, (up ? "FINISHING" : "PREPARING")); bool done = true; bool result = true; // check for failure if (i->CheckFailure(config)) { if (!already_failed) JobFailStateRemember(i, (up ? JOB_STATE_FINISHING : JOB_STATE_PREPARING)); result = false; } else { if (!up) { // check for user-uploadable files if downloading DTRGenerator::checkUploadedFilesResult res = dtr_generator.checkUploadedFiles(i); if (res == DTRGenerator::uploadedFilesMissing) { // still going // Every file will cause request for attention. // So polling is mostly for handling timeout. RequestPolling(i); done = false; } else if (res == DTRGenerator::uploadedFilesSuccess) { // finished successfully state_changed=true; } else { // error result = false; } } else { // if uploading we are done state_changed = true; } } if (done) dtr_generator.removeJob(i); return result; } else { // not finished yet - should not happen logger.msg(Arc::DEBUG, "%s: State: %s: still in data staging", i->job_id, (up ? 
"FINISHING" : "PREPARING")); // Since something is out of sync do polling as backup solution RequestPolling(i); return true; } } job_state_t JobsList::JobFailStateGet(GMJobRef i) { if(!GetLocalDescription(i)) return JOB_STATE_UNDEFINED; if(i->local->failedstate.empty()) { return JOB_STATE_UNDEFINED; } job_state_t state = GMJob::get_state(i->local->failedstate.c_str()); if(state != JOB_STATE_UNDEFINED) { if(i->local->reruns <= 0) { logger.msg(Arc::ERROR,"%s: Job is not allowed to be rerun anymore",i->job_id); job_local_write_file(*i,config,*(i->local)); return JOB_STATE_UNDEFINED; } i->local->failedstate=""; i->local->failedcause=""; i->local->reruns--; job_local_write_file(*i,config,*(i->local)); return state; } logger.msg(Arc::ERROR,"%s: Job failed in unknown state. Won't rerun.",i->job_id); i->local->failedstate=""; i->local->failedcause=""; job_local_write_file(*i,config,*(i->local)); return JOB_STATE_UNDEFINED; } bool JobsList::RecreateTransferLists(GMJobRef i) { // Recreate list of output and input files, excluding those already // transferred. For input files this is done by looking at the session dir, // for output files by excluding files in .output_status std::list output_files; std::list output_files_done; std::list input_files; // keep local info if(!GetLocalDescription(i)) return false; // get output files already done job_output_status_read_file(i->job_id,config,output_files_done); // recreate lists by reprocessing job description JobLocalDescription job_desc; // placeholder if(!job_desc_handler.process_job_req(*i,job_desc)) { logger.msg(Arc::ERROR,"%s: Reprocessing job description failed",i->job_id); return false; } // Restore 'local' if(!job_local_write_file(*i,config,*(i->local))) return false; // Read new lists if(!job_output_read_file(i->job_id,config,output_files)) { logger.msg(Arc::ERROR,"%s: Failed to read reprocessed list of output files",i->job_id); return false; } if(!job_input_read_file(i->job_id,config,input_files)) { logger.msg(Arc::ERROR,"%s: Failed to read reprocessed list of input files",i->job_id); return false; } // remove already uploaded files i->local->uploads=0; for(std::list::iterator i_new = output_files.begin(); i_new!=output_files.end();) { if(!(i_new->has_lfn())) { // user file - keep ++i_new; continue; } std::list::iterator i_done = output_files_done.begin(); for(;i_done!=output_files_done.end();++i_done) { if((i_new->pfn == i_done->pfn) && (i_new->lfn == i_done->lfn)) break; } if(i_done == output_files_done.end()) { ++i_new; i->local->uploads++; continue; } i_new=output_files.erase(i_new); } if(!job_output_write_file(*i,config,output_files)) return false; // remove already downloaded files i->local->downloads=0; for(std::list::iterator i_new = input_files.begin(); i_new!=input_files.end();) { std::string path = i->session_dir+"/"+i_new->pfn; struct stat st; if(::stat(path.c_str(),&st) == -1) { ++i_new; i->local->downloads++; } else { i_new=input_files.erase(i_new); } } if(!job_input_write_file(*i,config,input_files)) return false; return true; } bool JobsList::JobFailStateRemember(GMJobRef i,job_state_t state,bool internal) { if(!(i->GetLocalDescription(config))) { logger.msg(Arc::ERROR,"%s: Failed reading local information",i->job_id); return false; } if(state == JOB_STATE_UNDEFINED) { i->local->failedstate=""; i->local->failedcause=internal?"internal":"client"; return job_local_write_file(*i,config,*(i->local)); } if(i->local->failedstate.empty()) { i->local->failedstate=GMJob::get_state_name(state); 
i->local->failedcause=internal?"internal":"client"; return job_local_write_file(*i,config,*(i->local)); } return true; } time_t JobsList::PrepareCleanupTime(GMJobRef i,time_t& keep_finished) { JobLocalDescription job_desc; time_t t = -1; // read lifetime - if empty it won't be overwritten job_local_read_file(i->job_id,config,job_desc); if(!Arc::stringto(job_desc.lifetime,t)) t = keep_finished; if(t > keep_finished) t = keep_finished; time_t last_changed=job_state_time(i->job_id,config); t=last_changed+t; job_desc.cleanuptime=t; job_local_write_file(*i,config,job_desc); return t; } void JobsList::UnlockDelegation(GMJobRef i) { ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) (*delegs)[config.DelegationDir()].ReleaseCred(i->job_id,true,false); } JobsList::ActJobResult JobsList::ActJobUndefined(GMJobRef i) { ActJobResult job_result = JobDropped; // new job - read its status from status file, but first check if it is // under the limit of maximum jobs allowed in the system if((AcceptedJobs() < config.MaxJobs()) || (config.MaxJobs() == -1)) { bool new_pending = false; job_state_t new_state=job_state_read_file(i->job_id,config,new_pending); if(new_state == JOB_STATE_UNDEFINED) { // something failed logger.msg(Arc::ERROR,"%s: Reading status of new job failed",i->job_id); i->AddFailure("Failed reading status of the job"); return JobFailed; } // By keeping once_more==false the job does not cycle here but // goes out and registers its state in counters. This allows // limits to be maintained properly after restart. Except FINISHED // jobs because they are not kept in memory and should be // processed immediately. job_result = JobSuccess; if(new_state == JOB_STATE_ACCEPTED) { // first phase of job - just accepted - parse request SetJobState(i, new_state, "(Re)Accepting new job"); // this can be any state, after A-REX restart logger.msg(Arc::INFO,"%s: State: ACCEPTED: parsing job description",i->job_id); if(!job_desc_handler.process_job_req(*i,*i->local)) { logger.msg(Arc::ERROR,"%s: Processing job description failed",i->job_id); i->AddFailure("Could not process job description"); return JobFailed; // go to next job } job_state_write_file(*i,config,i->job_state,i->job_pending); // makes sure job state is stored in proper subdir // prepare information for logger // This call is not needed here because at higher level WriteJobRecord() // is called for every state change //if(config.GetJobLog()) config.GetJobLog()->WriteJobRecord(*i,config); // Write initial XML job information file. That should ensure combination of quick job // and slow infosys is not going to produce incomplete job information at next states. // Such effect was detected through observing finished job without exit code.
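// The static template below is a minimal job information stub; the per-job
// fields (ID, IDFromEndpoint, OtherInfo, Owner, Name and the CreationTime
// attribute) are filled in right after it is parsed into glue_xml.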
if(!job_xml_check_file(i->job_id,config)) { // in case job is restarted and we already have xml static const char* job_xml_template = "" "" "" "" "single" "" "nordugrid:ACCEPTED" "emies:accepted" "arcrest:ACCEPTED" "emiesattr:client-stagein-possible" "" ""; time_t created = job_description_time(i->job_id,config); if(created == 0) created = time(NULL); Arc::XMLNode glue_xml(job_xml_template); glue_xml["ID"] = std::string("urn:caid:")+Arc::URL(config.HeadNode()).Host()+":"+i->local->interface+":"+i->job_id; glue_xml["IDFromEndpoint"] = "urn:idfe:"+i->job_id; glue_xml["OtherInfo"] = "SubmittedVia=" + i->local->interface; glue_xml["Owner"] = i->local->DN; glue_xml["Name"] = i->local->jobname; glue_xml.Attribute("CreationTime") = Arc::Time(created).str(Arc::ISOTime); std::string glue_xml_str; glue_xml.GetXML(glue_xml_str,true); job_xml_write_file(i->job_id,config,glue_xml_str); } logger.msg(Arc::DEBUG, "%s: new job is accepted", i->job_id); RequestReprocess(i); // process to make job fall into Preparing and wait there } else if(new_state == JOB_STATE_FINISHED) { SetJobState(i, new_state, "(Re)Accepting new job"); // this can be any state, after A-REX restart RequestReprocess(i); // process immediately to fall off } else if(new_state == JOB_STATE_DELETED) { SetJobState(i, new_state, "(Re)Accepting new job"); // this can be any state, after A-REX restart RequestReprocess(i); // process immediately to fall off } else { // Generic case SetJobState(i, new_state, "(Re)Accepting new job"); // this can be any state, after A-REX restart if(new_pending) SetJobPending(i, "(Re)Accepting new job"); logger.msg(Arc::INFO,"%s: %s: New job belongs to %i/%i",i->job_id.c_str(), GMJob::get_state_name(new_state),i->get_user().get_uid(),i->get_user().get_gid()); // Make it clean state after restart job_state_write_file(*i,config,i->job_state,i->job_pending); // makes sure job state is stored in proper subdir i->Start(); logger.msg(Arc::DEBUG, "%s: old job is accepted", i->job_id); RequestAttention(i); // process ASAP TODO: consider Reprocess for some states } } // Not doing SetJobPending here because that job kind of does not exist. return job_result; } JobsList::ActJobResult JobsList::ActJobAccepted(GMJobRef i) { // accepted state - job was just accepted by A-REX and we already // know that it is accepted - now we are analyzing/parsing request, // or it can also happen we are waiting for user specified time logger.msg(Arc::VERBOSE,"%s: State: ACCEPTED",i->job_id); if(!GetLocalDescription(i)) { i->AddFailure("Internal error"); return JobFailed; // go to next job } if(i->local->dryrun) { logger.msg(Arc::INFO,"%s: State: ACCEPTED: dryrun",i->job_id); i->AddFailure("Job has dryrun requested. Job skipped."); return JobFailed; // go to next job } // check per-DN limit on processing jobs // TODO: do it in ActJobUndefined. Otherwise one DN can block others if total limit is reached. if (config.MaxPerDN() > 0) { bool limited = false; { std::unique_lock lock(jobs_lock); limited = (jobs_dn[i->local->DN] >= config.MaxPerDN()); } if (limited) { SetJobPending(i,"Jobs per DN limit is reached"); // Because we have no event for per-DN limit just do polling RequestPolling(i); return JobSuccess; } } // check for user specified time if(i->local->processtime != -1 && (i->local->processtime) > time(NULL)) { logger.msg(Arc::INFO,"%s: State: ACCEPTED: has process time %s",i->job_id.c_str(), i->local->processtime.str(Arc::UserTime)); // No events for start times yet. Do polling. 
RequestPolling(i); return JobSuccess; } logger.msg(Arc::INFO,"%s: State: ACCEPTED: moving to PREPARING",i->job_id); SetJobState(i, JOB_STATE_PREPARING, "Starting job processing"); i->Start(); // gather some frontend specific information for user, do it only once // Runs user-supplied executable placed at "frontend-info-collector" std::string cmd = Arc::ArcLocation::GetToolsDir()+"/frontend-info-collector"; char const * const args[2] = { cmd.c_str(), NULL }; job_controldiag_mark_put(*i,config,args); RequestReprocess(i); return JobSuccess; } JobsList::ActJobResult JobsList::ActJobPreparing(GMJobRef i) { // preparing state - job is in data staging system, so check if it has // finished and whether all user uploadable files have been uploaded. logger.msg(Arc::VERBOSE,"%s: State: PREPARING",i->job_id); bool state_changed = false; // TODO: avoid re-checking all conditions while job is pending if(i->job_pending || state_loading(i,state_changed,false)) { if(i->job_pending || state_changed) { // check for rest of state changing condition if(!GetLocalDescription(i)) { logger.msg(Arc::ERROR,"%s: Failed obtaining local job information.",i->job_id); i->AddFailure("Internal error"); return JobFailed; } // For jobs with free stage in check if user reported complete stage in. bool stagein_complete = true; if(i->local->freestagein) { stagein_complete = false; std::list ifiles; if(job_input_status_read_file(i->job_id,config,ifiles)) { for(std::list::iterator ifile = ifiles.begin(); ifile != ifiles.end(); ++ifile) { if(*ifile == "/") { stagein_complete = true; break; } } } } // Here we have branch. Either job is ordinary one and goes to SUBMIT // or it has no executable and hence goes to FINISHING if(!stagein_complete) { // Wait for user to report complete staging keeping job in PENDING SetJobPending(i, "Waiting for confirmation of stage-in complete from client"); // The complete stagein will be reported and will cause RequestAttention() // RequestPolling(i); } else if(i->local->exec.size() > 0 && !i->local->exec.front().empty()) { // Job has executable if(!RunningJobsLimitReached()) { // And limit of running jobs is not reached SetJobState(i, JOB_STATE_SUBMITTING, "Pre-staging finished, passing job to LRMS"); RequestReprocess(i); // act on new state immediately } else { // Wait for running jobs to fall below limit keeping job in PENDING SetJobPending(i, "Limit of RUNNING jobs is reached"); RequestWaitForRunning(i); } } else { // No execution requested SetJobState(i, JOB_STATE_FINISHING, "Job does NOT define executable. Going directly to post-staging."); RequestReprocess(i); // act on new state immediately } } return JobSuccess; } else { if(!i->CheckFailure(config)) i->AddFailure("Data download failed"); return JobFailed; } } JobsList::ActJobResult JobsList::ActJobSubmitting(GMJobRef i) { // everything is ready for submission to batch system or currently submitting logger.msg(Arc::VERBOSE,"%s: State: SUBMIT",i->job_id); bool state_changed = false; if(state_submitting(i,state_changed)) { if(state_changed) { SetJobState(i, JOB_STATE_INLRMS, "Job is passed to LRMS"); RequestReprocess(i); return JobSuccess; } else { // Exited child will report job for attention. 
// But in case of hanging child we need some hack - use polling RequestPolling(i); return JobSuccess; } } else { return JobFailed; } } JobsList::ActJobResult JobsList::ActJobCanceling(GMJobRef i) { // This state is like submitting, only -cancel instead of -submit logger.msg(Arc::VERBOSE,"%s: State: CANCELING",i->job_id); bool state_changed = false; if(state_canceling(i,state_changed)) { if(state_changed) { SetJobState(i, JOB_STATE_FINISHING, "Job cancellation succeeded"); RequestReprocess(i); return JobSuccess; } else { // Exited child will report job for attention. // But in case of hanging child we need some hack - use polling RequestPolling(i); return JobSuccess; } } else { return JobFailed; } } JobsList::ActJobResult JobsList::ActJobInlrms(GMJobRef i) { // Job is currently running in LRMS, check if it has finished logger.msg(Arc::VERBOSE,"%s: State: INLRMS",i->job_id); if(!GetLocalDescription(i)) { i->AddFailure("Failed reading local job information"); return JobFailed; // go to next job } logger.msg(Arc::DEBUG,"%s: State: INLRMS - checking for pending(%u) and mark",i->job_id, (unsigned int)(i->job_pending)); if(i->job_pending || job_lrms_mark_check(i->job_id,config)) { logger.msg(Arc::DEBUG,"%s: State: INLRMS - checking for not pending",i->job_id); if(!i->job_pending) { logger.msg(Arc::INFO,"%s: Job finished",i->job_id); job_diagnostics_mark_move(*i,config); LRMSResult ec = job_lrms_mark_read(i->job_id,config); if(ec.code() != i->local->exec.successcode) { logger.msg(Arc::INFO,"%s: State: INLRMS: exit message is %i %s",i->job_id,ec.code(),ec.description()); i->AddFailure("LRMS error: ("+ Arc::tostring(ec.code())+") "+ec.description()); JobFailStateRemember(i,JOB_STATE_INLRMS); // This does not require any special postprocessing and // can go to next state directly return JobFailed; } } SetJobState(i, JOB_STATE_FINISHING, "Job finished executing in LRMS"); RequestReprocess(i); return JobSuccess; } else { logger.msg(Arc::DEBUG,"%s: State: INLRMS - no mark found",i->job_id); // Job state scanner will report job for attention. // But in case signal or job information in batch system is // lost do polling as backup solution. 
RequestPolling(i); return JobSuccess; } } JobsList::ActJobResult JobsList::ActJobFinishing(GMJobRef i) { // Batch job has finished and is now ready to upload output files, or // the upload is already on-going logger.msg(Arc::VERBOSE,"%s: State: FINISHING",i->job_id); bool state_changed = false; if(state_loading(i,state_changed,true)) { if(state_changed) { SetJobState(i, JOB_STATE_FINISHED, "Stage-out finished."); RequestReprocess(i); } else { // still in data staging } return JobSuccess; } else { if(!i->CheckFailure(config)) i->AddFailure("Data upload failed"); return JobFailed; } } JobsList::ActJobResult JobsList::ActJobFinished(GMJobRef i) { // Job has completely finished, check for user requests to restart or // clean up the job, and whether it is time to move to DELETED if(job_clean_mark_check(i->job_id,config)) { // request to clean job logger.msg(Arc::INFO,"%s: Job is requested to clean - deleting",i->job_id); UnlockDelegation(i); // delete everything SetJobState(i, JOB_STATE_UNDEFINED, "Request to clean job"); job_clean_final(*i,config); return JobDropped; } if(job_restart_mark_check(i->job_id,config)) { job_restart_mark_remove(i->job_id,config); // request to rerun job - check if we can // Get information about failed state and forget it job_state_t state_ = JobFailStateGet(i); if(state_ == JOB_STATE_PREPARING) { if(RecreateTransferLists(i)) { job_failed_mark_remove(i->job_id,config); SetJobState(i, JOB_STATE_ACCEPTED, "Request to restart job failed in PREPARING"); SetJobPending(i, "Skip job to PREPARING immediately"); // make it go to end of state immediately logger.msg(Arc::DEBUG, "%s: restarted PREPARING job", i->job_id); RequestAttention(i); // make it start ASAP return JobSuccess; } } else if((state_ == JOB_STATE_SUBMITTING) || (state_ == JOB_STATE_INLRMS)) { if(RecreateTransferLists(i)) { job_failed_mark_remove(i->job_id,config); if(i->local->downloads > 0) { // missing input files have to be re-downloaded SetJobState(i, JOB_STATE_ACCEPTED, "Request to restart job failed in INLRMS (some input files are missing)"); } else { SetJobState(i, JOB_STATE_PREPARING, "Request to restart job failed in INLRMS (no input files are missing)"); } SetJobPending(i, "Skip job to next state immediately"); // make it go to end of state immediately // TODO: check for order of processing logger.msg(Arc::DEBUG, "%s: restarted INLRMS job", i->job_id); RequestAttention(i); // make it start ASAP return JobSuccess; } } else if(state_ == JOB_STATE_FINISHING) { if(RecreateTransferLists(i)) { job_failed_mark_remove(i->job_id,config); SetJobState(i, JOB_STATE_INLRMS, "Request to restart job failed in FINISHING"); SetJobPending(i, "Skip job to FINISHING immediately"); // make it go to end of state immediately logger.msg(Arc::DEBUG, "%s: restarted FINISHING job", i->job_id); RequestAttention(i); // make it start ASAP return JobSuccess; } } else if(state_ == JOB_STATE_UNDEFINED) { logger.msg(Arc::ERROR,"%s: Can't rerun on request",i->job_id); } else { logger.msg(Arc::ERROR,"%s: Can't rerun on request - not a suitable state",i->job_id); } } // process cleanup time time_t t = -1; if(!job_local_read_cleanuptime(i->job_id,config,t)) { // must be first time - create cleanuptime t=PrepareCleanupTime(i,i->keep_finished); } // check if it is time to move job to DELETED if(((int)(time(NULL)-t)) >= 0) { logger.msg(Arc::INFO,"%s: Job is too old - deleting",i->job_id); UnlockDelegation(i); // Check whether configured to keep job in DELETED state if(i->keep_deleted) { // here we have to get the cache per-job dirs to be deleted std::list<std::string>
cache_per_job_dirs; CacheConfig cache_config(config.CacheParams()); cache_config.substitute(config, i->user); std::vector<std::string> conf_caches = cache_config.getCacheDirs(); // add each dir to our list for (std::vector<std::string>::iterator it = conf_caches.begin(); it != conf_caches.end(); it++) { cache_per_job_dirs.push_back(it->substr(0, it->find(" "))+"/joblinks"); } // add draining caches std::vector<std::string> draining_caches = cache_config.getDrainingCacheDirs(); for (std::vector<std::string>::iterator it = draining_caches.begin(); it != draining_caches.end(); it++) { cache_per_job_dirs.push_back(*it+"/joblinks"); } // and read-only caches std::vector<std::string> readonly_caches = cache_config.getReadOnlyCacheDirs(); for (std::vector<std::string>::iterator it = readonly_caches.begin(); it != readonly_caches.end(); it++) { cache_per_job_dirs.push_back(*it+"/joblinks"); } job_clean_deleted(*i,config,cache_per_job_dirs); SetJobState(i, JOB_STATE_DELETED, "Job stayed unattended too long"); RequestSlowPolling(i); return JobSuccess; } else { // delete everything SetJobState(i, JOB_STATE_UNDEFINED, "Job stayed unattended too long"); job_clean_final(*i,config); return JobDropped; } } else { RequestSlowPolling(i); // FINISHED jobs are not kept in memory, so it is not important whether the return value is Success or Dropped return JobDropped; } } JobsList::ActJobResult JobsList::ActJobDeleted(GMJobRef i) { // Job only has a few control files left, check if it is time to // remove all traces time_t t = -1; if(!job_local_read_cleanuptime(i->job_id,config,t) || ((time(NULL)-(t+i->keep_deleted)) >= 0)) { logger.msg(Arc::INFO,"%s: Job is ancient - delete rest of information",i->job_id); UnlockDelegation(i); // not needed here but in case something went wrong previously // delete everything SetJobState(i, JOB_STATE_UNDEFINED, "Job stayed deleted too long"); job_clean_final(*i,config); return JobDropped; } RequestSlowPolling(i); return JobDropped; } bool JobsList::CheckJobCancelRequest(GMJobRef i) { // some states cannot be canceled (or there is no sense in doing that) if((i->job_state != JOB_STATE_CANCELING) && (i->job_state != JOB_STATE_FINISHED) && (i->job_state != JOB_STATE_DELETED) && (i->job_state != JOB_STATE_SUBMITTING)) { if(job_cancel_mark_check(i->job_id,config)) { logger.msg(Arc::INFO,"%s: Canceling job because of user request",i->job_id); if (i->job_state == JOB_STATE_PREPARING || i->job_state == JOB_STATE_FINISHING) { dtr_generator.cancelJob(i); } // kill running child if(i->child) { i->child->Kill(0); CleanChildProcess(i); } // put some explanation i->AddFailure("Job is canceled by external request"); JobFailStateRemember(i,i->job_state,false); // behave as if the job failed if(!FailedJob(i,true)) { logger.msg(Arc::ERROR,"%s: Failed to turn job into failed during cancel processing.",i->job_id); // DO NOT KNOW WHAT TO DO HERE !!!!!!!!!!
} // special processing for INLRMS case if(i->job_state == JOB_STATE_INLRMS) { SetJobState(i, JOB_STATE_CANCELING, "Request to cancel job"); } else if (i->job_state == JOB_STATE_PREPARING) { // if PREPARING we wait to get back all DTRs (only if job is still in DTR processing) if(!dtr_generator.hasJob(i)) { SetJobState(i, JOB_STATE_FINISHING, "Request to cancel job"); } } else { SetJobState(i, JOB_STATE_FINISHING, "Request to cancel job"); } job_cancel_mark_remove(i->job_id,config); RequestReprocess(i); return true; } } return false; } bool JobsList::CheckJobContinuePlugins(GMJobRef i) { bool plugins_result = true; if(config.GetContPlugins()) { std::list<ContinuationPlugins::result_t> results; config.GetContPlugins()->run(*i,config,results); std::list<ContinuationPlugins::result_t>::iterator result = results.begin(); while(result != results.end()) { // analyze results if(result->action == ContinuationPlugins::act_fail) { logger.msg(Arc::ERROR,"%s: Plugin at state %s : %s", i->job_id.c_str(),i->get_state_name(), result->response); i->AddFailure(std::string("Plugin at state ")+ i->get_state_name()+" failed: "+(result->response)); plugins_result = false; } else if(result->action == ContinuationPlugins::act_log) { // Scream but go ahead logger.msg(Arc::WARNING,"%s: Plugin at state %s : %s", i->job_id.c_str(),i->get_state_name(), result->response); } else if(result->action == ContinuationPlugins::act_pass) { // Just continue quietly } else { logger.msg(Arc::ERROR,"%s: Plugin execution failed",i->job_id); i->AddFailure(std::string("Failed running plugin at state ")+ i->get_state_name()); plugins_result = false; } ++result; } } return plugins_result; } bool JobsList::NextJob(GMJobRef i, job_state_t old_state, bool old_pending) { bool at_limit = RunningJobsLimitReached(); // update counters if(!old_pending) { jobs_num[old_state]--; } else { jobs_pending--; } if(!i->job_pending) { jobs_num[i->job_state]++; } else { jobs_pending++; } if(at_limit && !RunningJobsLimitReached()) { // Report about change in conditions //RequestAttention(); }; return i; } bool JobsList::DropJob(GMJobRef& i, job_state_t old_state, bool old_pending) { bool at_limit = RunningJobsLimitReached(); // update counters if(!old_pending) { jobs_num[old_state]--; } else { jobs_pending--; } if(at_limit && !RunningJobsLimitReached()) { // Report about change in conditions RequestAttention(); // TODO: Check if really needed }; { std::unique_lock lock(jobs_lock); jobs.erase(i->job_id); }; i.Destroy(); return true; } #define IS_ACTIVE_STATE(state) ((state >= JOB_STATE_PREPARING) && (state <= JOB_STATE_FINISHING)) bool JobsList::ActJob(GMJobRef& i) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), i->job_id); job_state_t perflog_start_state = i->job_state; job_state_t old_state = i->job_state; job_state_t old_reported_state = i->job_state; bool old_pending = i->job_pending; ActJobResult job_result = JobSuccess; if(!CheckJobCancelRequest(i)) { switch(i->job_state) { case JOB_STATE_UNDEFINED: { job_result = ActJobUndefined(i); } break; case JOB_STATE_ACCEPTED: { job_result = ActJobAccepted(i); } break; case JOB_STATE_PREPARING: { job_result = ActJobPreparing(i); } break; case JOB_STATE_SUBMITTING: { job_result = ActJobSubmitting(i); } break; case JOB_STATE_CANCELING: { job_result = ActJobCanceling(i); } break; case JOB_STATE_INLRMS: { job_result = ActJobInlrms(i); } break; case JOB_STATE_FINISHING: { job_result = ActJobFinishing(i); } break; case JOB_STATE_FINISHED: { job_result = ActJobFinished(i); } break; case JOB_STATE_DELETED: { job_result = ActJobDeleted(i); } break; default: { /* should destroy job with unknown state ?! */ } break; }; };
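// Illustrative summary, not from the original source: the ActJob* handlers
// dispatched above follow a simple contract.
//   JobSuccess - the state was processed and the handler already queued the
//                next scheduling request (RequestAttention/RequestPolling/...);
//   JobFailed  - a failure reason was recorded via AddFailure(); ActJobFailed()
//                below routes the job towards CANCELING/FINISHING/FINISHED;
//   JobDropped - the job must leave memory; DropJob() erases it from the map.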
if(job_result == JobFailed) { // Process errors which happened while processing this job job_result = ActJobFailed(i); // normally it must not be JobFailed here } // Process state changes, also those generated by error processing if(old_reported_state != i->job_state) { if(old_reported_state != JOB_STATE_UNDEFINED) { // Report state change into log. But not if job was just picked up. logger.msg(Arc::INFO,"%s: State: %s from %s", i->job_id.c_str(),GMJob::get_state_name(i->job_state), GMJob::get_state_name(old_reported_state)); } old_reported_state=i->job_state; }; if (job_result != JobDropped) { if(old_state != i->job_state) { // Job changed state. Skip state change processing if job is about to be dropped. if(!job_state_write_file(*i,config,i->job_state,i->job_pending)) { i->AddFailure("Failed writing job status: "+Arc::StrError(errno)); job_result = ActJobFailed(i); // immediately process failure } else { // Talk to external plugin to ask if we can proceed // Jobs with ACCEPTED state or UNDEFINED previous state // could be ignored here. But there is a tiny possibility // that the service failed while processing ContinuationPlugins. // Hence here we have a duplicate call for the ACCEPTED state. // TODO: maybe introducing job state prefix VALIDATING: // could be used to resolve this situation. if(!CheckJobContinuePlugins(i)) { // No need for AddFailure. It is filled inside CheckJobContinuePlugins. job_result = ActJobFailed(i); // immediately process failure }; // Processing to be done on relatively successful state changes JobLog* joblog = config.GetJobLog(); if(joblog) joblog->WriteJobRecord(*i,config); // TODO: Consider moving following code into ActJob* methods if(i->job_state == JOB_STATE_FINISHED) { job_clean_finished(i->job_id,config); if(joblog) joblog->WriteFinishInfo(*i,config); PrepareCleanupTime(i,i->keep_finished); } else if(i->job_state == JOB_STATE_PREPARING) { if(joblog) joblog->WriteStartInfo(*i,config); }; }; // send mail after error and change are processed // do not send if something really wrong happened to avoid email DoS if(job_result != JobFailed) send_mail(*i,config); // Manage per-DN counter // Any job state change goes through here if(!IS_ACTIVE_STATE(old_state)) { if(IS_ACTIVE_STATE(i->job_state)) { if(i->GetLocalDescription(config)) { // add to DN map if (i->local->DN.empty()) { logger.msg(Arc::WARNING, "Failed to get DN information from .local file for job %s", i->job_id); } std::unique_lock lock(jobs_lock); ++(jobs_dn[i->local->DN]); }; }; } else if(IS_ACTIVE_STATE(old_state)) { if(!IS_ACTIVE_STATE(i->job_state)) { if(i->GetLocalDescription(config)) { std::unique_lock lock(jobs_lock); if (--(jobs_dn[i->local->DN]) == 0) jobs_dn.erase(i->local->DN); }; }; }; } else if(old_pending != i->job_pending) { // Only the pending flag has changed - only write the state file if(!job_state_write_file(*i,config,i->job_state,i->job_pending)) { i->AddFailure("Failed writing job status: "+Arc::StrError(errno)); job_result = ActJobFailed(i); // immediately process failure } }; }; // !JobDropped if(job_result == JobFailed) { // If it is still JobFailed then just force everything down logger.msg(Arc::ERROR,"%s: Delete request due to internal problems",i->job_id); SetJobState(i, JOB_STATE_FINISHED, "Job processing failed"); // move to finished in order to remove from list (void)job_state_write_file(*i,config,i->job_state,i->job_pending); i->AddFailure("Serious troubles (problems during processing problems)"); FailedJob(i,false); // put some marks
job_clean_finished(i->job_id,config); // clean status files job_result = JobDropped; }; if(perfrecord.Started()) { job_state_t perflog_end_state = i->job_state; std::string name(GMJob::get_state_name(perflog_start_state)); name += "-"; name += GMJob::get_state_name(perflog_end_state); perfrecord.End(name); }; // Job in special state or specifically requested to be removed (TODO: remove check for job state) if((job_result == JobDropped) || (i->job_state == JOB_STATE_DELETED) || (i->job_state == JOB_STATE_UNDEFINED)) { // Such jobs are not kept in memory // this is the ONLY place where jobs are removed from memory DropJob(i, old_state, old_pending); } else { NextJob(i, old_state, old_pending); } return true; } JobsList::ActJobResult JobsList::ActJobFailed(GMJobRef i) { // Failed job - move it to the proper state logger.msg(Arc::ERROR,"%s: Job failure detected",i->job_id); if(!FailedJob(i,false)) { // something is really wrong i->AddFailure("Failed during processing failure"); return JobFailed; } else { // just move the job to the proper state if((i->job_state == JOB_STATE_FINISHED) || (i->job_state == JOB_STATE_DELETED)) { // Normally these stages should not generate errors // so ignore them return JobDropped; } else if(i->job_state == JOB_STATE_FINISHING) { // No matter if FINISHING fails - it still goes to FINISHED SetJobState(i, JOB_STATE_FINISHED, "Job failure detected"); RequestReprocess(i); } else if(i->job_state == JOB_STATE_INLRMS) { // This happens either if job processing failed or a continuation // plugin failed. But that also means the job is probably being // processed by the batch system. So the safest is to act as if a cancel // request arrived. SetJobState(i, JOB_STATE_CANCELING, "Job failure detected"); RequestReprocess(i); } else { // Other states are moved to FINISHING and start post-staging SetJobState(i, JOB_STATE_FINISHING, "Job failure detected"); RequestReprocess(i); }; // Reset pending (it is useless for any post-failure state anyway) i->job_pending=false; }; return JobSuccess; } // Description of job status file class JobFDesc { public: JobId id; uid_t uid; gid_t gid; time_t t; JobFDesc(const std::string& s):id(s),uid(0),gid(0),t(-1) { } bool operator<(const JobFDesc &right) const { return (t < right.t); } }; bool JobsList::RestartJobs(const std::string& cdir,const std::string& odir) { bool res = true; try { Glib::Dir dir(cdir); for(;;) { std::string file=dir.read_name(); if(file.empty()) break; int l=file.length(); // job id contains at least 1 character if(l>7 && file.substr(l-7) == ".status") { uid_t uid; gid_t gid; time_t t; std::string fname=cdir+'/'+file.c_str(); std::string oname=odir+'/'+file.c_str(); if(check_file_owner(fname,uid,gid,t)) { if(::rename(fname.c_str(),oname.c_str()) != 0) { logger.msg(Arc::ERROR,"Failed to move file %s to %s",fname,oname); res=false; } } } } dir.close(); } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s",cdir); return false; } return res; } // This code is run at service restart bool JobsList::RestartJobs(void) { std::string cdir=config.ControlDir(); // Jobs from old version bool res1 = RestartJobs(cdir,cdir+"/"+subdir_rew); // Jobs after service restart bool res2 = RestartJobs(cdir+"/"+subdir_cur,cdir+"/"+subdir_rew); return res1 && res2; } bool JobsList::ScanJobDesc(const std::string& cdir, JobFDesc& id) { if(!FindJob(id.id)) { std::string fname=cdir+'/'+id.id+"."+sfx_status; uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { id.uid=uid; id.gid=gid; id.t=t; return true; }; }; return false; }
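// Illustrative sketch, not part of the original source: JobFDesc orders by
// the mtime of the job's .status file (member t), so a scan-then-sort pass
// yields FIFO pickup order. Roughly:
//   std::list<JobFDesc> ids;
//   ScanAllJobs(cdir, ids, filter); // fills id/uid/gid/t per .status file
//   ids.sort();                     // oldest status file first => FIFO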
bool JobsList::ScanJobDescs(const std::string& cdir,std::list<JobFDesc>& ids) const { class JobFilterSkipExisting: public JobFilter { public: JobFilterSkipExisting(JobsList const& jobs): jobs_(jobs) {}; virtual ~JobFilterSkipExisting() {}; virtual bool accept(JobId const& id) const { return !jobs_.HasJob(id); }; private: JobsList const& jobs_; }; Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); bool result = ScanAllJobs(cdir, ids, JobFilterSkipExisting(*this)); perfrecord.End("SCAN-JOBS"); return result; } bool JobsList::ScanAllJobs(const std::string& cdir,std::list<JobFDesc>& ids, JobFilter const& filter) { try { Glib::Dir dir(cdir); for(;;) { std::string file=dir.read_name(); if(file.empty()) break; int l=file.length(); // job id contains at least 1 character if(l>7 && file.substr(l-7) == ".status") { JobFDesc id(file.substr(0,l-7)); if(filter.accept(id.id)) { std::string fname=cdir+'/'+file.c_str(); uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { // add it to the list id.uid=uid; id.gid=gid; id.t=t; ids.push_back(id); } } } } } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s: %s", cdir, e.what()); return false; } return true; } bool JobsList::ScanMarks(const std::string& cdir,const std::list<std::string>& suffices,std::list<JobFDesc>& ids) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); try { Glib::Dir dir(cdir); for(;;) { std::string file=dir.read_name(); if(file.empty()) break; int l=file.length(); // job id contains at least 1 separator and 1 suffix character if(l>1) { for(std::list<std::string>::const_iterator sfx = suffices.begin(); sfx != suffices.end();++sfx) { int ll = sfx->length(); if(l > ll && file.substr(l-ll) == *sfx) { JobFDesc id(file.substr(0,l-ll)); if(!FindJob(id.id)) { std::string fname=cdir+'/'+file.c_str(); uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { // add it to the list id.uid=uid; id.gid=gid; id.t=t; ids.push_back(id); } } break; } } } } } catch(Glib::FileError& e) { logger.msg(Arc::ERROR,"Failed reading control directory: %s",config.ControlDir()); return false; } perfrecord.End("SCAN-MARKS"); return true; } bool JobsList::ScanNewJob(const JobId& id) { // New jobs will be accepted only if the number of jobs being processed does not exceed the allowed maximum if((AcceptedJobs() < config.MaxJobs()) || (config.MaxJobs() == -1)) { JobFDesc fid(id); std::string cdir=config.ControlDir(); std::string ndir=cdir+"/"+subdir_new; if(!ScanJobDesc(ndir,fid)) return false; return AddJob(fid.id,fid.uid,fid.gid,"scan for specific new job"); } return false; } bool JobsList::ScanOldJob(const JobId& id) { JobFDesc fid(id); std::string cdir=config.ControlDir(); std::string ndir=cdir+"/"+subdir_old; if(!ScanJobDesc(ndir,fid)) return false; job_state_t st = job_state_read_file(id,config); if(st == JOB_STATE_FINISHED || st == JOB_STATE_DELETED) { return AddJob(fid.id,fid.uid,fid.gid,st,"scan for specific old job"); }; return false; } // find new jobs - sort by date to implement FIFO bool JobsList::ScanNewJobs(void) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); // New jobs will be accepted only if the number of jobs being processed // does not exceed the allowed maximum. So avoid scanning if no jobs will be allowed.
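// Sketch of the acceptance gate used by both scan passes below (illustrative
// restatement, not additional logic):
//   bool may_accept = (config.MaxJobs() == -1) || (AcceptedJobs() < config.MaxJobs());
// MaxJobs() == -1 means "no limit"; the predicate is re-evaluated for every
// job added, so one large batch cannot overshoot the configured cap.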
std::string cdir=config.ControlDir(); if((config.MaxJobs() == -1) || (AcceptedJobs() < config.MaxJobs())) { std::list<JobFDesc> ids; // For picking up jobs after service restart std::string odir=cdir+"/"+subdir_rew; if(!ScanJobDescs(odir,ids)) return false; // sorting by date ids.sort(); for(std::list<JobFDesc>::iterator id=ids.begin();id!=ids.end();++id) { if((config.MaxJobs() != -1) && (AcceptedJobs() >= config.MaxJobs())) break; AddJob(id->id,id->uid,id->gid,"scan for new jobs in restarting"); }; }; if((config.MaxJobs() == -1) || (AcceptedJobs() < config.MaxJobs())) { std::list<JobFDesc> ids; // For new jobs std::string ndir=cdir+"/"+subdir_new; if(!ScanJobDescs(ndir,ids)) return false; // sorting by date ids.sort(); for(std::list<JobFDesc>::iterator id=ids.begin();id!=ids.end();++id) { if((config.MaxJobs() != -1) && (AcceptedJobs() >= config.MaxJobs())) break; // adding job with file's uid/gid AddJob(id->id,id->uid,id->gid,"scan for new jobs in new"); }; }; perfrecord.End("SCAN-JOBS-NEW"); return true; } bool JobsList::ScanNewMarks(void) { Arc::JobPerfRecord perfrecord(*config.GetJobPerfLog(), "*"); std::string cdir=config.ControlDir(); std::string ndir=cdir+"/"+subdir_new; std::list<JobFDesc> ids; std::list<std::string> sfx; sfx.push_back(sfx_clean); sfx.push_back(sfx_restart); sfx.push_back(sfx_cancel); if(!ScanMarks(ndir,sfx,ids)) return false; ids.sort(); std::string last_id; for(std::list<JobFDesc>::iterator id=ids.begin();id!=ids.end();++id) { if(id->id == last_id) continue; // already processed last_id = id->id; job_state_t st = job_state_read_file(id->id,config); if((st == JOB_STATE_UNDEFINED) || (st == JOB_STATE_DELETED)) { // Job probably does not exist anymore job_clean_mark_remove(id->id,config); job_restart_mark_remove(id->id,config); job_cancel_mark_remove(id->id,config); } // Check if such job finished and add it to list. if(st == JOB_STATE_FINISHED) { // That will activate its processing at least for one step. AddJob(id->id,id->uid,id->gid,st,"scan for new jobs in marks"); } } perfrecord.End("SCAN-MARKS-NEW"); return true; } // For simply collecting all jobs. Only used by gm-jobs. bool JobsList::GetAllJobs(const GMConfig& config, std::list<GMJobRef>& alljobs) { class JobFilterNoSkip: public JobFilter { public: JobFilterNoSkip() {}; virtual ~JobFilterNoSkip() {}; virtual bool accept(JobId const& id) const { return true; }; }; std::list<std::string> subdirs; subdirs.push_back(std::string("/")+subdir_rew); // For picking up jobs after service restart subdirs.push_back(std::string("/")+subdir_new); // For new jobs subdirs.push_back(std::string("/")+subdir_cur); // For active jobs subdirs.push_back(std::string("/")+subdir_old); // For done jobs for(std::list<std::string>::iterator subdir = subdirs.begin(); subdir != subdirs.end();++subdir) { std::string cdir=config.ControlDir(); std::list<JobFDesc> ids; std::string odir=cdir+(*subdir); if(!ScanAllJobs(odir,ids,JobFilterNoSkip())) return false; // sorting by date ids.sort(); for(std::list<JobFDesc>::iterator id=ids.begin();id!=ids.end();++id) { GMJobRef i(new GMJob(id->id,Arc::User(id->uid))); if (i->GetLocalDescription(config)) { i->session_dir = i->local->sessiondir; if (i->session_dir.empty()) i->session_dir = config.SessionRoot(id->id)+'/'+id->id; alljobs.push_back(i); } } } return true; }
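// Illustrative gm-jobs-style use of the collector above (assumed usage
// pattern, not part of the original source):
//   std::list<GMJobRef> all;
//   if(JobsList::GetAllJobs(config, all))
//     for(std::list<GMJobRef>::iterator j = all.begin(); j != all.end(); ++j)
//       std::cout << (*j)->job_id << " " << (*j)->session_dir << std::endl;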
// For simply collecting all job ids. bool JobsList::GetAllJobIds(const GMConfig& config, std::list<JobId>& alljobs) { class JobFilterNoSkip: public JobFilter { public: JobFilterNoSkip() {}; virtual ~JobFilterNoSkip() {}; virtual bool accept(JobId const& id) const { return true; }; }; std::list<std::string> subdirs; subdirs.push_back(std::string("/")+subdir_rew); // For picking up jobs after service restart subdirs.push_back(std::string("/")+subdir_new); // For new jobs subdirs.push_back(std::string("/")+subdir_cur); // For active jobs subdirs.push_back(std::string("/")+subdir_old); // For done jobs for(std::list<std::string>::iterator subdir = subdirs.begin(); subdir != subdirs.end();++subdir) { std::string cdir=config.ControlDir(); std::list<JobFDesc> ids; std::string odir=cdir+(*subdir); if(!ScanAllJobs(odir,ids,JobFilterNoSkip())) return false; // sorting by date ids.sort(); for(std::list<JobFDesc>::iterator id=ids.begin();id!=ids.end();++id) { alljobs.push_back(id->id); } } return true; } // Only used by gm-jobs GMJobRef JobsList::GetJob(const GMConfig& config, const JobId& id) { std::list<std::string> subdirs; subdirs.push_back(std::string("/")+subdir_rew); // For picking up jobs after service restart subdirs.push_back(std::string("/")+subdir_new); // For new jobs subdirs.push_back(std::string("/")+subdir_cur); // For active jobs subdirs.push_back(std::string("/")+subdir_old); // For done jobs for(std::list<std::string>::iterator subdir = subdirs.begin(); subdir != subdirs.end();++subdir) { std::string cdir=config.ControlDir(); std::string odir=cdir+(*subdir); std::string fname=odir+'/'+id+".status"; uid_t uid; gid_t gid; time_t t; if(check_file_owner(fname,uid,gid,t)) { GMJobRef i(new GMJob(id,Arc::User(uid))); if (i->GetLocalDescription(config)) { i->session_dir = i->local->sessiondir; if (i->session_dir.empty()) i->session_dir = config.SessionRoot(id)+'/'+id; return i; } } } return GMJobRef(); } // For simply counting all jobs. int JobsList::CountAllJobs(const GMConfig& config) { class JobFilterNoSkip: public JobFilter { public: JobFilterNoSkip() {}; virtual ~JobFilterNoSkip() {}; virtual bool accept(JobId const& id) const { return true; }; }; int count = 0; std::list<std::string> subdirs; subdirs.push_back(std::string("/")+subdir_rew); // For picking up jobs after service restart subdirs.push_back(std::string("/")+subdir_new); // For new jobs subdirs.push_back(std::string("/")+subdir_cur); // For active jobs subdirs.push_back(std::string("/")+subdir_old); // For done jobs for(std::list<std::string>::iterator subdir = subdirs.begin(); subdir != subdirs.end();++subdir) { std::string cdir=config.ControlDir(); std::list<JobFDesc> ids; std::string odir=cdir+(*subdir); if(ScanAllJobs(odir,ids,JobFilterNoSkip())) { count += ids.size(); }; }; return count; } JobsList::ExternalHelper::ExternalHelper(const std::string &cmd) { command = cmd; proc = NULL; } JobsList::ExternalHelper::~ExternalHelper() { if(proc != NULL) { delete proc; proc=NULL; } } static void ExternalHelperInitializer(void* arg) { const char* logpath = reinterpret_cast<const char*>(arg); // set up stdin, stdout and stderr int h; h = ::open("/dev/null",O_RDONLY); if(h != 0) { if(dup2(h,0) != 0) { _exit(1); }; close(h); }; h = ::open("/dev/null",O_WRONLY); if(h != 1) { if(dup2(h,1) != 1) { _exit(1); }; close(h); }; if(logpath && logpath[0]) { h = ::open(logpath,O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR); if(h==-1) { h = ::open("/dev/null",O_WRONLY); }; } else { h = ::open("/dev/null",O_WRONLY); }; if(h != 2) { if(dup2(h,2) != 2) { _exit(1); }; close(h); }; }
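// Note (illustrative, not from the original source): the initializer above
// runs in the forked child before exec(), so it restricts itself to
// async-signal-safe calls (open/dup2/close/_exit). Resulting layout:
//   fd 0 <- /dev/null (read), fd 1 <- /dev/null (write),
//   fd 2 <- the configured helper log if it opens, otherwise /dev/null.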
// Uncomment following if we must support helpers // which can't run gm-kick. // static void ExternalHelperKicker(void* arg) { // JobsList* jobs = reinterpret_cast<JobsList*>(arg); // if(jobs) jobs->RequestAttention(); // } bool JobsList::ExternalHelper::run(JobsList const& jobs) { if (proc != NULL) { if (proc->Running()) { return true; // it is already/still running } delete proc; proc = NULL; } // start/restart if (command.empty()) return true; // anything to run? logger.msg(Arc::VERBOSE, "Starting helper process: %s", command); proc = new Arc::Run(command); proc->KeepStdin(true); proc->KeepStdout(true); proc->KeepStderr(true); proc->AssignInitializer(&ExternalHelperInitializer, const_cast<char*>(jobs.config.HelperLog().c_str()), false); //proc->AssignKicker(&ExternalHelperKicker, const_cast<void*>(reinterpret_cast<void const*>(&jobs))); if (proc->Start()) return true; delete proc; proc = NULL; logger.msg(Arc::ERROR, "Helper process start failed: %s", command); // start failed, doing nothing - maybe retry in the future return false; } void JobsList::ExternalHelper::stop() { if (proc && proc->Running()) { logger.msg(Arc::VERBOSE, "Stopping helper process %s", command); proc->Kill(1); } } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024327 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.865493651 30 ctime=1759499029.508724584 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/Makefile.am0000644000175000002070000000110515067751327026226 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libjobs.la libjobs_la_SOURCES = \ CommFIFO.cpp JobsList.cpp GMJob.cpp JobDescriptionHandler.cpp \ ContinuationPlugins.cpp DTRGenerator.cpp \ CommFIFO.h JobsList.h GMJob.h JobDescriptionHandler.h \ ContinuationPlugins.h DTRGenerator.h libjobs_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libjobs_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355024341 xustar0030 mtime=1759498989.917032096 30 atime=1759499017.958254813 30 ctime=1759499029.509430334 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/Makefile.in0000644000175000002070000010307215067751355026246 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/jobs ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libjobs_la_DEPENDENCIES = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libjobs_la_OBJECTS = libjobs_la-CommFIFO.lo libjobs_la-JobsList.lo \ libjobs_la-GMJob.lo libjobs_la-JobDescriptionHandler.lo \ libjobs_la-ContinuationPlugins.lo libjobs_la-DTRGenerator.lo libjobs_la_OBJECTS = $(am_libjobs_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libjobs_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libjobs_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = 
$(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libjobs_la-CommFIFO.Plo \ ./$(DEPDIR)/libjobs_la-ContinuationPlugins.Plo \ ./$(DEPDIR)/libjobs_la-DTRGenerator.Plo \ ./$(DEPDIR)/libjobs_la-GMJob.Plo \ ./$(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo \ ./$(DEPDIR)/libjobs_la-JobsList.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libjobs_la_SOURCES) DIST_SOURCES = $(libjobs_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS 
= @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = libjobs.la libjobs_la_SOURCES = \ CommFIFO.cpp JobsList.cpp GMJob.cpp JobDescriptionHandler.cpp \ ContinuationPlugins.cpp DTRGenerator.cpp \ CommFIFO.h JobsList.h GMJob.h JobDescriptionHandler.h \ ContinuationPlugins.h DTRGenerator.h libjobs_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libjobs_la_LIBADD = \ $(top_builddir)/src/libs/data-staging/libarcdatastaging.la \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/jobs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/jobs/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libjobs.la: $(libjobs_la_OBJECTS) $(libjobs_la_DEPENDENCIES) $(EXTRA_libjobs_la_DEPENDENCIES) $(AM_V_CXXLD)$(libjobs_la_LINK) $(libjobs_la_OBJECTS) $(libjobs_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-CommFIFO.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-ContinuationPlugins.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-DTRGenerator.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-GMJob.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libjobs_la-JobsList.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libjobs_la-CommFIFO.lo: CommFIFO.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-CommFIFO.lo -MD -MP -MF $(DEPDIR)/libjobs_la-CommFIFO.Tpo -c -o libjobs_la-CommFIFO.lo `test -f 'CommFIFO.cpp' || echo '$(srcdir)/'`CommFIFO.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libjobs_la-CommFIFO.Tpo $(DEPDIR)/libjobs_la-CommFIFO.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='CommFIFO.cpp' object='libjobs_la-CommFIFO.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-CommFIFO.lo `test -f 'CommFIFO.cpp' || echo '$(srcdir)/'`CommFIFO.cpp libjobs_la-JobsList.lo: JobsList.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-JobsList.lo -MD -MP -MF $(DEPDIR)/libjobs_la-JobsList.Tpo -c -o libjobs_la-JobsList.lo `test -f 'JobsList.cpp' || echo '$(srcdir)/'`JobsList.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libjobs_la-JobsList.Tpo $(DEPDIR)/libjobs_la-JobsList.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobsList.cpp' object='libjobs_la-JobsList.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-JobsList.lo `test -f 'JobsList.cpp' || echo '$(srcdir)/'`JobsList.cpp libjobs_la-GMJob.lo: GMJob.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-GMJob.lo -MD -MP -MF $(DEPDIR)/libjobs_la-GMJob.Tpo -c -o libjobs_la-GMJob.lo `test -f 'GMJob.cpp' || echo '$(srcdir)/'`GMJob.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libjobs_la-GMJob.Tpo $(DEPDIR)/libjobs_la-GMJob.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='GMJob.cpp' object='libjobs_la-GMJob.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-GMJob.lo `test -f 'GMJob.cpp' || echo '$(srcdir)/'`GMJob.cpp libjobs_la-JobDescriptionHandler.lo: JobDescriptionHandler.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-JobDescriptionHandler.lo -MD -MP -MF $(DEPDIR)/libjobs_la-JobDescriptionHandler.Tpo -c -o libjobs_la-JobDescriptionHandler.lo `test -f 'JobDescriptionHandler.cpp' || echo '$(srcdir)/'`JobDescriptionHandler.cpp @am__fastdepCXX_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/libjobs_la-JobDescriptionHandler.Tpo $(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobDescriptionHandler.cpp' object='libjobs_la-JobDescriptionHandler.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-JobDescriptionHandler.lo `test -f 'JobDescriptionHandler.cpp' || echo '$(srcdir)/'`JobDescriptionHandler.cpp libjobs_la-ContinuationPlugins.lo: ContinuationPlugins.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-ContinuationPlugins.lo -MD -MP -MF $(DEPDIR)/libjobs_la-ContinuationPlugins.Tpo -c -o libjobs_la-ContinuationPlugins.lo `test -f 'ContinuationPlugins.cpp' || echo '$(srcdir)/'`ContinuationPlugins.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libjobs_la-ContinuationPlugins.Tpo $(DEPDIR)/libjobs_la-ContinuationPlugins.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ContinuationPlugins.cpp' object='libjobs_la-ContinuationPlugins.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-ContinuationPlugins.lo `test -f 'ContinuationPlugins.cpp' || echo '$(srcdir)/'`ContinuationPlugins.cpp libjobs_la-DTRGenerator.lo: DTRGenerator.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -MT libjobs_la-DTRGenerator.lo -MD -MP -MF $(DEPDIR)/libjobs_la-DTRGenerator.Tpo -c -o libjobs_la-DTRGenerator.lo `test -f 'DTRGenerator.cpp' || echo '$(srcdir)/'`DTRGenerator.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libjobs_la-DTRGenerator.Tpo $(DEPDIR)/libjobs_la-DTRGenerator.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DTRGenerator.cpp' object='libjobs_la-DTRGenerator.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libjobs_la_CXXFLAGS) $(CXXFLAGS) -c -o libjobs_la-DTRGenerator.lo `test -f 'DTRGenerator.cpp' || echo '$(srcdir)/'`DTRGenerator.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ 
"$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libjobs_la-CommFIFO.Plo -rm -f ./$(DEPDIR)/libjobs_la-ContinuationPlugins.Plo -rm -f ./$(DEPDIR)/libjobs_la-DTRGenerator.Plo -rm -f ./$(DEPDIR)/libjobs_la-GMJob.Plo -rm -f ./$(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo -rm -f ./$(DEPDIR)/libjobs_la-JobsList.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libjobs_la-CommFIFO.Plo -rm -f ./$(DEPDIR)/libjobs_la-ContinuationPlugins.Plo -rm -f ./$(DEPDIR)/libjobs_la-DTRGenerator.Plo -rm -f ./$(DEPDIR)/libjobs_la-GMJob.Plo -rm -f ./$(DEPDIR)/libjobs_la-JobDescriptionHandler.Plo -rm -f ./$(DEPDIR)/libjobs_la-JobsList.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/JobDescriptionHandler.h0000644000000000000000000000013215067751327026660 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.864493635 30 ctime=1759499029.524296066 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.h0000644000175000002070000001113215067751327030560 0ustar00mockbuildmock00000000000000#ifndef __ARC_GM_JOB_REQUEST_H__ #define __ARC_GM_JOB_REQUEST_H__ #include #include #include "GMJob.h" namespace ARex { /// Return code of parsing operation enum JobReqResultType { JobReqSuccess, JobReqInternalFailure, JobReqSyntaxFailure, JobReqMissingFailure, JobReqUnsupportedFailure, JobReqLogicalFailure }; /// Return value of parsing operation class JobReqResult { public: JobReqResultType result_type; std::string acl; std::string failure; JobReqResult(JobReqResultType type, const std::string& acl="", const std::string& failure="") :result_type(type), acl(acl), failure(failure) {} bool operator==(const JobReqResultType& result) const { return result == result_type; } bool operator!=(const JobReqResultType& result) const { return result != result_type; } }; /// Deals with parsing and converting job descriptions between Arc::JobDescription /// and JobLocalDescription. Also deals with reading and writing .grami file. class JobDescriptionHandler { public: /// Create a new job description handler JobDescriptionHandler(const GMConfig& config): config(config) {} /// Parse the job description at the given file into job_desc and /// arc_job_desc. Optionally check acl file and put result into /// returned object JobReqResult parse_job_req_from_file(JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,const std::string &fname,bool check_acl=false) const; /// Parse the job description from the given string into job_desc and /// arc_job_desc. Optionally check acl file and put result into /// returned object JobReqResult parse_job_req_from_mem(JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,const std::string &desc_str,bool check_acl=false) const; /// Parse the job description for job_id into job_desc. Optionally check /// acl file and put result into returned object JobReqResult parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,bool check_acl=false) const; /// Parse the job description for job_id into job_desc and arc_job_desc. /// Optionally check acl file and put result into returned object JobReqResult parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,bool check_acl=false) const; /// Parse job description into job_desc and write .local, .input and .output files bool process_job_req(const GMJob &job,JobLocalDescription &job_desc) const; /// Write .grami file after parsing job description file bool write_grami(GMJob &job,const char *opt_add = NULL) const; /// Write .grami from information in arc_job_desc and job bool write_grami(const Arc::JobDescription& arc_job_desc, GMJob& job, const char* opt_add) const; /// Get the local LRMS job id corresponding to A-REX job_id std::string get_local_id(const JobId &job_id) const; /// Set executable bits on appropriate files for the given job bool set_execs(const GMJob &job) const; private: JobReqResult parse_job_req_internal(JobLocalDescription &job_desc,Arc::JobDescription const& arc_job_desc,bool check_acl=false) const; /// Read and parse job description from file and update the job description reference. /** @param fname filename of the job description file. 
* @param desc a reference to a Arc::JobDescription which is filled on success, * if the job description format is unknown the reference is not touched. * @return false if job description could not be read or parsed, true on success. */ Arc::JobDescriptionResult get_arc_job_description(const std::string& fname, Arc::JobDescription& desc) const; /// Read ACLs from .acl file JobReqResult get_acl(const Arc::JobDescription& arc_job_desc) const; /// Write info to .grami for job executable bool write_grami_executable(std::ofstream& f, const std::string& name, const Arc::ExecutableType& exec) const; /// Class for handling escapes and quotes when writing to .grami class value_for_shell { friend std::ostream& operator<<(std::ostream&,const value_for_shell&); private: const char* str; bool quote; public: value_for_shell(const char *str_,bool quote_):str(str_),quote(quote_) { }; value_for_shell(const std::string &str_,bool quote_):str(str_.c_str()),quote(quote_) { }; }; friend std::ostream& operator<<(std::ostream&,const value_for_shell&); const GMConfig& config; static Arc::Logger logger; static const std::string NG_RSL_DEFAULT_STDIN; static const std::string NG_RSL_DEFAULT_STDOUT; static const std::string NG_RSL_DEFAULT_STDERR; }; std::ostream& operator<<(std::ostream&,const JobDescriptionHandler::value_for_shell&); } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/GMJob.cpp0000644000000000000000000000013215067751327023735 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.864493635 30 ctime=1759499029.515118318 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/GMJob.cpp0000644000175000002070000002211415067751327025637 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "../files/ControlFileContent.h" #include "../files/ControlFileHandling.h" #include "GMJob.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); std::recursive_mutex GMJobQueue::lock_; GMJob::job_state_rec_t const GMJob::states_all[JOB_STATE_NUM] = { { "ACCEPTED", ' ' }, // JOB_STATE_ACCEPTED { "PREPARING", 'b' }, // JOB_STATE_PREPARING { "SUBMIT", ' ' }, // JOB_STATE_SUBMITING { "INLRMS", 'q' }, // JOB_STATE_INLRMS, { "FINISHING", 'f' }, // JOB_STATE_FINISHING { "FINISHED", 'e' }, // JOB_STATE_FINISHED { "DELETED", 'd' }, // JOB_STATE_DELETED { "CANCELING", 'c' }, // JOB_STATE_CANCELING { "UNDEFINED", ' ' } // JOB_STATE_UNDEFINED }; const char* GMJob::get_state_name() const { if((job_state<0) || (job_state>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].name; return states_all[job_state].name; } char GMJob::get_state_mail_flag() const { if((job_state<0) || (job_state>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].mail_flag; return states_all[job_state].mail_flag; } const char* GMJob::get_state_name(job_state_t st) { if((st<0) || (st>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].name; return states_all[st].name; } char GMJob::get_state_mail_flag(job_state_t st) { if((st<0) || (st>=JOB_STATE_NUM)) return states_all[JOB_STATE_UNDEFINED].mail_flag; return states_all[st].mail_flag; } job_state_t GMJob::get_state(const char* state) { for(int i = 0;iWait(); delete child; child=NULL; } delete local; } void GMJob::AddReference(void) { std::unique_lock lock(ref_lock); if(++ref_count == 0) { logger.msg(Arc::FATAL,"%s: Job monitoring counter is broken",job_id); } } void GMJob::RemoveReference(void) { std::unique_lock lock(ref_lock); if(--ref_count == 0) { logger.msg(Arc::ERROR,"%s: Job 
monitoring is unintentionally lost",job_id); lock.unlock(); delete this; }; } void GMJob::DestroyReference(void) { std::unique_lock<std::recursive_mutex> lock(ref_lock); if(--ref_count == 0) { logger.msg(Arc::VERBOSE,"%s: Job monitoring stop success",job_id); lock.unlock(); delete this; } else { if(queue) logger.msg(Arc::ERROR,"%s: Job monitoring stop requested with %u active references and %s queue associated",job_id,ref_count,queue->name_); else logger.msg(Arc::ERROR,"%s: Job monitoring stop requested with %u active references",job_id,ref_count); }; } bool GMJobQueue::CanSwitch(GMJob const& job, GMJobQueue const& new_queue, bool to_front) { if(!to_front) { if(!(new_queue.priority_ > priority_)) return false; } else { // If moving to first place in queue accept same priority if(!(new_queue.priority_ >= priority_)) return false; }; return true; } bool GMJobQueue::CanRemove(GMJob const& job) { return true; } bool GMJob::SwitchQueue(GMJobQueue* new_queue, bool to_front) { // Simply use global lock. It will protect both queue content and // reference to queue inside job. std::unique_lock<std::recursive_mutex> qlock(GMJobQueue::lock_); GMJobQueue* old_queue = queue; if (old_queue == new_queue) { // shortcut if(!to_front) return true; if(!old_queue) return true; // move to front old_queue->queue_.remove(this); // inefficient operation! old_queue->queue_.push_front(this); return true; }; // Check priority if (old_queue && new_queue) { if (!old_queue->CanSwitch(*this, *new_queue, to_front)) return false; } else if (old_queue) { if (!old_queue->CanRemove(*this)) return false; } if (old_queue) { // Remove from current queue old_queue->queue_.remove(this); // inefficient operation! queue = NULL; // Unlock current queue }; if (new_queue) { // Add to new queue if(!to_front) { new_queue->queue_.push_back(this); } else { new_queue->queue_.push_front(this); }; queue = new_queue; // Unlock new queue }; // Handle reference counter if(new_queue && !old_queue) { std::unique_lock<std::recursive_mutex> lock(ref_lock); if(++ref_count == 0) { logger.msg(Arc::FATAL,"%s: Job monitoring counter is broken",job_id); } } else if(!new_queue && old_queue) { std::unique_lock<std::recursive_mutex> lock(ref_lock); if(--ref_count == 0) { logger.msg(Arc::ERROR,"%s: Job monitoring is lost due to removal from queue",job_id); lock.unlock(); // release before deleting referenced object delete this; }; }; // Unlock job instance return true; } JobLocalDescription* GMJob::GetLocalDescription(const GMConfig& config) { if(local) return local; JobLocalDescription* job_desc; job_desc=new JobLocalDescription; if(!job_local_read_file(job_id,config,*job_desc)) { delete job_desc; return NULL; }; local=job_desc; return local; } JobLocalDescription* GMJob::GetLocalDescription(void) const { return local; } std::string GMJob::GetFailure(const GMConfig& config) const { std::string reason = job_failed_mark_read(job_id,config); if(!failure_reason.empty()) { reason+=failure_reason; reason+="\n"; }; return reason; } bool GMJob::CheckFailure(const GMConfig& config) const { if(!failure_reason.empty()) return true; return job_failed_mark_check(job_id,config); } void GMJob::PrepareToDestroy(void) { // We could send signals to downloaders and uploaders. // But currently those do not implement safe shutdown. // So we will simply wait for them to finish in destructor.
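// Note: the actual waiting happens in ~GMJob(), which calls child->Wait() and
// deletes the child Arc::Run object, so destroying a job whose helper process
// is still running blocks until that process exits.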
} // ---------------------------------------------------------- GMJobQueue::GMJobQueue(int priority, char const * name):priority_(priority),name_(name) { } bool GMJobQueue::Push(GMJobRef& ref) { if(!ref) return false; return ref->SwitchQueue(this); } bool GMJobQueue::PushSorted(GMJobRef& ref, comparator_t compare) { if(!ref) return false; std::unique_lock<std::recursive_mutex> qlock(lock_); GMJobQueue* old_queue = ref->queue; if(!ref->SwitchQueue(this)) return false; // In most cases the job lands at the end of the list std::list<GMJob*>::reverse_iterator opos = queue_.rbegin(); while(opos != queue_.rend()) { if(ref == *opos) { // Can it be moved ? std::list<GMJob*>::reverse_iterator npos = opos; std::list<GMJob*>::reverse_iterator rpos = npos; ++npos; while(npos != queue_.rend()) { if(!compare((GMJob*)ref, *npos)) break; rpos = npos; ++npos; }; if(rpos != opos) { // no reason to move to itself queue_.insert(--(rpos.base()),*opos); queue_.erase(--(opos.base())); }; return true; }; ++opos; }; // Job is not found in queue - can only happen in case of bug in the code. // Try to recover and bail out. logger.msg(Arc::FATAL,"%s: PushSorted failed to find job where expected",ref->job_id); ref->SwitchQueue(old_queue); return false; } GMJobRef GMJobQueue::Front() { std::unique_lock<std::recursive_mutex> qlock(lock_); if(queue_.empty()) return GMJobRef(); GMJobRef ref(queue_.front()); return ref; } GMJobRef GMJobQueue::Pop() { std::unique_lock<std::recursive_mutex> qlock(lock_); if(queue_.empty()) return GMJobRef(); GMJobRef ref(queue_.front()); ref->SwitchQueue(NULL); return ref; } bool GMJobQueue::Unpop(GMJobRef& ref) { if(!ref) return false; return ref->SwitchQueue(this, true); } bool GMJobQueue::Erase(GMJobRef& ref) { if(!ref) return false; std::unique_lock<std::recursive_mutex> lock(lock_); if(ref->queue == this) { ref->SwitchQueue(NULL); return true; }; return false; } bool GMJobQueue::Exists(const GMJobRef& ref) const { if(!ref) return false; std::unique_lock<std::recursive_mutex> lock(lock_); return (ref->queue == this); } bool GMJobQueue::IsEmpty() const { std::unique_lock<std::recursive_mutex> lock(lock_); return queue_.empty(); } int GMJobQueue::Size() const { std::unique_lock<std::recursive_mutex> lock(lock_); return queue_.size(); } void GMJobQueue::Sort(comparator_t compare) { std::unique_lock<std::recursive_mutex> lock(lock_); queue_.sort(compare); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/GMJob.h0000644000000000000000000000013215067751327023402 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.864493635 30 ctime=1759499029.523003807 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/GMJob.h0000644000175000002070000002052215067751327025305 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_JOB_H #define GRID_MANAGER_JOB_H #include #include #include #include namespace ARex { class JobsList; class JobLocalDescription; class GMConfig; class JobsMetrics; /// Possible job states enum job_state_t { JOB_STATE_ACCEPTED = 0, JOB_STATE_PREPARING = 1, JOB_STATE_SUBMITTING = 2, JOB_STATE_INLRMS = 3, JOB_STATE_FINISHING = 4, JOB_STATE_FINISHED = 5, JOB_STATE_DELETED = 6, JOB_STATE_CANCELING = 7, JOB_STATE_UNDEFINED = 8 }; /// Number of job states #define JOB_STATE_NUM (JOB_STATE_UNDEFINED+1) /// Job identifier. Stored as a string; normally a random string of /// numbers and letters. typedef std::string JobId; class GMJobRef; class GMJobQueue; /// Represents a job in memory as it passes through the JobsList state machine.
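/// GMJob instances are reference counted: they are held through GMJobRef,
/// whose constructors and destructor call AddReference()/RemoveReference(),
/// and the object deletes itself when the last reference is dropped.
/// Membership in a GMJobQueue also counts as one reference (see SwitchQueue()).
/// Illustrative usage sketch (hypothetical id/user values):
///   GMJobRef ref(new GMJob(id, user));  // ref_count becomes 1
///   queue.Push(ref);                    // queue takes its own reference
///   ref = GMJobRef();                   // job stays alive via the queue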
class GMJob { friend class JobsList; friend class GMJobRef; friend class GMJobQueue; friend class GMJobMock; friend class JobsMetrics; private: // State of the job (state machine) job_state_t job_state; // Flag to indicate job stays at this stage due to limits imposed. // Such jobs are not counted in counters bool job_pending; // Job identifier JobId job_id; // Directory to run job in std::string session_dir; // Explanation of job's failure std::string failure_reason; // How long job is kept on cluster after it finished time_t keep_finished; time_t keep_deleted; // Pointer to object containing most important parameters of job, // loaded when needed. JobLocalDescription* local; // Job's owner Arc::User user; // Used to determine data transfer share (eg DN, VOMS VO) std::string transfer_share; // Start time of job i.e. when it first moves to PREPARING time_t start_time; struct job_state_rec_t { const char* name; char mail_flag; }; /// Maps job state to state name and flag for email at that state static job_state_rec_t const states_all[JOB_STATE_NUM]; // Job references handler std::recursive_mutex ref_lock; int ref_count; /// Inform job it has new GMJobRef associated void AddReference(void); /// Inform job that GMJobRef was destroyed void RemoveReference(void); /// Inform job that GMJobRef intends to destroy job void DestroyReference(void); /// Change queue to which job belongs. Queue switch is subject to queue's priority and /// happens atomically. Similar to GMJobQueue::Push(GMJobRef(this)). /// Returns true if queue was changed. bool SwitchQueue(GMJobQueue* new_queue, bool to_front = false); /// Queue to which job is currently associated GMJobQueue* queue; public: // external utility being run to perform tasks like stage-in/out, // submit/cancel. (todo - move to private) Arc::Run* child; std::string child_output; // Constructors and destructor. // Accepts: // job_id - identifier // user - owner of job // dir - session_dir of job // state - initial state of job GMJob(const JobId &job_id,const Arc::User& user,const std::string &dir = "",job_state_t state = JOB_STATE_UNDEFINED); GMJob(void); GMJob(const GMJob &job); ~GMJob(void); job_state_t get_state() const { return job_state; }; const char* get_state_name() const; char get_state_mail_flag() const; static const char* get_state_name(job_state_t st); static char get_state_mail_flag(job_state_t st); static job_state_t get_state(const char* state); const JobId& get_id() const { return job_id; }; std::string SessionDir(void) const { return session_dir; }; void AddFailure(const std::string &reason) { failure_reason+=reason; failure_reason+="\n"; }; /// Retrieve current failure reason (both in memory and stored in control dir). /// For non-failed jobs returned string is empty. std::string GetFailure(const GMConfig& config) const; /// Check if job is marked as failed (slightly faster than GetFailure). /// For failed job returns true, non-failed - false. 
bool CheckFailure(const GMConfig& config) const; bool operator==(const GMJob& job) const { return (job_id == job.job_id); }; bool operator==(const JobId &id) const { return (job_id == id); }; bool operator!=(const JobId &id) const { return (job_id != id); }; void set_user(const Arc::User& u) { user = u; } const Arc::User& get_user() const { return user;} void set_share(std::string share); // Force 'local' to be created and read from file if not already available JobLocalDescription* GetLocalDescription(const GMConfig& config); // Use only preloaded local JobLocalDescription* GetLocalDescription() const; void Start() { start_time = time(NULL); }; time_t GetStartTime() const { return start_time; }; void PrepareToDestroy(void); }; class GMJobRef { private: GMJob* job_; public: GMJobRef() { job_ = NULL; } GMJobRef(GMJob* job) { job_ = job; if(job_) job_->AddReference(); } GMJobRef(GMJobRef const& other) { job_ = other.job_; if(job_) job_->AddReference(); } ~GMJobRef() { if (job_) job_->RemoveReference(); } GMJobRef& operator=(GMJobRef const& other) { if (job_) job_->RemoveReference(); job_ = other.job_; if(job_) job_->AddReference(); return *this; } bool operator==(GMJobRef const& other) const { return (job_ == other.job_); } bool operator==(GMJob const* job) const { return (job_ == job); } bool operator==(GMJob* job) const { return (job_ == job); } operator bool() const { return job_ != NULL; } bool operator!() const { return job_ == NULL; } GMJob& operator*() const { return *job_; } GMJob* operator->() const { return job_; } operator GMJob*() const { return job_; } void Destroy() { if (job_) job_->DestroyReference(); job_ = NULL; } }; class GMJobQueue { friend class GMJob; private: // Using global lock intentionally. // It would be possible to have per-queue lock but rules to avoid // deadlocks between 2 queues and queue+job locks would be too complex // and too easy to break. So as long as there are not many queues a // global lock is acceptable. static std::recursive_mutex lock_; int const priority_; std::list<GMJob*> queue_; std::string name_; GMJobQueue(); GMJobQueue(GMJobQueue const& it); public: //! Construct jobs queue with specified priority. GMJobQueue(int priority, char const * name); //! Comparison function type definition. typedef bool (*comparator_t)(GMJob const * first, GMJob const * second); //! Insert job at end of the queue. Subject to queue priority. bool Push(GMJobRef& ref); //! Insert job into queue at position defined by sorting. Subject to queue priority. bool PushSorted(GMJobRef& ref, comparator_t compare); //! Check if queue allows for job to be moved into another queue. Default implementation checks priority. virtual bool CanSwitch(GMJob const& ref, GMJobQueue const& new_queue, bool to_front); //! Check if queue allows for job to be removed. Default implementation returns true. virtual bool CanRemove(GMJob const& ref); //! Returns reference to first job in the queue. GMJobRef Front(); //! Removes first job in the queue and returns its reference. GMJobRef Pop(); //! Insert job at beginning of the queue. Subject to queue priority. bool Unpop(GMJobRef& ref); //! Removes job from the queue bool Erase(GMJobRef& ref); //! Returns true if job is in queue bool Exists(const GMJobRef& ref) const; //! Returns true if there are no jobs in queue bool IsEmpty() const; //! Returns number of jobs in queue int Size() const; //! Sort jobs in queue void Sort(comparator_t compare); //!
Removes job from queue identified by key template <typename KEY> bool Erase(KEY const& key) { std::unique_lock<std::recursive_mutex> lock(lock_); for(std::list<GMJob*>::iterator i = queue_.begin(); i != queue_.end(); ++i) { if((*i) && (**i == key)) { (*i)->SwitchQueue(NULL); return true; }; }; return false; }; //! Gets reference to job identified by key and stored in this queue template <typename KEY> GMJobRef Find(KEY const& key) const { std::unique_lock<std::recursive_mutex> lock(lock_); for(std::list<GMJob*>::const_iterator i = queue_.begin(); i != queue_.end(); ++i) { if((*i) && (**i == key)) { return GMJobRef(*i); }; }; return GMJobRef(); }; }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/JobDescriptionHandler.cpp0000644000000000000000000000013215067751327027213 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.864493635 30 ctime=1759499029.516373798 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/JobDescriptionHandler.cpp0000644000175000002070000004672115067751327031117 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include #include #include #include #include "../files/ControlFileHandling.h" #include "../conf/GMConfig.h" #include "../../delegation/DelegationStore.h" #include "../../delegation/DelegationStores.h" #include "JobDescriptionHandler.h" // TODO: move to using process_job_req as much as possible namespace ARex { Arc::Logger JobDescriptionHandler::logger(Arc::Logger::getRootLogger(), "JobDescriptionHandler"); const std::string JobDescriptionHandler::NG_RSL_DEFAULT_STDIN("/dev/null"); const std::string JobDescriptionHandler::NG_RSL_DEFAULT_STDOUT("/dev/null"); const std::string JobDescriptionHandler::NG_RSL_DEFAULT_STDERR("/dev/null"); bool JobDescriptionHandler::process_job_req(const GMJob &job,JobLocalDescription &job_desc) const { /* read local first to get some additional info pushed here by script */ job_local_read_file(job.get_id(),config,job_desc); /* some default values */ if(job_desc.lrms.empty()) job_desc.lrms=config.DefaultLRMS(); if(job_desc.queue.empty()) job_desc.queue=config.DefaultQueue(); if(job_desc.lifetime.empty()) job_desc.lifetime=Arc::tostring(config.KeepFinished()); if(parse_job_req(job.get_id(),job_desc) != JobReqSuccess) return false; if(job_desc.reruns>config.Reruns()) job_desc.reruns=config.Reruns(); if(!job_local_write_file(job,config,job_desc)) return false; // Convert delegation ids to credential paths. // Add default credentials for files which have none of their own assigned.
ARex::DelegationStores* delegs = config.GetDelegations(); std::string default_cred = job_proxy_filename(job.get_id(), config); // TODO: drop job.proxy as source of delegation std::string default_cred_type; if(!job_desc.delegationid.empty()) { if(delegs) { std::list<std::string> meta; DelegationStore& deleg = delegs->operator[](config.DelegationDir()); std::string fname = deleg.FindCred(job_desc.delegationid, job_desc.DN, meta); if(!fname.empty()) { default_cred = fname; default_cred_type = (!meta.empty())?meta.front():""; }; }; }; // Resolve delegation ids into proxy credential paths for(std::list<FileData>::iterator f = job_desc.inputdata.begin(); f != job_desc.inputdata.end(); ++f) { if(f->has_lfn()) { if(f->cred.empty()) { f->cred = default_cred; f->cred_type = default_cred_type; } else { std::string path; std::list<std::string> meta; if(delegs) path = (*delegs)[config.DelegationDir()].FindCred(f->cred,job_desc.DN,meta); f->cred = path; f->cred_type = (!meta.empty())?meta.front():""; }; }; }; for(std::list<FileData>::iterator f = job_desc.outputdata.begin(); f != job_desc.outputdata.end(); ++f) { if(f->has_lfn()) { if(f->cred.empty()) { f->cred = default_cred; } else { std::string path; std::list<std::string> meta; ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) path = (*delegs)[config.DelegationDir()].FindCred(f->cred,job_desc.DN,meta); f->cred = path; f->cred_type = (!meta.empty())?meta.front():""; }; }; }; if(!job_input_write_file(job,config,job_desc.inputdata)) return false; if(!job_output_write_file(job,config,job_desc.outputdata,job_output_success)) return false; return true; } JobReqResult JobDescriptionHandler::parse_job_req_from_mem(JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,const std::string &desc_str,bool check_acl) const { { std::list<Arc::JobDescription> descs; Arc::JobDescriptionResult r = Arc::JobDescription::Parse(desc_str, descs, "", "GRIDMANAGER"); if (!r) { std::string failure = r.str(); if(failure.empty()) failure = "Unable to parse job description."; return JobReqResult(JobReqInternalFailure, "", failure); } if(descs.size() != 1) { return JobReqResult(JobReqInternalFailure, "", "Multiple job descriptions not supported"); } arc_job_desc = descs.front(); } return parse_job_req_internal(job_desc, arc_job_desc, check_acl); } JobReqResult JobDescriptionHandler::parse_job_req_from_file(JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,const std::string &fname,bool check_acl) const { Arc::JobDescriptionResult arc_job_res = get_arc_job_description(fname, arc_job_desc); if (!arc_job_res) { std::string failure = arc_job_res.str(); if(failure.empty()) failure = "Unable to read or parse job description."; return JobReqResult(JobReqInternalFailure, "", failure); } return parse_job_req_internal(job_desc, arc_job_desc, check_acl); } JobReqResult JobDescriptionHandler::parse_job_req_internal(JobLocalDescription &job_desc,Arc::JobDescription const& arc_job_desc,bool check_acl) const { if (!arc_job_desc.Resources.RunTimeEnvironment.isResolved()) { return JobReqResult(JobReqInternalFailure, "", "Runtime environments have not been resolved."); } job_desc = arc_job_desc; // Additional queue processing // TODO: Temporary solution. // Check for special WLCG queues made out of "queue name_VO name".
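// For example (hypothetical values): if queue "grid" is configured with
// authorized VO "atlas", a job submitted to queue "grid_atlas" matches the
// synthetic name built below and its queue is rewritten back to "grid".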
for(std::list::const_iterator q = config.Queues().begin(); q != config.Queues().end();++q) { if(*q == job_desc.queue) break; const std::list & vos = config.AuthorizedVOs(q->c_str()); // per queue const std::list & cvos = config.AuthorizedVOs(""); // per cluster bool vo_found = false; if(!vos.empty()) { for(std::list::const_iterator vo = vos.begin();vo != vos.end(); ++vo) { std::string synthetic_queue = *q; synthetic_queue += "_"; synthetic_queue += *vo; if(synthetic_queue == job_desc.queue) { vo_found = true; break; }; }; } else { for(std::list::const_iterator vo = cvos.begin();vo != cvos.end(); ++vo) { std::string synthetic_queue = *q; synthetic_queue += "_"; synthetic_queue += *vo; if(synthetic_queue == job_desc.queue) { vo_found = true; break; }; }; }; if(vo_found) { logger.msg(Arc::WARNING, "Replacing queue '%s' with '%s'", job_desc.queue, *q); job_desc.queue = *q; break; }; }; if (check_acl) return get_acl(arc_job_desc); return JobReqSuccess; } JobReqResult JobDescriptionHandler::parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,bool check_acl) const { Arc::JobDescription arc_job_desc; return parse_job_req(job_id,job_desc,arc_job_desc,check_acl); } JobReqResult JobDescriptionHandler::parse_job_req(const JobId &job_id,JobLocalDescription &job_desc,Arc::JobDescription& arc_job_desc,bool check_acl) const { std::string fname = job_control_path(config.ControlDir(),job_id,sfx_desc); return parse_job_req_from_file(job_desc,arc_job_desc,fname,check_acl); } std::string JobDescriptionHandler::get_local_id(const JobId &job_id) const { std::string id; std::string joboption("joboption_jobid="); std::string fgrami(job_control_path(config.ControlDir(),job_id,sfx_grami)); std::list grami_data; if (Arc::FileRead(fgrami, grami_data)) { for (std::list::iterator line = grami_data.begin(); line != grami_data.end(); ++line) { if (line->find(joboption) == 0) { id = line->substr(joboption.length()); id = Arc::trim(id, "'"); break; } } } return id; } bool JobDescriptionHandler::write_grami_executable(std::ofstream& f, const std::string& name, const Arc::ExecutableType& exec) const { std::string executable = Arc::trim(exec.Path); if (executable[0] != '/' && executable[0] != '$' && !(executable[0] == '.' 
&& executable[1] == '/')) executable = "./"+executable; f<<"joboption_"<::const_iterator it = exec.Argument.begin(); it != exec.Argument.end(); it++, i++) { f<<"joboption_"<c_str(),true)<::const_iterator e = arc_job_desc.Application.PreExecutable.begin(); e != arc_job_desc.Application.PreExecutable.end(); ++e) { if(!write_grami_executable(f,"pre_"+Arc::tostring(n),*e)) return false; ++n; } for(std::list<Arc::ExecutableType>::const_iterator e = arc_job_desc.Application.PostExecutable.begin(); e != arc_job_desc.Application.PostExecutable.end(); ++e) { if(!write_grami_executable(f,"post_"+Arc::tostring(n),*e)) return false; } f<<"joboption_stdin="< >::const_iterator it = arc_job_desc.Application.Environment.begin(); it != arc_job_desc.Application.Environment.end(); it++, i++) { f<<"joboption_env_"<first+"="+it->second,true)<::const_iterator itSW = arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().begin(); itSW != arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList().end(); itSW++) { if (itSW->empty()) continue; std::string rte = Arc::upper(*itSW); if (!Arc::CanonicalDir(rte)) { logger.msg(Arc::ERROR, "Bad name for runtime environment: %s", (std::string)*itSW); return false; } f<<"joboption_runtime_"<& opts = itSW->getOptions(); int n = 1; for(std::list<std::string>::const_iterator opt = opts.begin(); opt != opts.end();++opt) { f<<"joboption_runtime_"< descs; Arc::JobDescriptionResult r = Arc::JobDescription::Parse(job_desc_str, descs, "", "GRIDMANAGER"); if (r) { if(descs.size() == 1) { desc = descs.front(); } else { r = Arc::JobDescriptionResult(false,"Multiple job descriptions not supported"); } } return r; } JobReqResult JobDescriptionHandler::get_acl(const Arc::JobDescription& arc_job_desc) const { if( !arc_job_desc.Application.AccessControl ) return JobReqSuccess; Arc::XMLNode typeNode = arc_job_desc.Application.AccessControl["Type"]; Arc::XMLNode contentNode = arc_job_desc.Application.AccessControl["Content"]; if( !contentNode ) { std::string failure = "acl element wrongly formatted - missing Content element"; logger.msg(Arc::ERROR, failure); return JobReqResult(JobReqMissingFailure, "", failure); }; if( (!typeNode) || ( ( (std::string) typeNode ) == "GACL" ) || ( ( (std::string) typeNode ) == "ARC" ) ) { std::string str_content; if(contentNode.Size() > 0) { Arc::XMLNode acl_doc; contentNode.Child().New(acl_doc); acl_doc.GetDoc(str_content); } else { str_content = (std::string)contentNode; } return JobReqResult(JobReqSuccess, str_content); } std::string failure = "ARC: unsupported ACL type specified: " + (std::string)typeNode; logger.msg(Arc::ERROR, "%s", failure); return JobReqResult(JobReqUnsupportedFailure, "", failure); } /* parse job description and set specified file permissions to executable */ bool JobDescriptionHandler::set_execs(const GMJob &job) const { std::string fname = job_control_path(config.ControlDir(),job.get_id(),sfx_desc); Arc::JobDescription desc; if (!get_arc_job_description(fname, desc)) return false; std::string session_dir = job.SessionDir(); if (desc.Application.Executable.Path[0] != '/' && desc.Application.Executable.Path[0] != '$') { std::string executable = desc.Application.Executable.Path; if(!Arc::CanonicalDir(executable)) { logger.msg(Arc::ERROR, "Bad name for executable: %s", executable); return false; } fix_file_permissions_in_session(session_dir+"/"+executable,job,config,true); } // TODO: Support for PreExecutable and PostExecutable for(std::list<Arc::InputFileType>::const_iterator it = desc.DataStaging.InputFiles.begin(); it!=desc.DataStaging.InputFiles.end();it++) { if(it->IsExecutable
{ std::string executable = it->Name; if (executable[0] != '/' && !(executable[0] == '.' && executable[1] == '/')) executable = "./"+executable; if(!Arc::CanonicalDir(executable)) { logger.msg(Arc::ERROR, "Bad name for executable: %s", executable); return false; } fix_file_permissions_in_session(session_dir+"/"+executable,job,config,true); } } return true; } std::ostream& operator<<(std::ostream& o, const JobDescriptionHandler::value_for_shell& s) { if(s.str == NULL) return o; if(s.quote) o<<"'"; const char* p = s.str; for(;;) { const char* pp = strchr(p,'\''); if(pp == NULL) { o<<p; break; }; o<<std::string(p,pp-p)<<"'\\''"; p=pp+1; }; if(s.quote) o<<"'"; return o; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/DTRGenerator.cpp #ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include #include #include #include #include "../conf/UrlMapConfig.h" #include "../files/ControlFileHandling.h" #include "../conf/StagingConfig.h" #include "../../delegation/DelegationStore.h" #include "../../delegation/DelegationStores.h" #include "GMJob.h" #include "JobsList.h" #include "DTRGenerator.h" namespace ARex { Arc::Logger DTRInfo::logger(Arc::Logger::getRootLogger(), "DTRInfo"); DTRInfo::DTRInfo(const GMConfig& config): config(config) { } void DTRInfo::receiveDTR(DataStaging::DTR_ptr dtr) { // write state info to job.id.input for example } // We can't just let jobs leave internal queues because their DTRs may still be running. // So this is the place where precautions may be taken. bool GMJobQueueDTR::CanSwitch(GMJob const& job, GMJobQueue const& new_queue, bool to_front) { return GMJobQueue::CanSwitch(job, new_queue, to_front); } bool GMJobQueueDTR::CanRemove(GMJob const& job) { return GMJobQueue::CanRemove(job); } Arc::Logger DTRGenerator::logger(Arc::Logger::getRootLogger(), "Generator"); bool compare_job_description(GMJob const * first, GMJob const * second) { if(!first) return false; if(!second) return false; int priority_first = first->GetLocalDescription() ? first->GetLocalDescription()->priority : JobLocalDescription::prioritydefault; int priority_second = second->GetLocalDescription() ? second->GetLocalDescription()->priority : JobLocalDescription::prioritydefault; return priority_first > priority_second; } std::string filedata_pfn(FileData const& fd) { return fd.pfn; } void DTRGenerator::main_thread(void* arg) { ((DTRGenerator*)arg)->thread(); } void DTRGenerator::thread() { // set up logging - to avoid logging DTR logs to the main A-REX log // we disconnect the root logger while submitting to the Scheduler //Arc::Logger::getRootLogger().setThreadContext(); while (generator_state != DataStaging::TO_STOP) { // look at event queue and deal with any events.
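// The loop below drains three event sources in a fixed order:
// 1. jobs_cancelled - handled first so DTRs of cancelled jobs can be ignored,
// 2. dtrs_received - DTRs returned by the Scheduler,
// 3. jobs_received - new jobs, processed for a bounded time per iteration
//    so that a burst of new jobs cannot starve jobs that are finishing.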
// This method of iteration should be thread-safe because events // are always added to the end of the list logger.msg(Arc::DEBUG, "DTR Generator waiting to process: %d jobs to cancel, %d DTRs, %d new jobs", jobs_cancelled.size(), dtrs_received.size(), jobs_received.Size()); int cancelled_num = 0; int dtrs_num = 0; int jobs_num = 0; // take cancelled jobs first so we can ignore other DTRs in those jobs Arc::AutoLock elock(event_lock); std::list::iterator it_cancel = jobs_cancelled.begin(); while (it_cancel != jobs_cancelled.end()) { // check if it is still in received queue and remove GMJobRef job = jobs_received.Find(*it_cancel); if(!job) { // job must be in scheduler already logger.msg(Arc::DEBUG, "%s: Job cancel request from DTR generator to scheduler", *it_cancel); elock.unlock(); processCancelledJob(*it_cancel); elock.lock(); } else { logger.msg(Arc::DEBUG, "%s: Returning canceled job from DTR generator", job->get_id()); elock.unlock(); { Arc::AutoLock dlock(dtrs_lock); finished_jobs[job->get_id()] = std::string("Job was canceled while waiting in DTR queue"); } elock.lock(); jobs_received.Erase(job); jobs.RequestAttention(job); // pass job back to states processing } it_cancel = jobs_cancelled.erase(it_cancel); ++cancelled_num; } // next DTRs sent back from the Scheduler std::list::iterator it_dtrs = dtrs_received.begin(); while (it_dtrs != dtrs_received.end()) { elock.unlock(); processReceivedDTR(*it_dtrs); elock.lock(); it_dtrs = dtrs_received.erase(it_dtrs); ++dtrs_num; } // finally new jobs // it can happen that the list grows faster than the jobs are processed // so here we only process for a small time to avoid blocking other // jobs finishing Arc::Time limit(Arc::Time() + Arc::Period(30)); // sort the list by job priority //jobs_received.Sort(compare_job_description); while (Arc::Time() < limit) { GMJobRef job = jobs_received.Front(); // get reference but keep job in queue if(!job) break; elock.unlock(); bool jobAccepted = processReceivedJob(job); // on success job is moved to jobs_processing queue elock.lock(); if(!jobAccepted) { logger.msg(Arc::DEBUG, "%s: Re-requesting attention from DTR generator", job->get_id()); // processReceivedJob fills error in finished_jobs - no need to do that here jobs_received.Erase(job); // release from queue cause 'jobs' queues have lower priority jobs.RequestAttention(job); // pass job back to states processing } ++jobs_num; } bool queuesEmpty = jobs_cancelled.empty() && dtrs_received.empty() && jobs_received.IsEmpty(); elock.unlock(); logger.msg(Arc::DEBUG, "DTR Generator processed: %d jobs to cancel, %d DTRs, %d new jobs", cancelled_num, dtrs_num, jobs_num); // wait till something arrives or go back to processing almost immediately if queues not empty event_lock.wait(queuesEmpty ? 50000 : 100); } // main processing loop // stop scheduler - cancels all DTRs and waits for them to complete scheduler->stop(); // Handle all the DTRs returned by the scheduler, in case there are completed // DTRs to process before exiting and thus avoiding redoing those transfers // when A-REX restarts. // Lock is not necessary here because scheduler has finished and A-REX is // waiting for this thread to exit. 
std::list::iterator it_dtrs = dtrs_received.begin(); while (it_dtrs != dtrs_received.end()) { processReceivedDTR(*it_dtrs); it_dtrs = dtrs_received.erase(it_dtrs); } run_condition.signal(); logger.msg(Arc::INFO, "Exiting Generator thread"); } DTRGenerator::DTRGenerator(const GMConfig& config, JobsList& jobs) : jobs_received(JobsList::ProcessingQueuePriority+1, "DTR received", *this), jobs_processing(JobsList::ProcessingQueuePriority+2, "DTR processing", *this), generator_state(DataStaging::INITIATED), config(config), staging_conf(config), info(config), jobs(jobs) { if (!staging_conf) return; // Set log level for DTR in job.id.errors files DataStaging::DTR::LOG_LEVEL = staging_conf.log_level; scheduler = DataStaging::Scheduler::getInstance(); // Convert A-REX configuration values to DTR configuration scheduler->SetDumpLocation(staging_conf.dtr_log); // Read DTR state from previous dump to find any transfers stopped half-way // If those destinations appear again, add overwrite=yes readDTRState(staging_conf.dtr_log); // Processing limits scheduler->SetSlots(staging_conf.max_processor, staging_conf.max_processor, staging_conf.max_delivery, staging_conf.max_emergency, staging_conf.max_prepared); // Transfer shares DataStaging::TransferSharesConf share_conf(staging_conf.share_type, staging_conf.defined_shares); scheduler->SetTransferSharesConf(share_conf); // Transfer limits DataStaging::TransferParameters transfer_limits; transfer_limits.min_current_bandwidth = staging_conf.min_speed; transfer_limits.averaging_time = staging_conf.min_speed_time; transfer_limits.min_average_bandwidth = staging_conf.min_average_speed; transfer_limits.max_inactivity_time = staging_conf.max_inactivity_time; scheduler->SetTransferParameters(transfer_limits); // URL mappings UrlMapConfig url_map(config); scheduler->SetURLMapping(url_map); // Preferred pattern scheduler->SetPreferredPattern(staging_conf.preferred_pattern); // Delivery services scheduler->SetDeliveryServices(staging_conf.delivery_services); // Limit on remote delivery size scheduler->SetRemoteSizeLimit(staging_conf.remote_size_limit); // Set performance metrics logging scheduler->SetJobPerfLog(staging_conf.perf_log); // End of configuration - start Scheduler thread scheduler->start(); generator_state = DataStaging::RUNNING; Arc::CreateThreadFunction(&main_thread, this); } DTRGenerator::~DTRGenerator() { if (generator_state != DataStaging::RUNNING) return; logger.msg(Arc::INFO, "Shutting down data staging threads"); generator_state = DataStaging::TO_STOP; event_lock.signal(); run_condition.wait(); generator_state = DataStaging::STOPPED; } void DTRGenerator::receiveDTR(DataStaging::DTR_ptr dtr) { if (generator_state == DataStaging::INITIATED || generator_state == DataStaging::STOPPED) { logger.msg(Arc::ERROR, "DTRGenerator is not running!"); return; } else if (generator_state == DataStaging::TO_STOP) { logger.msg(Arc::VERBOSE, "Received DTR %s during Generator shutdown - may not be processed", dtr->get_id()); // still a chance to process this DTR so don't return } Arc::AutoLock elock(event_lock); dtrs_received.push_back(dtr); event_lock.signal_nonblock(); } bool DTRGenerator::receiveJob(GMJobRef& job) { if (generator_state != DataStaging::RUNNING) { logger.msg(Arc::WARNING, "DTRGenerator is not running!"); } if(!job) { logger.msg(Arc::ERROR, "DTRGenerator was sent null job"); return false; } // Add to jobs list even if Generator is stopped, so that A-REX doesn't // think that staging has finished. 
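// Jobs are queued ordered by the priority field of their local description
// (see compare_job_description above), so higher-priority jobs are handed to
// the data staging scheduler first.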
Arc::AutoLock elock(event_lock); bool result = jobs_received.PushSorted(job, compare_job_description); if(result) { logger.msg(Arc::DEBUG, "%s: Received job in DTR generator", job->get_id()); event_lock.signal_nonblock(); } else { logger.msg(Arc::ERROR, "%s: Failed to receive job in DTR generator", job->get_id()); } return result; } void DTRGenerator::cancelJob(const GMJobRef& job) { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator got request to cancel null job"); return; } if (generator_state != DataStaging::RUNNING) { logger.msg(Arc::WARNING, "DTRGenerator is not running!"); } Arc::AutoLock elock(event_lock); jobs_cancelled.push_back(job->get_id()); event_lock.signal_nonblock(); } bool DTRGenerator::queryJobFinished(GMJobRef const& job) { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator is queried about null job"); return false; } // Data staging is finished if the job is in finished_jobs and // not in active_dtrs or jobs_received. // check if this job is still in the received jobs queue Arc::AutoLock elock(event_lock); if(jobs_received.Exists(job)) { return false; } elock.unlock(); // check if any DTRs in this job are still active Arc::AutoLock dlock(dtrs_lock); if (active_dtrs.find(job->get_id()) != active_dtrs.end()) { return false; } std::map<std::string, std::string>::iterator i = finished_jobs.find(job->get_id()); if (i != finished_jobs.end() && !i->second.empty()) { // add failure to job if any DTR failed job->AddFailure(i->second); finished_jobs[job->get_id()] = ""; } return true; } bool DTRGenerator::hasJob(const GMJobRef& job) { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator is asked about null job"); return false; } // check if this job is still in the received jobs queue Arc::AutoLock elock(event_lock); if(jobs_received.Exists(job)) { return true; } elock.unlock(); // check if any DTRs in this job are still active Arc::AutoLock dlock(dtrs_lock); if (active_dtrs.find(job->get_id()) != active_dtrs.end()) { return true; } // finally check finished jobs std::map<std::string, std::string>::iterator i = finished_jobs.find(job->get_id()); if (i != finished_jobs.end()) { return true; } // not found return false; } void DTRGenerator::removeJob(const GMJobRef& job) { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator is requested to remove null job"); return; } // check if this job is still in the received jobs queue Arc::AutoLock elock(event_lock); if(jobs_received.Exists(job)) { logger.msg(Arc::WARNING, "%s: Trying to remove job from data staging which is still active", job->get_id()); return; } elock.unlock(); // check if any DTRs in this job are still active Arc::AutoLock dlock(dtrs_lock); if (active_dtrs.find(job->get_id()) != active_dtrs.end()) { logger.msg(Arc::WARNING, "%s: Trying to remove job from data staging which is still active", job->get_id()); return; } // finally check finished jobs std::map<std::string, std::string>::iterator i = finished_jobs.find(job->get_id()); if (i == finished_jobs.end()) { // warn if not in finished logger.msg(Arc::WARNING, "%s: Trying to remove job from data staging which does not exist", job->get_id()); return; } finished_jobs.erase(i); } bool DTRGenerator::processReceivedDTR(DataStaging::DTR_ptr dtr) { std::string jobid(dtr->get_parent_job_id()); GMJobRef job = jobs_processing.Find(jobid); if (!(*dtr)) { logger.msg(Arc::ERROR, "%s: Invalid DTR", jobid); if (dtr->get_status() != DataStaging::DTRStatus::CANCELLED) { scheduler->cancelDTRs(jobid); { Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = std::string("Invalid Data Transfer Request"); active_dtrs.erase(jobid); } // Because it is not possible to find out if there
will be more // job's DTR coming, if possible return job back to jobs processing queue. if(job) { jobs_processing.Erase(job); jobs.RequestAttention(job); } } return false; } logger.msg(Arc::DEBUG, "%s: Received DTR %s to copy file %s in state %s", jobid, dtr->get_id(), dtr->get_source()->str(), dtr->get_status().str()); if(!job) { // This job is not being processed anymore (somehow) logger.msg(Arc::ERROR, "%s: Received DTR belongs to inactive job", jobid); scheduler->cancelDTRs(jobid); // Cancel rest of such DTRs Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = std::string("Job was gone while performing data transfer"); active_dtrs.erase(jobid); return false; } uid_t job_uid = config.StrictSession() ? dtr->get_local_user().get_uid() : 0; uid_t job_gid = config.StrictSession() ? dtr->get_local_user().get_gid() : 0; // Get session dir from .local if possible std::string session_dir; JobLocalDescription job_desc; if (job_local_read_file(jobid, config, job_desc) && !job_desc.sessiondir.empty()) { session_dir = job_desc.sessiondir; } else { logger.msg(Arc::WARNING, "%s: Failed reading local information", jobid); session_dir = config.SessionRoot(jobid) + '/' + jobid; } std::string dtr_transfer_statistics; if (dtr->error() && dtr->is_mandatory() && dtr->get_status() != DataStaging::DTRStatus::CANCELLED) { // for uploads, report error but let other transfers continue // for downloads, cancel all other transfers logger.msg(Arc::ERROR, "%s: DTR %s to copy file %s failed", jobid, dtr->get_id(), dtr->get_source()->str()); Arc::AutoLock dlock(dtrs_lock); if (!dtr->get_source()->Local() && finished_jobs.find(jobid) == finished_jobs.end()) { // download // cancel other DTRs and erase from our list unless error was already reported logger.msg(Arc::INFO, "%s: Cancelling other DTRs", jobid); scheduler->cancelDTRs(jobid); } // add error to finished jobs finished_jobs[jobid] += std::string("Failed in data staging: " + dtr->get_error_status().GetDesc() + '\n'); } else if (dtr->get_status() != DataStaging::DTRStatus::CANCELLED) { // remove from job.id.input/output files on success // find out if download or upload by checking which is remote file if (dtr->error() && !dtr->is_mandatory()) { dtr->get_logger()->msg(Arc::INFO, "%s: DTR %s to copy to %s failed but is not mandatory", jobid, dtr->get_id(), dtr->get_destination_str()); } std::list files; if (dtr->get_source()->Local()) { // output files dtr_transfer_statistics = "outputfile:url=" + dtr->get_destination()->str() + ','; if (!job_output_read_file(jobid, config, files)) { logger.msg(Arc::WARNING, "%s: Failed to read list of output files", jobid); } else { FileData uploaded_file; // go through list and take out this file for (std::list::iterator i = files.begin(); i != files.end();) { // compare 'standard' URLs Arc::URL file_lfn(i->lfn); Arc::URL dtr_lfn(dtr->get_destination()->str()); // check if it is in a dynamic list - if so remove from it if (i->pfn.size() > 1 && i->pfn[1] == '@') { std::string dynamic_output(session_dir+'/'+i->pfn.substr(2)); std::list dynamic_files; if (!job_Xput_read_file(dynamic_output, dynamic_files, job_uid, job_gid)) { logger.msg(Arc::WARNING, "%s: Failed to read dynamic output files in %s", jobid, dynamic_output); } else { logger.msg(Arc::DEBUG, "%s: Going through files in list %s", jobid, dynamic_output); for (std::list::iterator dynamic_file = dynamic_files.begin(); dynamic_file != dynamic_files.end(); ++dynamic_file) { if (Arc::URL(dynamic_file->lfn).str() == dtr_lfn.str()) { logger.msg(Arc::DEBUG, "%s: Removing %s 
from dynamic output file %s", jobid, dtr_lfn.str(), dynamic_output); uploaded_file = *dynamic_file; dynamic_files.erase(dynamic_file); if (!job_Xput_write_file(dynamic_output, dynamic_files, job_output_all, job_uid, job_gid)) logger.msg(Arc::WARNING, "%s: Failed to write back dynamic output files in %s", jobid, dynamic_output); break; } } } } if (file_lfn.str() == dtr_lfn.str()) { uploaded_file = *i; i = files.erase(i); } else { ++i; } } // files // write back .output file if (!job_output_write_file(*job, config, files)) { logger.msg(Arc::WARNING, "%s: Failed to write list of output files", jobid); } if(!uploaded_file.pfn.empty()) { if(!job_output_status_add_file(*job, config, uploaded_file)) { logger.msg(Arc::WARNING, "%s: Failed to write list of output status files", jobid); } } } if (dtr->get_source()->CheckSize()) dtr_transfer_statistics += "size=" + Arc::tostring(dtr->get_source()->GetSize()) + ','; dtr_transfer_statistics += "starttime=" + dtr->get_creation_time().str(Arc::UTCTime) + ','; dtr_transfer_statistics += "endtime=" + Arc::Time().str(Arc::UTCTime); } else if (dtr->get_destination()->Local()) { // input files dtr_transfer_statistics = "inputfile:url=" + dtr->get_source()->str() + ','; if (!job_input_read_file(jobid, config, files)) { logger.msg(Arc::WARNING,"%s: Failed to read list of input files", jobid); } else { // go through list and take out this file for (std::list::iterator i = files.begin(); i != files.end();) { // compare 'standard' URLs Arc::URL file_lfn(i->lfn); Arc::URL dtr_lfn(dtr->get_source()->str()); if (file_lfn.str() == dtr_lfn.str()) { struct stat st; Arc::FileStat(job->SessionDir() + i->pfn, &st, job_uid, job_gid, true); dtr_transfer_statistics += "size=" + Arc::tostring(st.st_size) + ','; i = files.erase(i); break; } else { ++i; } } // write back .input file if (!job_input_write_file(*job, config, files)) { logger.msg(Arc::WARNING, "%s: Failed to write list of input files", jobid); } } dtr_transfer_statistics += "starttime=" + dtr->get_creation_time().str(Arc::UTCTime) + ','; dtr_transfer_statistics += "endtime=" + Arc::Time().str(Arc::UTCTime) + ','; if (dtr->get_cache_state() == DataStaging::CACHE_ALREADY_PRESENT) dtr_transfer_statistics += "fromcache=yes"; else dtr_transfer_statistics += "fromcache=no"; } else { // transfer between two remote endpoints, shouldn't happen... logger.msg(Arc::WARNING, "%s: Received DTR with two remote endpoints!"); } } // get DTRs for this job id Arc::AutoLock dlock(dtrs_lock); std::pair::iterator, std::multimap::iterator> dtr_iterator = active_dtrs.equal_range(jobid); if (dtr_iterator.first == dtr_iterator.second) { finished_jobs[jobid] += std::string(""); // It is not clear either this is error. At least mark it as finished. dlock.unlock(); logger.msg(Arc::WARNING, "No active job id %s", jobid); // No DTRs recorded. But still we have job ref. It is probably safer to return it. 
jobs_processing.Erase(job); jobs.RequestAttention(job); return true; } // Print transfer statistics std::string fname = job_control_path(config.ControlDir(),job->get_id(),sfx_statistics); std::ofstream f(fname.c_str(),std::ios::out | std::ios::app); if(f.is_open()) { f << dtr_transfer_statistics << std::endl; } f.close(); // remove this DTR from list for (std::multimap<std::string, std::string>::iterator i = dtr_iterator.first; i != dtr_iterator.second; ++i) { if (i->second == dtr->get_id()) { active_dtrs.erase(i); break; } } // check if any DTRs left from this job, if so return if (active_dtrs.find(jobid) != active_dtrs.end()) { // still have some DTRs running return true; } // No DTRs left, clean up session dir if upload or failed download // But first add the DTR back to the active list to avoid race condition // caused by calling hasJob() between removing from active and adding to // finished, which results in job being submitted to DTR again active_dtrs.insert(std::pair<std::string, std::string>(jobid, dtr->get_id())); bool finished_with_error = ((finished_jobs.find(jobid) != finished_jobs.end() && !finished_jobs[jobid].empty()) || dtr->get_status() == DataStaging::DTRStatus::CANCELLED); dlock.unlock(); if (dtr->get_source()->Local()) { // list of files to keep in session dir std::list<FileData> files; if (!job_output_read_file(jobid, config, files)) logger.msg(Arc::WARNING, "%s: Failed to read list of output files, can't clean up session dir", jobid); else { if (finished_with_error) { // if error with uploads, don't remove dynamic output files so that resume will work for (std::list<FileData>::iterator i = files.begin(); i != files.end(); ++i) { if (i->pfn.size() > 1 && i->pfn[1] == '@') { std::string dynamic_output(session_dir+'/'+i->pfn.substr(2)); FileData fd(std::string(i->pfn.erase(1,1)), ""); files.push_back(fd); // also add files left inside dynamic output file std::list<FileData> dynamic_files; if (!job_Xput_read_file(dynamic_output, dynamic_files, job_uid, job_gid)) { logger.msg(Arc::WARNING, "%s: Failed to read dynamic output files in %s", jobid, dynamic_output); } else { for (std::list<FileData>::iterator dynamic_file = dynamic_files.begin(); dynamic_file != dynamic_files.end(); ++dynamic_file) { FileData f(dynamic_file->pfn, ""); files.push_back(f); } } } } } std::list<std::string> tokeep(files.size()); std::transform(files.begin(), files.end(), tokeep.begin(), filedata_pfn); if (!Arc::DirDeleteExcl(job->SessionDir(), tokeep, true, job_uid, job_gid)) { logger.msg(Arc::WARNING, "%s: Failed to clean up session dir", jobid); } } // clean up cache joblinks CleanCacheJobLinks(config, job); } else if (finished_with_error) { // clean all files still in input list which could be half-downloaded std::list<FileData> files; if (!job_input_read_file(jobid, config, files)) logger.msg(Arc::WARNING, "%s: Failed to read list of input files, can't clean up session dir", jobid); else { std::list<std::string> todelete; for (std::list<FileData>::const_iterator f = files.begin(); f != files.end(); ++f) { if (f->lfn.find(':') != std::string::npos) { todelete.push_back(f->pfn); } } if (!Arc::DirDeleteExcl(job->SessionDir(), todelete, false, job_uid, job_gid)) { logger.msg(Arc::WARNING, "%s: Failed to clean up session dir", jobid); } } } // add to finished jobs (without overwriting existing error) and finally // remove from active dlock.lock(); active_dtrs.erase(jobid); finished_jobs[jobid] += ""; // log summary to DTR log and A-REX log if (finished_jobs[jobid].empty()) dtr->get_logger()->msg(Arc::INFO, "%s: All %s %s successfully", jobid, dtr->get_source()->Local() ? istring("uploads") : istring("downloads"), (dtr->get_status() == DataStaging::DTRStatus::CANCELLED) ? istring("cancelled") : istring("finished")); else dtr->get_logger()->msg(Arc::INFO, "%s: Some %s failed", jobid, dtr->get_source()->Local() ? istring("uploads") : istring("downloads")); dlock.unlock(); logger.msg(Arc::DEBUG, "%s: Requesting attention from DTR generator", jobid); // Passing job to lower priority queue - hence must use Erase. jobs_processing.Erase(job); jobs.RequestAttention(job); return true; } bool DTRGenerator::processReceivedJob(GMJobRef& job) { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator is requested to process null job"); return false; } JobId jobid(job->get_id()); logger.msg(Arc::VERBOSE, "%s: Received data staging request to %s files", jobid, (job->get_state() == JOB_STATE_PREPARING ? istring("download") : istring("upload"))); uid_t job_uid = config.StrictSession() ? job->get_user().get_uid() : 0; gid_t job_gid = config.StrictSession() ? job->get_user().get_gid() : 0; // Default credentials to be used for transferring files if not specified per file std::string default_cred = job_proxy_filename(jobid, config); // TODO: drop job.proxy as source of delegation std::string default_cred_type; JobLocalDescription job_desc; if(job_local_read_file(jobid, config, job_desc)) { if(!job_desc.delegationid.empty()) { ARex::DelegationStores* delegs = config.GetDelegations(); if(delegs) { DelegationStore& deleg = delegs->operator[](config.DelegationDir()); std::list<std::string> meta; std::string fname = deleg.FindCred(job_desc.delegationid, job_desc.DN, meta); if(!fname.empty()) { default_cred = fname; if(!meta.empty()) default_cred_type = meta.front(); } } } } // Collect credential info for DTRs DataStaging::DTRCredentialInfo cred_info(job_desc.DN, job_desc.expiretime, job_desc.voms); // Create a file for the transfer statistics and fix its permissions std::string fname = job_control_path(config.ControlDir(),jobid,sfx_statistics); std::ofstream f(fname.c_str(),std::ios::out | std::ios::app); f.close(); fix_file_permissions(fname); // read in input/output files std::list<FileData> files; bool replication = false; // output files need to be read whether PREPARING or FINISHING std::list<FileData> output_files; if (!job_output_read_file(jobid, config, output_files)) { logger.msg(Arc::ERROR, "%s: Failed to read list of output files", jobid); { Arc::AutoLock<Arc::SimpleCondition> dlock(dtrs_lock); finished_jobs[jobid] = std::string("Failed to read list of output files"); } if (job->get_state() == JOB_STATE_FINISHING) CleanCacheJobLinks(config, job); return false; } Arc::Time sessiondir_processing_start; if (job->get_state() == JOB_STATE_PREPARING) { if (!job_input_read_file(jobid, config, files)) { logger.msg(Arc::ERROR, "%s: Failed to read list of input files", jobid); Arc::AutoLock<Arc::SimpleCondition> dlock(dtrs_lock); finished_jobs[jobid] = std::string("Failed to read list of input files"); return false; } // check for duplicates (see bug 1285) for (std::list<FileData>::iterator i = files.begin(); i != files.end(); i++) { for (std::list<FileData>::iterator j = files.begin(); j != files.end(); j++) { if (i != j && j->pfn == i->pfn) { logger.msg(Arc::ERROR, "%s: Duplicate file in list of input files: %s", jobid, i->pfn); Arc::AutoLock<Arc::SimpleCondition> dlock(dtrs_lock); finished_jobs[jobid] = std::string("Duplicate file in list of input files: " + i->pfn); return false; } } } // check if any input files are also output files (bug 1387 and 2793) for (std::list<FileData>::iterator j = output_files.begin(); j != output_files.end(); j++) { for (std::list<FileData>::iterator i = files.begin(); i != files.end(); i++) {
if (i->pfn == j->pfn && i->lfn.find(':') != std::string::npos) { Arc::URL u(i->lfn); std::string opt = u.Option("cache"); // don't add the copy option if it is already set or the current option is "no" or "renew" if (opt.empty() || !(opt == "no" || opt == "renew" || opt == "copy")) { u.AddOption("cache", "copy", true); i->lfn = u.fullstr(); } } } } // pre-clean session dir before downloading std::list<std::string> todelete; for (std::list<FileData>::const_iterator f = files.begin(); f != files.end(); ++f) { if (f->lfn.find(':') != std::string::npos) { todelete.push_back(f->pfn); } } if (!Arc::DirDeleteExcl(job->SessionDir(), todelete, false, job_uid, job_gid)) { logger.msg(Arc::ERROR, "%s: Failed to clean up session dir", jobid); Arc::AutoLock<Arc::SimpleCondition> dlock(dtrs_lock); finished_jobs[jobid] = std::string("Failed to clean up session dir before downloading inputs"); return false; } } // PREPARING else if (job->get_state() == JOB_STATE_FINISHING) { files = output_files; std::list<FileData>::iterator it; // add any output files dynamically added by the user during the job and // resolve directories for (it = files.begin(); it != files.end();) { if (it->pfn.find("@") == 1) { // GM puts a slash on the front of the local file // Following is always empty currently. But it will start working as soon as // there is a way to pass credentials for dynamic files. But so far default_cred // is always picked up. std::string cred(it->cred); std::string cred_type(it->cred_type); if(cred.empty()) { cred = default_cred; cred_type = default_cred_type; } std::list<FileData> files_; std::string outputfilelist = job->SessionDir() + std::string("/") + it->pfn.substr(2); logger.msg(Arc::INFO, "%s: Reading output files from user generated list in %s", jobid, outputfilelist); if (!job_Xput_read_file(outputfilelist, files_, job_uid, job_gid)) { logger.msg(Arc::ERROR, "%s: Error reading user generated output file list in %s", jobid, outputfilelist); Arc::AutoLock<Arc::SimpleCondition> dlock(dtrs_lock); // Only write this failure if no previous failure if (!job->CheckFailure(config)) { finished_jobs[jobid] = std::string("Error reading user generated output file list"); } else { finished_jobs[jobid] = ""; } dlock.unlock(); CleanCacheJobLinks(config, job); return false; } // Attach dynamic files and assign credentials to them unless already available for(std::list<FileData>::iterator it_ = files_.begin(); it_ != files_.end(); ++it_) { if(it_->cred.empty()) { it_->cred = cred; it_->cred_type = cred_type; } files.push_back(*it_); } it->pfn.erase(1, 1); ++it; continue; } if (it->pfn.rfind('/') == it->pfn.length()-1) { if (it->lfn.find(':') != std::string::npos) { std::string cred(it->cred); std::string cred_type(it->cred_type); std::string dir(job->SessionDir() + it->pfn); std::list<std::string> entries; if (!Arc::DirList(dir, entries, true, job_uid, job_gid)) { logger.msg(Arc::ERROR, "%s: Failed to list output directory %s: %s", jobid, dir, Arc::StrError(errno)); Arc::AutoLock<Arc::SimpleCondition> dlock(dtrs_lock); // Only write this failure if no previous failure if (!job->CheckFailure(config)) { finished_jobs[jobid] = std::string("Failed to list output directory"); } else { finished_jobs[jobid] = ""; } dlock.unlock(); CleanCacheJobLinks(config, job); return false; } // add entries which are not directories or links to output file list struct stat st; for (std::list<std::string>::iterator i = entries.begin(); i != entries.end(); ++i) { if (Arc::FileStat(*i, &st, job_uid, job_gid, false) && S_ISREG(st.st_mode)) { std::string lfn(it->lfn + '/' + i->substr(job->SessionDir().length()+it->pfn.length())); std::string pfn(i->substr(job->SessionDir().length()));
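// pfn is the session-dir-relative path of the file just discovered under the
// output directory; lfn appends the same relative tail to the destination URL,
// so the upload mirrors the local directory layout.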
logger.msg(Arc::DEBUG, "%s: Adding new output file %s: %s", jobid, pfn, lfn); FileData fd(pfn, lfn); fd.cred = cred; fd.cred_type = cred_type; files.push_back(fd); } } it = files.erase(it); continue; } // Remove trailing slashes otherwise it will be cleaned in DirDeleteExcl std::string::size_type pos = it->pfn.find_last_not_of('/'); it->pfn.resize((pos == std::string::npos)?1:(pos+1)); } ++it; } // check if any files share the same LFN, if so allow overwriting existing LFN for (it = files.begin(); it != files.end(); it++) { bool done = false; for (std::list::iterator it2 = files.begin(); it2 != files.end(); it2++) { if (it != it2 && !it->lfn.empty() && !it2->lfn.empty()) { // error if lfns (including locations) are identical if (it->lfn == it2->lfn) { logger.msg(Arc::ERROR, "%s: Two identical output destinations: %s", jobid, it->lfn); { Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = std::string("Two identical output destinations: " + it->lfn); } CleanCacheJobLinks(config, job); return false; } Arc::URL u_it(it->lfn); Arc::URL u_it2(it2->lfn); if (u_it == u_it2) { // error if pfns are different if (it->pfn != it2->pfn) { logger.msg(Arc::ERROR, "%s: Cannot upload two different files %s and %s to same LFN: %s", jobid, it->pfn, it2->pfn, it->lfn); { Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = std::string("Cannot upload two different files to same LFN: " + it->lfn); } CleanCacheJobLinks(config, job); return false; } replication = true; done = true; break; } } } if (done) break; } // pre-clean session dir before uploading std::list tokeep(files.size()); std::transform(files.begin(), files.end(), tokeep.begin(), filedata_pfn); if (!Arc::DirDeleteExcl(job->SessionDir(), tokeep, true, job_uid, job_gid)) { logger.msg(Arc::ERROR, "%s: Failed to clean up session dir", jobid); { Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = std::string("Failed to clean up session dir before uploading outputs"); } CleanCacheJobLinks(config, job); return false; } } // FINISHING else { // bad state logger.msg(Arc::ERROR, "%s: Received job in a bad state: %s", jobid, job->get_state_name()); Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = std::string("Logic error: DTR Generator received job in a bad state"); return false; } Arc::Time sessiondir_processing_end; Arc::Period sessiondir_processing_time = sessiondir_processing_end - sessiondir_processing_start; if ((sessiondir_processing_time.GetPeriod() >= 1) || (sessiondir_processing_time.GetPeriodNanoseconds() > 100000000)) { // >0.1s logger.msg(Arc::WARNING, "%s: Session directory processing takes too long - %u.%06u seconds", jobid, sessiondir_processing_time.GetPeriod(), sessiondir_processing_time.GetPeriodNanoseconds()/1000); } Arc::initializeCredentialsType cred_init_type(Arc::initializeCredentialsType::SkipCredentials); Arc::UserConfig usercfg(cred_init_type); usercfg.UtilsDirPath(config.ControlDir()); usercfg.CACertificatesDirectory(config.CertDir()); if (config.StrictSession()) usercfg.SetUser(job->get_user()); // TODO: chelonia bartenders // create job.id.errors file with correct permissions to add to Logger job_errors_mark_put(*job, config); if (files.empty()) { // if job is FINISHING then clean up cache joblinks if (job->get_state() == JOB_STATE_FINISHING) CleanCacheJobLinks(config, job); // nothing else to do so wake up GM thread and return Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = ""; return false; } // flag to say whether at least one file needs to be staged bool staging = false; for (std::list::iterator i = 
files.begin(); i != files.end(); ++i) { if (i->lfn.find(":") == std::string::npos) continue; // user down/uploadable file staging = true; std::string source; std::string destination; if (job->get_state() == JOB_STATE_PREPARING) { // PREPARING source = i->lfn; destination = "file:" + job->SessionDir() + i->pfn; } else { // FINISHING source = "file:" + job->SessionDir() + i->pfn; // Upload to dest ending in '/': append filename to lfn // Note: won't work for nested URLs used for index services if (i->lfn.rfind('/') == i->lfn.length()-1) { destination = i->lfn + i->pfn.substr(i->pfn.rfind('/')+1); } else { destination = i->lfn; } } // Check if this file was recovered from a crash, if so add overwrite option for (std::list<std::string>::iterator file = recovered_files.begin(); file != recovered_files.end();) { if (*file == destination) { logger.msg(Arc::WARNING, "%s: Destination file %s was possibly left unfinished" " from previous A-REX run, will overwrite", jobid, destination); Arc::URL u(destination); if (u) { u.AddOption("overwrite=yes", true); destination = u.fullstr(); } file = recovered_files.erase(file); } else { ++file; } } // Add common purpose URL options from configuration { Arc::URL u(source); if (u) { u.AddOption("httpgetpartial", staging_conf.get_httpgetpartial()?"yes":"no", false); // Consider adding passive and secure here source = u.fullstr(); } } std::string proxy_cred; if(!i->cred.empty()) { if(i->cred_type.empty() || (i->cred_type == "x509")) { usercfg.ProxyPath(i->cred); if (Arc::FileRead(i->cred, proxy_cred)) usercfg.CredentialString(proxy_cred); } else if(i->cred_type == "jwt") { if (Arc::FileRead(i->cred, proxy_cred)) usercfg.OToken(proxy_cred); } } else { if(default_cred_type.empty() || (default_cred_type == "x509")) { usercfg.ProxyPath(default_cred); if (Arc::FileRead(default_cred, proxy_cred)) usercfg.CredentialString(proxy_cred); } else if(default_cred_type == "jwt") { if (Arc::FileRead(default_cred, proxy_cred)) usercfg.OToken(proxy_cred); } } std::list<Arc::LogDestination*> logs; Arc::LogFile* dest = new Arc::LogFile(job_errors_filename(jobid, config)); dest->setReopen(true); dest->setFormat(Arc::MediumFormat); logs.push_back(dest); // Central DTR log if configured if (!staging_conf.get_dtr_central_log().empty()) { Arc::LogFile* central_dtr_log = new Arc::LogFile(staging_conf.get_dtr_central_log()); central_dtr_log->setReopen(true); central_dtr_log->setFormat(Arc::MediumFormat); logs.push_back(central_dtr_log); } // create DTR and send to Scheduler DataStaging::DTR_ptr dtr(new DataStaging::DTR(source, destination, usercfg, jobid, job->get_user().get_uid(), logs, "DataStaging.DTR")); // set retry count (tmp errors only) dtr->set_tries_left(staging_conf.max_retries); // allow the same file to be uploaded to multiple locations with same LFN dtr->set_force_registration(replication); // set sub-share for download or upload dtr->set_sub_share((job->get_state() == JOB_STATE_PREPARING) ?
"download" : "upload"); // set priority as given in job description if (job->GetLocalDescription(config)) dtr->set_priority(job->GetLocalDescription(config)->priority); // set whether to use A-REX host certificate for remote delivery services dtr->host_cert_for_remote_delivery(staging_conf.use_host_cert_for_remote_delivery); dtr->get_job_perf_log().SetOutput(staging_conf.perf_log.GetOutput()); dtr->get_job_perf_log().SetEnabled(staging_conf.perf_log.GetEnabled()); DataStaging::DTRCacheParameters cache_parameters; CacheConfig cache_params(config.CacheParams()); // Substitute cache paths cache_params.substitute(config, job->get_user()); cache_parameters.cache_dirs = cache_params.getCacheDirs(); cache_parameters.readonly_cache_dirs = cache_params.getReadOnlyCacheDirs(); dtr->set_cache_parameters(cache_parameters); dtr->registerCallback(this,DataStaging::GENERATOR); dtr->registerCallback(scheduler, DataStaging::SCHEDULER); // callbacks for info dtr->registerCallback(&info, DataStaging::SCHEDULER); dtr->set_credential_info(cred_info); { Arc::AutoLock dlock(dtrs_lock); active_dtrs.insert(std::pair(jobid, dtr->get_id())); } // send to Scheduler DataStaging::DTR::push(dtr, DataStaging::SCHEDULER); // update .local with transfer share JobLocalDescription *job_desc = new JobLocalDescription; if (!job_local_read_file(jobid, config, *job_desc)) { logger.msg(Arc::ERROR, "%s: Failed reading local information", jobid); delete job_desc; continue; } job_desc->transfershare = dtr->get_transfer_share(); if (!job_local_write_file(*job, config, *job_desc)) { logger.msg(Arc::ERROR, "%s: Failed writing local information", jobid); } delete job_desc; } // files if (!staging) { // nothing needed staged so mark as finished // if job is FINISHING then clean up cache joblinks if (job->get_state() == JOB_STATE_FINISHING) CleanCacheJobLinks(config, job); Arc::AutoLock dlock(dtrs_lock); finished_jobs[jobid] = ""; return false; } jobs_processing.Push(job); // take this job (job should be in jobs_received till now) return true; } bool DTRGenerator::processCancelledJob(const std::string& jobid) { // cancel DTRs in Scheduler logger.msg(Arc::INFO, "%s: Cancelling active DTRs", jobid); scheduler->cancelDTRs(jobid); return true; } DTRGenerator::checkUploadedFilesResult DTRGenerator::checkUploadedFiles(GMJobRef& job) { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator is asked to check files for null job"); return uploadedFilesError; } JobId jobid(job->get_id()); uid_t job_uid = config.StrictSession() ? job->get_user().get_uid() : 0; uid_t job_gid = config.StrictSession() ? 
job->get_user().get_gid() : 0; std::string session_dir; if (job->GetLocalDescription(config) && !job->GetLocalDescription(config)->sessiondir.empty()) session_dir = job->GetLocalDescription(config)->sessiondir; else session_dir = config.SessionRoot(jobid) + '/' + jobid; // get input files list std::list<std::string> uploaded_files; std::list<std::string>* uploaded_files_ = NULL; std::list<FileData> input_files; std::list<FileData> input_files_ = input_files; if (!job_input_read_file(jobid, config, input_files)) { job->AddFailure("Error reading list of input files"); logger.msg(Arc::ERROR, "%s: Can't read list of input files", jobid); return uploadedFilesError; } if (job_input_status_read_file(jobid, config, uploaded_files)) { uploaded_files_ = &uploaded_files; } checkUploadedFilesResult res = uploadedFilesSuccess; // loop through each file and check for (std::list<FileData>::iterator i = input_files.begin(); i != input_files.end();) { // all remote files should have been downloaded by this point if (i->lfn.find(":") != std::string::npos) { ++i; continue; } logger.msg(Arc::VERBOSE, "%s: Checking user uploadable file: %s", jobid, i->pfn); std::string error; int err = user_file_exists(*i, session_dir, jobid, error, job_uid, job_gid, uploaded_files_); if (err == 0) { // file is uploaded logger.msg(Arc::VERBOSE, "%s: User has uploaded file %s", jobid, i->pfn); // remove from input list i = input_files.erase(i); input_files_.clear(); for (std::list<FileData>::iterator it = input_files.begin(); it != input_files.end(); ++it) input_files_.push_back(*it); if (!job_input_write_file(*job, config, input_files_)) { logger.msg(Arc::WARNING, "%s: Failed writing changed input file.", jobid); } } else if (err == 1) { // critical failure logger.msg(Arc::ERROR, "%s: Critical error for uploadable file %s", jobid, i->pfn); job->AddFailure("User file: "+i->pfn+" - "+error); res = uploadedFilesError; break; } else { // still waiting logger.msg(Arc::VERBOSE, "%s: User has NOT uploaded file %s", jobid, i->pfn); res = uploadedFilesMissing; ++i; } } // check for timeout if ((res == uploadedFilesMissing) && ((time(NULL) - job->GetStartTime()) > 600)) { // hard-coded timeout for (std::list<FileData>::iterator i = input_files.begin(); i != input_files.end(); ++i) { if (i->lfn.find(":") == std::string::npos) { job->AddFailure("User file: "+i->pfn+" - Timeout waiting"); } } logger.msg(Arc::ERROR, "%s: Uploadable files timed out", jobid); res = uploadedFilesError; } return res; } bool match_list(const std::list<std::string>& slist, const std::string& str) { for(std::list<std::string>::const_iterator s = slist.begin(); s != slist.end(); ++s) { if(*s == str) return true; } return false; } int DTRGenerator::user_file_exists(FileData &dt, const std::string& session_dir, const std::string& jobid, std::string& error, uid_t uid, gid_t gid, const std::list<std::string>* uploaded_files) { struct stat st; std::string file_info(dt.lfn); if (file_info == "*.*") return 0; // do not wait for this file std::string fname = session_dir + '/' + dt.pfn; // check if file exists at all if (!Arc::FileStat(fname.c_str(), &st, uid, gid, false)) return 2; // if no size/checksum was supplied, return success if (file_info.empty()) { // but check status first if available if (uploaded_files) { if (!match_list(*uploaded_files, dt.pfn)) return 2; } return 0; } if (S_ISDIR(st.st_mode)) { error = "Expected file. Directory found."; return 1; } if (!S_ISREG(st.st_mode)) { error = "Expected ordinary file.
Special object found."; return 1; } long long int fsize; long long int fsum; bool have_size = false; bool have_checksum = false; // parse format [size][.checksum] if (file_info[0] == '.') { // checksum only if (!Arc::stringto(file_info.substr(1), fsum)) { logger.msg(Arc::ERROR, "%s: Can't convert checksum %s to int for %s", jobid, file_info.substr(1), dt.pfn); error = "Invalid checksum information"; return 1; } have_checksum = true; } else if (file_info.find('.') == std::string::npos) { // size only if (!Arc::stringto(file_info, fsize)) { logger.msg(Arc::ERROR, "%s: Can't convert filesize %s to int for %s", jobid, file_info, dt.pfn); error = "Invalid file size information"; return 1; } have_size = true; } else { // size and checksum std::vector file_attrs; Arc::tokenize(dt.lfn, file_attrs, "."); if (file_attrs.size() != 2) { logger.msg(Arc::ERROR, "%s: Invalid size/checksum information (%s) for %s", jobid, file_info, dt.pfn); error = "Invalid size/checksum information"; return 1; } if (!Arc::stringto(file_attrs[0], fsize)) { logger.msg(Arc::ERROR, "%s: Can't convert filesize %s to int for %s", jobid, file_attrs[0], dt.pfn); error = "Invalid file size information"; return 1; } if (!Arc::stringto(file_attrs[1], fsum)) { logger.msg(Arc::ERROR, "%s: Can't convert checksum %s to int for %s", jobid, file_attrs[1], dt.pfn); error = "Invalid checksum information"; return 1; } have_size = true; have_checksum = true; } // now check if proper size if (have_size) { if (st.st_size < fsize) return 2; if (st.st_size > fsize) { logger.msg(Arc::ERROR, "%s: Invalid file: %s is too big.", jobid, dt.pfn); error = "Delivered file is bigger than specified."; return 1; } } if (uploaded_files) { if (!match_list(*uploaded_files, dt.pfn)) return 2; } else if (have_checksum) { // calculate checksum (if no better way) int h = -1; Arc::FileAccess* fa = NULL; if ((uid && uid != getuid()) || (gid && gid != getgid())) { fa = new Arc::FileAccess(); if (!fa->fa_setuid(uid, gid)) { delete fa; logger.msg(Arc::ERROR, "%s: Failed to switch user ID to %d/%d to read file %s", jobid, (unsigned int)uid, (unsigned int)gid, dt.pfn); error = "Could not switch user id to read file"; return 1; } if(!fa->fa_open(fname, O_RDONLY, 0)) { delete fa; logger.msg(Arc::ERROR, "%s: Failed to open file %s for reading", jobid, dt.pfn); error = "Failed to open file for reading"; return 1; } } else { h = ::open(fname.c_str(), O_RDONLY); if (h == -1) { // if we can't read that file job won't too logger.msg(Arc::ERROR, "%s: Error accessing file %s", jobid, dt.pfn); error = "Delivered file is unreadable."; return 1; } } Arc::CRC32Sum crc; char buffer[1024]; ssize_t l; for(;;) { if (fa) l = fa->fa_read(buffer, 1024); else l = read(h, buffer, 1024); if (l == -1) { logger.msg(Arc::ERROR, "%s: Error reading file %s", jobid, dt.pfn); error = "Could not read file to compute checksum."; delete fa; return 1; } if (l == 0) break; crc.add(buffer, l); } if (h != -1) close(h); if (fa) fa->fa_close(); delete fa; crc.end(); if (fsum != crc.crc()) { if (have_size) { // size was checked and is ok logger.msg(Arc::ERROR, "%s: File %s has wrong checksum: %llu. 
Expected %lli", jobid, dt.pfn, crc.crc(), fsum); error = "Delivered file has wrong checksum."; return 1; } return 2; // not uploaded yet } logger.msg(Arc::VERBOSE, "%s: Checksum %llu verified for %s", jobid, crc.crc(), dt.pfn); } return 0; // all checks passed - file is ok } void DTRGenerator::readDTRState(const std::string& dtr_log) { std::list lines; // file may not exist if this is the first use of DTR if (!Arc::FileRead(dtr_log, lines)) return; if (!lines.empty()) { logger.msg(Arc::WARNING, "Found unfinished DTR transfers. It is possible the " "previous A-REX process did not shut down normally"); } for (std::list::iterator line = lines.begin(); line != lines.end(); ++line) { std::vector fields; Arc::tokenize(*line, fields); if ((fields.size() == 5 || fields.size() == 6) && (fields.at(1) == "TRANSFERRING" || fields.at(1) == "TRANSFER")) { logger.msg(Arc::VERBOSE, "Found DTR %s for file %s left in transferring state from previous run", fields.at(0), fields.at(4)); recovered_files.push_back(fields.at(4)); } } } void DTRGenerator::CleanCacheJobLinks(const GMConfig& config, const GMJobRef& job) const { if(!job) { logger.msg(Arc::ERROR, "DTRGenerator is requested to clean links for null job"); return; } Arc::Time processing_start; CacheConfig cache_config(config.CacheParams()); cache_config.substitute(config, job->get_user()); // there is no uid switch during Release so uid/gid is not so important Arc::FileCache cache(cache_config.getCacheDirs(), cache_config.getDrainingCacheDirs(), cache_config.getReadOnlyCacheDirs(), job->get_id(), job->get_user().get_uid(), job->get_user().get_gid()); cache.Release(); Arc::Time processing_end; Arc::Period processing_time = processing_end - processing_start; if ((processing_time.GetPeriod() >= 1) || (processing_time.GetPeriodNanoseconds() > 100000000)) { // >0.1s logger.msg(Arc::WARNING, "%s: Cache cleaning takes too long - %u.%06u seconds", job->get_id(), processing_time.GetPeriod(), processing_time.GetPeriodNanoseconds()/1000); } } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/JobsList.h0000644000000000000000000000013215067751327024175 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.865493651 30 ctime=1759499029.521706392 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/JobsList.h0000644000175000002070000003424515067751327026107 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_STATES_H #define GRID_MANAGER_STATES_H #include #include #include #include #include "../conf/StagingConfig.h" #include "GMJob.h" #include "JobDescriptionHandler.h" #include "DTRGenerator.h" namespace ARex { class JobFDesc; class GMConfig; /// ZeroUInt is a wrapper around unsigned int. It provides a consistent default /// value, as int type variables have no predefined value assigned upon /// creation. It also protects from potential counter underflow, to stop /// counter jumping to MAX_INT. TODO: move to common lib? 
class ZeroUInt { private: unsigned int value_; public: ZeroUInt(void):value_(0) { }; ZeroUInt(unsigned int v):value_(v) { }; ZeroUInt(const ZeroUInt& v):value_(v.value_) { }; ZeroUInt& operator=(unsigned int v) { value_=v; return *this; }; ZeroUInt& operator=(const ZeroUInt& v) { value_=v.value_; return *this; }; ZeroUInt& operator++(void) { ++value_; return *this; }; ZeroUInt operator++(int) { ZeroUInt temp(value_); ++value_; return temp; }; ZeroUInt& operator--(void) { if(value_) --value_; return *this; }; ZeroUInt operator--(int) { ZeroUInt temp(value_); if(value_) --value_; return temp; }; operator unsigned int(void) const { return value_; }; }; /// List of jobs. This class contains the main job management logic which moves /// jobs through the state machine. New jobs found through Scan methods are /// held in memory until reaching FINISHED state. class JobsList { private: bool valid; // List of jobs currently tracked in memory conveniently indexed by identifier. // TODO: It would be nice to remove it and use status files distribution among // subfolders in controldir. std::map jobs; mutable std::recursive_mutex jobs_lock; GMJobQueue jobs_processing; // List of jobs currently scheduled for processing GMJobQueue jobs_attention; // List of jobs which need attention Arc::SimpleCondition jobs_attention_cond; GMJobQueue jobs_polling; // List of jobs which need polling soon GMJobQueue jobs_wait_for_running; // List of jobs waiting for limit on running jobs time_t job_slow_polling_last; static time_t const job_slow_polling_period = 24UL*60UL*60UL; // todo: variable Glib::Dir* job_slow_polling_dir; // GM configuration const GMConfig& config; // Staging configuration StagingConfig staging_config; // Generator for handling data staging DTRGenerator dtr_generator; // Job description handler JobDescriptionHandler job_desc_handler; // number of jobs for every state int jobs_num[JOB_STATE_NUM]; int jobs_scripts; // map of number of active jobs for each DN std::map jobs_dn; // number of jobs currently in pending state int jobs_pending; // Add job into list. It is supposed to be called only for jobs which are not in main list. bool AddJob(const JobId &id,uid_t uid,gid_t gid,job_state_t state,const char* reason = NULL); bool AddJob(const JobId &id,uid_t uid,gid_t gid,const char* reason = NULL) { return AddJob(id, uid, gid, JOB_STATE_UNDEFINED, reason); } // Perform all actions necessary in case of job failure bool FailedJob(GMJobRef i,bool cancel); // Cleaning reference to running child process void CleanChildProcess(GMJobRef i); // Remove Job from list. All corresponding files are deleted and pointer is // advanced. If finished is false - job is not destroyed if it is FINISHED // If active is false - job is not destroyed if it is not UNDEFINED. Returns // false if external process is still running. 
//bool DestroyJob(iterator &i,bool finished=true,bool active=true); // Perform actions necessary in case job goes to/is in SUBMITTING/CANCELING state bool state_submitting(GMJobRef i,bool &state_changed); bool state_submitting_success(GMJobRef i,bool &state_changed,std::string local_id); bool state_canceling(GMJobRef i,bool &state_changed); bool state_canceling_success(GMJobRef i,bool &state_changed); // Same for PREPARING/FINISHING bool state_loading(GMJobRef i,bool &state_changed,bool up); // Get the state in which the job failed from .local file job_state_t JobFailStateGet(GMJobRef i); // Write the state in which the job failed to .local file bool JobFailStateRemember(GMJobRef i,job_state_t state,bool internal = true); // In case of job restart, recreates lists of input and output files taking // into account what was already transferred bool RecreateTransferLists(GMJobRef i); // Read into ids all jobs in the given dir except those already being handled bool ScanJobDescs(const std::string& cdir,std::list<JobFDesc>& ids) const; // Check and read into id information about job in the given dir // (id has job id filled on entry) unless job is already handled bool ScanJobDesc(const std::string& cdir,JobFDesc& id); // Read into ids all jobs in the given dir with marks given by suffices // (corresponding to file suffixes) except those of jobs already handled bool ScanMarks(const std::string& cdir,const std::list<std::string>& suffices,std::list<JobFDesc>& ids); // Called after service restart to move jobs that were processing to a // restarting state bool RestartJobs(const std::string& cdir,const std::string& odir); // Release delegation after job finishes void UnlockDelegation(GMJobRef i); // Calculate job expiration time from last state change and configured lifetime time_t PrepareCleanupTime(GMJobRef i, time_t& keep_finished); // Read in information from .local file bool GetLocalDescription(GMJobRef i) const; // Modify job state, log that change and optionally log modification reason void SetJobState(GMJobRef i, job_state_t new_state, const char* reason = NULL); // Modify job state to set it as waiting on some condition or limit before // progressing to the next state void SetJobPending(GMJobRef i, const char* reason); // Update content of job proxy file with one stored in delegations store void UpdateJobCredentials(GMJobRef i); // Main job processing method. Analyze current state of job, perform // necessary actions and advance state or remove job if needed. Iterator 'i' // is advanced or erased inside this function. bool ActJob(GMJobRef& i); // Helper method for ActJob. Finishes processing of job. bool NextJob(GMJobRef i, job_state_t old_state, bool old_pending); // Helper method for ActJob. Finishes processing of job, removes it from list. bool DropJob(GMJobRef& i, job_state_t old_state, bool old_pending); enum ActJobResult { JobSuccess, JobFailed, JobDropped, }; // ActJob() calls one of these methods depending on the state of the job. // Each ActJob*() returns processing result: // JobSuccess - job was passed to other module (usually DTR) and does // not require any additional processing. The module must later // pass job to one of RequestAttention/RequestPolling/RequestSlowPolling // queues. If the module fails to do that job may be lost. // It is also possible to return JobSuccess if ActJob*() methods // already passed job to one of the queues. // JobFailed - job processing failed. Job must be moved to FAILED. // This result is to be removed in the future. // JobDropped - job does not need any further processing and should // be removed from memory. This result is to be removed in the future // when automatic job unloading from RAM is implemented. ActJobResult ActJobUndefined(GMJobRef i); ActJobResult ActJobAccepted(GMJobRef i); ActJobResult ActJobPreparing(GMJobRef i); ActJobResult ActJobSubmitting(GMJobRef i); ActJobResult ActJobCanceling(GMJobRef i); ActJobResult ActJobInlrms(GMJobRef i); ActJobResult ActJobFinishing(GMJobRef i); ActJobResult ActJobFinished(GMJobRef i); ActJobResult ActJobDeleted(GMJobRef i); // Special processing method for job processing failure ActJobResult ActJobFailed(GMJobRef i); // Checks and processes user's request to cancel job. // Returns false if job was not modified (canceled or // failed) and true if canceling/modification took place. bool CheckJobCancelRequest(GMJobRef i); // Checks job state against continuation plugins. // Returns false if job is not allowed to continue. bool CheckJobContinuePlugins(GMJobRef i); // Call ActJob for all jobs in processing queue bool ActJobsProcessing(void); // Inform this instance that job with specified id needs immediate re-processing bool RequestReprocess(GMJobRef i); // Similar to RequestAttention but the job does not need immediate attention. // These jobs will be processed when polling period elapses. // This method/queue is to be removed when the event-driven processing // implementation becomes stable. bool RequestPolling(GMJobRef i); // Register job as waiting for limit for running jobs to be cleared. // This method does not have corresponding ActJobs*. Instead these jobs // are handled by ActJobsAttention if corresponding condition is met. bool RequestWaitForRunning(GMJobRef i); // Even slower polling. Typically once per 24 hours. // This queue is meant for FINISHED and DELETED jobs. // Jobs which are put into this queue are also removed from RAM and // queue content is backed by file/database. // There is no corresponding ActJobs*() method. Instead jobs put into // slow queue are handled in WaitAttention() while there is nothing // assigned for immediate processing. // Note: In current implementation this method does nothing because // /finished/ is used as slow queue. bool RequestSlowPolling(GMJobRef i); // Incrementally scan through old jobs in order to check for // removal time // Returns true if scanning is going on, false if scanning cycle is over. bool ScanOldJobs(void); /// Class to run external processes (helper) class ExternalHelper { private: /// Command being run std::string command; /// Object representing running process Arc::Run *proc; public: ExternalHelper(const std::string &cmd); ~ExternalHelper(); /// Start process if it is not running yet bool run(JobsList const& list); /// Stop process if it is running void stop(); }; /// Class for handling multiple external helper processes class ExternalHelpers: protected Arc::Thread { public: /// Construct instance from commands attached to jobs. ExternalHelpers(std::list<std::string> const& commands, JobsList const& jobs); /// Kill thread handling helpers and destroy this instance. ~ExternalHelpers(); /// Start handling by spawning dedicated thread.
void start(); private: virtual void thread(void); std::list<ExternalHelper> helpers; JobsList const& jobs_list; Arc::SimpleCounter stop_cond; bool stop_request; }; /// Associated external processes ExternalHelpers helpers; // Return reference to object matching given id or null if not found GMJobRef FindJob(const JobId &id); bool HasJob(const JobId &id) const; public: static const int ProcessingQueuePriority = 3; static const int AttentionQueuePriority = 2; static const int WaitQueuePriority = 1; // Constructor. JobsList(const GMConfig& gmconfig); ~JobsList(void); operator bool(void) { return valid; }; bool operator!(void) { return !valid; }; // Information about jobs for external utilities // No of jobs in all active states from ACCEPTED to FINISHING int AcceptedJobs() const; // Whether the number of jobs in batch system or in process of submission to batch system reached the configured limit bool RunningJobsLimitReached() const; // No of jobs in data staging //int ProcessingJobs() const; // No of jobs staging in data before job execution //int PreparingJobs() const; // No of jobs staging out data after job execution //int FinishingJobs() const; // Inform this instance that job with specified id needs attention bool RequestAttention(const JobId& id); bool RequestAttention(GMJobRef i); // Inform this instance that generic unscheduled attention is needed void RequestAttention(); // Call ActJob for all current jobs bool ActJobs(void); // Call ActJob for all jobs for which RequestAttention and RequestWaitFor* - if condition allows - was called bool ActJobsAttention(void); // Call ActJob for all jobs for which RequestPolling was called bool ActJobsPolling(void); // Look for new or restarted jobs. Jobs are added to list with state UNDEFINED and requested for attention. bool ScanNewJobs(void); // Look for new job with specified id. Job is added to list with state UNDEFINED and requested for attention. bool ScanNewJob(const JobId& id); // Look for old job with specified id. Job is added to list with its current state and requested for attention. bool ScanOldJob(const JobId& id); // Pick jobs which have been marked for restarting, cancelling or cleaning bool ScanNewMarks(void); // Rearrange status files on service restart bool RestartJobs(void); // Send signals to external processes to shut down nicely (not implemented) void PrepareToDestroy(void); // Wait for attention request or polling time // While waiting may also perform slow scanning of old jobs void WaitAttention(); // Class to be used in job scanning methods to filter out jobs by their id. class JobFilter { public: JobFilter() {}; virtual ~JobFilter() {}; // This method is called when jobs scanning needs to decide // if job to be picked up. Must return true for suitable job ids. virtual bool accept(const JobId &id) const = 0; }; // Look for all jobs residing in specified control directories. // Fills ids with information about those jobs. // Uses filter to skip jobs which do not fit filter requirements. static bool ScanAllJobs(const std::string& cdir,std::list<JobFDesc>& ids, JobFilter const& filter); // Collect all jobs in all states and return references to their descriptions in alljobs. static bool GetAllJobs(const GMConfig& config, std::list<GMJobRef>& alljobs); // Collect all job ids in all states and return them in alljobs. static bool GetAllJobIds(const GMConfig& config, std::list<JobId>& alljobs); // Collect information about job with specified id. // Returns valid reference if job was found.
static GMJobRef GetJob(const GMConfig& config, const JobId& id); static int CountAllJobs(const GMConfig& config); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/CommFIFO.cpp0000644000000000000000000000013215067751327024336 xustar0030 mtime=1759498967.754640094 30 atime=1759498967.864493635 30 ctime=1759499029.512577589 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/CommFIFO.cpp0000644000175000002070000001742015067751327026244 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include <sys/types.h> #include <sys/stat.h> #include <sys/select.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> #include <ctime> #include "CommFIFO.h" namespace ARex { static const unsigned int MAX_ID_SIZE = 64; static const std::string fifo_file("/gm.fifo"); bool CommFIFO::make_pipe(void) { bool res = false; lock.lock(); if (kick_in != -1) { close(kick_in); kick_in = -1; }; if (kick_out != -1) { close(kick_out); kick_out = -1; }; int filedes[2]; if(pipe(filedes) == 0) { kick_in=filedes[1]; kick_out=filedes[0]; long arg; arg=fcntl(kick_in,F_GETFL); if(arg != -1) { arg|=O_NONBLOCK; fcntl(kick_in,F_SETFL,arg); }; arg=fcntl(kick_out,F_GETFL); if(arg != -1) { arg|=O_NONBLOCK; fcntl(kick_out,F_SETFL,arg); }; res = (kick_in != -1); }; lock.unlock(); return res; } CommFIFO::CommFIFO(void) { timeout_=-1; kick_in=-1; kick_out=-1; make_pipe(); } CommFIFO::~CommFIFO(void) { } bool CommFIFO::wait(int timeout, std::string& event) { time_t start_time = time(NULL); time_t end_time = start_time + timeout; bool have_generic_event = false; bool kicked = false; event.clear(); for(;;) { // Check if there is something in buffers lock.lock(); for(std::list<elem_t>::iterator i = fds.begin();i!=fds.end();++i) { if(!(i->ids.empty())) { event = *(i->ids.begin()); i->ids.pop_front(); lock.unlock(); return true; }; }; lock.unlock(); if(have_generic_event) return true; if(kicked) return false; // If nothing found - wait for incoming information fd_set fin,fout,fexc; FD_ZERO(&fin); FD_ZERO(&fout); FD_ZERO(&fexc); int maxfd=-1; if(kick_out == -1) make_pipe(); // try to recover if had error previously if(kick_out != -1) { maxfd=kick_out; FD_SET(kick_out,&fin); }; lock.lock(); for(std::list<elem_t>::iterator i = fds.begin();i!=fds.end();++i) { if(i->fd < 0) { // try to recover lost pipe std::string pipe_dir = i->path; take_pipe(pipe_dir, *i); if(i->fd < 0) continue; }; if(i->fd > maxfd) maxfd=i->fd; FD_SET(i->fd,&fin); }; lock.unlock(); int err; maxfd++; if(timeout >= 0) { struct timeval t; if(((int)(end_time-start_time)) < 0) return false; // timeout t.tv_sec=end_time-start_time; t.tv_usec=0; if(maxfd > 0) { err = select(maxfd,&fin,&fout,&fexc,&t); } else { sleep(t.tv_sec); err = 0; }; start_time = time(NULL); } else { if(maxfd > 0) { err = select(maxfd,&fin,&fout,&fexc,NULL); } else { err = 0; }; }; if(err == 0) return false; // timeout if(err == -1) { if(errno == EBADF) { // One of fifos must be broken. Let read() find that out. } else if(errno == EINTR) { // interrupted by signal, retry continue; }; // No idea how this could happen and how to deal with it. // Lets try to escape and start from beginning return false; }; lock.lock(); for(std::list<elem_t>::iterator i = fds.begin();i!=fds.end();++i) { if(i->fd < 0) continue; if((err < 0) || FD_ISSET(i->fd,&fin)) { for(;;) { char buf[16]; ssize_t l = read(i->fd,buf,sizeof(buf)); if(l == 0) { break; // eol } else if(l < 0) { if((errno == EBADF) || (errno == EINVAL) || (errno == EIO)) { close(i->fd); close(i->fd_keep); i->fd = -1; i->fd_keep = -1; }; break; } else if(l > 0) { // it must be zero-terminated string representing job id for(ssize_t n = 0; n<l; ++n) { if(buf[n] == '\0') { if(i->buffer.empty()) { have_generic_event = true; } else { i->ids.push_back(i->buffer); i->buffer.clear(); }; } else { // Some sanity check if(i->buffer.length() < MAX_ID_SIZE) i->buffer.append(1,buf[n]); }; }; }; }; // for(;;) }; }; // for(fds) lock.unlock(); if(kick_out >= 0) { if((err < 0) || FD_ISSET(kick_out,&fin)) { for(;;) { // read as much as arrived char buf[16]; ssize_t l = read(kick_out,buf,sizeof(buf)); if(l == -1) { if((errno == EAGAIN) || (errno == EWOULDBLOCK)) { break; // nothing to read more }; // Recover after error make_pipe(); } else if(l == 0) { break; // nothing to read more } else if(l > 0) { kicked = true; break; }; }; }; }; }; return false; } CommFIFO::add_result CommFIFO::take_pipe(const std::string& dir_path, elem_t& el) { std::string path = dir_path + fifo_file; if(mkfifo(path.c_str(),S_IRUSR | S_IWUSR) != 0) { if(errno != EEXIST) { return add_error; }; }; (void)chmod(path.c_str(),S_IRUSR | S_IWUSR); int fd = -1; // This must fail. If not then there is another a-rex hanging around. fd = open(path.c_str(),O_WRONLY | O_NONBLOCK); if(fd != -1) { close(fd); return add_busy; }; // (errno != ENXIO)) { fd = open(path.c_str(),O_RDONLY | O_NONBLOCK); if(fd == -1) return add_error; int fd_keep = open(path.c_str(),O_WRONLY | O_NONBLOCK); if(fd_keep == -1) { close(fd); return add_error; }; el.fd=fd; el.fd_keep=fd_keep; el.path=dir_path; return add_success; } void CommFIFO::kick(void) { if(kick_in >= 0) { char c = '\0'; (void)write(kick_in,&c,1); }; } CommFIFO::add_result CommFIFO::add(const std::string& dir_path) { elem_t el; CommFIFO::add_result result = take_pipe(dir_path, el); if(result == add_success) { lock.lock(); fds.push_back(el); if(kick_in != -1) { char c = '\0'; (void)write(kick_in,&c,1); }; lock.unlock(); }; return result; } static int OpenFIFO(const std::string& path) { // Here O_NONBLOCK ensures open() will fail if nothing listens int fd = open(path.c_str(),O_WRONLY | O_NONBLOCK); // If fd == -1 here there is no FIFO or nothing is listening on another end return fd; } bool CommFIFO::Signal(const std::string& dir_path, const std::string& id) { std::string path = dir_path + fifo_file; int fd = OpenFIFO(path); if(fd == -1) return false; for(std::string::size_type pos = 0; pos <= id.length();) { ssize_t l = write(fd, id.c_str()+pos, id.length()+1-pos); if(l == -1) { if((errno == EAGAIN) || (errno == EWOULDBLOCK)) { sleep(1); // todo: select/poll continue; // retry }; close(fd); return false; }; pos += l; }; close(fd); return true; } bool CommFIFO::Signal(const std::string& dir_path, const std::vector<std::string>& ids) { if(ids.empty()) return true; std::string path = dir_path + fifo_file; int fd = OpenFIFO(path); if(fd == -1) return false; for(std::size_t idx = 0; idx<ids.size(); ++idx) { std::string const& id = ids[idx]; for(std::string::size_type pos = 0; pos <= id.length();) { ssize_t l = write(fd, id.c_str()+pos, id.length()+1-pos); if(l == -1) { if((errno == EAGAIN) || (errno == EWOULDBLOCK)) { sleep(1); // todo: select/poll continue; // retry }; close(fd); return false; }; pos += l; }; }; close(fd); return true; } bool CommFIFO::Ping(const std::string& dir_path) { std::string path = dir_path + fifo_file; // O_NONBLOCK in OpenFIFO makes open() succeed only if somebody is reading int fd = OpenFIFO(path); if(fd == -1) return false; close(fd); return true; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/ContinuationPlugins.h #ifndef GM_CONTINUATION_PLUGINS_H #define GM_CONTINUATION_PLUGINS_H #include <string> #include <list> #include "GMJob.h" namespace ARex { class ContinuationPlugins { public: typedef enum { act_fail, act_pass, act_log, act_undefined } action_t; class result_t { public: action_t action; int result; std::string response; result_t(action_t act,int res,const std::string& resp):
action(act),result(res),response(resp) { }; result_t(action_t act): action(act),result(0) { }; }; private: class command_t { public: std::string cmd; unsigned int to; action_t onsuccess; action_t onfailure; action_t ontimeout; }; std::list commands[JOB_STATE_NUM]; public: ContinuationPlugins(void); ~ContinuationPlugins(void); bool add(job_state_t state,unsigned int timeout,const char* command); bool add(const char* state,unsigned int timeout,const char* command); bool add(job_state_t state,const char* options,const char* command); bool add(const char* state,const char* options,const char* command); void run(const GMJob &job,const GMConfig& config,std::list& results); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/DTRGenerator.h0000644000000000000000000000013215067751327024744 xustar0030 mtime=1759498967.755420728 30 atime=1759498967.864493635 30 ctime=1759499029.526891732 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/DTRGenerator.h0000644000175000002070000001673415067751327026661 0ustar00mockbuildmock00000000000000#ifndef DTR_GENERATOR_H_ #define DTR_GENERATOR_H_ #include #include #include "../conf/StagingConfig.h" namespace ARex { class GMConfig; class FileData; class GMJobRef; class JobsList; class DTRGenerator; /** * DTRInfo passes state information from data staging to A-REX * via the defined callback, called when the DTR passes to the * certain processes. It could for example write to files in the * control directory, and this information can be picked up and * published by the info system. */ class DTRInfo: public DataStaging::DTRCallback { private: const GMConfig& config; static Arc::Logger logger; public: DTRInfo(const GMConfig& config); virtual void receiveDTR(DataStaging::DTR_ptr dtr); }; class GMJobQueueDTR: public GMJobQueue { private: DTRGenerator& generator; public: GMJobQueueDTR(int priority, char const * name, DTRGenerator& parent): GMJobQueue(priority, name), generator(parent) {}; virtual bool CanSwitch(GMJob const& ref, GMJobQueue const& new_queue, bool to_front); virtual bool CanRemove(GMJob const& ref); }; /** * A-REX implementation of DTR Generator. Note that job migration functionality * present in the down/uploaders has not been implemented here. */ class DTRGenerator: public DataStaging::DTRCallback { private: /** Active DTRs. Map of job id to DTR id(s). */ std::multimap active_dtrs; /** Jobs where all DTRs are finished. Map of job id to failure reason (empty if success) Finished jobs are stored only by ID because they references are already passed back to one of main processing queue. */ std::map finished_jobs; /** Lock for lists - protects active_dtrs and finished_jobs */ Arc::SimpleCondition dtrs_lock; // Event lists /** DTRs received */ std::list dtrs_received; /** Jobs received */ GMJobQueueDTR jobs_received; /** Jobs being processing. This list is not protected and is used only from DTRGenerator::thread() */ GMJobQueueDTR jobs_processing; /** Jobs cancelled. List of Job IDs. */ std::list jobs_cancelled; /** Lock for events. Protects jobs_received, jobs_cancelled and dtrs_received. This object is also used for signaling insertions in aforementioned lists. */ Arc::SimpleCondition event_lock; /** Condition to wait on when stopping Generator */ Arc::SimpleCondition run_condition; /** State of Generator */ DataStaging::ProcessState generator_state; /** Grid manager configuration */ const GMConfig& config; /** A list of files left mid-transfer from a previous process. 
This list is not protected and is used only from DTRGenerator::thread() */ std::list<std::string> recovered_files; /** logger to a-rex log */ static Arc::Logger logger; /** Associated scheduler */ DataStaging::Scheduler* scheduler; /** Staging configuration */ StagingConfig staging_conf; /** Info object for passing DTR info back to A-REX */ DTRInfo info; //static DTRGeneratorCallback receive_dtr; /** The processing object for passing jobs when all DTRs for a job have finished */ JobsList& jobs; /** Private constructors */ DTRGenerator(const DTRGenerator& generator); /** run main thread */ static void main_thread(void* arg); void thread(void); /** Process a received DTR */ bool processReceivedDTR(DataStaging::DTR_ptr dtr); /** Process a received job */ bool processReceivedJob(GMJobRef& job); /** Process a cancelled job */ bool processCancelledJob(const std::string& jobid); /** Read in state left from previous process and fill recovered_files */ void readDTRState(const std::string& dtr_log); /** Clean up joblinks dir in caches for given job (called at the end of upload) */ void CleanCacheJobLinks(const GMConfig& config, const GMJobRef& job) const; /** Check that user-uploadable file exists. * Returns 0 - if file exists * 1 - it is not a proper file or other error * 2 - not there yet * @param dt Filename and size/checksum information * @param session_dir Directory in which to find uploaded file * @param jobid Job ID, used in log messages * @param error Errors are reported in this string * @param uid uid under which to access session dir * @param gid gid under which to access session dir */ static int user_file_exists(FileData &dt, const std::string& session_dir, const std::string& jobid, std::string& error, uid_t uid, gid_t gid, const std::list<std::string>* uploaded_files); public: /** * Start up Generator. * @param config Grid manager configuration. * @param jobs Job list to which jobs are passed back when all their DTRs finish. */ DTRGenerator(const GMConfig& config, JobsList& jobs); /** * Stop Generator */ ~DTRGenerator(); operator bool(void) { return (generator_state == DataStaging::RUNNING); }; bool operator!(void) { return (generator_state != DataStaging::RUNNING); }; /** * Callback called when DTR is finished. This DTR is marked done in the * DTR list and if all DTRs for the job have completed, the job is marked * as done. * @param dtr DTR object sent back from the Scheduler */ virtual void receiveDTR(DataStaging::DTR_ptr dtr); /** * A-REX sends data transfer requests to the data staging system through * this method. It reads the job.id.input/output files, forms DTRs and * sends them to the Scheduler. * @param job Job description object. */ bool receiveJob(GMJobRef& job); /** * This method is used by A-REX to cancel on-going DTRs. A cancel request * is made for each DTR in the job and the method returns. The Scheduler * asynchronously deals with cancelling the DTRs. * @param job The job which is being cancelled */ void cancelJob(const GMJobRef& job); /** * Query status of DTRs in job. If all DTRs are finished, returns true, * otherwise returns false. If true is returned, the JobDescription should * be checked for whether the staging was successful or not by checking * CheckFailure() or GetFailure(). * @param job Description of job to query. Can be modified to add a failure * reason. * @return True if all DTRs in the job are finished, false otherwise. */ bool queryJobFinished(const GMJobRef& job); /** * Query whether the Generator has a record of this job.
* @param job Job to query. * @return True if the job is active or finished. */ bool hasJob(const GMJobRef& job); /** * Remove the job from the Generator. Only finished jobs will be removed, * and a warning will be logged if the job still has active DTRs. This * method should be called after A-REX has finished PREPARING or FINISHING. * @param job The job to remove. */ void removeJob(const GMJobRef& job); enum checkUploadedFilesResult { uploadedFilesSuccess = 0, uploadedFilesError = 1, uploadedFilesMissing = 2 }; /** * Utility method to check that all files the user was supposed to * upload with the job are ready. * @param job Job description, failures will be reported directly in * this object. * @return 0 if file exists, 1 if it is not a proper file or other error, * 2 if the file not there yet */ checkUploadedFilesResult checkUploadedFiles(GMJobRef& job); }; } // namespace ARex #endif /* DTR_GENERATOR_H_ */ nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/ContinuationPlugins.cpp0000644000000000000000000000013215067751327027013 xustar0030 mtime=1759498967.755420728 30 atime=1759498967.864493635 30 ctime=1759499029.517698516 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/ContinuationPlugins.cpp0000644000175000002070000001464415067751327030726 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "../jobs/GMJob.h" #include "../jobs/JobsList.h" #include "../conf/GMConfig.h" #include "ContinuationPlugins.h" namespace ARex { /* Substitution: %I - job id */ ContinuationPlugins::ContinuationPlugins(void) { } ContinuationPlugins::~ContinuationPlugins(void) { } bool ContinuationPlugins::add(job_state_t state,unsigned int timeout,const char* command) { if((state == JOB_STATE_ACCEPTED) || (state == JOB_STATE_PREPARING) || (state == JOB_STATE_SUBMITTING) || (state == JOB_STATE_INLRMS) || (state == JOB_STATE_FINISHING) || (state == JOB_STATE_FINISHED) || (state == JOB_STATE_DELETED)) { command_t cmd; cmd.cmd=command; cmd.to=timeout; cmd.onsuccess=act_pass; cmd.onfailure=act_fail; cmd.ontimeout=act_fail; commands[state].push_back(cmd); } else { return false; }; return true; } bool ContinuationPlugins::add(const char* state,unsigned int timeout,const char* command) { job_state_t i = GMJob::get_state(state); if(i != JOB_STATE_UNDEFINED) { return add(i,timeout,command); }; return false; } static ContinuationPlugins::action_t get_action(const char *s,unsigned int l) { if((l == 4) && (strncasecmp(s,"fail",4) == 0)) return ContinuationPlugins::act_fail; if((l == 4) && (strncasecmp(s,"pass",4) == 0)) return ContinuationPlugins::act_pass; if((l == 3) && (strncasecmp(s,"log",3) == 0)) return ContinuationPlugins::act_log; return ContinuationPlugins::act_undefined; } #define RES_ONSUCCESS 0 #define RES_ONFAILURE 1 #define RES_ONTIMEOUT 2 #define RES_TIMEOUT 3 #define RES_UNDEFINED -1 static int get_result(const char *s,unsigned int l) { if((l == 9) && (strncasecmp(s,"onsuccess",9) == 0)) return RES_ONSUCCESS; if((l == 9) && (strncasecmp(s,"onfailure",9) == 0)) return RES_ONFAILURE; if((l == 9) && (strncasecmp(s,"ontimeout",9) == 0)) return RES_ONTIMEOUT; if((l == 7) && (strncasecmp(s,"timeout",7) == 0)) return RES_TIMEOUT; return RES_UNDEFINED; } bool ContinuationPlugins::add(job_state_t state,const char* options,const char* command) { if((state == JOB_STATE_ACCEPTED) || (state == JOB_STATE_PREPARING) || (state == JOB_STATE_SUBMITTING) || (state == JOB_STATE_INLRMS) || (state == JOB_STATE_FINISHING) || (state == JOB_STATE_FINISHED) || (state 
== JOB_STATE_DELETED)) { } else { return false; }; // go through options separated by ',' action_t onsuccess = act_pass; action_t onfailure = act_fail; action_t ontimeout = act_fail; unsigned int to = 0; const char *opt_p = options; for(;*opt_p;) { const char *next_opt_p = strchr(opt_p,','); if(next_opt_p == NULL) next_opt_p=opt_p+strlen(opt_p); const char *val_p = strchr(opt_p,'='); unsigned int name_len; unsigned int val_len; if((val_p == NULL) || (val_p >= next_opt_p)) { name_len = next_opt_p-opt_p; val_p=next_opt_p; val_len=0; } else { name_len = val_p-opt_p; val_p++; val_len=next_opt_p-val_p; }; action_t act = act_undefined; int res = get_result(opt_p,name_len); if(res == RES_UNDEFINED) { // can be timeout if(val_len != 0) return false; res=RES_TIMEOUT; val_p=opt_p; val_len=next_opt_p-val_p; }; if(res != RES_TIMEOUT) { act=get_action(val_p,val_len); if(act == act_undefined) return false; }; switch(res) { case RES_ONSUCCESS: onsuccess=act; break; case RES_ONFAILURE: onfailure=act; break; case RES_ONTIMEOUT: ontimeout=act; break; case RES_TIMEOUT: { if(val_len > 0) { char* e; to=strtoul(val_p,&e,0); if(e != next_opt_p) return false; } else { to=0; }; }; break; default: return false; }; opt_p=next_opt_p; if(!(*opt_p)) break; opt_p++; }; command_t cmd; cmd.cmd=command; cmd.to=to; cmd.onsuccess=onsuccess; cmd.onfailure=onfailure; cmd.ontimeout=ontimeout; commands[state].push_back(cmd); return true; } bool ContinuationPlugins::add(const char* state,const char* options,const char* command) { job_state_t i = GMJob::get_state(state); if(i != JOB_STATE_UNDEFINED) { return add(i,options,command); }; return false; } void ContinuationPlugins::run(const GMJob &job,const GMConfig& config,std::list& results) { job_state_t state = job.get_state(); for(std::list::iterator command = commands[state].begin(); command != commands[state].end();++command) { action_t act = act_pass; if(command->cmd.length() == 0) { results.push_back(result_t(act_pass)); continue; }; std::string cmd = command->cmd; for(std::string::size_type p = 0;;) { p=cmd.find('%',p); if(p==std::string::npos) break; if(cmd[p+1]=='I') { cmd.replace(p,2,job.get_id().c_str()); p+=job.get_id().length(); } else if(cmd[p+1]=='S') { cmd.replace(p,2,job.get_state_name()); p+=strlen(job.get_state_name()); } else if(cmd[p+1]=='R') { // Get correct session root (without job subdir) for this job std::string sessionroot(job.SessionDir().substr(0, job.SessionDir().rfind('/'))); cmd.replace(p,2,sessionroot); p+=sessionroot.length(); } else { p+=2; }; }; if(!config.Substitute(cmd, job.get_user())) { results.push_back(result_t(act_undefined)); continue; // or break ? }; std::string res_out(""); std::string res_err(""); int to = command->to; int result = -1; Arc::Run re(cmd); re.AssignStdout(res_out); re.AssignStderr(res_err); re.KeepStdin(); std::string response; if(re.Start()) { bool r = to?re.Wait(to):re.Wait(); if(!r) { response="TIMEOUT"; act=command->ontimeout; } else { result=re.Result(); if(result == 0) { act=command->onsuccess; } else { response="FAILED"; act=command->onfailure; }; }; } else { response="FAILED to start plugin"; // act=command->onfailure; ?? 
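// The plugin process could not be started at all (as opposed to having run
// and failed), so no onsuccess/onfailure/ontimeout action applies; report
// act_undefined and let the caller decide.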
act=act_undefined; }; if(!res_out.empty()) { if(!response.empty()) response+=" : "; response+=res_out; }; if(!res_err.empty()) { if(!response.empty()) response+=" : "; response+=res_err; }; results.push_back(result_t(act,result,response)); if(act == act_fail) break; }; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/CommFIFO.h0000644000000000000000000000013215067751327024003 xustar0030 mtime=1759498967.755420728 30 atime=1759498967.864493635 30 ctime=1759499029.520589161 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/CommFIFO.h0000644000175000002070000000405015067751327025704 0ustar00mockbuildmock00000000000000#ifndef GM_COMMFIFO_H #define GM_COMMFIFO_H #include #include #include namespace ARex { class CommFIFO { public: typedef enum { add_success, add_busy, add_error } add_result; private: class elem_t { public: elem_t(void):fd(-1),fd_keep(-1) { }; int fd; int fd_keep; std::string path; std::list ids; std::string buffer; }; // Open external pipes std::list fds; // Internal pipe used to report about addition // of new external pipes int kick_in; int kick_out; // Multi-threading protection std::recursive_mutex lock; int timeout_; // Create internal pipe bool make_pipe(void); // Open external pipe add_result take_pipe(const std::string& dir_path, elem_t& el); public: CommFIFO(void); ~CommFIFO(void); /// Add new external signal source add_result add(const std::string& dir_path); /// Remove external signal source bool remove(const std::string& dir_path); /// Wait for any event with specified timeout bool wait(int timeout) { std::string event; return wait(timeout, event); }; /// Wait for any event with specified timeout and collect event id bool wait(int timeout, std::string& event); /// Wait for any event with default timeout bool wait(void) { std::string event; return wait(timeout_, event); }; /// Wait for any event with default timeout and collect event id bool wait(std::string& event) { return wait(timeout_, event); }; /// Kick waiting wait() causing it to exit as if a timeout occurred void kick(); /// Set default timeout (negative for infinite) void timeout(int t) { timeout_=t; }; /// Signal to A-REX the id of a job which changed, or send a generic kick if id is not set. static bool Signal(const std::string& dir_path, const std::string& id = ""); static bool Signal(const std::string& dir_path, const std::vector& ids); /// Check if A-REX is listening to signals (without sending any signal). static bool Ping(const std::string& dir_path); }; } // namespace ARex #endif // GM_COMMFIFO_H nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/PaxHeaders/README0000644000000000000000000000013215067751327023153 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.865493651 30 ctime=1759499029.511307398 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/jobs/README0000644000175000002070000000003215067751327025050 0ustar00mockbuildmock00000000000000Main job management code.
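A minimal usage sketch for the CommFIFO interface above (illustrative only, not part of the tarball): the control directory path and the job id are placeholders, and it assumes wait() returns true when a signal was collected within the timeout.

// Illustrative sketch, not part of the source tree.
#include <iostream>
#include <string>
#include "CommFIFO.h"

int main() {
  const std::string control_dir = "/var/spool/arc/jobstatus"; // placeholder path

  // Receiving side (A-REX): register the control directory and wait for signals.
  ARex::CommFIFO fifo;
  if (fifo.add(control_dir) != ARex::CommFIFO::add_success) return 1;
  std::string id;
  if (fifo.wait(10000, id) && !id.empty())   // block up to 10 s for a signal
    std::cout << "job " << id << " changed" << std::endl;

  // Sending side (another process): check that a listener exists,
  // then flag one job as changed.
  if (ARex::CommFIFO::Ping(control_dir))
    ARex::CommFIFO::Signal(control_dir, "some-job-id"); // placeholder id
  return 0;
}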
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/gm-jobs.8.in0000644000000000000000000000013015067751327023370 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 29 ctime=1759499029.43201311 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/gm-jobs.8.in0000644000175000002070000000405015067751327025273 0ustar00mockbuildmock00000000000000.TH gm-jobs 8 "2013-01-30" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME gm-jobs \- displays information and manages current jobs handled by ARC middleware .SH DESCRIPTION .B gm-jobs displays information related to jobs handled by the locally running ARC middleware service A-REX. Different kinds of information may be selected by using various options. This utility can also perform simple management operations - currently cancelling the processing of specific jobs and removing them. The default behavior is to print minimal information about all jobs currently handled by A-REX, together with some statistics. .SH SYNOPSIS gm-jobs [OPTION...] .SH OPTIONS .IP "\fB-h, --help\fR" Show help for available options .IP "\fB-l, --longlist\fR" display more information about each job .IP "\fB-c, --conffile=file\fR" use specified configuration file .IP "\fB-d, --controldir=dir\fR" read information from specified control directory .IP "\fB-s, --showshares\fR" print a summary of jobs in each transfer share. For input (preparing) and output (finishing) files it shows the number of files being copied and the number queued per transfer share .IP "\fB-J, --notshowjobs\fR" do not print list of jobs (printed by default) .IP "\fB-S, --notshowstates\fR" do not print number of jobs in each state (printed by default) .IP "\fB-w, --showservice\fR" print state of the service .IP "\fB-f, --filteruser=dn\fR" show only jobs of user(s) with specified subject name(s) .IP "\fB-k, --killjob=id\fR" request to cancel job(s) with specified ID(s) .IP "\fB-K, --killuser=dn\fR" request to cancel jobs belonging to user(s) with specified subject name(s) .IP "\fB-r, --remjob=id\fR" request to clean job(s) with specified ID(s) .IP "\fB-R, --remuser=dn\fR" request to clean jobs belonging to user(s) with specified subject name(s) .IP "\fB-j, --filterjob=id\fR" show only jobs with specified ID(s) .IP "\fB-e, --listdelegs\fR" print list of available delegation IDs .IP "\fB-E, --showdeleg=id\fR" print delegation token of specified ID(s) .SH AUTHOR Aleksandr Konstantinov nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/accounting0000644000000000000000000000013015067751425023410 xustar0030 mtime=1759499029.478472238 28 atime=1759499034.7655102 30 ctime=1759499029.478472238 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/0000755000175000002070000000000015067751425025371 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327025524 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.468418214 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/Makefile.am0000644000175000002070000000136515067751327027431 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libaccounting.la libaccounting_la_SOURCES = \ AccountingDBSQLite.cpp AAR.cpp AccountingDBAsync.cpp \ AccountingDBSQLite.h AccountingDB.h AAR.h AccountingDBAsync.h \ ../../SQLhelpers.h libaccounting_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) libaccounting_la_LIBADD = \
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(SQLITE_LIBS) noinst_PROGRAMS = test_adb test_adb_SOURCES = test_adb.cpp test_adb_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) test_adb_LDADD = libaccounting.la arcsqlschemadir = $(pkgdatadir)/sql-schema arcsqlschema_DATA = arex_accounting_db_schema_v2.sql EXTRA_DIST = $(arcsqlschema_DATA) nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AccountingDB.h0000644000000000000000000000013215067751327026141 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.475117314 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AccountingDB.h0000644000175000002070000000333115067751327030043 0ustar00mockbuildmock00000000000000#ifndef ARC_ACCOUNTING_DB_H #define ARC_ACCOUNTING_DB_H #include #include "AAR.h" namespace ARex { /// Abstract class for storing A-REX accounting records (AAR) /** * This abstract class provides an interface which can be used to store * AAR information in the database * * \note This class is abstract. All functionality is provided by specialised * child classes. **/ class AccountingDB { public: AccountingDB(const std::string& name) : name(name), isValid(false) {} virtual ~AccountingDB() {} /// Check if database connection is successful /** * @return true if the database connection is successful **/ bool IsValid() const { return isValid; } /// Create new AAR in the database /** * Writes basic info available in ACCEPTED state to the * accounting database. * This method registers a new job that has just been accepted * and writes down the jobID and ownership information **/ virtual bool createAAR(AAR& aar) = 0; /// Update AAR in the database /** * Writes all accounting info when the job reaches FINISHED state; * this updates all dynamic and resource consumption info * collected during the job execution.
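* (dynamic data here means e.g. used wall/CPU time, memory, exit code and data-staging volumes; compare the AAR fields populated in test_adb.cpp below).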
Extra information about the job * is also recorded here **/ virtual bool updateAAR(AAR& aar) = 0; /// Add job event record to AAR /** * Writes a record about a job state change to the accounting log **/ virtual bool addJobEvent(aar_jobevent_t& events, const std::string& jobid) = 0; protected: const std::string name; bool isValid; }; } #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/test_adb.cpp0000644000000000000000000000013115067751327025760 xustar0030 mtime=1759498967.752828852 29 atime=1759498967.86349362 30 ctime=1759499029.478472238 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/test_adb.cpp0000644000175000002070000000477215067751327027675 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "AccountingDBSQLite.h" #include "AAR.h" int main(int argc, char **argv) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::DEBUG); ARex::AccountingDBSQLite adb("/tmp/adb.sqlite"); if (!adb.IsValid()) { std::cerr << "Database connection was not successful" << std::endl; return EXIT_FAILURE; } ARex::AAR aar; aar.jobid = "0DULDmc8azunjwO5upha6lOqABFKDmABFKDmpjJKDmABFKDmQs7RCo"; aar.endpoint = { "org.nordugrid.arcrest", "https://arc.univ.kiev.ua:443/arex/" }; aar.queue = "grid"; aar.userdn = "/DC=org/DC=ugrid/O=people/O=KNU/CN=Andrii Salnikov"; aar.wlcgvo = "testbed.univ.kiev.ua"; aar.status = "in-progress"; aar.submittime = Arc::Time("20190624101218Z"); aar.authtokenattrs.push_back(ARex::aar_authtoken_t("vomsfqan", "/testbed.univ.kiev.ua")); aar.authtokenattrs.push_back(ARex::aar_authtoken_t("vomsfqan", "/testbed.univ.kiev.ua/Role=VO-Admin")); ARex::aar_jobevent_t accepted_event("ACCEPTED", Arc::Time("20190624101218Z")); aar.jobevents.push_back(accepted_event); adb.createAAR(aar); ARex::aar_jobevent_t preparing_event("PREPARING", Arc::Time("20190624121218Z")); adb.addJobEvent(preparing_event, aar.jobid); aar.localid = "2805309"; aar.endtime = Arc::Time("20190625101758Z"); aar.stageinvolume = 100; aar.status = "completed"; aar.exitcode = 0; aar.nodecount = 2; aar.cpucount = 4; aar.usedmemory = 584; aar.usedvirtmemory = 678; aar.usedwalltime = 1; aar.usedcpuusertime = 0; aar.usedcpukerneltime = 0; aar.usedscratch = 0; aar.stageinvolume = 0; aar.stageoutvolume = 0; aar.rtes.push_back("ENV/PROXY"); aar.rtes.push_back("ENV/CANDYPOND"); aar.transfers.push_back({"srm://glite01.grid.hku.hk/dpm/grid.hku.hk/home/ops/nagios-snf-3988/arcce/srm-input", 57, Arc::Time("2019-03-20T09:41:37Z"), Arc::Time("2019-03-20T09:41:41Z"), ARex::dtr_input}); aar.extrainfo.insert(std::pair ("jobname", "test2")); aar.extrainfo.insert(std::pair ("lrms", "pbs")); aar.extrainfo.insert(std::pair ("nodename", "s2")); aar.extrainfo.insert(std::pair ("clienthost", "192.0.2.100")); aar.extrainfo.insert(std::pair ("localuser", "tb175")); adb.updateAAR(aar); } nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355025536 xustar0030 mtime=1759498989.755501926 30 atime=1759499017.900253932 30 ctime=1759499029.469496311 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/Makefile.in0000644000175000002070000010623015067751355027442 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test_adb$(EXEEXT) subdir = src/services/a-rex/grid-manager/accounting ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libaccounting_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libaccounting_la_OBJECTS = 
libaccounting_la-AccountingDBSQLite.lo \ libaccounting_la-AAR.lo libaccounting_la-AccountingDBAsync.lo libaccounting_la_OBJECTS = $(am_libaccounting_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libaccounting_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ am_test_adb_OBJECTS = test_adb-test_adb.$(OBJEXT) test_adb_OBJECTS = $(am_test_adb_OBJECTS) test_adb_DEPENDENCIES = libaccounting.la test_adb_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(test_adb_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libaccounting_la-AAR.Plo \ ./$(DEPDIR)/libaccounting_la-AccountingDBAsync.Plo \ ./$(DEPDIR)/libaccounting_la-AccountingDBSQLite.Plo \ ./$(DEPDIR)/test_adb-test_adb.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libaccounting_la_SOURCES) $(test_adb_SOURCES) DIST_SOURCES = $(libaccounting_la_SOURCES) $(test_adb_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" 
am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(arcsqlschemadir)" DATA = $(arcsqlschema_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = 
@CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = 
@PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = 
libaccounting.la libaccounting_la_SOURCES = \ AccountingDBSQLite.cpp AAR.cpp AccountingDBAsync.cpp \ AccountingDBSQLite.h AccountingDB.h AAR.h AccountingDBAsync.h \ ../../SQLhelpers.h libaccounting_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) libaccounting_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(SQLITE_LIBS) test_adb_SOURCES = test_adb.cpp test_adb_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) test_adb_LDADD = libaccounting.la arcsqlschemadir = $(pkgdatadir)/sql-schema arcsqlschema_DATA = arex_accounting_db_schema_v2.sql EXTRA_DIST = $(arcsqlschema_DATA) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/accounting/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/accounting/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libaccounting.la: $(libaccounting_la_OBJECTS) $(libaccounting_la_DEPENDENCIES) $(EXTRA_libaccounting_la_DEPENDENCIES) $(AM_V_CXXLD)$(libaccounting_la_LINK) $(libaccounting_la_OBJECTS) $(libaccounting_la_LIBADD) $(LIBS) test_adb$(EXEEXT): $(test_adb_OBJECTS) $(test_adb_DEPENDENCIES) $(EXTRA_test_adb_DEPENDENCIES) @rm -f test_adb$(EXEEXT) $(AM_V_CXXLD)$(test_adb_LINK) $(test_adb_OBJECTS) $(test_adb_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccounting_la-AAR.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccounting_la-AccountingDBAsync.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libaccounting_la-AccountingDBSQLite.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_adb-test_adb.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) 
$@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libaccounting_la-AccountingDBSQLite.lo: AccountingDBSQLite.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) -MT libaccounting_la-AccountingDBSQLite.lo -MD -MP -MF $(DEPDIR)/libaccounting_la-AccountingDBSQLite.Tpo -c -o libaccounting_la-AccountingDBSQLite.lo `test -f 'AccountingDBSQLite.cpp' || echo '$(srcdir)/'`AccountingDBSQLite.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccounting_la-AccountingDBSQLite.Tpo $(DEPDIR)/libaccounting_la-AccountingDBSQLite.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='AccountingDBSQLite.cpp' object='libaccounting_la-AccountingDBSQLite.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccounting_la-AccountingDBSQLite.lo `test -f 'AccountingDBSQLite.cpp' || echo '$(srcdir)/'`AccountingDBSQLite.cpp libaccounting_la-AAR.lo: AAR.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) -MT libaccounting_la-AAR.lo -MD -MP -MF $(DEPDIR)/libaccounting_la-AAR.Tpo -c -o libaccounting_la-AAR.lo `test -f 'AAR.cpp' || echo '$(srcdir)/'`AAR.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccounting_la-AAR.Tpo $(DEPDIR)/libaccounting_la-AAR.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='AAR.cpp' object='libaccounting_la-AAR.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccounting_la-AAR.lo `test -f 'AAR.cpp' || echo '$(srcdir)/'`AAR.cpp libaccounting_la-AccountingDBAsync.lo: AccountingDBAsync.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) -MT libaccounting_la-AccountingDBAsync.lo -MD -MP -MF $(DEPDIR)/libaccounting_la-AccountingDBAsync.Tpo -c -o libaccounting_la-AccountingDBAsync.lo `test -f 'AccountingDBAsync.cpp' || echo '$(srcdir)/'`AccountingDBAsync.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libaccounting_la-AccountingDBAsync.Tpo $(DEPDIR)/libaccounting_la-AccountingDBAsync.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='AccountingDBAsync.cpp' object='libaccounting_la-AccountingDBAsync.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libaccounting_la_CXXFLAGS) $(CXXFLAGS) -c -o libaccounting_la-AccountingDBAsync.lo `test -f 'AccountingDBAsync.cpp' || echo '$(srcdir)/'`AccountingDBAsync.cpp test_adb-test_adb.o: test_adb.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_adb_CXXFLAGS) $(CXXFLAGS) -MT test_adb-test_adb.o -MD -MP -MF $(DEPDIR)/test_adb-test_adb.Tpo -c -o test_adb-test_adb.o `test -f 'test_adb.cpp' || echo '$(srcdir)/'`test_adb.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_adb-test_adb.Tpo $(DEPDIR)/test_adb-test_adb.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='test_adb.cpp' object='test_adb-test_adb.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_adb_CXXFLAGS) $(CXXFLAGS) -c -o test_adb-test_adb.o `test -f 'test_adb.cpp' || echo '$(srcdir)/'`test_adb.cpp test_adb-test_adb.obj: test_adb.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_adb_CXXFLAGS) $(CXXFLAGS) -MT test_adb-test_adb.obj -MD -MP -MF $(DEPDIR)/test_adb-test_adb.Tpo -c -o test_adb-test_adb.obj `if test -f 'test_adb.cpp'; then $(CYGPATH_W) 'test_adb.cpp'; else $(CYGPATH_W) '$(srcdir)/test_adb.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_adb-test_adb.Tpo $(DEPDIR)/test_adb-test_adb.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='test_adb.cpp' object='test_adb-test_adb.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(test_adb_CXXFLAGS) $(CXXFLAGS) -c -o test_adb-test_adb.obj `if test -f 'test_adb.cpp'; then $(CYGPATH_W) 'test_adb.cpp'; else $(CYGPATH_W) '$(srcdir)/test_adb.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcsqlschemaDATA: $(arcsqlschema_DATA) @$(NORMAL_INSTALL) @list='$(arcsqlschema_DATA)'; test -n "$(arcsqlschemadir)" || 
list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcsqlschemadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcsqlschemadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcsqlschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcsqlschemadir)" || exit $$?; \ done uninstall-arcsqlschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcsqlschema_DATA)'; test -n "$(arcsqlschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcsqlschemadir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(LTLIBRARIES) $(DATA) installdirs: for dir in "$(DESTDIR)$(arcsqlschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libaccounting_la-AAR.Plo -rm -f ./$(DEPDIR)/libaccounting_la-AccountingDBAsync.Plo -rm -f ./$(DEPDIR)/libaccounting_la-AccountingDBSQLite.Plo -rm -f ./$(DEPDIR)/test_adb-test_adb.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcsqlschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libaccounting_la-AAR.Plo -rm -f ./$(DEPDIR)/libaccounting_la-AccountingDBAsync.Plo -rm -f ./$(DEPDIR)/libaccounting_la-AccountingDBSQLite.Plo -rm -f ./$(DEPDIR)/test_adb-test_adb.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcsqlschemaDATA .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ clean-noinstPROGRAMS cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-arcsqlschemaDATA install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-arcsqlschemaDATA .PRECIOUS: Makefile # 
Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AccountingDBAsync.cpp0000644000000000000000000000013215067751327027472 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.472876341 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AccountingDBAsync.cpp0000644000175000002070000001104515067751327031375 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include "AccountingDBAsync.h" namespace ARex { class AccountingDBThread: public Arc::Thread { friend class AccountingDBAsync; public: static const std::size_t MaxQueueDepth = 10000; static AccountingDBThread& Instance(); bool Push(AccountingDBAsync::Event* event); private: AccountingDBThread(); virtual ~AccountingDBThread(); void thread(); Arc::SimpleCondition lock_; AccountingDBThread* instance_; std::map< std::string,Arc::AutoPointer > dbs_; std::list queue_; // this queue is emptied in destructor bool exited_; }; AccountingDBThread& AccountingDBThread::Instance() { static AccountingDBThread instance; return instance; } AccountingDBThread::AccountingDBThread():exited_(false) { start(); } AccountingDBThread::~AccountingDBThread() { Push(new AccountingDBAsync::EventQuit()); while(!exited_) sleep(1); Arc::AutoLock lock(lock_); while(!queue_.empty()) { delete queue_.front(); queue_.pop_front(); } } bool AccountingDBThread::Push(AccountingDBAsync::Event* event) { Arc::AutoLock lock(lock_); while(queue_.size() >= MaxQueueDepth) { lock.unlock(); sleep(1); // TODO: something clever needed lock.lock(); }; queue_.push_back(event); lock_.signal_nonblock(); return true; } void AccountingDBThread::thread() { while(true) { Arc::AutoLock lock(lock_); if(queue_.empty()) { lock_.wait_nonblock(); if(queue_.empty()) continue; } Arc::AutoPointer event(queue_.front()); queue_.pop_front(); AccountingDBAsync::EventQuit* eventQuit = dynamic_cast(event.Ptr()); if(eventQuit) break; std::map< std::string,Arc::AutoPointer >::iterator db = dbs_.find(event->name); if(db == dbs_.end()) continue; // not expected lock.unlock(); // no need to keep lock anymore - db and event are picked up AccountingDBAsync::EventCreateAAR* eventCreateAAR = dynamic_cast(event.Ptr()); if(eventCreateAAR) { db->second->createAAR(eventCreateAAR->aar); continue; }; AccountingDBAsync::EventUpdateAAR* eventUpdateAAR = dynamic_cast(event.Ptr()); if(eventUpdateAAR) { db->second->updateAAR(eventUpdateAAR->aar); continue; }; AccountingDBAsync::EventAddJobEvent* eventAddJobEvent = dynamic_cast(event.Ptr()); if(eventAddJobEvent) { db->second->addJobEvent(eventAddJobEvent->events, eventAddJobEvent->jobid); continue; }; }; } AccountingDBAsync::AccountingDBAsync(const std::string& name, AccountingDB* (*ctr)(const std::string&)) : AccountingDB(name) { AccountingDBThread& thread(AccountingDBThread::Instance()); Arc::AutoLock lock(thread.lock_); std::map< std::string,Arc::AutoPointer >::iterator dbIt = thread.dbs_.find(name); if(dbIt == thread.dbs_.end()) { AccountingDB* db = ctr(name); if(!db || !db->IsValid()) { delete db; return; } thread.dbs_[name] = db; } isValid = true; } AccountingDBAsync::~AccountingDBAsync() { } bool AccountingDBAsync::createAAR(AAR& aar) { return AccountingDBThread::Instance().Push(new EventCreateAAR(name, aar)); } bool AccountingDBAsync::updateAAR(AAR& aar) { return AccountingDBThread::Instance().Push(new EventUpdateAAR(name, 
aar)); } bool AccountingDBAsync::addJobEvent(aar_jobevent_t& events, const std::string& jobid) { return AccountingDBThread::Instance().Push(new EventAddJobEvent(name, events, jobid)); } AccountingDBAsync::Event::Event(std::string const& name): name(name) { } AccountingDBAsync::Event::~Event() { } AccountingDBAsync::EventCreateAAR::EventCreateAAR(std::string const& name, AAR const& aar): Event(name), aar(aar) { } AccountingDBAsync::EventUpdateAAR::EventUpdateAAR(std::string const& name, AAR const& aar): Event(name), aar(aar) { } AccountingDBAsync::EventAddJobEvent::EventAddJobEvent(std::string const& name, aar_jobevent_t const& events, std::string const& jobid): Event(name), events(events), jobid(jobid) { } AccountingDBAsync::EventQuit::EventQuit(): Event("") { } } nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AccountingDBSQLite.h0000644000000000000000000000013215067751327027223 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.474012265 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.h0000644000175000002070000001167015067751327031132 0ustar00mockbuildmock00000000000000#ifndef ARC_ACCOUNTING_DB_SQLITE_H #define ARC_ACCOUNTING_DB_SQLITE_H #include #include #include #include #include "AccountingDB.h" namespace ARex { /// Store name to ID mappings in database tables typedef std::map name_id_map_t; /// Class implementing A-REX accounting records (AAR) storing in SQLite class AccountingDBSQLite : public AccountingDB { public: AccountingDBSQLite(const std::string& name); ~AccountingDBSQLite(); /// Create new AAR in the database (ACCEPTED) bool createAAR(AAR& aar); /// Update AAR in the database (FINISHED) bool updateAAR(AAR& aar); /// Add job event record to AAR (any other state changes) bool addJobEvent(aar_jobevent_t& events, const std::string& jobid); private: static Arc::Logger logger; std::mutex lock_; // General Name-ID tables name_id_map_t db_queue; name_id_map_t db_users; name_id_map_t db_wlcgvos; name_id_map_t db_fqans; name_id_map_t db_benchmarks; name_id_map_t db_status; // AAR specific structures representation std::map db_endpoints; // Class to handle SQLite DB Operations class SQLiteDB { public: SQLiteDB(const std::string& name, bool create = false); ~SQLiteDB(); bool isConnected(void); int changes(void) { return sqlite3_changes(aDB); } sqlite3_int64 insertID(void) { return sqlite3_last_insert_rowid(aDB); } int exec(const char *sql, int (*callback)(void*,int,char**,char**), void *arg, char **errmsg); void logError(const char* errpfx, int err, Arc::LogLevel level = Arc::DEBUG); private: sqlite3* aDB; void closeDB(); }; SQLiteDB* db; /// Initialize and close connection to SQLite database void initSQLiteDB(void); void closeSQLiteDB(void); /// General helper to execute INSERT statement and return the autoincrement ID unsigned int GeneralSQLInsert(const std::string& sql); /// General helper to execute UPDATE statement bool GeneralSQLUpdate(const std::string& sql); /// General helper that returns the accounting database ID for the requested iname /** * Performs a lookup in the specified table with the [ID, Name] columns. * Updates the name_id_map map of the object **/ unsigned int QueryAndInsertNameID(const std::string& table, const std::string& iname, name_id_map_t* name_id_map); /// General helper that queries an [ID, Name] table and puts the result into the name_id_map map bool QueryNameIDmap(const std::string& table, name_id_map_t* name_id_map); /// Get database ID for the specified queue /** * This
method queries the database for the stored Queue * primary key. If the requested Queue is missing from the database * it will be created and the inserted ID is returned. * * Updates db_queue map. * * If db_queue map is already populated with database data and Queue name * is already inside the map, cached data will be returned. * * In case of failure to find or insert the queue name in the database, * returns 0. * * @return database primary key for provided queue **/ unsigned int getDBQueueId(const std::string& queue); /// Get database ID for the specified user DN unsigned int getDBUserId(const std::string& userdn); /// Get database ID for the specified WLCG VO name unsigned int getDBWLCGVOId(const std::string& voname); /// Get database ID for the specified FQAN unsigned int getDBFQANId(const std::string& fqan); /// Get database ID for the specified benchmark unsigned int getDBBenchmarkId(const std::string& benchmark); /// Get database ID for the specified status string unsigned int getDBStatusId(const std::string& status); /// Query endpoints (dedicated implementation) bool QueryEnpointsmap(void); /// Get endpoint ID unsigned int getDBEndpointId(const aar_endpoint_t& endpoint); /// Get DB ID for an already registered job AAR unsigned int getAARDBId(const AAR& aar); unsigned int getAARDBId(const std::string& jobid); // Write AAR dedicated info tables bool writeRTEs(std::list & rtes, unsigned int recordid); bool writeAuthTokenAttrs(std::list & attrs, unsigned int recordid); bool writeExtraInfo(std::map & info, unsigned int recordid); bool writeDTRs(std::list & dtrs, unsigned int recordid); bool writeEvents(std::list & events, unsigned int recordid); }; } #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AAR.cpp0000644000000000000000000000013215067751327024577 xustar0030 mtime=1759498967.752606304 30 atime=1759498967.862493605 30 ctime=1759499029.471762401 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AAR.cpp0000644000175000002070000003773715067751327026516 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "../jobs/GMJob.h" #include "../conf/GMConfig.h" #include "../files/ControlFileHandling.h" #include "AAR.h" namespace ARex { Arc::Logger AAR::logger(Arc::Logger::getRootLogger(), "AAR"); static void extract_integer(std::string& s,std::string::size_type n = 0) { for(;npw_name) { owner += pw->pw_name; } grp = getgrgid(st.st_gid); if (grp != NULL && grp->gr_name) { owner += ":"; owner += grp->gr_name; } } return owner; } bool AAR::FetchJobData(const GMJob &job,const GMConfig& config,std::map > const& tokenmap, std::list > const& vomsless_vo) { // jobid jobid = job.get_id(); /* analyze job.ID.local and store relevant information */ JobLocalDescription local; if (!job_local_read_file(job.get_id(), config, local)) return false; // endpoint if (local.headnode.empty() || local.interface.empty()) { logger.msg(Arc::ERROR, "Cannot find information about job submission endpoint"); return false; } endpoint = {local.interface, local.headnode}; // *localid if (!local.localid.empty()) localid = local.localid; // ?queue if (!local.queue.empty()) queue = local.queue; // userdn if (!local.DN.empty()) userdn = local.DN; // VOMS info if (!local.voms.empty()) { // wlcgvo wlcgvo = local.voms.front(); // remove first slash from FQAN if (wlcgvo.at(0) == '/') wlcgvo.erase(0,1); // crop everything after slash from FQAN std::size_t wlcgvo_slash = wlcgvo.find('/'); if (wlcgvo_slash
!= std::string::npos) { wlcgvo.erase(wlcgvo_slash, std::string::npos); } // authtokenattrs bool mainfqan = true; for(std::list<std::string>::const_iterator it=local.voms.begin(); it != local.voms.end(); ++it) { authtokenattrs.push_back(aar_authtoken_t("vomsfqan", (*it))); if (mainfqan) { // add first FQAN to main AAR data fqan = (*it); mainfqan = false; } } } else { // apply local.authgroups to vomsless_vo for(auto const & vv: vomsless_vo) { if(vv.first.empty()) { // untagged vomsless_vo applies always but is overwritten if authgroup is matched wlcgvo = vv.second; } else if(std::count(local.authgroups.begin(), local.authgroups.end(), vv.first)) { wlcgvo = vv.second; break; } } } // token claims to auth attributes for(std::map<std::string, std::list<std::string> >::const_iterator it = tokenmap.cbegin(); it != tokenmap.cend(); ++it) { std::map<std::string, std::list<std::string> >::const_iterator claims = local.tokenclaim.find(it->first); if(claims != local.tokenclaim.end()) { for(std::list<std::string>::const_iterator destattr = it->second.cbegin(); destattr != it->second.cend(); ++destattr) { for(std::list<std::string>::const_iterator claim = claims->second.cbegin(); claim != claims->second.cend(); ++claim) { authtokenattrs.emplace_back(*destattr, *claim); } } } } // submittime submittime = local.starttime; // add submit time to event log on ACCEPTED if (job.get_state() == JOB_STATE_ACCEPTED) { aar_jobevent_t startevent(job.get_state_name(), local.starttime); jobevents.push_back(startevent); } // extra info if (!local.jobname.empty()) extrainfo.insert( std::pair<std::string, std::string> ("jobname", local.jobname) ); if (!local.lrms.empty()) extrainfo.insert( std::pair<std::string, std::string> ("lrms", local.lrms) ); if (!local.clientname.empty()) extrainfo.insert( std::pair<std::string, std::string> ("clienthost", local.clientname) ); for (std::list<std::string>::const_iterator it = local.projectnames.begin(); it != local.projectnames.end(); ++it) { extrainfo.insert( std::pair<std::string, std::string> ("projectname", (*it)) ); } if (job.get_state() == JOB_STATE_ACCEPTED) { status = "in-progress"; // nothing from .diag and .statistics is relevant for just ACCEPTED jobs // so we can stop processing here return true; } // job completion status and endtime for FINISHED if (job.get_state() == JOB_STATE_FINISHED) { status = "completed"; // end time time_t t = job_state_time(job.get_id(),config); if (t == 0) t=::time(NULL); endtime = Arc::Time(t); // end event aar_jobevent_t endevent(job.get_state_name(), endtime); jobevents.push_back(endevent); // failure if (job_failed_mark_check(job.get_id(),config)) { status = "failed"; } } /* * analyze job.ID.diag and store relevant information */ std::string fname_src = job_control_path(config.ControlDir(), job.get_id(), sfx_diag); std::list<std::string> diag_data; // nodenames used for node/cpus counting as well as extra info std::list<std::string> nodenames; // different memory metrics are available from different sources // preferred is MAX memory, but use AVERAGE metrics as a fallback long long int mem_avg_total = 0; long long int mem_max_total = 0; long long int mem_avg_resident = 0; long long int mem_max_resident = 0; // benchmark is present bool is_benchmark = false; if (Arc::FileRead(fname_src, diag_data)) { for (std::list<std::string>::iterator line = diag_data.begin(); line != diag_data.end(); ++line) { // parse key=value lowercasing all keys std::string::size_type p = line->find('='); if (p == std::string::npos) continue; std::string key(Arc::lower(line->substr(0, p))); std::string value(line->substr(p+1)); if (key.empty()) continue; // process keys if (key == "nodename") { nodenames.push_back(value); } else if (key == "processors") { long long int n; if (string_to_number(value,n)) cpucount = n; }
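/* Illustrative note, not from the original source: the branches above and below
   parse .diag lines of key=value form, and keys are lowercased before matching.
   A hypothetical .diag fragment:
     nodename=wn01.example.org
     nodename=wn02.example.org
     Processors=4
     WallTime=3600
     exitcode=0
     runtimeenvironments=ENV/PROXY;APPS/EXAMPLE
   With two distinct nodename entries and Processors=4, the code further below
   records cpucount=4 and nodecount=2 (the number of unique node names). */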
else if (key == "exitcode") { long long int n; if (string_to_number(value,n)) exitcode = n; } else if (key == "walltime" ) { long long int n; if (string_to_number(value,n)) usedwalltime = n; } else if (key == "kerneltime" ) { long long int n; if (string_to_number(value,n)) usedcpukerneltime = n; } else if (key == "usertime" ) { long long int n; if (string_to_number(value,n)) usedcpuusertime = n; } else if (key == "maxresidentmemory" ) { long long int n; if (string_to_number(value,n)) mem_max_resident = n; } else if (key == "averageresidentmemory" ) { long long int n; if (string_to_number(value,n)) mem_avg_resident = n; } else if (key == "maxtotalmemory" ) { long long int n; if (string_to_number(value,n)) mem_max_total = n; } else if (key == "averagetotalmemory" ) { long long int n; if (string_to_number(value,n)) mem_avg_total = n; } else if (key == "usedscratch" ) { long long int n; if (string_to_number(value,n)) usedscratch = n; } else if (key == "runtimeenvironments") { // RTEs are split on semicolons Arc::tokenize(value, rtes, ";"); } else if (key == "lrmsstarttime") { aar_jobevent_t lrmsevent("LRMSSTART", Arc::Time(value)); jobevents.push_back(lrmsevent); } else if (key == "lrmsendtime") { aar_jobevent_t lrmsevent("LRMSEND", Arc::Time(value)); jobevents.push_back(lrmsevent); } else if (key == "systemsoftware" ) { extrainfo.insert( std::pair<std::string, std::string> ("systemsoftware", value) ); } else if (key == "wninstance" ) { extrainfo.insert( std::pair<std::string, std::string> ("wninstance", value) ); } else if (key == "benchmark" ) { is_benchmark = true; benchmark = value; } } } // Insert fallback LRMS benchmark if missing in the .diag file if (!is_benchmark) { benchmark = config.DefaultBenchmark(); } // Memory: use max if available, otherwise use average as a fallback usedmemory = mem_max_resident ? mem_max_resident : mem_avg_resident; usedvirtmemory = mem_max_total ?
mem_max_total: mem_avg_total; // Memory: if no virt memory measured - copy from physical if (!usedvirtmemory && usedmemory ) { usedvirtmemory = usedmemory; } // Nodes/CPUs if (!nodenames.empty()) { // if not recorded in .diag, implicitly use the nodenames list to count CPUs if (!cpucount) { cpucount = nodenames.size(); } // add extra info about used nodes extrainfo.insert( std::pair<std::string, std::string> ("nodenames", Arc::join(nodenames, ":")) ); // count the unique nodes nodenames.sort(); nodenames.unique(); nodecount = nodenames.size(); } // localuser[:group] std::string localuser = get_file_owner(fname_src); if (!localuser.empty()) { extrainfo.insert( std::pair<std::string, std::string> ("localuser", localuser) ); } /* * analyze job.ID.statistics and store relevant DTR information */ fname_src = job_control_path(config.ControlDir(), job.get_id(), sfx_statistics); std::list<std::string> statistics_data; // DTR events Arc::Time dtr_in_start(-1); Arc::Time dtr_in_end(-1); Arc::Time dtr_out_start(-1); Arc::Time dtr_out_end(-1); if (Arc::FileRead(fname_src, statistics_data)) { for (std::list<std::string>::iterator line = statistics_data.begin(); line != statistics_data.end(); ++line) { // statistics file has : as first delimiter std::string::size_type p = line->find(':'); if (p == std::string::npos) continue; std::string key(Arc::lower(line->substr(0, p))); std::string value(line->substr(p+1)); if (key.empty()) continue; // new dtr record struct aar_data_transfer_t dtrinfo; bool is_input = true; // the key defines the type of transfer if (key == "inputfile") { dtrinfo.type = dtr_input; } else if (key == "outputfile") { dtrinfo.type = dtr_output; is_input = false; } // parse comma separated values std::list<std::string> dtr_values; Arc::tokenize(value, dtr_values, ","); for (std::list<std::string>::iterator it = dtr_values.begin(); it != dtr_values.end(); ++it) { std::string::size_type kvp = it->find('='); if (kvp == std::string::npos) continue; std::string dkey(Arc::lower(it->substr(0, kvp))); std::string dval(it->substr(kvp+1)); if (dkey.empty()) continue; if (dkey == "url") { dtrinfo.url = dval; } else if (dkey == "size") { long long int n; if (string_to_number(dval,n)) dtrinfo.size = n; } else if (dkey == "starttime") { Arc::Time stime(dval); dtrinfo.transferstart = stime; if (is_input) { if (dtr_in_start == Arc::Time(-1)) dtr_in_start = stime; if (stime < dtr_in_start) dtr_in_start = stime; } else { if (dtr_out_start == Arc::Time(-1)) dtr_out_start = stime; if (stime < dtr_out_start) dtr_out_start = stime; } } else if (dkey == "endtime") { Arc::Time etime(dval); dtrinfo.transferend = etime; if (is_input) { if (dtr_in_end == Arc::Time(-1)) dtr_in_end = etime; if (etime > dtr_in_end) dtr_in_end = etime; } else { if (dtr_out_end == Arc::Time(-1)) dtr_out_end = etime; if (etime > dtr_out_end) dtr_out_end = etime; } } else if (dkey == "fromcache") { if (dval == "yes") { dtrinfo.type = dtr_cache_input; } } } // total counters: stageinvolume, stageoutvolume if ( dtrinfo.type == dtr_input ) stageinvolume += dtrinfo.size; if ( dtrinfo.type == dtr_output ) stageoutvolume += dtrinfo.size; // add dtr info to AAR transfers.push_back(dtrinfo); } } // Events for data stagein/out if (dtr_in_start != Arc::Time(-1) && dtr_in_end != Arc::Time(-1)) { aar_jobevent_t dtrstart("DTRDOWNLOADSTART", dtr_in_start); jobevents.push_back(dtrstart); aar_jobevent_t dtrend("DTRDOWNLOADEND", dtr_in_end); jobevents.push_back(dtrend); } if (dtr_out_start != Arc::Time(-1) && dtr_out_end != Arc::Time(-1)) { aar_jobevent_t dtrstart("DTRUPLOADSTART", dtr_out_start); jobevents.push_back(dtrstart); aar_jobevent_t dtrend("DTRUPLOADEND",
dtr_out_end); jobevents.push_back(dtrend); } return true; } } nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AccountingDBAsync.h0000644000000000000000000000013215067751327027137 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.477371651 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AccountingDBAsync.h0000644000175000002070000000247115067751327031045 0ustar00mockbuildmock00000000000000#ifndef ARC_ACCOUNTING_DB_ASYNC_H #define ARC_ACCOUNTING_DB_ASYNC_H #include #include "AAR.h" #include "AccountingDB.h" namespace ARex { class AccountingDBAsync: public AccountingDB { public: AccountingDBAsync(const std::string& name, AccountingDB* (*ctr)(const std::string&)); virtual ~AccountingDBAsync(); virtual bool createAAR(AAR& aar); virtual bool updateAAR(AAR& aar); virtual bool addJobEvent(aar_jobevent_t& events, const std::string& jobid); class Event { public: Event(std::string const& name); virtual ~Event(); std::string name; }; class EventCreateAAR: public Event { public: EventCreateAAR(std::string const& name, AAR const& aar); AAR aar; }; class EventUpdateAAR: public Event { public: EventUpdateAAR(std::string const& name, AAR const& aar); AAR aar; }; class EventAddJobEvent: public Event { public: EventAddJobEvent(std::string const& name, aar_jobevent_t const& events, std::string const& jobid); aar_jobevent_t events; std::string jobid; }; class EventQuit: public Event { public: EventQuit(); }; }; } #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AAR.h0000644000000000000000000000013215067751327024244 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.476214075 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AAR.h0000644000175000002070000000711415067751327026151 0ustar00mockbuildmock00000000000000#ifndef ARC_AAR_CONTENT_H #define ARC_AAR_CONTENT_H #include #include #include #include #include namespace ARex { /* * Defines the data types to store A-REX Accounting Records (AAR) */ struct aar_endpoint_t { std::string interface; std::string url; bool operator<(const aar_endpoint_t& endpoint) const { if ( interface < endpoint.interface ) return true; if ( interface == endpoint.interface ) { if ( url < endpoint.url ) return true; return false; } return false; }; }; typedef enum { dtr_input = 10, dtr_cache_input = 11, dtr_output = 20 } dtr_type; struct aar_data_transfer_t { std::string url; unsigned long long int size; Arc::Time transferstart; Arc::Time transferend; dtr_type type; }; typedef std::pair aar_jobevent_t; typedef std::pair aar_authtoken_t; class GMJob; class GMConfig; /* * C++ class representing A-REX Accounting Record (AAR) structure and corresponding methods to build it */ class AAR { public: AAR(void): jobid(""), localid(""), queue(""), userdn(""), wlcgvo(""), fqan(""), status(""), benchmark(""), exitcode(1), submittime((time_t)(0)), endtime((time_t)(0)), nodecount(1), cpucount(1), usedmemory(0), usedvirtmemory(0), usedwalltime(0), usedcpuusertime(0), usedcpukerneltime(0), usedscratch(0), stageinvolume(0), stageoutvolume(0) {} /* Unique job ids */ std::string jobid; // job unique A-REX ID std::string localid; // job local LRMS ID /* Submission data */ aar_endpoint_t endpoint; // endpoint type and URL used to submit job std::string queue; // queue std::string userdn; // distinguished name of the job owner std::string wlcgvo; // WLCG VO name std::string fqan; // main accounting FQAN /* Completion data */ std::string status; // Job 
completion status std::string benchmark; // Job's node benchmark int exitcode; // Job exit code /* Main accounting times to search jobs */ Arc::Time submittime; // Job submission time Arc::Time endtime; // Job completion time /* Used resources (SQLite INT is signed 64-bit integer) */ long long int nodecount; long long int cpucount; long long int usedmemory; long long int usedvirtmemory; long long int usedwalltime; long long int usedcpuusertime; long long int usedcpukerneltime; long long int usedscratch; long long int stageinvolume; long long int stageoutvolume; /* Complex extra data */ std::list<aar_authtoken_t> authtokenattrs; // auth token attributes std::list<aar_jobevent_t> jobevents; // events of the job std::list<std::string> rtes; // RTEs std::list<aar_data_transfer_t> transfers; // data transfers information /* Store non-searchable optional text data, such as: * jobname, lrms, nodename, clienthost, localuser, projectname, systemsoftware, wninstance, benchmark */ std::map<std::string, std::string> extrainfo; /// Fetch info from the job's controldir files and fill AAR data structures bool FetchJobData(const GMJob &job,const GMConfig& config,std::map<std::string, std::list<std::string> > const& tokenmap, std::list<std::pair<std::string, std::string> > const& vomsless_vo); private: static Arc::Logger logger; }; } #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/AccountingDBSQLite.cpp0000644000000000000000000000013215067751327027556 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.470634521 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/AccountingDBSQLite.cpp0000644000175000002070000006122015067751327031461 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "../../SQLhelpers.h" #include "AccountingDBSQLite.h" #include "glibmm-compat.h" #define DB_SCHEMA_FILE "arex_accounting_db_schema_v2.sql" namespace ARex { Arc::Logger AccountingDBSQLite::logger(Arc::Logger::getRootLogger(), "AccountingDBSQLite"); int AccountingDBSQLite::SQLiteDB::exec(const char *sql, int (*callback)(void*,int,char**,char**), void *arg, char **errmsg) { int err; while((err = sqlite3_exec(aDB, sql, callback, arg, errmsg)) == SQLITE_BUSY) { // Access to the database is designed in such a way that it should not block for a long time. // So it should be safe to simply wait for the lock to be released without any timeout.
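/* Note: an alternative to this loop would be sqlite3_busy_timeout(aDB, ms), which
   makes SQLite retry internally but gives up after a bounded total wait; the
   explicit loop here retries indefinitely in 10 ms steps, consistent with the
   short-lock assumption stated above. */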
struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; return err; } AccountingDBSQLite::SQLiteDB::SQLiteDB(const std::string& name, bool create): aDB(NULL) { if (aDB != NULL) return; // already open int flags = SQLITE_OPEN_READWRITE; if (create) flags |= SQLITE_OPEN_CREATE; int err; while((err = sqlite3_open_v2(name.c_str(), &aDB, flags, NULL)) == SQLITE_BUSY) { // In case something prevents the database from opening right now - retry closeDB(); struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; if(err != SQLITE_OK) { logError("Unable to open accounting database connection", err, Arc::ERROR); closeDB(); return; }; if (create) { std::string db_schema_str; std::string sql_file = Arc::ArcLocation::Get() + G_DIR_SEPARATOR_S + PKGDATASUBDIR + G_DIR_SEPARATOR_S + "sql-schema" + G_DIR_SEPARATOR_S + DB_SCHEMA_FILE; if(!Arc::FileRead(sql_file, db_schema_str)) { AccountingDBSQLite::logger.msg(Arc::ERROR, "Failed to read database schema file at %s", sql_file); closeDB(); return; } err = exec(db_schema_str.c_str(), NULL, NULL, NULL); if(err != SQLITE_OK) { logError("Failed to initialize accounting database schema", err, Arc::ERROR); closeDB(); return; } AccountingDBSQLite::logger.msg(Arc::INFO, "Accounting database initialized successfully"); } AccountingDBSQLite::logger.msg(Arc::DEBUG, "Accounting database connection has been established"); } void AccountingDBSQLite::SQLiteDB::logError(const char* errpfx, int err, Arc::LogLevel loglevel) { #ifdef HAVE_SQLITE3_ERRSTR std::string msg = sqlite3_errstr(err); #else std::string msg = "error code "+Arc::tostring(err); #endif if (errpfx) { AccountingDBSQLite::logger.msg(loglevel, "%s. SQLite database error: %s", errpfx, msg); } else { AccountingDBSQLite::logger.msg(loglevel, "SQLite database error: %s", msg); } } bool AccountingDBSQLite::SQLiteDB::isConnected(void) { if (aDB) return true; return false; } void AccountingDBSQLite::SQLiteDB::closeDB(void) { if (aDB) { (void)sqlite3_close(aDB); // TODO: handle errors? aDB = NULL; }; } AccountingDBSQLite::SQLiteDB::~SQLiteDB() { closeDB(); } AccountingDBSQLite::AccountingDBSQLite(const std::string& name) : AccountingDB(name), db(NULL) { isValid = false; // check database file exists if (!Glib::file_test(name, Glib::FILE_TEST_EXISTS)) { const std::string dbdir = Glib::path_get_dirname(name); // Check if the parent directory exists if (!Glib::file_test(dbdir, Glib::FILE_TEST_EXISTS)) { if (Arc::DirCreate(dbdir, S_IRWXU, true)) { logger.msg(Arc::INFO, "Directory %s to store accounting database has been created.", dbdir); } else { logger.msg(Arc::ERROR, "Accounting database cannot be created. 
Failed to create parent directory %s.", dbdir); return; } } else if (!Glib::file_test(dbdir, Glib::FILE_TEST_IS_DIR)) { logger.msg(Arc::ERROR, "Accounting database cannot be created: %s is not a directory", dbdir); return; } // initialize new database std::unique_lock<std::mutex> lock(lock_); db = new SQLiteDB(name, true); if (!db->isConnected()){ logger.msg(Arc::ERROR, "Failed to initialize accounting database"); closeSQLiteDB(); return; } isValid = true; return; } else if (!Glib::file_test(name, Glib::FILE_TEST_IS_REGULAR)) { logger.msg(Arc::ERROR, "Accounting database file (%s) is not a regular file", name); return; } // if we are here database location is fine, trying to open initSQLiteDB(); if (!db->isConnected()) { logger.msg(Arc::ERROR, "Error opening accounting database"); closeSQLiteDB(); return; } // TODO: implement schema version checking and possible updates isValid = true; } // init DB connection for multiple usages void AccountingDBSQLite::initSQLiteDB(void) { // already initialized if (db) return; db = new SQLiteDB(name); } // close DB connection void AccountingDBSQLite::closeSQLiteDB(void) { if (db) { logger.msg(Arc::DEBUG, "Closing connection to SQLite accounting database"); delete db; db = NULL; } } AccountingDBSQLite::~AccountingDBSQLite() { closeSQLiteDB(); } // perform insert query and return // 0 - failure // id - autoincrement id of the inserted row unsigned int AccountingDBSQLite::GeneralSQLInsert(const std::string& sql) { if (!isValid) return 0; initSQLiteDB(); std::unique_lock<std::mutex> lock(lock_); int err; err = db->exec(sql.c_str(), NULL, NULL, NULL); if (err != SQLITE_OK) { if (err == SQLITE_CONSTRAINT) { db->logError("It seems the record already exists", err, Arc::ERROR); } else { db->logError("Failed to insert data into database", err, Arc::ERROR); } return 0; } if(db->changes() < 1) { return 0; } sqlite3_int64 newid = db->insertID(); return (unsigned int) newid; } // perform update query bool AccountingDBSQLite::GeneralSQLUpdate(const std::string& sql) { if (!isValid) return false; initSQLiteDB(); std::unique_lock<std::mutex> lock(lock_); int err; err = db->exec(sql.c_str(), NULL, NULL, NULL); if (err != SQLITE_OK ) { db->logError("Failed to update data in the database", err, Arc::ERROR); return false; } if(db->changes() < 1) { return false; } return true; } // callback to build (name,id) map from database table static int ReadIdNameCallback(void* arg, int colnum, char** texts, char** names) { name_id_map_t* name_id_map = static_cast<name_id_map_t*>(arg); std::pair<std::string, unsigned int> rec; rec.second = 0; for (int n = 0; n < colnum; ++n) { if (names[n] && texts[n]) { if (strcmp(names[n], "ID") == 0) { int id; sql_unescape(texts[n], id); rec.second = id; } else if (strcmp(names[n], "Name") == 0) { rec.first = sql_unescape(texts[n]); } } } if(rec.second) name_id_map->insert(rec); return 0; } bool AccountingDBSQLite::QueryNameIDmap(const std::string& table, name_id_map_t* name_id_map) { if (!isValid) return false; initSQLiteDB(); // empty map corresponding to the table if not empty if (!name_id_map->empty()) name_id_map->clear(); std::string sql = "SELECT * FROM " + sql_escape(table); if (db->exec(sql.c_str(), &ReadIdNameCallback, name_id_map, NULL) != SQLITE_OK ) { return false; } return true; } // general helper to build (name,id) map from database table unsigned int AccountingDBSQLite::QueryAndInsertNameID(const std::string& table, const std::string& iname, name_id_map_t* name_id_map) { // fill map with db values if (name_id_map->empty()) { if (!QueryNameIDmap(table, name_id_map)) { logger.msg(Arc::ERROR, "Failed to fetch data
from %s accounting database table", table); return 0; } } // find name name_id_map_t::iterator it; it = name_id_map->find(iname); if (it != name_id_map->end()) { return it->second; } else { // if not found - create the new record in the database std::string sql = "INSERT INTO " + sql_escape(table) + " (Name) VALUES ('" + sql_escape(iname) + "')"; unsigned int newid = GeneralSQLInsert(sql); if ( newid ) { name_id_map->insert(std::pair (iname, newid)); return newid; } else { logger.msg(Arc::ERROR, "Failed to add '%s' into the accounting database %s table", iname, table); } } return 0; } unsigned int AccountingDBSQLite::getDBQueueId(const std::string& queue) { return QueryAndInsertNameID("Queues", queue, &db_queue); } unsigned int AccountingDBSQLite::getDBUserId(const std::string& userdn) { return QueryAndInsertNameID("Users", userdn, &db_users); } unsigned int AccountingDBSQLite::getDBWLCGVOId(const std::string& voname) { return QueryAndInsertNameID("WLCGVOs", voname, &db_wlcgvos); } unsigned int AccountingDBSQLite::getDBFQANId(const std::string& fqan) { return QueryAndInsertNameID("FQANs", fqan, &db_fqans); } unsigned int AccountingDBSQLite::getDBBenchmarkId(const std::string& benchmark) { return QueryAndInsertNameID("Benchmarks", benchmark, &db_benchmarks); } unsigned int AccountingDBSQLite::getDBStatusId(const std::string& status) { return QueryAndInsertNameID("Status", status, &db_status); } // endpoints static int ReadEndpointsCallback(void* arg, int colnum, char** texts, char** names) { std::map * endpoints_map = static_cast*>(arg); std::pair rec; for (int n = 0; n < colnum; ++n) { if (names[n] && texts[n]) { if (strcmp(names[n], "ID") == 0) { int id; sql_unescape(texts[n], id); rec.second = id; } else if (strcmp(names[n], "Interface") == 0) { rec.first.interface = sql_unescape(texts[n]); } else if (strcmp(names[n], "URL") == 0) { rec.first.url = sql_unescape(texts[n]); } } } endpoints_map->insert(rec); return 0; } bool AccountingDBSQLite::QueryEnpointsmap() { if (!isValid) return false; initSQLiteDB(); // empty map corresponding to the table if not empty if (!db_endpoints.empty()) db_endpoints.clear(); std::string sql = "SELECT * FROM Endpoints"; if (db->exec(sql.c_str(), &ReadEndpointsCallback, &db_endpoints, NULL) != SQLITE_OK ) { return false; } return true; } unsigned int AccountingDBSQLite::getDBEndpointId(const aar_endpoint_t& endpoint) { // fill map with db values if (db_endpoints.empty()) { if (!QueryEnpointsmap()) { logger.msg(Arc::ERROR, "Failed to fetch data from accounting database Endpoints table"); return 0; } } // find endpoint std::map ::iterator it; it = db_endpoints.find(endpoint); if (it != db_endpoints.end()) { return it->second; } else { // if not found - create the new record in the database std::string sql = "INSERT INTO Endpoints (Interface, URL) VALUES ('" + sql_escape(endpoint.interface) + "', '" + sql_escape(endpoint.url) +"')"; unsigned int newid = GeneralSQLInsert(sql); if ( newid ) { db_endpoints.insert(std::pair (endpoint, newid)); return newid; } else { logger.msg(Arc::ERROR, "Failed to add '%s' URL (interface type %s) into the accounting database Endpoints table", endpoint.url, endpoint.interface); } } return 0; } // callback to get id from database table static int ReadIdCallback(void* arg, int colnum, char** texts, char** names) { unsigned int* dbid = static_cast(arg); for (int n = 0; n < colnum; ++n) { if (names[n] && texts[n]) { int id; sql_unescape(texts[n], id); *dbid = id; } } return 0; } // AAR processing unsigned int 
AccountingDBSQLite::getAARDBId(const AAR& aar) { if (!isValid) return 0; initSQLiteDB(); unsigned int dbid = 0; std::string sql = "SELECT RecordID FROM AAR WHERE JobID = '" + sql_escape(aar.jobid) + "'"; if (db->exec(sql.c_str(), &ReadIdCallback, &dbid, NULL) != SQLITE_OK ) { logger.msg(Arc::ERROR, "Failed to query AAR database ID for job %s", aar.jobid); return 0; } return dbid; } unsigned int AccountingDBSQLite::getAARDBId(const std::string& jobid) { AAR aar; aar.jobid = jobid; return getAARDBId(aar); } bool AccountingDBSQLite::createAAR(AAR& aar) { if (!isValid) return false; initSQLiteDB(); // get the corresponding IDs in connected tables unsigned int endpointid = getDBEndpointId(aar.endpoint); if (!endpointid) return false; unsigned int queueid = getDBQueueId(aar.queue); if (!queueid) return false; unsigned int userid = getDBUserId(aar.userdn); if (!userid) return false; unsigned int wlcgvoid = getDBWLCGVOId(aar.wlcgvo); if (!wlcgvoid) return false; unsigned int fqanid = getDBFQANId(aar.fqan); if (!fqanid) return false; unsigned int benchmarkid = getDBBenchmarkId(aar.benchmark); if (!benchmarkid) return false; unsigned int statusid = getDBStatusId(aar.status); if (!statusid) return false; // construct insert statement std::string sql = "INSERT INTO AAR (" "JobID, LocalJobID, EndpointID, QueueID, UserID, VOID, FQANID, StatusID, ExitCode, BenchmarkID, " "SubmitTime, EndTime, NodeCount, CPUCount, UsedMemory, UsedVirtMem, UsedWalltime, " "UsedCPUUserTime, UsedCPUKernelTime, UsedScratch, StageInVolume, StageOutVolume ) " "VALUES ('" + sql_escape(aar.jobid) + "', '" + sql_escape(aar.localid) + "', " + sql_escape(endpointid) + ", " + sql_escape(queueid) + ", " + sql_escape(userid) + ", " + sql_escape(wlcgvoid) + ", " + sql_escape(fqanid) + ", " + sql_escape(statusid) + ", " + sql_escape(aar.exitcode) + ", " + sql_escape(benchmarkid) + ", " + sql_escape(aar.submittime.GetTime()) + ", " + sql_escape(aar.endtime.GetTime()) + ", " + sql_escape(aar.nodecount) + ", " + sql_escape(aar.cpucount) + ", " + sql_escape(aar.usedmemory) + ", " + sql_escape(aar.usedvirtmemory) + ", " + sql_escape(aar.usedwalltime) + ", " + sql_escape(aar.usedcpuusertime) + ", " + sql_escape(aar.usedcpukerneltime) + ", " + sql_escape(aar.usedscratch) + ", " + sql_escape(aar.stageinvolume) + ", " + sql_escape(aar.stageoutvolume) + ")"; unsigned int recordid = GeneralSQLInsert(sql); if (!recordid) { logger.msg(Arc::ERROR, "Failed to insert AAR into the database for job %s", aar.jobid); logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } // insert authtoken attributes if (!writeAuthTokenAttrs(aar.authtokenattrs, recordid)) { logger.msg(Arc::ERROR, "Failed to write authtoken attributes for job %s", aar.jobid); } // record ACCEPTED state if (!writeEvents(aar.jobevents, recordid)) { logger.msg(Arc::ERROR, "Failed to write event records for job %s", aar.jobid); } return true; } bool AccountingDBSQLite::updateAAR(AAR& aar) { if (!isValid) return false; initSQLiteDB(); // get AAR ID in the database unsigned int recordid = getAARDBId(aar); if (!recordid) { logger.msg(Arc::ERROR, "Cannot update AAR. 
Cannot find registered AAR for job %s in accounting database.", aar.jobid); return false; } // get the corresponding IDs in connected tables unsigned int statusid = getDBStatusId(aar.status); unsigned int benchmarkid = getDBBenchmarkId(aar.benchmark); // construct update statement // NOTE: it only makes sense to update the dynamic information not available at submission time std::string sql = "UPDATE AAR SET " "LocalJobID = '" + sql_escape(aar.localid) + "', " + "StatusID = " + sql_escape(statusid) + ", " + "ExitCode = " + sql_escape(aar.exitcode) + ", " + "BenchmarkID = " + sql_escape(benchmarkid) + ", " + "EndTime = " + sql_escape(aar.endtime.GetTime()) + ", " + "NodeCount = " + sql_escape(aar.nodecount) + ", " + "CPUCount = " + sql_escape(aar.cpucount) + ", " + "UsedMemory = " + sql_escape(aar.usedmemory) + ", " + "UsedVirtMem = " + sql_escape(aar.usedvirtmemory) + ", " + "UsedWalltime = " + sql_escape(aar.usedwalltime) + ", " + "UsedCPUUserTime = " + sql_escape(aar.usedcpuusertime) + ", " + "UsedCPUKernelTime = " + sql_escape(aar.usedcpukerneltime) + ", " + "UsedScratch = " + sql_escape(aar.usedscratch) + ", " + "StageInVolume = " + sql_escape(aar.stageinvolume) + ", " + "StageOutVolume = " + sql_escape(aar.stageoutvolume) + " " + "WHERE RecordId = " + sql_escape(recordid); // run update if (!GeneralSQLUpdate(sql)) { logger.msg(Arc::ERROR, "Failed to update AAR in the database for job %s", aar.jobid); logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } // write RTE info if (!writeRTEs(aar.rtes, recordid)) { logger.msg(Arc::ERROR, "Failed to write RTEs information for the job %s", aar.jobid); } // write DTR info if (!writeDTRs(aar.transfers, recordid)) { logger.msg(Arc::ERROR, "Failed to write data transfers information for the job %s", aar.jobid); } // write extra information if (!writeExtraInfo(aar.extrainfo, recordid)) { logger.msg(Arc::ERROR, "Failed to write extra information for the job %s", aar.jobid); } // record FINISHED state if (!writeEvents(aar.jobevents, recordid)) { logger.msg(Arc::ERROR, "Failed to write event records for job %s", aar.jobid); } return true; } bool AccountingDBSQLite::writeRTEs(std::list<std::string>& rtes, unsigned int recordid) { if (rtes.empty()) return true; std::string sql = "BEGIN TRANSACTION; "; std::string sql_base = "INSERT INTO RunTimeEnvironments (RecordID, RTEName) VALUES "; for (std::list<std::string>::iterator it=rtes.begin(); it != rtes.end(); ++it) { sql += sql_base + "(" + sql_escape(recordid) + ", '" + sql_escape(*it) + "'); "; } sql += "COMMIT;"; if(!GeneralSQLInsert(sql)) { logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } return true; } bool AccountingDBSQLite::writeAuthTokenAttrs(std::list<aar_authtoken_t>& attrs, unsigned int recordid) { if (attrs.empty()) return true; std::string sql = "BEGIN TRANSACTION; "; std::string sql_base = "INSERT INTO AuthTokenAttributes (RecordID, AttrKey, AttrValue) VALUES "; for (std::list<aar_authtoken_t>::iterator it=attrs.begin(); it!=attrs.end(); ++it) { sql += sql_base + "(" + sql_escape(recordid) + ", '" + sql_escape(it->first) + "', '" + sql_escape(it->second) + "'); "; } sql += "COMMIT;"; if(!GeneralSQLInsert(sql)) { logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } return true; } bool AccountingDBSQLite::writeExtraInfo(std::map<std::string, std::string>& info, unsigned int recordid) { if (info.empty()) return true; std::string sql = "BEGIN TRANSACTION; "; std::string sql_base = "INSERT INTO JobExtraInfo (RecordID, InfoKey, InfoValue) VALUES "; for (std::map<std::string, std::string>::iterator it=info.begin(); it!=info.end(); ++it) { sql
+= sql_base + "(" + sql_escape(recordid) + ", '" + sql_escape(it->first) + "', '" + sql_escape(it->second) + "'); "; } sql += "COMMIT;"; if(!GeneralSQLInsert(sql)) { logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } return true; } bool AccountingDBSQLite::writeDTRs(std::list & dtrs, unsigned int recordid) { if (dtrs.empty()) return true; std::string sql = "BEGIN TRANSACTION; "; std::string sql_base = "INSERT INTO DataTransfers " "(RecordID, URL, FileSize, TransferStart, TransferEnd, TransferType) VALUES "; for (std::list::iterator it=dtrs.begin(); it != dtrs.end(); ++it) { sql += sql_base + "( " + sql_escape(recordid) + ", '" + sql_escape(it->url) + "', " + sql_escape(it->size) + ", " + sql_escape(it->transferstart.GetTime()) + ", " + sql_escape(it->transferend.GetTime()) + ", " + sql_escape(static_cast(it->type)) + "); "; } sql += "COMMIT;"; if(!GeneralSQLInsert(sql)) { logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } return true; } bool AccountingDBSQLite::writeEvents(std::list & events, unsigned int recordid) { if (events.empty()) return true; std::string sql = "BEGIN TRANSACTION; "; std::string sql_base = "INSERT INTO JobEvents (RecordID, EventKey, EventTime) VALUES "; for (std::list::iterator it=events.begin(); it != events.end(); ++it) { sql += sql_base + "( " + sql_escape(recordid) + ", '" + sql_escape(it->first) + "', '" + sql_escape(it->second) + "'); "; } sql += "COMMIT;"; if(!GeneralSQLInsert(sql)) { logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } return true; } bool AccountingDBSQLite::addJobEvent(aar_jobevent_t& event, const std::string& jobid) { unsigned int recordid = getAARDBId(jobid); if (!recordid) { logger.msg(Arc::ERROR, "Unable to add event: cannot find AAR for job %s in accounting database.", jobid); return false; } std::string sql = "INSERT INTO JobEvents (RecordID, EventKey, EventTime) VALUES (" + sql_escape(recordid) + ", '" + sql_escape(event.first) + "', '" + sql_escape(event.second) + "')"; if(!GeneralSQLInsert(sql)) { logger.msg(Arc::DEBUG, "SQL statement used: %s", sql); return false; } return true; } } nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/PaxHeaders/arex_accounting_db_schema_0000644000000000000000000000013215067751327030710 xustar0030 mtime=1759498967.752828852 30 atime=1759498967.862493605 30 ctime=1759499029.479549293 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/accounting/arex_accounting_db_schema_v2.sql0000644000175000002070000001520715067751327033665 0ustar00mockbuildmock00000000000000/* * Main AAR attributes and metrics to search and gather stats for */ CREATE TABLE IF NOT EXISTS AAR ( RecordID INTEGER PRIMARY KEY AUTOINCREMENT, /* Unique job ids */ JobID TEXT NOT NULL UNIQUE, LocalJobID TEXT, /* Submission data */ EndpointID INTEGER NOT NULL, QueueID INTEGER NOT NULL, UserID INTEGER NOT NULL, VOID INTEGER NOT NULL, FQANID INTEGER NOT NULL, /* Completion data */ StatusID INTEGER NOT NULL, ExitCode INTEGER NOT NULL, BenchmarkID INTEGER NOT NULL, /* Main accounting times to search jobs (as unix timestamp) */ SubmitTime INTEGER NOT NULL, EndTime INTEGER NOT NULL, /* Used resources */ NodeCount INTEGER NOT NULL, CPUCount INTEGER NOT NULL, UsedMemory INTEGER NOT NULL, UsedVirtMem INTEGER NOT NULL, UsedWalltime INTEGER NOT NULL, UsedCPUUserTime INTEGER NOT NULL, UsedCPUKernelTime INTEGER NOT NULL, UsedScratch INTEGER NOT NULL, StageInVolume INTEGER NOT NULL, StageOutVolume INTEGER NOT NULL, /* Foreign keys constraints */ FOREIGN KEY(EndpointID) REFERENCES 
Endpoints(ID), FOREIGN KEY(QueueID) REFERENCES Queues(ID), FOREIGN KEY(UserID) REFERENCES Users(ID), FOREIGN KEY(VOID) REFERENCES WLCGVOs(ID), FOREIGN KEY(FQANID) REFERENCES FQANs(ID), FOREIGN KEY(StatusID) REFERENCES Status(ID), FOREIGN KEY(BenchmarkID) REFERENCES Benchmarks(ID) ); CREATE UNIQUE INDEX IF NOT EXISTS AAR_JobID_IDX ON AAR(JobID); CREATE INDEX IF NOT EXISTS AAR_LocalJobID_IDX ON AAR(LocalJobID); CREATE INDEX IF NOT EXISTS AAR_EndpointID_IDX ON AAR(EndpointID); CREATE INDEX IF NOT EXISTS AAR_QueueID_IDX ON AAR(QueueID); CREATE INDEX IF NOT EXISTS AAR_UserID_IDX ON AAR(UserID); CREATE INDEX IF NOT EXISTS AAR_VOID_IDX ON AAR(VOID); CREATE INDEX IF NOT EXISTS AAR_FQAN_IDX ON AAR(FQANID); CREATE INDEX IF NOT EXISTS AAR_StatusID_IDX ON AAR(StatusID); CREATE INDEX IF NOT EXISTS AAR_SubmitTime_IDX ON AAR(SubmitTime); CREATE INDEX IF NOT EXISTS AAR_EndTime_IDX ON AAR(EndTime); CREATE INDEX IF NOT EXISTS AAR_Benchmark_IDX ON AAR(BenchmarkID); /* optimize publishing queries */ CREATE INDEX IF NOT EXISTS AAR_StatusID_EndTime_IDX ON AAR(StatusID, EndTime); /* * Extra tables for AAR normalization */ /* Submission endpoints (limited enum of types and URLs) */ CREATE TABLE IF NOT EXISTS Endpoints ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Interface TEXT NOT NULL, URL TEXT NOT NULL, UNIQUE(Interface, URL) ); CREATE INDEX IF NOT EXISTS Endpoints_Interface_IDX ON Endpoints(Interface); /* Queues (limited enum on particular resource) */ CREATE TABLE IF NOT EXISTS Queues ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL UNIQUE ); CREATE UNIQUE INDEX IF NOT EXISTS Queues_Name_IDX ON Queues(Name); /* Users */ CREATE TABLE IF NOT EXISTS Users ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL UNIQUE ); CREATE UNIQUE INDEX IF NOT EXISTS Users_Name_IDX ON Users(Name); /* WLCG VOs */ CREATE TABLE IF NOT EXISTS WLCGVOs ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL UNIQUE ); CREATE UNIQUE INDEX IF NOT EXISTS WLCGVOs_Name_IDX ON WLCGVOs(Name); INSERT INTO WLCGVOs(Name) VALUES(''); /* FQANs */ CREATE TABLE IF NOT EXISTS FQANs ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL UNIQUE ); CREATE UNIQUE INDEX IF NOT EXISTS FQANs_Name_IDX ON FQANs(Name); INSERT INTO FQANs(Name) VALUES(''); /* Benchmark */ CREATE TABLE IF NOT EXISTS Benchmarks ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL UNIQUE ); CREATE UNIQUE INDEX IF NOT EXISTS Benchmarks_Name_IDX ON Benchmarks(Name); INSERT INTO Benchmarks(Name) VALUES(''); /* Status */ CREATE TABLE IF NOT EXISTS Status ( ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL UNIQUE ); CREATE UNIQUE INDEX IF NOT EXISTS Status_Name_IDX ON Status(Name); /* * Extra data recorded for the job in dedicated tables */ /* User token attributes */ CREATE TABLE IF NOT EXISTS AuthTokenAttributes ( RecordID INTEGER NOT NULL, AttrKey TEXT NOT NULL, AttrValue TEXT, FOREIGN KEY(RecordID) REFERENCES AAR(RecordID) ON DELETE CASCADE ); CREATE INDEX IF NOT EXISTS AuthTokenAttributes_RecordID_IDX ON AuthTokenAttributes(RecordID); CREATE INDEX IF NOT EXISTS AuthTokenAttributes_RecordID_AttrKey_IDX ON AuthTokenAttributes(RecordID, AttrKey); /* Event timestamps for the job */ CREATE TABLE IF NOT EXISTS JobEvents ( RecordID INTEGER NOT NULL, EventKey TEXT NOT NULL, -- including: submit, stageinstart, stageinstop, lrmssubmit, lrmsstart, lrmsend, stageoutstart, stageoutend, finish EventTime TEXT NOT NULL, FOREIGN KEY(RecordID) REFERENCES AAR(RecordID) ON DELETE CASCADE ); CREATE INDEX IF NOT EXISTS JobEvents_RecordID_IDX ON JobEvents(RecordID); /* RTEs 
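: one row per runtime environment used by a job. Illustrative only, not part of
   the shipped schema: per-queue RTE usage could be summarised with a query along
   these lines:
     SELECT Q.Name, R.RTEName, COUNT(*) AS Jobs
     FROM RunTimeEnvironments R
     JOIN AAR A ON A.RecordID = R.RecordID
     JOIN Queues Q ON Q.ID = A.QueueID
     GROUP BY Q.Name, R.RTEName;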
*/ CREATE TABLE IF NOT EXISTS RunTimeEnvironments ( RecordID INTEGER NOT NULL, RTEName TEXT NOT NULL, -- TODO: should we record arguments, versions, default/enabled? FOREIGN KEY(RecordID) REFERENCES AAR(RecordID) ON DELETE CASCADE ); CREATE INDEX IF NOT EXISTS RunTimeEnvironments_RecordID_IDX ON RunTimeEnvironments(RecordID); CREATE INDEX IF NOT EXISTS RunTimeEnvironments_RecordID_RTEName_IDX ON RunTimeEnvironments(RecordID, RTEName); /* Data transfers info */ CREATE TABLE IF NOT EXISTS DataTransfers ( RecordID INTEGER NOT NULL, URL TEXT NOT NULL, FileSize INTEGER NOT NULL, TransferStart INTEGER NOT NULL, TransferEnd INTEGER NOT NULL, TransferType INTEGER NOT NULL, -- download, download from cache, upload FOREIGN KEY(RecordID) REFERENCES AAR(RecordID) ON DELETE CASCADE ); CREATE INDEX IF NOT EXISTS DataTransfers_RecordID_IDX ON DataTransfers(RecordID); CREATE INDEX IF NOT EXISTS DataTransfers_RecordID_URL_IDX ON DataTransfers(RecordID, URL); /* Extra arbitrary text attributes associated with AAR */ CREATE TABLE IF NOT EXISTS JobExtraInfo ( RecordID INTEGER NOT NULL, InfoKey TEXT NOT NULL, -- including: jobname, lrms, nodename, clienthost, localuser, projectname, systemsoftware, wninstance, benchmark InfoValue TEXT, FOREIGN KEY(RecordID) REFERENCES AAR(RecordID) ON DELETE CASCADE ); CREATE INDEX IF NOT EXISTS JobExtraInfo_RecordID_IDX ON JobExtraInfo(RecordID); CREATE INDEX IF NOT EXISTS JobExtraInfo_RecordID_InfoKey_IDX ON JobExtraInfo(RecordID, InfoKey); /* * Database common config parameters */ CREATE TABLE IF NOT EXISTS DBConfig ( KeyName TEXT PRIMARY KEY, KeyValue TEXT NOT NULL ); INSERT INTO DBConfig VALUES ('DBSCHEMA_VERSION', '2'); nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/run0000644000000000000000000000013015067751425022062 xustar0030 mtime=1759499029.561431124 28 atime=1759499034.7655102 30 ctime=1759499029.561431124 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/0000755000175000002070000000000015067751425024043 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024176 xustar0030 mtime=1759498967.757668791 30 atime=1759498967.865493651 30 ctime=1759499029.554769698 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/Makefile.am0000644000175000002070000000037315067751327026103 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = librun.la librun_la_SOURCES = RunParallel.cpp RunParallel.h \ RunRedirected.cpp RunRedirected.h librun_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) librun_la_LIBADD = $(DLOPEN_LIBS) nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/Makefile.in0000644000000000000000000000013215067751356024211 xustar0030 mtime=1759498990.123231419 30 atime=1759499018.036255998 30 ctime=1759499029.556035206 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/Makefile.in0000644000175000002070000006704115067751356026113 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/run ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = librun_la_DEPENDENCIES = $(am__DEPENDENCIES_1) am_librun_la_OBJECTS = librun_la-RunParallel.lo \ librun_la-RunRedirected.lo librun_la_OBJECTS = $(am_librun_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = librun_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(librun_la_CXXFLAGS) \ $(CXXFLAGS) 
$(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/librun_la-RunParallel.Plo \ ./$(DEPDIR)/librun_la-RunRedirected.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(librun_la_SOURCES) DIST_SOURCES = $(librun_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS 
= @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = librun.la librun_la_SOURCES = RunParallel.cpp RunParallel.h \ RunRedirected.cpp RunRedirected.h librun_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) librun_la_LIBADD = $(DLOPEN_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/run/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/run/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } librun.la: $(librun_la_OBJECTS) $(librun_la_DEPENDENCIES) $(EXTRA_librun_la_DEPENDENCIES) $(AM_V_CXXLD)$(librun_la_LINK) $(librun_la_OBJECTS) $(librun_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librun_la-RunParallel.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librun_la-RunRedirected.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< librun_la-RunParallel.lo: RunParallel.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -MT librun_la-RunParallel.lo -MD -MP -MF $(DEPDIR)/librun_la-RunParallel.Tpo -c -o librun_la-RunParallel.lo `test -f 'RunParallel.cpp' || echo '$(srcdir)/'`RunParallel.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/librun_la-RunParallel.Tpo $(DEPDIR)/librun_la-RunParallel.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='RunParallel.cpp' 
object='librun_la-RunParallel.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -c -o librun_la-RunParallel.lo `test -f 'RunParallel.cpp' || echo '$(srcdir)/'`RunParallel.cpp librun_la-RunRedirected.lo: RunRedirected.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -MT librun_la-RunRedirected.lo -MD -MP -MF $(DEPDIR)/librun_la-RunRedirected.Tpo -c -o librun_la-RunRedirected.lo `test -f 'RunRedirected.cpp' || echo '$(srcdir)/'`RunRedirected.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/librun_la-RunRedirected.Tpo $(DEPDIR)/librun_la-RunRedirected.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='RunRedirected.cpp' object='librun_la-RunRedirected.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librun_la_CXXFLAGS) $(CXXFLAGS) -c -o librun_la-RunRedirected.lo `test -f 'RunRedirected.cpp' || echo '$(srcdir)/'`RunRedirected.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d 
$$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/librun_la-RunParallel.Plo -rm -f ./$(DEPDIR)/librun_la-RunRedirected.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/librun_la-RunParallel.Plo -rm -f ./$(DEPDIR)/librun_la-RunRedirected.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/RunRedirected.h0000644000000000000000000000013215067751327025052 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 30 ctime=1759499029.562519226 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/RunRedirected.h0000644000175000002070000000143615067751327026760 0ustar00mockbuildmock00000000000000
#ifndef GRID_MANAGER_RUN_REDIRECTED_H
#define GRID_MANAGER_RUN_REDIRECTED_H

// The bracketed header names were lost in extraction; the two includes
// below are reconstructed from the Arc types this header uses.
#include <arc/Run.h>
#include <arc/User.h>

namespace ARex {

/// Run child process with stdin, stdout and stderr redirected to specified handles
class RunRedirected {
 private:
  RunRedirected(int in,int out,int err):stdin_(in),stdout_(out),stderr_(err) { };
  ~RunRedirected(void) { };
  int stdin_;
  int stdout_;
  int stderr_;
  static void initializer(void* arg);
 public:
  operator bool(void) { return true; };
  bool operator!(void) { return false; };
  static int run(const Arc::User& user,const char* cmdname,int in,int out,int err,char *const args[],int timeout);
  static int run(const Arc::User& user,const char* cmdname,int in,int out,int err,const char* cmd,int timeout);
};

} // namespace ARex

#endif
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/RunParallel.h0000644000000000000000000000013215067751327024534 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 30 ctime=1759499029.559890281 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/RunParallel.h0000644000175000002070000000220015067751327026430 0ustar00mockbuildmock00000000000000
#ifndef GRID_MANAGER_RUN_PARALLEL_H
#define GRID_MANAGER_RUN_PARALLEL_H

// The bracketed header name was lost in extraction; reconstructed from
// the Arc::Run type used below.
#include <arc/Run.h>

#include "../jobs/JobsList.h"

namespace ARex {

/// Run child process in parallel with stderr redirected to job.jobid.errors
class RunParallel {
 private:
  RunParallel() { };
  ~RunParallel(void) { };
  static void initializer(void* arg);
  operator bool(void) { return true; };
  bool operator!(void) { return false; };
  static bool run(const GMConfig& config, const Arc::User& user,
                  const char* procid, const char* errlog, std::string* errstr,
                  const std::string& args, Arc::Run**, const char* job_proxy,
                  bool su = true,
                  void (*kicker_func)(void*) = NULL, void* kicker_arg = NULL);
 public:
  static bool run(const GMConfig& config, const GMJob& job, JobsList& list,
                  std::string* errstr, const std::string& args, Arc::Run**,
                  bool su = true);
  static bool run(const GMConfig& config, const GMJob& job,
                  std::string* errstr, const std::string& args, Arc::Run**,
                  bool su = true);
};

} // namespace ARex

#endif
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/RunRedirected.cpp0000644000000000000000000000013215067751327025405 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 30 ctime=1759499029.561171082 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/RunRedirected.cpp0000644000175000002070000000527715067751327027312 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

// The bracketed header names were lost in extraction; the set below is a
// best-effort reconstruction from the calls used in this file.
#include <string>
#include <list>
#include <fcntl.h>
#include <unistd.h>

#include <arc/Logger.h>
#include <arc/Run.h>

#include "RunRedirected.h"

namespace ARex {

static Arc::Logger& logger = Arc::Logger::getRootLogger();

int RunRedirected::run(const Arc::User& user,const char* cmdname,int in,int out,int err,char *const args[],int timeout) {
  std::list<std::string> args_; // template argument restored, was lost in extraction
  for(int n = 0;args[n];++n) args_.push_back(std::string(args[n]));
  Arc::Run re(args_);
  if(!re) {
    logger.msg(Arc::ERROR,"%s: Failure creating slot for child process",cmdname?cmdname:"");
    return -1;
  };
  RunRedirected* rr = new RunRedirected(in,out,err);
  if((!rr) || (!(*rr))) {
    if(rr) delete rr;
    logger.msg(Arc::ERROR,"%s: Failure creating data storage for child process",cmdname?cmdname:"");
    return -1;
  };
  re.AssignInitializer(&initializer,rr,false);
  re.AssignUserId(user.get_uid());
  re.AssignGroupId(user.get_gid());
  re.KeepStdin(true);
  re.KeepStdout(true);
  re.KeepStderr(true);
  if(!re.Start()) {
    delete rr;
    logger.msg(Arc::ERROR,"%s: Failure starting child process",cmdname?cmdname:"");
    return -1;
  };
  delete rr;
  if(!re.Wait(timeout)) {
    logger.msg(Arc::ERROR,"%s: Failure waiting for child process to finish",cmdname?cmdname:"");
    re.Kill(5);
    return -1;
  };
  return re.Result();
}

int RunRedirected::run(const Arc::User& user,const char* cmdname,int in,int out,int err,const char* cmd,int timeout) {
  Arc::Run re(cmd);
  if(!re) {
    logger.msg(Arc::ERROR,"%s: Failure creating slot for child process",cmdname?cmdname:"");
    return -1;
  };
  RunRedirected* rr = new RunRedirected(in,out,err);
  if((!rr) || (!(*rr))) {
    if(rr) delete rr;
    logger.msg(Arc::ERROR,"%s: Failure creating data storage for child process",cmdname?cmdname:"");
    return -1;
  };
  re.AssignInitializer(&initializer,rr,false);
  re.AssignUserId(user.get_uid());
  re.AssignGroupId(user.get_gid());
  re.KeepStdin(true);
  re.KeepStdout(true);
  re.KeepStderr(true);
  if(!re.Start()) {
    delete rr;
    logger.msg(Arc::ERROR,"%s: Failure starting child process",cmdname?cmdname:"");
    return -1;
  };
  delete rr;
  if(!re.Wait(timeout)) {
    logger.msg(Arc::ERROR,"%s: Failure waiting for child process to finish",cmdname?cmdname:"");
    re.Kill(5);
    return -1;
  };
  return re.Result();
}

void RunRedirected::initializer(void* arg) {
  // There must be only async-safe calls here!
  // child
  RunRedirected* it = (RunRedirected*)arg;
  // set up stdin,stdout and stderr
  if(it->stdin_ != -1) dup2(it->stdin_,0);
  if(it->stdout_ != -1) dup2(it->stdout_,1);
  if(it->stderr_ != -1) dup2(it->stderr_,2);
}

} // namespace ARex
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/RunParallel.cpp0000644000000000000000000000013215067751327025067 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 30 ctime=1759499029.558620046 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/RunParallel.cpp0000644000175000002070000001133615067751327026775 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

// The bracketed header names were lost in extraction; the set below is a
// best-effort reconstruction from the calls used in this file.
#include <string>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <arc/Logger.h>
#include <arc/Run.h>

#include "../conf/GMConfig.h"
#include "../files/ControlFileHandling.h"
#include "RunParallel.h"

namespace ARex {

static Arc::Logger& logger = Arc::Logger::getRootLogger();

class JobRefInList {
 private:
  JobId id;
  JobsList& list;
 public:
  JobRefInList(const GMJob& job, JobsList& list): id(job.get_id()), list(list) {};
  static void kicker(void* arg);
};

void JobRefInList::kicker(void* arg) {
  JobRefInList* ref = reinterpret_cast<JobRefInList*>(arg); // template argument restored, was lost in extraction
  if(ref) {
    logger.msg(Arc::DEBUG,"%s: Job's helper exited",ref->id);
    ref->list.RequestAttention(ref->id);
    delete ref;
  };
}
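// Note (added for clarity, not in the original source): the JobRefInList
// instance acts as the kicker payload for Arc::Run. When the helper
// process exits, kicker() above marks the job for attention in the
// JobsList and frees the payload; if the helper cannot be started at all,
// the run() overload below deletes the payload itself.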
bool RunParallel::run(const GMConfig& config,const GMJob& job, JobsList& list, std::string* errstr, const std::string& args,Arc::Run** ere,bool su) {
  std::string errlog = job_control_path(config.ControlDir(),job.get_id(),sfx_errors);
  std::string proxy = job_control_path(config.ControlDir(),job.get_id(),sfx_proxy);
  JobRefInList* ref = new JobRefInList(job, list);
  bool result = run(config, job.get_user(), job.get_id().c_str(), errlog.c_str(), errstr, args, ere, proxy.c_str(), su, &JobRefInList::kicker, ref);
  if(!result) delete ref;
  return result;
}

bool RunParallel::run(const GMConfig& config,const GMJob& job, std::string* errstr, const std::string& args,Arc::Run** ere,bool su) {
  std::string errlog = job_control_path(config.ControlDir(),job.get_id(),sfx_errors);
  std::string proxy = job_control_path(config.ControlDir(),job.get_id(),sfx_proxy);
  bool result = run(config, job.get_user(), job.get_id().c_str(), errlog.c_str(), errstr, args, ere, proxy.c_str(), su);
  return result;
}

/* fork & execute child process with stderr redirected to job.ID.errors,
   stdin and stdout to /dev/null */
bool RunParallel::run(const GMConfig& config, const Arc::User& user, const char* procid, const char* errlog, std::string* errstr, const std::string& args, Arc::Run** ere, const char* jobproxy, bool su, void (*kicker_func)(void*), void* kicker_arg) {
  *ere=NULL;
  Arc::Run* re = new Arc::Run(args);
  if((!re) || (!(*re))) {
    if(re) delete re;
    logger.msg(Arc::ERROR,"%s: Failure creating slot for child process",procid?procid:"");
    return false;
  };
  if(kicker_func) re->AssignKicker(kicker_func,kicker_arg);
  re->AssignInitializer(&initializer,(void*)errlog,false);
  if(su) { // change user
    re->AssignUserId(user.get_uid());
    re->AssignGroupId(user.get_gid());
  };
  // setting environment - TODO - better environment
  if(jobproxy && jobproxy[0]) {
    re->RemoveEnvironment("X509_RUN_AS_SERVER");
    re->AddEnvironment("X509_USER_PROXY",jobproxy);
    // for Globus 2.2 set fake cert and key, or else it takes
    // those from host in case of root user.
    // 2.4 needs names and 2.2 will work too.
    // 3.x requires fake ones again.
#if GLOBUS_IO_VERSION>=5
    re->AddEnvironment("X509_USER_KEY",(std::string("fake")));
    re->AddEnvironment("X509_USER_CERT",(std::string("fake")));
#else
    re->AddEnvironment("X509_USER_KEY",jobproxy);
    re->AddEnvironment("X509_USER_CERT",jobproxy);
#endif
    std::string cert_dir = config.CertDir();
    if(!cert_dir.empty()) {
      re->AddEnvironment("X509_CERT_DIR",cert_dir);
    } else {
      re->RemoveEnvironment("X509_CERT_DIR");
    };
    std::string voms_dir = config.VomsDir();
    if(!voms_dir.empty()) {
      re->AddEnvironment("X509_VOMS_DIR",voms_dir);
    } else {
      re->RemoveEnvironment("X509_VOMS_DIR");
    };
  };
  re->KeepStdin(true);
  if(errstr) {
    re->KeepStdout(false);
    re->AssignStdout(*errstr, 1024); // Expecting short failure reason here. Rest goes into .errors file.
  } else {
    re->KeepStdout(true);
  };
  re->KeepStderr(true);
  if(!re->Start()) {
    delete re;
    logger.msg(Arc::ERROR,"%s: Failure starting child process",procid?procid:"");
    return false;
  };
  *ere=re;
  return true;
}

void RunParallel::initializer(void* arg) {
  // child
  char const * errlog = (char const *)arg;
  int h;
  // set up stdin,stdout and stderr
  h=::open("/dev/null",O_RDONLY);
  if(h != 0) { if(dup2(h,0) != 0) { _exit(1); }; close(h); };
  h=::open("/dev/null",O_WRONLY);
  if(h != 1) { if(dup2(h,1) != 1) { _exit(1); }; close(h); };
  if(errlog) {
    h=::open(errlog,O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR);
    if(h==-1) { h=::open("/dev/null",O_WRONLY); };
  } else {
    h=::open("/dev/null",O_WRONLY);
  };
  if(h != 2) { if(dup2(h,2) != 2) { _exit(1); }; close(h); };
}

} // namespace ARex
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/PaxHeaders/README0000644000000000000000000000013215067751327023022 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 30 ctime=1759499029.557333077 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/run/README0000644000175000002070000000004515067751327024723 0ustar00mockbuildmock00000000000000
Classes to run external executables.
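A minimal usage sketch (illustrative only, not part of the build; it assumes
the ARC headers are on the include path and that Arc::User's default
constructor denotes the current account):

  #include <arc/User.h>
  #include "RunRedirected.h"

  int main() {
    // Run "/bin/echo hello" as the current user with the child's stdin,
    // stdout and stderr attached to this process' descriptors 0, 1 and 2,
    // and a 10 second timeout. Returns the exit code, or -1 on failure.
    int rc = ARex::RunRedirected::run(Arc::User(), "echo", 0, 1, 2,
                                      "/bin/echo hello", 10);
    return (rc == 0) ? 0 : 1;
  }

RunParallel::run() is driven the same way by the grid manager, but it returns
immediately after starting the helper and hands back an Arc::Run object
through its Arc::Run** argument so the caller can later wait for or kill it.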
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/files0000644000000000000000000000013015067751425022360 xustar0030 mtime=1759499029.763833626 28 atime=1759499034.7655102 30 ctime=1759499029.763833626 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/0000755000175000002070000000000015067751425024341 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327024473 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 30 ctime=1759499029.758543868 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/Makefile.am0000644000175000002070000000037615067751327026404 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libfiles.la libfiles_la_SOURCES = \ ControlFileHandling.cpp ControlFileContent.cpp \ ControlFileHandling.h ControlFileContent.h libfiles_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355024506 xustar0030 mtime=1759498989.860343055 30 atime=1759499017.939254524 30 ctime=1759499029.759602781 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/Makefile.in0000644000175000002070000006751215067751355026423 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/files ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libfiles_la_LIBADD = am_libfiles_la_OBJECTS = libfiles_la-ControlFileHandling.lo \ libfiles_la-ControlFileContent.lo libfiles_la_OBJECTS = $(am_libfiles_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libfiles_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(libfiles_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/libfiles_la-ControlFileContent.Plo 
\ ./$(DEPDIR)/libfiles_la-ControlFileHandling.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libfiles_la_SOURCES) DIST_SOURCES = $(libfiles_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS 
= @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = libfiles.la libfiles_la_SOURCES = \ ControlFileHandling.cpp ControlFileContent.cpp \ ControlFileHandling.h ControlFileContent.h libfiles_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/files/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/files/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libfiles.la: $(libfiles_la_OBJECTS) $(libfiles_la_DEPENDENCIES) $(EXTRA_libfiles_la_DEPENDENCIES) $(AM_V_CXXLD)$(libfiles_la_LINK) $(libfiles_la_OBJECTS) $(libfiles_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libfiles_la-ControlFileContent.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libfiles_la-ControlFileHandling.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libfiles_la-ControlFileHandling.lo: ControlFileHandling.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -MT libfiles_la-ControlFileHandling.lo -MD -MP -MF $(DEPDIR)/libfiles_la-ControlFileHandling.Tpo -c -o libfiles_la-ControlFileHandling.lo `test -f 'ControlFileHandling.cpp' || echo '$(srcdir)/'`ControlFileHandling.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libfiles_la-ControlFileHandling.Tpo 
$(DEPDIR)/libfiles_la-ControlFileHandling.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ControlFileHandling.cpp' object='libfiles_la-ControlFileHandling.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -c -o libfiles_la-ControlFileHandling.lo `test -f 'ControlFileHandling.cpp' || echo '$(srcdir)/'`ControlFileHandling.cpp libfiles_la-ControlFileContent.lo: ControlFileContent.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -MT libfiles_la-ControlFileContent.lo -MD -MP -MF $(DEPDIR)/libfiles_la-ControlFileContent.Tpo -c -o libfiles_la-ControlFileContent.lo `test -f 'ControlFileContent.cpp' || echo '$(srcdir)/'`ControlFileContent.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libfiles_la-ControlFileContent.Tpo $(DEPDIR)/libfiles_la-ControlFileContent.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ControlFileContent.cpp' object='libfiles_la-ControlFileContent.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libfiles_la_CXXFLAGS) $(CXXFLAGS) -c -o libfiles_la-ControlFileContent.lo `test -f 'ControlFileContent.cpp' || echo '$(srcdir)/'`ControlFileContent.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libfiles_la-ControlFileContent.Plo -rm -f ./$(DEPDIR)/libfiles_la-ControlFileHandling.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libfiles_la-ControlFileContent.Plo -rm -f ./$(DEPDIR)/libfiles_la-ControlFileHandling.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/ControlFileContent.cpp0000644000000000000000000000013115067751327026716 xustar0030 mtime=1759498967.753619622 29 atime=1759498967.86349362 30 ctime=1759499029.762759386 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/ControlFileContent.cpp0000644000175000002070000006516315067751327030624 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

// The bracketed header names were lost in extraction; the set below is a
// best-effort reconstruction from the types and calls used in this file
// and may differ from the original list.
#include <string>
#include <list>
#include <iostream>
#include <mutex>
#include <cerrno>
#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <arc/Logger.h>
#include <arc/StringConv.h>
#include <arc/URL.h>
#include <arc/Utils.h>
#include <arc/compute/JobDescription.h>

#include "ControlFileContent.h"

namespace ARex {

static std::mutex local_lock;
static Arc::Logger& logger = Arc::Logger::getRootLogger();

class KeyValueFile {
 public:
  enum OpenMode { Fetch, Create };
  KeyValueFile(std::string const& fname, OpenMode mode);
  ~KeyValueFile(void);
  operator bool(void) { return handle_ != -1; };
  bool operator!(void) { return handle_ == -1; };
  bool Write(std::string const& name, std::string const& value);
  bool Read(std::string& name, std::string& value);
 private:
  int handle_;
  char* read_buf_;
  int read_buf_pos_;
  int read_buf_avail_;
  static int const read_buf_size_ = 256; // normally should fit full line
  static int const data_max_ = 1024*1024; // sanity protection
};

KeyValueFile::KeyValueFile(std::string const& fname, OpenMode mode):
    handle_(-1),read_buf_(NULL),read_buf_pos_(0),read_buf_avail_(0) {
  if(mode == Create) {
    handle_ = ::open(fname.c_str(),O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
    if(handle_==-1) return;
    struct flock lock;
    lock.l_type=F_WRLCK; lock.l_whence=SEEK_SET; lock.l_start=0; lock.l_len=0;
    for(;;) {
      if(::fcntl(handle_,F_SETLKW,&lock) != -1) break;
      if(errno == EINTR) continue;
      ::close(handle_); handle_ = -1;
      return;
    };
    if((::ftruncate(handle_,0) != 0) || (::lseek(handle_,0,SEEK_SET) != 0)) {
      close(handle_); handle_ = -1;
      return;
    };
  } else {
    handle_ = ::open(fname.c_str(),O_RDONLY);
    if(handle_ == -1) return;
    struct flock lock;
    lock.l_type=F_RDLCK; lock.l_whence=SEEK_SET; lock.l_start=0; lock.l_len=0;
    for(;;) {
      if(::fcntl(handle_,F_SETLKW,&lock) != -1) break; // success
      if(errno == EINTR) continue; // retry
      close(handle_); handle_ = -1; // failure
      return;
    };
    read_buf_ = new char[read_buf_size_];
    if(!read_buf_) {
      close(handle_); handle_ = -1;
      return;
    };
  };
}

KeyValueFile::~KeyValueFile(void) {
  if(handle_ != -1) ::close(handle_);
  if(read_buf_) delete[] read_buf_;
}

static inline bool write_str(int f,const char* buf, std::string::size_type len) {
  for(;len > 0;) {
    ssize_t l = write(f,buf,len);
    if(l < 0) {
      if(errno != EINTR) return false;
    } else {
      len -= l; buf += l;
    };
  };
  return true;
}

bool KeyValueFile::Write(std::string const& name, std::string const& value) {
  if(handle_ == -1) return false;
  if(read_buf_) return false;
  if(name.empty()) return false;
  if(name.length() > data_max_) return false;
  if(value.length() > data_max_) return false;
  if(!write_str(handle_, name.c_str(), name.length())) return false;
  if(!write_str(handle_, "=", 1)) return false;
  if(!write_str(handle_, value.c_str(), value.length())) return false;
  if(!write_str(handle_, "\n", 1)) return false;
  return true;
}

bool KeyValueFile::Read(std::string& name, std::string& value) {
  if(handle_ == -1) return false;
  if(!read_buf_) return false;
  name.clear(); value.clear();
  char c;
  bool key_done = false;
  for(;;) {
    if(read_buf_pos_ >= read_buf_avail_) {
      read_buf_pos_ = 0; read_buf_avail_ = 0;
      ssize_t l = ::read(handle_, read_buf_, read_buf_size_);
      if(l < 0) {
        if(errno == EINTR) continue;
        return false;
      };
      if(l == 0) break; // EOF - not error
      read_buf_avail_ = l;
    };
    c = read_buf_[read_buf_pos_++];
    if(c == '\n') break; // EOL
    if(!key_done) {
      if(c == '=') {
        key_done = true;
      } else {
        name += c;
        if(name.length() > data_max_) return false;
      };
    } else {
      value += c;
      if(value.length() > data_max_) return false;
    };
  };
  return true;
}

std::ostream &operator<< (std::ostream &o,const FileData &fd) {
  // TODO: switch to HEX encoding and drop dependency on ConfigIni in major release
  std::string escaped_pfn(Arc::escape_chars(fd.pfn, " \\\r\n", '\\', false));
  if(!escaped_pfn.empty()) {
    o.write(escaped_pfn.c_str(), escaped_pfn.size());
    std::string escaped_lfn(Arc::escape_chars(fd.lfn, " \\\r\n", '\\', false));
    if(!escaped_lfn.empty()) {
      o.put(' ');
      o.write(escaped_lfn.c_str(), escaped_lfn.size());
      std::string escaped_cred(Arc::escape_chars(fd.cred, " \\\r\n", '\\', false));
      if(!escaped_cred.empty()) {
        o.put(' ');
        o.write(escaped_cred.c_str(), escaped_cred.size());
        std::string escaped_cred_type(Arc::escape_chars(fd.cred_type, " \\\r\n", '\\', false));
        if(!escaped_cred_type.empty()) {
          o.put(' ');
          o.write(escaped_cred_type.c_str(), escaped_cred_type.size());
        };
      };
    };
  };
  return o;
}

std::istream &operator>> (std::istream &i,FileData &fd) {
  std::string buf;
  std::getline(i,buf);
  Arc::trim(buf," \t\r\n");
  fd.pfn.resize(0); fd.lfn.resize(0); fd.cred.resize(0); fd.cred_type.resize(0);
  fd.pfn = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\');
  fd.lfn = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\');
  fd.cred = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\');
  fd.cred_type = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\');
  if((fd.pfn.length() == 0) && (fd.lfn.length() == 0)) return i; /* empty st */
  if(!Arc::CanonicalDir(fd.pfn,true,true)) {
    logger.msg(Arc::ERROR,"Wrong directory in %s",buf);
    fd.pfn.resize(0); fd.lfn.resize(0);
  };
  return i;
}

FileData::FileData(void) {
  ifsuccess = true;
  ifcancel = false;
  iffailure = false;
}

FileData::FileData(const std::string& pfn_s,const std::string& lfn_s) {
  ifsuccess = true;
  ifcancel = false;
  iffailure = false;
  if(!pfn_s.empty()) { pfn=pfn_s; } else { pfn.resize(0); };
  if(!lfn_s.empty()) { lfn=lfn_s; } else { lfn.resize(0); };
}

//FileData& FileData::operator= (const char *str) {
//  pfn.resize(0); lfn.resize(0);
//  int n=input_escaped_string(str,pfn);
//  input_escaped_string(str+n,lfn);
//  return *this;
//}

bool FileData::operator== (const FileData& data) {
  // pfn may contain leading slash. It must be striped
  // before comparison.
  const char* pfn_ = pfn.c_str();
  if(pfn_[0] == '/') ++pfn_;
  const char* dpfn_ = data.pfn.c_str();
  if(dpfn_[0] == '/') ++dpfn_;
  return (strcmp(pfn_,dpfn_) == 0);
  // return (pfn == data.pfn);
}

bool FileData::operator== (const char *name) {
  if(name == NULL) return false;
  if(name[0] == '/') ++name;
  const char* pfn_ = pfn.c_str();
  if(pfn_[0] == '/') ++pfn_;
  return (strcmp(pfn_,name) == 0);
}

bool FileData::has_lfn(void) {
  return (lfn.find(':') != std::string::npos);
}
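// Illustration (added for clarity, not in the original source): with the
// escaping used by operator<< above, a FileData entry with
//   pfn = "/out dir/result.dat"
//   lfn = "gsiftp://se.example.org/files/result.dat"
// is stored as a single line, with spaces inside tokens backslash-escaped
// and empty trailing fields omitted:
//
//   /out\ dir/result.dat gsiftp://se.example.org/files/result.dat
//
// operator>> reverses this via extract_escaped_token()/unescape_chars().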
dest.clear(); dest.successcode = 0; dest = src.Argument; dest.push_front(src.Path); if(src.SuccessExitCode.first) dest.successcode = src.SuccessExitCode.second; return dest; } JobLocalDescription& JobLocalDescription::operator=(const Arc::JobDescription& arc_job_desc) { // TODO: handle errors action = "request"; std::map::const_iterator act_i = arc_job_desc.OtherAttributes.find("nordugrid:xrsl;action"); if(act_i != arc_job_desc.OtherAttributes.end()) action = act_i->second; std::map::const_iterator jid_i = arc_job_desc.OtherAttributes.find("nordugrid:xrsl;jobid"); if(jid_i != arc_job_desc.OtherAttributes.end()) jobid = jid_i->second; dryrun = arc_job_desc.Application.DryRun; projectnames.clear(); std::map::const_iterator jr_i = arc_job_desc.OtherAttributes.find("emies:adl;JobProject"); if (jr_i != arc_job_desc.OtherAttributes.end()) projectnames.push_back(jr_i->second); jobname = arc_job_desc.Identification.JobName; downloads = 0; uploads = 0; freestagein = false; outputdata.clear(); inputdata.clear(); rte.clear(); transfershare="_default"; const std::list& sw = arc_job_desc.Resources.RunTimeEnvironment.getSoftwareList(); for (std::list::const_iterator itSW = sw.begin(); itSW != sw.end(); ++itSW) rte.push_back(std::string(*itSW)); for (std::list::const_iterator file = arc_job_desc.DataStaging.InputFiles.begin(); file != arc_job_desc.DataStaging.InputFiles.end(); ++file) { std::string fname = file->Name; if(fname[0] != '/') fname = "/"+fname; // Just for safety inputdata.push_back(FileData(fname, "")); if(!file->Sources.empty()) { // Only one source per file is used if (file->Sources.front() && file->Sources.front().Protocol() != "file") { inputdata.back().lfn = file->Sources.front().fullstr(); // It is not possible to extract credentials path here. // So temporarily storing id here. inputdata.back().cred = file->Sources.front().DelegationID; inputdata.back().cred_type = "id"; // just to have it set to something } } if(fname == "/") { // Unnamed file is used to mark request for free stage in freestagein = true; } if (inputdata.back().has_lfn()) { ++downloads; Arc::URL u(inputdata.back().lfn); if (file->IsExecutable || file->Name == arc_job_desc.Application.Executable.Path) { u.AddOption("exec", "yes", true); } inputdata.back().lfn = u.fullstr(); } else if (file->FileSize != -1) { inputdata.back().lfn = Arc::tostring(file->FileSize); if (!file->Checksum.empty()) { // Only set checksum if FileSize is also set. inputdata.back().lfn += "."+file->Checksum; } } } for (std::list::const_iterator file = arc_job_desc.DataStaging.OutputFiles.begin(); file != arc_job_desc.DataStaging.OutputFiles.end(); ++file) { std::string fname = file->Name; if(fname[0] != '/') fname = "/"+fname; // Just for safety bool ifsuccess = false; bool ifcancel = false; bool iffailure = false; if (!file->Targets.empty()) { // output file for(std::list::const_iterator target = file->Targets.begin(); target != file->Targets.end(); ++target) { FileData fdata(fname, target->fullstr()); fdata.ifsuccess = target->UseIfSuccess; fdata.ifcancel = target->UseIfCancel; fdata.iffailure = target->UseIfFailure; outputdata.push_back(fdata); if (outputdata.back().has_lfn()) { ++uploads; Arc::URL u(outputdata.back().lfn); // really needed? 
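// ---------------------------------------------------------------------------
// [Editor's illustration -- not part of the original source.] For input files
// the code above stores one of two encodings in FileData::lfn: the full source
// URL (plus an "exec=yes" URL option for executables), or, for files uploaded
// by the client, "<size>[.<checksum>]". local_input_lfn is a hypothetical
// helper showing the second encoding; the checksum value is illustrative:
// ---------------------------------------------------------------------------
#if 0 // example only
#include <sstream>
#include <string>

static std::string local_input_lfn(long long size, const std::string& checksum) {
  std::ostringstream s;
  s << size;                                   // e.g. "1048576"
  if (!checksum.empty()) s << '.' << checksum; // e.g. "1048576.adler32:3f0a1b2c"
  return s.str();
}
#endif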
if(u.Option("preserve","no") == "yes") { outputdata.back().ifcancel = true; outputdata.back().iffailure = true; }; switch(target->CreationFlag) { case Arc::TargetType::CFE_OVERWRITE: u.AddOption("overwrite","yes",true); break; case Arc::TargetType::CFE_DONTOVERWRITE: u.AddOption("overwrite","no",true); break; // Rest is not supported in URLs yet. default: break; }; u.RemoveOption("preserve"); u.RemoveOption("mandatory"); // TODO: implement outputdata.back().lfn = u.fullstr(); // It is not possible to extract credentials path here. // So temporarily storing id here. outputdata.back().cred = target->DelegationID; outputdata.back().cred_type = "id"; // just to have it set to something } if(outputdata.back().ifsuccess) ifsuccess = true; if(outputdata.back().ifcancel) ifcancel = true; if(outputdata.back().iffailure) iffailure = true; } if(ifsuccess && ifcancel && iffailure) { // All possible results are covered } else { // For not covered cases file is treated as user downloadable FileData fdata(fname, ""); fdata.ifsuccess = !ifsuccess; fdata.ifcancel = !ifcancel; fdata.iffailure = !iffailure; outputdata.push_back(fdata); } } else { // user downloadable file FileData fdata(fname, ""); // user decides either to use file fdata.ifsuccess = true; fdata.ifcancel = true; fdata.iffailure = true; outputdata.push_back(fdata); } } // Pick up per job delegation if(!arc_job_desc.DataStaging.DelegationID.empty()) { delegationid = arc_job_desc.DataStaging.DelegationID; }; exec = arc_job_desc.Application.Executable; for(std::list::const_iterator e = arc_job_desc.Application.PreExecutable.begin(); e != arc_job_desc.Application.PreExecutable.end(); ++e) { Exec pe = *e; preexecs.push_back(pe); } for(std::list::const_iterator e = arc_job_desc.Application.PostExecutable.begin(); e != arc_job_desc.Application.PostExecutable.end(); ++e) { Exec pe = *e; postexecs.push_back(pe); } stdin_ = arc_job_desc.Application.Input; stdout_ = arc_job_desc.Application.Output; stderr_ = arc_job_desc.Application.Error; if (arc_job_desc.Resources.DiskSpaceRequirement.DiskSpace > -1) diskspace = (unsigned long long int)(arc_job_desc.Resources.DiskSpaceRequirement.DiskSpace*1024*1024); processtime = arc_job_desc.Application.ProcessingStartTime; const int lifetimeTemp = (int)arc_job_desc.Resources.SessionLifeTime.GetPeriod(); if (lifetimeTemp > 0) lifetime = lifetimeTemp; activityid = arc_job_desc.Identification.ActivityOldID; stdlog = arc_job_desc.Application.LogDir; jobreport.clear(); for (std::list::const_iterator it = arc_job_desc.Application.RemoteLogging.begin(); it != arc_job_desc.Application.RemoteLogging.end(); ++it) { // TODO: Support optional requirement. // TODO: Support other types than SGAS. if (it->ServiceType == "SGAS") { jobreport.push_back(it->Location.str()); } } notify.clear(); { int n = 0; for (std::list::const_iterator it = arc_job_desc.Application.Notification.begin(); it != arc_job_desc.Application.Notification.end(); it++) { if (n >= 3) break; // Only 3 instances are allowed. 
std::string states; for (std::list::const_iterator s = it->States.begin(); s != it->States.end(); ++s) { char state = StateToShortcut(*s); if(state == ' ') continue; states+=state; } if(states.empty()) continue; if(it->Email.empty()) continue; if (!notify.empty()) notify += " "; notify += states + " " + it->Email; ++n; } } if (!arc_job_desc.Resources.QueueName.empty()) { queue = arc_job_desc.Resources.QueueName; } if (!arc_job_desc.Application.CredentialService.empty() && arc_job_desc.Application.CredentialService.front()) credentialserver = arc_job_desc.Application.CredentialService.front().fullstr(); if (arc_job_desc.Application.Rerun > -1) reruns = arc_job_desc.Application.Rerun; if ( arc_job_desc.Application.Priority <= 100 && arc_job_desc.Application.Priority > 0 ) priority = arc_job_desc.Application.Priority; return *this; } const char* const JobLocalDescription::transfersharedefault = "_default"; int const JobLocalDescription::prioritydefault = 50; bool LRMSResult::set(const char* s) { // 1. Empty string = exit code 0 if(s == NULL) s=""; for(;*s;++s) { if(!isspace(*s)) break; }; if(!*s) { code_=0; description_=""; }; // Try to read first word as number char* e; code_=strtol(s,&e,0); if((!*e) || (isspace(*e))) { for(;*e;++e) { if(!isspace(*e)) break; }; description_=e; return true; }; // If there is no number that means some "uncoded" failure code_=-1; description_=s; return true; } std::istream& operator>>(std::istream& i,LRMSResult &r) { std::string buf; if(i.eof() || i.fail()) { } else { std::getline(i,buf); }; r=buf; return i; } std::ostream& operator<<(std::ostream& o,const LRMSResult &r) { o<& value) { for(std::list::const_iterator v = value.begin(); v != value.end(); ++v) { if(!write_pair(f,name,*v)) return false; } return true; } static inline bool parse_boolean(const std::string& buf) { if(strncasecmp("yes",buf.c_str(),3) == 0) return true; if(strncasecmp("true",buf.c_str(),4) == 0) return true; if(strncmp("1",buf.c_str(),1) == 0) return true; return false; } bool JobLocalDescription::write(const std::string& fname) const { std::unique_lock lock_(local_lock); // *.local file is accessed concurently. To avoid improper readings lock is acquired. 
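// ---------------------------------------------------------------------------
// [Editor's illustration -- not part of the original source.] LRMSResult::set
// above parses the job.ID.lrms_done content as "<code> <description>", mapping
// an empty line to exit code 0 and a line without a leading number to the
// "uncoded" failure code -1. parse_lrms_result is a hypothetical standalone
// rendering of the same rule:
// ---------------------------------------------------------------------------
#if 0 // example only
#include <cstdlib>
#include <cctype>
#include <string>

static void parse_lrms_result(const char* s, int& code, std::string& description) {
  while (*s && isspace((unsigned char)*s)) ++s;
  if (!*s) { code = 0; description.clear(); return; } // empty = exit code 0
  char* e;
  code = (int)strtol(s, &e, 0);
  if (!*e || isspace((unsigned char)*e)) {            // "<number> <text>"
    while (*e && isspace((unsigned char)*e)) ++e;
    description = e;
  } else {                                            // no number: uncoded failure
    code = -1;
    description = s;
  }
}
#endif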
KeyValueFile f(fname,KeyValueFile::Create); if(!f) return false; for (std::list::const_iterator it=jobreport.begin(); it!=jobreport.end(); it++) { if(!write_pair(f,"jobreport",*it)) return false; }; if(!write_pair(f,"globalid",globalid)) return false; if(!write_pair(f,"headnode",headnode)) return false; if(!write_pair(f,"headhost",headhost)) return false; if(!write_pair(f,"globalurl",globalurl)) return false; if(!write_pair(f,"interface",interface)) return false; if(!write_pair(f,"lrms",lrms)) return false; if(!write_pair(f,"queue",queue)) return false; if(!write_pair(f,"localid",localid)) return false; if(!write_pair(f,"args",exec)) return false; if(!write_pair(f,"pre",preexecs)) return false; if(!write_pair(f,"post",postexecs)) return false; if(!write_pair(f,"subject",DN)) return false; if(!write_pair(f,"starttime",starttime)) return false; if(!write_pair(f,"lifetime",lifetime)) return false; if(!write_pair(f,"notify",notify)) return false; if(!write_pair(f,"processtime",processtime)) return false; if(!write_pair(f,"exectime",exectime)) return false; if(!write_pair(f,"rerun",Arc::tostring(reruns))) return false; if(downloads>=0) if(!write_pair(f,"downloads",Arc::tostring(downloads))) return false; if(uploads>=0) if(!write_pair(f,"uploads",Arc::tostring(uploads))) return false; if(!write_pair(f,"jobname",jobname)) return false; for (std::list::const_iterator ppn=projectnames.begin(); ppn!=projectnames.end(); ++ppn) { if(!write_pair(f,"projectname",*ppn)) return false; }; if(!write_pair(f,"gmlog",stdlog)) return false; if(!write_pair(f,"cleanuptime",cleanuptime)) return false; if(!write_pair(f,"delegexpiretime",expiretime)) return false; if(!write_pair(f,"clientname",clientname)) return false; if(!write_pair(f,"clientsoftware",clientsoftware)) return false; if(!write_pair(f,"delegationid",delegationid)) return false; if(!write_pair(f,"sessiondir",sessiondir)) return false; if(!write_pair(f,"diskspace",Arc::tostring(diskspace))) return false; if(!write_pair(f,"failedstate",failedstate)) return false; if(!write_pair(f,"failedcause",failedcause)) return false; if(!write_pair(f,"credentialserver",credentialserver)) return false; if(!write_pair(f,"freestagein",freestagein)) return false; for(std::list::const_iterator lv=localvo.begin(); lv != localvo.end(); ++lv) { if(!write_pair(f,"localvo",(*lv))) return false; }; for(std::list::const_iterator vf=voms.begin(); vf != voms.end(); ++vf) { if(!write_pair(f,"voms",(*vf))) return false; }; for(std::list::const_iterator ag=authgroups.begin(); ag != authgroups.end(); ++ag) { if(!write_pair(f,"auth",(*ag))) return false; }; for(std::list::const_iterator act_id=activityid.begin(); act_id != activityid.end(); ++act_id) { if(!write_pair(f,"activityid",(*act_id))) return false; }; for(std::map >::const_iterator claims=tokenclaim.begin(); claims != tokenclaim.end(); ++claims) { for(std::list::const_iterator claim = claims->second.begin(); claim != claims->second.end(); ++claim) { if(!write_pair(f,"tokenclaim."+claims->first,*claim)) return false; }; }; if(!write_pair(f,"transfershare",transfershare)) return false; if(!write_pair(f,"priority",Arc::tostring(priority))) return false; if(!write_pair(f,"dryrun",dryrun)) return false; return true; } bool JobLocalDescription::read(const std::string& fname) { std::unique_lock lock_(local_lock); // *.local file is accessed concurently. To avoid improper readings lock is acquired. 
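// ---------------------------------------------------------------------------
// [Editor's illustration -- not part of the original source.] write() above
// produces the job.ID.local control file: one "name=value" pair per line, with
// repeatable keys (jobreport, projectname, localvo, voms, auth, activityid,
// tokenclaim.*) emitted once per element. A truncated sample with purely
// illustrative values:
//
//   globalid=https://ce.example.org:443/arex/Ab3...
//   lrms=slurm
//   queue=main
//   subject=/DC=org/DC=example/CN=Jane Doe
//   rerun=2
//   priority=50
//   transfershare=_default
//   tokenclaim.iss=https://iam.example.org/
// ---------------------------------------------------------------------------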
KeyValueFile f(fname,KeyValueFile::Fetch); if(!f) return false; activityid.clear(); tokenclaim.clear(); localvo.clear(); voms.clear(); authgroups.clear(); for(;;) { std::string name; std::string buf; if(!f.Read(name,buf)) return false; if(name.empty() && buf.empty()) break; // EOF if(name.empty()) continue; if(buf.empty()) continue; if(name == "lrms") { lrms = buf; } else if(name == "headnode") { headnode = buf; } else if(name == "headhost") { headhost = buf; } else if(name == "interface") { interface = buf; } else if(name == "queue") { queue = buf; } else if(name == "localid") { localid = buf; } else if(name == "subject") { DN = buf; } else if(name == "starttime") { starttime = buf; } // else if(name == "UI") { UI = buf; } else if(name == "lifetime") { lifetime = buf; } else if(name == "notify") { notify = buf; } else if(name == "processtime") { processtime = buf; } else if(name == "exectime") { exectime = buf; } else if(name == "jobreport") { jobreport.push_back(std::string(buf)); } else if(name == "globalid") { globalid = buf; } else if(name == "globalurl") { globalurl = buf; } else if(name == "jobname") { jobname = buf; } else if(name == "projectname") { projectnames.push_back(std::string(buf)); } else if(name == "gmlog") { stdlog = buf; } else if(name == "rerun") { int n; if(!Arc::stringto(buf,n)) return false; reruns = n; } else if(name == "downloads") { int n; if(!Arc::stringto(buf,n)) return false; downloads = n; } else if(name == "uploads") { int n; if(!Arc::stringto(buf,n)) return false; uploads = n; } else if(name == "args") { exec.clear(); exec.successcode = 0; while(!buf.empty()) { std::string arg; arg = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\'); exec.push_back(arg); }; } else if(name == "argscode") { int n; if(!Arc::stringto(buf,n)) return false; exec.successcode = n; } else if(name == "pre") { Exec pe; while(!buf.empty()) { std::string arg; arg = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\'); pe.push_back(arg); }; preexecs.push_back(pe); } else if(name == "precode") { if(preexecs.empty()) return false; int n; if(!Arc::stringto(buf,n)) return false; preexecs.back().successcode = n; } else if(name == "post") { Exec pe; while(!buf.empty()) { std::string arg; arg = Arc::unescape_chars(Arc::extract_escaped_token(buf, ' ', '\\'), '\\'); pe.push_back(arg); }; postexecs.push_back(pe); } else if(name == "postcode") { if(postexecs.empty()) return false; int n; if(!Arc::stringto(buf,n)) return false; postexecs.back().successcode = n; } else if(name == "cleanuptime") { cleanuptime = buf; } else if(name == "delegexpiretime") { expiretime = buf; } else if(name == "clientname") { clientname = buf; } else if(name == "clientsoftware") { clientsoftware = buf; } else if(name == "delegationid") { delegationid = buf; } else if(name == "sessiondir") { sessiondir = buf; } else if(name == "failedstate") { failedstate = buf; } else if(name == "failedcause") { failedcause = buf; } else if(name == "credentialserver") { credentialserver = buf; } else if(name == "freestagein") { freestagein = parse_boolean(buf); } else if(name == "localvo") { localvo.push_back(buf); } else if(name == "voms") { voms.push_back(buf); } else if(name == "auth") { authgroups.push_back(buf); } else if(name == "diskspace") { unsigned long long int n; if(!Arc::stringto(buf,n)) return false; diskspace = n; } else if(name == "activityid") { activityid.push_back(buf); } else if(name == "transfershare") { transfershare = buf; } else if(name == "priority") { int n; 
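// ---------------------------------------------------------------------------
// [Editor's illustration -- not part of the original source.] The "args",
// "pre" and "post" values above are stored as space-separated tokens with
// backslash escaping and are re-split via Arc::extract_escaped_token /
// Arc::unescape_chars. split_escaped is a hypothetical standalone equivalent:
// ---------------------------------------------------------------------------
#if 0 // example only
#include <list>
#include <string>

static std::list<std::string> split_escaped(const std::string& line) {
  std::list<std::string> out;
  std::string cur;
  bool esc = false, have = false;
  for (char c : line) {
    if (esc)            { cur += c; esc = false; have = true; }
    else if (c == '\\') { esc = true; have = true; }
    else if (c == ' ')  { if (have) out.push_back(cur); cur.clear(); have = false; }
    else                { cur += c; have = true; }
  }
  if (have) out.push_back(cur);
  return out;
}
// split_escaped("/bin/echo hello\\ world") yields {"/bin/echo", "hello world"}.
#endif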
if(!Arc::stringto(buf,n)) return false; priority = n; } else if(name == "dryrun") { dryrun = parse_boolean(buf); } else if(name.compare(0,11,"tokenclaim.") == 0) { tokenclaim[name.substr(11)].push_back(buf); } } return true; } bool JobLocalDescription::read_var(const std::string &fname,const std::string &vnam,std::string &value) { std::unique_lock lock_(local_lock); // *.local file is accessed concurently. To avoid improper readings lock is acquired. KeyValueFile f(fname,KeyValueFile::Fetch); if(!f) return false; // using iostream for handling file content bool found = false; for(;;) { std::string buf; std::string name; if(!f.Read(name, buf)) return false; if(name.empty() && buf.empty()) break; // EOF if(name.empty()) continue; if(buf.empty()) continue; if(name == vnam) { value = buf; found=true; break; }; }; return found; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/ControlFileContent.h0000644000000000000000000000013115067751327026363 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 30 ctime=1759499029.764884393 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/ControlFileContent.h0000644000175000002070000001774715067751327030306 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_INFO_TYPES_H #define GRID_MANAGER_INFO_TYPES_H #include #include #include #include #include namespace ARex { /* Defines few data types used by grid-manager to store information about jobs. */ /* Pair of values containing file's path (pfn - physical file name) and it's source or destination on the net (lfn - logical file name) */ class FileData { public: typedef std::list::iterator iterator; FileData(void); FileData(const std::string& pfn_s,const std::string& lfn_s); std::string pfn; // path relative to session dir std::string lfn; // input/output url or size.checksum std::string cred; // path to file containing credentials std::string cred_type; // type of credentials in cred file bool ifsuccess; bool ifcancel; bool iffailure; FileData& operator= (const char* str); bool operator== (const char* name); bool operator== (const FileData& data); bool has_lfn(void); }; std::istream &operator>> (std::istream &i,FileData &fd); std::ostream &operator<< (std::ostream &o,const FileData &fd); class Exec: public std::list { public: Exec(void):successcode(0) {}; Exec(const std::list& src):std::list(src),successcode(0) {}; Exec(const Arc::ExecutableType& src):successcode(0) { operator=(src); }; Exec& operator=(const std::list& src) { std::list::operator=(src); return *this; }; Exec& operator=(const Arc::ExecutableType& src); int successcode; }; std::istream &operator>> (std::istream &i,Exec &fd); std::ostream &operator<< (std::ostream &o,const Exec &fd); /* Most important information about job extracted from different sources (mostly from job description) and stored in separate file for relatively quick and simple access. 
*/ class JobLocalDescription { /* all values are public, this class is just for convenience */ public: JobLocalDescription(void):jobid(""),globalid(""),headnode(""),headhost(""),globalurl(""), lrms(""),queue(""),localid(""), DN(""),starttime((time_t)(-1)),lifetime(""), notify(""),processtime((time_t)(-1)),exectime((time_t)(-1)), clientname(""),clientsoftware(""),delegationid(""), reruns(0),priority(prioritydefault),downloads(-1),uploads(-1), jobname(""),jobreport(), cleanuptime((time_t)(-1)),expiretime((time_t)(-1)), failedstate(""),failedcause(""), credentialserver(""),freestagein(false)/*,gsiftpthreads(1)*/, dryrun(false),diskspace(0), transfershare(JobLocalDescription::transfersharedefault) {} JobLocalDescription& operator=(const Arc::JobDescription& arc_job_desc); bool read(const std::string& fname); bool write(const std::string& fname) const; static bool read_var(const std::string &fname,const std::string &vnam,std::string &value); // All non-static members are safe to copy std::string jobid; /* job's unique identifier */ /* attributes stored in job.ID.local */ std::string globalid; /* job id as seen from outside */ std::string headnode; /* URL of the cluster's headnode */ std::string headhost; /* hostname of cluster's headnode */ std::string globalurl; /* URL roughly representing job on cluster */ std::string interface; /* interface type used to submit job */ std::string lrms; /* lrms type to use - pbs */ std::string queue; /* queue name - default */ std::string localid; /* job's id in lrms */ std::list preexecs; /* executable + arguments */ Exec exec; /* executable + arguments */ std::list postexecs; /* executable + arguments */ std::string DN; /* user's distinguished name aka subject name */ Arc::Time starttime; /* job submission time */ std::string lifetime; /* time to live for submission directory */ std::string notify; /* notification flags used and email address */ Arc::Time processtime; /* time to start job processing (downloading) */ Arc::Time exectime; /* time to start execution */ std::string clientname; /* IP+port of user interface + info given by ui */ std::string clientsoftware; /* Client's version */ std::string delegationid; /* id of deleation assigned to this job (not per file) */ int reruns; /* number of allowed reruns left */ int priority; /* priority the job has */ int downloads; /* number of downloadable files requested */ int uploads; /* number of uploadable files requested */ std::string jobname; /* name of job given by user */ std::list projectnames; /* project names, i.e. 
"ACIDs" */ std::list jobreport; /* URLs of user's/VO's loggers */ Arc::Time cleanuptime; /* time to remove job completely */ Arc::Time expiretime; /* when main delegation expires */ std::string stdlog; /* dirname to which log messages will be put after job finishes */ std::string sessiondir; /* job's session directory */ std::string failedstate; /* state at which job failed, used for rerun */ std::string failedcause; /* reason for job failure, client or internal error */ std::string credentialserver; /* URL of server used to renew credentials - MyProxy */ bool freestagein; /* if true, client is allowed to stage in any files */ std::list localvo; /* VO names to which user belongs according to local configuration*/ std::list voms; /* VOMS FQANs which we matched during authorization process */ std::list authgroups; /* auth groups matched during authorization process */ /* attributes stored in other files */ std::list inputdata; /* input files */ std::list outputdata; /* output files */ /* attributes taken from job description */ std::list rte; /* runtime environments */ std::string action; /* what to do - must be 'request' */ //std::string rc; /* url to contact replica collection */ std::string stdin_; /* file name for stdin handle */ std::string stdout_; /* file name for stdout handle */ std::string stderr_; /* file name for stderr handle */ //std::string cache; /* cache default, yes/no */ //int gsiftpthreads; /* number of parallel connections to use // during gsiftp down/uploads */ bool dryrun; /* if true, this is test job */ unsigned long long int diskspace; /* anount of requested space on disk (unit bytes) */ std::list activityid; /* ID of activity */ std::map > tokenclaim; std::string transfershare; /* share assigned to job for transfer fair share */ // Default values which are not zero static int const prioritydefault; /* default priority for the job if not specified */ static const char* const transfersharedefault; /* default value for transfer share */ }; /* Information stored in job.#.lrms_done file */ class LRMSResult { private: int code_; std::string description_; bool set(const char* s); public: LRMSResult(void):code_(-1),description_("") { }; LRMSResult(const std::string& s) { set(s.c_str()); }; LRMSResult(int c):code_(c),description_("") { }; LRMSResult(const char* s) { set(s); }; LRMSResult& operator=(const std::string& s) { set(s.c_str()); return *this; }; LRMSResult& operator=(const char* s) { set(s); return *this; }; int code(void) const { return code_; }; const std::string& description(void) const { return description_; }; }; std::istream& operator>>(std::istream& i,LRMSResult &r); std::ostream& operator<<(std::ostream& i,const LRMSResult &r); } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/ControlFileHandling.cpp0000644000000000000000000000013115067751327027030 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 30 ctime=1759499029.761709025 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/ControlFileHandling.cpp0000644000175000002070000007633715067751327030753 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "../run/RunRedirected.h" #include "../conf/GMConfig.h" #include "../jobs/GMJob.h" #include "ControlFileHandling.h" namespace ARex { // Files in control dir, job.id.sfx const char * const sfx_restart = "restart"; // Mark to tell A-REX to restart job const char * const sfx_clean = "clean"; // Mark to tell A-REX to 
clean job const char * const sfx_cancel = "cancel"; // Mark to tell A-REX to cancel job const char * const sfx_failed = "failed"; // Description of failure const char * const sfx_status = "status"; // Current job status const char * const sfx_local = "local"; // Local information about job const char * const sfx_errors = "errors"; // Log of data staging and job submission const char * const sfx_desc = "description"; // Job description sent by user const char * const sfx_diag = "diag"; // Diagnostic info about finished job const char * const sfx_lrmsoutput = "comment"; // Additional information from LRMS const char * const sfx_acl = "acl"; // ACL information for job const char * const sfx_proxy = "proxy"; // Delegated proxy const char * const sfx_xml = "xml"; // XML description of job const char * const sfx_input = "input"; // Input files required by job const char * const sfx_output = "output"; // Output files written by job const char * const sfx_inputstatus = "input_status"; // Input files staged by client const char * const sfx_outputstatus = "output_status";// Output files already staged out const char * const sfx_statistics = "statistics"; // Statistical information on data staging const char * const sfx_lrmsdone = "lrms_done"; // Job execution in lrms exit code and failure reason const char * const sfx_lrmsjob = "lrms_job"; // File LRMS backends keep their specific information const char * const sfx_proxy_tmp = "proxy_tmp"; const char * const sfx_grami = "grami"; // Sub-directories for different jobs states const char * const subdir_new = "accepting"; // Submitted but not yet picked up by A-REX const char * const subdir_cur = "processing"; // Being processed by A-REX const char * const subdir_old = "finished"; // Finished or deleted jobs const char * const subdir_rew = "restarting"; // Jobs waiting to restart static Arc::Logger& logger = Arc::Logger::getRootLogger(); static job_state_t job_state_read_file(const std::string &fname,bool &pending); static bool job_state_write_file(const std::string &fname,job_state_t state,bool pending); static bool job_mark_put(Arc::FileAccess& fa, const std::string &fname); static bool job_mark_remove(Arc::FileAccess& fa,const std::string &fname); bool fix_file_permissions(const std::string &fname,bool executable) { mode_t mode = S_IRUSR | S_IWUSR; if(executable) { mode |= S_IXUSR; }; return (chmod(fname.c_str(),mode) == 0); } static bool fix_file_permissions(Arc::FileAccess& fa,const std::string &fname,bool executable = false) { mode_t mode = S_IRUSR | S_IWUSR; if(executable) { mode |= S_IXUSR; }; return fa.fa_chmod(fname.c_str(),mode); } bool fix_file_permissions(const std::string &fname,const GMJob &job,const GMConfig& config) { mode_t mode = S_IRUSR | S_IWUSR; uid_t uid = job.get_user().get_uid(); gid_t gid = job.get_user().get_gid(); if(!config.MatchShareUid(uid)) { mode |= S_IRGRP; if(!config.MatchShareGid(gid)) { mode |= S_IROTH; }; }; return (chmod(fname.c_str(),mode) == 0); } bool fix_file_permissions_in_session(const std::string &fname,const GMJob &job,const GMConfig &config,bool executable) { mode_t mode = S_IRUSR | S_IWUSR; if(executable) { mode |= S_IXUSR; }; if(config.StrictSession()) { uid_t uid = getuid()==0?job.get_user().get_uid():getuid(); uid_t gid = getgid()==0?job.get_user().get_gid():getgid(); Arc::FileAccess fa; if(!fa.fa_setuid(uid,gid)) return false; return fa.fa_chmod(fname,mode); }; return (chmod(fname.c_str(),mode) == 0); } bool fix_file_owner(const std::string &fname,const GMJob& job) { return fix_file_owner(fname, 
job.get_user()); } bool fix_file_owner(const std::string &fname,const Arc::User& user) { if(getuid() == 0) { if(lchown(fname.c_str(),user.get_uid(),user.get_gid()) == -1) { logger.msg(Arc::ERROR,"Failed setting file owner: %s",fname); return false; }; }; return true; } bool check_file_owner(const std::string &fname) { uid_t uid; gid_t gid; time_t t; return check_file_owner(fname,uid,gid,t); } bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid) { time_t t; return check_file_owner(fname,uid,gid,t); } bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid,time_t &t) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return false; if(!S_ISREG(st.st_mode)) return false; uid=st.st_uid; gid=st.st_gid; t=st.st_ctime; /* superuser can't run jobs */ if(uid == 0) return false; /* accept any file if superuser */ if(getuid() != 0) { if(uid != getuid()) return false; }; return true; } static const std::string::size_type id_split_chunk = 3; static const std::string::size_type id_split_num = 4; std::string job_control_path(std::string const& control_dir, std::string const& id, char const* sfx) { std::string path(control_dir); path += "/jobs/"; int num = id_split_num; for(std::string::size_type pos = 0; pos < id.length(); pos+=id_split_chunk) { if (--num == 0) { path.append(id,pos,std::string::npos); path += "/"; break; }; path.append(id,pos,id_split_chunk); path += "/"; }; if(sfx) path += sfx; return path; } //static std::string job_control_path(std::string const& control_dir, std::string const& id, char const* sfx) { // return control_dir + id + sfx; //} bool job_lrms_mark_check(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_lrmsdone); return job_mark_check(fname); } bool job_lrms_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_lrmsdone); return job_mark_remove(fname); } LRMSResult job_lrms_mark_read(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_lrmsdone); LRMSResult r("-1 Internal error"); std::ifstream f(fname.c_str()); if(! 
f.is_open() ) return r; f>>r; return r; } bool job_cancel_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + job.get_id() + sfx_cancel; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_cancel_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + id + sfx_cancel; return job_mark_check(fname); } bool job_cancel_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + id + sfx_cancel; return job_mark_remove(fname); } bool job_restart_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + job.get_id() + sfx_restart; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_restart_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + id + sfx_restart; return job_mark_check(fname); } bool job_restart_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + id + sfx_restart; return job_mark_remove(fname); } bool job_clean_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + job.get_id() + sfx_clean; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_clean_mark_check(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + id + sfx_clean; return job_mark_check(fname); } bool job_clean_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/" + subdir_new + "/" + id + sfx_clean; return job_mark_remove(fname); } bool job_failed_mark_put(const GMJob &job,const GMConfig &config,const std::string &content) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_failed); if(job_mark_size(fname) > 0) return true; return job_mark_write(fname,content) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_failed_mark_add(const GMJob &job,const GMConfig &config,const std::string &content) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_failed); return job_mark_add(fname,content) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_failed_mark_check(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_failed); return job_mark_check(fname); } bool job_failed_mark_remove(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_failed); return job_mark_remove(fname); } std::string job_failed_mark_read(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_failed); return job_mark_read(fname); } bool job_controldiag_mark_put(const GMJob &job,const GMConfig &config,char const * const args[]) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_diag); if(!job_mark_put(fname)) return false; if(!fix_file_owner(fname,job)) return false; if(!fix_file_permissions(fname)) return false; if(args == NULL) return true; struct stat st; if(args[0] && stat(args[0], &st) != 0) return true; int h = open(fname.c_str(),O_WRONLY); if(h == -1) return false; int r; 
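// ---------------------------------------------------------------------------
// [Editor's illustration -- not part of the original source.] job_control_path
// above fans job ids out under <control_dir>/jobs/ in chunks of 3 characters,
// at most 4 components, the last taking the remainder, so that no single
// directory accumulates too many entries. fanout_path is a hypothetical
// standalone copy of that logic; the control dir and id are made up:
// ---------------------------------------------------------------------------
#if 0 // example only
#include <iostream>
#include <string>

static std::string fanout_path(const std::string& dir, const std::string& id,
                               const char* sfx) {
  std::string path = dir + "/jobs/";
  int num = 4;
  for (std::string::size_type pos = 0; pos < id.length(); pos += 3) {
    if (--num == 0) { path.append(id, pos, std::string::npos); path += "/"; break; }
    path.append(id, pos, 3); path += "/";
  }
  if (sfx) path += sfx;
  return path;
}

int main() {
  // prints: /var/spool/arc/jobs/ABC/DEF/GHI/JKLMNOP/status
  std::cout << fanout_path("/var/spool/arc", "ABCDEFGHIJKLMNOP", "status") << '\n';
}
#endif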
int t = 10; r=RunRedirected::run(job.get_user(),"job_controldiag_mark_put",-1,h,-1,(char**)args,t); close(h); if(r != 0) return false; return true; } bool job_diagnostics_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = job.SessionDir(); if(fname.empty()) return false; fname += "."; fname += sfx_diag; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return false; return job_mark_put(fa,fname) && fix_file_permissions(fa,fname); }; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_diagnostics_mark_remove(const GMJob &job,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_diag); bool res1 = job_mark_remove(fname); fname = job.SessionDir(); if(fname.empty()) return false; fname += "."; fname += sfx_diag; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return res1; return (res1 | job_mark_remove(fa,fname)); }; return (res1 | job_mark_remove(fname)); } bool job_diagnostics_mark_move(const GMJob &job,const GMConfig &config) { std::string fname1; if (job.GetLocalDescription() && !job.GetLocalDescription()->sessiondir.empty()) fname1 = job.GetLocalDescription()->sessiondir; else fname1 = job.SessionDir(); if(fname1.empty()) return false; fname1 += "."; fname1 += sfx_diag; std::string fname2 = job_control_path(config.ControlDir(), job.get_id(), sfx_diag); std::string data; if(config.StrictSession()) { Arc::FileRead(fname1, data, job.get_user().get_uid(), job.get_user().get_gid()); Arc::FileDelete(fname1, job.get_user().get_uid(), job.get_user().get_gid()); } else { Arc::FileRead(fname1, data); Arc::FileDelete(fname1); } // behaviour is to create file in control dir even if reading fails return Arc::FileCreate(fname2, data) && fix_file_owner(fname2,job) && fix_file_permissions(fname2,job,config); } bool job_lrmsoutput_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = job.SessionDir(); if(fname.empty()) return false; fname += "."; fname += sfx_lrmsoutput; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return false; return job_mark_put(fa,fname) && fix_file_permissions(fa,fname); }; return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_lrmsoutput_mark_remove(const GMJob &job,const GMConfig &config) { std::string fname = job.SessionDir(); if(fname.empty()) return false; fname += "."; fname += sfx_lrmsoutput; if(config.StrictSession()) { Arc::FileAccess fa; if(!fa.fa_setuid(job.get_user().get_uid(),job.get_user().get_gid())) return false; return job_mark_remove(fa,fname); }; return job_mark_remove(fname); } std::string job_mark_read(const std::string &fname) { std::string s(""); Arc::FileRead(fname, s); return s; } bool job_mark_write(const std::string &fname,const std::string &content) { return Arc::FileCreate(fname, content); } bool job_mark_add(const std::string &fname,const std::string &content) { int h=open(fname.c_str(),O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR); if(h==-1) return false; write(h,(const void *)content.c_str(),content.length()); close(h); return true; } bool job_mark_put(const std::string &fname) { int h=open(fname.c_str(),O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR); if(h==-1) return false; close(h); return true; } static bool job_mark_put(Arc::FileAccess& fa, const std::string &fname) { 
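// ---------------------------------------------------------------------------
// [Editor's illustration -- not part of the original source.] The *_mark_*
// helpers above implement a flag-file idiom: the mere existence of an empty
// file (e.g. <id>.cancel) carries the signal, and its content is irrelevant.
// A condensed sketch with hypothetical names (flag_put/flag_check/flag_remove):
// ---------------------------------------------------------------------------
#if 0 // example only
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <errno.h>

static bool flag_put(const char* fname) {    // create the empty flag file
  int h = open(fname, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
  if (h == -1) return false;
  close(h);
  return true;
}
static bool flag_check(const char* fname) {  // is the flag present?
  struct stat st;
  return lstat(fname, &st) == 0 && S_ISREG(st.st_mode);
}
static bool flag_remove(const char* fname) { // idempotent removal
  return unlink(fname) == 0 || errno == ENOENT;
}
#endif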
if(!fa.fa_open(fname,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR)) return false; fa.fa_close(); return true; } bool job_mark_check(const std::string &fname) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return false; if(!S_ISREG(st.st_mode)) return false; return true; } bool job_mark_remove(const std::string &fname) { if(unlink(fname.c_str()) != 0) { if(errno != ENOENT) return false; }; return true; } static bool job_mark_remove(Arc::FileAccess& fa,const std::string &fname) { if(!fa.fa_unlink(fname)) { if(fa.geterrno() != ENOENT) return false; }; return true; } time_t job_mark_time(const std::string &fname) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return 0; if(st.st_mtime == 0) st.st_mtime = 1; // doomsday protection return st.st_mtime; } long int job_mark_size(const std::string &fname) { struct stat st; if(lstat(fname.c_str(),&st) != 0) return 0; if(!S_ISREG(st.st_mode)) return 0; return st.st_size; } bool job_errors_mark_put(const GMJob &job,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_errors); return job_mark_put(fname) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_errors_mark_add(const GMJob &job,const GMConfig &config,const std::string &msg) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_errors); return job_mark_add(fname,msg) && fix_file_owner(fname,job) && fix_file_permissions(fname); } std::string job_errors_filename(const JobId &id, const GMConfig &config) { return job_control_path(config.ControlDir(), id, sfx_errors); } time_t job_state_time(const JobId &id,const GMConfig &config) { std::string fname; time_t t; fname = config.ControlDir() + "/" + subdir_cur + "/" + id + "." + sfx_status; t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_new + "/" + id + "." + sfx_status; t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_rew + "/" + id + "." + sfx_status; t = job_mark_time(fname); if(t != 0) return t; fname = config.ControlDir() + "/" + subdir_old + "/" + id + "." + sfx_status; return job_mark_time(fname); } job_state_t job_state_read_file(const JobId &id,const GMConfig &config) { bool pending; return job_state_read_file(id, config, pending); } job_state_t job_state_read_file(const JobId &id,const GMConfig &config,bool& pending) { std::string fname; job_state_t st; fname = config.ControlDir() + "/" + subdir_cur + "/" + id + "." + sfx_status; st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_new + "/" + id + "." + sfx_status; st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_rew + "/" + id + "." + sfx_status; st = job_state_read_file(fname,pending); if(st != JOB_STATE_DELETED) return st; fname = config.ControlDir() + "/" + subdir_old + "/" + id + "." + sfx_status; return job_state_read_file(fname,pending); } bool job_state_write_file(const GMJob &job,const GMConfig &config,job_state_t state,bool pending) { std::string fname; if(state == JOB_STATE_ACCEPTED) { fname = config.ControlDir() + "/" + subdir_old + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_cur + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_rew + "/" + job.get_id() + "." 
+ sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_new + "/" + job.get_id() + "." + sfx_status; } else if((state == JOB_STATE_FINISHED) || (state == JOB_STATE_DELETED)) { fname = config.ControlDir() + "/" + subdir_new + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_cur + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_rew + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_old + "/" + job.get_id() + "." + sfx_status; } else { fname = config.ControlDir() + "/" + subdir_new + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_old + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_rew + "/" + job.get_id() + "." + sfx_status; remove(fname.c_str()); fname = config.ControlDir() + "/" + subdir_cur + "/" + job.get_id() + "." + sfx_status; }; return job_state_write_file(fname,state,pending) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } static job_state_t job_state_read_file(const std::string &fname,bool &pending) { std::string data; if(!Arc::FileRead(fname, data)) { if(!job_mark_check(fname)) return JOB_STATE_DELETED; /* job does not exist */ return JOB_STATE_UNDEFINED; /* can't open file */ }; data = data.substr(0, data.find('\n')); /* interpret information */ if(data.substr(0, 8) == "PENDING:") { data = data.substr(8); pending=true; } else { pending=false; }; return GMJob::get_state(data.c_str()); } static bool job_state_write_file(const std::string &fname,job_state_t state,bool pending) { std::string data; if (pending) data += "PENDING:"; data += GMJob::get_state_name(state); return Arc::FileCreate(fname, data); } time_t job_description_time(const JobId &id,const GMConfig &config) { std::string fname = job_control_path(config.ControlDir(), id, sfx_desc); return job_mark_time(fname); } bool job_description_read_file(const JobId &id,const GMConfig &config,std::string &desc) { std::string fname = job_control_path(config.ControlDir(), id, sfx_desc); return job_description_read_file(fname,desc); } bool job_description_read_file(const std::string &fname,std::string &desc) { if (!Arc::FileRead(fname, desc)) return false; while (desc.find('\n') != std::string::npos) desc.erase(desc.find('\n'), 1); return true; } bool job_description_write_file(const GMJob &job,const GMConfig &config,const std::string &desc) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_desc); return Arc::FileCreate(fname, desc) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_acl_read_file(const JobId &id,const GMConfig &config,std::string &acl) { std::string fname = job_control_path(config.ControlDir(), id, sfx_acl); return job_description_read_file(fname,acl); } bool job_acl_write_file(const JobId &id,const GMConfig &config,const std::string &acl) { std::string fname = job_control_path(config.ControlDir(), id, sfx_acl); return Arc::FileCreate(fname, acl); } bool job_xml_read_file(const JobId &id,const GMConfig &config,std::string &xml) { std::string fname = job_control_path(config.ControlDir(), id, sfx_xml); return job_description_read_file(fname,xml); } bool job_xml_check_file(const JobId &id,const GMConfig &config) { std::string fname = config.ControlDir() + "/job." 
+ id + sfx_xml; return job_mark_check(fname); } bool job_xml_write_file(const JobId &id,const GMConfig &config,const std::string &xml) { std::string fname = job_control_path(config.ControlDir(), id, sfx_xml); return Arc::FileCreate(fname, xml); } bool job_local_write_file(const GMJob &job,const GMConfig &config,const JobLocalDescription &job_desc) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_local); return job_local_write_file(fname,job_desc) && fix_file_owner(fname,job) && fix_file_permissions(fname,job,config); } bool job_local_write_file(const std::string &fname,const JobLocalDescription &job_desc) { return job_desc.write(fname); } bool job_local_read_file(const JobId &id,const GMConfig &config,JobLocalDescription &job_desc) { std::string fname = job_control_path(config.ControlDir(), id, sfx_local); return job_local_read_file(fname,job_desc); } bool job_local_read_file(const std::string &fname,JobLocalDescription &job_desc) { return job_desc.read(fname); } bool job_local_read_var(const std::string &fname,const std::string &vnam,std::string &value) { return JobLocalDescription::read_var(fname,vnam,value); } bool job_local_read_cleanuptime(const JobId &id,const GMConfig &config,time_t &cleanuptime) { std::string fname = job_control_path(config.ControlDir(), id, sfx_local); std::string str; if(!job_local_read_var(fname,"cleanuptime",str)) return false; cleanuptime=Arc::Time(str).GetTime(); return true; } bool job_local_read_failed(const JobId &id,const GMConfig &config,std::string &state,std::string &cause) { state = ""; cause = ""; std::string fname = job_control_path(config.ControlDir(), id, sfx_local); job_local_read_var(fname,"failedstate",state); job_local_read_var(fname,"failedcause",cause); return true; } bool job_local_read_delegationid(const JobId &id,const GMConfig &config,std::string &delegationid) { std::string fname = job_control_path(config.ControlDir(), id, sfx_local); if(!job_local_read_var(fname,"cleanuptime",delegationid)) return false; return true; } /* job.ID.input functions */ bool job_input_write_file(const GMJob &job,const GMConfig &config,std::list &files) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_input); return job_Xput_write_file(fname,files) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_input_read_file(const JobId &id,const GMConfig &config,std::list &files) { std::string fname = job_control_path(config.ControlDir(), id, sfx_input); return job_Xput_read_file(fname,files); } bool job_input_status_add_file(const GMJob &job,const GMConfig &config,const std::string& file) { // 1. lock // 2. add // 3. 
unlock std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_inputstatus); Arc::FileLock lock(fname); for (int i = 10; !lock.acquire() && i >= 0; --i) { if (i == 0) return false; sleep(1); } std::string data; if (!Arc::FileRead(fname, data) && errno != ENOENT) { lock.release(); return false; } std::ostringstream line; line<& files) { std::string fname = job_control_path(config.ControlDir(), id, sfx_inputstatus); Arc::FileLock lock(fname); for (int i = 10; !lock.acquire() && i >= 0; --i) { if (i == 0) return false; sleep(1); } bool r = Arc::FileRead(fname, files); lock.release(); return r; } /* job.ID.output functions */ bool job_output_write_file(const GMJob &job,const GMConfig &config,std::list &files,job_output_mode mode) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_output); return job_Xput_write_file(fname,files,mode) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_output_read_file(const JobId &id,const GMConfig &config,std::list &files) { std::string fname = job_control_path(config.ControlDir(), id, sfx_output); return job_Xput_read_file(fname,files); } bool job_output_status_add_file(const GMJob &job,const GMConfig &config,const FileData& file) { // Not using lock here because concurrent read/write is not expected std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_outputstatus); std::string data; if (!Arc::FileRead(fname, data) && errno != ENOENT) return false; std::ostringstream line; line< &files) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_outputstatus); return job_Xput_write_file(fname,files) && fix_file_owner(fname,job) && fix_file_permissions(fname); } bool job_output_status_read_file(const JobId &id,const GMConfig &config,std::list &files) { std::string fname = job_control_path(config.ControlDir(), id, sfx_outputstatus); return job_Xput_read_file(fname,files); } /* common functions */ bool job_Xput_write_file(const std::string &fname,std::list &files,job_output_mode mode, uid_t uid, gid_t gid) { std::ostringstream s; for(FileData::iterator i=files.begin();i!=files.end(); ++i) { if(mode == job_output_all) { s << (*i) << std::endl; } else if(mode == job_output_success) { if(i->ifsuccess) { s << (*i) << std::endl; } else { // This case is handled at higher level }; } else if(mode == job_output_cancel) { if(i->ifcancel) { s << (*i) << std::endl; } else { // This case is handled at higher level }; } else if(mode == job_output_failure) { if(i->iffailure) { s << (*i) << std::endl; } else { // This case is handled at higher level }; }; }; if (!Arc::FileCreate(fname, s.str(), uid, gid)) return false; return true; } bool job_Xput_read_file(const std::string &fname,std::list &files, uid_t uid, gid_t gid) { std::list file_content; if (!Arc::FileRead(fname, file_content, uid, gid)) return false; for(std::list::iterator i = file_content.begin(); i != file_content.end(); ++i) { FileData fd; std::istringstream s(*i); s >> fd; if(!fd.pfn.empty()) files.push_back(fd); }; return true; } std::string job_proxy_filename(const JobId &id, const GMConfig &config){ return job_control_path(config.ControlDir(), id, sfx_proxy); } bool job_proxy_write_file(const GMJob &job,const GMConfig &config,const std::string &cred) { std::string fname = job_control_path(config.ControlDir(), job.get_id(), sfx_proxy); return Arc::FileCreate(fname, cred, 0, 0, S_IRUSR | S_IWUSR) && fix_file_owner(fname,job); } bool job_proxy_read_file(const JobId &id,const GMConfig &config,std::string 
&cred) { std::string fname = job_control_path(config.ControlDir(), id, sfx_proxy); return Arc::FileRead(fname, cred, 0, 0); } bool job_clean_finished(const JobId &id,const GMConfig &config) { std::string fname; fname = job_control_path(config.ControlDir(), id, sfx_proxy_tmp); remove(fname.c_str()); fname = job_control_path(config.ControlDir(), id, sfx_lrmsdone); remove(fname.c_str()); fname = job_control_path(config.ControlDir(), id, sfx_lrmsjob); remove(fname.c_str()); return true; } bool job_clean_deleted(const GMJob &job,const GMConfig &config,std::list cache_per_job_dirs) { std::string id = job.get_id(); job_clean_finished(id,config); std::string session; if(job.GetLocalDescription() && !job.GetLocalDescription()->sessiondir.empty()) session = job.GetLocalDescription()->sessiondir; else session = job.SessionDir(); std::string fname; fname = job_control_path(config.ControlDir(),id,sfx_proxy); remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/"+id+"."+sfx_restart; remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_errors); remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/"+id+"."+sfx_cancel; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_new+"/"+id+"."+sfx_clean; remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_output); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_input); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,"grami_log"); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_outputstatus); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_inputstatus); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_statistics); remove(fname.c_str()); if(!session.empty()) { fname = session+"."+sfx_lrmsoutput; remove(fname.c_str()); /* remove session directory */ if(config.StrictSession()) { Arc::DirDelete(session, true, job.get_user().get_uid(), job.get_user().get_gid()); } else { Arc::DirDelete(session); } } // remove cache per-job links, in case this failed earlier for (std::list::iterator i = cache_per_job_dirs.begin(); i != cache_per_job_dirs.end(); i++) { Arc::DirDelete((*i) + "/" + id); } return true; } bool job_clean_final(const GMJob &job,const GMConfig &config) { std::string id = job.get_id(); job_clean_finished(id,config); job_clean_deleted(job,config); std::string fname; fname = job_control_path(config.ControlDir(),id,sfx_local); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_grami); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_failed); remove(fname.c_str()); job_diagnostics_mark_remove(job,config); job_lrmsoutput_mark_remove(job,config); fname = config.ControlDir()+"/"+subdir_new+"/"+id+"."+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_cur+"/"+id+"."+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_old+"/"+id+"."+sfx_status; remove(fname.c_str()); fname = config.ControlDir()+"/"+subdir_rew+"/"+id+"."+sfx_status; remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_desc); remove(fname.c_str()); fname = job_control_path(config.ControlDir(),id,sfx_xml); remove(fname.c_str()); return true; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/ControlFileHandling.h0000644000000000000000000000013115067751327026475 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 30 
ctime=1759499029.763833626 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/ControlFileHandling.h0000644000175000002070000002552215067751327030406 0ustar00mockbuildmock00000000000000#ifndef GRID_MANAGER_INFO_FILES_H #define GRID_MANAGER_INFO_FILES_H #include #include #include "../jobs/GMJob.h" #include "ControlFileContent.h" namespace ARex { class GMConfig; class GMJob; /* Definition of functions used to manipulate files used to stored information about jobs. Most used arguments: job - description of job. Mostly used to obtain job identifier and directories associated with job. config - GM configuration. Used to get control dir information. id - job identifier. Used to derive names of files. */ extern const char * const sfx_failed; extern const char * const sfx_cancel; extern const char * const sfx_restart; extern const char * const sfx_clean; extern const char * const sfx_status; extern const char * const sfx_local; extern const char * const sfx_errors; extern const char * const sfx_desc; extern const char * const sfx_diag; extern const char * const sfx_lrmsoutput; extern const char * const sfx_acl; extern const char * const sfx_proxy; extern const char * const sfx_xml; extern const char * const sfx_input; extern const char * const sfx_output; extern const char * const sfx_inputstatus; extern const char * const sfx_outputstatus; extern const char * const sfx_statistics; extern const char * const sfx_lrms_done; extern const char * const sfx_proxy_tmp; extern const char * const sfx_grami; extern const char * const subdir_new; extern const char * const subdir_cur; extern const char * const subdir_old; extern const char * const subdir_rew; enum job_output_mode { job_output_all, job_output_success, job_output_cancel, job_output_failure }; // Set permissions of file 'fname' to -rw------- or if 'executable' is set // to - -rwx--------- . bool fix_file_permissions(const std::string &fname,bool executable = false); // Set permissions taking into account share uid/gid in GMConfig bool fix_file_permissions(const std::string &fname,const GMJob &job,const GMConfig &config); bool fix_file_permissions_in_session(const std::string &fname,const GMJob &job,const GMConfig &config,bool executable); // Set owner of file 'fname' to one specified in 'job' bool fix_file_owner(const std::string &fname,const GMJob &job); // Set owner to user bool fix_file_owner(const std::string &fname,const Arc::User& user); // Check if file is owned by current user. If user is equivalent to root any // file is accepted. // Returns: // true - belongs // false - does not belong or error. // If file exists 'uid', 'gid' and 't' are set to uid, gid and creation // of that file. bool check_file_owner(const std::string &fname); bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid); bool check_file_owner(const std::string &fname,uid_t &uid,gid_t &gid,time_t &t); // Make path to file in control dir std::string job_control_path(std::string const& control_dir, std::string const& id, char const* sfx); // Check existence, remove and read content of file used to mark // job finish in LRMS. This file is created by external script/executable // after it detects job exited and contains exit code of that job. bool job_lrms_mark_check(const JobId &id,const GMConfig &config); bool job_lrms_mark_remove(const JobId &id,const GMConfig &config); LRMSResult job_lrms_mark_read(const JobId &id,const GMConfig &config); // Create, check existence and remove file used to mark cancellation // request for specified job. 
The content of the file is not important. bool job_cancel_mark_put(const GMJob &job,const GMConfig &config); bool job_cancel_mark_check(const JobId &id,const GMConfig &config); bool job_cancel_mark_remove(const JobId &id,const GMConfig &config); // Create, check existence and remove file used to mark a request to // restart the job. The content of the file is not important. bool job_restart_mark_put(const GMJob &job,const GMConfig &config); bool job_restart_mark_check(const JobId &id,const GMConfig &config); bool job_restart_mark_remove(const JobId &id,const GMConfig &config); // Same for the file which marks a job cleaning/removal request bool job_clean_mark_put(const GMJob &job,const GMConfig &config); bool job_clean_mark_check(const JobId &id,const GMConfig &config); bool job_clean_mark_remove(const JobId &id,const GMConfig &config); // Create (with given content), add to content, check for existence, delete // and read content of file used to mark failure of the job. // The content describes the reason for the failure (usually 1-2 strings). bool job_failed_mark_put(const GMJob &job,const GMConfig &config,const std::string &content = ""); bool job_failed_mark_add(const GMJob &job,const GMConfig &config,const std::string &content); bool job_failed_mark_check(const JobId &id,const GMConfig &config); bool job_failed_mark_remove(const JobId &id,const GMConfig &config); std::string job_failed_mark_read(const JobId &id,const GMConfig &config); // Create, add content to, delete and move from the session to the control directory // the file holding information about resources used by the job while running. // The content is normally produced by the "time" utility. bool job_controldiag_mark_put(const GMJob &job,const GMConfig &config,char const * const args[]); bool job_diagnostics_mark_put(const GMJob &job,const GMConfig &config); bool job_diagnostics_mark_remove(const GMJob &job,const GMConfig &config); bool job_diagnostics_mark_move(const GMJob &job,const GMConfig &config); // Same for the file containing messages from the LRMS, which could give additional // information about the reason for job failure. bool job_lrmsoutput_mark_put(const GMJob &job,const GMConfig &config); // Common purpose functions, used by previous functions. std::string job_mark_read(const std::string &fname); bool job_mark_write(const std::string &fname,const std::string &content); bool job_mark_add(const std::string &fname,const std::string &content); bool job_mark_put(const std::string &fname); bool job_mark_check(const std::string &fname); bool job_mark_remove(const std::string &fname); time_t job_mark_time(const std::string &fname); long int job_mark_size(const std::string &fname); // Create file to store stderr of external utilities used to stage-in/out // data, submit/cancel job in LRMS. bool job_errors_mark_put(const GMJob &job,const GMConfig &config); bool job_errors_mark_add(const GMJob &job,const GMConfig &config,const std::string &msg); std::string job_errors_filename(const JobId &id, const GMConfig &config); // Get modification time of file used to store state of the job. time_t job_state_time(const JobId &id,const GMConfig &config); // Read and write file storing state of the job. job_state_t job_state_read_file(const JobId &id,const GMConfig &config); job_state_t job_state_read_file(const JobId &id,const GMConfig &config,bool &pending); bool job_state_write_file(const GMJob &job,const GMConfig &config,job_state_t state,bool pending);
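/* Example: typical lifecycle of a mark file, using the cancel mark (an
   illustrative sketch only; it assumes an initialized GMConfig 'config'
   and a GMJob 'job' obtained elsewhere in A-REX):

     if(job_cancel_mark_put(job, config)) {        // request cancellation
       // ... later, typically from the job processing loop ...
       if(job_cancel_mark_check(job.get_id(), config)) {
         job_cancel_mark_remove(job.get_id(), config);
       }
     }
*/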
// Get modification time of file used to store description of the job. time_t job_description_time(const JobId &id,const GMConfig &config); // Read and write file used to store description of job. bool job_description_read_file(const JobId &id,const GMConfig &config,std::string &desc); bool job_description_read_file(const std::string &fname,std::string &desc); bool job_description_write_file(const GMJob &job,const GMConfig &config,const std::string &desc); // Read and write file used to store ACL of job. bool job_acl_read_file(const JobId &id,const GMConfig &config,std::string &acl); bool job_acl_write_file(const JobId &id,const GMConfig &config,const std::string &acl); // Read and write xml file containing job description. bool job_xml_read_file(const JobId &id,const GMConfig &config,std::string &xml); bool job_xml_write_file(const JobId &id,const GMConfig &config,const std::string &xml); bool job_xml_check_file(const JobId &id,const GMConfig &config); // Write and read file, containing most important/needed job parameters. // Information is passed to/from file through 'job' object. bool job_local_write_file(const GMJob &job,const GMConfig &config,const JobLocalDescription &job_desc); bool job_local_write_file(const std::string &fname,const JobLocalDescription &job_desc); bool job_local_read_file(const JobId &id,const GMConfig &config,JobLocalDescription &job_desc); bool job_local_read_file(const std::string &fname,JobLocalDescription &job_desc); // Read only some attributes from previously mentioned file. bool job_local_read_cleanuptime(const JobId &id,const GMConfig &config,time_t &cleanuptime); bool job_local_read_failed(const JobId &id,const GMConfig &config,std::string &state,std::string &cause); bool job_local_read_delegationid(const JobId &id,const GMConfig &config,std::string &delegationid); // Write and read file containing list of input files. Each line of the file // contains the name of an input file relative to the session directory and, // optionally, the source from which it should be transferred. bool job_input_write_file(const GMJob &job,const GMConfig &config,std::list<FileData> &files); bool job_input_read_file(const JobId &id,const GMConfig &config,std::list<FileData> &files); bool job_input_status_add_file(const GMJob &job,const GMConfig &config,const std::string& file = ""); bool job_input_status_read_file(const JobId &id,const GMConfig &config,std::list<std::string>& files); // Write and read file containing list of output files. Each line of the file // contains the name of an output file relative to the session directory and, // optionally, the destination to which it should be transferred. bool job_output_write_file(const GMJob &job,const GMConfig &config,std::list<FileData> &files,job_output_mode mode = job_output_all); bool job_output_read_file(const JobId &id,const GMConfig &config,std::list<FileData> &files); bool job_output_status_add_file(const GMJob &job,const GMConfig &config,const FileData& file); bool job_output_status_write_file(const GMJob &job,const GMConfig &config,std::list<FileData>& files); bool job_output_status_read_file(const JobId &id,const GMConfig &config,std::list<FileData>& files); // Common functions for input/output files. bool job_Xput_read_file(const std::string &fname,std::list<FileData> &files, uid_t uid = 0, gid_t gid = 0); bool job_Xput_write_file(const std::string &fname,std::list<FileData> &files, job_output_mode mode = job_output_all, uid_t uid = 0, gid_t gid = 0);
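/* Example: reading the list of output files (an illustrative sketch only;
   'id' is a JobId and 'config' a GMConfig prepared by the caller):

     std::list<FileData> outputs;
     if(job_output_read_file(id, config, outputs)) {
       // each FileData entry names a session-directory file and,
       // optionally, the destination it should be transferred to
     }
*/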
// Return filename storing job's proxy. std::string job_proxy_filename(const JobId &id, const GMConfig &config); bool job_proxy_write_file(const GMJob &job,const GMConfig &config,const std::string &cred); bool job_proxy_read_file(const JobId &id,const GMConfig &config,std::string &cred); // Remove all files which should be removed after the job's state becomes FINISHED bool job_clean_finished(const JobId &id,const GMConfig &config); // Remove all files which should be removed after the job's state becomes DELETED bool job_clean_deleted(const GMJob &job,const GMConfig &config, std::list<std::string> cache_per_job_dirs=std::list<std::string>()); // Remove all job's files. bool job_clean_final(const GMJob &job,const GMConfig &config); } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/PaxHeaders/README0000644000000000000000000000013115067751327023317 xustar0030 mtime=1759498967.754640094 29 atime=1759498967.86349362 30 ctime=1759499029.760656922 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/files/README0000644000175000002070000000004215067751327025216 0ustar00mockbuildmock00000000000000control and other files handling. nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/log0000644000000000000000000000013015067751425022037 xustar0030 mtime=1759499029.684432993 28 atime=1759499034.7655102 30 ctime=1759499029.684432993 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/0000755000175000002070000000000015067751425024020 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024153 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.671339694 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/Makefile.am0000644000175000002070000000057115067751327026060 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = liblog.la liblog_la_SOURCES = JobLog.cpp JobLog.h JobsMetrics.cpp JobsMetrics.h HeartBeatMetrics.cpp HeartBeatMetrics.h SpaceMetrics.cpp SpaceMetrics.h liblog_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) liblog_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../accounting/libaccounting.la nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/SpaceMetrics.h0000644000000000000000000000013215067751327024652 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.685460551 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/SpaceMetrics.h0000644000175000002070000000237315067751327026561 0ustar00mockbuildmock00000000000000/* write essential information about job started/finished */ #ifndef __GM_SPACE_METRICS_H__ #define __GM_SPACE_METRICS_H__ #include #include #include #include #include #include "../jobs/GMJob.h" #define GMETRIC_STATERATE_UPDATE_INTERVAL 5 // TODO: this value could be set in arc.conf to be tailored to the site namespace ARex { class SpaceMetrics { private: std::recursive_mutex lock; bool enabled; std::string config_filename; std::string tool_path; double freeCache; double totalFreeCache; bool freeCache_update; double freeSession; double totalFreeSession; bool freeSession_update; Arc::Run *proc; std::string proc_stderr; bool RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit); bool CheckRunMetrics(void); static void RunMetricsKicker(void* arg); static void SyncAsync(void* arg); public: SpaceMetrics(void); ~SpaceMetrics(void); void SetEnabled(bool val); /* Set path of configuration file */ void
SetConfig(const char* fname); /* Set path/name of gmetric */ void SetGmetricPath(const char* path); void ReportSpaceChange(const GMConfig& config); void Sync(void); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355024165 xustar0030 mtime=1759498989.970115624 30 atime=1759499017.977255102 30 ctime=1759499029.673320752 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/Makefile.in0000644000175000002070000007457115067751355026105 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/grid-manager/log ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 
$(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) liblog_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../accounting/libaccounting.la am_liblog_la_OBJECTS = liblog_la-JobLog.lo liblog_la-JobsMetrics.lo \ liblog_la-HeartBeatMetrics.lo liblog_la-SpaceMetrics.lo liblog_la_OBJECTS = $(am_liblog_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = liblog_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(liblog_la_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/liblog_la-HeartBeatMetrics.Plo \ ./$(DEPDIR)/liblog_la-JobLog.Plo \ ./$(DEPDIR)/liblog_la-JobsMetrics.Plo \ ./$(DEPDIR)/liblog_la-SpaceMetrics.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(liblog_la_SOURCES) DIST_SOURCES = $(liblog_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. 
am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = 
@GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = 
@XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = liblog.la liblog_la_SOURCES = JobLog.cpp JobLog.h JobsMetrics.cpp JobsMetrics.h HeartBeatMetrics.cpp HeartBeatMetrics.h SpaceMetrics.cpp SpaceMetrics.h liblog_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) liblog_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la \ ../accounting/libaccounting.la all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/log/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/grid-manager/log/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } liblog.la: $(liblog_la_OBJECTS) $(liblog_la_DEPENDENCIES) $(EXTRA_liblog_la_DEPENDENCIES) $(AM_V_CXXLD)$(liblog_la_LINK) $(liblog_la_OBJECTS) $(liblog_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblog_la-HeartBeatMetrics.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblog_la-JobLog.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblog_la-JobsMetrics.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/liblog_la-SpaceMetrics.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< liblog_la-JobLog.lo: JobLog.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -MT liblog_la-JobLog.lo -MD -MP -MF $(DEPDIR)/liblog_la-JobLog.Tpo -c -o liblog_la-JobLog.lo `test -f 'JobLog.cpp' || echo '$(srcdir)/'`JobLog.cpp @am__fastdepCXX_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/liblog_la-JobLog.Tpo $(DEPDIR)/liblog_la-JobLog.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobLog.cpp' object='liblog_la-JobLog.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -c -o liblog_la-JobLog.lo `test -f 'JobLog.cpp' || echo '$(srcdir)/'`JobLog.cpp liblog_la-JobsMetrics.lo: JobsMetrics.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -MT liblog_la-JobsMetrics.lo -MD -MP -MF $(DEPDIR)/liblog_la-JobsMetrics.Tpo -c -o liblog_la-JobsMetrics.lo `test -f 'JobsMetrics.cpp' || echo '$(srcdir)/'`JobsMetrics.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/liblog_la-JobsMetrics.Tpo $(DEPDIR)/liblog_la-JobsMetrics.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='JobsMetrics.cpp' object='liblog_la-JobsMetrics.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -c -o liblog_la-JobsMetrics.lo `test -f 'JobsMetrics.cpp' || echo '$(srcdir)/'`JobsMetrics.cpp liblog_la-HeartBeatMetrics.lo: HeartBeatMetrics.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -MT liblog_la-HeartBeatMetrics.lo -MD -MP -MF $(DEPDIR)/liblog_la-HeartBeatMetrics.Tpo -c -o liblog_la-HeartBeatMetrics.lo `test -f 'HeartBeatMetrics.cpp' || echo '$(srcdir)/'`HeartBeatMetrics.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/liblog_la-HeartBeatMetrics.Tpo $(DEPDIR)/liblog_la-HeartBeatMetrics.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='HeartBeatMetrics.cpp' object='liblog_la-HeartBeatMetrics.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -c -o liblog_la-HeartBeatMetrics.lo `test -f 'HeartBeatMetrics.cpp' || echo '$(srcdir)/'`HeartBeatMetrics.cpp liblog_la-SpaceMetrics.lo: SpaceMetrics.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -MT liblog_la-SpaceMetrics.lo -MD -MP -MF $(DEPDIR)/liblog_la-SpaceMetrics.Tpo -c -o liblog_la-SpaceMetrics.lo `test -f 'SpaceMetrics.cpp' || echo '$(srcdir)/'`SpaceMetrics.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/liblog_la-SpaceMetrics.Tpo $(DEPDIR)/liblog_la-SpaceMetrics.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='SpaceMetrics.cpp' 
object='liblog_la-SpaceMetrics.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(liblog_la_CXXFLAGS) $(CXXFLAGS) -c -o liblog_la-SpaceMetrics.lo `test -f 'SpaceMetrics.cpp' || echo '$(srcdir)/'`SpaceMetrics.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/liblog_la-HeartBeatMetrics.Plo -rm -f ./$(DEPDIR)/liblog_la-JobLog.Plo -rm -f ./$(DEPDIR)/liblog_la-JobsMetrics.Plo -rm -f ./$(DEPDIR)/liblog_la-SpaceMetrics.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/liblog_la-HeartBeatMetrics.Plo -rm -f ./$(DEPDIR)/liblog_la-JobLog.Plo -rm -f ./$(DEPDIR)/liblog_la-JobsMetrics.Plo -rm -f ./$(DEPDIR)/liblog_la-SpaceMetrics.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/HeartBeatMetrics.cpp0000644000000000000000000000013215067751327026011 xustar0030 mtime=1759498967.755491979 30 atime=1759498967.865493651 30 ctime=1759499029.681882903 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/HeartBeatMetrics.cpp0000644000175000002070000000763315067751327027714 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include #include #include #include #include "HeartBeatMetrics.h" #include "../conf/GMConfig.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); HeartBeatMetrics::HeartBeatMetrics():enabled(false),proc(NULL) { free = 0; totalfree = 0; time_delta = 0; time_update = false; } HeartBeatMetrics::~HeartBeatMetrics() { } void HeartBeatMetrics::SetEnabled(bool val) { enabled = val; } void HeartBeatMetrics::SetConfig(const char* fname) { config_filename = fname; } void HeartBeatMetrics::SetGmetricPath(const char* path) { tool_path = path; } void HeartBeatMetrics::ReportHeartBeatChange(const GMConfig& config) { if(!enabled) return; // not configured std::unique_lock<std::recursive_mutex> lock_(lock); struct stat st; std::string heartbeat_file(config.ControlDir() + "/gm-heartbeat"); if(Arc::FileStat(heartbeat_file, &st, true)){ time_t time_lastupdate = st.st_mtime; time_t time_now = time(NULL); time_delta = time_now - time_lastupdate; time_update = true; } else{ logger.msg(Arc::ERROR,"Error with heartbeat file: %s",heartbeat_file.c_str()); time_update = false; } Sync(); } bool HeartBeatMetrics::CheckRunMetrics(void) { if(!proc) return true; if(proc->Running()) return false; int run_result = proc->Result(); if(run_result != 0) { logger.msg(Arc::ERROR,": Metrics tool returned error code %i: %s",run_result,proc_stderr); }; delete proc; proc = NULL; return true; } void HeartBeatMetrics::Sync(void) { if(!enabled) return; // not configured std::unique_lock<std::recursive_mutex> lock_(lock); if(!CheckRunMetrics()) return; // Run gmetric to report one change at a time. // Since only one process can be started from Sync(), only one histogram can be sent at a time, therefore return after each call; // Sync() is therefore called multiple times until there are no more histograms that have changed. if(time_update){ if(RunMetrics( std::string("AREX-HEARTBEAT_LAST_SEEN"), Arc::tostring(time_delta), "int32", "sec" )) { time_update = false; return; }; } } bool HeartBeatMetrics::RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit) { if(proc) return false; std::list<std::string> cmd; if(tool_path.empty()) { logger.msg(Arc::ERROR,"gmetric_bin_path empty in arc.conf (should never happen, the default value should be used)"); return false; } else { cmd.push_back(tool_path); }; if(!config_filename.empty()) { cmd.push_back("-c"); cmd.push_back(config_filename); }; cmd.push_back("-n"); cmd.push_back(name); cmd.push_back("-g"); cmd.push_back("arc_system"); cmd.push_back("-v"); cmd.push_back(value); cmd.push_back("-t");//unit-type cmd.push_back(unit_type); cmd.push_back("-u");//unit cmd.push_back(unit); proc = new Arc::Run(cmd); proc->AssignStderr(proc_stderr); proc->AssignKicker(&RunMetricsKicker, this); if(!(proc->Start())) { delete proc; proc = NULL; return false; }; return true; }
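/* For illustration, the heartbeat report assembled by RunMetrics() above is
   equivalent to running the Ganglia gmetric tool as follows (the tool and
   configuration paths here are site-specific examples, not built-in
   defaults):

     gmetric -c /etc/ganglia/gmond.conf -n AREX-HEARTBEAT_LAST_SEEN \
             -g arc_system -v 42 -t int32 -u sec
*/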
void HeartBeatMetrics::SyncAsync(void* arg) { if(arg) { HeartBeatMetrics& it = *reinterpret_cast<HeartBeatMetrics*>(arg); std::unique_lock<std::recursive_mutex> lock_(it.lock); if(it.proc) { // Continue only if no failure in previous call. // Otherwise it can cause storm of failed calls. if(it.proc->Result() == 0) { it.Sync(); }; }; }; } void HeartBeatMetrics::RunMetricsKicker(void* arg) { // Currently it is not allowed to start new external process // from inside process kicker (todo: redesign). // So do it asynchronously from another thread. Arc::CreateThreadFunction(&SyncAsync, arg); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/JobsMetrics.cpp0000644000000000000000000000013215067751327025047 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.679456022 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/JobsMetrics.cpp0000644000175000002070000001422515067751327026755 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include #include #include "JobsMetrics.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); JobStateList::JobStateList(int _limit):limit(_limit){ failures = 0; } JobStateList::~JobStateList(){} JobStateList::JobNode::JobNode(bool _isfailed, std::string _job_id): job_id(_job_id), isfailed(_isfailed) { } JobStateList::JobNode::~JobNode(){} JobStateList::JobNode* JobStateList::NodeInList(std::string _job_id){ std::list<JobNode>::iterator it = nodes.begin(); while(it != nodes.end()){ if(it->job_id == _job_id){ return &(*it); } ++it; } return NULL; } void JobStateList::SetFailure(bool _isfailed,std::string _job_id){ // Check if the node is already in the list, and if it is, update the failure status JobStateList::JobNode* this_node = NodeInList(_job_id); if(this_node){ // existing job in the list if(!this_node->isfailed && _isfailed){ // update the failure-state of the node // only update once (i.e. if node was not failed before) this_node->isfailed=_isfailed; failures++; } } else{ JobStateList::JobNode node(_isfailed,_job_id); nodes.push_back(node); if(_isfailed)failures++; if(nodes.size()>limit){ // list is now 1 too long, remove the old head of the list (oldest job) if(nodes.front().isfailed)failures--; nodes.pop_front(); } } }
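/* Worked example of the sliding window above (illustrative): with limit=3,
   reporting jobs A(ok), B(failed), C(ok), D(failed) leaves the window
   {B,C,D} with failures==2 -- A is popped from the front without touching
   the counter since it had not failed. A later SetFailure(true,"C") marks
   the already-tracked node exactly once and raises failures to 3. */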
JobsMetrics::JobsMetrics():enabled(false),proc(NULL),jobstatelist(100) { job_fail_counter = 0; std::memset(jobs_in_state, 0, sizeof(jobs_in_state)); std::memset(jobs_in_state_changed, 0, sizeof(jobs_in_state_changed)); std::memset(jobs_state_old_new, 0, sizeof(jobs_state_old_new)); std::memset(jobs_state_old_new_changed, 0, sizeof(jobs_state_old_new_changed)); std::memset(jobs_rate, 0, sizeof(jobs_rate)); std::memset(jobs_rate_changed, 0, sizeof(jobs_rate_changed)); fail_changed = false; time_lastupdate = time(NULL); } JobsMetrics::~JobsMetrics() { } void JobsMetrics::SetEnabled(bool val) { enabled = val; } void JobsMetrics::SetConfig(const char* fname) { config_filename = fname; } void JobsMetrics::SetGmetricPath(const char* path) { tool_path = path; } void JobsMetrics::ReportJobStateChange(const GMConfig& config, GMJobRef i, job_state_t old_state, job_state_t new_state) { if(!enabled) return; // not configured std::unique_lock<std::recursive_mutex> lock_(lock); std::string job_id = i->job_id; /* ## - failed jobs -- of the last 100 jobs, the number of failed jobs ## - job states -- number of jobs in different A-REX internal stages */ /* jobstatelist holds the job id and true for a failed or false for a non-failed job, for the 100 latest jobs */ jobstatelist.SetFailure(i->CheckFailure(config),job_id); job_fail_counter = jobstatelist.failures; fail_changed = true; // actual states (jobstates) if(old_state < JOB_STATE_UNDEFINED) { --(jobs_in_state[old_state]); jobs_in_state_changed[old_state] = true; }; if(new_state < JOB_STATE_UNDEFINED) { ++(jobs_in_state[new_state]); jobs_in_state_changed[new_state] = true; }; Sync(); } bool JobsMetrics::CheckRunMetrics(void) { if(!proc) return true; if(proc->Running()) return false; int run_result = proc->Result(); if(run_result != 0) { logger.msg(Arc::ERROR,": Metrics tool returned error code %i: %s",run_result,proc_stderr); }; delete proc; proc = NULL; return true; } void JobsMetrics::Sync(void) { if(!enabled) return; // not configured std::unique_lock<std::recursive_mutex> lock_(lock); if(!CheckRunMetrics()) return; // Run gmetric to report one change at a time. // Since only one process can be started from Sync(), only one histogram can be sent at a time, therefore return after each call; // Sync() is therefore called multiple times until there are no more histograms that have changed. if(fail_changed){ if(RunMetrics( std::string("AREX-JOBS-FAILED-PER-100"), Arc::tostring(job_fail_counter), "int32", "failed" )) { fail_changed = false; return; }; } for(int state = 0; state < JOB_STATE_UNDEFINED; ++state) { if(jobs_in_state_changed[state]) { if(RunMetrics( std::string("AREX-JOBS-IN_STATE-") + Arc::tostring(state) + "-" + GMJob::get_state_name(static_cast<job_state_t>(state)), Arc::tostring(jobs_in_state[state]), "int32", "jobs" )) { jobs_in_state_changed[state] = false; return; }; }; }; } bool JobsMetrics::RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit) { if(proc) return false; std::list<std::string> cmd; if(tool_path.empty()) { logger.msg(Arc::ERROR,"gmetric_bin_path empty in arc.conf (should never happen, the default value should be used)"); return false; } else { cmd.push_back(tool_path); }; if(!config_filename.empty()) { cmd.push_back("-c"); cmd.push_back(config_filename); }; cmd.push_back("-n"); cmd.push_back(name); cmd.push_back("-g"); cmd.push_back("arc_jobs"); cmd.push_back("-v"); cmd.push_back(value); cmd.push_back("-t");//unit-type cmd.push_back(unit_type); cmd.push_back("-u");//unit cmd.push_back(unit); proc = new Arc::Run(cmd); proc->AssignStderr(proc_stderr); proc->AssignKicker(&RunMetricsKicker, this); if(!(proc->Start())) { delete proc; proc = NULL; return false; }; return true; } void JobsMetrics::SyncAsync(void* arg) { if(arg) { JobsMetrics& it = *reinterpret_cast<JobsMetrics*>(arg); std::unique_lock<std::recursive_mutex> lock_(it.lock); if(it.proc) { // Continue only if no failure in previous call. // Otherwise it can cause storm of failed calls. if(it.proc->Result() == 0) { it.Sync(); }; }; }; }
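/* For illustration, each per-state metric emitted by Sync() above results in
   a gmetric invocation of the form (the state number/name pair and the value
   are examples only):

     gmetric -n AREX-JOBS-IN_STATE-0-ACCEPTED -g arc_jobs -v 7 -t int32 -u jobs
*/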
void JobsMetrics::RunMetricsKicker(void* arg) { // Currently it is not allowed to start new external process // from inside process kicker (todo: redesign). // So do it asynchronously from another thread. Arc::CreateThreadFunction(&SyncAsync, arg); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/HeartBeatMetrics.h0000644000000000000000000000013215067751327025456 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.682895525 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/HeartBeatMetrics.h0000644000175000002070000000231715067751327027363 0ustar00mockbuildmock00000000000000/* write essential information about job started/finished */ #ifndef __GM_HEARTBEAT_METRICS_H__ #define __GM_HEARTBEAT_METRICS_H__ #include #include #include #include #include #include "../jobs/GMJob.h" #define GMETRIC_STATERATE_UPDATE_INTERVAL 5 // TODO: this value could be set in arc.conf to be tailored to the site namespace ARex { class HeartBeatMetrics { private: std::recursive_mutex lock; bool enabled; std::string config_filename; std::string tool_path; time_t time_delta; double free; double totalfree; bool time_update; Arc::Run *proc; std::string proc_stderr; bool RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit); bool CheckRunMetrics(void); static void RunMetricsKicker(void* arg); static void SyncAsync(void* arg); public: HeartBeatMetrics(void); ~HeartBeatMetrics(void); void SetEnabled(bool val); /* Set path of configuration file */ void SetConfig(const char* fname); /* Set path/name of gmetric */ void SetGmetricPath(const char* path); void ReportHeartBeatChange(const GMConfig& config); void Sync(void); }; } // namespace ARex #endif
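/* Usage sketch for the class above (illustrative only; 'config' is a GMConfig
   prepared elsewhere in A-REX, and the gmetric path is an example):

     HeartBeatMetrics hb;
     hb.SetEnabled(true);
     hb.SetGmetricPath("/usr/bin/gmetric");
     hb.ReportHeartBeatChange(config); // reports age of <controldir>/gm-heartbeat
*/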
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/JobsMetrics.h0000644000000000000000000000013215067751327024514 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.680806152 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/JobsMetrics.h0000644000175000002070000000465115067751327026424 0ustar00mockbuildmock00000000000000/* write essential information about job started/finished */ #ifndef __GM_JOBS_METRICS_H__ #define __GM_JOBS_METRICS_H__ #include #include #include #include #include #include "../jobs/GMJob.h" #define GMETRIC_STATERATE_UPDATE_INTERVAL 5 // TODO: this value could be set in arc.conf to be tailored to the site namespace ARex { class JobStateList { /* Holds success or failure of the last 100 jobs */ private: class JobNode { public: std::string job_id; bool isfailed; JobNode(bool _isfailed=false, std::string _job_id=""); ~JobNode(void); }; private: const int limit; std::list<JobNode> nodes; JobStateList::JobNode* NodeInList(std::string _job_id); public: int failures; void SetFailure(bool _isfailed, std::string _job_id); JobStateList(int _limit); ~JobStateList(void); }; class JobsMetrics { private: std::recursive_mutex lock; bool enabled; std::string config_filename; std::string tool_path; time_t time_lastupdate; unsigned long long int job_fail_counter; unsigned long long int jobs_in_state[JOB_STATE_UNDEFINED]; unsigned long long int jobs_state_old_new[JOB_STATE_UNDEFINED+1][JOB_STATE_UNDEFINED]; unsigned long long int jobs_state_accum[JOB_STATE_UNDEFINED+1]; unsigned long long int jobs_state_accum_last[JOB_STATE_UNDEFINED+1]; double jobs_rate[JOB_STATE_UNDEFINED]; bool fail_changed; bool jobs_in_state_changed[JOB_STATE_UNDEFINED]; bool jobs_state_old_new_changed[JOB_STATE_UNDEFINED+1][JOB_STATE_UNDEFINED]; bool jobs_rate_changed[JOB_STATE_UNDEFINED]; //id,state std::map jobs_state_old_map; std::map jobs_state_new_map; Arc::Run *proc; std::string proc_stderr; bool RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit); bool CheckRunMetrics(void); static void RunMetricsKicker(void* arg); static void SyncAsync(void* arg); JobStateList jobstatelist; public: JobsMetrics(void); ~JobsMetrics(void); void SetEnabled(bool val); /* Set path of configuration file */ void SetConfig(const char* fname); /* Set path/name of gmetric */ void SetGmetricPath(const char* path); void ReportJobStateChange(const GMConfig& config, GMJobRef i, job_state_t old_state, job_state_t new_state); void Sync(void); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/JobLog.cpp0000644000000000000000000000013215067751327023777 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.676586038 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/JobLog.cpp0000644000175000002070000001717215067751327025711 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif /* write essential information about job started/finished */ #include #include #include #include #include #include #include #include #include #include #include #include "../files/ControlFileContent.h" #include "../conf/GMConfig.h" #include "../accounting/AAR.h" #include "../accounting/AccountingDBSQLite.h" #include "../accounting/AccountingDBAsync.h" #include "JobLog.h" #define ACCOUNTING_SUBDIR "accounting" #define ACCOUNTING_DB_FILE "accounting_v2.db" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); JobLog::JobLog(void):filename(""),reporter_proc(NULL),reporter_last_run(0),reporter_period(3600) { } void JobLog::SetOutput(const char* fname) { filename=fname; } bool JobLog::SetReporterPeriod(int new_period) { if ( new_period < 3600 ) return false; reporter_period=new_period; return true; } bool JobLog::open_stream(std::ofstream &o) { o.open(filename.c_str(),std::ofstream::app); if(!o.is_open()) return false; o<<(Arc::Time().str()); o<<" "; return true; } bool JobLog::WriteStartInfo(GMJob &job,const GMConfig &config) { if(filename.length()==0) return true; std::ofstream o; if(!open_stream(o)) return false; o<<"Started - job id: "<jobname; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"name: \""<DN; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"owner: \""<lrms<<", queue: "<queue; }; o<jobname; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"name: \""<DN; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<"owner: \""<lrms<<", queue: "<queue; if(job_desc->localid.length() >0) o<<", lrmsid: "<localid; }; tmps = job.GetFailure(config); if(tmps.length()) { for(std::string::size_type i=0;;) { i=tmps.find('\n',i); if(i==std::string::npos) break; tmps[i]='.'; }; tmps = Arc::escape_chars(tmps, "\"\\", '\\', false); o<<", failure: \""<Running()) return true; /* running */ delete reporter_proc; reporter_proc=NULL; }; // Check tool exists if (reporter_tool.empty()) { logger.msg(Arc::ERROR,": Accounting records reporter tool is not specified"); return false; } // Record the start time if(time(NULL) < (reporter_last_run+reporter_period)) return true; // default: once per hour reporter_last_run=time(NULL); // Reporter is run with only one argument - the configuration file. // It is supposed to parse that configuration file to obtain other parameters.
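/* Illustration: with reporter_tool set to e.g. "jura-ng" (an example name,
   not a default) and ARC configured from /etc/arc.conf, the child process
   spawned below amounts to running:

     <ARC tools dir>/jura-ng -c /etc/arc.conf
*/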
std::list argv; argv.push_back(Arc::ArcLocation::GetToolsDir()+"/"+reporter_tool); argv.push_back("-c"); argv.push_back(config.ConfigFile()); reporter_proc = new Arc::Run(argv); if((!reporter_proc) || (!(*reporter_proc))) { delete reporter_proc; reporter_proc = NULL; logger.msg(Arc::ERROR,": Failure creating slot for accounting reporter child process"); return false; }; std::string errlog; JobLog* joblog = config.GetJobLog(); if(joblog) { if(!joblog->reporter_logfile.empty()) errlog = joblog->reporter_logfile; }; reporter_proc->AssignInitializer(&initializer,errlog.empty()?NULL:(void*)errlog.c_str(),false); logger.msg(Arc::DEBUG, "Running command: %s", argv.front()); if(!reporter_proc->Start()) { delete reporter_proc; reporter_proc = NULL; logger.msg(Arc::ERROR,": Failure starting accounting reporter child process"); return false; }; return true; } bool JobLog::ReporterEnabled(void) { if (reporter_tool.empty()) return false; return true; } bool JobLog::SetReporter(const char* fname) { if(fname) reporter_tool = (std::string(fname)); return true; } bool JobLog::SetReporterLogFile(const char* fname) { if(fname) reporter_logfile = (std::string(fname)); return true; } static AccountingDB* AccountingDBCtor(std::string const & name) { return new AccountingDBSQLite(name); } bool JobLog::WriteJobRecord(GMJob &job, const GMConfig& config) { bool r = true; timespec tstart; clock_gettime(CLOCK_MONOTONIC, &tstart); // Create accounting DB connection std::string accounting_db_path = config.ControlDir() + G_DIR_SEPARATOR_S + ACCOUNTING_SUBDIR + G_DIR_SEPARATOR_S + ACCOUNTING_DB_FILE; AccountingDBAsync adb(accounting_db_path, &AccountingDBCtor); if (!adb.IsValid()) { logger.msg(Arc::ERROR,": Failure creating accounting database connection"); r = false; } // create initial AAR record in the accounting database on ACCEPTED else if(job.get_state() == JOB_STATE_ACCEPTED) { AAR aar; aar.FetchJobData(job, config, token_map, vomsless_vo); r = adb.createAAR(aar); } // update all job metrics when job FINISHED else if (job.get_state() == JOB_STATE_FINISHED) { AAR aar; aar.FetchJobData(job, config, token_map, vomsless_vo); r = adb.updateAAR(aar); } else { // record job state change event in the accounting database aar_jobevent_t jobevent(job.get_state_name(), Arc::Time()); r = adb.addJobEvent(jobevent, job.get_id()); } timespec tend; clock_gettime(CLOCK_MONOTONIC, &tend); unsigned long long int dt = ((unsigned long long int)tend.tv_sec*1000 + tend.tv_nsec/1000000 - (unsigned long long int)tstart.tv_sec*1000 - tstart.tv_nsec/1000000); logger.msg(Arc::DEBUG,": writing accounting record took %llu ms", dt); return r; } void JobLog::SetCredentials(std::string const &key_path,std::string const &certificate_path,std::string const &ca_certificates_dir) { if (!key_path.empty()) report_config.push_back(std::string("key_path=")+key_path); if (!certificate_path.empty()) report_config.push_back(std::string("certificate_path=")+certificate_path); if (!ca_certificates_dir.empty()) report_config.push_back(std::string("ca_certificates_dir=")+ca_certificates_dir); } JobLog::~JobLog(void) { if(reporter_proc != NULL) { if(reporter_proc->Running()) reporter_proc->Kill(0); delete reporter_proc; reporter_proc=NULL; }; } void JobLog::initializer(void* arg) { char const * errlog = (char const *)arg; int h; // set up stdin,stdout and stderr h=::open("/dev/null",O_RDONLY); if(h != 0) { if(dup2(h,0) != 0) { exit(1); }; close(h); }; h=::open("/dev/null",O_WRONLY); if(h != 1) { if(dup2(h,1) != 1) { exit(1); }; close(h); }; h=errlog ? 
::open(errlog,O_WRONLY | O_CREAT | O_APPEND,S_IRUSR | S_IWUSR) : -1; if(h==-1) { h=::open("/dev/null",O_WRONLY); }; if(h != 2) { if(dup2(h,2) != 2) { exit(1); }; close(h); }; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/JobLog.h0000644000000000000000000000013215067751327023444 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.678333689 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/JobLog.h0000644000175000002070000000513715067751327025354 0ustar00mockbuildmock00000000000000/* write essential inforamtion about job started/finished */ #ifndef __GM_JOB_LOG_H__ #define __GM_JOB_LOG_H__ #include #include #include #include #include "../jobs/GMJob.h" namespace ARex { class GMConfig; class JobLocalDescription; /// Put short information into log when every job starts/finishes. /// And store more detailed information for Reporter. class JobLog { private: std::string filename; std::list report_config; // additional configuration for usage reporter std::string certificate_path; std::string ca_certificates_dir; std::map > token_map; std::list > vomsless_vo; // reporter tool vars std::string reporter_tool; std::string reporter_logfile; Arc::Run *reporter_proc; time_t reporter_last_run; int reporter_period; bool open_stream(std::ofstream &o); static void initializer(void* arg); public: JobLog(void); //JobLog(const char* fname); ~JobLog(void); /* chose name of log file */ void SetOutput(const char* fname); /* log job start information */ bool WriteStartInfo(GMJob &job,const GMConfig &config); /* log job finish iformation */ bool WriteFinishInfo(GMJob &job,const GMConfig& config); /* Check accounting records reporting is enabled */ bool ReporterEnabled(void); /* Run external utility to report gathered information to accounting services */ bool RunReporter(const GMConfig& config); /* Set period of running reporter */ bool SetReporterPeriod(int period); /* Set name of the accounting reporter */ bool SetReporter(const char* fname); /* Set name of the log file for accounting reporter */ bool SetReporterLogFile(const char* fname); /* Create data file for Reporter */ bool WriteJobRecord(GMJob &job,const GMConfig &config); /* Set credential file names for accessing logging service */ void SetCredentials(std::string const &key_path,std::string const &certificate_path,std::string const &ca_certificates_dir); /* Set accounting options (e.g. 
batch size for SGAS LUTS) */ void SetOptions(std::string const &options) { report_config.push_back(std::string("accounting_options=")+options); } /* Add mapping entry from token claim to accounting attribute */ void AddTokenMap(std::string const& source, std::string const& dest) { token_map[source].push_back(dest); } /* Add VO name to use in case VOMS information is not available for specific auth group */ void AddVomslessVo(std::string const& authgroup, std::string const& vo) { vomsless_vo.emplace_back(authgroup, vo); } }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/SpaceMetrics.cpp0000644000000000000000000000013215067751327025205 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.684305816 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/SpaceMetrics.cpp0000644000175000002070000001506115067751327027112 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "SpaceMetrics.h" #include "../conf/GMConfig.h" namespace ARex { static Arc::Logger& logger = Arc::Logger::getRootLogger(); SpaceMetrics::SpaceMetrics():enabled(false),proc(NULL) { freeCache = 0; totalFreeCache = 0; freeCache_update = false; freeSession = 0; totalFreeSession = 0; freeSession_update = false; } SpaceMetrics::~SpaceMetrics() { } void SpaceMetrics::SetEnabled(bool val) { enabled = val; } void SpaceMetrics::SetConfig(const char* fname) { config_filename = fname; } void SpaceMetrics::SetGmetricPath(const char* path) { tool_path = path; } void SpaceMetrics::ReportSpaceChange(const GMConfig& config) { if(!enabled) return; // not configured std::unique_lock lock_(lock); /*Free sessiondir space*/ struct statvfs info_session; totalFreeSession = 0; std::vector sessiondirs = config.SessionRoots(); if(!sessiondirs.empty()){ for(std::vector::iterator i = sessiondirs.begin(); i!= sessiondirs.end(); i++){ std::string path = *i; //sessiondir can have several options, extract the path part if ((*i).find(" ") != std::string::npos){ path = (*i).substr((*i).find_last_of(" ")+1, (*i).length()-(*i).find_last_of(" ")+1); } bool userSubs = false; bool otherSubs = false; config.Substitute(path, userSubs, otherSubs); if(userSubs) { logger.msg(Arc::WARNING,"Session dir '%s' contains user specific substitutions - skipping it", *i); continue; } if (statvfs(path.c_str(), &info_session) != 0) { logger.msg(Arc::ERROR,"Error getting info from statvfs for the path %s: %s", path, Arc::StrError(errno)); continue; } // return free space in GB freeSession = (float)(info_session.f_bfree * info_session.f_bsize) / (float)(1024 * 1024 * 1024); totalFreeSession += freeSession; logger.msg(Arc::DEBUG, "Sessiondir %s: Free space %f GB", path, totalFreeSession); freeSession_update = true; } } else{ logger.msg(Arc::ERROR,"No session directories found in configuration."); } /*Cache space */ struct statvfs info_cache; totalFreeCache = 0; std::vector cachedirs = config.CacheParams().getCacheDirs(); if(!cachedirs.empty()){ for(std::vector::iterator i = cachedirs.begin(); i!= cachedirs.end(); i++){ std::string path = *i; //cachedir can have several options, extract the path part if ((*i).find(" ") != std::string::npos){ path = (*i).substr((*i).find_last_of(" ")+1, (*i).length()-(*i).find_last_of(" ")+1); } if (statvfs(path.c_str(), &info_cache) != 0) { logger.msg(Arc::ERROR,"Error getting info from statvfs for the path %s: %s", path, Arc::StrError(errno)); } else{ // return free space 
in GB freeCache = (float)(info_cache.f_bfree * info_cache.f_bsize) / (float)(1024 * 1024 * 1024); totalFreeCache += freeCache; logger.msg(Arc::DEBUG, "Cache %s: Free space %f GB", path, totalFreeCache); freeCache_update = true; } } } else{ logger.msg(Arc::DEBUG,"No cachedirs found/configured for calculation of free space."); } Sync(); } bool SpaceMetrics::CheckRunMetrics(void) { if(!proc) return true; if(proc->Running()) return false; int run_result = proc->Result(); if(run_result != 0) { logger.msg(Arc::ERROR,": Metrics tool returned error code %i: %s",run_result,proc_stderr); }; delete proc; proc = NULL; return true; } void SpaceMetrics::Sync(void) { if(!enabled) return; // not configured std::unique_lock lock_(lock); if(!CheckRunMetrics()) return; // Run gmetric to report one change at a time //since only one process can be started from Sync(), only 1 histogram can be sent at a time, therefore return for each call; //Sync is therefore called multiple times until there are not more histograms that have changed if(freeCache_update){ if(RunMetrics( std::string("AREX-CACHE-FREE"), Arc::tostring(totalFreeCache), "int32", "GB") ) { freeCache_update = false; return; }; } if(freeSession_update){ if(RunMetrics( std::string("AREX-SESSION-FREE"), Arc::tostring(totalFreeSession), "int32", "GB") ) { freeSession_update = false; return; }; } } bool SpaceMetrics::RunMetrics(const std::string name, const std::string& value, const std::string unit_type, const std::string unit) { if(proc) return false; std::list cmd; if(tool_path.empty()) { logger.msg(Arc::ERROR,"gmetric_bin_path empty in arc.conf (should never happen the default value should be used)"); return false; } else { cmd.push_back(tool_path); }; if(!config_filename.empty()) { cmd.push_back("-c"); cmd.push_back(config_filename); }; cmd.push_back("-n"); cmd.push_back(name); cmd.push_back("-g"); cmd.push_back("arc_system"); cmd.push_back("-v"); cmd.push_back(value); cmd.push_back("-t");//unit-type cmd.push_back(unit_type); cmd.push_back("-u");//unit cmd.push_back(unit); proc = new Arc::Run(cmd); proc->AssignStderr(proc_stderr); proc->AssignKicker(&RunMetricsKicker, this); if(!(proc->Start())) { delete proc; proc = NULL; return false; }; return true; } void SpaceMetrics::SyncAsync(void* arg) { if(arg) { SpaceMetrics& it = *reinterpret_cast(arg); std::unique_lock lock_(it.lock); if(it.proc) { // Continue only if no failure in previous call. // Otherwise it can cause storm of failed calls. if(it.proc->Result() == 0) { it.Sync(); }; }; }; } void SpaceMetrics::RunMetricsKicker(void* arg) { // Currently it is not allowed to start new external process // from inside process kicker (todo: redesign). // So do it asynchronously from another thread. Arc::CreateThreadFunction(&SyncAsync, arg); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/PaxHeaders/README0000644000000000000000000000013215067751327022777 xustar0030 mtime=1759498967.756876948 30 atime=1759498967.865493651 30 ctime=1759499029.675371837 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/log/README0000644000175000002070000000003715067751327024701 0ustar00mockbuildmock00000000000000Local and remote log handling. 
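// The free-space calculation performed by SpaceMetrics::ReportSpaceChange
// above, as a self-contained sketch: report the free space, in GB, of the
// filesystem holding a given path. The default "/tmp" path is arbitrary;
// the statvfs(3) arithmetic mirrors the real code.
#include <cstdio>
#include <sys/statvfs.h>
int main(int argc, char** argv) {
  const char* path = (argc > 1) ? argv[1] : "/tmp";
  struct statvfs info;
  if(statvfs(path, &info) != 0) { perror("statvfs"); return 1; }
  // f_bfree free blocks of f_bsize bytes each, scaled down to GB
  double freeGB = (double)info.f_bfree * (double)info.f_bsize
                / (1024.0 * 1024.0 * 1024.0);
  printf("%s: %.2f GB free\n", path, freeGB);
  return 0;
}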
nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/arc_blahp_logger.cpp0000644000000000000000000000013115067751327025313 xustar0030 mtime=1759498967.752828852 29 atime=1759498967.86349362 30 ctime=1759499029.435468474 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/arc_blahp_logger.cpp0000644000175000002070000002641415067751327027225 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include static Arc::Logger logger(Arc::Logger::rootLogger, "arc-blahp-logger"); static void usage(char *pname) { std::cerr << "Usage: " << pname << " -I -U -P -L [-c ] [-p ] [-d ] [ -i ]\n"; std::cerr << "\n Where\n -i should be set to ignore failed jobs. Default is to publish them.\n"; } int main(int argc, char *argv[]) { int opt; const char *user_proxy_f = NULL; const char *job_local_f = NULL; const char *jobid_s = NULL; const char *user_s = NULL; const char *ceid_s = NULL; std::string logprefix = "/var/log/arc/accounting/blahp.log"; bool ignore_failed = false; // log Arc::LogLevel debuglevel = Arc::ERROR; Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(debuglevel); // Parse command line options while ((opt = getopt(argc, argv, "iI:U:P:L:c:p:d:")) != -1) { switch (opt) { case 'i': ignore_failed = true; break; case 'I': jobid_s = optarg; break; case 'U': user_s = optarg; break; case 'P': user_proxy_f = optarg; break; case 'L': job_local_f = optarg; break; case 'c': ceid_s = optarg; break; case 'p': logprefix = std::string(optarg); break; case 'd': debuglevel = Arc::old_level_to_level(atoi(optarg)); Arc::Logger::getRootLogger().setThreshold(debuglevel); break; default: logger.msg(Arc::ERROR,"Unknown option %s", opt); usage(argv[0]); return EXIT_FAILURE; } } if ( !jobid_s ) { logger.msg(Arc::ERROR,"Job ID argument is required."); usage(argv[0]); return EXIT_FAILURE; } if ( !user_proxy_f ) { logger.msg(Arc::ERROR,"Path to user's proxy file should be specified."); usage(argv[0]); return EXIT_FAILURE; } if ( !user_s ) { logger.msg(Arc::ERROR,"User name should be specified."); usage(argv[0]); return EXIT_FAILURE; } if ( !job_local_f ) { logger.msg(Arc::ERROR,"Path to .local job status file is required."); usage(argv[0]); return EXIT_FAILURE; } // Get or generate ceID prefix std::string ceid; if ( !ceid_s ) { logger.msg(Arc::DEBUG,"Generating ceID prefix from hostname automatically"); char host[256]; if (gethostname(host, sizeof(host)) != 0) { logger.msg(Arc::ERROR, "Cannot determine hostname from gethostname() to generate ceID automatically."); return EXIT_FAILURE; } else { host[sizeof(host)-1] = 0; ceid = std::string(host) + ":2811/nordugrid-torque"; } } else { ceid = std::string(ceid_s); } logger.msg(Arc::DEBUG,"ceID prefix is set to %s", ceid); // Get the current timestamp for log and logsuffix Arc::SetEnv("TZ","UTC"); tzset(); Arc::Time exectime; std::string timestamp = exectime.str(Arc::UserTime); std::string logsuffix = exectime.str(Arc::MDSTime).substr(0,8); logger.msg(Arc::DEBUG,"Getting currect timestamp for BLAH parser log: %s", timestamp); // Parse .local file to get required information std::string globalid; std::string localid; std::string queue; std::string subject; std::string interface; std::string headnode; logger.msg(Arc::DEBUG,"Parsing .local file to obtain job-specific identifiers and info"); std::ifstream job_local; job_local.open(job_local_f, std::ios::in); if ( 
job_local.is_open() ) { std::string line; while ( job_local.good() ) { getline(job_local,line); if ( ! line.compare(0,9,"globalid=") ) { globalid = line.substr(9); logger.msg(Arc::DEBUG,"globalid is set to %s", globalid); } else if ( ! line.compare(0,9,"headnode=") ) { headnode = line.substr(9); logger.msg(Arc::DEBUG,"headnode is set to %s", headnode); } else if ( ! line.compare(0,10,"interface=") ) { interface = line.substr(10); logger.msg(Arc::DEBUG,"interface is set to %s", interface); } else if ( ! line.compare(0,8,"localid=") ) { localid = line.substr(8); if ( localid.empty() ) { logger.msg(Arc::ERROR,"There is no local LRMS ID. Message will not be written to BLAH log."); return EXIT_FAILURE; } logger.msg(Arc::DEBUG,"localid is set to %s", localid); } else if ( ! line.compare(0,6,"queue=") ) { queue = line.substr(6); logger.msg(Arc::DEBUG,"queue name is set to %s", queue); } else if ( ! line.compare(0,8,"subject=") ) { subject = line.substr(8); logger.msg(Arc::DEBUG,"owner subject is set to %s", subject); } else if ( (! line.compare(0,12,"failedstate=")) && ignore_failed ) { logger.msg(Arc::ERROR,"Job did not finished successfully. Message will not be written to BLAH log."); return EXIT_FAILURE; } else if ( ! line.compare(0,10,"starttime=") ) { //need to convert timestamp into a blah compatible format //blah / apel use the timestamp to determine job eligibility to accounting, as job IDs can (?) loop //it is more deterministic to use the job start date as the timestamp than "now()" which will cause issues in case of processing delays Arc::Time job_timestamp(line.substr(10)) ; timestamp = job_timestamp.str(Arc::UserTime); logger.msg(Arc::DEBUG,"Job timestamp successfully parsed as %s", timestamp); } } } else { logger.msg(Arc::ERROR,"Can not read information from the local job status file"); return EXIT_FAILURE; } // Just in case subject escape subject = Arc::escape_chars(subject, "\"\\", '\\', false); // Construct clientID depend on submission interface std::string clientid; if ( interface == "org.nordugrid.gridftpjob" ) { clientid = globalid; } else if ( interface == "org.ogf.glue.emies.activitycreation" ) { clientid = headnode + "/" + globalid; } else if ( interface == "org.nordugrid.arcrest" ) { clientid = headnode + "/" + globalid; } else { logger.msg(Arc::ERROR,"Unsupported submission interface %s. Seems arc-blahp-logger need to be updated accordingly. Please submit the bug to bugzilla."); return EXIT_FAILURE; } // Get FQANs information from user's proxy // P.S. 
validity check is not enforced, proxy can be even expired long time before job finished Arc::Credential holder(user_proxy_f, "", "", "", false, true); std::vector voms_attributes; Arc::VOMSTrustList vomscert_trust_dn; logger.msg(Arc::DEBUG, "Parsing VOMS AC to get FQANs information"); // suppress expired 'ERROR' from Arc.Credential output if ( debuglevel == Arc::ERROR ) Arc::Logger::getRootLogger().setThreshold(Arc::FATAL); Arc::parseVOMSAC(holder, "", "", true, "", vomscert_trust_dn, voms_attributes, false, true); Arc::Logger::getRootLogger().setThreshold(debuglevel); std::string fqans_logentry; std::string fqan; std::size_t pos; if(voms_attributes.size() > 0) { for (std::vector::iterator iAC = voms_attributes.begin(); iAC != voms_attributes.end(); iAC++) { for (unsigned int acnt = 0; acnt < iAC->attributes.size(); acnt++ ) { fqan = iAC->attributes[acnt]; logger.msg(Arc::DEBUG, "Found VOMS AC attribute: %s", fqan); std::list elements; Arc::tokenize(fqan, elements, "/"); if ( elements.size() == 0 ) { logger.msg(Arc::DEBUG, "Malformed VOMS AC attribute %s", fqan); continue; } if (elements.front().rfind("voname=", 0) == 0) { elements.pop_front(); // crop voname= if ( ! elements.empty() ) elements.pop_front(); // crop hostname= if ( ! elements.empty() ) { logger.msg(Arc::DEBUG, "VOMS AC attribute is a tag"); fqan = ""; while (! elements.empty () ) { fqan.append("/").append(elements.front()); elements.pop_front(); } } else { logger.msg(Arc::DEBUG, "Skipping policyAuthority VOMS AC attribute"); continue; } } else { logger.msg(Arc::DEBUG, "VOMS AC attribute is the FQAN"); pos = fqan.find("/Role="); if ( pos == std::string::npos ) fqan = fqan + "/Role=NULL"; } fqans_logentry += "\"userFQAN=" + Arc::trim(Arc::escape_chars(fqan, "\"\\", '\\', false)) + "\" "; } } } else { logger.msg(Arc::DEBUG, "No FQAN found. 
Using None as userFQAN value"); fqans_logentry = "\"userFQAN=/None/Role=NULL\" "; } // Assemble BLAH logentry std::string logentry = "\"timestamp=" + timestamp + "\" \"userDN=" + Arc::trim(subject) + "\" " + fqans_logentry + "\"ceID=" + ceid + "-" + queue + "\" \"jobID=" + std::string(jobid_s) + "\" \"lrmsID=" + Arc::trim(localid) + "\" \"localUser=" + std::string(user_s) + "\" \"clientID=" + clientid + "\""; logger.msg(Arc::DEBUG, "Assembling BLAH parser log entry: %s", logentry); // Write entry to BLAH log with locking to exclude possible simultaneous writes when several jobs are finished std::string fname = logprefix + "-" + logsuffix; Arc::FileLock lock(fname); logger.msg(Arc::DEBUG,"Writing the info to the BLAH parser log: %s", fname); for (int i = 10; !lock.acquire() && i >= 0; --i) { if (i == 0) return false; sleep(1); } std::ofstream logfile; logfile.open(fname.c_str(),std::ofstream::app); if(!logfile.is_open()) { logger.msg(Arc::ERROR,"Cannot open BLAH log file '%s'", fname); lock.release(); return EXIT_FAILURE; } logfile << logentry << std::endl; logfile.close(); lock.release(); return EXIT_SUCCESS; } nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/test_write_grami_file.cpp0000644000000000000000000000013215067751327026411 xustar0030 mtime=1759498967.757868355 30 atime=1759498967.865493651 30 ctime=1759499029.440404865 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/test_write_grami_file.cpp0000644000175000002070000000773115067751327030323 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "conf/GMConfig.h" #include "conf/StagingConfig.h" #include "jobs/JobDescriptionHandler.h" #include "files/ControlFileContent.h" #include "files/ControlFileHandling.h" namespace ARex { class GMJobMock : public GMJob { public: GMJobMock(const JobId &job_id, const std::string &dir = "") : GMJob(job_id, Arc::User(), dir) {} ~GMJobMock() {} void SetLocalDescription(const Arc::JobDescription& desc) { if (local) { delete local; } local = new JobLocalDescription; *local = desc; local->globalid = job_id; } std::list& GetOutputdata() { return local->outputdata; } }; } int main(int argc, char **argv) { Arc::Logger logger(Arc::Logger::getRootLogger(), "test_write_grami_file"); Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::OptionParser options(istring("[job description input]"), istring("Tool for writing the grami file representation of a job description file.")); std::string gramiFileName; options.AddOption('g', "grami", istring("Name of grami file"), istring("filename"), gramiFileName); std::string confFileName; options.AddOption('z', "conf", istring("Configuration file to load"), istring("arc.conf"), confFileName); std::string sessionDir = ""; options.AddOption('s', "session-dir", istring("Session directory to use"), istring("directory"), sessionDir); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); std::list descriptions = options.Parse(argc, argv); if (descriptions.empty()) { std::cout << Arc::IString("Use --help option for detailed usage information") << std::endl; return 1; } if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::string_to_level(debug)); if (descriptions.empty()) { logger.msg(Arc::ERROR, "No job description file name provided."); return 
1; } std::list jobdescs; if (!Arc::JobDescription::Parse(descriptions.front(), jobdescs) || jobdescs.empty()) { logger.msg(Arc::ERROR, "Unable to parse job description input: %s", descriptions.front()); return 1; } ARex::GMConfig gmc(confFileName); if (!gmc.Load()) { logger.msg(Arc::ERROR, "Unable to load ARC configuration file."); return 1; } gmc.SetShareID(Arc::User()); ARex::StagingConfig sconf(gmc); if (sessionDir.empty() && !gmc.SessionRoots().empty()) { sessionDir = gmc.SessionRoots().front(); } ARex::GMJobMock gmjob(gramiFileName, sessionDir + "/" + gramiFileName); gmjob.SetLocalDescription(jobdescs.front()); ARex::JobDescriptionHandler jdh(gmc); Arc::DirCreate(ARex::job_control_path(gmc.ControlDir(),gmjob.get_id(),NULL), S_IRWXU | S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH, true); if (!jdh.write_grami(jobdescs.front(), gmjob, NULL)) { const std::string fgrami = ARex::job_control_path(gmc.ControlDir(),gmjob.get_id(),ARex::sfx_grami); logger.msg(Arc::ERROR, "Unable to write grami file: %s", fgrami); return 1; } if (!job_output_write_file(gmjob, gmc, gmjob.GetOutputdata())) { const std::string foutput = ARex::job_control_path(gmc.ControlDir(),gmjob.get_id(),ARex::sfx_output); logger.msg(Arc::ERROR, "Unable to write 'output' file: %s", foutput); return 1; } const std::string fgrami = ARex::job_control_path(gmc.ControlDir(),gmjob.get_id(),ARex::sfx_grami); std::cout << "grami file written to " << fgrami << std::endl; return 0; } nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/PaxHeaders/inputcheck.cpp0000644000000000000000000000013215067751327024177 xustar0030 mtime=1759498967.754640094 30 atime=1759498967.864493635 30 ctime=1759499029.438773235 nordugrid-arc-7.1.1/src/services/a-rex/grid-manager/inputcheck.cpp0000644000175000002070000000761715067751327026114 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include "conf/GMConfig.h" #include "files/ControlFileContent.h" #include "jobs/JobDescriptionHandler.h" #include "misc/proxy.h" static Arc::SimpleCondition cond; static Arc::Logger logger(Arc::Logger::rootLogger, "inputcheck"); class lfn_t { public: std::string lfn; bool done; bool failed; lfn_t(const std::string& l):lfn(l),done(false),failed(false) { }; }; void check_url(void *arg) { lfn_t* lfn = (lfn_t*)arg; logger.msg(Arc::INFO,"%s",lfn->lfn); Arc::UserConfig usercfg; Arc::DataHandle source(Arc::URL(lfn->lfn),usercfg); source->SetSecure(false); if(!source) { logger.msg(Arc::ERROR,"Failed to acquire source: %s",lfn->lfn); lfn->failed=true; lfn->done=true; cond.signal(); return; }; if(!source->Resolve(true).Passed()) { logger.msg(Arc::ERROR,"Failed to resolve %s",lfn->lfn); lfn->failed=true; lfn->done=true; cond.signal(); return; }; source->SetTries(1); // TODO. Run every URL in separate thread. // TODO. 
Do only connection (optionally) bool check_passed = false; if(source->HaveLocations()) { do { if(source->CurrentLocationHandle()->Check(false).Passed()) { check_passed=true; break; } } while (source->NextLocation()); }; if(!check_passed) { logger.msg(Arc::ERROR,"Failed to check %s",lfn->lfn); lfn->failed=true; lfn->done=true; cond.signal(); return; }; lfn->done=true; cond.signal(); return; } int main(int argc,char* argv[]) { Arc::LogStream logcerr(std::cerr); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::OptionParser options(istring("job_description_file [proxy_file]"), istring("inputcheck checks that input files specified " "in the job description are available and accessible " "using the credentials in the given proxy file.")); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); std::list params = options.Parse(argc, argv); if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); if (params.size() != 1 && params.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of arguments given"); return -1; } std::string desc = params.front(); std::string proxy; if (params.size() == 2) proxy = params.back(); // TODO It would be better to use Arc::JobDescription::Parse(desc) ARex::GMConfig config; ARex::JobDescriptionHandler job_desc_handler(config); ARex::JobLocalDescription job; Arc::JobDescription arc_job_desc; if(job_desc_handler.parse_job_req_from_file(job,arc_job_desc,desc) != ARex::JobReqSuccess) return 1; if(!proxy.empty()) { Arc::SetEnv("X509_USER_PROXY",proxy,true); Arc::SetEnv("X509_USER_CERT",proxy,true); Arc::SetEnv("X509_USER_KEY",proxy,true); }; ARex::prepare_proxy(); std::list::iterator file; bool has_lfns = false; std::list lfns; for(file=job.inputdata.begin();file!=job.inputdata.end();++file) { if(file->has_lfn()) { lfn_t* lfn = new lfn_t(file->lfn); lfns.push_back(lfn); Arc::CreateThreadFunction(&check_url,lfn); has_lfns=true; }; }; for(;has_lfns;) { cond.wait(); has_lfns=false; for(std::list::iterator l = lfns.begin();l!=lfns.end();++l) { if((*l)->done) { if((*l)->failed) { ARex::remove_proxy(); exit(1); }; } else { has_lfns=true; }; }; }; ARex::remove_proxy(); exit(0); } nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/create_activity.cpp0000644000000000000000000000013115067751327022663 xustar0030 mtime=1759498967.750491903 30 atime=1759498967.862493605 29 ctime=1759499029.33134036 nordugrid-arc-7.1.1/src/services/a-rex/create_activity.cpp0000644000175000002070000000357115067751327024574 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "job.h" #include "arex.h" namespace ARex { Arc::MCC_Status ARexService::PutNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { /* adl:ActivityDescription - http://www.eu-emi.eu/es/2010/12/adl */ // subpath is ignored // Check for proper payload Arc::MessagePayload* payload = inmsg.Payload(); if(!payload) { logger_.msg(Arc::ERROR, "NEW: put new job: there is no payload"); return make_http_fault(outmsg,500,"Missing payload"); }; if(config.GmConfig().MaxTotal() > 0 && all_jobs_count_ >= config.GmConfig().MaxTotal()) { logger_.msg(Arc::ERROR, "NEW: put new job: max jobs total limit reached"); return make_http_fault(outmsg,500,"No more jobs allowed"); }; // Fetch content std::string desc_str; // TODO: Add job description size limit control 
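// The size-limit idea the TODO above refers to, sketched with plain
// iostreams (ReadWithCap is a hypothetical helper, not the ARex API):
// append chunks to the buffer and stop collecting as soon as the cap is
// reached, so an oversized request body cannot exhaust server memory.
#include <istream>
#include <string>
static std::string ReadWithCap(std::istream& in, size_t cap) {
  std::string content;
  char chunk[4096];
  while(in.read(chunk, sizeof(chunk)), in.gcount() > 0) {
    content.append(chunk, in.gcount());
    if(content.size() >= cap) break; // cap reached - stop collecting
  }
  return content;
}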
Arc::MCC_Status res = ARexService::extract_content(inmsg,desc_str,100*1024*1024); // todo: add size control if(!res) return make_http_fault(outmsg,500,res.getExplanation().c_str()); std::string clientid = (inmsg.Attributes()->get("TCP:REMOTEHOST"))+":"+(inmsg.Attributes()->get("TCP:REMOTEPORT")); // TODO: Do we need different generators for different formats? JobIDGeneratorES idgenerator(config.Endpoint()); ARexJob job(desc_str,config,"","",clientid,logger_,idgenerator); if(!job) { return make_http_fault(outmsg,500,job.Failure().c_str()); }; return make_http_fault(outmsg,200,job.ID().c_str()); } Arc::MCC_Status ARexService::DeleteNew(Arc::Message& inmsg,Arc::Message& outmsg,ARexGMConfig& config,std::string const& subpath) { return make_http_fault(outmsg,501,"Not Implemented"); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/rest0000644000000000000000000000013015067751426017677 xustar0030 mtime=1759499030.522445726 28 atime=1759499034.7655102 30 ctime=1759499030.522445726 nordugrid-arc-7.1.1/src/services/a-rex/rest/0000755000175000002070000000000015067751426021660 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/rest/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327022011 xustar0030 mtime=1759498967.771747295 30 atime=1759498967.872493757 29 ctime=1759499030.51744565 nordugrid-arc-7.1.1/src/services/a-rex/rest/Makefile.am0000644000175000002070000000060015067751327023710 0ustar00mockbuildmock00000000000000DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) noinst_LTLIBRARIES = libarexrest.la libarexrest_la_SOURCES = rest.cpp rest.h libarexrest_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarexrest_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la libarexrest_la_LDFLAGS = -no-undefined -avoid-version -module nordugrid-arc-7.1.1/src/services/a-rex/rest/PaxHeaders/rest.cpp0000644000000000000000000000013215067751327021437 xustar0030 mtime=1759498967.772230924 30 atime=1759498967.872493757 30 ctime=1759499030.520022663 nordugrid-arc-7.1.1/src/services/a-rex/rest/rest.cpp0000644000175000002070000030526115067751327023350 0ustar00mockbuildmock00000000000000#include #include #include #include #include #include #include #include #include #include #include "../job.h" #include "../PayloadFile.h" #include "../FileChunks.h" #include "../delegation/DelegationStores.h" #include "../grid-manager/files/ControlFileHandling.h" #include "rest.h" namespace Arc { std::list< std::pair >::iterator FindFirst(std::list< std::pair >::iterator first, std::list< std::pair >::iterator last, std::string const & str) { while (first != last) { if (first->first == str) return first; ++first; } return last; } } using namespace ARex; using namespace Arc; enum ResponseFormat { ResponseFormatHtml, ResponseFormatXml, ResponseFormatJson }; static bool JsonPathsHasLastMember(char const * const array_paths[]) { if(array_paths) { for(int idx = 0; array_paths[idx]; ++idx) { char const * array_path = array_paths[idx]; char const * sep = strchr(array_path, '/'); if(sep) continue; // not last element of path return true; } } return false; } static void RenderToJson(Arc::XMLNode xml, std::string& output, char const * const array_paths[], bool show_empty_array = true, int depth = 0) { if(xml.Size() == 0) { // Either it is a value or it has forced array sub-elements. 
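// Why RenderToJson() below groups children by name before emitting anything:
// JSON objects cannot repeat a key, so XML elements that share a name must
// be collapsed into one key holding an array. A minimal sketch of that rule,
// with a plain name->values map standing in for XML children (simplified:
// no escaping, no nesting, alphabetical key order):
#include <iostream>
#include <map>
#include <string>
#include <vector>
static std::string ToJson(const std::map<std::string, std::vector<std::string> >& children) {
  std::string out = "{";
  for(std::map<std::string, std::vector<std::string> >::const_iterator it = children.begin();
      it != children.end(); ++it) {
    if(it != children.begin()) out += ",";
    out += "\"" + it->first + "\":";
    if(it->second.size() == 1) { // single occurrence - plain value
      out += "\"" + it->second[0] + "\"";
    } else { // repeated element name - JSON array
      out += "[";
      for(size_t n = 0; n < it->second.size(); ++n) {
        if(n != 0) out += ",";
        out += "\"" + it->second[n] + "\"";
      }
      out += "]";
    }
  }
  out += "}";
  return out;
}
int main() {
  std::map<std::string, std::vector<std::string> > c;
  c["name"].push_back("test");
  c["id"].push_back("1");
  c["id"].push_back("2");
  std::cout << ToJson(c) << std::endl; // prints {"id":["1","2"],"name":"test"}
  return 0;
}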
bool has_arrays = JsonPathsHasLastMember(array_paths); if(!has_arrays) { // shortcut - nothing will match to array_paths at this depth std::string val = json_encode((std::string)xml); if((depth != 0) || (!val.empty())) { output += "\""; output += val; output += "\""; } return; } } output += "{"; // Because JSON does not allow for same key we must first // group XML elements by names. Using list to preserve order // in which elements appear. std::list< std::pair > names; // counters per names, list instead of map for keeping order for(int n = 0; ; ++n) { XMLNode child = xml.Child(n); if(!child) break; std::string name = child.Name(); std::list< std::pair >::iterator nameIt = FindFirst(names.begin(),names.end(),name); if(nameIt == names.end()) names.push_back(std::make_pair(name,1)); else ++(nameIt->second); } // Check for forced JSON arrays if(array_paths) { for(int idx = 0; array_paths[idx]; ++idx) { char const * array_path = array_paths[idx]; char const * sep = strchr(array_path, '/'); if(sep) continue; // not last element of path // Add fake 2 elements to make them arrays later std::list< std::pair >::iterator nameIt = FindFirst(names.begin(),names.end(),array_path); if(nameIt == names.end()) { if(show_empty_array) names.push_back(std::make_pair(array_path,2)); } else { nameIt->second += 2; } } } bool newElement = true; for(std::list< std::pair >::iterator nameIt = names.begin(); nameIt != names.end(); ++nameIt) { if(!newElement) output += ","; newElement = false; output += "\""; output += nameIt->first; output += "\""; output += ":"; XMLNode child = xml[nameIt->first.c_str()]; if(child) { std::vector new_array_paths; if(array_paths) { for(int idx = 0; array_paths[idx]; ++idx) { char const * array_path = array_paths[idx]; if(strncmp(nameIt->first.c_str(), array_path, nameIt->first.length()) == 0) { if(array_path[nameIt->first.length()] == '/') { new_array_paths.push_back(array_path+(nameIt->first.length()+1)); } } } if(!new_array_paths.empty()) new_array_paths.push_back(NULL); } if(nameIt->second == 1) { RenderToJson(child, output, new_array_paths.empty() ? NULL : &(new_array_paths[0]), show_empty_array, depth+1); } else { output += "["; bool newItem = true; while(child) { if(!newItem) output += ","; newItem = false; RenderToJson(child, output, new_array_paths.empty() ? NULL : &(new_array_paths[0]), show_empty_array, depth+1); ++child; } output += "]"; } } else { // Must be forced array element output += "[]"; } } // Hope no attributes with same name if(xml.AttributesSize() > 0) { if(!newElement) output += ","; output += "\"_attributes\":{"; for(int n = 0; ; ++n) { XMLNode child = xml.Attribute(n); if (!child) break; if(n != 0) output += ","; std::string val = json_encode((std::string)child); output += "\""; output += child.Name(); output += "\":\""; output += val; output += "\""; } output += "}"; } output += "}"; } static void RenderToHtml(Arc::XMLNode xml, std::string& output, int depth = 0) { if(depth == 0) { output += ""; output += xml.Name(); output += ""; } if(xml.Size() == 0) { output += (std::string)xml; } else { output += "
    font_title."> $value 
    "; for(int n = 0; ; ++n) { XMLNode child = xml.Child(n); if (!child) break; output += ""; } output += "
    "; output += child.Name(); output += ""; RenderToHtml(child, output, depth+1); output += "
    "; } if(depth == 0) { output += ""; } } static void RenderToXml(Arc::XMLNode xml, std::string& output, int depth = 0) { xml.GetXML(output, "utf-8"); } static char const * SkipWS(char const * input) { while(*input) { if(!std::isspace(*input)) break; ++input; } return input; } static char const * SkipTo(char const * input, char tag) { while(*input) { if(*input == tag) break; ++input; } return input; } static char const * SkipToEscaped(char const * input, char tag) { while(*input) { if(*input == '\\') { ++input; if(!*input) break; } else if(*input == tag) { break; } ++input; } return input; } static char const * ParseFromJson(Arc::XMLNode& xml, char const * input, int depth = 0) { input = SkipWS(input); if(!*input) return input; if(*input == '{') { // complex item ++input; input = SkipWS(input); if(*input != '}') while(true) { char const * nameStart = input; if(*nameStart != '"') return NULL; ++nameStart; char const * nameEnd = SkipToEscaped(nameStart, '"'); if(*nameEnd != '"') return NULL; char const * sep = SkipWS(nameEnd+1); if(*sep != ':') return NULL; XMLNode item = xml.NewChild(json_unencode(std::string(nameStart, nameEnd-nameStart))); input = sep+1; input = ParseFromJson(item,input,depth+1); if(!input) return NULL; input = SkipWS(input); if(*input == ',') { // next element ++input; input = SkipWS(input); } else if(*input == '}') { // last element break; } else { return NULL; }; }; ++input; } else if(*input == '[') { ++input; // array input = SkipWS(input); XMLNode item = xml; if(*input != ']') while(true) { input = ParseFromJson(item,input,depth+1); if(!input) return NULL; input = SkipWS(input); if(*input == ',') { // next element ++input; item = xml.Parent().NewChild(item.Name()); } else if(*input == ']') { // last element item = xml.Parent().NewChild(item.Name()); // It will be deleted outside loop break; } else { return NULL; }; }; item.Destroy(); ++input; } else if(*input == '"') { ++input; // string char const * strStart = input; input = SkipToEscaped(strStart, '"'); if(*input != '"') return NULL; xml = json_unencode(std::string(strStart, input-strStart)); ++input; // } else if((*input >= '0') && (*input <= '9')) { // } else if(*input == 't') { // } else if(*input == 'f') { // } else if(*input == 'n') { } else { ++input; // true, false, null, number char const * strStart = input; while(*input) { if((*input == ',') || (*input == '}') || (*input == ']') || (std::isspace(*input))) break; ++input; } xml = std::string(strStart, input-strStart); }; return input; } static void RenderResponse(Arc::XMLNode xml, ResponseFormat format, std::string& output, char const * const json_arrays[], bool show_empty_arrays = true) { switch(format) { case ResponseFormatXml: RenderToXml(xml, output); break; case ResponseFormatHtml: RenderToHtml(xml, output); break; case ResponseFormatJson: RenderToJson(xml, output, json_arrays, show_empty_arrays); break; default: break; } } static void ExtractRange(Arc::Message& inmsg, off_t& range_start, off_t& range_end) { range_start = 0; range_end = (off_t)(-1); { std::string val; val=inmsg.Attributes()->get("HTTP:RANGESTART"); if(!val.empty()) { // Negative ranges not supported if(!Arc::stringto(val,range_start)) { range_start=0; } else { val=inmsg.Attributes()->get("HTTP:RANGEEND"); if(!val.empty()) { if(!Arc::stringto(val,range_end)) { range_end=(off_t)(-1); } else { // Rest of code here treats end of range as exclusive // While HTTP ranges are inclusive ++range_end; }; }; }; }; }; } #ifndef CPPUNITTEST std::string ARexRest::ProcessingContext::operator[](char 
const * key) const { if(!key) return ""; std::multimap::const_iterator it = query.find(key); if(it == query.end()) return ""; return it->second; } static Arc::MCC_Status extract_content(Arc::Message& inmsg,std::string& content,uint32_t size_limit) { // Identify payload Arc::MessagePayload* payload = inmsg.Payload(); if(!payload) { return Arc::MCC_Status(Arc::GENERIC_ERROR,"","Missing payload"); }; Arc::PayloadStreamInterface* stream = dynamic_cast(payload); Arc::PayloadRawInterface* buf = dynamic_cast(payload); if((!stream) && (!buf)) { return Arc::MCC_Status(Arc::GENERIC_ERROR,"","Error processing payload"); } // Fetch content content.clear(); if(stream) { std::string add_str; while(stream->Get(add_str)) { content.append(add_str); if((size_limit != 0) && (content.size() >= size_limit)) break; } } else { for(unsigned int n = 0;buf->Buffer(n);++n) { content.append(buf->Buffer(n),buf->BufferSize(n)); if((size_limit != 0) && (content.size() >= size_limit)) break; }; }; return Arc::MCC_Status(Arc::STATUS_OK); } // Strip first token from path delimited by /. static bool GetPathToken(std::string& subpath, std::string& token) { std::string::size_type token_start = 0; while(subpath[token_start] == '/') ++token_start; std::string::size_type token_end = token_start; while((token_end < subpath.length()) && (subpath[token_end] != '/')) ++token_end; if (token_start == token_end) return false; token = subpath.substr(token_start, token_end-token_start); while(subpath[token_end] == '/') ++token_end; subpath.erase(0, token_end); return true; } static std::string StripNewLine(char const * str) { std::string res(str); for(std::string::size_type pos = res.find_first_of("\r\n"); pos != std::string::npos; pos = res.find_first_of("\r\n",pos)) { res[pos] = ' '; } return res; } // Insert generic (error) HTTP response into outmsg. 
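// Usage sketch for the GetPathToken() helper above: each call strips one
// path component and leaves the remainder, which is how the REST dispatcher
// peels off "rest", the API version and the functionality in turn. The
// helper is repeated here verbatim so the example stands alone.
#include <iostream>
#include <string>
static bool GetPathToken(std::string& subpath, std::string& token) {
  std::string::size_type token_start = 0;
  while(subpath[token_start] == '/') ++token_start;
  std::string::size_type token_end = token_start;
  while((token_end < subpath.length()) && (subpath[token_end] != '/')) ++token_end;
  if (token_start == token_end) return false;
  token = subpath.substr(token_start, token_end-token_start);
  while(subpath[token_end] == '/') ++token_end;
  subpath.erase(0, token_end);
  return true;
}
int main() {
  std::string subpath = "/rest/1.0/jobs/abc123";
  std::string token;
  while(GetPathToken(subpath, token))
    std::cout << token << std::endl; // prints rest, 1.0, jobs, abc123 in turn
  return 0;
}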
static Arc::MCC_Status HTTPFault(Arc::Message& inmsg, Arc::Message& outmsg,int code,const char* resp,const char* msg = NULL) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(msg && *msg) { outpayload->Insert(msg); outmsg.Attributes()->set("HTTP:Content-Type","text/plain"); } delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE",Arc::tostring(code)); if(resp) outmsg.Attributes()->set("HTTP:REASON",StripNewLine(resp)); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPResponse(Arc::Message& inmsg, Arc::Message& outmsg) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE","200"); outmsg.Attributes()->set("HTTP:REASON","OK"); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPResponse(Arc::Message& inmsg, Arc::Message& outmsg, std::string const & content, std::string const& mime) { if(inmsg.Attributes()->get("HTTP:METHOD") == "HEAD") { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(outpayload) outpayload->Truncate(content.length()); delete outmsg.Payload(outpayload); } else { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(outpayload) outpayload->Insert(content.c_str(),0,content.length()); delete outmsg.Payload(outpayload); } outmsg.Attributes()->set("HTTP:CODE","200"); outmsg.Attributes()->set("HTTP:REASON","OK"); outmsg.Attributes()->set("HTTP:content-type",mime); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPResponseFile(Arc::Message& inmsg, Arc::Message& outmsg, int& fileHandle, std::string const& mime) { if(inmsg.Attributes()->get("HTTP:METHOD") == "HEAD") { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); struct stat st; if(outpayload && (::fstat(fileHandle,&st) == 0)) outpayload->Truncate(st.st_size); delete outmsg.Payload(outpayload); } else { off_t range_start = 0; off_t range_end = 0; ExtractRange(inmsg, range_start, range_end); Arc::MessagePayload* outpayload = newFileRead(fileHandle,range_start,range_end); delete outmsg.Payload(outpayload); fileHandle = -1; } outmsg.Attributes()->set("HTTP:CODE","200"); outmsg.Attributes()->set("HTTP:REASON","OK"); outmsg.Attributes()->set("HTTP:content-type",mime); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPResponseFile(Arc::Message& inmsg, Arc::Message& outmsg, Arc::FileAccess*& fileHandle, std::string const& mime) { if(inmsg.Attributes()->get("HTTP:METHOD") == "HEAD") { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); struct stat st; if(outpayload && fileHandle->fa_fstat(st)) outpayload->Truncate(st.st_size); delete outmsg.Payload(outpayload); } else { off_t range_start; off_t range_end; ExtractRange(inmsg, range_start, range_end); Arc::MessagePayload* outpayload = newFileRead(fileHandle,range_start,range_end); delete outmsg.Payload(outpayload); fileHandle = NULL; } outmsg.Attributes()->set("HTTP:CODE","200"); outmsg.Attributes()->set("HTTP:REASON","OK"); outmsg.Attributes()->set("HTTP:content-type",mime); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPDELETEResponse(Arc::Message& inmsg, Arc::Message& outmsg, bool queued = false) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); if(queued) { outmsg.Attributes()->set("HTTP:CODE","204"); outmsg.Attributes()->set("HTTP:REASON","No Content"); } else { outmsg.Attributes()->set("HTTP:CODE","202"); outmsg.Attributes()->set("HTTP:REASON","Accepted"); } return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status 
HTTPPOSTDelayedResponse(Arc::Message& inmsg, Arc::Message& outmsg) { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE","202"); outmsg.Attributes()->set("HTTP:REASON","Queued"); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPPOSTResponse(Arc::Message& inmsg, Arc::Message& outmsg, std::string const & redir = "") { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE","201"); outmsg.Attributes()->set("HTTP:REASON","Created"); if(!redir.empty()) outmsg.Attributes()->set("HTTP:location",redir); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPPOSTResponse(Arc::Message& inmsg, Arc::Message& outmsg, std::string const & content, std::string const& mime, std::string const & redir = "") { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(outpayload) outpayload->Insert(content.c_str(),0,content.length()); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE","201"); outmsg.Attributes()->set("HTTP:REASON","Created"); outmsg.Attributes()->set("HTTP:content-type",mime); if(!redir.empty()) outmsg.Attributes()->set("HTTP:location",redir); return Arc::MCC_Status(Arc::STATUS_OK); } static ResponseFormat ProcessAcceptedFormat(Arc::Message& inmsg, Arc::Message& outmsg) { // text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8 std::list accepts; for(Arc::AttributeIterator attrIt = inmsg.Attributes()->getAll("HTTP:accept"); attrIt.hasMore(); ++attrIt) tokenize(*attrIt, accepts, ","); for(std::list::iterator acc = accepts.begin(); acc != accepts.end(); ++acc) { *acc = Arc::trim(*acc, " "); std::string::size_type pos = acc->find_first_of(';'); if(pos != std::string::npos) acc->erase(pos); } ResponseFormat outFormat = ResponseFormatHtml; for(std::list::iterator acc = accepts.begin(); acc != accepts.end(); ++acc) { if(*acc == "application/json") { outFormat = ResponseFormatJson; outmsg.Attributes()->set("HTTP:content-type","application/json"); break; } else if((*acc == "text/xml") || (*acc == "application/xml")) { outFormat = ResponseFormatXml; outmsg.Attributes()->set("HTTP:content-type","application/xml"); break; } else if(*acc == "text/html") { outFormat = ResponseFormatHtml; outmsg.Attributes()->set("HTTP:content-type","text/html"); break; } } return outFormat; } // Insert structured positive response into outmsg. 
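// The content negotiation performed by ProcessAcceptedFormat() above, as a
// standalone sketch: split the Accept header on commas, drop ";q=..."
// parameters, and take the first media type the service can render. The
// q-value weighting of real HTTP clients is deliberately ignored, as in the
// code above; PickFormat is a hypothetical name.
#include <iostream>
#include <sstream>
#include <string>
static std::string PickFormat(const std::string& accept) {
  std::istringstream in(accept);
  std::string item;
  while(std::getline(in, item, ',')) {
    std::string::size_type pos = item.find(';'); // strip ;q=... parameters
    if(pos != std::string::npos) item.erase(pos);
    std::string::size_type b = item.find_first_not_of(' ');
    std::string::size_type e = item.find_last_not_of(' ');
    if(b == std::string::npos) continue; // blank entry
    item = item.substr(b, e - b + 1);
    if(item == "application/json" || item == "application/xml" ||
       item == "text/xml" || item == "text/html") return item;
  }
  return "text/html"; // default format, matching ResponseFormatHtml above
}
int main() {
  std::cout << PickFormat("text/plain, application/json;q=0.9, */*;q=0.8") << std::endl;
  // prints: application/json
  return 0;
}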
static Arc::MCC_Status HTTPResponse(Arc::Message& inmsg, Arc::Message& outmsg, Arc::XMLNode& resp, char const * const json_arrays[], bool show_empty_arrays = true) { ResponseFormat outFormat = ProcessAcceptedFormat(inmsg,outmsg); std::string respStr; RenderResponse(resp, outFormat, respStr, json_arrays, show_empty_arrays); if(inmsg.Attributes()->get("HTTP:METHOD") == "HEAD") { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(outpayload) outpayload->Truncate(respStr.length()); delete outmsg.Payload(outpayload); } else { Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(outpayload) outpayload->Insert(respStr.c_str(),0,respStr.length()); delete outmsg.Payload(outpayload); } outmsg.Attributes()->set("HTTP:CODE","200"); outmsg.Attributes()->set("HTTP:REASON","OK"); return Arc::MCC_Status(Arc::STATUS_OK); } static Arc::MCC_Status HTTPPOSTResponse(Arc::Message& inmsg, Arc::Message& outmsg, Arc::XMLNode& resp, char const * const json_arrays[], std::string const & redir = "") { ResponseFormat outFormat = ProcessAcceptedFormat(inmsg,outmsg); std::string respStr; RenderResponse(resp, outFormat, respStr, json_arrays); Arc::PayloadRaw* outpayload = new Arc::PayloadRaw(); if(outpayload) outpayload->Insert(respStr.c_str(),0,respStr.length()); delete outmsg.Payload(outpayload); outmsg.Attributes()->set("HTTP:CODE","201"); outmsg.Attributes()->set("HTTP:REASON","Created"); if(!redir.empty()) outmsg.Attributes()->set("HTTP:location",redir); return Arc::MCC_Status(Arc::STATUS_OK); } static std::string GetPath(Arc::Message &inmsg,std::string &base,std::multimap& query) { base = inmsg.Attributes()->get("ENDPOINT"); Arc::AttributeIterator iterator = inmsg.Attributes()->getAll("PLEXER:EXTENSION"); std::string path; if(iterator.hasMore()) { // Service is behind plexer path = *iterator; if(base.length() > path.length()) base.resize(base.length()-path.length()); } else { // Standalone service path=Arc::URL(base).Path(); base.resize(0); }; std::string::size_type queryPos = path.find('?'); if(queryPos == std::string::npos) queryPos = path.find(';'); if(queryPos != std::string::npos) { std::list queryItems; Arc::tokenize(path.substr(queryPos+1), queryItems, "&"); for(std::list::iterator queryItem = queryItems.begin(); queryItem != queryItems.end(); ++queryItem) { std::string::size_type valuePos = queryItem->find('='); std::string value; if(valuePos != std::string::npos) { value = queryItem->substr(valuePos+1); queryItem->resize(valuePos); }; query.insert(std::pair(Arc::uri_unencode(*queryItem),Arc::uri_unencode(value))); }; path.resize(queryPos); }; // Path is encoded in HTTP URLs too path = Arc::uri_unencode(path); return path; } static void ParseIds(std::multimap const & query, std::list& ids) { typedef std::multimap::const_iterator iter; std::pair range = query.equal_range("id"); for(iter id = range.first; id != range.second; ++id) { ids.push_back(id->second); }; } static void ParseJobIds(Arc::Message& inmsg, Arc::Message& outmsg, std::list& ids) { std::string content; Arc::MCC_Status status = extract_content(inmsg,content,1024*1024); std::string contentType = inmsg.Attributes()->get("HTTP:content-type"); Arc::XMLNode listXml; if(contentType == "application/json") { Arc::XMLNode("").Move(listXml); (void)ParseFromJson(listXml, content.c_str()); } else if((contentType == "application/xml") || contentType.empty()) { Arc::XMLNode(content).Move(listXml); } // jobs // job // id for(Arc::XMLNode jobXml = listXml["job"];(bool)jobXml;++jobXml) { std::string id = jobXml["id"]; if(!id.empty()) ids.push_back(id); 
} } // REST State A-REX State // * ACCEPTING ACCEPTED // * ACCEPTED PENDING:ACCEPTED // * PREPARING PREPARING // * PREPARED PENDING:PREPARING // * SUBMITTING SUBMIT // - QUEUING INLRMS + LRMS queued // - RUNNING INLRMS + LRMS running // - HELD INLRMS + LRMS on hold // - EXITINGLRMS INLRMS + LRMS finished // - OTHER INLRMS + LRMS other // * EXECUTED PENDING:INLRMS // * FINISHING FINISHING // * KILLING CANCELLING | PREPARING + DTR cancel | FINISHING + DTR cancel // * FINISHED FINISHED + no errors & no cancel // * FAILED FINISHED + errors // * KILLED FINISHED + cancel // * WIPED DELETED static void convertActivityStatusREST(const std::string& gm_state,std::string& rest_state, bool failed,bool pending,const std::string& /*failedstate*/,const std::string& failedcause) { rest_state.clear(); if(gm_state == "ACCEPTED") { if(!pending) rest_state="ACCEPTING"; else rest_state="ACCEPTED"; } else if(gm_state == "PREPARING") { if(!pending) rest_state="PREPARING"; else rest_state="PREPARED"; } else if(gm_state == "SUBMIT") { rest_state="SUBMITTING"; } else if(gm_state == "INLRMS") { if(!pending) { // Talking to LRMS would be too heavy. Just choose something innocent enough. rest_state="RUNNING"; } else { rest_state="EXECUTED"; } } else if(gm_state == "FINISHING") { rest_state="FINISHING"; } else if(gm_state == "CANCELING") { rest_state="KILLING"; } else if(gm_state == "FINISHED") { if(!pending) { if(failed) { // TODO: hack if(failedcause.find("Job is canceled by external request") != std::string::npos) { rest_state = "KILLED"; } else { rest_state = "FAILED"; } } else { rest_state="FINISHED"; } } else { rest_state="EXECUTED"; } } else if(gm_state == "DELETED") { rest_state="WIPED"; } else { rest_state="None"; } } ARexRest::ARexRest(Arc::Config *cfg, Arc::PluginArgument *parg, GMConfig& config, ARex::DelegationStores& delegation_stores,unsigned int& all_jobs_count): logger_(Arc::Logger::rootLogger, "A-REX REST"), config_(config),delegation_stores_(delegation_stores),all_jobs_count_(all_jobs_count) { endpoint_=(std::string)((*cfg)["endpoint"]); uname_=(std::string)((*cfg)["usermap"]["defaultLocalName"]); } ARexRest::~ARexRest(void) { } // Main request processor of REST interface Arc::MCC_Status ARexRest::process(Arc::Message& inmsg,Arc::Message& outmsg) { // Split request path into parts: service, jobs, files, etc. 
// TODO: make it HTTP independent std::string endpoint; ProcessingContext context; context.method = inmsg.Attributes()->get("HTTP:METHOD"); std::string clientid = (inmsg.Attributes()->get("TCP:REMOTEHOST"))+":"+(inmsg.Attributes()->get("TCP:REMOTEPORT")); logger_.msg(Arc::INFO, "Connection from %s: %s", inmsg.Attributes()->get("TCP:REMOTEHOST"), inmsg.Attributes()->get("TLS:IDENTITYDN")); context.subpath = GetPath(inmsg,endpoint,context.query); context.processed = "/"; if((inmsg.Attributes()->get("PLEXER:PATTERN").empty()) && context.subpath.empty()) context.subpath=endpoint; logger_.msg(Arc::VERBOSE, "process: method: %s", context.method); logger_.msg(Arc::VERBOSE, "process: endpoint: %s", endpoint); // {/rest}// logger_.msg(Arc::VERBOSE, "REST: process %s at %s",context.method,context.subpath); std::string apiVersion; GetPathToken(context.subpath, apiVersion); // drop /rest if ((!GetPathToken(context.subpath, apiVersion)) || apiVersion.empty()) { // {/rest return processVersions(inmsg, outmsg, context); } context.processed += apiVersion; context.processed += "/"; if (apiVersion == "1.0") { context.version = ProcessingContext::Version_1_0; } else if (apiVersion == "1.1") { context.version = ProcessingContext::Version_1_1; } else { return HTTPFault(inmsg,outmsg,404,"Version Not Supported"); } std::string functionality; if(!GetPathToken(context.subpath, functionality) || functionality.empty()) { // {/rest}/ return processGeneral(inmsg, outmsg, context); } context.processed += functionality; context.processed += "/"; if (functionality == "info") { // {/rest}//info[?schema=glue2] return processInfo(inmsg, outmsg, context); } else if (functionality == "delegations") { // {/rest}/delegations/[[?action=get,renew,delete]] return processDelegations(inmsg, outmsg, context); } else if (functionality == "jobs") { // {/rest}//jobs[?state=[&state=[...]]] // {/rest}//jobs?action={new|info|status|kill|clean|restart} return processJobs(inmsg, outmsg, context); } return HTTPFault(inmsg,outmsg,404,"Functionality Not Supported"); } // ---------------------------- GENERAL INFO --------------------------------- Arc::MCC_Status ARexRest::processVersions(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context) { if((context.method == "GET") || (context.method == "HEAD")) { XMLNode versions("1.01.1"); // only supported versions are 1.0 and 1.1 char const * const json_arrays[] = { "version", NULL }; return HTTPResponse(inmsg, outmsg, versions, json_arrays); } logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed); return HTTPFault(inmsg,outmsg,501,"Not Implemented"); } Arc::MCC_Status ARexRest::processGeneral(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context) { return HTTPFault(inmsg,outmsg,404,"Not Found"); } Arc::MCC_Status ARexRest::processInfo(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context) { if(!context.subpath.empty()) return HTTPFault(inmsg,outmsg,404,"Not Found"); // GET /info[?schema=glue2] - retrieve generic information about cluster. // HEAD - supported. // PUT,POST,DELETE - not supported. 
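// Behind the GET handler below: the GLUE2 info document is read from a file
// on disk, so the handler only loads it and re-renders it in the format the
// client asked for. A plain-iostream sketch of the read step (standing in
// for Arc::FileRead; "info.xml" is a made-up path):
#include <fstream>
#include <sstream>
#include <string>
static bool ReadFileToString(const std::string& path, std::string& content) {
  std::ifstream in(path.c_str(), std::ios::binary);
  if(!in) return false;
  std::ostringstream buf;
  buf << in.rdbuf(); // pull the whole file into the buffer
  content = buf.str();
  return true;
}
// Usage: std::string infoStr; ReadFileToString("info.xml", infoStr);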
if((context.method != "GET") && (context.method != "HEAD")) { logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed); return HTTPFault(inmsg,outmsg,501,"Not Implemented"); } std::string schema = context["schema"]; if (!schema.empty() && (schema != "glue2")) { logger_.msg(Arc::VERBOSE, "process: schema %s is not supported for subpath %s",schema,context.processed); return HTTPFault(inmsg,outmsg,501,"Schema not implemented"); } std::string infoStr; Arc::FileRead(config_.InformationFile(), infoStr); XMLNode infoXml(infoStr); char const * const info_json_arrays[] = { "Domains/AdminDomain/Services/Service/Endpoint/Activities/Activity/Associations/ActivityID", "Domains/AdminDomain/Services/Service/Endpoint/Activities/Activity", "Domains/AdminDomain/Services/Service/Endpoint/Extensions/Extension", "Domains/AdminDomain/Services/Service/Endpoint/OtherInfo", "Domains/AdminDomain/Services/Service/Endpoint/Capability", "Domains/AdminDomain/Services/Service/Endpoint/InterfaceVersion", "Domains/AdminDomain/Services/Service/Endpoint/InterfaceExtension", "Domains/AdminDomain/Services/Service/Endpoint/WSDL", "Domains/AdminDomain/Services/Service/Endpoint/SupportedProfile", "Domains/AdminDomain/Services/Service/Endpoint/Semantics", "Domains/AdminDomain/Services/Service/Endpoint/TrustedCA", "Domains/AdminDomain/Services/Service/Endpoint/AccessPolicy", "Domains/AdminDomain/Services/Service/Endpoint/AccessPolicy/OtherInfo", "Domains/AdminDomain/Services/Service/Endpoint/AccessPolicy/Extensions/Extension", "Domains/AdminDomain/Services/Service/Endpoint/AccessPolicy/Rule", "Domains/AdminDomain/Services/Service/Endpoint/AccessPolicy/Associations/UserDomainID", "Domains/AdminDomain/Services/Service/Endpoint", "Domains/AdminDomain/Services/Service/Associations/ServiceID", "Domains/AdminDomain/Services/Service/Capability", "Domains/AdminDomain/Services/Service/StatusInfo", "Domains/AdminDomain/Services/Service/Contact", "Domains/AdminDomain/Services/Service/Extensions/Extension", "Domains/AdminDomain/Services/Service/OtherInfo", "Domains/AdminDomain/Services/Service/StorageManager/Extensions/Extension", "Domains/AdminDomain/Services/Service/StorageManager/OtherInfo", "Domains/AdminDomain/Services/Service/StorageManager/DataStore/Extensions/Extension", "Domains/AdminDomain/Services/Service/StorageManager/DataStore/OtherInfo", "Domains/AdminDomain/Services/Service/StorageManager/DataStore", "Domains/AdminDomain/Services/Service/StorageManager", "Domains/AdminDomain/Services/Service/Location/Extensions/Extension", "Domains/AdminDomain/Services/Service/Location/OtherInfo", "Domains/AdminDomain/Services/Service/Contact/Extensions/Extension", "Domains/AdminDomain/Services/Service/Contact/OtherInfo", "Domains/AdminDomain/Services/Service", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/State", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/RestartState", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/Error", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/RequestedApplicationEnvironment", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/ExecutionNode", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/OtherMessages", 
"Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity/Associations/ActivityID", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/ComputingActivities/ComputingActivity", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/JobDescription", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/Associations/ComputingShareID", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/Capability", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/InterfaceVersion", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/InterfaceExtension", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/WSDL", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/SupportedProfile", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/Semantics", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/TrustedCA", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/AccessPolicy", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/AccessPolicy/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/AccessPolicy/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/AccessPolicy/Rule", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint/AccessPolicy/Associations/UserDomainID", "Domains/AdminDomain/Services/ComputingService/ComputingEndpoint", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/NetworkInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Benchmark", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Associations/ComputingShareID", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Associations/ComputingActivityID", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Associations/ApplicationEnvironmentID", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Benchmark/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Benchmark/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ExecutionEnvironments/ExecutionEnvironment", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/BestBenchmark", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/Associations/ExecutionEnvironmentID", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/OtherInfo", 
"Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/ApplicationHandle/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/ApplicationHandle/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment/ApplicationHandle", "Domains/AdminDomain/Services/ComputingService/ComputingManager/ApplicationEnvironments/ApplicationEnvironment", "Domains/AdminDomain/Services/ComputingService/ComputingManager/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingManager/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/NetworkInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/Benchmark/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingManager/Benchmark/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingManager/Benchmark", "Domains/AdminDomain/Services/ComputingService/ComputingManager", "Domains/AdminDomain/Services/ComputingService/Capability", "Domains/AdminDomain/Services/ComputingService/StatusInfo", "Domains/AdminDomain/Services/ComputingService/Contact", "Domains/AdminDomain/Services/ComputingService/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/OtherInfo", "Domains/AdminDomain/Services/ComputingService/StorageManager/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/StorageManager/OtherInfo", "Domains/AdminDomain/Services/ComputingService/StorageManager", "Domains/AdminDomain/Services/ComputingService/Location/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/Location/OtherInfo", "Domains/AdminDomain/Services/ComputingService/Contact/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/Contact/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingShare/MappingPolicy/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingShare/MappingPolicy/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingShare/MappingPolicy/Rule", "Domains/AdminDomain/Services/ComputingService/ComputingShare/MappingPolicy/Associations/UserDomainID", "Domains/AdminDomain/Services/ComputingService/ComputingShare/MappingPolicy", "Domains/AdminDomain/Services/ComputingService/ComputingShare/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ComputingShare/OtherInfo", "Domains/AdminDomain/Services/ComputingService/ComputingShare/Tag", "Domains/AdminDomain/Services/ComputingService/ComputingShare/Associations/ComputingEndpointID", "Domains/AdminDomain/Services/ComputingService/ComputingShare/Associations/ExecutionEnvironmentID", "Domains/AdminDomain/Services/ComputingService/ComputingShare/Associations/ComputingActivityID", "Domains/AdminDomain/Services/ComputingService/ComputingShare", "Domains/AdminDomain/Services/ComputingService/ComputingManager", "Domains/AdminDomain/Services/ComputingService/Associations/ServiceID", "Domains/AdminDomain/Services/ComputingService/ToStorageService/Extensions/Extension", "Domains/AdminDomain/Services/ComputingService/ToStorageService/OtherInfo", "Domains/AdminDomain/Services/ComputingService", "Domains/AdminDomain/Services/StorageService/Capability", "Domains/AdminDomain/Services/StorageService/StatusInfo", "Domains/AdminDomain/Services/StorageService/Contact", "Domains/AdminDomain/Services/StorageService/Extensions/Extension", 
"Domains/AdminDomain/Services/StorageService/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageManager/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageManager/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageManager", "Domains/AdminDomain/Services/StorageService/Location/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/Location/OtherInfo", "Domains/AdminDomain/Services/StorageService/Contact/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/Contact/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageServiceCapacity/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageServiceCapacity/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageShare/StorageShareCapacity/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageShare/StorageShareCapacity/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageShare/MappingPolicy", "Domains/AdminDomain/Services/StorageService/StorageShare/MappingPolicy/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageShare/MappingPolicy/Rule", "Domains/AdminDomain/Services/StorageService/StorageShare/MappingPolicy/Associations/UserDomainID", "Domains/AdminDomain/Services/StorageService/StorageShare/MappingPolicy/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageShare/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageShare/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageShare/AccessMode", "Domains/AdminDomain/Services/StorageService/StorageShare/RetentionPolicy", "Domains/AdminDomain/Services/StorageService/StorageShare/StorageShareCapacity", "Domains/AdminDomain/Services/StorageService/StorageShare/Associations/StorageEndpointID", "Domains/AdminDomain/Services/StorageService/StorageShare/Associations/DataStoreID", "Domains/AdminDomain/Services/StorageService/StorageShare", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/Capability", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/InterfaceVersion", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/InterfaceExtension", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/WSDL", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/SupportedProfile", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/Semantics", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/TrustedCA", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/AccessPolicy", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/AccessPolicy/OtherInfo", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/AccessPolicy/Extensions/Extension", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/AccessPolicy/Rule", "Domains/AdminDomain/Services/StorageService/StorageEndpoint/AccessPolicy/Associations/UserDomainID", "Domains/AdminDomain/Services/StorageService/StorageEndpoint", "Domains/AdminDomain/Services/StorageService/StorageManager", "Domains/AdminDomain/Services/StorageService/StorageAccessProtocol/Associations/ToComputingServiceID", "Domains/AdminDomain/Services/StorageService/StorageAccessProtocol", "Domains/AdminDomain/Services/StorageService/StorageServiceCapacity", "Domains/AdminDomain/Services/StorageService/ToComputingService", 
"Domains/AdminDomain/Services/StorageService/Associations/ServiceID", "Domains/AdminDomain/Services/StorageService", "Domains/AdminDomain/Services/Owner", "Domains/AdminDomain/Services/AdminDomain", "Domains/AdminDomain/Services/Extensions/Extension", "Domains/AdminDomain/Services/OtherInfo", "Domains/AdminDomain/Services/Location/Extensions/Extension", "Domains/AdminDomain/Services/Location/OtherInfo", "Domains/AdminDomain/Services/Contact/Extensions/Extension", "Domains/AdminDomain/Services/Contact/OtherInfo", "Domains/AdminDomain/Services/WWW", "Domains/AdminDomain/Services/Contact", "Domains/AdminDomain", "Domains/UserDomain/UserManager", "Domains/UserDomain/Member", "Domains/UserDomain/UserDomain", "Domains/UserDomain/WWW", "Domains/UserDomain/Contact", "Domains/UserDomain/Extensions/Extension", "Domains/UserDomain/OtherInfo", "Domains/UserDomain/Location/Extensions/Extension", "Domains/UserDomain/Location/OtherInfo", "Domains/UserDomain/Contact/Extensions/Extension", "Domains/UserDomain/Contact/OtherInfo", "Domains/UserDomain", NULL }; return HTTPResponse(inmsg, outmsg, infoXml, info_json_arrays, false); } // ---------------------------- DELEGATIONS --------------------------------- Arc::MCC_Status ARexRest::processDelegations(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context) { // GET /delegations[&type={x509|jwt}] - retrieves list of delegations belonging to authenticated user // HEAD - supported. // POST /delegations?action=new starts a new delegation process (1st step). // PUT /delegations/ stores public part (2nd step). // POST /delegations/?action=get,renew,delete used to manage delegation. std::string delegationId; if(GetPathToken(context.subpath, delegationId)) { context.processed += delegationId; context.processed += "/"; return processDelegation(inmsg,outmsg,context,delegationId); } ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_); if(!config) { return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration"); } if((context.method == "GET") || (context.method == "HEAD")) { std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); std::string requestedType; if(context.version >= ProcessingContext::Version_1_1) { requestedType = context["type"]; } XMLNode listXml(""); std::list > > ids = delegation_stores_[config_.DelegationDir()].ListCredInfos(config->GridName()); for(std::list > >::iterator itId = ids.begin(); itId != ids.end(); ++itId) { char const * delegType = "x509"; if(itId->second.size() > 0) delegType = itId->second.front().c_str(); if (!requestedType.empty()) { if(requestedType != delegType) continue; } XMLNode delegXml = listXml.NewChild("delegation"); delegXml.NewChild("id") = itId->first; delegXml.NewChild("type") = delegType; } char const * const json_arrays[] = { "delegation", NULL }; return HTTPResponse(inmsg, outmsg, listXml, json_arrays); } else if(context.method == "POST") { std::string action = context["action"]; if(action != "new") return HTTPFault(inmsg,outmsg,501,"Action not implemented"); std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobCreate, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); std::string requestedType; if(context.version >= ProcessingContext::Version_1_1) { requestedType = context["type"]; } 
    std::string delegationId;
    std::string delegationRequest;
    if(requestedType.empty() || (requestedType == "x509")) {
      // TODO: explicitly put x509 into meta
      if(!delegation_stores_.GetRequest(config_.DelegationDir(),delegationId,config->GridName(),delegationRequest)) {
        return HTTPFault(inmsg,outmsg,500,"Failed generating delegation request");
      }
      Arc::URL base(inmsg.Attributes()->get("ENDPOINT"));
      return HTTPPOSTResponse(inmsg,outmsg,delegationRequest,"application/x-pem-file",base.Path()+"/"+delegationId);
    } else if(requestedType == "jwt") {
      Arc::AttributeIterator tokenIt = inmsg.Attributes()->getAll("HTTP:x-token-delegation");
      if(!tokenIt.hasMore())
        return HTTPFault(inmsg,outmsg,501,"Missing X-Token-Delegation header in delegation request");
      std::list<std::string> meta;
      meta.push_back("jwt");
      if(!delegation_stores_.PutCred(config_.DelegationDir(),delegationId,config->GridName(),*tokenIt,meta)) {
        return HTTPFault(inmsg,outmsg,500,"Failed storing delegation token");
      }
      Arc::URL base(inmsg.Attributes()->get("ENDPOINT"));
      return HTTPPOSTResponse(inmsg,outmsg,base.Path()+"/"+delegationId);
    }
    return HTTPFault(inmsg,outmsg,501,"Unknown delegation type specified");
  }
  logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed);
  return HTTPFault(inmsg,outmsg,501,"Not Implemented");
}

void UpdateProxyFile(ARex::DelegationStores& delegation_stores, ARexConfigContext& config, std::string const& id) {
#if 1
  // In case of update for compatibility during intermediate period store delegations in
  // per-job proxy file too.
  DelegationStore& delegation_store(delegation_stores[config.GmConfig().DelegationDir()]);
  std::list<std::string> job_ids;
  if(delegation_store.GetLocks(id,config.GridName(),job_ids)) {
    for(std::list<std::string>::iterator job_id = job_ids.begin(); job_id != job_ids.end(); ++job_id) {
      // check if that is main delegation for this job
      std::string delegationid;
      if(job_local_read_delegationid(*job_id,config.GmConfig(),delegationid)) {
        if(id == delegationid) {
          std::string credentials;
          if(delegation_store.GetCred(id,config.GridName(),credentials)) {
            if(!credentials.empty()) {
              GMJob job(*job_id,Arc::User(config.User().get_uid()));
              (void)job_proxy_write_file(job,config.GmConfig(),credentials);
            };
          };
        };
      };
    };
  };
#endif
}

Arc::MCC_Status ARexRest::processDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context,std::string const & id) {
  // GET,HEAD,DELETE - not supported.
  // PUT /delegations/<id> stores public part (2nd step) to finish delegation procedure or to renew delegation.
  // POST /delegations/<id>?action=get,renew,delete used to manage delegation.
  if(!context.subpath.empty())
    return HTTPFault(inmsg,outmsg,404,"Not Found"); // no more sub-resources
  ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_);
  if(!config)
    return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration");
  // POST - manages delegation.
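  // Illustrative renewal cycle for an existing delegation id "abc123"
  // (hypothetical value):
  //   POST .../delegations/abc123?action=renew -> fresh certificate request
  //   PUT  .../delegations/abc123              -> signed certificate stored and
  //        per-job proxy files refreshed through UpdateProxyFile() above.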
if(context.method == "PUT") { std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobCreate, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); // Fetch HTTP content to pass it as delegation std::string content; Arc::MCC_Status res = extract_content(inmsg,content,1024*1024); // 1mb size limit is sane enough if(!res) return HTTPFault(inmsg,outmsg,500,res.getExplanation().c_str()); if(content.empty()) return HTTPFault(inmsg,outmsg,500,"Missing payload"); if(!delegation_stores_.PutDeleg(config_.DelegationDir(),id,config->GridName(),content)) return HTTPFault(inmsg,outmsg,500,"Failed accepting delegation"); UpdateProxyFile(delegation_stores_, *config, id); return HTTPResponse(inmsg,outmsg); } else if(context.method == "POST") { std::string action = context["action"]; if(action == "get") { std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); std::string credentials; if(!delegation_stores_[config_.DelegationDir()].GetDeleg(id, config->GridName(), credentials)) { return HTTPFault(inmsg,outmsg,404,"No delegation found"); } return HTTPResponse(inmsg, outmsg, credentials, "application/x-pem-file"); // ?? } else if(action == "renew") { std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobCreate, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); std::string delegationId = id; std::string delegationRequest; if(!delegation_stores_.GetRequest(config_.DelegationDir(),delegationId,config->GridName(),delegationRequest)) return HTTPFault(inmsg,outmsg,500,"Failed generating delegation request"); return HTTPPOSTResponse(inmsg,outmsg,delegationRequest,"application/x-pem-file",""); } else if(action == "delete") { std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobDelete, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); Arc::DelegationConsumerSOAP* deleg = delegation_stores_[config_.DelegationDir()].FindConsumer(id, config->GridName()); if(!deleg) return HTTPFault(inmsg,outmsg,404,"No such delegation"); if(!(delegation_stores_[config_.DelegationDir()].RemoveConsumer(deleg))) return HTTPFault(inmsg,outmsg,500,"Failed deleting delegation"); return HTTPDELETEResponse(inmsg, outmsg); // ?? 
    }
    logger_.msg(Arc::VERBOSE, "process: action %s is not supported for subpath %s",action,context.processed);
    return HTTPFault(inmsg,outmsg,501,"Action not implemented");
  }
  logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed);
  return HTTPFault(inmsg,outmsg,501,"Not Implemented");
}

// ---------------------------- JOBS ---------------------------------

static bool processJobInfo(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml);
static bool processJobStatus(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml);
static bool processJobKill(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml);
static bool processJobClean(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml);
static bool processJobRestart(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml);
static bool processJobDelegations(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml, ARex::DelegationStores& delegation_stores);

Arc::MCC_Status ARexRest::processJobs(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context) {
  // GET /jobs[?state=<state>[,<state>[...]]]
  // HEAD - supported.
  // POST /jobs?action=new initiates creation of a new job instance or multiple jobs.
  // POST /jobs?action={info|status|kill|clean|restart|delegations} - job management operations supporting arrays of jobs.
  // PUT - not supported.
  std::string jobId;
  if(GetPathToken(context.subpath, jobId)) { // /jobs/<jobId>/...
    context.processed += jobId;
    context.processed += "/";
    return processJob(inmsg,outmsg,context,jobId);
  }
  ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_);
  if(!config) {
    return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration");
  }
  if((context.method == "GET") || (context.method == "HEAD")) {
    std::string errmsg;
    if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg))
      return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
    std::list<std::string> states;
    tokenize(context["state"], states, ",");
    XMLNode listXml("<jobs/>");
    std::list<std::string> ids = ARexJob::Jobs(*config,logger_);
    for(std::list<std::string>::iterator itId = ids.begin(); itId != ids.end(); ++itId) {
      std::string rest_state;
      if(!states.empty()) {
        ARexJob job(*itId,*config,logger_);
        if(!job) continue; // There is no such job
        bool job_pending = false;
        std::string gm_state = job.State(job_pending);
        bool job_failed = job.Failed();
        std::string failed_cause;
        std::string failed_state = job.FailedState(failed_cause);
        convertActivityStatusREST(gm_state,rest_state,job_failed,job_pending,failed_state,failed_cause);
        bool state_found = false;
        for(std::list<std::string>::iterator itState = states.begin(); itState != states.end(); ++itState) {
          if(rest_state == *itState) {
            state_found = true;
            break;
          }
        }
        if(!state_found) continue;
      } // states filter
      XMLNode jobXml = listXml.NewChild("job");
      jobXml.NewChild("id") = *itId;
      if(!rest_state.empty()) jobXml.NewChild("state") = rest_state;
    }
    char const * const json_arrays[] = { "job", NULL };
    return HTTPResponse(inmsg, outmsg, listXml, json_arrays);
  } else if(context.method == "POST") {
    std::string action = context["action"];
    if(action == "new") {
      unsigned int all_jobs_count = all_jobs_count_;
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobCreate, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      if((config->GmConfig().MaxTotal() > 0) && (all_jobs_count >= config->GmConfig().MaxTotal()))
        return HTTPFault(inmsg,outmsg,500,"No more jobs allowed");
      int can_accept_jobs = -1; // stands for no limit
      if(config->GmConfig().MaxTotal() > 0) {
        if(config->GmConfig().MaxTotal() > all_jobs_count)
          can_accept_jobs = config->GmConfig().MaxTotal() - all_jobs_count;
        else
          can_accept_jobs = 0;
      }
      // Fetch HTTP content to pass it as job description
      std::string desc_str;
      Arc::MCC_Status res = extract_content(inmsg,desc_str,100*1024*1024);
      if(!res)
        return HTTPFault(inmsg,outmsg,500,res.getExplanation().c_str());
      if(desc_str.empty())
        return HTTPFault(inmsg,outmsg,500,"Missing payload");
      JobIDGeneratorREST idgenerator(config->Endpoint());
      std::string clientid = (inmsg.Attributes()->get("TCP:REMOTEHOST"))+":"+(inmsg.Attributes()->get("TCP:REMOTEPORT"));
      // TODO: Make ARexJob accept JobDescription directly to avoid reparsing jobs and use Arc::JobDescription::Parse here.
      // Quick and dirty check for job type
      std::string::size_type start_pos = desc_str.find_first_not_of(" \t\r\n");
      if(start_pos == std::string::npos)
        return HTTPFault(inmsg,outmsg,500,"Payload is empty");
      std::string default_queue;
      std::string default_delegation_id;
      int instances_min = 1;
      int instances_max = 1;
      if(context.version >= ProcessingContext::Version_1_1) {
        default_queue = context["queue"];
        default_delegation_id = context["delegation_id"];
        Arc::stringto(context["instances"], instances_max);
        Arc::stringto(context["instances_min"], instances_min);
        if((instances_max < 1) || (instances_min > instances_max))
          return HTTPFault(inmsg,outmsg,500,"Wrong number of instances specified");
        if(config->GmConfig().MaxTotal() > 0) {
          if ((all_jobs_count+instances_min) > config->GmConfig().MaxTotal())
            return HTTPFault(inmsg,outmsg,403,"Number of requested instances exceeds allowed limit");
          if ((all_jobs_count+instances_max) > config->GmConfig().MaxTotal())
            instances_max = config->GmConfig().MaxTotal()-all_jobs_count;
        }
      }
      XMLNode listXml("<jobs/>");
      // TODO: Split to separate functions
      switch(desc_str[start_pos]) {
        case '<': { // XML (multi- or single-ADL)
          Arc::XMLNode jobs_desc_xml(desc_str);
          if (jobs_desc_xml.Name() == "ActivityDescriptions") { // multi
            if(instances_max > 1)
              return HTTPFault(inmsg,outmsg,403,"No multiple descriptions and multiple instances simultaneously");
            for(int idx = 0;;++idx) {
              Arc::XMLNode job_desc_xml = jobs_desc_xml.Child(idx);
              if(!job_desc_xml) break;
              XMLNode jobXml = listXml.NewChild("job");
              if(can_accept_jobs != 0) {
                ARexJob job(job_desc_xml,*config,default_delegation_id,default_queue,clientid,logger_,idgenerator);
                if(!job) {
                  jobXml.NewChild("status-code") = "500";
                  jobXml.NewChild("reason") = job.Failure();
                } else {
                  jobXml.NewChild("status-code") = "201";
                  jobXml.NewChild("reason") = "Created";
                  jobXml.NewChild("id") = job.ID();
                  jobXml.NewChild("state") = "ACCEPTING";
                }
              } else {
                jobXml.NewChild("status-code") = "500";
                jobXml.NewChild("reason") = "No more jobs allowed";
              }
              if(can_accept_jobs > 0) --can_accept_jobs;
            }
          } else { // maybe single
            if(instances_max <= 1) {
              if(can_accept_jobs == 0)
                return HTTPFault(inmsg,outmsg,500,"No more jobs allowed");
              XMLNode jobXml = listXml.NewChild("job");
              ARexJob job(jobs_desc_xml,*config,default_delegation_id,default_queue,clientid,logger_,idgenerator);
              if(!job) {
                jobXml.NewChild("status-code") = "500";
jobXml.NewChild("reason") = job.Failure(); } else { jobXml.NewChild("status-code") = "201"; jobXml.NewChild("reason") = "Created"; jobXml.NewChild("id") = job.ID(); jobXml.NewChild("state") = "ACCEPTING"; } } else { if(can_accept_jobs >= 0) { if(can_accept_jobs < instances_min) return HTTPFault(inmsg,outmsg,500,"No more jobs allowed"); if(can_accept_jobs < instances_max) instances_max = can_accept_jobs; } std::string failure; std::vector ids; if(!ARexJob::Generate(jobs_desc_xml,instances_min,instances_max,*config, default_delegation_id,default_queue,clientid,logger_,idgenerator, ids,failure)) { if(instances_max < instances_min) { return HTTPFault(inmsg,outmsg,403,"Can't create requested number of job instances"); } XMLNode jobXml = listXml.NewChild("job"); jobXml.NewChild("status-code") = "500"; jobXml.NewChild("reason") = failure; } else { for(std::size_t idx = 0; idx= 0) { if(can_accept_jobs < instances_min) return HTTPFault(inmsg,outmsg,500,"No more jobs allowed"); if(can_accept_jobs < instances_max) instances_max = can_accept_jobs; } std::string failure; std::vector ids; if(!ARexJob::Generate(desc_str,instances_min,instances_max,*config, default_delegation_id,default_queue,clientid,logger_,idgenerator, ids,failure)) { if(instances_max < instances_min) { return HTTPFault(inmsg,outmsg,403,"Can't create requested number of job instances"); } XMLNode jobXml = listXml.NewChild("job"); jobXml.NewChild("status-code") = "500"; jobXml.NewChild("reason") = failure; } else { for(std::size_t idx = 0; idx 1) return HTTPFault(inmsg,outmsg,403,"No multiple descriptions and multiple instances simultaneously"); std::list jobdescs; Arc::JobDescriptionResult result = Arc::JobDescription::Parse(desc_str, jobdescs, "nordugrid:xrsl", "GRIDMANAGER"); if (!result) { return HTTPFault(inmsg,outmsg,500,result.str().c_str()); } else { for(std::list::iterator jobdesc = jobdescs.begin(); jobdesc != jobdescs.end(); ++jobdesc) { XMLNode jobXml = listXml.NewChild("job"); if(can_accept_jobs != 0) { std::string jobdesc_str; result = jobdesc->UnParse(jobdesc_str, "nordugrid:xrsl", "GRIDMANAGER"); if (!result) { jobXml.NewChild("status-code") = "500"; jobXml.NewChild("reason") = result.str(); } else { ARexJob job(jobdesc_str,*config,default_delegation_id,default_queue,clientid,logger_,idgenerator); if(!job) { jobXml.NewChild("status-code") = "500"; jobXml.NewChild("reason") = job.Failure(); } else { jobXml.NewChild("status-code") = "201"; jobXml.NewChild("reason") = "Created"; jobXml.NewChild("id") = job.ID(); jobXml.NewChild("state") = "ACCEPTING"; } } } else { jobXml.NewChild("status-code") = "500"; jobXml.NewChild("reason") = "No more jobs allowed"; } if(can_accept_jobs > 0) --can_accept_jobs; } } }; break; default: return HTTPFault(inmsg,outmsg,500,"Payload is not recognized"); break; } char const * const json_arrays[] = { "job", NULL }; return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays); } else if(action == "info") { std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg)) return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); std::list ids; ParseJobIds(inmsg,outmsg,ids); XMLNode listXml(""); for(std::list::iterator id = ids.begin(); id != ids.end(); ++id) { XMLNode jobXml = listXml.NewChild("job"); (void)processJobInfo(inmsg,*config,logger_,*id,jobXml); } char const * const json_arrays[] = { "job", NULL }; return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays); } else if(action == "status") { 
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      std::list<std::string> ids;
      ParseJobIds(inmsg,outmsg,ids);
      XMLNode listXml("<jobs/>");
      for(std::list<std::string>::iterator id = ids.begin(); id != ids.end(); ++id) {
        XMLNode jobXml = listXml.NewChild("job");
        (void)processJobStatus(inmsg,*config,logger_,*id,jobXml);
      }
      char const * const json_arrays[] = { "job", NULL };
      return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays);
    } else if(action == "kill") {
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobCancel, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      std::list<std::string> ids;
      ParseJobIds(inmsg,outmsg,ids);
      XMLNode listXml("<jobs/>");
      for(std::list<std::string>::iterator id = ids.begin(); id != ids.end(); ++id) {
        XMLNode jobXml = listXml.NewChild("job");
        (void)processJobKill(inmsg,*config,logger_,*id,jobXml);
      }
      char const * const json_arrays[] = { "job", NULL };
      return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays);
    } else if(action == "clean") {
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobDelete, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      std::list<std::string> ids;
      ParseJobIds(inmsg,outmsg,ids);
      XMLNode listXml("<jobs/>");
      for(std::list<std::string>::iterator id = ids.begin(); id != ids.end(); ++id) {
        XMLNode jobXml = listXml.NewChild("job");
        (void)processJobClean(inmsg,*config,logger_,*id,jobXml);
      }
      char const * const json_arrays[] = { "job", NULL };
      return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays);
    } else if(action == "restart") {
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobCreate, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      std::list<std::string> ids;
      ParseJobIds(inmsg,outmsg,ids);
      XMLNode listXml("<jobs/>");
      for(std::list<std::string>::iterator id = ids.begin(); id != ids.end(); ++id) {
        XMLNode jobXml = listXml.NewChild("job");
        (void)processJobRestart(inmsg,*config,logger_,*id,jobXml);
      }
      char const * const json_arrays[] = { "job", NULL };
      return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays);
    } else if(action == "delegations") {
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      std::list<std::string> ids;
      ParseJobIds(inmsg,outmsg,ids);
      XMLNode listXml("<jobs/>");
      for(std::list<std::string>::iterator id = ids.begin(); id != ids.end(); ++id) {
        XMLNode jobXml = listXml.NewChild("job");
        (void)processJobDelegations(inmsg,*config,logger_,*id,jobXml,delegation_stores_);
      }
      char const * const json_arrays[] = { "job", NULL };
      return HTTPPOSTResponse(inmsg, outmsg, listXml, json_arrays);
    }
    logger_.msg(Arc::VERBOSE, "process: action %s is not supported for subpath %s",action,context.processed);
    return HTTPFault(inmsg,outmsg,501,"Action not implemented");
  }
  logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed);
  return HTTPFault(inmsg,outmsg,501,"Not Implemented");
}

static bool processJobInfo(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml) {
  ARexJob job(id,config,logger);
  if(!job) { // There is no such job
    std::string failure = job.Failure();
logger.msg(Arc::ERROR, "REST:GET job %s - %s", id, failure); jobXml.NewChild("status-code") = "404"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job not found"; jobXml.NewChild("id") = id; jobXml.NewChild("info_document"); return false; } std::string glue_s; Arc::XMLNode glue_xml(job_xml_read_file(id,config.GmConfig(),glue_s)?glue_s:""); if(!glue_xml) { // Fallback: create something minimal static const char* job_xml_template = "\n" " \n" " SubmittedVia=org.ogf.glue.emies.activitycreation\n" " single\n" " \n" " emies:adl\n" " \n" " \n" " \n" " \n" " \n" ""; Arc::XMLNode(job_xml_template).New(glue_xml); Arc::URL headnode(config.GmConfig().HeadNode()); glue_xml["ID"] = std::string("urn:caid:")+headnode.Host()+":org.ogf.glue.emies.activitycreation:"+id; glue_xml["IDFromEndpoint"] = "urn:idfe:"+id; { // Collecting job state bool job_pending = false; std::string gm_state = job.State(job_pending); bool job_failed = job.Failed(); std::string failed_cause; std::string failed_state = job.FailedState(failed_cause); std::string primary_state; std::list state_attributes; convertActivityStatusES(gm_state,primary_state,state_attributes, job_failed,job_pending,failed_state,failed_cause); glue_xml["State"] = "emies:"+primary_state; std::string prefix = glue_xml["State"].Prefix(); for(std::list::iterator attr = state_attributes.begin(); attr != state_attributes.end(); ++attr) { glue_xml.NewChild(prefix+":State") = "emiesattr:"+(*attr); }; std::string rest_state; convertActivityStatusREST(gm_state,rest_state, job_failed,job_pending,failed_state,failed_cause); glue_xml["State"] = "arcrest:"+rest_state; }; glue_xml["Owner"] = config.GridName(); glue_xml.Attribute("CreationTime") = job.Created().str(Arc::ISOTime); }; // Delegation ids? jobXml.NewChild("status-code") = "200"; jobXml.NewChild("reason") = "OK"; jobXml.NewChild("id") = id; jobXml.NewChild("info_document").NewChild("ComputingActivity").Exchange(glue_xml); return true; } static bool processJobStatus(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml) { ARexJob job(id,config,logger); if(!job) { // There is no such job std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:GET job %s - %s", id, failure); jobXml.NewChild("status-code") = "404"; jobXml.NewChild("reason") = (!failure.empty()) ? 
failure : "Job not found"; jobXml.NewChild("id") = id; jobXml.NewChild("state") = "None"; return false; } // Collecting job state // Most detailed state is obtianable from XML info std::string rest_state; { std::string glue_s; if(job_xml_read_file(id,config.GmConfig(),glue_s)) { Arc::XMLNode glue_xml(glue_s); if((bool)glue_xml) { for(Arc::XMLNode snode = glue_xml["State"]; (bool)snode ; ++snode) { std::string state_str = snode; if(state_str.compare(0, 8, "arcrest:") == 0) { rest_state = state_str.substr(8); break; } } } } } if (rest_state.empty()) { // Faster but less detailed state can be computed from GM state bool job_pending = false; std::string gm_state = job.State(job_pending); bool job_failed = job.Failed(); std::string failed_cause; std::string failed_state = job.FailedState(failed_cause); convertActivityStatusREST(gm_state,rest_state, job_failed,job_pending,failed_state,failed_cause); } jobXml.NewChild("status-code") = "200"; jobXml.NewChild("reason") = "OK"; jobXml.NewChild("id") = id; jobXml.NewChild("state") = rest_state; return true; } static bool processJobKill(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml) { ARexJob job(id,config,logger); if(!job) { // There is no such job std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:KILL job %s - %s", id, failure); jobXml.NewChild("status-code") = "404"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job not found"; jobXml.NewChild("id") = id; return false; } if(!job.Cancel()) { std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:KILL job %s - %s", id, failure); jobXml.NewChild("status-code") = "505"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job could not be canceled"; jobXml.NewChild("id") = id; return false; } jobXml.NewChild("status-code") = "202"; jobXml.NewChild("reason") = "Queued for killing"; jobXml.NewChild("id") = id; return true; } static bool processJobClean(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml) { ARexJob job(id,config,logger); if(!job) { // There is no such job std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:CLEAN job %s - %s", id, failure); jobXml.NewChild("status-code") = "404"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job not found"; jobXml.NewChild("id") = id; return false; } if(!job.Clean()) { std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:CLEAN job %s - %s", id, failure); jobXml.NewChild("status-code") = "505"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job could not be cleaned"; jobXml.NewChild("id") = id; return false; } jobXml.NewChild("status-code") = "202"; jobXml.NewChild("reason") = "Queued for cleaning"; jobXml.NewChild("id") = id; return true; } static bool processJobRestart(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml) { ARexJob job(id,config,logger); if(!job) { // There is no such job std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:RESTART job %s - %s", id, failure); jobXml.NewChild("status-code") = "404"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job not found"; jobXml.NewChild("id") = id; return false; } if(!job.Resume()) { std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:RESTART job %s - %s", id, failure); jobXml.NewChild("status-code") = "505"; jobXml.NewChild("reason") = (!failure.empty()) ? 
failure : "Job could not be resumed"; jobXml.NewChild("id") = id; return false; } jobXml.NewChild("status-code") = "202"; jobXml.NewChild("reason") = "Queued for restarting"; jobXml.NewChild("id") = id; return true; } static bool processJobDelegations(Arc::Message& inmsg,ARexConfigContext& config, Arc::Logger& logger, std::string const & id, XMLNode jobXml, ARex::DelegationStores& delegation_stores) { ARexJob job(id,config,logger); if(!job) { // There is no such job std::string failure = job.Failure(); logger.msg(Arc::ERROR, "REST:RESTART job %s - %s", id, failure); jobXml.NewChild("status-code") = "404"; jobXml.NewChild("reason") = (!failure.empty()) ? failure : "Job not found"; jobXml.NewChild("id") = id; return false; } jobXml.NewChild("status-code") = "200"; jobXml.NewChild("reason") = "OK"; jobXml.NewChild("id") = id; std::list ids = delegation_stores[config.GmConfig().DelegationDir()].ListLockedCredIDs(id,config.GridName()); for(std::list::iterator itId = ids.begin(); itId != ids.end(); ++itId) { jobXml.NewChild("delegation_id") = *itId; } return true; } Arc::MCC_Status ARexRest::processJob(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context,std::string const & id) { std::string subResource; if(GetPathToken(context.subpath, subResource)) { context.processed += subResource; context.processed += "/"; if(subResource == "session") { return processJobSessionDir(inmsg,outmsg,context,id); } else if(subResource == "diagnose") { return processJobControlDir(inmsg,outmsg,context,id); } return HTTPFault(inmsg,outmsg,404,"Wrong job sub-resource requested"); } return HTTPFault(inmsg,outmsg,404,"Missing job sub-resource"); } // ------------------------------- PER-JOB SESSION DIR ------------------------------------- static bool write_file(Arc::FileAccess& h,char* buf,size_t size) { for(;size>0;) { ssize_t l = h.fa_write(buf,size); if(l == -1) return false; size-=l; buf+=l; }; return true; } static Arc::MCC_Status PutJobFile(Arc::Message& inmsg, Arc::Message& outmsg, Arc::FileAccess& file, std::string& errstr, Arc::PayloadStreamInterface& stream, FileChunks& fc, bool& complete) { complete = false; // TODO: Use memory mapped file to minimize number of in memory copies const int bufsize = 1024*1024; if(!fc.Size()) fc.Size(stream.Size()); off_t pos = stream.Pos(); if(file.fa_lseek(pos,SEEK_SET) != pos) { std::string err = Arc::StrError(); errstr = "failed to set position of file to "+Arc::tostring(pos)+" - "+err; return HTTPFault(inmsg, outmsg, 500, "Error seeking to specified position in file"); }; char* buf = new char[bufsize]; if(!buf) { errstr = "failed to allocate memory"; return HTTPFault(inmsg, outmsg, 500, "Error allocating memory"); }; bool got_something = false; for(;;) { int size = bufsize; if(!stream.Get(buf,size)) break; if(size > 0) got_something = true; if(!write_file(file,buf,size)) { std::string err = Arc::StrError(); delete[] buf; errstr = "failed to write to file - "+err; return HTTPFault(inmsg, outmsg, 500, "Error writing to file"); }; if(size) fc.Add(pos,size); pos+=size; }; delete[] buf; // Due to limitation of PayloadStreamInterface it is not possible to // directly distingush between zero sized file and file with undefined // size. But by applying some dynamic heuristics it is possible. // TODO: extend/modify PayloadStreamInterface. 
  if((stream.Size() == 0) && (stream.Pos() == 0) && (!got_something)) {
    complete = true;
  }
  return HTTPResponse(inmsg,outmsg);
}

static Arc::MCC_Status PutJobFile(Arc::Message& inmsg, Arc::Message& outmsg, Arc::FileAccess& file,
                                  std::string& errstr, Arc::PayloadRawInterface& buf,
                                  FileChunks& fc, bool& complete) {
  complete = false;
  bool got_something = false;
  if(!fc.Size()) fc.Size(buf.Size());
  for(int n = 0;;++n) {
    char* sbuf = buf.Buffer(n);
    if(sbuf == NULL) break;
    off_t offset = buf.BufferPos(n);
    off_t size = buf.BufferSize(n);
    if(size > 0) {
      got_something = true;
      off_t o = file.fa_lseek(offset,SEEK_SET);
      if(o != offset) {
        std::string err = Arc::StrError();
        errstr = "failed to set position of file to "+Arc::tostring(offset)+" - "+err;
        return HTTPFault(inmsg, outmsg, 500, "Error seeking to specified position");
      };
      if(!write_file(file,sbuf,size)) {
        std::string err = Arc::StrError();
        errstr = "failed to write to file - "+err;
        return HTTPFault(inmsg, outmsg, 500, "Error writing file");
      };
      if(size) fc.Add(offset,size);
    };
  };
  if((buf.Size() == 0) && (!got_something)) {
    complete = true;
  }
  return HTTPResponse(inmsg,outmsg);
}

static void STATtoPROP(std::string const& name, struct stat& st, std::list<std::string> requestProps, XMLNode& response) {
  XMLNode propstat = response.NewChild("d:propstat");
  XMLNode prop = propstat.NewChild("d:prop");
  propstat.NewChild("d:status") = "HTTP/1.1 200 OK";
  prop.NewChild("d:displayname") = name;
  if(S_ISDIR(st.st_mode)) {
    prop.NewChild("d:resourcetype").NewChild("d:collection");
  } else {
    prop.NewChild("d:resourcetype");
    prop.NewChild("d:getcontentlength") = Arc::tostring(st.st_size);
  };
  prop.NewChild("d:getlastmodified") = Arc::Time(st.st_mtime).str(Arc::ISOTime);
  prop.NewChild("d:creationdate") = Arc::Time(st.st_ctime).str(Arc::ISOTime);
}

static void ProcessPROPFIND(Arc::FileAccess* fa, Arc::XMLNode& multistatus,URL const& url,std::string const& path,uid_t uid,gid_t gid,int depth) {
  std::string name;
  std::size_t pos = path.rfind('/');
  if(pos == std::string::npos) name = path; else name = path.substr(pos+1);
  XMLNode response = multistatus.NewChild("d:response");
  std::string hrefStr = url.fullstr();
  struct stat st;
  if(!fa->fa_stat(path,st)) {
    // Not found
    response.NewChild("d:href") = hrefStr;
    response.NewChild("d:status") = "HTTP/1.1 404 Not Found";
  } else if(S_ISREG(st.st_mode)) {
    while(!hrefStr.empty() && hrefStr[hrefStr.length()-1] == '/') hrefStr.resize(hrefStr.length()-1);
    response.NewChild("d:href") = hrefStr;
    STATtoPROP(name, st, std::list<std::string>(), response);
  } else if(S_ISDIR(st.st_mode)) {
    if(!hrefStr.empty() && hrefStr[hrefStr.length()-1] != '/') hrefStr += '/';
    response.NewChild("d:href") = hrefStr;
    STATtoPROP(name, st, std::list<std::string>(), response);
    if(depth > 0) {
      if (fa->fa_opendir(path)) {
        std::list<std::string> names;
        std::string name;
        while(fa->fa_readdir(name)) {
          if(name == ".") continue;
          if(name == "..") continue;
          names.push_back(name);
        }
        fa->fa_closedir();
        for(std::list<std::string>::iterator name = names.begin(); name != names.end(); ++name) {
          URL subUrl(url);
          subUrl.ChangePath(subUrl.Path() + "/" + *name);
          std::string subPath = path + "/" + *name;
          ProcessPROPFIND(fa,multistatus,subUrl,subPath,uid,gid,depth-1);
        }
      }
    }
  } else {
    // Not for this interface
    response.NewChild("d:href") = hrefStr;
    response.NewChild("d:status") = "HTTP/1.1 404 Not Found";
  }
}

Arc::MCC_Status ARexRest::processJobSessionDir(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context,std::string const & id) {
  class FileAccessRef {
  public:
    FileAccessRef(Arc::FileAccess* obj):obj_(obj) {
    }
    ~FileAccessRef() {
      if(obj_) {
        obj_->fa_close();
        obj_->fa_closedir();
        Arc::FileAccess::Release(obj_);
      }
    }
    operator bool() const { return (obj_ != NULL); }
    bool operator !() const { return (obj_ == NULL); }
    Arc::FileAccess& operator*() { return *obj_; }
    Arc::FileAccess* operator->() { return obj_; }
    operator Arc::FileAccess*() { return obj_; }
    Arc::FileAccess*& get() { return obj_; }
  protected:
    Arc::FileAccess* obj_;
  };

  // GET,HEAD,PUT,DELETE - supported for files stored in job's session directory and perform usual actions.
  // GET,HEAD - for directories retrieves list of stored files (consider WebDAV for format).
  // DELETE - for directories removes whole directory.
  // PUT - for directory not supported.
  // POST - not supported.
  // PATCH - for files modifies part of files (body format needs to be defined, all files treated as binary, currently supported via non-standard PUT with ranges).
  // PROPFIND - list directories, stat files.
  ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_);
  if(!config) {
    return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration");
  }
  ARexJob job(id,*config,logger_);
  if(!job) { // There is no such job
    logger_.msg(Arc::ERROR, "REST:GET job %s - %s", id, job.Failure());
    return HTTPFault(inmsg,outmsg,404,job.Failure().c_str());
  }
  // Make sure path is correct while working with files
  if(!CanonicalDir(context.subpath, false, false))
    return HTTPFault(inmsg,outmsg,404,"Wrong path");
  if((context.method == "GET") || (context.method == "HEAD")) {
    // File or folder
    FileAccessRef dir(job.OpenDir(context.subpath));
    if(dir) {
      std::string errmsg;
      if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationDataInfo, config, errmsg))
        return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
      XMLNode listXml("<list/>");
      std::string dirpath = job.GetFilePath(context.subpath);
      for(;;) {
        std::string fileName;
        if(!dir->fa_readdir(fileName)) break;
        if(fileName == ".") continue;
        if(fileName == "..") continue;
        std::string fpath = dirpath+"/"+fileName;
        struct stat st;
        if(dir->fa_lstat(fpath.c_str(),st)) {
          if(S_ISREG(st.st_mode)) {
            XMLNode itemXml = listXml.NewChild("file");
            itemXml = fileName;
            itemXml.NewAttribute("size") = Arc::tostring(st.st_size);
          } else if(S_ISDIR(st.st_mode)) {
            XMLNode itemXml = listXml.NewChild("dir");
            itemXml = fileName;
          };
        };
      };
      char const * const json_arrays[] = { "file", "dir", NULL };
      return HTTPResponse(inmsg,outmsg,listXml,json_arrays);
    };
    std::string errmsg;
    if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationDataRead, config, errmsg))
      return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
    FileAccessRef file(job.OpenFile(context.subpath,true,false));
    if(file) {
      // File or similar
      Arc::MCC_Status r = HTTPResponseFile(inmsg,outmsg,file.get(),"application/octet-stream");
      return r;
    }
    return HTTPFault(inmsg,outmsg,404,"Not found");
  } else if(context.method == "PUT") {
    // Check for proper payload
    Arc::MessagePayload* payload = inmsg.Payload();
    Arc::PayloadStreamInterface* stream = dynamic_cast<Arc::PayloadStreamInterface*>(payload);
    Arc::PayloadRawInterface* buf = dynamic_cast<Arc::PayloadRawInterface*>(payload);
    if((!stream) && (!buf)) {
      logger_.msg(Arc::ERROR, "REST:PUT job %s: file %s: there is no payload", id, context.subpath);
      return HTTPFault(inmsg, outmsg, 500, "Missing payload");
    };
    std::string errmsg;
    if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationDataWrite, config, errmsg))
      return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
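    // Illustrative ranged upload (hypothetical sizes), matching the
    // non-standard "PUT with ranges" mentioned in the method comments: a
    // 10 MiB file sent in two requests,
    //   PUT .../session/big.dat  Content-Range: bytes 0-5242879/10485760
    //   PUT .../session/big.dat  Content-Range: bytes 5242880-10485759/10485760
    // Each request seeks to the offset reported by the payload and registers
    // the written chunk with FileChunks; once the chunks cover the declared
    // size, fc->Complete() turns true and ReportFileComplete() is called below.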
    // Prepare access to file
    FileAccessRef file(job.CreateFile(context.subpath));
    if(!file) {
      // TODO: report something
      logger_.msg(Arc::ERROR, "%s: put file %s: failed to create file: %s", job.ID(), context.subpath, job.Failure());
      return HTTPFault(inmsg, outmsg, 500, "Error creating file");
    };
    FileChunksRef fc(files_chunks_.Get(job.GetFilePath(context.subpath)));
    Arc::MCC_Status r;
    std::string err;
    bool complete(false);
    if(stream) {
      r = PutJobFile(inmsg,outmsg,*file,err,*stream,*fc,complete);
    } else {
      r = PutJobFile(inmsg,outmsg,*file,err,*buf,*fc,complete);
    }
    if(!r) {
      logger_.msg(Arc::ERROR, "HTTP:PUT %s: put file %s: %s", job.ID(), context.subpath, err);
    } else {
      if(complete || fc->Complete()) job.ReportFileComplete(context.subpath);
    }
    return r;
  } else if(context.method == "DELETE") {
    std::string errmsg;
    if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationDataWrite, config, errmsg))
      return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
    std::string fpath = job.GetFilePath(context.subpath);
    if(!fpath.empty()) {
      if((!FileDelete(fpath,job.UID(),job.GID())) && (!DirDelete(fpath,true,job.UID(),job.GID()))) {
        return HTTPFault(inmsg,outmsg,500,"Failed to delete");
      }
    }
    return HTTPDELETEResponse(inmsg,outmsg);
  } else if(context.method == "PROPFIND") {
    std::string errmsg;
    if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationDataInfo, config, errmsg))
      return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str());
    int depth = 10; // infinite with common sense
    std::string depthStr = inmsg.Attributes()->get("HTTP:depth");
    if(depthStr == "0") depth = 0;
    else if(depthStr == "1") depth = 1;
    std::string fpath = job.GetFilePath(context.subpath);
    URL url(inmsg.Attributes()->get("ENDPOINT"));
    Arc::XMLNode multistatus("<d:multistatus xmlns:d=\"DAV:\"/>");
    FileAccessRef fa(Arc::FileAccess::Acquire());
    if(fa) ProcessPROPFIND(fa,multistatus,url,fpath,job.UID(),job.GID(),depth);
    std::string payload;
    multistatus.GetDoc(payload);
    return HTTPResponse(inmsg,outmsg,payload,"application/xml");
  };
  return HTTPFault(inmsg,outmsg,501,"Not Implemented");
}

// ------------------------------- PER-JOB CONTROL DIR ----------------------------

Arc::MCC_Status ARexRest::processJobControlDir(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context,std::string const & id) {
  // GET - return the content of file in A-REX control directory for requested jobID
  // HEAD - supported.
  // PUT, POST, DELETE - not supported.
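  // Example (hypothetical job id): fetching A-REX diagnostics for a job,
  //   GET {endpoint}/rest/1.0/jobs/abc123/diagnose/errors -> text/plain
  //   GET {endpoint}/rest/1.0/jobs/abc123/diagnose/xml    -> application/xml
  // Only sub-resources whitelisted in allowedSubResources below are served;
  // anything else gets 404 "Diagnostic item not found".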
char const * const mimeText = "text/plain"; char const * const mimeXml = "application/xml"; struct resourceDef { char const * const name; char const * const mime; }; resourceDef const allowedSubResources[] = { { "failed", mimeText }, { "local", mimeText }, { "errors", mimeText }, { "description", mimeText }, { "diag", mimeText }, { "comment", mimeText }, { "status", mimeText }, { "acl", mimeText }, { "xml", mimeXml }, { "input", mimeText }, { "output", mimeText }, { "input_status", mimeText }, { "output_status", mimeText }, { "statistics", mimeText }, { NULL, NULL } }; std::string subResource = context.subpath; resourceDef const * allowedSubResource = allowedSubResources; for(; allowedSubResource->name; ++allowedSubResource) { if(subResource == allowedSubResource->name) break; } if(!(allowedSubResource->name)) return HTTPFault(inmsg,outmsg,404,"Diagnostic item not found"); if((context.method == "GET") || (context.method == "HEAD")) { ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_); if(!config) { return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration"); } std::string errmsg; if(!ARexConfigContext::CheckOperationAllowed(ARexConfigContext::OperationJobInfo, config, errmsg)) { return HTTPFault(inmsg,outmsg,HTTP_ERR_FORBIDDEN,"Operation is not allowed",errmsg.c_str()); } ARexJob job(id,*config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "REST:GET job %s - %s", id, job.Failure()); return HTTPFault(inmsg,outmsg,404,job.Failure().c_str()); } int file = job.OpenLogFile(subResource); if(file == -1) return HTTPFault(inmsg,outmsg,404,"Not found"); Arc::MCC_Status r = HTTPResponseFile(inmsg,outmsg,file,allowedSubResource->mime); if(file != -1) ::close(file); return r; } logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed); return HTTPFault(inmsg,outmsg,501,"Not Implemented"); } /* Arc::MCC_Status ARexRest::processJobDelegations(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context,std::string const & id) { std::string delegationId; if(GetPathToken(context.subpath, delegationId)) { context.processed += delegationId; context.processed += "/"; return processJobDelegation(inmsg,outmsg,context,id,delegationId); } ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_); if(!config) { return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration"); } ARexJob job(id,*config,logger_); if(!job) { // There is no such job logger_.msg(Arc::ERROR, "REST:GET job %s - %s", id, job.Failure()); return HTTPFault(inmsg,outmsg,404,job.Failure().c_str()); } // GET - retrieves list of delegations belonging to specified job // HEAD - supported. 
/*
Arc::MCC_Status ARexRest::processJobDelegations(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context,std::string const & id) {
  std::string delegationId;
  if(GetPathToken(context.subpath, delegationId)) {
    context.processed += delegationId;
    context.processed += "/";
    return processJobDelegation(inmsg,outmsg,context,id,delegationId);
  }
  ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_);
  if(!config) {
    return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration");
  }
  ARexJob job(id,*config,logger_);
  if(!job) {
    // There is no such job
    logger_.msg(Arc::ERROR, "REST:GET job %s - %s", id, job.Failure());
    return HTTPFault(inmsg,outmsg,404,job.Failure().c_str());
  }
  // GET - retrieves list of delegations belonging to specified job
  // HEAD - supported.
  if((context.method == "GET") || (context.method == "HEAD")) {
    XMLNode listXml("");
    std::list<std::string> ids = delegation_stores_[config_.DelegationDir()].ListLockedCredIDs(job.ID(),config->GridName());
    for(std::list<std::string>::iterator itId = ids.begin(); itId != ids.end(); ++itId) {
      listXml.NewChild("delegation").NewChild("id") = *itId;
    }
    // json_arrays presumably tells HTTPResponse which elements to render as arrays if JSON output is negotiated
    char const * const json_arrays[] = { "delegation", NULL };
    return HTTPResponse(inmsg, outmsg, listXml, json_arrays);
  }
  logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed);
  return HTTPFault(inmsg,outmsg,501,"Not Implemented");
}

Arc::MCC_Status ARexRest::processJobDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context,std::string const & jobId,std::string const & delegId) {
  if(!context.subpath.empty()) return HTTPFault(inmsg,outmsg,404,"Not Found"); // no more sub-resources
  ARexConfigContext* config = ARexConfigContext::GetRutimeConfiguration(inmsg,config_,uname_,endpoint_);
  if(!config) {
    return HTTPFault(inmsg,outmsg,500,"User can't be assigned configuration");
  }
  // GET - returns public part of the stored delegation as application/x-pem-file.
  // HEAD - supported.
  if((context.method == "GET") || (context.method == "HEAD")) {
    std::string credentials;
    if(!delegation_stores_[config_.DelegationDir()].GetDeleg(delegId, config->GridName(), credentials)) {
      return HTTPFault(inmsg,outmsg,404,"No delegation found");
    }
    return HTTPResponse(inmsg, outmsg, credentials, "application/x-pem-file");
  }
  logger_.msg(Arc::VERBOSE, "process: method %s is not supported for subpath %s",context.method,context.processed);
  return HTTPFault(inmsg,outmsg,501,"Not Implemented");
}
*/
#endif // CPPUNITTEST
nordugrid-arc-7.1.1/src/services/a-rex/rest/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356022024 xustar0030 mtime=1759498990.760384422 30 atime=1759499017.821252731 29 ctime=1759499030.51899653 nordugrid-arc-7.1.1/src/services/a-rex/rest/Makefile.in0000644000175000002070000007477215067751356023740 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/rest ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libarexrest_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la am_libarexrest_la_OBJECTS = libarexrest_la-rest.lo libarexrest_la_OBJECTS = $(am_libarexrest_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libarexrest_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libarexrest_la_CXXFLAGS) $(CXXFLAGS) \ $(libarexrest_la_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = 
./$(DEPDIR)/libarexrest_la-rest.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libarexrest_la_SOURCES) DIST_SOURCES = $(libarexrest_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = 
@GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ 
SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ DIST_SUBDIRS = test SUBDIRS = $(TEST_DIR) noinst_LTLIBRARIES = libarexrest.la libarexrest_la_SOURCES = rest.cpp rest.h libarexrest_la_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) libarexrest_la_LIBADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la libarexrest_la_LDFLAGS = -no-undefined -avoid-version -module all: all-recursive .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; 
else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/rest/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/rest/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libarexrest.la: $(libarexrest_la_OBJECTS) $(libarexrest_la_DEPENDENCIES) $(EXTRA_libarexrest_la_DEPENDENCIES) $(AM_V_CXXLD)$(libarexrest_la_LINK) $(libarexrest_la_OBJECTS) $(libarexrest_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libarexrest_la-rest.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libarexrest_la-rest.lo: rest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarexrest_la_CXXFLAGS) $(CXXFLAGS) -MT libarexrest_la-rest.lo -MD -MP -MF $(DEPDIR)/libarexrest_la-rest.Tpo -c -o libarexrest_la-rest.lo `test -f 'rest.cpp' || echo '$(srcdir)/'`rest.cpp 
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libarexrest_la-rest.Tpo $(DEPDIR)/libarexrest_la-rest.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='rest.cpp' object='libarexrest_la-rest.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libarexrest_la_CXXFLAGS) $(CXXFLAGS) -c -o libarexrest_la-rest.lo `test -f 'rest.cpp' || echo '$(srcdir)/'`rest.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-recursive -rm -f ./$(DEPDIR)/libarexrest_la-rest.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/libarexrest_la-rest.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--depfiles check check-am clean clean-generic clean-libtool \ clean-noinstLTLIBRARIES cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/rest/PaxHeaders/test0000644000000000000000000000013015067751426020656 xustar0030 mtime=1759499030.548470492 28 atime=1759499034.7655102 30 ctime=1759499030.548470492 nordugrid-arc-7.1.1/src/services/a-rex/rest/test/0000755000175000002070000000000015067751426022637 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/rest/test/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022771 xustar0030 mtime=1759498967.772230924 30 atime=1759498967.873493772 30 ctime=1759499030.547510138 nordugrid-arc-7.1.1/src/services/a-rex/rest/test/Makefile.am0000644000175000002070000000072115067751327024673 0ustar00mockbuildmock00000000000000TESTS = RESTTest check_PROGRAMS = $(TESTS) TESTS_ENVIRONMENT = srcdir=$(srcdir) RESTTest_SOURCES = $(top_srcdir)/src/Test.cpp RESTTest.cpp RESTTest_CXXFLAGS = -I$(top_srcdir)/include \ -DCPPUNITTEST=1 $(CPPUNIT_CFLAGS) $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) RESTTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) nordugrid-arc-7.1.1/src/services/a-rex/rest/test/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356023003 xustar0030 mtime=1759498990.810648596 29 atime=1759499017.84025302 30 ctime=1759499030.548470492 nordugrid-arc-7.1.1/src/services/a-rex/rest/test/Makefile.in0000644000175000002070000010013215067751356024703 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ TESTS = RESTTest$(EXEEXT) check_PROGRAMS = $(am__EXEEXT_1) subdir = src/services/a-rex/rest/test ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__EXEEXT_1 = RESTTest$(EXEEXT) am_RESTTest_OBJECTS = RESTTest-Test.$(OBJEXT) \ RESTTest-RESTTest.$(OBJEXT) RESTTest_OBJECTS = $(am_RESTTest_OBJECTS) am__DEPENDENCIES_1 = RESTTest_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = RESTTest_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(RESTTest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = 
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/RESTTest-RESTTest.Po \ ./$(DEPDIR)/RESTTest-Test.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(RESTTest_SOURCES) DIST_SOURCES = $(RESTTest_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ 
ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ 
LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = 
@monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ TESTS_ENVIRONMENT = srcdir=$(srcdir) RESTTest_SOURCES = $(top_srcdir)/src/Test.cpp RESTTest.cpp RESTTest_CXXFLAGS = -I$(top_srcdir)/include \ -DCPPUNITTEST=1 $(CPPUNIT_CFLAGS) $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(AM_CXXFLAGS) RESTTest_LDADD = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(CPPUNIT_LIBS) $(GLIBMM_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/rest/test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/rest/test/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list RESTTest$(EXEEXT): $(RESTTest_OBJECTS) $(RESTTest_DEPENDENCIES) $(EXTRA_RESTTest_DEPENDENCIES) @rm -f RESTTest$(EXEEXT) $(AM_V_CXXLD)$(RESTTest_LINK) $(RESTTest_OBJECTS) $(RESTTest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RESTTest-RESTTest.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RESTTest-Test.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< RESTTest-Test.o: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -MT RESTTest-Test.o -MD -MP -MF $(DEPDIR)/RESTTest-Test.Tpo -c -o RESTTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/RESTTest-Test.Tpo $(DEPDIR)/RESTTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='RESTTest-Test.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -c -o RESTTest-Test.o `test -f '$(top_srcdir)/src/Test.cpp' || echo '$(srcdir)/'`$(top_srcdir)/src/Test.cpp RESTTest-Test.obj: $(top_srcdir)/src/Test.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -MT RESTTest-Test.obj -MD -MP -MF $(DEPDIR)/RESTTest-Test.Tpo -c -o RESTTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/RESTTest-Test.Tpo $(DEPDIR)/RESTTest-Test.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$(top_srcdir)/src/Test.cpp' object='RESTTest-Test.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -c -o RESTTest-Test.obj `if test -f '$(top_srcdir)/src/Test.cpp'; then $(CYGPATH_W) '$(top_srcdir)/src/Test.cpp'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/src/Test.cpp'; fi` RESTTest-RESTTest.o: RESTTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -MT RESTTest-RESTTest.o -MD -MP -MF $(DEPDIR)/RESTTest-RESTTest.Tpo -c -o RESTTest-RESTTest.o `test -f 'RESTTest.cpp' || echo '$(srcdir)/'`RESTTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/RESTTest-RESTTest.Tpo $(DEPDIR)/RESTTest-RESTTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='RESTTest.cpp' object='RESTTest-RESTTest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -c -o RESTTest-RESTTest.o `test -f 'RESTTest.cpp' || echo '$(srcdir)/'`RESTTest.cpp RESTTest-RESTTest.obj: RESTTest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -MT RESTTest-RESTTest.obj -MD -MP -MF $(DEPDIR)/RESTTest-RESTTest.Tpo -c -o RESTTest-RESTTest.obj `if test -f 'RESTTest.cpp'; then $(CYGPATH_W) 'RESTTest.cpp'; else $(CYGPATH_W) '$(srcdir)/RESTTest.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/RESTTest-RESTTest.Tpo $(DEPDIR)/RESTTest-RESTTest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='RESTTest.cpp' object='RESTTest-RESTTest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(RESTTest_CXXFLAGS) $(CXXFLAGS) -c -o RESTTest-RESTTest.obj `if test -f 'RESTTest.cpp'; then $(CYGPATH_W) 'RESTTest.cpp'; else $(CYGPATH_W) '$(srcdir)/RESTTest.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z 
"$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags check-TESTS: $(TESTS) @failed=0; all=0; xfail=0; xpass=0; skip=0; \ srcdir=$(srcdir); export srcdir; \ list=' $(TESTS) '; \ $(am__tty_colors); \ if test -n "$$list"; then \ for tst in $$list; do \ if test -f ./$$tst; then dir=./; \ elif test -f $$tst; then dir=; \ else dir="$(srcdir)/"; fi; \ if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xpass=`expr $$xpass + 1`; \ failed=`expr $$failed + 1`; \ col=$$red; res=XPASS; \ ;; \ *) \ col=$$grn; res=PASS; \ ;; \ esac; \ elif test $$? -ne 77; then \ all=`expr $$all + 1`; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$tst[\ \ ]*) \ xfail=`expr $$xfail + 1`; \ col=$$lgn; res=XFAIL; \ ;; \ *) \ failed=`expr $$failed + 1`; \ col=$$red; res=FAIL; \ ;; \ esac; \ else \ skip=`expr $$skip + 1`; \ col=$$blu; res=SKIP; \ fi; \ echo "$${col}$$res$${std}: $$tst"; \ done; \ if test "$$all" -eq 1; then \ tests="test"; \ All=""; \ else \ tests="tests"; \ All="All "; \ fi; \ if test "$$failed" -eq 0; then \ if test "$$xfail" -eq 0; then \ banner="$$All$$all $$tests passed"; \ else \ if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ fi; \ else \ if test "$$xpass" -eq 0; then \ banner="$$failed of $$all $$tests failed"; \ else \ if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ fi; \ fi; \ dashes="$$banner"; \ skipped=""; \ if test "$$skip" -ne 0; then \ if test "$$skip" -eq 1; then \ skipped="($$skip test was not run)"; \ else \ skipped="($$skip tests were not run)"; \ fi; \ test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$skipped"; \ fi; \ report=""; \ if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ report="Please report to $(PACKAGE_BUGREPORT)"; \ test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ dashes="$$report"; \ fi; \ dashes=`echo "$$dashes" | sed s/./=/g`; \ if test "$$failed" -eq 0; then \ col="$$grn"; \ else \ col="$$red"; \ fi; \ echo "$${col}$$dashes$${std}"; \ echo "$${col}$$banner$${std}"; \ test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ test -z "$$report" || echo "$${col}$$report$${std}"; \ echo "$${col}$$dashes$${std}"; \ test "$$failed" -eq 0; \ else :; fi distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 
's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
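# The check-TESTS recipe above implements automake's serial test protocol: each program listed in $(TESTS) is run in turn, exit status 0 counts as PASS, exit status 77 as SKIP, and any other status as FAIL, with PASS/FAIL inverted for tests named in $(XFAIL_TESTS). A minimal hand-written Makefile.am fragment driving this recipe could look like the following sketch (illustrative only, not taken from this generated file): check_PROGRAMS = RESTTest and TESTS = $(check_PROGRAMS), after which "make check" builds RESTTest and prints the PASS/FAIL/SKIP banner computed above.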
clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/RESTTest-RESTTest.Po -rm -f ./$(DEPDIR)/RESTTest-Test.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/RESTTest-RESTTest.Po -rm -f ./$(DEPDIR)/RESTTest-Test.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-TESTS \ check-am clean clean-checkPROGRAMS clean-generic clean-libtool \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/rest/test/PaxHeaders/RESTTest.cpp0000644000000000000000000000013215067751327023056 xustar0030 mtime=1759498967.772752787 30 atime=1759498967.873493772 30 ctime=1759499030.549524116 nordugrid-arc-7.1.1/src/services/a-rex/rest/test/RESTTest.cpp0000644000175000002070000000263515067751327024766 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include <config.h> #endif #include #include #include "../rest.cpp" class RESTTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(RESTTest); CPPUNIT_TEST(TestJsonParse); CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void TestJsonParse(); }; void RESTTest::setUp() { } void RESTTest::tearDown() { } void RESTTest::TestJsonParse() { char const * jsonStr = R"({"job":[ {"status-code":"201","reason":"Created","id":"id0","state":"ACCEPTING"}, {"status-code":"201","reason":"Created","id":"id1","state":"ACCEPTING"}, {"status-code":"201","reason":"Created","id":"id2","state":"ACCEPTING"} ]})"; Arc::XMLNode xml(""); char const * end = ParseFromJson(xml, jsonStr); CPPUNIT_ASSERT_EQUAL((jsonStr+strlen(jsonStr)), end); XMLNode jobXml = xml["job"]; for(int n = 0; n < 3; ++n) { CPPUNIT_ASSERT_EQUAL(std::string("201"), static_cast<std::string>(jobXml[n]["status-code"])); CPPUNIT_ASSERT_EQUAL(std::string("Created"), static_cast<std::string>(jobXml[n]["reason"])); CPPUNIT_ASSERT_EQUAL(std::string("ACCEPTING"), static_cast<std::string>(jobXml[n]["state"])); CPPUNIT_ASSERT_EQUAL(std::string("id")+Arc::tostring(n), static_cast<std::string>(jobXml[n]["id"])); } } CPPUNIT_TEST_SUITE_REGISTRATION(RESTTest); nordugrid-arc-7.1.1/src/services/a-rex/rest/PaxHeaders/rest.h0000644000000000000000000000013215067751327021104 xustar0030 mtime=1759498967.772230924 30 atime=1759498967.873493772 30 ctime=1759499030.521039825 nordugrid-arc-7.1.1/src/services/a-rex/rest/rest.h0000644000175000002070000000610015067751327023003 0ustar00mockbuildmock00000000000000#ifndef __ARC_AREX_REST_H__ #define __ARC_AREX_REST_H__ #include #include #include #include #include "../grid-manager/conf/GMConfig.h" #define HTTP_ERR_NOT_SUPPORTED (501) #define HTTP_ERR_FORBIDDEN (403) namespace ARex { class ARexRest { public: ARexRest(Arc::Config *cfg, Arc::PluginArgument *parg, GMConfig& config, ARex::DelegationStores& delegation_stores, unsigned int& all_jobs_count); virtual ~ARexRest(void); Arc::MCC_Status process(Arc::Message& inmsg,Arc::Message& outmsg); private: class ProcessingContext { public: std::string subpath; std::string method; std::string processed; std::multimap<std::string,std::string> query; std::string operator[](char const * key) const; enum Version { Version_undefined = 0, Version_1_0 = 1, Version_1_1 = 2 }; Version version; ProcessingContext():version(Version_undefined) {}; }; Arc::Logger logger_; std::string uname_; std::string endpoint_; FileChunksList files_chunks_; ARex::GMConfig& config_; ARex::DelegationStores& delegation_stores_; unsigned int& all_jobs_count_; Arc::MCC_Status processVersions(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context); Arc::MCC_Status processGeneral(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context); Arc::MCC_Status processInfo(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context); Arc::MCC_Status processJobs(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context); Arc::MCC_Status processDelegations(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context); Arc::MCC_Status processDelegation(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context, std::string
const & id); Arc::MCC_Status processJob(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context, std::string const & id); Arc::MCC_Status processJobSessionDir(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context, std::string const & id); Arc::MCC_Status processJobControlDir(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context, std::string const & id); Arc::MCC_Status processJobSub(Arc::Message& inmsg,Arc::Message& outmsg,ProcessingContext& context, std::string const & id, std::string const & subResource); Arc::MCC_Status processJobSession(Arc::Message& inmsg,Arc::Message& outmsg, ProcessingContext& context,std::string const & id); //Arc::MCC_Status processJobDelegations(Arc::Message& inmsg,Arc::Message& outmsg, // ProcessingContext& context,std::string const & id); //Arc::MCC_Status processJobDelegation(Arc::Message& inmsg,Arc::Message& outmsg, // ProcessingContext& context,std::string const & jobId,std::string const & delegId); }; } // namespace ARex #endif nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/SQLhelpers.h0000644000000000000000000000013115067751327021173 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.399230346 nordugrid-arc-7.1.1/src/services/a-rex/SQLhelpers.h0000644000175000002070000000264715067751327023107 0ustar00mockbuildmock00000000000000#ifndef __AREX_SQL_COMMON_HELPERS_H__ #define __AREX_SQL_COMMON_HELPERS_H__ #include #include #include namespace ARex { static const std::string sql_special_chars("'#\r\n\b\0",6); static const char sql_escape_char('%'); static const Arc::escape_type sql_escape_type(Arc::escape_hex); // Returns SQL-escaped string representation of argument inline static std::string sql_escape(const std::string& str) { return Arc::escape_chars(str, sql_special_chars, sql_escape_char, false, sql_escape_type); } inline static std::string sql_escape(int num) { return Arc::tostring(num); } inline static std::string sql_escape(const Arc::Time& val) { if(val.GetTime() == -1) return ""; return Arc::escape_chars((std::string)val, sql_special_chars, sql_escape_char, false, sql_escape_type); } // Unescape SQLite returned values inline static std::string sql_unescape(const std::string& str) { return Arc::unescape_chars(str, sql_escape_char,sql_escape_type); } inline static void sql_unescape(const std::string& str, int& val) { (void)Arc::stringto(Arc::unescape_chars(str, sql_escape_char,sql_escape_type), val); } inline static void sql_unescape(const std::string& str, Arc::Time& val) { if(str.empty()) { val = Arc::Time(); return; } val = Arc::Time(Arc::unescape_chars(str, sql_escape_char,sql_escape_type)); } } // namespace ARex #endif // __AREX_SQL_COMMON_HELPERS_H__ nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/job.cpp0000644000000000000000000000013115067751327020256 xustar0030 mtime=1759498967.764492116 30 atime=1759498967.869493711 29 ctime=1759499029.32974759 nordugrid-arc-7.1.1/src/services/a-rex/job.cpp0000644000175000002070000014607415067751327022175 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include // NOTE: On Solaris errno is not working properly if cerrno is included first #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "grid-manager/conf/GMConfig.h" #include "grid-manager/jobs/GMJob.h" #include "grid-manager/jobs/ContinuationPlugins.h" #include "grid-manager/jobs/JobDescriptionHandler.h" #include
"grid-manager/jobs/CommFIFO.h" #include "grid-manager/jobs/JobsList.h" #include "grid-manager/files/ControlFileHandling.h" #include "delegation/DelegationStores.h" #include "delegation/DelegationStore.h" #include "job.h" using namespace ARex; Arc::Logger ARexGMConfig::logger(Arc::Logger::getRootLogger(), "ARexGMConfig"); static std::string rand_uid64(void) { static unsigned int cnt; struct timeval t; gettimeofday(&t,NULL); uint64_t id = (((uint64_t)((cnt++) & 0xffff)) << 48) | (((uint64_t)(t.tv_sec & 0xffff)) << 32) | (((uint64_t)(t.tv_usec & 0xffff)) << 16) | (((uint64_t)(rand() & 0xffff)) << 0); return Arc::inttostr(id,16,16); } static std::string GetPath(std::string url){ std::string::size_type ds, ps; ds=url.find("//"); if (ds==std::string::npos) { ps=url.find("/"); } else { ps=url.find("/", ds+2); } if (ps==std::string::npos) return ""; return url.substr(ps); } ARexConfigContext* ARexConfigContext::GetRutimeConfiguration(Arc::Message& inmsg, GMConfig& gmconfig, std::string const & default_uname, std::string const & default_endpoint) { ARexConfigContext* config = NULL; Arc::MessageContextElement* mcontext = (*inmsg.Context())["arex.gmconfig"]; if(mcontext) { try { config = dynamic_cast(mcontext); logger.msg(Arc::DEBUG,"Using cached local account '%s'", config->User().Name()); } catch(std::exception& e) { }; }; if(config) return config; // TODO: do configuration detection // TODO: do mapping to local unix name std::string uname; uname=inmsg.Attributes()->get("SEC:LOCALID"); if(uname.empty()) uname=default_uname; if(uname.empty()) { if(getuid() == 0) { logger.msg(Arc::ERROR, "Will not map to 'root' account by default"); return NULL; }; struct passwd pwbuf; char buf[4096]; struct passwd* pw; if(getpwuid_r(getuid(),&pwbuf,buf,sizeof(buf),&pw) == 0) { if(pw && pw->pw_name) { uname = pw->pw_name; }; }; }; if(uname.empty()) { logger.msg(Arc::ERROR, "No local account name specified"); return NULL; }; logger.msg(Arc::DEBUG,"Using local account '%s'",uname); std::string grid_name = inmsg.Attributes()->get("TLS:IDENTITYDN"); if(grid_name.empty()) { // Try tokens if TLS has no information about user identity logger.msg(Arc::INFO, "TLS provides no identity, going for OTokens"); grid_name = inmsg.Attributes()->get("OTOKENS:IDENTITYDN"); /* Below is an example on how obtained token can be exchanged. 
Arc::SecAttr* sattr = inmsg.Auth()->get("OTOKENS"); if(!sattr) sattr = inmsg.AuthContext()->get("OTOKENS"); if(sattr) { std::string token = sattr->get(""); if(!token.empty()) { Arc::OpenIDMetadata tokenMetadata; Arc::OpenIDMetadataFetcher metaFetcher(sattr->get("iss").c_str()); if(metaFetcher.Fetch(tokenMetadata)) { char const * tokenEndpointUrl = tokenMetadata.TokenEndpoint(); if(tokenEndpointUrl) { Arc::OpenIDTokenFetcher tokenFetcher(tokenEndpointUrl, "c85e84e8-c9ea-4ecc-8123-070df2c10e0e", "dRnakcoaT-9YA6T1LzeLAqeEu7jLBxeTWFyQMbJ6BWZonjEcE060-dn8EWAfpZmPq3x7oTjUnu6mamYylBaNhw"); std::list scopes; scopes.push_back("storage.read:/"); scopes.push_back("storage.create:/"); std::list audiences; audiences.push_back("se1.example"); audiences.push_back("se2.example"); Arc::OpenIDTokenFetcher::TokenList tokens; if(tokenFetcher.Fetch("urn:ietf:params:oauth:grant-type:token-exchange", token, scopes, audiences, tokens)) { for(auto const & token : tokens) { logger_.msg(Arc::ERROR, "Token response: %s : %s", token.first, token.second); }; } else logger_.msg(Arc::ERROR, "Failed to fetch token"); } else logger_.msg(Arc::ERROR, "Token metadata contains no token endpoint");; } else logger_.msg(Arc::ERROR, "Failed to fetch token metadata"); } else logger_.msg(Arc::ERROR, "There is no token in sec attr"); } else logger_.msg(Arc::ERROR, "There is no otoken sec attr"); */ }; std::string endpoint = default_endpoint; if(endpoint.empty()) { std::string http_endpoint = inmsg.Attributes()->get("HTTP:ENDPOINT"); std::string tcp_endpoint = inmsg.Attributes()->get("TCP:ENDPOINT"); bool https_proto = ((inmsg.Auth() && (inmsg.Auth()->get("TLS"))) || (inmsg.AuthContext() && (inmsg.AuthContext()->get("TLS")))); endpoint = tcp_endpoint; if(https_proto) { endpoint="https"+endpoint; } else { endpoint="http"+endpoint; }; endpoint+=GetPath(http_endpoint); }; config=new ARexConfigContext(gmconfig,uname,grid_name,endpoint); if(config) { if(*config) { inmsg.Context()->Add("arex.gmconfig",config); } else { delete config; config=NULL; logger.msg(Arc::ERROR, "Failed to acquire A-REX's configuration"); }; }; return config; } static bool match_lists(const std::list& list1, const std::list& list2, std::string& matched) { for(std::list::const_iterator l1 = list1.begin(); l1 != list1.end(); ++l1) { for(std::list::const_iterator l2 = list2.begin(); l2 != list2.end(); ++l2) { if((*l1) == (*l2)) { matched = *l1; return true; }; }; }; return false; } static bool match_lists(const std::list >& list1, const std::list& list2, std::string& matched) { for(std::list >::const_iterator l1 = list1.begin(); l1 != list1.end(); ++l1) { for(std::list::const_iterator l2 = list2.begin(); l2 != list2.end(); ++l2) { if((l1->second) == (*l2)) { matched = l1->second; return l1->first; }; }; }; return false; } static bool match_groups(std::list const & groups, ARexGMConfig& config) { std::string matched_group; if(!groups.empty()) { for(std::list::iterator a = config.beginAuth();a!=config.endAuth();++a) { if(*a) { // This security attribute collected information about user's authorization groups Arc::SecAttr* sattr = (*a)->get("ARCLEGACY"); if(sattr) { if(match_lists(groups, sattr->getAll("GROUP"), matched_group)) { return true; }; }; }; }; }; return false; } static bool match_groups(std::list > const & groups, ARexGMConfig& config) { std::string matched_group; if(!groups.empty()) { for(std::list::iterator a = config.beginAuth();a!=config.endAuth();++a) { if(*a) { // This security attribute collected information about user's authorization groups Arc::SecAttr* 
sattr = (*a)->get("ARCLEGACY"); if(sattr) { if(match_lists(groups, sattr->getAll("GROUP"), matched_group)) { return true; }; }; }; }; }; return false; } ARexGMConfig::ARexGMConfig(const GMConfig& config,const std::string& uname,const std::string& grid_name,const std::string& service_endpoint): config_(config),user_(uname),readonly_(false),grid_name_(grid_name),service_endpoint_(service_endpoint) { //if(!InitEnvironment(configfile)) return; // const char* uname = user_s.get_uname(); //if((bool)job_map) uname=job_map.unix_name(); if(!user_) { logger.msg(Arc::WARNING, "Cannot handle local user %s", uname); return; } // Do substitutions on session dirs session_roots_ = config_.SessionRoots(); for (std::vector<std::string>::iterator session = session_roots_.begin(); session != session_roots_.end(); ++session) { config_.Substitute(*session, user_); } session_roots_non_draining_ = config_.SessionRootsNonDraining(); for (std::vector<std::string>::iterator session = session_roots_non_draining_.begin(); session != session_roots_non_draining_.end(); ++session) { config_.Substitute(*session, user_); } if(!config_.AREXEndpoint().empty()) service_endpoint_ = config_.AREXEndpoint(); } static ARexJobFailure setfail(JobReqResult res) { switch(res.result_type) { case JobReqSuccess: return ARexJobNoError; case JobReqInternalFailure: return ARexJobInternalError; case JobReqSyntaxFailure: return ARexJobDescriptionSyntaxError; case JobReqUnsupportedFailure: return ARexJobDescriptionUnsupportedError; case JobReqMissingFailure: return ARexJobDescriptionMissingError; case JobReqLogicalFailure: return ARexJobDescriptionLogicalError; }; return ARexJobInternalError; } bool ARexJob::is_allowed(bool fast) { allowed_to_see_=false; allowed_to_maintain_=false; // Checking user's grid name against owner if(config_.GridName() == job_.DN) { allowed_to_see_=true; allowed_to_maintain_=true; return true; }; if(fast) return true; // Do fine-grained authorization requested by job's owner if(config_.beginAuth() == config_.endAuth()) return true; std::string acl; if(!job_acl_read_file(id_,config_.GmConfig(),acl)) return true; // safe to ignore if(acl.empty()) return true; // No policy defined - only owner allowed // Identify and parse policy ArcSec::EvaluatorLoader eval_loader; Arc::AutoPointer<ArcSec::Policy> policy(eval_loader.getPolicy(ArcSec::Source(acl))); if(!policy) { logger_.msg(Arc::VERBOSE, "%s: Failed to parse user policy", id_); return true; }; Arc::AutoPointer<ArcSec::Evaluator> eval(eval_loader.getEvaluator(policy.Ptr())); if(!eval) { logger_.msg(Arc::VERBOSE, "%s: Failed to load evaluator for user policy ", id_); return true; }; std::string policyname = policy->getName(); if((policyname.length() > 7) && (policyname.substr(policyname.length()-7) == ".policy")) { policyname.resize(policyname.length()-7); }; if(policyname == "arc") { // Creating request - directly with XML // Creating top of request document Arc::NS ns; ns["ra"]="http://www.nordugrid.org/schemas/request-arc"; Arc::XMLNode request(ns,"ra:Request"); // Collect all security attributes for(std::list<Arc::MessageAuth*>::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) (*a)->Export(Arc::SecAttr::ARCAuth,request); }; // Leave only client identities for(Arc::XMLNode item = request["RequestItem"];(bool)item;++item) { for(Arc::XMLNode a = item["Action"];(bool)a;a=item["Action"]) a.Destroy(); for(Arc::XMLNode r = item["Resource"];(bool)r;r=item["Resource"]) r.Destroy(); }; // Fix namespace request.Namespaces(ns); // Create A-Rex specific action // TODO: make helper classes for such operations Arc::XMLNode item =
request["ra:RequestItem"]; if(!item) item=request.NewChild("ra:RequestItem"); // Possible operations are Modify and Read Arc::XMLNode action; action=item.NewChild("ra:Action"); action=JOB_POLICY_OPERATION_READ; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")=JOB_POLICY_OPERATION_URN; action=item.NewChild("ra:Action"); action=JOB_POLICY_OPERATION_MODIFY; action.NewAttribute("Type")="string"; action.NewAttribute("AttributeId")=JOB_POLICY_OPERATION_URN; // Evaluating policy ArcSec::Response *resp = eval->evaluate(request,policy.Ptr()); // Analyzing response in order to understand which operations are allowed if(!resp) return true; // Not authorized // Following should be somehow made easier ArcSec::ResponseList& rlist = resp->getResponseItems(); for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue; if(!(ritem->reqtp)) continue; for(ArcSec::Action::iterator a = ritem->reqtp->act.begin();a!=ritem->reqtp->act.end();++a) { ArcSec::RequestAttribute* attr = *a; if(!attr) continue; ArcSec::AttributeValue* value = attr->getAttributeValue(); if(!value) continue; std::string action = value->encode(); if(action == "Read") allowed_to_see_=true; if(action == "Modify") allowed_to_maintain_=true; }; }; } else if(policyname == "gacl") { // Creating request - directly with XML Arc::NS ns; Arc::XMLNode request(ns,"gacl"); // Collect all security attributes for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) (*a)->Export(Arc::SecAttr::GACL,request); }; // Leave only client identities int entries = 0; for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) { for(Arc::XMLNode a = entry["allow"];(bool)a;a=entry["allow"]) a.Destroy(); for(Arc::XMLNode a = entry["deny"];(bool)a;a=entry["deny"]) a.Destroy(); ++entries; }; if(!entries) request.NewChild("entry"); // Evaluate every action separately for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) { entry.NewChild("allow").NewChild("read"); }; ArcSec::Response *resp; resp=eval->evaluate(request,policy.Ptr()); if(resp) { ArcSec::ResponseList& rlist = resp->getResponseItems(); for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue; allowed_to_see_=true; break; }; }; for(Arc::XMLNode entry = request["entry"];(bool)entry;++entry) { entry["allow"].Destroy(); entry.NewChild("allow").NewChild("write"); }; resp=eval->evaluate(request,policy.Ptr()); if(resp) { ArcSec::ResponseList& rlist = resp->getResponseItems(); for(int n = 0; nres != ArcSec::DECISION_PERMIT) continue; allowed_to_maintain_=true; break; }; }; // TODO: , } else { logger_.msg(Arc::VERBOSE, "%s: Unknown user policy '%s'", id_, policyname); }; return true; } ARexJob::ARexJob(const std::string& id,ARexGMConfig& config,Arc::Logger& logger,bool fast_auth_check):id_(id),logger_(logger),config_(config),uid_(0),gid_(0) { if(id_.empty()) return; if(!config_) { id_.clear(); return; }; // Reading essential information about job if(!job_local_read_file(id_,config_.GmConfig(),job_)) { id_.clear(); return; }; // Checking if user is allowed to do anything with that job if(!is_allowed(fast_auth_check)) { id_.clear(); return; }; if(!(allowed_to_see_ || allowed_to_maintain_)) { id_.clear(); return; }; // Checking for presence of session dir and identifying local user id. 
struct stat st; if(job_.sessiondir.empty()) { id_.clear(); return; }; if(stat(job_.sessiondir.c_str(),&st) != 0) { id_.clear(); return; }; uid_ = st.st_uid; gid_ = st.st_gid; } ARexJob::ARexJob(Arc::XMLNode xmljobdesc,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator):id_(""),logger_(logger),config_(config) { if(!config_) return; uid_ = config_.User().get_uid(); gid_ = config_.User().get_gid(); std::string job_desc_str; // Make full XML doc out of subtree { Arc::XMLNode doc; xmljobdesc.New(doc); doc.GetDoc(job_desc_str); }; std::vector id; int max_jobs = 1; int min_jobs = 1; make_new_job(config_,logger_,min_jobs,max_jobs,job_desc_str,delegid,queue,clientid,idgenerator,id,job_,failure_type_,failure_); if(!id.empty()) id_ = id[0]; } ARexJob::ARexJob(std::string const& job_desc_str,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid, Arc::Logger& logger,JobIDGenerator& idgenerator):id_(""),logger_(logger),config_(config) { if(!config_) return; uid_ = config_.User().get_uid(); gid_ = config_.User().get_gid(); std::vector id; int max_jobs = 1; int min_jobs = 1; make_new_job(config_,logger_,min_jobs,max_jobs,job_desc_str,delegid,queue,clientid,idgenerator,id,job_,failure_type_,failure_); if(!id.empty()) id_ = id[0]; } bool ARexJob::Generate(Arc::XMLNode xmljobdesc,int& min_jobs,int& max_jobs,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator,std::vector& ids,std::string& failure) { std::string job_desc_str; // Make full XML doc out of subtree { Arc::XMLNode doc; xmljobdesc.New(doc); doc.GetDoc(job_desc_str); }; JobLocalDescription job_; ARexJobFailure failure_type_; make_new_job(config,logger,min_jobs,max_jobs,job_desc_str,delegid,queue,clientid,idgenerator,ids,job_,failure_type_,failure); return !ids.empty(); } bool ARexJob::Generate(std::string const& job_desc_str,int min_jobs,int max_jobs,ARexGMConfig& config,const std::string& delegid,const std::string& queue,const std::string& clientid,Arc::Logger& logger,JobIDGenerator& idgenerator,std::vector& ids,std::string& failure) { JobLocalDescription job_; ARexJobFailure failure_type_; make_new_job(config,logger,min_jobs,max_jobs,job_desc_str,delegid,queue,clientid,idgenerator,ids,job_,failure_type_,failure); return !ids.empty(); } void ARexJob::make_new_job(ARexGMConfig& config_, Arc::Logger& logger_, int& min_jobs, int& max_jobs, std::string const& job_desc_str, const std::string& delegid, const std::string& queue, const std::string& clientid, JobIDGenerator& idgenerator, std::vector& ids, JobLocalDescription& job_, ARexJobFailure& failure_type_, std::string& failure_) { ids.clear(); if(!config_) return; uid_t uid_ = config_.User().get_uid(); gid_t gid_ = config_.User().get_gid(); // Common part - do it once for multiple jobs // Configuration related part if((max_jobs < 1) || (min_jobs < 1) || (min_jobs > max_jobs)) { max_jobs = min_jobs-1; failure_="Job instance numbers are wrong"; failure_type_=ARexJobInternalError; return; } if(!config_.GmConfig().AllowNew()) { std::list const & groups = config_.GmConfig().AllowSubmit(); if(!match_groups(groups, config_)) { failure_="New job submission is not allowed"; failure_type_=ARexJobConfigurationError; return; }; }; if((config_.GmConfig().MaxJobDescSize() > 0) && (job_desc_str.size() > config_.GmConfig().MaxJobDescSize())) { failure_="Job description is too 
big"; failure_type_=ARexJobConfigurationError; return; } DelegationStores* delegs = config_.GmConfig().GetDelegations(); if(!delegs) { failure_="Failed to find delegation store"; failure_type_=ARexJobInternalError; return; } DelegationStore& deleg = delegs->operator[](config_.GmConfig().DelegationDir()); // Choose session directory std::string sessiondir; if (!ChooseSessionDir(config_,logger_,sessiondir)) { failure_="Failed to find valid session directory"; failure_type_=ARexJobInternalError; return; }; // Analyze job description (checking, substituting, etc) JobDescriptionHandler job_desc_handler(config_.GmConfig()); Arc::JobDescription desc; JobReqResult parse_result = job_desc_handler.parse_job_req_from_mem(job_,desc,job_desc_str,true); if((failure_type_=setfail(parse_result)) != ARexJobNoError) { failure_ = parse_result.failure; if(failure_.empty()) { failure_="Failed to parse job description"; failure_type_=ARexJobInternalError; }; return; }; std::string acl(parse_result.acl); if((!job_.action.empty()) && (job_.action != "request")) { failure_="Wrong action in job request: "+job_.action; failure_type_=ARexJobInternalError; return; }; // Check for proper LRMS name in request. If there is no LRMS name // in user configuration that means service is opaque frontend and // accepts any LRMS in request. if((!job_.lrms.empty()) && (!config_.GmConfig().DefaultLRMS().empty())) { if(job_.lrms != config_.GmConfig().DefaultLRMS()) { failure_="Requested LRMS is not supported by this service"; failure_type_=ARexJobInternalError; //failure_type_=ARexJobDescriptionLogicalError; return; }; }; if(job_.lrms.empty()) job_.lrms=config_.GmConfig().DefaultLRMS(); // Handle queue in request. // if (queue in xrsl/adl) submit to that queue w/o modification; // elseif (passed default queue by caller) substitute default queue into xrsl/adl and check authorisation; // elseif (exists default queue in arc.conf) substitute default queue into xrsl and check authorisation; // elseif (VO is authorised in one of the arc.conf queues*) substitute into xrsl the first queue where VO is authorised in arc.conf; // else (reject); if(job_.queue.empty()) // queue in job description? 
job_.queue=queue; // queue passed by caller if(job_.queue.empty()) job_.queue=config_.GmConfig().DefaultQueue(); // default queue in configuration bool queue_authorized = false; bool queue_matched = false; for(std::list<std::string>::const_iterator q = config_.GmConfig().Queues().begin(); q != config_.GmConfig().Queues().end(); ++q) { if(!job_.queue.empty()) { if(*q != job_.queue) continue; // skip non-matching queue }; queue_matched = true; // Check for allowed authorization group std::list<std::pair<bool,std::string> > const & groups = config_.GmConfig().MatchingGroups(q->c_str()); if(groups.empty()) { queue_authorized = true; // No authorized groups assigned - all allowed } else { if(match_groups(groups, config_)) { queue_authorized = true; }; }; if(queue_authorized) { if(job_.queue.empty()) job_.queue = *q; // no queue requested - assign first authorized break; }; }; if(!queue_authorized) { // Different error messages for different job requests if(job_.queue.empty()) { failure_="Request has no queue defined and none is allowed for this user"; failure_type_=ARexJobConfigurationError; } else { if(queue_matched) { failure_="Requested queue "+job_.queue+" does not match any of available queues"; failure_type_=ARexJobInternalError; } else { failure_="Requested queue "+job_.queue+" is not allowed for this user"; failure_type_=ARexJobConfigurationError; }; }; return; }; // Check for various unsupported features if(!job_.preexecs.empty()) { failure_="Pre-executables are not supported by this service"; failure_type_=ARexJobDescriptionUnsupportedError; return; }; if(!job_.postexecs.empty()) { failure_="Post-executables are not supported by this service"; failure_type_=ARexJobDescriptionUnsupportedError; return; }; for(std::list<Arc::OutputFileType>::iterator f = desc.DataStaging.OutputFiles.begin();f != desc.DataStaging.OutputFiles.end();++f) { for(std::list<Arc::TargetType>::iterator t = f->Targets.begin();t != f->Targets.end();++t) { switch(t->CreationFlag) { case Arc::TargetType::CFE_DEFAULT: case Arc::TargetType::CFE_OVERWRITE: case Arc::TargetType::CFE_DONTOVERWRITE: break; default: failure_="Unsupported creation mode for Target"; failure_type_=ARexJobDescriptionUnsupportedError; return; }; }; }; // TODO: Rerun; // TODO: ExpiryTime; // TODO: ProcessingStartTime; // TODO: Priority; // TODO: Notification; // TODO: CredentialService; // TODO: AccessControl; // TODO: DryRun; // TODO: RemoteLogging // TODO: OperatingSystem; // TODO: Platform; // TODO: NetworkInfo; // TODO: IndividualPhysicalMemory; // TODO: IndividualVirtualMemory; // TODO: DiskSpaceRequirement; // TODO: SessionLifeTime; // TODO: SessionDirectoryAccess; // TODO: IndividualCPUTime; // TODO: TotalCPUTime; // TODO: IndividualWallTime; // TODO: TotalWallTime; // TODO: NodeAccess; // TODO: CEType; // Check that the SlotRequirements make sense. // I.e. that SlotsPerHost do not exceed total Slots // and that SlotsPerHost is a divisor of total Slots if((desc.Resources.SlotRequirement.SlotsPerHost > desc.Resources.SlotRequirement.NumberOfSlots) || (desc.Resources.SlotRequirement.NumberOfSlots % desc.Resources.SlotRequirement.SlotsPerHost != 0)) { failure_="SlotsPerHost exceeding NumberOfSlots is not supported"; failure_type_=ARexJobDescriptionUnsupportedError; return; }; if(!desc.Resources.Coprocessor.v.empty()) { failure_="Coprocessor is not supported yet."; failure_type_=ARexJobDescriptionUnsupportedError; return; }; // There may be 3 sources of delegated credentials: // 1. If job comes through EMI-ES it has delegations assigned only per file // through source and target.
But ARC has extension to pass global // delegation for whole DataStaging // 2. In ARC BES extension credentials delegated as part of job creation request. // Those are provided in credentials variable // 3. If neither works and special dynamic output files @list which // have no targets and no delegations are present then any of // per file delegations is used bool need_delegation = false; // not for sure, but most probably needed std::list deleg_ids; // collection of all delegations if(!desc.DataStaging.DelegationID.empty()) { job_.delegationid = desc.DataStaging.DelegationID; // remember that special delegation deleg_ids.push_back(desc.DataStaging.DelegationID); // and store in list of all delegations } else if(!delegid.empty()) { // Have per job credentials - remember and refer by id later job_.delegationid = delegid; // remember that ad-hoc delegation deleg_ids.push_back(delegid); // and store in list of all delegations } else { // No per job delegation provided. // Check if generic delegation is needed at all. for(std::list::iterator f = desc.DataStaging.OutputFiles.begin(); f != desc.DataStaging.OutputFiles.end();++f) { if(f->Name[0] == '@') { // Dynamic file - possibly we need delegation. But we can't know till job finished. // Try to use any of provided delegations. need_delegation = true; break; }; }; }; // Collect other delegations // Delegation ids can be found in parsed job description for(std::list::iterator f = desc.DataStaging.InputFiles.begin();f != desc.DataStaging.InputFiles.end();++f) { for(std::list::iterator s = f->Sources.begin();s != f->Sources.end();++s) { if(!s->DelegationID.empty()) deleg_ids.push_back(s->DelegationID); }; }; for(std::list::iterator f = desc.DataStaging.OutputFiles.begin();f != desc.DataStaging.OutputFiles.end();++f) { for(std::list::iterator t = f->Targets.begin();t != f->Targets.end();++t) { if(!t->DelegationID.empty()) deleg_ids.push_back(t->DelegationID); }; }; if(need_delegation && job_.delegationid.empty()) { // Still need generic per job delegation if(deleg_ids.size() > 0) { // Pick up first delegation as generic one job_.delegationid = *deleg_ids.begin(); } else { // Missing most probably required delegation - play safely failure_="Dynamic output files and no delegation assigned to job are incompatible."; failure_type_=ARexJobDescriptionUnsupportedError; return; }; }; // Start local file (some local attributes are already defined at this point) - only common parts /* !!!!! some parameters are unchecked here - rerun,diskspace !!!!! 
*/ job_.starttime=Arc::Time(); job_.DN=config_.GridName(); job_.clientname=clientid; std::string certificates; job_.expiretime = time(NULL); #if 1 if(!job_.delegationid.empty()) { (void)deleg.GetCred(job_.delegationid, config_.GridName(), certificates); } if(!certificates.empty()) { try { Arc::Credential cred(certificates,"","","",false,false,"",false); job_.expiretime = cred.GetEndTime(); logger_.msg(Arc::VERBOSE, "Credential expires at %s", job_.expiretime.str()); } catch(std::exception const& e) { logger_.msg(Arc::WARNING, "Credential handling exception: %s", e.what()); }; } else #endif // Create user credentials (former "proxy") { for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("TLS"); if(sattr) { certificates = sattr->get("CERTIFICATE"); if(!certificates.empty()) { certificates += sattr->get("CERTIFICATECHAIN"); try { Arc::Credential cred(certificates,"","","",false,false,"",false); job_.expiretime = cred.GetEndTime(); logger_.msg(Arc::VERBOSE, "Credential expires at %s", job_.expiretime.str()); } catch(std::exception const& e) { logger_.msg(Arc::WARNING, "Credential handling exception: %s", e.what()); }; break; }; }; }; }; }; // Report VOMS information from credentials (TLS source) if(job_.voms.empty()) { for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("TLS"); if(sattr) { std::list voms = sattr->getAll("VOMS"); // These attributes are in different format and need to be converted // into ordinary VOMS FQANs. for(std::list::iterator v = voms.begin();v!=voms.end();++v) { std::string fqan = Arc::VOMSFQANFromFull(*v); if(!fqan.empty()) { job_.voms.insert(job_.voms.end(),fqan); }; }; }; }; }; }; // Add WLCG information from tokens as VOMS if nothing was collected from proxy if(job_.voms.empty()) { if(config_.GmConfig().WLCGtoVOMS()) { for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("OTOKENS"); if(sattr) { std::list wgroups = sattr->getAll("wlcg.groups"); for(std::list::iterator wgroup = wgroups.begin();wgroup!=wgroups.end();++wgroup) { job_.voms.insert(job_.voms.end(),*wgroup); }; }; }; }; }; }; // If still no VOMS information is available take forced one from configuration if(job_.voms.empty()) { std::string forced_voms = config_.GmConfig().ForcedVOMS(job_.queue.c_str()); if(forced_voms.empty()) forced_voms = config_.GmConfig().ForcedVOMS(); if(!forced_voms.empty()) { job_.voms.push_back(forced_voms); }; }; // Store information about matched auth groups for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("ARCLEGACY"); if(sattr) job_.authgroups = sattr->getAll("GROUP"); if(!job_.authgroups.empty()) break; } } // Pull token claims if available for(std::list::iterator a = config_.beginAuth();a!=config_.endAuth();++a) { if(*a) { Arc::SecAttr* sattr = (*a)->get("OTOKENS"); if(sattr) job_.tokenclaim = sattr->getAll(); if(!job_.tokenclaim.empty()) break; } } Arc::User user(uid_); // For each instance allocate job id and create control files (job description placeholder) ids.resize(max_jobs); max_jobs = make_job_id(config_,logger_,ids); ids.resize(max_jobs); if(max_jobs < min_jobs) { delete_job_id(config_,user,sessiondir,ids); failure_="Failed to allocate enough job identifiers"; failure_type_=ARexJobInternalError; return; } int num_jobs = 0; for(; num_jobs= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); 
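// Rollback semantics for bulk submission: if writing a control file fails after at least min_jobs instances have already been fully created, the loop above simply stops and keeps the instances it has; if the requested minimum cannot be reached, every job id allocated so far is released through delete_job_id() and the whole request fails with the message recorded below.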
failure_="Failed to store job description"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; } // For compatibility reasons during transitional period store full proxy if possible if(!certificates.empty()) { if(!job_proxy_write_file(job,config_.GmConfig(),certificates)) { if(num_jobs >= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); failure_="Failed to write job proxy file"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; }; }; // Write local file if(!job_local_write_file(job,config_.GmConfig(),job_)) { if(num_jobs >= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); failure_="Failed to store internal job description"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; }; // Write grami file if(!job_desc_handler.write_grami(desc,job,NULL)) { if(num_jobs >= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); failure_="Failed to create grami file"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; }; // Write ACL file if(!acl.empty()) { if(!job_acl_write_file(job_.jobid,config_.GmConfig(),acl)) { if(num_jobs >= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); failure_="Failed to process/store job ACL"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; }; }; // Call authentication/authorization plugin/exec // Thre is no sense in calling it for each of same jobs - do it only for first one. if(num_jobs == 0) { // talk to external plugin to ask if we can proceed std::list results; ContinuationPlugins* plugins = config_.GmConfig().GetContPlugins(); if(plugins) plugins->run(job,config_.GmConfig(),results); std::list::iterator result = results.begin(); while(result != results.end()) { // analyze results if(result->action == ContinuationPlugins::act_fail) { delete_job_id(config_,user,sessiondir,ids); failure_="Job is not allowed by external plugin: "+result->response; failure_type_=ARexJobInternalError; return; } else if(result->action == ContinuationPlugins::act_log) { // Scream but go ahead logger_.msg(Arc::WARNING, "Failed to run external plugin: %s", result->response); } else if(result->action == ContinuationPlugins::act_pass) { // Just continue if(result->response.length()) { logger_.msg(Arc::INFO, "Plugin response: %s", result->response); }; } else { delete_job_id(config_,user,sessiondir,ids); failure_="Failed to pass external plugin: "+result->response; failure_type_=ARexJobInternalError; return; }; ++result; }; }; // Create session directory if(!config_.GmConfig().CreateSessionDirectory(job.SessionDir(), job.get_user())) { if(num_jobs >= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); failure_="Failed to create session directory"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; }; // Create input status file to tell downloader we // are handling input in clever way. job_input_status_add_file(job,config_.GmConfig()); }; // for num_jobs max_jobs = num_jobs; delete_job_id(config_,user,sessiondir,ids,max_jobs); ids.resize(max_jobs); // Create status files (do it last so GM picks job up here) for(num_jobs=0; num_jobs= min_jobs) break; delete_job_id(config_,user,sessiondir,ids); failure_="Failed registering job in A-REX"; failure_type_=ARexJobInternalError; max_jobs=num_jobs; return; }; }; // for num_jobs max_jobs = num_jobs; delete_job_id(config_,user,sessiondir,ids,max_jobs); ids.resize(max_jobs); // Put lock on all delegated credentials of these jobs. 
// Because same delegation id can be used multiple times remove // duplicates to avoid adding multiple identical locking records. deleg_ids.sort(); deleg_ids.unique(); for(num_jobs=0; num_jobsoperator[](config_.GmConfig().DelegationDir()); if(!deleg.PutCred(job_.delegationid, config_.GridName(), credentials)) return false; Arc::Credential cred(credentials,"","","",false,false,"",false); job_.expiretime = cred.GetEndTime(); GMJob job(id_,Arc::User(uid_), job_.sessiondir,JOB_STATE_ACCEPTED); #if 0 std::string cred_public; cred.OutputCertificate(cred_public); cred.OutputCertificateChain(cred_public); (void)job_proxy_write_file(job,config_.GmConfig(),cred_public); #else // For compatibility reasons during transitional period store full proxy if possible (void)job_proxy_write_file(job,config_.GmConfig(),credentials); #endif // TODO: should job.#.proxy be updated too? return true; } bool ARexJob::make_job_id() { std::vector id(1); std::size_t num = make_job_id(config_, logger_, id); if (num != 1) return false; id_ = id[0]; return true; } // Allocates job id(s) by creating empty job description file std::size_t ARexJob::make_job_id(ARexGMConfig& config_, Arc::Logger& logger_, std::vector& ids) { if(!config_) return 0; if(ids.empty()) return 0; for(std::size_t idx = 0;idx < ids.size();++idx) { ids[idx].clear(); for(int i=0;i<100;i++) { //id_=Arc::tostring((unsigned int)getpid())+ // Arc::tostring((unsigned int)time(NULL))+ // Arc::tostring(rand(),1); std::string id = rand_uid64().substr(4); // 16-4=12 chars in id std::string fname=job_control_path(config_.GmConfig().ControlDir(),id,sfx_desc); struct stat st; if(stat(fname.c_str(),&st) == 0) continue; std::string::size_type sep_pos = fname.rfind('/'); if(sep_pos != std::string::npos) { if(!Arc::DirCreate(fname.substr(0,sep_pos),S_IRWXU|S_IXGRP|S_IRGRP|S_IXOTH|S_IROTH,true)) continue; }; int h = ::open(fname.c_str(),O_RDWR | O_CREAT | O_EXCL,0600); // So far assume control directory is on local fs. 
// TODO: add locks or links for NFS int err = errno; if(h == -1) { if(err == EEXIST) continue; logger_.msg(Arc::ERROR, "Failed to create job in %s", config_.GmConfig().ControlDir()); return idx; }; fix_file_owner(fname,config_.User()); close(h); ids[idx] = id; break; }; if(ids[idx].empty()) { logger_.msg(Arc::ERROR, "Out of tries while allocating new job ID in %s", config_.GmConfig().ControlDir()); return idx; }; }; return ids.size(); } bool ARexJob::delete_job_id() { if(!config_) return true; if(id_.empty()) return true; // it is ok to have empty sessiondir because job_clean_final can handle such case job_clean_final(GMJob(id_,Arc::User(uid_),job_.sessiondir),config_.GmConfig()); id_=""; return true; } bool ARexJob::delete_job_id(ARexGMConfig& config_, Arc::User user_, std::string const & sessiondir, std::vector<std::string>& ids, std::size_t offset) { if(!config_) return false; for(std::size_t idx = offset; idx < ids.size(); ++idx) { job_clean_final(GMJob(ids[idx],user_,sessiondir+"/"+ids[idx]),config_.GmConfig()); } ids.resize(offset); return true; } int ARexJob::TotalJobs(ARexGMConfig& config,Arc::Logger& /* logger */) { return JobsList::CountAllJobs(config.GmConfig()); } // TODO: optimize std::list<std::string> ARexJob::Jobs(ARexGMConfig& config,Arc::Logger& logger) { std::list<std::string> jlist; JobsList::GetAllJobIds(config.GmConfig(),jlist); std::list<std::string>::iterator i = jlist.begin(); while(i!=jlist.end()) { ARexJob job(*i,config,logger,true); if(job) { ++i; } else { i = jlist.erase(i); }; }; return jlist; } std::string ARexJob::SessionDir(void) { if(id_.empty()) return ""; return job_.sessiondir; } std::string ARexJob::LogDir(void) { return job_.stdlog; } static bool normalize_filename(std::string& filename) { std::string::size_type p = 0; if(filename[0] != G_DIR_SEPARATOR) filename.insert(0,G_DIR_SEPARATOR_S); for(;p != std::string::npos;) { if((filename[p+1] == '.') && (filename[p+2] == '.') && ((filename[p+3] == 0) || (filename[p+3] == G_DIR_SEPARATOR)) ) { std::string::size_type pr = std::string::npos; if(p > 0) pr = filename.rfind(G_DIR_SEPARATOR,p-1); if(pr == std::string::npos) return false; filename.erase(pr,p-pr+3); p=pr; } else if((filename[p+1] == '.') && (filename[p+2] == G_DIR_SEPARATOR)) { filename.erase(p,2); } else if(filename[p+1] == G_DIR_SEPARATOR) { filename.erase(p,1); }; p = filename.find(G_DIR_SEPARATOR,p+1); }; if(!filename.empty()) filename.erase(0,1); // removing leading separator return true; } Arc::FileAccess* ARexJob::CreateFile(const std::string& filename) { if(id_.empty()) return NULL; std::string fname = filename; if((!normalize_filename(fname)) || (fname.empty())) { failure_="File name is not acceptable"; failure_type_=ARexJobInternalError; return NULL; }; int lname = fname.length(); fname = job_.sessiondir+"/"+fname; // First try to create/open file Arc::FileAccess* fa = Arc::FileAccess::Acquire(); if(!*fa) { delete fa; return NULL; }; if(!fa->fa_setuid(uid_,gid_)) { Arc::FileAccess::Release(fa); return NULL; }; if(!fa->fa_open(fname,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR)) { if(fa->geterrno() != ENOENT) { Arc::FileAccess::Release(fa); return NULL; }; std::string::size_type n = fname.rfind('/'); if((n == std::string::npos) || (n < (fname.length()-lname))) { Arc::FileAccess::Release(fa); return NULL; }; if(!fa->fa_mkdirp(fname.substr(0,n),S_IRUSR | S_IWUSR | S_IXUSR)) { if(fa->geterrno() != EEXIST) { Arc::FileAccess::Release(fa); return NULL; }; }; if(!fa->fa_open(fname,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR)) { Arc::FileAccess::Release(fa); return NULL; }; }; return fa; }
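// normalize_filename() above canonicalises a client-supplied path before it is appended to the session directory, so a request can never escape the job sandbox; its behaviour on a few sample inputs (derived from the implementation above, shown for illustration) is: "dir/../out.txt" becomes "out.txt", "a/./b//c" becomes "a/b/c", and "../etc/passwd", which would climb out of the session directory, makes it return false, which CreateFile() above and OpenFile()/OpenDir() below report as a non-acceptable name.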
Arc::FileAccess* ARexJob::OpenFile(const std::string& filename,bool for_read,bool for_write) { if(id_.empty()) return NULL; std::string fname = filename; if((!normalize_filename(fname)) || (fname.empty())) { failure_="File name is not acceptable"; failure_type_=ARexJobInternalError; return NULL; }; fname = job_.sessiondir+"/"+fname; int flags = 0; if(for_read && for_write) { flags=O_RDWR; } else if(for_read) { flags=O_RDONLY; } else if(for_write) { flags=O_WRONLY; } //return Arc::FileOpen(fname,flags,uid_,gid_,0); Arc::FileAccess* fa = Arc::FileAccess::Acquire(); if(*fa) { if(fa->fa_setuid(uid_,gid_)) { if(fa->fa_open(fname,flags,0)) { return fa; }; }; }; failure_="Failed opening file - "+Arc::StrError(fa->geterrno()); failure_type_=ARexJobInternalError; Arc::FileAccess::Release(fa); return NULL; } Arc::FileAccess* ARexJob::OpenDir(const std::string& dirname) { if(id_.empty()) return NULL; std::string dname = dirname; if(!normalize_filename(dname)) { failure_="Directory name is not acceptable"; failure_type_=ARexJobInternalError; return NULL; }; //if(dname.empty()) return NULL; dname = job_.sessiondir+"/"+dname; Arc::FileAccess* fa = Arc::FileAccess::Acquire(); if(*fa) { if(fa->fa_setuid(uid_,gid_)) { if(fa->fa_opendir(dname)) { return fa; }; }; }; failure_="Failed opening directory - "+Arc::StrError(fa->geterrno()); failure_type_=ARexJobInternalError; Arc::FileAccess::Release(fa); return NULL; } int ARexJob::OpenLogFile(const std::string& name) { if(id_.empty()) return -1; if(strchr(name.c_str(),'/')) return -1; int h = -1; std::string fname; if(name == sfx_status) { fname = config_.GmConfig().ControlDir() + "/" + subdir_cur + "/" + id_ + "." + name; h = ::open(fname.c_str(),O_RDONLY); if(h != -1) return h; fname = config_.GmConfig().ControlDir() + "/" + subdir_new + "/" + id_ + "." + name; h = ::open(fname.c_str(),O_RDONLY); if(h != -1) return h; fname = config_.GmConfig().ControlDir() + "/" + subdir_rew + "/" + id_ + "." + name; h = ::open(fname.c_str(),O_RDONLY); if(h != -1) return h; fname = config_.GmConfig().ControlDir() + "/" + subdir_old + "/" + id_ + "."
+ name; h = ::open(fname.c_str(),O_RDONLY); } else { fname = job_control_path(config_.GmConfig().ControlDir(),id_,name.c_str()); h = ::open(fname.c_str(),O_RDONLY); }; return h; } std::list<std::string> ARexJob::LogFiles(void) { std::list<std::string> logs; if(id_.empty()) return logs; std::string dname = job_control_path(config_.GmConfig().ControlDir(),id_,NULL); Glib::Dir* dir = new Glib::Dir(dname); if(!dir) return logs; for(;;) { std::string name = dir->read_name(); if(name.empty()) break; if(name == ".") continue; if(name == "..") continue; logs.push_back(name); }; delete dir; // Add always present status logs.push_back("status"); return logs; } std::string ARexJob::GetFilePath(const std::string& filename) { if(id_.empty()) return ""; std::string fname = filename; if(!normalize_filename(fname)) return ""; if(fname.empty()) return job_.sessiondir; return job_.sessiondir+"/"+fname; } bool ARexJob::ReportFileComplete(const std::string& filename) { if(id_.empty()) return false; std::string fname = filename; if(!normalize_filename(fname)) return false; if(!job_input_status_add_file(GMJob(id_,Arc::User(uid_)),config_.GmConfig(),"/"+fname)) return false; CommFIFO::Signal(config_.GmConfig().ControlDir(),id_); return true; } bool ARexJob::ReportFilesComplete(void) { if(id_.empty()) return false; if(!job_input_status_add_file(GMJob(id_,Arc::User(uid_)),config_.GmConfig(),"/")) return false; CommFIFO::Signal(config_.GmConfig().ControlDir(),id_); return true; } std::string ARexJob::GetLogFilePath(const std::string& name) { if(id_.empty()) return ""; return job_control_path(config_.GmConfig().ControlDir(),id_,name.c_str()); } bool ARexJob::ChooseSessionDir(ARexGMConfig& config_,Arc::Logger& logger_,std::string& sessiondir) { if (config_.SessionRootsNonDraining().size() == 0) { // no active session dirs available logger_.msg(Arc::ERROR, "No non-draining session dirs available"); return false; } // choose randomly from non-draining session dirs sessiondir = config_.SessionRootsNonDraining().at(rand() % config_.SessionRootsNonDraining().size()); return true; } nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/a-rex-backtrace-collect.8.in0000644000000000000000000000013115067751327024052 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.313216459 nordugrid-arc-7.1.1/src/services/a-rex/a-rex-backtrace-collect.8.in0000644000175000002070000000133515067751327025757 0ustar00mockbuildmock00000000000000.TH a-rex-backtrace-collect 8 "2016-08-16" "NorduGrid @VERSION@" "NorduGrid Toolkit" .SH NAME a-rex-backtrace-collect \- processes core file(s) generated by arched and produces backtrace(s). .SH DESCRIPTION .B a-rex-backtrace-collect processes core file(s) collected in ARC_LOGS_DIR/arccore folder and produces their backtraces. The backtrace(s) are stored in files with the .backtrace suffix. The ARC installation location can be adjusted using the ARC_LOCATION environment variable. The location of the configuration file can be specified using the ARC_CONFIG environment variable. .SH SYNOPSIS a-rex-backtrace-collect .SH OPTIONS No options are supported.
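.SH EXAMPLE A typical invocation, shown only as an illustration (the configuration file path is site-specific): ARC_CONFIG=/etc/arc.conf a-rex-backtrace-collect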
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/schema0000644000000000000000000000013015067751426020162 xustar0030 mtime=1759499030.404443933 28 atime=1759499034.7655102 30 ctime=1759499030.404443933 nordugrid-arc-7.1.1/src/services/a-rex/schema/0000755000175000002070000000000015067751426022143 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/schema/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022275 xustar0030 mtime=1759498967.772902482 30 atime=1759498967.873493772 30 ctime=1759499030.399648939 nordugrid-arc-7.1.1/src/services/a-rex/schema/Makefile.am0000644000175000002070000000020515067751327024174 0ustar00mockbuildmock00000000000000
arcschemadir = $(pkgdatadir)/schema
arcschema_DATA = a-rex.xsd a-rex_infoprovider.xsd a-rex_lrms.xsd
EXTRA_DIST = $(arcschema_DATA)
nordugrid-arc-7.1.1/src/services/a-rex/schema/PaxHeaders/Makefile.in0000644000000000000000000000013115067751356022307 xustar0030 mtime=1759498990.874564861 30 atime=1759499019.104272227 29 ctime=1759499030.40088542 nordugrid-arc-7.1.1/src/services/a-rex/schema/Makefile.in0000644000175000002070000005105715067751356024220 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/schema ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup 
= srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(arcschemadir)" DATA = $(arcschema_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ 
DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = 
@PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ arcschemadir = $(pkgdatadir)/schema arcschema_DATA = a-rex.xsd a-rex_infoprovider.xsd a-rex_lrms.xsd EXTRA_DIST = $(arcschema_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && 
$(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/schema/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/schema/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcschemaDATA: $(arcschema_DATA) @$(NORMAL_INSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcschemadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcschemadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcschemadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcschemadir)" || exit $$?; \ done uninstall-arcschemaDATA: @$(NORMAL_UNINSTALL) @list='$(arcschema_DATA)'; test -n "$(arcschemadir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcschemadir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(arcschemadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-arcschemaDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-arcschemaDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-arcschemaDATA install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-arcschemaDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/schema/PaxHeaders/a-rex_lrms.xsd0000644000000000000000000000013215067751327023032 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499030.405722709 nordugrid-arc-7.1.1/src/services/a-rex/schema/a-rex_lrms.xsd0000644000175000002070000001154115067751327024736 0ustar00mockbuildmock00000000000000 The path to qstat, pbsnodes, qmgr etc PBS binaries. No need to set unless PBS is used. 
The path of the PBS server logfiles which are used by the GM to determine whether a PBS job is completed. If not specified, the GM will use qstat for that. nordugrid-arc-7.1.1/src/services/a-rex/schema/PaxHeaders/a-rex_infoprovider.xsd0000644000000000000000000000013215067751327024563 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499030.403443918 nordugrid-arc-7.1.1/src/services/a-rex/schema/a-rex_infoprovider.xsd0000644000175000002070000002423015067751327026466 0ustar00mockbuildmock00000000000000
This element can be used to specify benchmark results on the ExecutionEnvironment level. It should contain the name of the benchmark and the benchmark score separated by a space, for example "specint2000 2200". Please use one of the standard benchmark names given below if applicable:
bogomips - BogoMips
cfp2006 - SPEC CFP 2006 floating point benchmark
cint2006 - SPEC CINT 2006 integer benchmark
linpack - LINPACK benchmark
specfp2000 - SPECfp2000 floating point benchmark
specint2000 - SPECint2000 integer benchmark
This element is used by the infoprovider to determine which nodes are included in an ExecutionEnvironment. This element represents a group of identical compute nodes in the cluster. nordugrid-arc-7.1.1/src/services/a-rex/schema/PaxHeaders/a-rex.xsd0000644000000000000000000000013215067751327021775 xustar0030 mtime=1759498967.773273145 30 atime=1759498967.873493772 30 ctime=1759499030.402087325 nordugrid-arc-7.1.1/src/services/a-rex/schema/a-rex.xsd0000644000175000002070000007305115067751327023705 0ustar00mockbuildmock00000000000000
This element defines the URL of the A-REX service as seen from outside.
This element defines the path to the arc0 Grid Manager configuration file. If present, values in that file will overwrite those defined as siblings of this element. If the string is empty, its value is /etc/arc.conf.
This element defines how the grid-manager part of A-REX is run.
* internal - as a thread inside the service container.
* none - no grid-manager is run.
* external - as a separate executable (not supported anymore).
Default is 'internal'.
Defines parameters for mapping a Grid user identity to a local account. Currently only the default account name can be specified.
Defines parameters for controlling LRMS-specific and LRMS-related functionality.
* type - name of the LRMS/batch system
* defaultShare - optional name of the default ComputingShare
* sharedFilesystem - whether the session directory is shared with the computing node
* sharedScratch - the path where the frontend can access cross-mounted scratch directories of nodes, if applicable
* GNUTimeUtility - location and name of the GNU time executable
* any - accommodates numerous LRMS configuration parameters
* pbs_bin_path="/usr/bin"
* pbs_log_path="/var/spool/pbs/server_logs"
* condor_bin_path="/opt/condor/bin"
* condor_config="/opt/condor/etc/condor_config"
* condor_rank="(1-LoadAvg/2)*(1-LoadAvg/2)*Memory/1000*KFlops/1000000"
* slurm_bin_path="/usr/bin"
* sge_bin_path="/opt/n1ge6/bin/lx24-x86"
* sge_root="/opt/n1ge6"
* sge_cell="default"
* sge_execd_port="537"
* lsf_bin_path="/usr/local/lsf/bin/"
* lsf_profile_path="/usr/share/lsf/conf"
* ll_bin_path="/opt/ibmll/LoadL/full/bin"
* ll_consumable_resources="yes"
Defines parameters influencing the load imposed on the gateway computer. A missing element means the corresponding quantity is not limited.
* maxJobsTracked - jobs which are not in FINISHED state (jobs tracked in RAM)
* maxJobsRun - jobs being run (SUBMITTING, INLRMS states)
* maxJobsTotal - jobs in any state
* maxJobsPerDN - maximum jobs in the system per user DN
* maxJobsTransferred - jobs being processed on the frontend (PREPARING, FINISHING states)
* maxJobsTransferredAdditional - additional reserved number of jobs being processed on the frontend
* maxFilesTransferred - number of files being transferred simultaneously by jobs in PREPARING and FINISHING states. Value is per job.
* maxLoadShare - sharing mechanism for data transfer - the maximum number of processes that can run per transfer share
* loadShareType - sharing mechanism for data transfer - the scheme used to assign jobs to transfer shares. Possible values are "dn", "voms:vo", "voms:role" and "voms:group"
* shareLimit - specifies a transfer share that has a limit different from the default value in maxLoadShare
* name - the name of the share. Examples for different sharing mechanisms:
  - dn : /O=Grid/O=NorduGrid/OU=domainname.com/CN=Jane Doe
  - voms:vo : voname
  - voms:role : voname:rolename
  - voms:group : /voname/groupname
* limit - the maximum number of processes that can run for this particular share
* wakeupPeriod - specifies how often checks for newly arrived jobs, job state change requests, etc. are done. That is, the responsiveness of the service. The period is in seconds. Default is 3 minutes. Usually this element is not needed.
Parameters related to cache functionality. Multiple caches may be specified. Cached data will be distributed evenly over the caches. If no such element is present, caching is disabled.
* location - path to a directory to store cached data. Multiple cache directories may be specified by specifying multiple location elements. Cached data will be distributed evenly over the caches.
* remotelocation - path to a cache which is managed by another grid-manager.
* link - optional path at which the location is accessible on computing nodes, if it is different from the path on the service host.
* highWatermark, lowWatermark - specify high and low watermarks for space used by the cache, as a percentage of the space on the file system on which the cache directory is located. When the max is exceeded, files will be deleted to bring the used space down to the min level. It is a good idea to have the cache on its own separate file system. To turn off this feature, those elements must be absent.
* cacheLogFile - the file where messages from cache cleaning are logged.
* cacheLogLevel - the log level used by the cache cleaning script.
* cacheLifetime - the lifetime of cache files
Parameters for the new data staging framework:
* maxDelivery: maximum number of files in physical transfer
* maxProcessor: maximum number of files in each pre- or post-transfer stage
* maxEmergency: maximum number of files which can use emergency slots when regular slots are full
* maxPrepared: maximum number of files in prepared state
* shareType: transfer shares type
* definedShare: share with a defined priority
* name: share name
* priority: share priority
* deliveryService: remote data delivery service endpoint
* localDelivery: whether to use local delivery as well as remote
* remoteSizeLimit: lower limit on file size (in bytes) under which transfers always use local delivery
* useHostCert: whether to use the host certificate for communication with remote delivery services
* dtrLog: path to the location where DTR state is periodically dumped
Specifies how the service prepares its control and session directories at startup.
yes - directories are created and their ownership and permissions adjusted
missing - directories are created, and ownership and permissions are adjusted only for those which were created
no - nothing is created or adjusted
This optional parameter can be used to enable publishing of additional information to ISIS. The default is not to publish ("no"). The information, which is considered to be static to some degree, includes HealthState, OSFamily, Platform, PhysicalCPUs, CPUMultiplicity, CPUModel and ApplicationEnvironment.
This optional parameter can be used to disable the ARC (BES based) job management interface. By default it is enabled.
This optional parameter can be used to enable the EMI ES job management interface. By default it is disabled.
CommonName attribute of bes-factory.
LongDescription attribute of bes-factory.
Name of the Local Resource Management System.
Name of the Operating System. The values are based on the OSType field of the CIM_OperatingSystem model: http://www.dmtf.org/standards/cim/cim_schema_v29 Some examples of valid choices: LINUX, MACOS, Solaris, Windows 2000
The GLUE2 infoprovider wake-up period in seconds.
The maximum number of simultaneous clients of the information interface (LIDI). Default is 10.
The max number of simultaneous clients performing job management operations (extended BES). Default is 100.
The max number of simultaneous clients performing HTTP PUT and GET operations. Default is 100.
Options for the A-REX information provider
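A hedged sketch of how one of the limits documented above could be read from a service configuration XML with Arc::XMLNode (the XML API used elsewhere in this service); the loadLimits/maxJobsRun element nesting is an assumption made for illustration, not the verified a-rex.xsd structure:

#include <iostream>
#include <string>
#include <arc/XMLNode.h>

int main() {
  // Hypothetical fragment of an A-REX service configuration.
  const char* cfg =
    "<Service name=\"a-rex\">"
    "<loadLimits><maxJobsRun>20</maxJobsRun></loadLimits>"
    "</Service>";
  Arc::XMLNode doc(cfg);
  std::string v = doc["loadLimits"]["maxJobsRun"];
  // An absent element yields an empty string - i.e. "do not limit".
  std::cout << (v.empty() ? "unlimited" : v) << std::endl;
  return 0;
}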
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/delegation0000644000000000000000000000013015067751425021034 xustar0030 mtime=1759499029.397428632 28 atime=1759499034.7655102 30 ctime=1759499029.397428632 nordugrid-arc-7.1.1/src/services/a-rex/delegation/0000755000175000002070000000000015067751425023015 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327023150 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.386562329 nordugrid-arc-7.1.1/src/services/a-rex/delegation/Makefile.am0000644000175000002070000000074415067751327025057 0ustar00mockbuildmock00000000000000
noinst_LTLIBRARIES = libdelegation.la
libdelegation_la_SOURCES = \
	uid.cpp FileRecord.cpp FileRecordSQLite.cpp \
	DelegationStore.cpp DelegationStores.cpp \
	uid.h FileRecord.h FileRecordSQLite.h \
	DelegationStore.h DelegationStores.h \
	../SQLhelpers.h
libdelegation_la_CXXFLAGS = -I$(top_srcdir)/include \
	$(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS)
libdelegation_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la \
	$(SQLITE_LIBS)
nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/FileRecord.cpp0000644000000000000000000000013215067751327023636 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.389582375 nordugrid-arc-7.1.1/src/services/a-rex/delegation/FileRecord.cpp0000644000175000002070000000250315067751327025540 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <string>

#include <glib.h>

#include <sys/types.h>
#include <sys/stat.h>

#include <arc/FileUtils.h>

#include "FileRecord.h"

namespace ARex {

std::string FileRecord::uid_to_path(const std::string& uid) {
  // Fan the uid out over 3-character subdirectories so that no single
  // directory accumulates too many entries,
  // e.g. uid "0123456789" maps to <basepath>/012/345/6789.
  std::string path = basepath_;
  std::string::size_type p = 0;
  for(;uid.length() > (p+4);) {
    path = path + G_DIR_SEPARATOR_S + uid.substr(p,3);
    p += 3;
  };
  return path + G_DIR_SEPARATOR_S + uid.substr(p);
}

bool FileRecord::make_file(const std::string& uid) {
  std::string path = uid_to_path(uid);
  std::string::size_type p = path.rfind(G_DIR_SEPARATOR_S);
  if((p != std::string::npos) && (p != 0)) {
    (void)Arc::DirCreate(path.substr(0,p),0,0,S_IXUSR|S_IRUSR|S_IWUSR,true);
  }
  return Arc::FileCreate(uid_to_path(uid),"",0,0,S_IRUSR|S_IWUSR);
}

bool FileRecord::remove_file(const std::string& uid) {
  std::string path = uid_to_path(uid);
  if(Arc::FileDelete(path)) {
    // Walk back up and remove now-empty parent directories, stopping
    // at the base path.
    while(true) {
      std::string::size_type p = path.rfind(G_DIR_SEPARATOR_S);
      if((p == std::string::npos) || (p == 0)) break;
      if(p <= basepath_.length()) break;
      path.resize(p);
      if(!Arc::DirDelete(path,false)) break;
    };
    return true;
  };
  return false;
}

} // namespace ARex
nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/Makefile.in0000644000000000000000000000013215067751355023162 xustar0030 mtime=1759498989.626743772 30 atime=1759499017.859253309 30 ctime=1759499029.387579643 nordugrid-arc-7.1.1/src/services/a-rex/delegation/Makefile.in0000644000175000002070000010101015067751355025057 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/a-rex/delegation ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) am__DEPENDENCIES_1 = libdelegation_la_DEPENDENCIES = \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) am_libdelegation_la_OBJECTS = libdelegation_la-uid.lo \ libdelegation_la-FileRecord.lo \ libdelegation_la-FileRecordSQLite.lo \ libdelegation_la-DelegationStore.lo \ libdelegation_la-DelegationStores.lo libdelegation_la_OBJECTS = $(am_libdelegation_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = 
$(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libdelegation_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = \ ./$(DEPDIR)/libdelegation_la-DelegationStore.Plo \ ./$(DEPDIR)/libdelegation_la-DelegationStores.Plo \ ./$(DEPDIR)/libdelegation_la-FileRecord.Plo \ ./$(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo \ ./$(DEPDIR)/libdelegation_la-uid.Plo am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libdelegation_la_SOURCES) DIST_SOURCES = $(libdelegation_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = 
@GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = 
@abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = libdelegation.la libdelegation_la_SOURCES = \ uid.cpp FileRecord.cpp FileRecordSQLite.cpp \ DelegationStore.cpp DelegationStores.cpp \ uid.h FileRecord.h FileRecordSQLite.h \ DelegationStore.h DelegationStores.h \ ../SQLhelpers.h libdelegation_la_CXXFLAGS = -I$(top_srcdir)/include \ $(LIBXML2_CFLAGS) $(GLIBMM_CFLAGS) $(SQLITE_CFLAGS) $(AM_CXXFLAGS) libdelegation_la_LIBADD = $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(SQLITE_LIBS) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/a-rex/delegation/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/a-rex/delegation/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libdelegation.la: $(libdelegation_la_OBJECTS) $(libdelegation_la_DEPENDENCIES) $(EXTRA_libdelegation_la_DEPENDENCIES) $(AM_V_CXXLD)$(libdelegation_la_LINK) $(libdelegation_la_OBJECTS) $(libdelegation_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-DelegationStore.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-DelegationStores.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-FileRecord.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libdelegation_la-uid.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< libdelegation_la-uid.lo: uid.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) 
$(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-uid.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-uid.Tpo -c -o libdelegation_la-uid.lo `test -f 'uid.cpp' || echo '$(srcdir)/'`uid.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libdelegation_la-uid.Tpo $(DEPDIR)/libdelegation_la-uid.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='uid.cpp' object='libdelegation_la-uid.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-uid.lo `test -f 'uid.cpp' || echo '$(srcdir)/'`uid.cpp libdelegation_la-FileRecord.lo: FileRecord.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-FileRecord.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-FileRecord.Tpo -c -o libdelegation_la-FileRecord.lo `test -f 'FileRecord.cpp' || echo '$(srcdir)/'`FileRecord.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libdelegation_la-FileRecord.Tpo $(DEPDIR)/libdelegation_la-FileRecord.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='FileRecord.cpp' object='libdelegation_la-FileRecord.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-FileRecord.lo `test -f 'FileRecord.cpp' || echo '$(srcdir)/'`FileRecord.cpp libdelegation_la-FileRecordSQLite.lo: FileRecordSQLite.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-FileRecordSQLite.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-FileRecordSQLite.Tpo -c -o libdelegation_la-FileRecordSQLite.lo `test -f 'FileRecordSQLite.cpp' || echo '$(srcdir)/'`FileRecordSQLite.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libdelegation_la-FileRecordSQLite.Tpo $(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='FileRecordSQLite.cpp' object='libdelegation_la-FileRecordSQLite.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-FileRecordSQLite.lo `test -f 'FileRecordSQLite.cpp' || echo '$(srcdir)/'`FileRecordSQLite.cpp libdelegation_la-DelegationStore.lo: DelegationStore.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT 
libdelegation_la-DelegationStore.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-DelegationStore.Tpo -c -o libdelegation_la-DelegationStore.lo `test -f 'DelegationStore.cpp' || echo '$(srcdir)/'`DelegationStore.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libdelegation_la-DelegationStore.Tpo $(DEPDIR)/libdelegation_la-DelegationStore.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DelegationStore.cpp' object='libdelegation_la-DelegationStore.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-DelegationStore.lo `test -f 'DelegationStore.cpp' || echo '$(srcdir)/'`DelegationStore.cpp libdelegation_la-DelegationStores.lo: DelegationStores.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -MT libdelegation_la-DelegationStores.lo -MD -MP -MF $(DEPDIR)/libdelegation_la-DelegationStores.Tpo -c -o libdelegation_la-DelegationStores.lo `test -f 'DelegationStores.cpp' || echo '$(srcdir)/'`DelegationStores.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libdelegation_la-DelegationStores.Tpo $(DEPDIR)/libdelegation_la-DelegationStores.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='DelegationStores.cpp' object='libdelegation_la-DelegationStores.lo' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libdelegation_la_CXXFLAGS) $(CXXFLAGS) -c -o libdelegation_la-DelegationStores.lo `test -f 'DelegationStores.cpp' || echo '$(srcdir)/'`DelegationStores.cpp mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am 
distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/libdelegation_la-DelegationStore.Plo -rm -f ./$(DEPDIR)/libdelegation_la-DelegationStores.Plo -rm -f ./$(DEPDIR)/libdelegation_la-FileRecord.Plo -rm -f ./$(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo -rm -f ./$(DEPDIR)/libdelegation_la-uid.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libdelegation_la-DelegationStore.Plo -rm -f ./$(DEPDIR)/libdelegation_la-DelegationStores.Plo -rm -f ./$(DEPDIR)/libdelegation_la-FileRecord.Plo -rm -f ./$(DEPDIR)/libdelegation_la-FileRecordSQLite.Plo -rm -f ./$(DEPDIR)/libdelegation_la-uid.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
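# (The .NOEXPORT special target below is how those make versions are told to stop exporting.)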
.NOEXPORT: nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/uid.cpp0000644000000000000000000000013215067751327022401 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.388599955 nordugrid-arc-7.1.1/src/services/a-rex/delegation/uid.cpp0000644000175000002070000000110315067751327024302 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include <config.h> #endif #include <cstdlib> #include <sys/time.h> #ifdef HAVE_STDINT_H #include <stdint.h> #endif #include <arc/StringConv.h> #include "uid.h" namespace ARex { std::string rand_uid64(void) { static unsigned int cnt; struct timeval t; gettimeofday(&t,NULL); uint64_t id = (((uint64_t)((cnt++) & 0xffff)) << 48) | (((uint64_t)(t.tv_sec & 0xffff)) << 32) | (((uint64_t)(t.tv_usec & 0xffff)) << 16) | (((uint64_t)(rand() & 0xffff)) << 0); return Arc::inttostr(id,16,16); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/FileRecordSQLite.h0000644000000000000000000000013215067751327024365 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.396097418 nordugrid-arc-7.1.1/src/services/a-rex/delegation/FileRecordSQLite.h0000644000175000002070000000550615067751327026275 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_FILERECORDSQLITE_H__ #define __ARC_DELEGATION_FILERECORDSQLITE_H__ #include <string> #include <list> #include <mutex> #include <sqlite3.h> #include "FileRecord.h" namespace ARex { class FileRecordSQLite: public FileRecord { private: std::mutex lock_; // TODO: use DB locking sqlite3* db_; int sqlite3_exec_nobusy(const char *sql, int (*callback)(void*,int,char**,char**), void *arg, char **errmsg); bool dberr(const char* s, int err); bool open(bool create); void close(void); bool verify(void); public: class Iterator: public FileRecord::Iterator { friend class FileRecordSQLite; private: Iterator(const Iterator&); // disabled copy constructor Iterator(FileRecordSQLite& frec); sqlite3_int64 rowid_; public: ~Iterator(void); virtual Iterator& operator++(void); virtual Iterator& operator--(void); virtual void suspend(void); virtual bool resume(void); virtual operator bool(void) { return (rowid_ != -1); }; virtual bool operator!(void) { return (rowid_ == -1); }; }; friend class FileRecordSQLite::Iterator; FileRecordSQLite(const std::string& base, bool create = true); virtual ~FileRecordSQLite(void); virtual Iterator* NewIterator(void) { return new Iterator(*this); }; virtual bool Recover(void); virtual std::string Add(std::string& id, const std::string& owner, const std::list<std::string>& meta); virtual bool Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list<std::string>& meta); virtual std::string Find(const std::string& id, const std::string& owner, std::list<std::string>& meta); virtual bool Modify(const std::string& id, const std::string& owner, const std::list<std::string>& meta); virtual bool Remove(const std::string& id, const std::string& owner); // Assign specified lock lock_id to the specified credential ids virtual bool AddLock(const std::string& lock_id, const std::list<std::string>& ids, const std::string& owner); // Remove lock lock_id from all associated credentials virtual bool RemoveLock(const std::string& lock_id); // Remove lock lock_id from all associated credentials and store // identifiers of associated credentials into ids virtual bool RemoveLock(const std::string& lock_id, std::list<std::pair<std::string,std::string> >& ids); // Fills locks with all known lock ids.
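// Returns true on success; on failure the textual Error() description is updated and false is returned.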
virtual bool ListLocks(std::list& locks); // Fills locks with all lock ids associated with specified credential id virtual bool ListLocks(const std::string& id, const std::string& owner, std::list& locks); // Fills ids with identifiers of credentials locked by specified lock_id lock virtual bool ListLocked(const std::string& lock_id, std::list >& ids); }; } // namespace ARex #endif // __ARC_DELEGATION_FiLERECORDSQLITE_H__ nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/DelegationStores.cpp0000644000000000000000000000013215067751327025073 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.392761834 nordugrid-arc-7.1.1/src/services/a-rex/delegation/DelegationStores.cpp0000644000175000002070000000364315067751327027003 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include "DelegationStore.h" #include "DelegationStores.h" namespace ARex { DelegationStores::DelegationStores(DelegationStore::DbType db_type):db_type_(db_type) { } DelegationStores::~DelegationStores(void) { std::unique_lock lock(lock_); for(std::map::iterator i = stores_.begin(); i != stores_.end(); ++i) { delete i->second; } } DelegationStore& DelegationStores::operator[](const std::string& path) { std::unique_lock lock(lock_); std::map::iterator i = stores_.find(path); if(i != stores_.end()) return *(i->second); DelegationStore* store = new DelegationStore(path,db_type_); stores_.insert(std::pair(path,store)); return *store; } bool DelegationStores::MatchNamespace(const Arc::SOAPEnvelope& in) { return Arc::DelegationContainerSOAP().MatchNamespace(in); } bool DelegationStores::Process(const std::string& path,const Arc::SOAPEnvelope& in,Arc::SOAPEnvelope& out,const std::string& client,std::string& credentials) { return operator[](path).Process(credentials,in,out,client); } bool DelegationStores::GetRequest(const std::string& path,std::string& id,const std::string& client,std::string& request) { return operator[](path).GetRequest(id,client,request); } bool DelegationStores::PutDeleg(const std::string& path,const std::string& id,const std::string& client,const std::string& credentials) { return operator[](path).PutDeleg(id,client,credentials); } bool DelegationStores::PutCred(const std::string& path,std::string& id,const std::string& client,const std::string& credentials,const std::list& meta) { return operator[](path).PutCred(id,client,credentials,meta); } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/DelegationStores.h0000644000000000000000000000013215067751327024540 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.398162478 nordugrid-arc-7.1.1/src/services/a-rex/delegation/DelegationStores.h0000644000175000002070000000447115067751327026450 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_STORES_H__ #define __ARC_DELEGATION_STORES_H__ #include #include #include #include #include "DelegationStore.h" namespace ARex { /// Set of service storing delegated credentials class DelegationStores { private: std::mutex lock_; std::map stores_; DelegationStore::DbType db_type_; DelegationStores(const DelegationStores&) { }; public: DelegationStores(DelegationStore::DbType db_type = DelegationStore::DbSQLite); ~DelegationStores(void); void SetDbType(DelegationStore::DbType db_type) { db_type_ = db_type; }; /// Returns or creates delegation storage associated with 'path'. 
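/// A minimal usage sketch (the path below is illustrative only, not a fixed ARC location):
///   DelegationStores stores;
///   DelegationStore& store = stores["/var/spool/arc/delegations"];
/// The returned reference stays valid until this DelegationStores object is destroyed.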
DelegationStore& operator[](const std::string& path); /// Check if SOAP request 'in' can be handled by this implementation. bool MatchNamespace(const Arc::SOAPEnvelope& in); /// Processes SOAP request 'in' using delegation storage associated with 'path'. /// Response is filled into 'out'. The 'client' is the identifier of the requestor /// used by the service internally to recognize the owner of stored credentials. /// If the operation produces a credentials token it is returned in 'credentials'. /// Returns true if the operation is successful. bool Process(const std::string& path,const Arc::SOAPEnvelope& in,Arc::SOAPEnvelope& out,const std::string& client,std::string& credentials); /// Provides delegation request from storage 'path' for specified 'id' and 'client'. If 'id' is empty /// then a new storage slot is created and its identifier stored in 'id'. bool GetRequest(const std::string& path,std::string& id,const std::string& client,std::string& request); /// Stores delegated credentials corresponding to delegation request obtained by call to GetRequest(). /// Only public part is expected in 'credentials'. bool PutDeleg(const std::string& path,const std::string& id,const std::string& client,const std::string& credentials); /// Stores full credentials into specified 'id' and 'client'. If 'id' is empty /// then a new storage slot is created and its identifier stored in 'id'. bool PutCred(const std::string& path,std::string& id,const std::string& client,const std::string& credentials, const std::list<std::string>& meta = std::list<std::string>()); }; } // namespace ARex #endif // __ARC_DELEGATION_STORES_H__ nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/DelegationStore.h0000644000000000000000000000013215067751327024355 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.397134729 nordugrid-arc-7.1.1/src/services/a-rex/delegation/DelegationStore.h0000644000175000002070000001415315067751327026263 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_STORE_H__ #define __ARC_DELEGATION_STORE_H__ #include #include #include #include #include #include "FileRecord.h" namespace ARex { class DelegationStore: public Arc::DelegationContainerSOAP { private: class Consumer { public: std::string id; std::string client; std::string path; Consumer(const std::string& id_, const std::string& client_, const std::string& path_): id(id_),client(client_),path(path_) { }; }; std::mutex lock_; std::mutex check_lock_; FileRecord* fstore_; std::map<Arc::DelegationConsumerSOAP*,Consumer> acquired_; unsigned int expiration_; unsigned int maxrecords_; unsigned int mtimeout_; FileRecord::Iterator* mrec_; Arc::Logger logger_; public: enum DbType { DbBerkeley, DbSQLite }; DelegationStore(const std::string& base, DbType db, bool allow_recover = true); ~DelegationStore(void); operator bool(void) { return ((bool)fstore_ && (bool)*fstore_); }; bool operator!(void) { return !((bool)fstore_ && (bool)*fstore_); }; /** Returns description of last error */ std::string Error(void) { return fstore_?fstore_->Error():std::string(""); }; /** Sets expiration time for unlocked credentials */ void Expiration(unsigned int v = 0) { expiration_ = v; }; /** Sets max number of credentials to store */ void MaxRecords(unsigned int v = 0) { maxrecords_ = v; }; void CheckTimeout(unsigned int v = 0) { mtimeout_ = v; }; /** Create a slot for credential storing and return the associated delegation consumer.
The consumer object must be released with ReleaseConsumer/RemoveConsumer */ virtual Arc::DelegationConsumerSOAP* AddConsumer(std::string& id,const std::string& client); /** Find existing delegation slot and create delegation consumer for it. The consumer object must be released with ReleaseConsumer/RemoveConsumer */ virtual Arc::DelegationConsumerSOAP* FindConsumer(const std::string& id,const std::string& client); /** Store credentials into slot associated with specified consumer object */ virtual bool TouchConsumer(Arc::DelegationConsumerSOAP* c,const std::string& credentials); /** Read credentials stored in slot associated with specified consumer object */ virtual bool QueryConsumer(Arc::DelegationConsumerSOAP* c,std::string& credentials); /** Release consumer object but keep credentials store slot */ virtual void ReleaseConsumer(Arc::DelegationConsumerSOAP* c); /** Release consumer object and delete associated credentials store slot */ virtual bool RemoveConsumer(Arc::DelegationConsumerSOAP* c); virtual void CheckConsumers(void); void PeriodicCheckConsumers(void); /** Store new credentials associated with client and assign id to it */ bool AddCred(std::string& id, const std::string& client, const std::string& credentials); /** Store/update credentials with specified id and associated with client */ bool PutCred(const std::string& id, const std::string& client, const std::string& credentials); /** Returns path to file containing credential with specified id and client */ std::string FindCred(const std::string& id,const std::string& client); /** Returns path to file containing credential with specified id and client along with associated metadata */ std::string FindCred(const std::string& id,const std::string& client, std::list<std::string>& meta); /** Retrieves credentials with specified id and associated with client */ bool GetCred(const std::string& id, const std::string& client, std::string& credentials); /** Retrieves locks associated with specified id and client */ bool GetLocks(const std::string& id, const std::string& client, std::list<std::string>& lock_ids); /** Retrieves all locks known */ bool GetLocks(std::list<std::string>& lock_ids); /** Returns credentials ids associated with specific client */ std::list<std::string> ListCredIDs(const std::string& client); /** Returns all credentials ids (1st) along with their client ids (2nd) */ std::list<std::pair<std::string,std::string> > ListCredIDs(void); /** Returns credentials ids and their metadata associated with specific client */ std::list<std::pair<std::string,std::list<std::string> > > ListCredInfos(const std::string& client); /** Locks credentials, associating them with the specified lock identifier */ bool LockCred(const std::string& lock_id, const std::list<std::string>& ids,const std::string& client); /** Releases lock set by previous call to LockCred by associated lock id. Optionally it can update the credentials usage timestamp and force removal of credentials from storage if they are not locked anymore. */ bool ReleaseCred(const std::string& lock_id, bool touch = false, bool remove = false); /** Returns credential ids locked by specific lock id and associated with specified client */ std::list<std::string> ListLockedCredIDs(const std::string& lock_id, const std::string& client); /** Returns credential ids locked by specific lock id */ std::list<std::pair<std::string,std::string> > ListLockedCredIDs(const std::string& lock_id); /** Provides delegation request for specified 'id' and 'client'. If 'id' is empty then a new storage slot is created and its identifier stored in 'id'.
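A hypothetical round-trip built from these two calls - 'store', 'client' and 'signed_creds' being placeholder names: std::string id; std::string request; if(store.GetRequest(id,client,request)) store.PutDeleg(id,client,signed_creds); where 'signed_creds' carries the externally signed 'request'.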
*/ bool GetRequest(std::string& id,const std::string& client,std::string& request); /** Stores delegated credentials corresponding to delegation request obtained by call to GetRequest(). Only public part is expected in 'credentials'. */ bool PutDeleg(const std::string& id,const std::string& client,const std::string& credentials); /** Retrieves public part of credentials with specified id and associated with client */ bool GetDeleg(const std::string& id, const std::string& client, std::string& credentials); /** Stores full credentials into specified 'id' and 'client'. If 'id' is empty then new storage slot is created and its identifier stored in 'id'. */ bool PutCred(std::string& id,const std::string& client,const std::string& credentials,const std::list& meta = std::list()); }; } // namespace ARex #endif // __ARC_DELEGATION_STORE_H__ nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/uid.h0000644000000000000000000000013215067751327022046 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.393932536 nordugrid-arc-7.1.1/src/services/a-rex/delegation/uid.h0000644000175000002070000000013115067751327023743 0ustar00mockbuildmock00000000000000#include namespace ARex { std::string rand_uid64(void); } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/FileRecord.h0000644000000000000000000000013215067751327023303 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.395032519 nordugrid-arc-7.1.1/src/services/a-rex/delegation/FileRecord.h0000644000175000002070000000737215067751327025216 0ustar00mockbuildmock00000000000000#ifndef __ARC_DELEGATION_FILERECORD_H__ #define __ARC_DELEGATION_FILERECORD_H__ #include #include namespace ARex { class FileRecord { protected: std::string basepath_; int error_num_; std::string error_str_; bool valid_; std::string uid_to_path(const std::string& uid); bool make_file(const std::string& uid); bool remove_file(const std::string& uid); public: class Iterator { private: Iterator(const Iterator&); // disabled copy constructor protected: Iterator(FileRecord& frec):frec_(frec) {}; FileRecord& frec_; std::string uid_; std::string id_; std::string owner_; std::list meta_; public: virtual ~Iterator(void) {}; virtual Iterator& operator++(void) = 0; virtual Iterator& operator--(void) = 0; virtual void suspend(void) = 0; virtual bool resume(void) = 0; virtual operator bool(void) = 0; virtual bool operator!(void) = 0; const std::string& uid(void) const { return uid_; }; const std::string& id(void) const { return id_; }; const std::string& owner(void) const { return owner_; }; const std::list& meta(void) const { return meta_; }; const std::string path(void) const { return frec_.uid_to_path(uid_); }; }; friend class FileRecord::Iterator; FileRecord(const std::string& base, bool create = true): basepath_(base), error_num_(0), valid_(false) {}; virtual ~FileRecord(void) {}; operator bool(void) { return valid_; }; bool operator!(void) { return !valid_; }; /// Returns textual description of last error. std::string Error(void) { return error_str_; }; /// Obtain an iterator for walking through existing credentials slots. virtual Iterator* NewIterator(void) = 0; virtual bool Recover(void) = 0; /// Adds new slot for storing credentials including generation of uid, /// assignment of id (if empty) and creation of file for storing credentials. 
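/// Returns the filesystem path of the created credentials file, or an empty string on failure.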
virtual std::string Add(std::string& id, const std::string& owner, const std::list<std::string>& meta) = 0; /// Adds only record in database (to be used for database management only). virtual bool Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list<std::string>& meta) = 0; /// Obtains path to stored credentials. virtual std::string Find(const std::string& id, const std::string& owner, std::list<std::string>& meta) = 0; /// Modifies existing entry in database with new meta values. virtual bool Modify(const std::string& id, const std::string& owner, const std::list<std::string>& meta) = 0; /// Fully removes credentials slot including file which stores credentials. virtual bool Remove(const std::string& id, const std::string& owner) = 0; // Assign specified lock lock_id to the specified credential ids virtual bool AddLock(const std::string& lock_id, const std::list<std::string>& ids, const std::string& owner) = 0; // Remove lock lock_id from all associated credentials virtual bool RemoveLock(const std::string& lock_id) = 0; // Remove lock lock_id from all associated credentials and store // identifiers of associated credentials into ids virtual bool RemoveLock(const std::string& lock_id, std::list<std::pair<std::string,std::string> >& ids) = 0; // Fills locks with all known lock ids. virtual bool ListLocks(std::list<std::string>& locks) = 0; // Fills locks with all lock ids associated with specified credential id virtual bool ListLocks(const std::string& id, const std::string& owner, std::list<std::string>& locks) = 0; // Fills ids with identifiers of credentials locked by specified lock_id lock virtual bool ListLocked(const std::string& lock_id, std::list<std::pair<std::string,std::string> >& ids) = 0; }; } // namespace ARex #endif // __ARC_DELEGATION_FILERECORD_H__ nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/DelegationStore.cpp0000644000000000000000000000013215067751327024710 xustar0030 mtime=1759498967.750491903 30 atime=1759498967.862493605 30 ctime=1759499029.391659349 nordugrid-arc-7.1.1/src/services/a-rex/delegation/DelegationStore.cpp0000644000175000002070000004306015067751327026615 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "FileRecordSQLite.h" #include "DelegationStore.h" namespace ARex { DelegationStore::DelegationStore(const std::string& base, DbType db, bool allow_recover): logger_(Arc::Logger::rootLogger, "Delegation Storage") { fstore_ = NULL; expiration_ = 0; maxrecords_ = 0; mtimeout_ = 0; mrec_ = NULL; switch(db) { case DbSQLite: fstore_ = new FileRecordSQLite(base, allow_recover); break; default: failure_ = "Unsupported database type requested for delegation storage."; logger_.msg(Arc::ERROR,"%s",failure_); return; }; if(!*fstore_) { failure_ = "Failed to initialize storage. " + fstore_->Error(); logger_.msg(Arc::WARNING,"%s",failure_); if(allow_recover) { // Database creation failed. Try recovery. if(!fstore_->Recover()) { failure_ = "Failed to recover storage. " + fstore_->Error(); logger_.msg(Arc::WARNING,"%s",failure_); logger_.msg(Arc::WARNING,"Wiping and re-creating whole storage"); delete fstore_; fstore_ = NULL; // Full recreation of database. Delete everything.
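// (The loop below removes both plain files and per-credential subdirectories under the base path.)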
Glib::Dir dir(base); std::string name; while ((name = dir.read_name()) != "") { std::string fullpath(base); fullpath += G_DIR_SEPARATOR_S + name; struct stat st; if (::lstat(fullpath.c_str(), &st) == 0) { if(S_ISDIR(st.st_mode)) { Arc::DirDelete(fullpath.c_str()); } else { Arc::FileDelete(fullpath.c_str()); }; }; }; switch(db) { case DbSQLite: fstore_ = new FileRecordSQLite(base); break; default: // Must not happen - already sorted out above. return; }; if(!*fstore_) { // Failure failure_ = "Failed to re-create storage. " + fstore_->Error(); logger_.msg(Arc::WARNING,"%s",failure_); } else { // Database recreated. }; }; } else { logger_.msg(Arc::ERROR,"%s",failure_); }; }; // TODO: Do some cleaning on startup } DelegationStore::~DelegationStore(void) { // BDB objects must be destroyed because // somehow BDB does not understand that the process // has already died and keeps locks forever. delete mrec_; delete fstore_; /* Following code is not executed because there must be no active consumers when the store is being destroyed. It is probably safer to leave hanging consumers than to destroy them. Anyway by design this destructor is supposed to be called only when the application exits. while(acquired_.size() > 0) { std::map<Arc::DelegationConsumerSOAP*,Consumer>::iterator i = acquired_.begin(); delete i->first; acquired_.erase(i); }; */ } Arc::DelegationConsumerSOAP* DelegationStore::AddConsumer(std::string& id,const std::string& client) { std::string path = fstore_->Add(id,client,std::list<std::string>()); if(path.empty()) { failure_ = "Local error - failed to create slot for delegation. "+fstore_->Error(); return NULL; } Arc::DelegationConsumerSOAP* cs = new Arc::DelegationConsumerSOAP(); std::string key; cs->Backup(key); if(!key.empty()) { if(!Arc::FileCreate(path,key,0,0,S_IRUSR|S_IWUSR)) { fstore_->Remove(id,client); delete cs; cs = NULL; failure_ = "Local error - failed to store credentials"; return NULL; }; }; std::unique_lock<std::mutex> lock(lock_); acquired_.insert(std::pair<Arc::DelegationConsumerSOAP*,Consumer>(cs,Consumer(id,client,path))); return cs; } static const char* key_start_tag("-----BEGIN RSA PRIVATE KEY-----"); static const char* key_end_tag("-----END RSA PRIVATE KEY-----"); static std::string extract_key(const std::string& proxy) { std::string key; std::string::size_type start = proxy.find(key_start_tag); if(start != std::string::npos) { std::string::size_type end = proxy.find(key_end_tag,start+strlen(key_start_tag)); if(end != std::string::npos) { return proxy.substr(start,end-start+strlen(key_end_tag)); }; }; return ""; } static void remove_key(std::string& proxy) { while(true) { std::string::size_type start = proxy.find(key_start_tag); if(start == std::string::npos) break; std::string::size_type end = proxy.find(key_end_tag,start+strlen(key_start_tag)); if(end == std::string::npos) end = proxy.length(); proxy.erase(start,end-start+strlen(key_end_tag)); }; } static bool compare_no_newline(const std::string& str1, const std::string& str2) { std::string::size_type p1 = 0; std::string::size_type p2 = 0; for(;;) { if((p1 < str1.length()) && ((str1[p1] == '\r') || (str1[p1] == '\n'))) { ++p1; continue; }; if((p2 < str2.length()) && ((str2[p2] == '\r') || (str2[p2] == '\n'))) { ++p2; continue; }; if(p1 >= str1.length()) break; if(p2 >= str2.length()) break; if(str1[p1] != str2[p2]) break; ++p1; ++p2; }; return ((p1 >= str1.length()) && (p2 >= str2.length())); } Arc::DelegationConsumerSOAP* DelegationStore::FindConsumer(const std::string& id,const std::string& client) { std::list<std::string> meta; std::string path = fstore_->Find(id,client,meta); if(path.empty()) { failure_ = "Identifier not found for
client. "+fstore_->Error(); return NULL; }; std::string content; if(!Arc::FileRead(path,content)) { failure_ = "Local error - failed to read credentials"; return NULL; }; Arc::DelegationConsumerSOAP* cs = new Arc::DelegationConsumerSOAP(); if(!content.empty()) { std::string key = extract_key(content); if(!key.empty()) { cs->Restore(key); }; }; std::unique_lock<std::mutex> lock(lock_); acquired_.insert(std::pair<Arc::DelegationConsumerSOAP*,Consumer>(cs,Consumer(id,client,path))); return cs; } bool DelegationStore::TouchConsumer(Arc::DelegationConsumerSOAP* c,const std::string& credentials) { if(!c) return false; std::unique_lock<std::mutex> lock(lock_); std::map<Arc::DelegationConsumerSOAP*,Consumer>::iterator i = acquired_.find(c); if(i == acquired_.end()) { failure_ = "Delegation not found"; return false; }; if(!credentials.empty()) { if(!Arc::FileCreate(i->second.path,credentials,0,0,S_IRUSR|S_IWUSR)) { failure_ = "Local error - failed to create storage for delegation"; logger_.msg(Arc::WARNING,"DelegationStore: TouchConsumer failed to create file %s",i->second.path); return false; }; }; return true; } bool DelegationStore::QueryConsumer(Arc::DelegationConsumerSOAP* c,std::string& credentials) { if(!c) return false; std::unique_lock<std::mutex> lock(lock_); std::map<Arc::DelegationConsumerSOAP*,Consumer>::iterator i = acquired_.find(c); if(i == acquired_.end()) { failure_ = "Delegation not found"; return false; }; Arc::FileRead(i->second.path,credentials); return true; } void DelegationStore::ReleaseConsumer(Arc::DelegationConsumerSOAP* c) { if(!c) return; std::unique_lock<std::mutex> lock(lock_); std::map<Arc::DelegationConsumerSOAP*,Consumer>::iterator i = acquired_.find(c); if(i == acquired_.end()) return; // ???? // Check if key changed. If yes then store only key. // TODO: optimize std::string newkey; i->first->Backup(newkey); if(!newkey.empty()) { std::string oldkey; std::string content; Arc::FileRead(i->second.path,content); if(!content.empty()) oldkey = extract_key(content); if(!compare_no_newline(newkey,oldkey)) { Arc::FileCreate(i->second.path,newkey,0,0,S_IRUSR|S_IWUSR); }; }; delete i->first; acquired_.erase(i); } bool DelegationStore::RemoveConsumer(Arc::DelegationConsumerSOAP* c) { if(!c) return false; std::unique_lock<std::mutex> lock(lock_); std::map<Arc::DelegationConsumerSOAP*,Consumer>::iterator i = acquired_.find(c); if(i == acquired_.end()) return false; // ???? bool r = fstore_->Remove(i->second.id,i->second.client); // TODO: Handle failure delete i->first; acquired_.erase(i); return r; } void DelegationStore::CheckConsumers(void) { // No cleaning is done here, to avoid delaying the delegation response. // Instead PeriodicCheckConsumers() is called to do periodic cleaning. } void DelegationStore::PeriodicCheckConsumers(void) { // Go through stored credentials // Remove outdated records (those with locks won't be removed) if(expiration_) { time_t start = ::time(NULL); std::unique_lock<std::mutex> check_lock(lock_); if(mrec_ != NULL) { if(!mrec_->resume()) { logger_.msg(Arc::WARNING,"DelegationStore: PeriodicCheckConsumers failed to resume iterator"); delete mrec_; mrec_ = NULL; }; }; if(mrec_ == NULL) { mrec_ = fstore_->NewIterator(); }; for(;(bool)(*mrec_);++(*mrec_)) { if(mtimeout_ && (((unsigned int)(::time(NULL) - start)) > mtimeout_)) { mrec_->suspend(); return; } struct stat st; if(::stat(mrec_->path().c_str(),&st) == 0) { if(((unsigned int)(::time(NULL) - st.st_mtime)) > expiration_) { if(fstore_->Remove(mrec_->id(),mrec_->owner())) { } else { // It is ok to fail here because Remove checks for delegation locks. // So reporting only for debugging purposes.
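// (A delegation that is still locked simply stays in place until its lock is released.)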
logger_.msg(Arc::DEBUG,"DelegationStore: PeriodicCheckConsumers failed to remove old delegation %s - %s", mrec_->uid(), fstore_->Error()); }; }; }; }; delete mrec_; mrec_ = NULL; }; // TODO: Remove records over threshold return; } bool DelegationStore::AddCred(std::string& id, const std::string& client, const std::string& credentials) { std::string path = fstore_->Add(id,client,std::list()); if(path.empty()) { failure_ = "Local error - failed to create slot for delegation. "+fstore_->Error(); return false; } if(!Arc::FileCreate(path,credentials,0,0,S_IRUSR|S_IWUSR)) { fstore_->Remove(id,client); failure_ = "Local error - failed to create storage for delegation"; logger_.msg(Arc::WARNING,"DelegationStore: TouchConsumer failed to create file %s",path); return false; }; return true; } bool DelegationStore::PutCred(const std::string& id, const std::string& client, const std::string& credentials) { std::list meta; std::string path = fstore_->Find(id,client,meta); if(path.empty()) { failure_ = "Local error - failed to find specified credentials. "+fstore_->Error(); return false; } if(!Arc::FileCreate(path,credentials,0,0,S_IRUSR|S_IWUSR)) { failure_ = "Local error - failed to store delegation"; return false; }; return true; } std::string DelegationStore::FindCred(const std::string& id,const std::string& client) { std::list meta; return fstore_->Find(id,client,meta); } std::string DelegationStore::FindCred(const std::string& id,const std::string& client,std::list& meta) { return fstore_->Find(id,client,meta); } bool DelegationStore::GetCred(const std::string& id, const std::string& client, std::string& credentials) { std::list meta; std::string path = fstore_->Find(id,client,meta); if(path.empty()) { failure_ = "Local error - failed to find specified credentials. 
"+fstore_->Error(); return false; } if(!Arc::FileRead(path,credentials)) { failure_ = "Local error - failed to read credentials"; return false; }; return true; } bool DelegationStore::GetLocks(const std::string& id, const std::string& client, std::list& lock_ids) { return fstore_->ListLocks(id, client, lock_ids); } bool DelegationStore::GetLocks(std::list& lock_ids) { return fstore_->ListLocks(lock_ids); } std::list DelegationStore::ListCredIDs(const std::string& client) { std::list res; FileRecord::Iterator& rec = *(fstore_->NewIterator()); for(;(bool)rec;++rec) { if(rec.owner() == client) res.push_back(rec.id()); }; delete &rec; return res; } std::list > > DelegationStore::ListCredInfos(const std::string& client) { std::list > > res; FileRecord::Iterator& rec = *(fstore_->NewIterator()); for(;(bool)rec;++rec) { if(rec.owner() != client) continue; res.push_back(std::pair >()); res.back().first = rec.id(); res.back().second = rec.meta(); }; delete &rec; return res; } std::list > DelegationStore::ListLockedCredIDs(const std::string& lock_id) { std::list > ids; (void)fstore_->ListLocked(lock_id, ids); return ids; } std::list DelegationStore::ListLockedCredIDs(const std::string& lock_id, const std::string& client) { std::list res; std::list > ids; if(!fstore_->ListLocked(lock_id, ids)) return res; for(std::list >::iterator id = ids.begin(); id != ids.end();++id) { if(id->second == client) res.push_back(id->first); } return res; } std::list > DelegationStore::ListCredIDs(void) { std::list > res; FileRecord::Iterator& rec = *(fstore_->NewIterator()); for(;(bool)rec;++rec) { res.push_back(std::pair(rec.id(),rec.owner())); }; delete &rec; return res; } bool DelegationStore::LockCred(const std::string& lock_id, const std::list& ids,const std::string& client) { if(!fstore_->AddLock(lock_id,ids,client)) { failure_ = "Local error - failed set lock for delegation. 
"+fstore_->Error(); return false; }; return true; } bool DelegationStore::ReleaseCred(const std::string& lock_id, bool touch, bool remove) { if((!touch) && (!remove)) return fstore_->RemoveLock(lock_id); std::list > ids; if(!fstore_->RemoveLock(lock_id,ids)) return false; for(std::list >::iterator i = ids.begin(); i != ids.end(); ++i) { if(touch) { std::list meta; std::string path = fstore_->Find(i->first,i->second,meta); // TODO: in a future use meta for storing times if(!path.empty()) ::utime(path.c_str(),NULL); }; if(remove) fstore_->Remove(i->first,i->second); }; return true; } bool DelegationStore::GetRequest(std::string& id,const std::string& client,std::string& request) { Arc::DelegationConsumerSOAP* consumer = NULL; if(!id.empty()) { consumer = FindConsumer(id,client); }; if(consumer == NULL) { consumer = AddConsumer(id,client); }; if(consumer == NULL) return false; if(id.empty()) { ReleaseConsumer(consumer); return false; }; bool result = consumer->Request(request); ReleaseConsumer(consumer); return result; } bool DelegationStore::PutDeleg(const std::string& id,const std::string& client,const std::string& credentials) { Arc::DelegationConsumerSOAP* consumer = FindConsumer(id,client); if(consumer == NULL) return false; std::string content(credentials); if(!consumer->Acquire(content)) { ReleaseConsumer(consumer); return false; }; if(!TouchConsumer(consumer,content)) { ReleaseConsumer(consumer); return false; }; ReleaseConsumer(consumer); return true; } bool DelegationStore::GetDeleg(const std::string& id, const std::string& client, std::string& credentials) { std::string creds; if(!GetCred(id, client, credentials)) return false; remove_key(credentials); return true; } bool DelegationStore::PutCred(std::string& id,const std::string& client,const std::string& credentials,const std::list& meta) { if(!id.empty()) { std::list old_meta; std::string path = fstore_->Find(id,client,old_meta); if(path.empty()) { failure_ = "Local error - failed to find delegation slot. "+fstore_->Error(); return false; } if(!Arc::FileCreate(path,credentials,0,0,S_IRUSR|S_IWUSR)) { failure_ = "Local error - failed to store credentials"; return false; }; if(!meta.empty()) fstore_->Modify(id,client,meta); } else { std::string path = fstore_->Add(id,client,meta); if(path.empty()) { failure_ = "Local error - failed to create slot for delegation. 
"+fstore_->Error(); return false; } if(!Arc::FileCreate(path,credentials,0,0,S_IRUSR|S_IWUSR)) { fstore_->Remove(id,client); failure_ = "Local error - failed to store credentials"; return false; }; }; return true; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/delegation/PaxHeaders/FileRecordSQLite.cpp0000644000000000000000000000013215067751327024720 xustar0030 mtime=1759498967.751729254 30 atime=1759498967.862493605 30 ctime=1759499029.390570469 nordugrid-arc-7.1.1/src/services/a-rex/delegation/FileRecordSQLite.cpp0000644000175000002070000004706715067751327026640 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "uid.h" #include "../SQLhelpers.h" #include "FileRecordSQLite.h" namespace ARex { #define FR_DB_NAME "list" bool FileRecordSQLite::dberr(const char* s, int err) { if(err == SQLITE_OK) return true; error_num_ = err; #ifdef HAVE_SQLITE3_ERRSTR error_str_ = std::string(s)+": "+sqlite3_errstr(err); #else error_str_ = std::string(s)+": error code "+Arc::tostring(err); #endif return false; } FileRecordSQLite::FileRecordSQLite(const std::string& base, bool create): FileRecord(base, create), db_(NULL) { valid_ = open(create); } bool FileRecordSQLite::verify(void) { // Not implemented and probably not needed return true; } FileRecordSQLite::~FileRecordSQLite(void) { close(); } int FileRecordSQLite::sqlite3_exec_nobusy(const char *sql, int (*callback)(void*,int,char**,char**), void *arg, char **errmsg) { int err; while((err = sqlite3_exec(db_, sql, callback, arg, errmsg)) == SQLITE_BUSY) { // Access to database is designed in such way that it should not block for long time. // So it should be safe to simply wait for lock to be released without any timeout. 
struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; return err; } bool FileRecordSQLite::open(bool create) { std::string dbpath = basepath_ + G_DIR_SEPARATOR_S + FR_DB_NAME; if(db_ != NULL) return true; // already open int flags = SQLITE_OPEN_READWRITE; // it will open read-only if access is protected if(create) { flags |= SQLITE_OPEN_CREATE; }; int err; while((err = sqlite3_open_v2(dbpath.c_str(), &db_, flags, NULL)) == SQLITE_BUSY) { // In case something prevents databasre from open right now - retry if(db_) (void)sqlite3_close(db_); db_ = NULL; struct timespec delay = { 0, 10000000 }; // 0.01s - should be enough for most cases (void)::nanosleep(&delay, NULL); }; if(!dberr("Error opening database", err)) { if(db_) (void)sqlite3_close(db_); db_ = NULL; return false; }; if(create) { if(!dberr("Error creating table rec", sqlite3_exec_nobusy("CREATE TABLE IF NOT EXISTS rec(id, owner, uid, meta, UNIQUE(id, owner), UNIQUE(uid))", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; if(!dberr("Error creating table lock", sqlite3_exec_nobusy("CREATE TABLE IF NOT EXISTS lock(lockid, uid)", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; if(!dberr("Error creating index lockid", sqlite3_exec_nobusy("CREATE INDEX IF NOT EXISTS lockid ON lock (lockid)", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; if(!dberr("Error creating index uid", sqlite3_exec_nobusy("CREATE INDEX IF NOT EXISTS uid ON lock (uid)", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; } else { // SQLite opens database in lazy way. But we still want to know if it is good database. if(!dberr("Error checking database", sqlite3_exec_nobusy("PRAGMA schema_version;", NULL, NULL, NULL))) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; return false; }; }; return true; } void FileRecordSQLite::close(void) { valid_ = false; if(db_) { (void)sqlite3_close(db_); // todo: handle error db_ = NULL; }; } void store_strings(const std::list& strs, std::string& buf) { if(!strs.empty()) { for(std::list::const_iterator str = strs.begin(); ; ) { buf += sql_escape(*str); ++str; if (str == strs.end()) break; buf += '#'; }; }; } static void parse_strings(std::list& strs, const char* buf) { if(!buf || (*buf == '\0')) return; const char* sep = std::strchr(buf, '#'); while(sep) { strs.push_back(sql_unescape(std::string(buf,sep-buf))); buf = sep+1; sep = std::strchr(buf, '#'); }; if(*buf == '\0') return; strs.push_back(sql_unescape(std::string(buf))); } bool FileRecordSQLite::Recover(void) { std::unique_lock lock(lock_); // Real recovery not implemented yet. 
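// Returning false makes the caller (see the DelegationStore constructor) wipe and re-create the whole storage.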
close(); error_num_ = -1; error_str_ = "Recovery not implemented yet."; return false; } struct FindCallbackRecArg { sqlite3_int64 rowid; std::string id; std::string owner; std::string uid; std::list meta; FindCallbackRecArg(): rowid(-1) {}; }; static int FindCallbackRec(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if((strcmp(names[n], "rowid") == 0) || (strcmp(names[n], "_rowid_") == 0)) { (void)Arc::stringto(texts[n], ((FindCallbackRecArg*)arg)->rowid); } else if(strcmp(names[n], "uid") == 0) { ((FindCallbackRecArg*)arg)->uid = texts[n]; } else if(strcmp(names[n], "id") == 0) { ((FindCallbackRecArg*)arg)->id = sql_unescape(texts[n]); } else if(strcmp(names[n], "owner") == 0) { ((FindCallbackRecArg*)arg)->owner = sql_unescape(texts[n]); } else if(strcmp(names[n], "meta") == 0) { parse_strings(((FindCallbackRecArg*)arg)->meta, texts[n]); }; }; }; return 0; } struct FindCallbackUidMetaArg { std::string& uid; std::list& meta; FindCallbackUidMetaArg(std::string& uid, std::list& meta): uid(uid), meta(meta) {}; }; static int FindCallbackUidMeta(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "uid") == 0) { ((FindCallbackUidMetaArg*)arg)->uid = texts[n]; } else if(strcmp(names[n], "meta") == 0) { parse_strings(((FindCallbackUidMetaArg*)arg)->meta, texts[n]); }; }; }; return 0; } struct FindCallbackUidArg { std::string& uid; FindCallbackUidArg(std::string& uid): uid(uid) {}; }; static int FindCallbackUid(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "uid") == 0) { ((FindCallbackUidMetaArg*)arg)->uid = texts[n]; }; }; }; return 0; } struct FindCallbackCountArg { int count; FindCallbackCountArg():count(0) {}; }; static int FindCallbackCount(void* arg, int colnum, char** texts, char** names) { ((FindCallbackCountArg*)arg)->count += 1; return 0; } struct FindCallbackIdOwnerArg { std::list< std::pair >& records; FindCallbackIdOwnerArg(std::list< std::pair >& recs): records(recs) {}; }; static int FindCallbackIdOwner(void* arg, int colnum, char** texts, char** names) { std::pair rec; for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "id") == 0) { rec.first = sql_unescape(texts[n]); } else if(strcmp(names[n], "owner") == 0) { rec.second = sql_unescape(texts[n]); }; }; }; if(!rec.first.empty()) ((FindCallbackIdOwnerArg*)arg)->records.push_back(rec); return 0; } struct FindCallbackLockArg { std::list< std::string >& records; FindCallbackLockArg(std::list< std::string >& recs): records(recs) {}; }; static int FindCallbackLock(void* arg, int colnum, char** texts, char** names) { for(int n = 0; n < colnum; ++n) { if(names[n] && texts[n]) { if(strcmp(names[n], "lockid") == 0) { std::string rec = sql_unescape(texts[n]); if(!rec.empty()) ((FindCallbackLockArg*)arg)->records.push_back(rec); }; }; }; return 0; } std::string FileRecordSQLite::Add(std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return ""; int uidtries = 10; // some sane number std::string uid; while(true) { if(!(uidtries--)) { error_str_ = "Out of tries adding record to database"; return ""; }; std::unique_lock lock(lock_); uid = rand_uid64().substr(4); std::string metas; store_strings(meta, metas); std::string sqlcmd = "INSERT INTO rec(id, owner, uid, meta) VALUES ('"+ sql_escape(id.empty()?uid:id)+"', '"+ sql_escape(owner)+"', '"+uid+"', 
'"+metas+"')"; int dbres = sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL); if(dbres == SQLITE_CONSTRAINT) { // retry due to non-unique id uid.resize(0); continue; }; if(!dberr("Failed to add record to database", dbres)) { return ""; }; if(sqlite3_changes(db_) != 1) { error_str_ = "Failed to add record to database"; return ""; }; break; }; if(id.empty()) id = uid; make_file(uid); return uid_to_path(uid); } bool FileRecordSQLite::Add(const std::string& uid, const std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return false; std::unique_lock lock(lock_); std::string metas; store_strings(meta, metas); std::string sqlcmd = "INSERT INTO rec(id, owner, uid, meta) VALUES ('"+ sql_escape(id.empty()?uid:id)+"', '"+ sql_escape(owner)+"', '"+uid+"', '"+metas+"')"; int dbres = sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL); if(!dberr("Failed to add record to database", dbres)) { return false; }; if(sqlite3_changes(db_) != 1) { error_str_ = "Failed to add record to database"; return false; }; return true; } std::string FileRecordSQLite::Find(const std::string& id, const std::string& owner, std::list& meta) { if(!valid_) return ""; std::unique_lock lock(lock_); std::string sqlcmd = "SELECT uid, meta FROM rec WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; std::string uid; FindCallbackUidMetaArg arg(uid, meta); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackUidMeta, &arg, NULL))) { return ""; }; if(uid.empty()) { error_str_ = "Failed to retrieve record from database"; return ""; }; return uid_to_path(uid); } bool FileRecordSQLite::Modify(const std::string& id, const std::string& owner, const std::list& meta) { if(!valid_) return false; std::unique_lock lock(lock_); std::string metas; store_strings(meta, metas); std::string sqlcmd = "UPDATE rec SET meta = '"+metas+"' WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; if(!dberr("Failed to update record in database",sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = "Failed to find record in database"; return false; }; return true; } bool FileRecordSQLite::Remove(const std::string& id, const std::string& owner) { if(!valid_) return false; std::unique_lock lock(lock_); std::string uid; { std::string sqlcmd = "SELECT uid FROM rec WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; FindCallbackUidArg arg(uid); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackUid, &arg, NULL))) { return false; // No such record? 
}; }; if(uid.empty()) { error_str_ = "Record not found"; return false; // No such record }; { std::string sqlcmd = "SELECT uid FROM lock WHERE (uid = '"+uid+"')"; FindCallbackCountArg arg; if(!dberr("Failed to find locks in database",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackCount, &arg, NULL))) { return false; }; if(arg.count > 0) { error_str_ = "Record has active locks"; return false; // have locks }; }; { std::string sqlcmd = "DELETE FROM rec WHERE (uid = '"+uid+"')"; if(!dberr("Failed to delete record in database",sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = "Failed to delete record in database"; return false; // no such record }; }; remove_file(uid); return true; } bool FileRecordSQLite::AddLock(const std::string& lock_id, const std::list& ids, const std::string& owner) { if(!valid_) return false; std::unique_lock lock(lock_); for(std::list::const_iterator id = ids.begin(); id != ids.end(); ++id) { std::string uid; { std::string sqlcmd = "SELECT uid FROM rec WHERE ((id = '"+sql_escape(*id)+"') AND (owner = '"+sql_escape(owner)+"'))"; FindCallbackUidArg arg(uid); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackUid, &arg, NULL))) { return false; // No such record? }; }; if(uid.empty()) { // No such record continue; }; std::string sqlcmd = "INSERT INTO lock(lockid, uid) VALUES ('"+sql_escape(lock_id)+"','"+uid+"')"; if(!dberr("addlock:put",sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; }; return true; } bool FileRecordSQLite::RemoveLock(const std::string& lock_id) { if(!valid_) return false; std::unique_lock lock(lock_); // map lock to id,owner { std::string sqlcmd = "DELETE FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"')"; if(!dberr("removelock:del",sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = ""; return false; }; }; return true; } bool FileRecordSQLite::RemoveLock(const std::string& lock_id, std::list >& ids) { if(!valid_) return false; std::unique_lock lock(lock_); // map lock to id,owner { std::string sqlcmd = "SELECT id,owner FROM rec WHERE uid IN (SELECT uid FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"'))"; FindCallbackIdOwnerArg arg(ids); if(!dberr("removelock:get",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackIdOwner, &arg, NULL))) { //return false; }; }; { std::string sqlcmd = "DELETE FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"')"; if(!dberr("removelock:del",sqlite3_exec_nobusy(sqlcmd.c_str(), NULL, NULL, NULL))) { return false; }; if(sqlite3_changes(db_) < 1) { error_str_ = ""; return false; }; }; return true; } bool FileRecordSQLite::ListLocked(const std::string& lock_id, std::list >& ids) { if(!valid_) return false; std::unique_lock lock(lock_); // map lock to id,owner { std::string sqlcmd = "SELECT id,owner FROM rec WHERE uid IN (SELECT uid FROM lock WHERE (lockid = '"+sql_escape(lock_id)+"'))"; FindCallbackIdOwnerArg arg(ids); if(!dberr("listlocked:get",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackIdOwner, &arg, NULL))) { return false; }; }; //if(ids.empty()) return false; return true; } bool FileRecordSQLite::ListLocks(std::list& locks) { if(!valid_) return false; std::unique_lock lock(lock_); { std::string sqlcmd = "SELECT lockid FROM lock"; FindCallbackLockArg arg(locks); if(!dberr("listlocks:get",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackLock, &arg, NULL))) { return false; }; }; return true; } bool 
FileRecordSQLite::ListLocks(const std::string& id, const std::string& owner, std::list& locks) { if(!valid_) return false; std::unique_lock lock(lock_); std::string uid; { std::string sqlcmd = "SELECT uid FROM rec WHERE ((id = '"+sql_escape(id)+"') AND (owner = '"+sql_escape(owner)+"'))"; FindCallbackUidArg arg(uid); if(!dberr("Failed to retrieve record from database",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackUid, &arg, NULL))) { return false; // No such record? }; }; if(uid.empty()) { error_str_ = "Record not found"; return false; // No such record }; { std::string sqlcmd = "SELECT lockid FROM lock WHERE (uid = '"+uid+"')"; FindCallbackLockArg arg(locks); if(!dberr("listlocks:get",sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackLock, &arg, NULL))) { return false; }; }; return true; } FileRecordSQLite::Iterator::Iterator(FileRecordSQLite& frec):FileRecord::Iterator(frec) { rowid_ = -1; std::unique_lock lock(frec.lock_); { std::string sqlcmd = "SELECT _rowid_,id,owner,uid,meta FROM rec ORDER BY _rowid_ LIMIT 1"; FindCallbackRecArg arg; if(!frec.dberr("listlocks:get",frec.sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackRec, &arg, NULL))) { return; }; if(arg.uid.empty()) { return; }; uid_ = arg.uid; id_ = arg.id; owner_ = arg.owner; meta_ = arg.meta; rowid_ = arg.rowid; }; } FileRecordSQLite::Iterator::~Iterator(void) { } FileRecordSQLite::Iterator& FileRecordSQLite::Iterator::operator++(void) { if(rowid_ == -1) return *this; FileRecordSQLite& frec((FileRecordSQLite&)frec_); std::unique_lock lock(frec.lock_); { std::string sqlcmd = "SELECT _rowid_,id,owner,uid,meta FROM rec WHERE (_rowid_ > " + Arc::tostring(rowid_) + ") ORDER BY _rowid_ ASC LIMIT 1"; FindCallbackRecArg arg; if(!frec.dberr("listlocks:get",frec.sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackRec, &arg, NULL))) { rowid_ = -1; return *this; }; if(arg.uid.empty()) { rowid_ = -1; return *this; }; uid_ = arg.uid; id_ = arg.id; owner_ = arg.owner; meta_ = arg.meta; rowid_ = arg.rowid; }; return *this; } FileRecordSQLite::Iterator& FileRecordSQLite::Iterator::operator--(void) { if(rowid_ == -1) return *this; FileRecordSQLite& frec((FileRecordSQLite&)frec_); std::unique_lock lock(frec.lock_); { std::string sqlcmd = "SELECT _rowid_,id,owner,uid,meta FROM rec WHERE (_rowid_ < " + Arc::tostring(rowid_) + ") ORDER BY _rowid_ DESC LIMIT 1"; FindCallbackRecArg arg; if(!frec.dberr("listlocks:get",frec.sqlite3_exec_nobusy(sqlcmd.c_str(), &FindCallbackRec, &arg, NULL))) { rowid_ = -1; return *this; }; if(arg.uid.empty()) { rowid_ = -1; return *this; }; uid_ = arg.uid; id_ = arg.id; owner_ = arg.owner; meta_ = arg.meta; rowid_ = arg.rowid; }; return *this; } void FileRecordSQLite::Iterator::suspend(void) { } bool FileRecordSQLite::Iterator::resume(void) { return true; } } // namespace ARex nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/README0000644000000000000000000000013115067751327017660 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.325767116 nordugrid-arc-7.1.1/src/services/a-rex/README0000644000175000002070000000004515067751327021562 0ustar00mockbuildmock00000000000000ARC job management service - A-REX. 
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/README0000644000000000000000000000013115067751327017660 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.325767116 nordugrid-arc-7.1.1/src/services/a-rex/README0000644000175000002070000000004515067751327021562 0ustar00mockbuildmock00000000000000
ARC job management service - A-REX.
nordugrid-arc-7.1.1/src/services/a-rex/PaxHeaders/PayloadFile.cpp0000644000000000000000000000013115067751327021675 xustar0030 mtime=1759498967.750197335 29 atime=1759498967.86149359 30 ctime=1759499029.339370306 nordugrid-arc-7.1.1/src/services/a-rex/PayloadFile.cpp0000644000175000002070000001425215067751327023604 0ustar00mockbuildmock00000000000000
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

// The header names below were lost in extraction; this list is reconstructed
// from what the code actually calls (open/close/lseek, fstat, mmap, perror,
// Arc::FileAccess) and may differ in order from the original.
#include <cstdio>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <arc/FileAccess.h>

#include "PayloadFile.h"

namespace ARex {

PayloadBigFile::Size_t PayloadBigFile::threshold_ = 1024*1024*10; // 10MB by default

PayloadFile::PayloadFile(const char* filename,Size_t start,Size_t end) {
  handle_=::open(filename,O_RDONLY);
  SetRead(handle_,start,end);
}

PayloadFile::PayloadFile(int h,Size_t start,Size_t end) {
  SetRead(h,start,end);
}

void PayloadFile::SetRead(int h,Size_t start,Size_t end) {
  handle_=h;
  start_=start;
  end_=end;
  addr_=(char*)MAP_FAILED;
  size_=0;
  if(handle_ == -1) return;
  struct stat st;
  if(fstat(handle_,&st) != 0) goto error;
  size_=st.st_size;
  if((end_ == ((off_t)-1)) || (end_ > size_)) {
    end_=size_;
  }
  if(start_ >= size_) {
    start_=size_;
    end_=start_;
    return;
  }
  if(size_ > 0) {
    addr_=(char*)mmap(NULL,size_,PROT_READ,MAP_SHARED,handle_,0);
    if(addr_ == (char*)MAP_FAILED) goto error;
  }
  return;
error:
  perror("PayloadFile");
  if(handle_ != -1) ::close(handle_);
  handle_=-1;
  size_=0;
  addr_=(char*)MAP_FAILED;
  return;
}

PayloadFile::~PayloadFile(void) {
  if(addr_ != (char*)MAP_FAILED) munmap(addr_,size_);
  if(handle_ != -1) ::close(handle_);
  handle_=-1;
  size_=0;
  addr_=(char*)MAP_FAILED;
  return;
}

char* PayloadFile::Content(Size_t pos) {
  if(handle_ == -1) return NULL;
  if(addr_ == (char*)MAP_FAILED) return NULL;
  if(pos >= end_) return NULL;
  if(pos < start_) return NULL;
  return (addr_+pos);
}

char PayloadFile::operator[](Size_t pos) const {
  if(handle_ == -1) return 0;
  if(addr_ == (char*)MAP_FAILED) return 0;
  if(pos >= end_) return 0;
  if(pos < start_) return 0;
  return addr_[pos];
}

PayloadFile::Size_t PayloadFile::Size(void) const {
  return size_;
}

char* PayloadFile::Insert(Size_t /*pos*/,Size_t /*size*/) {
  // Not supported
  return NULL;
}

char* PayloadFile::Insert(const char*,Size_t /*pos*/,Size_t /*size*/) {
  // Not supported
  return NULL;
}

char* PayloadFile::Buffer(unsigned int num) {
  if(handle_ == -1) return NULL;
  if(num>0) return NULL;
  if(addr_ == (char*)MAP_FAILED) return NULL;
  return addr_+start_;
}

PayloadFile::Size_t PayloadFile::BufferSize(unsigned int num) const {
  if(handle_ == -1) return 0;
  if(num>0) return 0;
  return (end_-start_);
}

PayloadFile::Size_t PayloadFile::BufferPos(unsigned int num) const {
  if(num == 0) return start_;
  return end_;
}

bool PayloadFile::Truncate(Size_t /*size*/) {
  // Not supported
  return false;
}

static int open_file_read(const char* filename) {
  return ::open(filename,O_RDONLY);
}

//static int open_file_write(const char* filename) {
//  return ::open(filename,O_WRONLY | O_CREAT,S_IRUSR | S_IWUSR);
//}

PayloadBigFile::PayloadBigFile(int h,Size_t start,Size_t end):
    PayloadStream(h) {
  seekable_ = false;
  if(handle_ == -1) return;
  ::lseek(handle_,start,SEEK_SET);
  limit_ = end;
}

PayloadBigFile::PayloadBigFile(const char* filename,Size_t start,Size_t end):
    PayloadStream(open_file_read(filename)) {
  seekable_ = false;
  if(handle_ == -1) return;
  ::lseek(handle_,start,SEEK_SET);
  limit_ = end;
}

//PayloadBigFile::PayloadBigFile(const char* filename,Size_t size):
//  PayloadStream(open_file_write(filename)){
//  seekable_ = false;
//}

PayloadBigFile::~PayloadBigFile(void) {
  if(handle_ != -1) ::close(handle_);
}
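// Design note: newFileRead() (below) picks between the two payload classes by
// size. Files up to PayloadBigFile::threshold_ (10MB by default, see above)
// are mmap()ed whole as a PayloadFile, so the HTTP layer can address the body
// as one contiguous buffer; anything larger becomes a PayloadBigFile, which
// keeps only an open descriptor and streams data through PayloadStream::Get(),
// bounding memory use at the cost of random access.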
Arc::PayloadStream::Size_t PayloadBigFile::Pos(void) const {
  if(handle_ == -1) return 0;
  return ::lseek(handle_,0,SEEK_CUR);
}

Arc::PayloadStream::Size_t PayloadBigFile::Size(void) const {
  if(handle_ == -1) return 0;
  struct stat st;
  if(fstat(handle_,&st) != 0) return 0;
  return st.st_size;
}

Arc::PayloadStream::Size_t PayloadBigFile::Limit(void) const {
  Size_t s = Size();
  if((limit_ == (off_t)(-1)) || (limit_ > s)) return s;
  return limit_;
}

bool PayloadBigFile::Get(char* buf,int& size) {
  if(handle_ == -1) return false;
  if(limit_ == (off_t)(-1)) return PayloadStream::Get(buf,size);
  Size_t cpos = Pos();
  if(cpos >= limit_) {
    size=0;
    return false;
  }
  if((cpos+size) > limit_) size=limit_-cpos;
  return PayloadStream::Get(buf,size);
}

PayloadFAFile::PayloadFAFile(Arc::FileAccess* h,Size_t start,Size_t end) {
  handle_ = h;
  if(handle_ == NULL) return;
  handle_->fa_lseek(start,SEEK_SET);
  limit_ = end;
}

PayloadFAFile::~PayloadFAFile(void) {
  if(handle_ != NULL) {
    handle_->fa_close();
    Arc::FileAccess::Release(handle_);
  };
}

Arc::PayloadStream::Size_t PayloadFAFile::Pos(void) const {
  if(handle_ == NULL) return 0;
  return handle_->fa_lseek(0,SEEK_CUR);
}

Arc::PayloadStream::Size_t PayloadFAFile::Size(void) const {
  if(handle_ == NULL) return 0;
  struct stat st;
  if(!handle_->fa_fstat(st)) return 0;
  return st.st_size;
}

Arc::PayloadStream::Size_t PayloadFAFile::Limit(void) const {
  Size_t s = Size();
  if((limit_ == (off_t)(-1)) || (limit_ > s)) return s;
  return limit_;
}

bool PayloadFAFile::Get(char* buf,int& size) {
  if(handle_ == NULL) return false;
  if(limit_ != (off_t)(-1)) {
    Size_t cpos = Pos();
    if(cpos >= limit_) {
      size=0;
      return false;
    }
    if((cpos+size) > limit_) size=limit_-cpos;
  };
  ssize_t l = handle_->fa_read(buf,size);
  if(l <= 0) {
    size=0;
    return false;
  }
  size = (int)l;
  return true;
}

Arc::MessagePayload* newFileRead(const char* filename,Arc::PayloadRawInterface::Size_t start,Arc::PayloadRawInterface::Size_t end) {
  int h = open_file_read(filename);
  return newFileRead(h,start,end);
}

Arc::MessagePayload* newFileRead(int h,Arc::PayloadRawInterface::Size_t start,Arc::PayloadRawInterface::Size_t end) {
  struct stat st;
  if(fstat(h,&st) != 0) return NULL;
  if(st.st_size > PayloadBigFile::Threshold()) {
    PayloadBigFile* f = new PayloadBigFile(h,start,end);
    if(!*f) { delete f; return NULL; };
    return f;
  }
  PayloadFile* f = new PayloadFile(h,start,end);
  if(!*f) { delete f; return NULL; };
  return f;
}

Arc::MessagePayload* newFileRead(Arc::FileAccess* h,Arc::PayloadRawInterface::Size_t start,Arc::PayloadRawInterface::Size_t end) {
  PayloadFAFile* f = new PayloadFAFile(h,start,end);
  return f;
}

} // namespace ARex
nordugrid-arc-7.1.1/src/services/PaxHeaders/examples0000644000000000000000000000013015067751427017525 xustar0030 mtime=1759499031.045453674 28 atime=1759499034.7655102 30 ctime=1759499031.045453674 nordugrid-arc-7.1.1/src/services/examples/0000755000175000002070000000000015067751427021506 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/examples/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327021636 xustar0029 mtime=1759498967.77432306 30 atime=1759498967.874493787 30 ctime=1759499031.042185811 nordugrid-arc-7.1.1/src/services/examples/Makefile.am0000644000175000002070000000020315067751327023536 0ustar00mockbuildmock00000000000000
if PYTHON_SERVICE
PYTHON_SERVICE = echo_python
else
PYTHON_SERVICE =
endif
SUBDIRS = $(PYTHON_SERVICE)
DIST_SUBDIRS = echo_python
nordugrid-arc-7.1.1/src/services/examples/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357021653
xustar0030 mtime=1759498991.010403191 30 atime=1759499019.492278123 30 ctime=1759499031.043223346 nordugrid-arc-7.1.1/src/services/examples/Makefile.in0000644000175000002070000006104515067751357023563 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/examples ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d 
CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ 
GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE 
= @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @PYTHON_SERVICE_FALSE@PYTHON_SERVICE = @PYTHON_SERVICE_TRUE@PYTHON_SERVICE = echo_python SUBDIRS = $(PYTHON_SERVICE) DIST_SUBDIRS = echo_python all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/examples/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/examples/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT:
nordugrid-arc-7.1.1/src/services/examples/PaxHeaders/echo_python0000644000000000000000000000013015067751427022044 xustar0030 mtime=1759499031.073576552 28 atime=1759499034.7655102 30 ctime=1759499031.073576552 nordugrid-arc-7.1.1/src/services/examples/echo_python/0000755000175000002070000000000015067751427024025 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/examples/echo_python/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327024156 xustar0030 mtime=1759498967.774823302 30 atime=1759498967.874493787 30 ctime=1759499031.070544644 nordugrid-arc-7.1.1/src/services/examples/echo_python/Makefile.am0000644000175000002070000000022615067751327026060 0ustar00mockbuildmock00000000000000
exampledir = $(pkgdatadir)/examples/echo_python
example_DATA = README EchoService.py __init__.py schema/echo_python.xsd
EXTRA_DIST = $(example_DATA)
nordugrid-arc-7.1.1/src/services/examples/echo_python/PaxHeaders/EchoService.py0000644000000000000000000000013215067751327024673 xustar0030 mtime=1759498967.774717864 30 atime=1759498967.874493787 30 ctime=1759499031.073576552 nordugrid-arc-7.1.1/src/services/examples/echo_python/EchoService.py0000644000175000002070000002155415067751327026604 0ustar00mockbuildmock00000000000000
import arc
import time

logger = arc.Logger(arc.Logger.getRootLogger(), 'EchoService.py')

wsrf_rp_ns = "http://docs.oasis-open.org/wsrf/rp-2"
echo_ns = "http://www.nordugrid.org/schemas/echo"

import threading

class EchoService(object):

    def __init__(self, cfg):
        logger.msg(arc.INFO, "EchoService (python) constructor called")
        # get the response-prefix from the config XML
        self.prefix = str(cfg.Get('prefix'))
        # get the response-suffix from the config XML
        self.suffix = str(cfg.Get('suffix'))
        logger.msg(arc.DEBUG, "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" % {'prefix': self.prefix, 'suffix': self.suffix})
        self.ssl_config = self.parse_ssl_config(cfg)
        thread_test = str(cfg.Get('ThreadTest'))
        if thread_test:
            threading.Thread(target = self.infinite, args=[thread_test]).start()

    def __del__(self):
        logger.msg(arc.INFO, "EchoService (python) destructor called")

    def parse_ssl_config(self, cfg):
        try:
            client_ssl_node = cfg.Get('ClientSSLConfig')
            fromFile = str(client_ssl_node.Attribute('FromFile'))
            if fromFile:
                try:
                    # open() replaces the Python 2-only file() builtin here
                    with open(fromFile) as f:
                        xml_string = f.read()
                    client_ssl_node = arc.XMLNode(xml_string)
                except:
                    # the original called an undefined log.msg(); report the failure instead
                    logger.msg(arc.WARNING, "Failed to read ClientSSLConfig from %s" % fromFile)
            if client_ssl_node.Size() == 0:
                return {}
            ssl_config = {}
            ssl_config['key_file'] = str(client_ssl_node.Get('KeyPath'))
            ssl_config['cert_file'] = str(client_ssl_node.Get('CertificatePath'))
            ca_file = str(client_ssl_node.Get('CACertificatePath'))
            if ca_file:
                ssl_config['ca_file'] = ca_file
            else:
                ssl_config['ca_dir'] = str(client_ssl_node.Get('CACertificatesDir'))
            return ssl_config
        except:
            import traceback
            logger.msg(arc.ERROR, traceback.format_exc())
            return {}

    def infinite(self, url):
        logger.msg(arc.INFO, "EchoService (python) thread test starting")
        i = 0
        while True:
            try:
                i += 1
                cfg = arc.MCCConfig()
                s = arc.ClientSOAP(cfg, arc.URL(url))
                ns = arc.NS('echo', echo_ns)
                outpayload = arc.PayloadSOAP(ns)
                outpayload.NewChild('echo:echo').NewChild('echo:say').Set('hi!')
                resp, status = s.process(outpayload)
                logger.msg(arc.INFO, "EchoService (python) thread test, iteration %(iteration)s %(status)s" % {'iteration': i, 'status': status})
                time.sleep(3)
            except Exception as e:
                import traceback
                logger.msg(arc.DEBUG, traceback.format_exc())
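    # A hypothetical service configuration block consumed by the constructor
    # and parse_ssl_config() above might look like this. The element names are
    # taken from the code and from schema/echo_python.xsd; the enclosing
    # <Service> element, its attributes, and all values are illustrative only:
    #
    #   <Service name="pythonservice" id="echo_python">
    #     <prefix>[ </prefix>
    #     <suffix> ]</suffix>
    #     <ThreadTest>http://localhost:60000/Echo</ThreadTest>
    #     <ClientSSLConfig>
    #       <KeyPath>/etc/grid-security/key.pem</KeyPath>
    #       <CertificatePath>/etc/grid-security/cert.pem</CertificatePath>
    #       <CACertificatesDir>/etc/grid-security/certificates</CACertificatesDir>
    #     </ClientSSLConfig>
    #   </Service>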
    def GetLocalInformation(self):
        ns = arc.NS({'':'http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01'})
        info = arc.XMLNode(ns,'Domains')
        service_node = info.NewChild('AdminDomain').NewChild('Services').NewChild('Service')
        service_node.NewChild('Type').Set('org.nordugrid.tests.echo_python')
        endpoint_node = service_node.NewChild('Endpoint')
        endpoint_node.NewChild('HealthState').Set('ok')
        endpoint_node.NewChild('ServingState').Set('production')
        return info

    def process(self, inmsg, outmsg):
        logger.msg(arc.DEBUG, "EchoService (python) 'Process' called")
        # time.sleep(10)
        # get the payload from the message
        inpayload = inmsg.Payload()
        logger.msg(arc.VERBOSE, 'inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s' % inmsg.Auth().Export(arc.SecAttr.ARCAuth).GetXML())
        logger.msg(arc.VERBOSE, 'inmsg.Attributes().getAll() = %s ' % inmsg.Attributes().getAll())
        logger.msg(arc.INFO, "EchoService (python) got: %s " % inpayload.GetXML())
        # the first child of the payload should be the name of the request
        request_node = inpayload.Child()
        # get the namespace
        request_namespace = request_node.Namespace()
        logger.msg(arc.DEBUG, "EchoService (python) request_namespace: %s" % request_namespace)
        if request_namespace != echo_ns:
            if request_namespace == wsrf_rp_ns:
                outpayload = arc.PayloadSOAP(arc.NS({'wsrf-rp':wsrf_rp_ns}))
                outpayload.NewChild('wsrf-rp:GetResourcePropertyDocumentResponse').NewChild(self.GetLocalInformation())
                outmsg.Payload(outpayload)
                logger.msg(arc.DEBUG, "outpayload %s" % outpayload.GetXML())
                return arc.MCC_Status(arc.STATUS_OK)
            raise Exception('wrong namespace. expected: %s' % echo_ns)
        # get the name of the request without the namespace prefix
        # this is the name of the Body node's first child
        request_name = request_node.Name()
        # create an answer payload
        ns = arc.NS({'echo': echo_ns})
        outpayload = arc.PayloadSOAP(ns)
        # here we defined that 'echo' prefix will be the namespace prefix of 'http://www.nordugrid.org/schemas/echo'
        # get the message
        say = str(request_node.Get('say'))
        # put it between the response-prefix and the response-suffix
        hear = self.prefix + say + self.suffix
        if request_name == 'double':
            # if the name of the request is 'double'
            # we create a new echo message which we send to http://localhost:60000/Echo using the ClientSOAP object
            cfg = arc.MCCConfig()
            ssl = False
            if self.ssl_config:
                cfg.AddCertificate(self.ssl_config.get('cert_file', None))
                cfg.AddPrivateKey(self.ssl_config.get('key_file', None))
                if 'ca_file' in self.ssl_config:
                    cfg.AddCAFile(self.ssl_config.get('ca_file', None))
                else:
                    cfg.AddCADir(self.ssl_config.get('ca_dir', None))
                ssl = True
            if ssl:
                url = arc.URL('https://localhost:60000/Echo')
                logger.msg(arc.DEBUG, 'Calling https://localhost:60000/Echo using ClientSOAP')
            else:
                url = arc.URL('http://localhost:60000/Echo')
                logger.msg(arc.DEBUG, 'Calling http://localhost:60000/Echo using ClientSOAP')
            # creating the ClientSOAP object
            s = arc.ClientSOAP(cfg, url)
            new_payload = arc.PayloadSOAP(ns)
            # creating the message
            new_payload.NewChild('echo:echo').NewChild('echo:say').Set(hear)
            logger.msg(arc.DEBUG, 'new_payload %s' % new_payload.GetXML())
            # sending the message
            resp, status = s.process(new_payload)
            # get the response
            hear = str(resp.Get('echoResponse').Get('hear'))
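        # For reference, the SOAP body built by the branches of process() has
        # roughly this shape (reconstructed from the NewChild() calls above;
        # the prefix spelling on the wire may differ):
        #
        #   <echo:echo xmlns:echo="http://www.nordugrid.org/schemas/echo">
        #     <echo:say>HELLO</echo:say>
        #   </echo:echo>
        #
        # and the reply produced at the end of process() mirrors it as
        # <echo:echoResponse><echo:hear>...</echo:hear></echo:echoResponse>.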
        elif request_name == 'httplib':
            # if the name of the request is 'httplib'
            # we create a new echo message which we send to http://localhost:60000/Echo using python's built-in http client
            try:
                import http.client as httplib
            except ImportError:
                import httplib
            logger.msg(arc.DEBUG, 'Calling http://localhost:60000/Echo using httplib')
            # create the connection
            h = httplib.HTTPConnection('localhost', 60000)
            new_payload = arc.PayloadSOAP(ns)
            # create the message
            new_payload.NewChild('echo:echo').NewChild('echo:say').Set(hear)
            logger.msg(arc.DEBUG, 'new_payload %s' % new_payload.GetXML())
            # send the message
            h.request('POST', '/Echo', new_payload.GetXML())
            r = h.getresponse()
            response = r.read()
            logger.msg(arc.DEBUG, response)
            resp = arc.XMLNode(response)
            # get the response
            hear = str(resp.Child().Get('echoResponse').Get('hear'))
        elif request_name == 'wait':
            logger.msg(arc.DEBUG, 'Start waiting 10 sec...')
            time.sleep(10)
            logger.msg(arc.DEBUG, 'Waiting ends.')
        # we create a node at '/echo:echoResponse/echo:hear' and put the string in it
        outpayload.NewChild('echo:echoResponse').NewChild('echo:hear').Set(hear)
        outmsg.Payload(outpayload)
        logger.msg(arc.DEBUG, "outpayload %s" % outpayload.GetXML())
        # return with STATUS_OK
        return arc.MCC_Status(arc.STATUS_OK)

# you can easily test this with this shellscript:
"""
MESSAGE='HELLO'
echo Request:
echo $MESSAGE
echo
echo Response:
curl -d "$MESSAGE" http://localhost:60000/Echo
echo
"""
#
nordugrid-arc-7.1.1/src/services/examples/echo_python/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357024172 xustar0030 mtime=1759498991.041584864 30 atime=1759499019.511278411 30 ctime=1759499031.071534819 nordugrid-arc-7.1.1/src/services/examples/echo_python/Makefile.in0000644000175000002070000005107015067751357026077 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?)
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/examples/echo_python ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| 
$$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(exampledir)" DATA = $(example_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ 
GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ 
SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ exampledir = $(pkgdatadir)/examples/echo_python example_DATA = README EchoService.py __init__.py schema/echo_python.xsd EXTRA_DIST = $(example_DATA) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/examples/echo_python/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/examples/echo_python/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(exampledir)'"; \ $(MKDIR_P) "$(DESTDIR)$(exampledir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(exampledir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-exampleDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-exampleDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exampleDATA install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-exampleDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/examples/echo_python/PaxHeaders/__init__.py0000644000000000000000000000013215067751327024233 xustar0030 mtime=1759498967.774823302 30 atime=1759498967.774823302 30 ctime=1759499031.074602022 nordugrid-arc-7.1.1/src/services/examples/echo_python/__init__.py0000644000175000002070000000000015067751327026123 0ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/examples/echo_python/PaxHeaders/schema0000644000000000000000000000013015067751427023304 xustar0030 mtime=1759499031.074602022 28 atime=1759499034.7655102 30 ctime=1759499031.074602022 nordugrid-arc-7.1.1/src/services/examples/echo_python/schema/0000755000175000002070000000000015067751427025265 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/examples/echo_python/schema/PaxHeaders/echo_python.xsd0000644000000000000000000000013215067751327026421 xustar0030 mtime=1759498967.774823302 30 atime=1759498967.874493787 30 ctime=1759499031.075599358 nordugrid-arc-7.1.1/src/services/examples/echo_python/schema/echo_python.xsd0000644000175000002070000000463515067751327030333 0ustar00mockbuildmock00000000000000 Prefix of the response string. Suffix of the response string. Defines the path of the XML file which contains TLS related client configuration. If this attribute is defined then the elements inside will be ignored. Container of TLS related client configuration Path of certificate private key Path of certificate Directory location of CA certificates nordugrid-arc-7.1.1/src/services/examples/echo_python/PaxHeaders/README0000644000000000000000000000013215067751327023002 xustar0030 mtime=1759498967.774823302 30 atime=1759498967.874493787 30 ctime=1759499031.072541774 nordugrid-arc-7.1.1/src/services/examples/echo_python/README0000644000175000002070000000011215067751327024676 0ustar00mockbuildmock00000000000000Simple test service to demonstrate how Python-based services should work. nordugrid-arc-7.1.1/src/services/PaxHeaders/ldap-infosys0000644000000000000000000000013015067751426020316 xustar0030 mtime=1759499030.578588987 28 atime=1759499034.7655102 30 ctime=1759499030.578588987 nordugrid-arc-7.1.1/src/services/ldap-infosys/0000755000175000002070000000000015067751426022277 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022431 xustar0030 mtime=1759498967.774823302 30 atime=1759498967.874493787 30 ctime=1759499030.573630909 nordugrid-arc-7.1.1/src/services/ldap-infosys/Makefile.am0000644000175000002070000000055315067751327024336 0ustar00mockbuildmock00000000000000if SYSV_SCRIPTS_ENABLED GRID_INFOSYS_SCRIPT = arc-infosys-ldap else GRID_INFOSYS_SCRIPT = endif initd_SCRIPTS = $(GRID_INFOSYS_SCRIPT) if SYSTEMD_UNITS_ENABLED GRID_INFOSYS_UNIT = arc-infosys-ldap.service arc-infosys-ldap-slapd.service else GRID_INFOSYS_UNIT = endif units_DATA = $(GRID_INFOSYS_UNIT) pkgdata_SCRIPTS = create-bdii-config create-slapd-config nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/Makefile.in0000644000000000000000000000013115067751357022444 xustar0030 mtime=1759498991.075502833 29 atime=1759499019.22227402 30 ctime=1759499030.574644543 nordugrid-arc-7.1.1/src/services/ldap-infosys/Makefile.in0000644000175000002070000006174215067751357024351 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/services/ldap-infosys ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = create-bdii-config create-slapd-config \ arc-infosys-ldap arc-infosys-ldap.service \ arc-infosys-ldap-slapd.service CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ 
esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" \ "$(DESTDIR)$(unitsdir)" SCRIPTS = $(initd_SCRIPTS) $(pkgdata_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DATA = $(units_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in \ $(srcdir)/arc-infosys-ldap-slapd.service.in \ $(srcdir)/arc-infosys-ldap.in \ $(srcdir)/arc-infosys-ldap.service.in \ $(srcdir)/create-bdii-config.in \ $(srcdir)/create-slapd-config.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ 
ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = 
@OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ 
sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @SYSV_SCRIPTS_ENABLED_FALSE@GRID_INFOSYS_SCRIPT = @SYSV_SCRIPTS_ENABLED_TRUE@GRID_INFOSYS_SCRIPT = arc-infosys-ldap initd_SCRIPTS = $(GRID_INFOSYS_SCRIPT) @SYSTEMD_UNITS_ENABLED_FALSE@GRID_INFOSYS_UNIT = @SYSTEMD_UNITS_ENABLED_TRUE@GRID_INFOSYS_UNIT = arc-infosys-ldap.service arc-infosys-ldap-slapd.service units_DATA = $(GRID_INFOSYS_UNIT) pkgdata_SCRIPTS = create-bdii-config create-slapd-config all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/services/ldap-infosys/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/services/ldap-infosys/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): create-bdii-config: $(top_builddir)/config.status $(srcdir)/create-bdii-config.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ create-slapd-config: $(top_builddir)/config.status $(srcdir)/create-slapd-config.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-infosys-ldap: $(top_builddir)/config.status $(srcdir)/arc-infosys-ldap.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-infosys-ldap.service: $(top_builddir)/config.status $(srcdir)/arc-infosys-ldap.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ arc-infosys-ldap-slapd.service: $(top_builddir)/config.status $(srcdir)/arc-infosys-ldap-slapd.service.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-initdSCRIPTS: $(initd_SCRIPTS) @$(NORMAL_INSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(initddir)'"; \ $(MKDIR_P) "$(DESTDIR)$(initddir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " 
$(INSTALL_SCRIPT) $$files '$(DESTDIR)$(initddir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(initddir)$$dir" || exit $$?; \ } \ ; done uninstall-initdSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(initd_SCRIPTS)'; test -n "$(initddir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(initddir)'; $(am__uninstall_files_from_dir) install-pkgdataSCRIPTS: $(pkgdata_SCRIPTS) @$(NORMAL_INSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgdatadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgdatadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(pkgdatadir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(pkgdatadir)$$dir" || exit $$?; \ } \ ; done uninstall-pkgdataSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(pkgdata_SCRIPTS)'; test -n "$(pkgdatadir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(pkgdatadir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-unitsDATA: $(units_DATA) @$(NORMAL_INSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(unitsdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(unitsdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(unitsdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(unitsdir)" || exit $$?; \ done uninstall-unitsDATA: @$(NORMAL_UNINSTALL) @list='$(units_DATA)'; test -n "$(unitsdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(unitsdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) $(DATA) installdirs: for dir in "$(DESTDIR)$(initddir)" "$(DESTDIR)$(pkgdatadir)" "$(DESTDIR)$(unitsdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-initdSCRIPTS install-pkgdataSCRIPTS \ install-unitsDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-initdSCRIPTS uninstall-pkgdataSCRIPTS \ uninstall-unitsDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-initdSCRIPTS install-man install-pdf install-pdf-am \ install-pkgdataSCRIPTS install-ps install-ps-am install-strip \ install-unitsDATA installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-initdSCRIPTS \ uninstall-pkgdataSCRIPTS uninstall-unitsDATA .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/create-bdii-config.in0000644000000000000000000000013215067751327024340 xustar0030 mtime=1759498967.775071557 30 atime=1759498967.874493787 30 ctime=1759499030.578588987 nordugrid-arc-7.1.1/src/services/ldap-infosys/create-bdii-config.in0000644000175000002070000002405415067751327026247 0ustar00mockbuildmock00000000000000#!/bin/bash # Define logging functions send_systemd_notify() { # return if no systemd-notify found type systemd-notify >/dev/null 2>&1 || return systemd-notify "$@" } log_failure_msg() { send_systemd_notify --status "Error: $@" echo $@ } # Create bdii config for the NorduGrid/ARC information system ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "arc.conf is missing at path: $ARC_CONFIG or no ARC_LOCATION is set" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi fi # Define runtime config location for infosys LDAP prefix=@prefix@ runtime_config_dir=/run/arc if [ ! -d "$runtime_config_dir" ]; then mkdir -p "$runtime_config_dir" fi export ARC_RUNCONFIG="$runtime_config_dir/arc-infosys-ldap.conf" unset runtime_config_dir unset prefix # Define arcconfig-parser and dump running configuration arcconfig_parser=${ARC_LOCATION}/@pkglibexecsubdir@/arcconfig-parser ${arcconfig_parser} -c ${ARC_CONFIG} --save -r ${ARC_RUNCONFIG} # Check for infosys block if ! ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys; then log_failure_msg "Missing [infosys] configuration block" exit 1 fi # Check for infosys/ldap block if ! ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/ldap; then log_failure_msg "Missing [infosys/ldap] configuration block" exit 1 fi eval $(${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/ldap -b infosys -b common -e bash) bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then echo "Warning, could not find ldap or openldap user" echo "resorting to using the root user" bdii_user=root fi fi # These values may be set in arc.conf, otherwise use sensible defaults providerlog=${CONFIG_logfile:-/var/log/arc/infoprovider.log} bdii_location=${CONFIG_bdii_location:-/usr} bdii_update_cmd=${CONFIG_bdii_update_cmd:-${bdii_location}/sbin/bdii-update} if [ ! -x $bdii_update_cmd ]; then log_failure_msg "Can not find bdii-update command at: $bdii_update_cmd." 
echo "Please set bdii_update_cmd in arc.conf" exit 1 fi infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/run/arc/infosys} mkdir -p ${infosys_ldap_run_dir} chown ${bdii_user}: ${infosys_ldap_run_dir} # Put BDII update helper to known directory helpers_dir=$infosys_ldap_run_dir if [ -n "$FORCE_ARC_RUNDIR" ]; then helpers_dir="${FORCE_ARC_RUNDIR}/infosys" mkdir -p "${helpers_dir}" fi bdii_update_exechelper_cmd=${helpers_dir}/bdii-update.cmd rm -f $bdii_update_exechelper_cmd bdii_update_posthelper_cmd=${helpers_dir}/bdii-update-post.cmd rm -f $bdii_update_posthelper_cmd bdii_debug_level=${CONFIG_bdii_debug_level:-WARNING} bdii_tmp_dir=${CONFIG_bdii_tmp_dir:-/var/tmp/arc/bdii} bdii_var_dir=${CONFIG_bdii_var_dir:-/var/lib/arc/bdii} bdii_run_dir=${CONFIG_bdii_run_dir:-/run/arc/bdii} bdii_log_dir=${CONFIG_bdii_log_dir:-/var/log/arc/bdii} bdii_log_file="${bdii_log_dir}/bdii-update.log" bdii_slapd_conf=${infosys_ldap_run_dir}/bdii-slapd.conf bdii_default_ldif=${bdii_tmp_dir}/provider/arc-default.ldif.pl bdii_ldif_dir=${bdii_tmp_dir}/ldif bdii_provider_dir=${bdii_tmp_dir}/provider bdii_plugin_dir=${bdii_tmp_dir}/plugin bdii_port=${CONFIG_port:-2135} # Using uppercase characters in bdii_bind will break infosys. bdii_bind="o=grid" # $bdii_provider_timeout refers to the time bdii waits for the provider output to complete. bdii_provider_timeout=${CONFIG_bdii_provider_timeout:-10800} # $infoproviders_timelimit is a-rex's infoproviders timeout. infoproviders_timelimit=$(${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b arex -o infoproviders_timelimit) infoproviders_timelimit=${infoproviders_timelimit:-10800} # $wakeupperiod is the time a-rex waits before running infoproviders again. wakeupperiod=$(${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b arex -o wakeupperiod) wakeupperiod=${wakeupperiod:-120} bdii_archive_size=${CONFIG_bdii_archive_size:-0} # The infoprovider does the waiting, no need for BDII to do it too. Use # some small timeout to protect the system in case there is a problem with # the provier bdii_breathe_time=${CONFIG_bdii_breathe_time:-10} # max_cycle is the time bdii will trust the content of any provider to be fresh enough max_cycle=$(( $bdii_provider_timeout + $infoproviders_timelimit + $wakeupperiod )) bdii_read_timeout=${CONFIG_bdii_read_timeout:-$max_cycle} bdii_delete_delay=${CONFIG_bdii_delete_delay:-0} # PIDFile location handling update_pid_file=$( readlink -m ${CONFIG_bdii_update_pid_file:-$bdii_run_dir/bdii-update.pid} ) # forced pidfile location instead of arc.conf-based (if FORCE_ARC_RUNDIR is set) if [ -n "$FORCE_ARC_RUNDIR" ]; then pid_dir="${FORCE_ARC_RUNDIR}/bdii" mkdir -p "$pid_dir" chown -R ${bdii_user}: "$pid_dir" pid_file="$( readlink -m ${pid_dir}/bdii-update.pid )" if [ "x${update_pid_file}" != "x${pid_file}" ]; then custom_pid_file="${update_pid_file}" rm -f "${custom_pid_file}" update_pid_file="${pid_file}" fi unset pid_dir pid_file fi rm -f "${update_pid_file}" # Debian does not have /run/lock/subsys if [ -d /run/lock/subsys ]; then update_lock_file=${update_lock_file:-/run/lock/subsys/arc-bdii-update} else update_lock_file=${update_lock_file:-/run/lock/arc-bdii-update} fi # Check directories and permissions mkdir -p `dirname $providerlog` touch ${providerlog} chown ${bdii_user}: ${providerlog} mkdir -p $bdii_log_dir chown -R ${bdii_user}: ${bdii_log_dir} if ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/nordugrid || \ ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/glue2/ldap; then if [ ! 
-f "$ARC_LOCATION/@pkgdatasubdir@/InfosysHelper.pm" ]; then log_failure_msg "InfosysHelper.pm not found. Is A-REX installed?" echo "For operation without A-REX, disable publishing of cluster information" echo "([infosys/nordugrid] and [infosys/glue2/ldap])" exit 1 fi fi BDII_CONF=${CONFIG_bdii_conf:-${infosys_ldap_run_dir}/bdii.conf} # Create directories for storing temporary scripts and check permissions etc mkdir -p $bdii_var_dir mkdir -p $bdii_run_dir mkdir -p $bdii_tmp_dir mkdir -p $bdii_tmp_dir/ldif mkdir -p $bdii_tmp_dir/provider mkdir -p $bdii_tmp_dir/plugin # change permissions if user is not root chown -R ${bdii_user}: ${bdii_var_dir} chown -R ${bdii_user}: ${bdii_run_dir} chown -R ${bdii_user}: ${bdii_tmp_dir} # Generate bdii configuration rm -f ${BDII_CONF} cat <<-EOF >> ${BDII_CONF} # This file was automatically generated by $0 # Do not modify BDII_LOG_FILE=$bdii_log_file BDII_PID_FILE=$update_pid_file BDII_LOG_LEVEL=$bdii_debug_level BDII_LDIF_DIR=$bdii_ldif_dir BDII_PROVIDER_DIR=$bdii_provider_dir BDII_PLUGIN_DIR=$bdii_plugin_dir BDII_PORT=$bdii_port BDII_BREATHE_TIME=$bdii_breathe_time BDII_READ_TIMEOUT=$bdii_read_timeout BDII_ARCHIVE_SIZE=$bdii_archive_size BDII_DELETE_DELAY=$bdii_delete_delay BDII_USER=$bdii_user BDII_VAR_DIR=$bdii_var_dir BDII_RUN_DIR=$bdii_run_dir BDII_BIND=$bdii_bind SLAPD_CONF=$bdii_slapd_conf EOF # Generate default ldif cat <<-EOF > $bdii_default_ldif #!/usr/bin/perl # This file was automatically generated by $0 # Do not modify use POSIX; print "\n"; print "dn: o=grid\n"; print "objectClass: organization\n"; print "o: grid\n"; print "\n"; print "dn: Mds-Vo-name=local,o=grid\n"; print "objectClass: Mds\n"; print "Mds-Vo-name: local\n"; print "Mds-validfrom: " . strftime("%Y%m%d%H%M%SZ\n", gmtime()); print "Mds-validto: " . strftime("%Y%m%d%H%M%SZ\n", gmtime(time() + 3600)); print "\n"; print "dn: Mds-Vo-name=resource,o=grid\n"; print "objectClass: Mds\n"; print "Mds-Vo-name: resource\n"; print "Mds-validfrom: " . strftime("%Y%m%d%H%M%SZ\n", gmtime()); print "Mds-validto: " . strftime("%Y%m%d%H%M%SZ\n", gmtime(time() + 3600)); print "\n"; print "dn: o=glue\n"; print "objectClass: organization\n"; print "o: glue\n"; EOF chmod +x $bdii_default_ldif # Create ARC ldif generator file ldif_generator_file=${bdii_tmp_dir}/provider/arc-nordugrid-bdii-ldif rm -f ${ldif_generator_file} touch ${ldif_generator_file} ldif_script=${infosys_ldap_run_dir}/ldif-provider.sh cat <<-EOF > ${ldif_generator_file} #!/usr/bin/perl # This file was automatically generated by the $0 # Do not modify EOF # NG and GLUE2 come directly from a-rex infoprovider cat <<-EOF >> ${ldif_generator_file} BEGIN { unshift @INC, '$ARC_LOCATION/@pkgdatasubdir@'; } use InfosysHelper; exit 1 unless InfosysHelper::ldifIsReady('$infosys_ldap_run_dir', '$max_cycle'); EOF if ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/nordugrid || \ ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/glue2/ldap; then echo "system('$ldif_script');" >> ${ldif_generator_file} fi chmod +x ${ldif_generator_file} # Helper script to start BDII Update switching to BDII user if [ -x /sbin/runuser ]; then RUNUSER=runuser else RUNUSER=su fi USERSHELL=${USERSHELL:-"/bin/sh"} if [ ! 
-x ${USERSHELL} ]; then log_failure_msg "Could not find ${USERSHELL}" exit 1 fi cat <<-EOF > ${bdii_update_exechelper_cmd} if [ \$( id -u ) = 0 ]; then exec $RUNUSER -s "$USERSHELL" -c "${bdii_update_cmd} -c ${BDII_CONF} -d" ${bdii_user} else exec ${bdii_update_cmd} -c ${BDII_CONF} -d fi EOF cat <<-EOF > ${bdii_update_posthelper_cmd} iterlimit=30 while [ \$iterlimit -ge 0 ] && ! [ -r "${update_pid_file}" ]; do sleep 1 iterlimit=\$(expr \$iterlimit - 1) done EOF # copy forced pidfile to custom arc.conf pidfile (if needed) if [ -n "${custom_pid_file}" ]; then echo "mkdir -p \"${custom_pid_file%/*}\"" >> ${bdii_update_posthelper_cmd} echo "cp -a \"${update_pid_file}\" \"${custom_pid_file}\"" >> ${bdii_update_posthelper_cmd} fi nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/arc-infosys-ldap.service.in0000644000000000000000000000013215067751327025537 xustar0030 mtime=1759498967.775071557 30 atime=1759498967.874493787 30 ctime=1759499030.577614708 nordugrid-arc-7.1.1/src/services/ldap-infosys/arc-infosys-ldap.service.in0000644000175000002070000000077415067751327027451 0ustar00mockbuildmock00000000000000[Unit] Description=ARC LDAP-based information services - BDII-Update Requires=arc-infosys-ldap-slapd.service After=arc-infosys-ldap-slapd.service BindsTo=arc-infosys-ldap-slapd.service [Service] Type=forking Environment=FORCE_ARC_RUNDIR=/run/arc PIDFile=/run/arc/bdii/bdii-update.pid ExecStartPre=@prefix@/@pkgdatasubdir@/create-bdii-config ExecStart=/bin/sh /run/arc/infosys/bdii-update.cmd ExecStartPost=/bin/sh /run/arc/infosys/bdii-update-post.cmd NotifyAccess=all [Install] WantedBy=multi-user.target nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/arc-infosys-ldap-slapd.service.in0000644000000000000000000000013215067751327026640 xustar0030 mtime=1759498967.775071557 30 atime=1759498967.874493787 30 ctime=1759499030.575647604 nordugrid-arc-7.1.1/src/services/ldap-infosys/arc-infosys-ldap-slapd.service.in0000644000175000002070000000071315067751327030543 0ustar00mockbuildmock00000000000000[Unit] Description=ARC LDAP-based information services - SLAPD After=network.target network-online.target nss-lookup.target PartOf=arc-infosys-ldap.service StopWhenUnneeded=true [Service] Type=forking Environment=FORCE_ARC_RUNDIR=/run/arc PIDFile=/run/arc/bdii/db/slapd.pid ExecStartPre=@prefix@/@pkgdatasubdir@/create-slapd-config ExecStart=/bin/sh /run/arc/infosys/bdii-slapd.cmd ExecStartPost=/bin/sh /run/arc/infosys/bdii-slapd-post.cmd NotifyAccess=all nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/arc-infosys-ldap.in0000644000000000000000000000013215067751327024100 xustar0030 mtime=1759498967.775071557 30 atime=1759498967.874493787 30 ctime=1759499030.576624128 nordugrid-arc-7.1.1/src/services/ldap-infosys/arc-infosys-ldap.in0000644000175000002070000002210415067751327026001 0ustar00mockbuildmock00000000000000#!/bin/bash # # Init file for the NorduGrid/ARC LDAP based local resource information system # # chkconfig: 2345 76 24 # description: NorduGrid/ARC local resource information system # # config: /etc/sysconfig/nordugrid # config: /etc/sysconfig/arc-infosys-ldap # config: /etc/arc.conf # ###################################################################### ### BEGIN INIT INFO # Provides: arc-infosys-ldap # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: NorduGrid/ARC local resource information system # Description: NorduGrid/ARC LDAP based local resource information system ### END INIT INFO # 
Helper functions if [ -r /etc/init.d/functions ]; then . /etc/init.d/functions log_success_msg() { echo -n "$@" success "$@" echo } log_warning_msg() { echo -n "$@" warning "$@" echo } log_failure_msg() { echo -n "$@" failure "$@" echo } elif [ -r /lib/lsb/init-functions ]; then . /lib/lsb/init-functions else echo "Error: Cannot source either init.d or lsb functions" exit 1 fi RETVAL=0 prog=arc-infosys-ldap RUN=yes # sysconfig files if [ -r /etc/sysconfig/nordugrid ]; then . /etc/sysconfig/nordugrid elif [ -r /etc/default/nordugrid ]; then . /etc/default/nordugrid fi if [ -r /etc/sysconfig/$prog ]; then . /etc/sysconfig/$prog elif [ -r /etc/default/$prog ]; then . /etc/default/$prog fi if [ "x$RUN" != "xyes" ]; then log_warning_msg "$prog disabled, please adjust the configuration to your" log_warning_msg "needs and then set RUN to 'yes' in /etc/default/$prog to enable it." exit 0 fi [ -n "$ARC_LOCATION" ] && export ARC_LOCATION [ -n "$ARC_CONFIG" ] && export ARC_CONFIG ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # Define arc.conf location # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "arc.conf is missing at path: $ARC_CONFIG or no ARC_LOCATION is set" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi fi # Define runtime config location for infosys LDAP prefix=@prefix@ runtime_config_dir=/run/arc if [ ! -d "$runtime_config_dir" ]; then mkdir -p "$runtime_config_dir" fi export ARC_RUNCONFIG="$runtime_config_dir/arc-infosys-ldap.conf" unset runtime_config_dir unset prefix # Define arcconfig-parser and dump running configuration arcconfig_parser=${ARC_LOCATION}/@pkglibexecsubdir@/arcconfig-parser ${arcconfig_parser} -c ${ARC_CONFIG} --save -r ${ARC_RUNCONFIG} # Check for infosys block if ! ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys; then log_failure_msg "Missing [infosys] configuration block" exit 1 fi # Check for infosys/ldap block if ! ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/ldap; then log_failure_msg "Missing [infosys/ldap] configuration block" exit 1 fi eval $(${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/ldap -b infosys -b common -e bash) bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then log_warning_msg "Warning, could not find ldap or openldap user" log_warning_msg "resorting to using the root user" bdii_user=root fi fi bdii_location=${CONFIG_bdii_location:-/usr} bdii_update_cmd=${CONFIG_bdii_update_cmd:-${bdii_location}/sbin/bdii-update} if [ ! -x $bdii_update_cmd ]; then log_failure_msg "Can not find bdii-update command at: $bdii_update_cmd."
log_failure_msg "Please set bdii_update_cmd in arc.conf" exit 1 fi bdii_run_dir=${CONFIG_bdii_run_dir:-/run/arc/bdii} slapd_pid_file=${bdii_run_dir}/db/slapd.pid update_pid_file=${CONFIG_bdii_update_pid_file:-$bdii_run_dir/bdii-update.pid} if [ `id -u` = 0 ]; then # Debian does not have /run/lock/subsys if [ -d /run/lock/subsys ]; then slapd_lock_file=${slapd_lock_file:-/run/lock/subsys/$prog-slapd} update_lock_file=${update_lock_file:-/run/lock/subsys/$prog-bdii} else slapd_lock_file=${slapd_lock_file:-/run/lock/$prog-slapd} update_lock_file=${update_lock_file:-/run/lock/$prog-bdii} fi else slapd_lock_file=$HOME/$prog-slapd update_lock_file=$HOME/$prog-bdii fi infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/run/arc/infosys} BDII_CONF=${CONFIG_bdii_conf:-${infosys_ldap_run_dir}/bdii.conf} start () { if [ -r "${slapd_lock_file}" ] || [ -r "${update_lock_file}" ]; then result=$($0 status) if [ $? -gt 0 ]; then echo ${result} 1>&2 RETVAL=1 else log_success_msg "$prog already started" RETVAL=0 fi return ${RETVAL} fi ${ARC_LOCATION}/@pkgdatasubdir@/create-slapd-config if [ ! $? = 0 ]; then log_failure_msg "Failed to create configuration for $prog slapd" exit 1 fi ${ARC_LOCATION}/@pkgdatasubdir@/create-bdii-config if [ ! $? = 0 ]; then log_failure_msg "Failed to create configuration for $prog bdii" exit 1 fi # path to generated helper scripts is hardcoded (in both systemd unit and sysV scripts) /bin/sh ${infosys_ldap_run_dir}/bdii-slapd.cmd touch ${slapd_lock_file} /bin/sh ${infosys_ldap_run_dir}/bdii-slapd-post.cmd if ! [ -r "${slapd_pid_file}" ]; then log_failure_msg "$prog slapd failed to start" rm -f ${slapd_lock_file} RETVAL=1 return ${RETVAL} fi /bin/sh ${infosys_ldap_run_dir}/bdii-update.cmd touch ${update_lock_file} /bin/sh ${infosys_ldap_run_dir}/bdii-update-post.cmd if [ ! -r ${update_pid_file} ]; then log_failure_msg "$prog bdii failed to start" rm -f ${update_lock_file} RETVAL=1 return ${RETVAL} fi log_success_msg "$prog started" } stop () { if [ ! -r "${slapd_lock_file}" ] && [ ! -r "${update_lock_file}" ]; then log_success_msg "$prog already stopped" RETVAL=0 return ${RETVAL} fi if [ -r "${update_pid_file}" ]; then update_pid=$(cat ${update_pid_file}) ps ${update_pid} >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "$prog bdii pid file exists but the process died" RETVAL=1 return ${RETVAL} fi fi killall -u ${bdii_user} -15 arc-nordugrid-bdii-ldif 2>/dev/null if [ -n "${update_pid}" ]; then kill -15 ${update_pid} 2>/dev/null ps ${update_pid} >/dev/null 2>&1 if [ $? = 0 ]; then sleep 2 ps ${update_pid} >/dev/null 2>&1 if [ $? = 0 ]; then kill -9 ${update_pid} 2>/dev/null sleep 2 ps ${update_pid} >/dev/null 2>&1 if [ $? = 0 ]; then log_failure_msg "Could not kill $prog bdii with pid ${update_pid}" RETVAL=1 return ${RETVAL} fi fi fi fi # Clean up rm -f ${infosys_ldap_run_dir}/arc-glue-bdii-ldif rm -f ${update_pid_file} rm -f ${update_lock_file} log_success_msg "$prog bdii stopped" if [ -r "${slapd_pid_file}" ]; then slapd_pid=$(cat ${slapd_pid_file}) ps ${slapd_pid} >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "$prog slapd pid file exists but the process died" RETVAL=1 return ${RETVAL} fi fi if [ -n "${slapd_pid}" ]; then kill -15 ${slapd_pid} 2>/dev/null ps ${slapd_pid} >/dev/null 2>&1 if [ $? = 0 ]; then sleep 2 ps ${slapd_pid} >/dev/null 2>&1 if [ $? = 0 ]; then kill -9 ${slapd_pid} 2>/dev/null sleep 2 ps ${slapd_pid} >/dev/null 2>&1 if [ $? 
= 0 ]; then log_failure_msg "Could not stop $prog slapd with pid: $slapd_pid" RETVAL=1 return ${RETVAL} fi fi fi fi rm -f ${slapd_pid_file} rm -f ${slapd_lock_file} log_success_msg "$prog slapd stopped" return ${RETVAL} } status () { if [ ! -r "${slapd_lock_file}" ] && [ ! -r "${update_lock_file}" ]; then log_success_msg "$prog is stopped" RETVAL=3 return ${RETVAL} fi if [ -r ${slapd_pid_file} ]; then ps $(cat ${slapd_pid_file}) >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "ARIS slapd pid file exists but the process died" RETVAL=1 fi else log_failure_msg "ARIS slapd process has no pid file" RETVAL=2 fi if [ -r ${update_pid_file} ]; then ps $(cat ${update_pid_file}) >/dev/null 2>&1 if [ ! $? = 0 ]; then log_failure_msg "ARIS bdii pid file exists but the process died" RETVAL=1 fi else log_failure_msg "ARIS BDII process has no pid file" RETVAL=2 fi if [ ${RETVAL} = 0 ]; then log_success_msg "$prog is running" fi return ${RETVAL} } case "$1" in start) start ;; stop) stop ;; restart | force-reload) stop # avoid race sleep 3 start ;; reload) ;; status) status ;; condrestart | try-restart) if [ -r ${slapd_lock_file} ] || [ -r ${update_lock_file} ]; then stop # avoid race sleep 3 start fi ;; *) echo "Usage: $0 {start|stop|restart|force-reload|reload|condrestart|try-restart|status}" exit 1 ;; esac exit $RETVAL nordugrid-arc-7.1.1/src/services/ldap-infosys/PaxHeaders/create-slapd-config.in0000644000000000000000000000013215067751327024534 xustar0030 mtime=1759498967.775071557 30 atime=1759498967.874493787 30 ctime=1759499030.579578625 nordugrid-arc-7.1.1/src/services/ldap-infosys/create-slapd-config.in0000644000175000002070000003435715067751327026452 0ustar00mockbuildmock00000000000000#!/bin/bash # Define logging functions send_systemd_notify() { # return if no systemd-notify found type systemd-notify >/dev/null 2>&1 || return systemd-notify "$@" } log_failure_msg() { send_systemd_notify --status "Error: $@" echo $@ } # Create slapd config for the NorduGrid/ARC information system ARC_LOCATION=${ARC_LOCATION:-@prefix@} if [ ! -d "$ARC_LOCATION" ]; then log_failure_msg "ARC_LOCATION ($ARC_LOCATION) not found" exit 1 fi # ARC_CONFIG if [ "x$ARC_CONFIG" = "x" ]; then if [ -r $ARC_LOCATION/etc/arc.conf ]; then ARC_CONFIG=$ARC_LOCATION/etc/arc.conf elif [ -r /etc/arc.conf ]; then ARC_CONFIG=/etc/arc.conf fi if [ ! -r "$ARC_CONFIG" ]; then log_failure_msg "arc.conf is missing at path: $ARC_CONFIG or no ARC_LOCATION is set" log_failure_msg "If this file is in a non-standard place it can be set" log_failure_msg " with the ARC_CONFIG environment variable" exit 1 fi fi # Define runtime config location for infosys LDAP prefix=@prefix@ runtime_config_dir=/run/arc if [ ! -d "$runtime_config_dir" ]; then mkdir -p "$runtime_config_dir" fi export ARC_RUNCONFIG="$runtime_config_dir/arc-infosys-ldap.conf" unset runtime_config_dir unset prefix # Define arcconfig-parser and dump running configuration arcconfig_parser=${ARC_LOCATION}/@pkglibexecsubdir@/arcconfig-parser ${arcconfig_parser} -c ${ARC_CONFIG} --save -r ${ARC_RUNCONFIG} # Check for infosys block if ! ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys; then log_failure_msg "Missing [infosys] configuration block" exit 1 fi # Check for infosys/ldap block if ! 
${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/ldap; then log_failure_msg "Missing [infosys/ldap] configuration block" exit 1 fi eval $(${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/ldap -b infosys -b common -e bash) bdii_user=$CONFIG_user if [ -z "$bdii_user" ]; then # Get ldap user from passwd bdii_user=`getent passwd ldap openldap | sed 's/:.*//;q'` if [ -z "$bdii_user" ]; then echo "Warning, could not find ldap or openldap user" echo "resorting to using the root user" bdii_user=root fi fi # These values may be set in arc.conf, otherwise use sensible defaults slapd_loglevel=${CONFIG_slapd_loglevel:-0} slapd_hostnamebind=${CONFIG_slapd_hostnamebind:-"*"} slapd_port=${CONFIG_port:-2135} ldap_schema_dir=${CONFIG_ldap_schema_dir} threads=${CONFIG_threads:-32} timelimit=${CONFIG_timelimit:-2400} bdii_location=${CONFIG_bdii_location:-/usr} infosys_ldap_run_dir=${CONFIG_infosys_ldap_run_dir:-/run/arc/infosys} mkdir -p ${infosys_ldap_run_dir} chown ${bdii_user}: ${infosys_ldap_run_dir} bdii_var_dir=${CONFIG_bdii_var_dir:-/var/lib/arc/bdii} bdii_run_dir=${CONFIG_bdii_run_dir:-/run/arc/bdii} # PIDFile location handling slapd_pid_file=$( readlink -m ${bdii_run_dir}/db/slapd.pid ) # forced pidfile location instead of arc.conf-based (if FORCE_ARC_RUNDIR is set) if [ -n "$FORCE_ARC_RUNDIR" ]; then pid_dir="${FORCE_ARC_RUNDIR}/bdii/db" mkdir -p "$pid_dir" chown -R ${bdii_user}: "$pid_dir" pid_file="$( readlink -m ${pid_dir}/slapd.pid )" if [ "x${slapd_pid_file}" != "x${pid_file}" ]; then custom_pid_file="${slapd_pid_file}" rm -f "$custom_pid_file" slapd_pid_file="${pid_file}" fi unset pid_dir pid_file fi rm -f "${slapd_pid_file}" bdii_db_config=${CONFIG_bdii_db_config:-"/etc/bdii/DB_CONFIG"} # Check for existence of core ldap schema coreschema=$(find /etc/openldap /etc/ldap ${ldap_schema_dir} -name core.schema \ -printf "%h/%f\n" 2>/dev/null) if [ "x" = "x$coreschema" ]; then log_failure_msg "Could not find ldap core schema file" exit 1 fi # Check for existence of Glue schemas. glueschemadir=$(find /etc/openldap /etc/ldap ${ldap_schema_dir} -name Glue-CORE.schema \ -printf "%h\n" 2>/dev/null) if [ "x" = "x$glueschemadir" ]; then log_failure_msg "Could not find glue schema directory under /etc" exit 1 fi # Check for existence of a system ldap; this command will be used by bdii slapd_cmd= if [ "x" = "x$CONFIG_slapd" ]; then O_IFS=$IFS IFS=: for dir in $PATH; do if [ -x "$dir/slapd" ]; then slapd_cmd="$dir/slapd" break fi done IFS=$O_IFS else slapd_cmd=$CONFIG_slapd fi if [ -z "$slapd_cmd" ] || [ ! -x "$slapd_cmd" ]; then log_failure_msg "Could not find ldap server binary, usually /usr/sbin/slapd" exit 1 fi slapd_ver=$($slapd_cmd -VV 2>&1 | head -1 | sed 's!.*slapd \([0-9.]*\).*!\1!') slapd_ver_maj=$(echo $slapd_ver | cut -d. -f1) slapd_ver_min=$(echo $slapd_ver | cut -d.
-f2) if [ $slapd_ver_maj -gt 2 ] || ( [ $slapd_ver_maj -eq 2 ] && [ $slapd_ver_min -ge 5 ] ) ; then bdii_database=mdb else bdii_database=hdb fi bdii_database=${CONFIG_bdii_database:-$bdii_database} has_cachesize= db_grid_maxsize= if [ $bdii_database = 'mdb' ] ; then has_cachesize='# ' db_grid_maxsize="maxsize 104857600" fi find_ldap_database_module() { # First try to find a separate module ldapdir=$(find /usr/lib64/openldap /usr/lib/openldap /usr/lib64/ldap \ /usr/lib/ldap -name "back_${database}.la" -printf ":%h/" 2>/dev/null) if [ -n "$ldapdir" ]; then # Separate module found ldapmodule="moduleload back_${database}" grep -E -q "${ldapdir}(:|$)" <<< ${ldapdirs} || \ ldapdirs=${ldapdirs}${ldapdir} else # Separate module not found - check for preloaded module ldapmodule= if [ $(grep -Ec "${database}_db_init|${database}_back_db_init" "$slapd_cmd") -eq 0 ]; then # Module not found database= fi fi } find_ldap_overlay_module() { # Try to find a separate module ldapdir=$(find /usr/lib64/openldap /usr/lib/openldap /usr/lib64/ldap \ /usr/lib/ldap -name "${overlay}.la" -printf ":%h/" 2>/dev/null) if [ -n "$ldapdir" ]; then # Separate module found ldapmodule="moduleload ${overlay}" grep -E -q "${ldapdir}(:|$)" <<< ${ldapdirs} || \ ldapdirs=${ldapdirs}${ldapdir} else # Module not found ldapmodule= overlay= fi } ldapdirs= database=${bdii_database} find_ldap_database_module if [ -z "${database}" ]; then log_failure_msg "Could not find ldap ${bdii_database} database module" exit 1 fi moduleload_bdii="${ldapmodule}" database=relay find_ldap_database_module if [ -z "${database}" ]; then echo "Could not find ldap relay database module, top-bdii integration is disabled." fi moduleload_relay="${ldapmodule}" overlay=rwm find_ldap_overlay_module if [ -z "$overlay" ]; then echo "Could not find ldap rwm overlay module, top-bdii integration is disabled." fi moduleload_rwm="${ldapmodule}" if ${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/index; then database=shell find_ldap_database_module if [ -z "${database}" ]; then log_failure_msg "Could not find ldap shell database module" exit 1 fi moduleload_shell="${ldapmodule}" else moduleload_shell= fi ldapdirs=`sed 's/^://' <<< $ldapdirs` #ldapdirs=`sed 's/:$//' <<< $ldapdirs` if [ -n "$ldapdirs" ]; then modulepath="modulepath $ldapdirs" else modulepath= fi for i in "/etc/bdii/BDII.schema" "${bdii_location}/etc/BDII.schema"; do if [ -r $i ]; then bdii_schema="include $i" break fi done bdii_slapd_conf=${infosys_ldap_run_dir}/bdii-slapd.conf rm -f $bdii_slapd_conf # Put SLAPD start helpers to known directory helpers_dir=$infosys_ldap_run_dir if [ -n "$FORCE_ARC_RUNDIR" ]; then helpers_dir="${FORCE_ARC_RUNDIR}/infosys" mkdir -p "${helpers_dir}" fi bdii_slapd_cmd=${helpers_dir}/bdii-slapd.cmd rm -f $bdii_slapd_cmd bdii_slapd_post_cmd=${helpers_dir}/bdii-slapd-post.cmd rm -f $bdii_slapd_post_cmd # Ensure the configuration file is not world-readable, # as it contains the slapd database password (umask 077; > $bdii_slapd_conf) pass=`/usr/bin/mkpasswd -s 0 2> /dev/null` || pass=$RANDOM$RANDOM cat <<-EOF >> $bdii_slapd_conf # This file was automatically generated by $0." # Do not modify. 
include ${coreschema} ${bdii_schema} #glue schemas include ${glueschemadir}/Glue-CORE.schema include ${glueschemadir}/Glue-CE.schema include ${glueschemadir}/Glue-CESEBind.schema include ${glueschemadir}/Glue-MDS.schema #glue2 schema include ${glueschemadir}/GLUE20.schema #nordugrid specific schemas include ${ARC_LOCATION}/@pkgdatasubdir@/ldap-schema/nordugrid.schema $modulepath $moduleload_bdii $moduleload_relay $moduleload_rwm $moduleload_shell allow bind_v2 pidfile $slapd_pid_file argsfile $bdii_run_dir/db/slapd.args loglevel $slapd_loglevel threads $threads idletimeout 120 sizelimit unlimited timelimit $timelimit EOF if [ -n "${moduleload_rwm}" ]; then admindomain=$(${arcconfig_parser} --load -r ${ARC_RUNCONFIG} -b infosys/glue2 -o admindomain_name) admindomain="urn:ad:${admindomain:-UNDEFINEDVALUE}" cat <<-EOF >> $bdii_slapd_conf # Relay to allow top-bdii to parse info as if the CE were a site-bdii database relay suffix "GLUE2GroupID=resource,o=glue" overlay rwm rwm-rewriteEngine on rwm-rewriteContext default rwm-rewriteRule "GLUE2GroupID=resource,o=glue" "GLUE2GroupID=services,o=glue" ":" rwm-rewriteContext searchFilter rwm-rewriteContext searchEntryDN rwm-rewriteContext searchAttrDN rwm-rewriteContext matchedDN database relay suffix "GLUE2GroupID=resource,GLUE2DomainID=$admindomain,o=glue" overlay rwm rwm-rewriteEngine on rwm-rewriteContext default rwm-rewriteRule "GLUE2GroupID=resource,GLUE2DomainID=$admindomain,o=glue" "GLUE2GroupID=services,o=glue" ":" rwm-rewriteContext searchFilter rwm-rewriteContext searchEntryDN rwm-rewriteRule "(.*[^ ],)?[ ]?GLUE2GroupID=services,o=glue" "\$1GLUE2GroupID=services,GLUE2DomainID=$admindomain,o=glue" ":" rwm-rewriteContext searchAttrDN rwm-rewriteContext matchedDN database relay suffix "GLUE2GroupID=services,GLUE2DomainID=$admindomain,o=glue" overlay rwm suffixmassage "GLUE2GroupID=services,o=glue" EOF fi cat <<-EOF >> $bdii_slapd_conf # ${bdii_database} database definitions for o=grid database ${bdii_database} ${has_cachesize}cachesize 150000 dbnosync suffix "o=grid" checkpoint 131072 60 rootdn "o=grid" rootpw $pass directory $bdii_var_dir/db/arc ${db_grid_maxsize} # ${bdii_database} database definitions for o=glue database ${bdii_database} ${has_cachesize}cachesize 150000 dbnosync suffix "o=glue" checkpoint 131072 60 rootdn "o=glue" rootpw $pass directory $bdii_var_dir/db/glue2 # ${bdii_database} database definitions for o=infosys database ${bdii_database} ${has_cachesize}cachesize 60 dbnosync suffix "o=infosys" checkpoint 131072 60 rootdn "o=infosys" rootpw $pass directory $bdii_var_dir/db/stats EOF chown $bdii_user: $bdii_slapd_conf [ -x /sbin/restorecon ] && /sbin/restorecon $bdii_slapd_conf # Write slapd starting command if [ "x$slapd_hostnamebind" = "x*" ]; then echo exec ${slapd_cmd} -f ${bdii_slapd_conf} -h \"ldap://${slapd_hostnamebind}:${slapd_port}\" -u ${bdii_user} > ${bdii_slapd_cmd} else echo exec ${slapd_cmd} -f ${bdii_slapd_conf} -h \"ldap://localhost:${slapd_port} ldap://${slapd_hostnamebind}:${slapd_port}\" -u ${bdii_user} > ${bdii_slapd_cmd} fi chmod +x ${bdii_slapd_cmd} # Write post-exec script to check slapd is up and running cat <<-EOF > ${bdii_slapd_post_cmd} iterlimit=30 while [ \$iterlimit -ge 0 ] && !
[ -r "${slapd_pid_file}" ]; do sleep 1 iterlimit=\$(expr \$iterlimit - 1) done EOF # copy forced pidfile to custom arc.conf pidfile (if forced pid was requested) if [ -n "${custom_pid_file}" ]; then echo "mkdir -p \"${custom_pid_file%/*}\"" >> ${bdii_slapd_post_cmd} echo "cp -a \"${slapd_pid_file}\" \"${custom_pid_file}\"" >> ${bdii_slapd_post_cmd} fi # Initialize the database directories mkdir -p $bdii_run_dir/db mkdir -p $bdii_run_dir/archive chown $bdii_user: $bdii_run_dir chown $bdii_user: $bdii_run_dir/db chown $bdii_user: $bdii_run_dir/archive [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_run_dir/db [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_run_dir/archive mkdir -p $bdii_var_dir/archive mkdir -p $bdii_var_dir/db/arc mkdir -p $bdii_var_dir/db/glue2 mkdir -p $bdii_var_dir/db/stats rm -f $bdii_var_dir/db/arc/* 2>/dev/null rm -f $bdii_var_dir/db/glue2/* 2>/dev/null rm -f $bdii_var_dir/db/stats/* 2>/dev/null chown $bdii_user: $bdii_var_dir/db chown $bdii_user: $bdii_var_dir/archive chown $bdii_user: $bdii_var_dir/db/arc chown $bdii_user: $bdii_var_dir/db/glue2 chown $bdii_user: $bdii_var_dir/db/stats [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_var_dir/db [ -x /sbin/restorecon ] && /sbin/restorecon -R $bdii_var_dir/archive # Workaround for BDII DB_CONFIG cachesize bigger than actual memory set_cachesize_line=`egrep '^[[:space:]]*'set_cachesize ${bdii_db_config}` if [ -n "${set_cachesize_line}" ]; then if [ -e /proc/meminfo ]; then memsize=$(grep MemFree /proc/meminfo | awk '{printf "%.0f", $2 * 1024}') default_set_cachesize=$(echo ${set_cachesize_line} | awk '{print $2 * 1073741824 + $3}') half_memsize=$(( ${memsize} / 2 )) if [ $default_set_cachesize -ge $half_memsize ]; then echo "The system does not fulfill BDII optimal memory requirements" echo "ARC will try to fix it anyway..." new_set_cachesize=$(( $memsize / 16 )) TEMPBDIIDBCONFIG=`mktemp -q /tmp/DB_CONFIG.XXXXXX` chmod 644 $TEMPBDIIDBCONFIG sed "s/^set_cachesize.*$/set_cachesize 0 $new_set_cachesize 1/" ${bdii_db_config} > $TEMPBDIIDBCONFIG bdii_db_config=${TEMPBDIIDBCONFIG} echo "DB_CONFIG set_cachesize is now: 0 $new_set_cachesize 1" fi else echo "/proc/meminfo does not exist. 
Cannot apply BDII memory workaround" echo "slapd might fail to start" fi fi # End of BDII set_cachesize workaround # copy BDII DB_CONFIG in ARC locations cp ${bdii_db_config} ${bdii_var_dir}/db/arc/DB_CONFIG cp ${bdii_db_config} ${bdii_var_dir}/db/glue2/DB_CONFIG cp ${bdii_db_config} ${bdii_var_dir}/db/stats/DB_CONFIG chown $bdii_user: ${bdii_var_dir}/db/arc/DB_CONFIG chown $bdii_user: ${bdii_var_dir}/db/glue2/DB_CONFIG chown $bdii_user: ${bdii_var_dir}/db/stats/DB_CONFIG # if the BDII low memory workaround has been applied, remove the temp file if [ -r $TEMPBDIIDBCONFIG ]; then rm -f $TEMPBDIIDBCONFIG fi nordugrid-arc-7.1.1/src/PaxHeaders/external0000644000000000000000000000013215067751420015701 xustar0030 mtime=1759499024.762358202 30 atime=1759499034.766510215 30 ctime=1759499024.762358202 nordugrid-arc-7.1.1/src/external/0000755000175000002070000000000015067751420017660 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/external/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327020020 xustar0030 mtime=1759498967.686490931 30 atime=1759498967.829493104 30 ctime=1759499024.759333073 nordugrid-arc-7.1.1/src/external/Makefile.am0000644000175000002070000000004515067751327021721 0ustar00mockbuildmock00000000000000SUBDIRS = cJSON DIST_SUBDIRS = cJSON nordugrid-arc-7.1.1/src/external/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347020033 xustar0030 mtime=1759498983.840120163 30 atime=1759499015.193212798 30 ctime=1759499024.760334721 nordugrid-arc-7.1.1/src/external/Makefile.in0000644000175000002070000006064515067751347021750 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/external ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive 
am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = 
@BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = 
@PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ 
top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ SUBDIRS = cJSON DIST_SUBDIRS = cJSON all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/external/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/external/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/external/PaxHeaders/cJSON0000644000000000000000000000013215067751420016615 xustar0030 mtime=1759499024.791546288 30 atime=1759499034.766510215 30 ctime=1759499024.791546288 nordugrid-arc-7.1.1/src/external/cJSON/0000755000175000002070000000000015067751420020574 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/external/cJSON/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327020733 xustar0029 mtime=1759498967.68772918 30 atime=1759498967.829493104 30 ctime=1759499024.788119748 nordugrid-arc-7.1.1/src/external/cJSON/Makefile.am0000644000175000002070000000014115067751327022632 0ustar00mockbuildmock00000000000000noinst_LTLIBRARIES = libcjson.la libcjson_la_SOURCES = cJSON.c cJSON.h libcjson_la_LIBADD = -lm nordugrid-arc-7.1.1/src/external/cJSON/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347020747 xustar0030 mtime=1759498983.882066731 30 atime=1759499015.213213102 30 ctime=1759499024.789195013 nordugrid-arc-7.1.1/src/external/cJSON/Makefile.in0000644000175000002070000005744115067751347022664 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/external/cJSON ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libcjson_la_DEPENDENCIES = am_libcjson_la_OBJECTS = cJSON.lo libcjson_la_OBJECTS = $(am_libcjson_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = 
./$(DEPDIR)/cJSON.Plo am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libcjson_la_SOURCES) DIST_SOURCES = $(libcjson_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = 
@AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = 
@PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ 
tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ noinst_LTLIBRARIES = libcjson.la libcjson_la_SOURCES = cJSON.c cJSON.h libcjson_la_LIBADD = -lm all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/external/cJSON/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/external/cJSON/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libcjson.la: $(libcjson_la_OBJECTS) $(libcjson_la_DEPENDENCIES) $(EXTRA_libcjson_la_DEPENDENCIES) $(AM_V_CCLD)$(LINK) $(libcjson_la_OBJECTS) $(libcjson_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cJSON.Plo@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm 
-f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/cJSON.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/cJSON.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-generic clean-libtool clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/external/cJSON/PaxHeaders/cJSON.c0000644000000000000000000000013115067751327017757 xustar0029 mtime=1759498967.68772918 30 atime=1759498967.829493104 30 ctime=1759499024.791546288 nordugrid-arc-7.1.1/src/external/cJSON/cJSON.c0000644000175000002070000023172015067751327021667 0ustar00mockbuildmock00000000000000/* Copyright (c) 2009-2017 Dave Gamble and cJSON contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* cJSON */ /* JSON parser in C. 
*/ /* disable warnings about old C89 functions in MSVC */ #if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) #define _CRT_SECURE_NO_DEPRECATE #endif #ifdef __GNUC__ #pragma GCC visibility push(default) #endif #if defined(_MSC_VER) #pragma warning (push) /* disable warning about single line comments in system headers */ #pragma warning (disable : 4001) #endif #include <string.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <limits.h> #include <ctype.h> #include <float.h> #ifdef ENABLE_LOCALES #include <locale.h> #endif #if defined(_MSC_VER) #pragma warning (pop) #endif #ifdef __GNUC__ #pragma GCC visibility pop #endif #include "cJSON.h" /* define our own boolean type */ #ifdef true #undef true #endif #define true ((cJSON_bool)1) #ifdef false #undef false #endif #define false ((cJSON_bool)0) /* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf have been defined in math.h */ #ifndef isinf #define isinf(d) (isnan((d - d)) && !isnan(d)) #endif #ifndef isnan #define isnan(d) (d != d) #endif #ifndef NAN #ifdef _WIN32 #define NAN sqrt(-1.0) #else #define NAN 0.0/0.0 #endif #endif typedef struct { const unsigned char *json; size_t position; } error; static error global_error = { NULL, 0 }; CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) { return (const char*) (global_error.json + global_error.position); } CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item) { if (!cJSON_IsString(item)) { return NULL; } return item->valuestring; } CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item) { if (!cJSON_IsNumber(item)) { return (double) NAN; } return item->valuedouble; } /* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ #if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 18) #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. #endif CJSON_PUBLIC(const char*) cJSON_Version(void) { static char version[15]; sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); return version; } /* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) { if ((string1 == NULL) || (string2 == NULL)) { return 1; } if (string1 == string2) { return 0; } for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) { if (*string1 == '\0') { return 0; } } return tolower(*string1) - tolower(*string2); } typedef struct internal_hooks { void *(CJSON_CDECL *allocate)(size_t size); void (CJSON_CDECL *deallocate)(void *pointer); void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); } internal_hooks; #if defined(_MSC_VER) /* work around MSVC error C2322: '...' address of dllimport '...'
is not static */ static void * CJSON_CDECL internal_malloc(size_t size) { return malloc(size); } static void CJSON_CDECL internal_free(void *pointer) { free(pointer); } static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) { return realloc(pointer, size); } #else #define internal_malloc malloc #define internal_free free #define internal_realloc realloc #endif /* strlen of character literals resolved at compile time */ #define static_strlen(string_literal) (sizeof(string_literal) - sizeof("")) static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) { size_t length = 0; unsigned char *copy = NULL; if (string == NULL) { return NULL; } length = strlen((const char*)string) + sizeof(""); copy = (unsigned char*)hooks->allocate(length); if (copy == NULL) { return NULL; } memcpy(copy, string, length); return copy; } CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) { if (hooks == NULL) { /* Reset hooks */ global_hooks.allocate = malloc; global_hooks.deallocate = free; global_hooks.reallocate = realloc; return; } global_hooks.allocate = malloc; if (hooks->malloc_fn != NULL) { global_hooks.allocate = hooks->malloc_fn; } global_hooks.deallocate = free; if (hooks->free_fn != NULL) { global_hooks.deallocate = hooks->free_fn; } /* use realloc only if both free and malloc are used */ global_hooks.reallocate = NULL; if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) { global_hooks.reallocate = realloc; } } /* Internal constructor. */ static cJSON *cJSON_New_Item(const internal_hooks * const hooks) { cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); if (node) { memset(node, '\0', sizeof(cJSON)); } return node; } /* Delete a cJSON structure. */ CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) { cJSON *next = NULL; while (item != NULL) { next = item->next; if (!(item->type & cJSON_IsReference) && (item->child != NULL)) { cJSON_Delete(item->child); } if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) { global_hooks.deallocate(item->valuestring); item->valuestring = NULL; } if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) { global_hooks.deallocate(item->string); item->string = NULL; } global_hooks.deallocate(item); item = next; } } /* get the decimal point character of the current locale */ static unsigned char get_decimal_point(void) { #ifdef ENABLE_LOCALES struct lconv *lconv = localeconv(); return (unsigned char) lconv->decimal_point[0]; #else return '.'; #endif } typedef struct { const unsigned char *content; size_t length; size_t offset; size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ internal_hooks hooks; } parse_buffer; /* check if the given size is left to read in a given parse buffer (starting with 1) */ #define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) /* check if the buffer can be accessed at the given index (starting with 0) */ #define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) #define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) /* get a pointer to the buffer at the position */ #define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) /* Parse the input text to generate a number, and populate the result into item. 
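For example (an illustrative note, not part of the upstream comment): given the input
text "4.2e9", the strtod call below yields valuedouble == 4200000000.0, and because
that exceeds the range of int, valueint saturates to INT_MAX in the range check at
the end of the function.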
*/ static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) { double number = 0; unsigned char *after_end = NULL; unsigned char number_c_string[64]; unsigned char decimal_point = get_decimal_point(); size_t i = 0; if ((input_buffer == NULL) || (input_buffer->content == NULL)) { return false; } /* copy the number into a temporary buffer and replace '.' with the decimal point * of the current locale (for strtod) * This also takes care of '\0' not necessarily being available for marking the end of the input */ for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) { switch (buffer_at_offset(input_buffer)[i]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '+': case '-': case 'e': case 'E': number_c_string[i] = buffer_at_offset(input_buffer)[i]; break; case '.': number_c_string[i] = decimal_point; break; default: goto loop_end; } } loop_end: number_c_string[i] = '\0'; number = strtod((const char*)number_c_string, (char**)&after_end); if (number_c_string == after_end) { return false; /* parse_error */ } item->valuedouble = number; /* use saturation in case of overflow */ if (number >= INT_MAX) { item->valueint = INT_MAX; } else if (number <= (double)INT_MIN) { item->valueint = INT_MIN; } else { item->valueint = (int)number; } item->type = cJSON_Number; input_buffer->offset += (size_t)(after_end - number_c_string); return true; } /* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) { if (number >= INT_MAX) { object->valueint = INT_MAX; } else if (number <= (double)INT_MIN) { object->valueint = INT_MIN; } else { object->valueint = (int)number; } return object->valuedouble = number; } /* Note: when passing a NULL valuestring, cJSON_SetValuestring treats this as an error and return NULL */ CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring) { char *copy = NULL; /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */ if ((object == NULL) || !(object->type & cJSON_String) || (object->type & cJSON_IsReference)) { return NULL; } /* return NULL if the object is corrupted or valuestring is NULL */ if (object->valuestring == NULL || valuestring == NULL) { return NULL; } if (strlen(valuestring) <= strlen(object->valuestring)) { strcpy(object->valuestring, valuestring); return object->valuestring; } copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks); if (copy == NULL) { return NULL; } if (object->valuestring != NULL) { cJSON_free(object->valuestring); } object->valuestring = copy; return copy; } typedef struct { unsigned char *buffer; size_t length; size_t offset; size_t depth; /* current nesting depth (for formatted printing) */ cJSON_bool noalloc; cJSON_bool format; /* is this print a formatted print */ internal_hooks hooks; } printbuffer; /* realloc printbuffer if necessary to have at least "needed" bytes more */ static unsigned char* ensure(printbuffer * const p, size_t needed) { unsigned char *newbuffer = NULL; size_t newsize = 0; if ((p == NULL) || (p->buffer == NULL)) { return NULL; } if ((p->length > 0) && (p->offset >= p->length)) { /* make sure that offset is valid */ return NULL; } if (needed > INT_MAX) { /* sizes bigger than INT_MAX are currently not supported */ return NULL; } needed += p->offset + 1; if (needed <= p->length) { return p->buffer + p->offset; } if 
(p->noalloc) { return NULL; } /* calculate new buffer size */ if (needed > (INT_MAX / 2)) { /* overflow of int, use INT_MAX if possible */ if (needed <= INT_MAX) { newsize = INT_MAX; } else { return NULL; } } else { newsize = needed * 2; } if (p->hooks.reallocate != NULL) { /* reallocate with realloc if available */ newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); if (newbuffer == NULL) { p->hooks.deallocate(p->buffer); p->length = 0; p->buffer = NULL; return NULL; } } else { /* otherwise reallocate manually */ newbuffer = (unsigned char*)p->hooks.allocate(newsize); if (!newbuffer) { p->hooks.deallocate(p->buffer); p->length = 0; p->buffer = NULL; return NULL; } memcpy(newbuffer, p->buffer, p->offset + 1); p->hooks.deallocate(p->buffer); } p->length = newsize; p->buffer = newbuffer; return newbuffer + p->offset; } /* calculate the new length of the string in a printbuffer and update the offset */ static void update_offset(printbuffer * const buffer) { const unsigned char *buffer_pointer = NULL; if ((buffer == NULL) || (buffer->buffer == NULL)) { return; } buffer_pointer = buffer->buffer + buffer->offset; buffer->offset += strlen((const char*)buffer_pointer); } /* securely comparison of floating-point variables */ static cJSON_bool compare_double(double a, double b) { double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b); return (fabs(a - b) <= maxVal * DBL_EPSILON); } /* Render the number nicely from the given item into a string. */ static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output_pointer = NULL; double d = item->valuedouble; int length = 0; size_t i = 0; unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */ unsigned char decimal_point = get_decimal_point(); double test = 0.0; if (output_buffer == NULL) { return false; } /* This checks for NaN and Infinity */ if (isnan(d) || isinf(d)) { length = sprintf((char*)number_buffer, "null"); } else if(d == (double)item->valueint) { length = sprintf((char*)number_buffer, "%d", item->valueint); } else { /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ length = sprintf((char*)number_buffer, "%1.15g", d); /* Check whether the original double can be recovered */ if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d)) { /* If not, print with 17 decimal places of precision */ length = sprintf((char*)number_buffer, "%1.17g", d); } } /* sprintf failed or buffer overrun occurred */ if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) { return false; } /* reserve appropriate space in the output */ output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); if (output_pointer == NULL) { return false; } /* copy the printed number to the output and replace locale * dependent decimal point with '.' 
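For instance, under a locale such as de_DE where sprintf renders 3.14 as "3,14",
the loop below turns the ',' back into '.' so that the generated JSON stays valid
(an illustrative note, not part of the upstream comment).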
*/ for (i = 0; i < ((size_t)length); i++) { if (number_buffer[i] == decimal_point) { output_pointer[i] = '.'; continue; } output_pointer[i] = number_buffer[i]; } output_pointer[i] = '\0'; output_buffer->offset += (size_t)length; return true; } /* parse 4 digit hexadecimal number */ static unsigned parse_hex4(const unsigned char * const input) { unsigned int h = 0; size_t i = 0; for (i = 0; i < 4; i++) { /* parse digit */ if ((input[i] >= '0') && (input[i] <= '9')) { h += (unsigned int) input[i] - '0'; } else if ((input[i] >= 'A') && (input[i] <= 'F')) { h += (unsigned int) 10 + input[i] - 'A'; } else if ((input[i] >= 'a') && (input[i] <= 'f')) { h += (unsigned int) 10 + input[i] - 'a'; } else /* invalid */ { return 0; } if (i < 3) { /* shift left to make place for the next nibble */ h = h << 4; } } return h; } /* converts a UTF-16 literal to UTF-8 * A literal can be one or two sequences of the form \uXXXX */ static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) { long unsigned int codepoint = 0; unsigned int first_code = 0; const unsigned char *first_sequence = input_pointer; unsigned char utf8_length = 0; unsigned char utf8_position = 0; unsigned char sequence_length = 0; unsigned char first_byte_mark = 0; if ((input_end - first_sequence) < 6) { /* input ends unexpectedly */ goto fail; } /* get the first utf16 sequence */ first_code = parse_hex4(first_sequence + 2); /* check that the code is valid */ if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) { goto fail; } /* UTF16 surrogate pair */ if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) { const unsigned char *second_sequence = first_sequence + 6; unsigned int second_code = 0; sequence_length = 12; /* \uXXXX\uXXXX */ if ((input_end - second_sequence) < 6) { /* input ends unexpectedly */ goto fail; } if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) { /* missing second half of the surrogate pair */ goto fail; } /* get the second utf16 sequence */ second_code = parse_hex4(second_sequence + 2); /* check that the code is valid */ if ((second_code < 0xDC00) || (second_code > 0xDFFF)) { /* invalid second half of the surrogate pair */ goto fail; } /* calculate the unicode codepoint from the surrogate pair */ codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); } else { sequence_length = 6; /* \uXXXX */ codepoint = first_code; } /* encode as UTF-8 * takes at maximum 4 bytes to encode: * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ if (codepoint < 0x80) { /* normal ascii, encoding 0xxxxxxx */ utf8_length = 1; } else if (codepoint < 0x800) { /* two bytes, encoding 110xxxxx 10xxxxxx */ utf8_length = 2; first_byte_mark = 0xC0; /* 11000000 */ } else if (codepoint < 0x10000) { /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ utf8_length = 3; first_byte_mark = 0xE0; /* 11100000 */ } else if (codepoint <= 0x10FFFF) { /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ utf8_length = 4; first_byte_mark = 0xF0; /* 11110000 */ } else { /* invalid unicode codepoint */ goto fail; } /* encode as utf8 */ for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) { /* 10xxxxxx */ (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); codepoint >>= 6; } /* encode first byte */ if (utf8_length > 1) { (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); } else { (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); } 
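/* Worked example (an illustration, not part of the upstream sources): the escape
 * sequence "\uD83D\uDE00" yields first_code = 0xD83D and second_code = 0xDE00, so
 * codepoint = 0x10000 + ((0x03D << 10) | 0x200) = 0x1F600, which the code above
 * has just encoded as the four UTF-8 bytes F0 9F 98 80. */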
*output_pointer += utf8_length; return sequence_length; fail: return 0; } /* Parse the input text into an unescaped cinput, and populate item. */ static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) { const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; unsigned char *output_pointer = NULL; unsigned char *output = NULL; /* not a string */ if (buffer_at_offset(input_buffer)[0] != '\"') { goto fail; } { /* calculate approximate size of the output (overestimate) */ size_t allocation_length = 0; size_t skipped_bytes = 0; while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) { /* is escape sequence */ if (input_end[0] == '\\') { if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) { /* prevent buffer overflow when last input character is a backslash */ goto fail; } skipped_bytes++; input_end++; } input_end++; } if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) { goto fail; /* string ended unexpectedly */ } /* This is at most how much we need for the output */ allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); if (output == NULL) { goto fail; /* allocation failure */ } } output_pointer = output; /* loop through the string literal */ while (input_pointer < input_end) { if (*input_pointer != '\\') { *output_pointer++ = *input_pointer++; } /* escape sequence */ else { unsigned char sequence_length = 2; if ((input_end - input_pointer) < 1) { goto fail; } switch (input_pointer[1]) { case 'b': *output_pointer++ = '\b'; break; case 'f': *output_pointer++ = '\f'; break; case 'n': *output_pointer++ = '\n'; break; case 'r': *output_pointer++ = '\r'; break; case 't': *output_pointer++ = '\t'; break; case '\"': case '\\': case '/': *output_pointer++ = input_pointer[1]; break; /* UTF-16 literal */ case 'u': sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); if (sequence_length == 0) { /* failed to convert UTF16-literal to UTF-8 */ goto fail; } break; default: goto fail; } input_pointer += sequence_length; } } /* zero terminate the output */ *output_pointer = '\0'; item->type = cJSON_String; item->valuestring = (char*)output; input_buffer->offset = (size_t) (input_end - input_buffer->content); input_buffer->offset++; return true; fail: if (output != NULL) { input_buffer->hooks.deallocate(output); output = NULL; } if (input_pointer != NULL) { input_buffer->offset = (size_t)(input_pointer - input_buffer->content); } return false; } /* Render the cstring provided to an escaped version that can be printed. 
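For example (an illustrative note, not part of the upstream comment): a quote
becomes the two characters backslash-quote, a tab becomes backslash-t, and any
other byte below 32 is emitted as a six-character sequence such as \u0001.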
*/ static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) { const unsigned char *input_pointer = NULL; unsigned char *output = NULL; unsigned char *output_pointer = NULL; size_t output_length = 0; /* numbers of additional characters needed for escaping */ size_t escape_characters = 0; if (output_buffer == NULL) { return false; } /* empty string */ if (input == NULL) { output = ensure(output_buffer, sizeof("\"\"")); if (output == NULL) { return false; } strcpy((char*)output, "\"\""); return true; } /* set "flag" to 1 if something needs to be escaped */ for (input_pointer = input; *input_pointer; input_pointer++) { switch (*input_pointer) { case '\"': case '\\': case '\b': case '\f': case '\n': case '\r': case '\t': /* one character escape sequence */ escape_characters++; break; default: if (*input_pointer < 32) { /* UTF-16 escape sequence uXXXX */ escape_characters += 5; } break; } } output_length = (size_t)(input_pointer - input) + escape_characters; output = ensure(output_buffer, output_length + sizeof("\"\"")); if (output == NULL) { return false; } /* no characters have to be escaped */ if (escape_characters == 0) { output[0] = '\"'; memcpy(output + 1, input, output_length); output[output_length + 1] = '\"'; output[output_length + 2] = '\0'; return true; } output[0] = '\"'; output_pointer = output + 1; /* copy the string */ for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) { if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) { /* normal character, copy */ *output_pointer = *input_pointer; } else { /* character needs to be escaped */ *output_pointer++ = '\\'; switch (*input_pointer) { case '\\': *output_pointer = '\\'; break; case '\"': *output_pointer = '\"'; break; case '\b': *output_pointer = 'b'; break; case '\f': *output_pointer = 'f'; break; case '\n': *output_pointer = 'n'; break; case '\r': *output_pointer = 'r'; break; case '\t': *output_pointer = 't'; break; default: /* escape and print as unicode codepoint */ sprintf((char*)output_pointer, "u%04x", *input_pointer); output_pointer += 4; break; } } } output[output_length + 1] = '\"'; output[output_length + 2] = '\0'; return true; } /* Invoke print_string_ptr (which is useful) on an item. */ static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) { return print_string_ptr((unsigned char*)item->valuestring, p); } /* Predeclare these prototypes. 
*/ static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); /* Utility to jump whitespace and cr/lf */ static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) { if ((buffer == NULL) || (buffer->content == NULL)) { return NULL; } if (cannot_access_at_index(buffer, 0)) { return buffer; } while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) { buffer->offset++; } if (buffer->offset == buffer->length) { buffer->offset--; } return buffer; } /* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) { if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) { return NULL; } if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) { buffer->offset += 3; } return buffer; } CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) { size_t buffer_length; if (NULL == value) { return NULL; } /* Adding null character size due to require_null_terminated. */ buffer_length = strlen(value) + sizeof(""); return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated); } /* Parse an object - create a new root, and populate. */ CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated) { parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; cJSON *item = NULL; /* reset error position */ global_error.json = NULL; global_error.position = 0; if (value == NULL || 0 == buffer_length) { goto fail; } buffer.content = (const unsigned char*)value; buffer.length = buffer_length; buffer.offset = 0; buffer.hooks = global_hooks; item = cJSON_New_Item(&global_hooks); if (item == NULL) /* memory fail */ { goto fail; } if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) { /* parse failure. ep is set. 
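Callers can recover the offending offset via cJSON_GetErrorPtr(), which points into
the original text at global_error.position as set in the fail path below.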
*/ goto fail; } /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ if (require_null_terminated) { buffer_skip_whitespace(&buffer); if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') { goto fail; } } if (return_parse_end) { *return_parse_end = (const char*)buffer_at_offset(&buffer); } return item; fail: if (item != NULL) { cJSON_Delete(item); } if (value != NULL) { error local_error; local_error.json = (const unsigned char*)value; local_error.position = 0; if (buffer.offset < buffer.length) { local_error.position = buffer.offset; } else if (buffer.length > 0) { local_error.position = buffer.length - 1; } if (return_parse_end != NULL) { *return_parse_end = (const char*)local_error.json + local_error.position; } global_error = local_error; } return NULL; } /* Default options for cJSON_Parse */ CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) { return cJSON_ParseWithOpts(value, 0, 0); } CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length) { return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0); } #define cjson_min(a, b) (((a) < (b)) ? (a) : (b)) static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) { static const size_t default_buffer_size = 256; printbuffer buffer[1]; unsigned char *printed = NULL; memset(buffer, 0, sizeof(buffer)); /* create buffer */ buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size); buffer->length = default_buffer_size; buffer->format = format; buffer->hooks = *hooks; if (buffer->buffer == NULL) { goto fail; } /* print the value */ if (!print_value(item, buffer)) { goto fail; } update_offset(buffer); /* check if reallocate is available */ if (hooks->reallocate != NULL) { printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1); if (printed == NULL) { goto fail; } buffer->buffer = NULL; } else /* otherwise copy the JSON over to a new buffer */ { printed = (unsigned char*) hooks->allocate(buffer->offset + 1); if (printed == NULL) { goto fail; } memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); printed[buffer->offset] = '\0'; /* just to be sure */ /* free the buffer */ hooks->deallocate(buffer->buffer); buffer->buffer = NULL; } return printed; fail: if (buffer->buffer != NULL) { hooks->deallocate(buffer->buffer); buffer->buffer = NULL; } if (printed != NULL) { hooks->deallocate(printed); printed = NULL; } return NULL; } /* Render a cJSON item/entity/structure to text. 
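A minimal round-trip sketch using the public API (an illustration, not part of the
upstream file; error handling trimmed):

    cJSON *root = cJSON_Parse("{\"name\":\"arc\",\"port\":443}");
    if (root != NULL)
    {
        char *text = cJSON_Print(root);   (formatted; cJSON_PrintUnformatted omits whitespace)
        if (text != NULL) { cJSON_free(text); }
        cJSON_Delete(root);
    }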
*/ CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) { return (char*)print(item, true, &global_hooks); } CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) { return (char*)print(item, false, &global_hooks); } CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) { printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; if (prebuffer < 0) { return NULL; } p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); if (!p.buffer) { return NULL; } p.length = (size_t)prebuffer; p.offset = 0; p.noalloc = false; p.format = fmt; p.hooks = global_hooks; if (!print_value(item, &p)) { global_hooks.deallocate(p.buffer); p.buffer = NULL; return NULL; } return (char*)p.buffer; } CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format) { printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; if ((length < 0) || (buffer == NULL)) { return false; } p.buffer = (unsigned char*)buffer; p.length = (size_t)length; p.offset = 0; p.noalloc = true; p.format = format; p.hooks = global_hooks; return print_value(item, &p); } /* Parser core - when encountering text, process appropriately. */ static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) { if ((input_buffer == NULL) || (input_buffer->content == NULL)) { return false; /* no input */ } /* parse the different types of values */ /* null */ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) { item->type = cJSON_NULL; input_buffer->offset += 4; return true; } /* false */ if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) { item->type = cJSON_False; input_buffer->offset += 5; return true; } /* true */ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) { item->type = cJSON_True; item->valueint = 1; input_buffer->offset += 4; return true; } /* string */ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) { return parse_string(item, input_buffer); } /* number */ if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) { return parse_number(item, input_buffer); } /* array */ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) { return parse_array(item, input_buffer); } /* object */ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) { return parse_object(item, input_buffer); } return false; } /* Render a value to text. 
*/ static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output = NULL; if ((item == NULL) || (output_buffer == NULL)) { return false; } switch ((item->type) & 0xFF) { case cJSON_NULL: output = ensure(output_buffer, 5); if (output == NULL) { return false; } strcpy((char*)output, "null"); return true; case cJSON_False: output = ensure(output_buffer, 6); if (output == NULL) { return false; } strcpy((char*)output, "false"); return true; case cJSON_True: output = ensure(output_buffer, 5); if (output == NULL) { return false; } strcpy((char*)output, "true"); return true; case cJSON_Number: return print_number(item, output_buffer); case cJSON_Raw: { size_t raw_length = 0; if (item->valuestring == NULL) { return false; } raw_length = strlen(item->valuestring) + sizeof(""); output = ensure(output_buffer, raw_length); if (output == NULL) { return false; } memcpy(output, item->valuestring, raw_length); return true; } case cJSON_String: return print_string(item, output_buffer); case cJSON_Array: return print_array(item, output_buffer); case cJSON_Object: return print_object(item, output_buffer); default: return false; } } /* Build an array from input text. */ static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) { cJSON *head = NULL; /* head of the linked list */ cJSON *current_item = NULL; if (input_buffer->depth >= CJSON_NESTING_LIMIT) { return false; /* to deeply nested */ } input_buffer->depth++; if (buffer_at_offset(input_buffer)[0] != '[') { /* not an array */ goto fail; } input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) { /* empty array */ goto success; } /* check if we skipped to the end of the buffer */ if (cannot_access_at_index(input_buffer, 0)) { input_buffer->offset--; goto fail; } /* step back to character in front of the first element */ input_buffer->offset--; /* loop through the comma separated array elements */ do { /* allocate next item */ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); if (new_item == NULL) { goto fail; /* allocation failure */ } /* attach next item to list */ if (head == NULL) { /* start the linked list */ current_item = head = new_item; } else { /* add to the end and advance */ current_item->next = new_item; new_item->prev = current_item; current_item = new_item; } /* parse next value */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (!parse_value(current_item, input_buffer)) { goto fail; /* failed to parse value */ } buffer_skip_whitespace(input_buffer); } while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') { goto fail; /* expected end of array */ } success: input_buffer->depth--; if (head != NULL) { head->prev = current_item; } item->type = cJSON_Array; item->child = head; input_buffer->offset++; return true; fail: if (head != NULL) { cJSON_Delete(head); } return false; } /* Render an array to text */ static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output_pointer = NULL; size_t length = 0; cJSON *current_element = item->child; if (output_buffer == NULL) { return false; } /* Compose the output array. 
*/ /* opening square bracket */ output_pointer = ensure(output_buffer, 1); if (output_pointer == NULL) { return false; } *output_pointer = '['; output_buffer->offset++; output_buffer->depth++; while (current_element != NULL) { if (!print_value(current_element, output_buffer)) { return false; } update_offset(output_buffer); if (current_element->next) { length = (size_t) (output_buffer->format ? 2 : 1); output_pointer = ensure(output_buffer, length + 1); if (output_pointer == NULL) { return false; } *output_pointer++ = ','; if(output_buffer->format) { *output_pointer++ = ' '; } *output_pointer = '\0'; output_buffer->offset += length; } current_element = current_element->next; } output_pointer = ensure(output_buffer, 2); if (output_pointer == NULL) { return false; } *output_pointer++ = ']'; *output_pointer = '\0'; output_buffer->depth--; return true; } /* Build an object from the text. */ static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) { cJSON *head = NULL; /* linked list head */ cJSON *current_item = NULL; if (input_buffer->depth >= CJSON_NESTING_LIMIT) { return false; /* to deeply nested */ } input_buffer->depth++; if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) { goto fail; /* not an object */ } input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) { goto success; /* empty object */ } /* check if we skipped to the end of the buffer */ if (cannot_access_at_index(input_buffer, 0)) { input_buffer->offset--; goto fail; } /* step back to character in front of the first element */ input_buffer->offset--; /* loop through the comma separated array elements */ do { /* allocate next item */ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); if (new_item == NULL) { goto fail; /* allocation failure */ } /* attach next item to list */ if (head == NULL) { /* start the linked list */ current_item = head = new_item; } else { /* add to the end and advance */ current_item->next = new_item; new_item->prev = current_item; current_item = new_item; } if (cannot_access_at_index(input_buffer, 1)) { goto fail; /* nothing comes after the comma */ } /* parse the name of the child */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (!parse_string(current_item, input_buffer)) { goto fail; /* failed to parse name */ } buffer_skip_whitespace(input_buffer); /* swap valuestring and string, because we parsed the name */ current_item->string = current_item->valuestring; current_item->valuestring = NULL; if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) { goto fail; /* invalid object */ } /* parse the value */ input_buffer->offset++; buffer_skip_whitespace(input_buffer); if (!parse_value(current_item, input_buffer)) { goto fail; /* failed to parse value */ } buffer_skip_whitespace(input_buffer); } while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) { goto fail; /* expected end of object */ } success: input_buffer->depth--; if (head != NULL) { head->prev = current_item; } item->type = cJSON_Object; item->child = head; input_buffer->offset++; return true; fail: if (head != NULL) { cJSON_Delete(head); } return false; } /* Render an object to text. 
*/ static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) { unsigned char *output_pointer = NULL; size_t length = 0; cJSON *current_item = item->child; if (output_buffer == NULL) { return false; } /* Compose the output: */ length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */ output_pointer = ensure(output_buffer, length + 1); if (output_pointer == NULL) { return false; } *output_pointer++ = '{'; output_buffer->depth++; if (output_buffer->format) { *output_pointer++ = '\n'; } output_buffer->offset += length; while (current_item) { if (output_buffer->format) { size_t i; output_pointer = ensure(output_buffer, output_buffer->depth); if (output_pointer == NULL) { return false; } for (i = 0; i < output_buffer->depth; i++) { *output_pointer++ = '\t'; } output_buffer->offset += output_buffer->depth; } /* print key */ if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) { return false; } update_offset(output_buffer); length = (size_t) (output_buffer->format ? 2 : 1); output_pointer = ensure(output_buffer, length); if (output_pointer == NULL) { return false; } *output_pointer++ = ':'; if (output_buffer->format) { *output_pointer++ = '\t'; } output_buffer->offset += length; /* print value */ if (!print_value(current_item, output_buffer)) { return false; } update_offset(output_buffer); /* print comma if not last */ length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0)); output_pointer = ensure(output_buffer, length + 1); if (output_pointer == NULL) { return false; } if (current_item->next) { *output_pointer++ = ','; } if (output_buffer->format) { *output_pointer++ = '\n'; } *output_pointer = '\0'; output_buffer->offset += length; current_item = current_item->next; } output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); if (output_pointer == NULL) { return false; } if (output_buffer->format) { size_t i; for (i = 0; i < (output_buffer->depth - 1); i++) { *output_pointer++ = '\t'; } } *output_pointer++ = '}'; *output_pointer = '\0'; output_buffer->depth--; return true; } /* Get Array size/item / object item. */ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) { cJSON *child = NULL; size_t size = 0; if (array == NULL) { return 0; } child = array->child; while(child != NULL) { size++; child = child->next; } /* FIXME: Can overflow here. 
Cannot be fixed without breaking the API */ return (int)size; } static cJSON* get_array_item(const cJSON *array, size_t index) { cJSON *current_child = NULL; if (array == NULL) { return NULL; } current_child = array->child; while ((current_child != NULL) && (index > 0)) { index--; current_child = current_child->next; } return current_child; } CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) { if (index < 0) { return NULL; } return get_array_item(array, (size_t)index); } static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) { cJSON *current_element = NULL; if ((object == NULL) || (name == NULL)) { return NULL; } current_element = object->child; if (case_sensitive) { while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0)) { current_element = current_element->next; } } else { while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) { current_element = current_element->next; } } if ((current_element == NULL) || (current_element->string == NULL)) { return NULL; } return current_element; } CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) { return get_object_item(object, string, false); } CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) { return get_object_item(object, string, true); } CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) { return cJSON_GetObjectItem(object, string) ? 1 : 0; } /* Utility for array list handling. */ static void suffix_object(cJSON *prev, cJSON *item) { prev->next = item; item->prev = prev; } /* Utility for handling references. */ static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) { cJSON *reference = NULL; if (item == NULL) { return NULL; } reference = cJSON_New_Item(hooks); if (reference == NULL) { return NULL; } memcpy(reference, item, sizeof(cJSON)); reference->string = NULL; reference->type |= cJSON_IsReference; reference->next = reference->prev = NULL; return reference; } static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) { cJSON *child = NULL; if ((item == NULL) || (array == NULL) || (array == item)) { return false; } child = array->child; /* * To find the last item in array quickly, we use prev in array */ if (child == NULL) { /* list is empty, start new one */ array->child = item; item->prev = item; item->next = NULL; } else { /* append to the end */ if (child->prev) { suffix_object(child->prev, item); array->child->prev = item; } } return true; } /* Add item to array/object. 
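For example (an illustrative sketch, not part of the upstream file):

    cJSON *obj = cJSON_CreateObject();
    cJSON *arr = cJSON_CreateArray();
    cJSON_AddItemToArray(arr, cJSON_CreateNumber(1));
    cJSON_AddItemToArray(arr, cJSON_CreateNumber(2));
    cJSON_AddItemToObject(obj, "ports", arr);   (obj now owns arr)
    cJSON_Delete(obj);                          (frees the whole tree)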
*/ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) { return add_item_to_array(array, item); } #if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) #pragma GCC diagnostic push #endif #ifdef __GNUC__ #pragma GCC diagnostic ignored "-Wcast-qual" #endif /* helper function to cast away const */ static void* cast_away_const(const void* string) { return (void*)string; } #if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) #pragma GCC diagnostic pop #endif static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) { char *new_key = NULL; int new_type = cJSON_Invalid; if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item)) { return false; } if (constant_key) { new_key = (char*)cast_away_const(string); new_type = item->type | cJSON_StringIsConst; } else { new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); if (new_key == NULL) { return false; } new_type = item->type & ~cJSON_StringIsConst; } if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) { hooks->deallocate(item->string); } item->string = new_key; item->type = new_type; return add_item_to_array(object, item); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) { return add_item_to_object(object, string, item, &global_hooks, false); } /* Add an item to an object with constant string as key */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) { return add_item_to_object(object, string, item, &global_hooks, true); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) { if (array == NULL) { return false; } return add_item_to_array(array, create_reference(item, &global_hooks)); } CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) { if ((object == NULL) || (string == NULL)) { return false; } return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); } CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name) { cJSON *null = cJSON_CreateNull(); if (add_item_to_object(object, name, null, &global_hooks, false)) { return null; } cJSON_Delete(null); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) { cJSON *true_item = cJSON_CreateTrue(); if (add_item_to_object(object, name, true_item, &global_hooks, false)) { return true_item; } cJSON_Delete(true_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) { cJSON *false_item = cJSON_CreateFalse(); if (add_item_to_object(object, name, false_item, &global_hooks, false)) { return false_item; } cJSON_Delete(false_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) { cJSON *bool_item = cJSON_CreateBool(boolean); if (add_item_to_object(object, name, bool_item, &global_hooks, false)) { return bool_item; } cJSON_Delete(bool_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number) { cJSON *number_item = cJSON_CreateNumber(number); if (add_item_to_object(object, name, number_item, &global_hooks, 
false)) { return number_item; } cJSON_Delete(number_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) { cJSON *string_item = cJSON_CreateString(string); if (add_item_to_object(object, name, string_item, &global_hooks, false)) { return string_item; } cJSON_Delete(string_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) { cJSON *raw_item = cJSON_CreateRaw(raw); if (add_item_to_object(object, name, raw_item, &global_hooks, false)) { return raw_item; } cJSON_Delete(raw_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) { cJSON *object_item = cJSON_CreateObject(); if (add_item_to_object(object, name, object_item, &global_hooks, false)) { return object_item; } cJSON_Delete(object_item); return NULL; } CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) { cJSON *array = cJSON_CreateArray(); if (add_item_to_object(object, name, array, &global_hooks, false)) { return array; } cJSON_Delete(array); return NULL; } CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) { if ((parent == NULL) || (item == NULL)) { return NULL; } if (item != parent->child) { /* not the first element */ item->prev->next = item->next; } if (item->next != NULL) { /* not the last element */ item->next->prev = item->prev; } if (item == parent->child) { /* first element */ parent->child = item->next; } else if (item->next == NULL) { /* last element */ parent->child->prev = item->prev; } /* make sure the detached item doesn't point anywhere anymore */ item->prev = NULL; item->next = NULL; return item; } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) { if (which < 0) { return NULL; } return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); } CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) { cJSON_Delete(cJSON_DetachItemFromArray(array, which)); } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) { cJSON *to_detach = cJSON_GetObjectItem(object, string); return cJSON_DetachItemViaPointer(object, to_detach); } CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) { cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); return cJSON_DetachItemViaPointer(object, to_detach); } CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) { cJSON_Delete(cJSON_DetachItemFromObject(object, string)); } CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) { cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); } /* Replace array/object items with new ones. 
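For example (illustrative, not from the upstream file),
cJSON_ReplaceItemInObject(obj, "port", cJSON_CreateNumber(8443)) detaches and frees
the old item stored under "port" and gives the replacement that key.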
*/ CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) { cJSON *after_inserted = NULL; if (which < 0 || newitem == NULL) { return false; } after_inserted = get_array_item(array, (size_t)which); if (after_inserted == NULL) { return add_item_to_array(array, newitem); } if (after_inserted != array->child && after_inserted->prev == NULL) { /* return false if after_inserted is a corrupted array item */ return false; } newitem->next = after_inserted; newitem->prev = after_inserted->prev; after_inserted->prev = newitem; if (after_inserted == array->child) { array->child = newitem; } else { newitem->prev->next = newitem; } return true; } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) { if ((parent == NULL) || (parent->child == NULL) || (replacement == NULL) || (item == NULL)) { return false; } if (replacement == item) { return true; } replacement->next = item->next; replacement->prev = item->prev; if (replacement->next != NULL) { replacement->next->prev = replacement; } if (parent->child == item) { if (parent->child->prev == parent->child) { replacement->prev = replacement; } parent->child = replacement; } else { /* * To find the last item in array quickly, we use prev in array. * We can't modify the last item's next pointer where this item was the parent's child */ if (replacement->prev != NULL) { replacement->prev->next = replacement; } if (replacement->next == NULL) { parent->child->prev = replacement; } } item->next = NULL; item->prev = NULL; cJSON_Delete(item); return true; } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) { if (which < 0) { return false; } return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); } static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) { if ((replacement == NULL) || (string == NULL)) { return false; } /* replace the name in the replacement */ if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) { cJSON_free(replacement->string); } replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); if (replacement->string == NULL) { return false; } replacement->type &= ~cJSON_StringIsConst; return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) { return replace_item_in_object(object, string, newitem, false); } CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) { return replace_item_in_object(object, string, newitem, true); } /* Create basic types: */ CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_NULL; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_True; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_False; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = boolean ? 
cJSON_True : cJSON_False; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_Number; item->valuedouble = num; /* use saturation in case of overflow */ if (num >= INT_MAX) { item->valueint = INT_MAX; } else if (num <= (double)INT_MIN) { item->valueint = INT_MIN; } else { item->valueint = (int)num; } } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_String; item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); if(!item->valuestring) { cJSON_Delete(item); return NULL; } } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) { cJSON *item = cJSON_New_Item(&global_hooks); if (item != NULL) { item->type = cJSON_String | cJSON_IsReference; item->valuestring = (char*)cast_away_const(string); } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) { cJSON *item = cJSON_New_Item(&global_hooks); if (item != NULL) { item->type = cJSON_Object | cJSON_IsReference; item->child = (cJSON*)cast_away_const(child); } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { cJSON *item = cJSON_New_Item(&global_hooks); if (item != NULL) { item->type = cJSON_Array | cJSON_IsReference; item->child = (cJSON*)cast_away_const(child); } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type = cJSON_Raw; item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); if(!item->valuestring) { cJSON_Delete(item); return NULL; } } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) { cJSON *item = cJSON_New_Item(&global_hooks); if(item) { item->type=cJSON_Array; } return item; } CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) { cJSON *item = cJSON_New_Item(&global_hooks); if (item) { item->type = cJSON_Object; } return item; } /* Create Arrays: */ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber(numbers[i]); if (!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber((double)numbers[i]); if(!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber(numbers[i]); if(!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const 
*strings, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (strings == NULL)) { return NULL; } a = cJSON_CreateArray(); for (i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateString(strings[i]); if(!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p,n); } p = n; } if (a && a->child) { a->child->prev = n; } return a; } /* Duplication */ CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) { cJSON *newitem = NULL; cJSON *child = NULL; cJSON *next = NULL; cJSON *newchild = NULL; /* Bail on bad ptr */ if (!item) { goto fail; } /* Create new item */ newitem = cJSON_New_Item(&global_hooks); if (!newitem) { goto fail; } /* Copy over all vars */ newitem->type = item->type & (~cJSON_IsReference); newitem->valueint = item->valueint; newitem->valuedouble = item->valuedouble; if (item->valuestring) { newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); if (!newitem->valuestring) { goto fail; } } if (item->string) { newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); if (!newitem->string) { goto fail; } } /* If non-recursive, then we're done! */ if (!recurse) { return newitem; } /* Walk the ->next chain for the child. */ child = item->child; while (child != NULL) { newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ if (!newchild) { goto fail; } if (next != NULL) { /* If newitem->child already set, then crosswire ->prev and ->next and move on */ next->next = newchild; newchild->prev = next; next = newchild; } else { /* Set newitem->child and move to it */ newitem->child = newchild; next = newchild; } child = child->next; } if (newitem && newitem->child) { newitem->child->prev = newchild; } return newitem; fail: if (newitem != NULL) { cJSON_Delete(newitem); } return NULL; } static void skip_oneline_comment(char **input) { *input += static_strlen("//"); for (; (*input)[0] != '\0'; ++(*input)) { if ((*input)[0] == '\n') { *input += static_strlen("\n"); return; } } } static void skip_multiline_comment(char **input) { *input += static_strlen("/*"); for (; (*input)[0] != '\0'; ++(*input)) { if (((*input)[0] == '*') && ((*input)[1] == '/')) { *input += static_strlen("*/"); return; } } } static void minify_string(char **input, char **output) { (*output)[0] = (*input)[0]; *input += static_strlen("\""); *output += static_strlen("\""); for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) { (*output)[0] = (*input)[0]; if ((*input)[0] == '\"') { (*output)[0] = '\"'; *input += static_strlen("\""); *output += static_strlen("\""); return; } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) { (*output)[1] = (*input)[1]; *input += static_strlen("\""); *output += static_strlen("\""); } } } CJSON_PUBLIC(void) cJSON_Minify(char *json) { char *into = json; if (json == NULL) { return; } while (json[0] != '\0') { switch (json[0]) { case ' ': case '\t': case '\r': case '\n': json++; break; case '/': if (json[1] == '/') { skip_oneline_comment(&json); } else if (json[1] == '*') { skip_multiline_comment(&json); } else { json++; } break; case '\"': minify_string(&json, (char**)&into); break; default: into[0] = json[0]; json++; into++; } } /* and null-terminate. 
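For example (an illustrative note, not part of the upstream comment): cJSON_Minify
rewrites a buffer holding { "a" : 1 } // note in place into {"a":1}, stripping
whitespace and both comment styles.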
*/ *into = '\0'; } CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Invalid; } CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_False; } CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_True; } CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & (cJSON_True | cJSON_False)) != 0; } CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_NULL; } CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Number; } CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_String; } CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Array; } CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Object; } CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) { if (item == NULL) { return false; } return (item->type & 0xFF) == cJSON_Raw; } CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) { if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF))) { return false; } /* check if type is valid */ switch (a->type & 0xFF) { case cJSON_False: case cJSON_True: case cJSON_NULL: case cJSON_Number: case cJSON_String: case cJSON_Raw: case cJSON_Array: case cJSON_Object: break; default: return false; } /* identical objects are equal */ if (a == b) { return true; } switch (a->type & 0xFF) { /* in these cases an equal type is enough */ case cJSON_False: case cJSON_True: case cJSON_NULL: return true; case cJSON_Number: if (compare_double(a->valuedouble, b->valuedouble)) { return true; } return false; case cJSON_String: case cJSON_Raw: if ((a->valuestring == NULL) || (b->valuestring == NULL)) { return false; } if (strcmp(a->valuestring, b->valuestring) == 0) { return true; } return false; case cJSON_Array: { cJSON *a_element = a->child; cJSON *b_element = b->child; for (; (a_element != NULL) && (b_element != NULL);) { if (!cJSON_Compare(a_element, b_element, case_sensitive)) { return false; } a_element = a_element->next; b_element = b_element->next; } /* one of the arrays is longer than the other */ if (a_element != b_element) { return false; } return true; } case cJSON_Object: { cJSON *a_element = NULL; cJSON *b_element = NULL; cJSON_ArrayForEach(a_element, a) { /* TODO This has O(n^2) runtime, which is horrible!
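   (editorial note: each get_object_item() lookup below rescans the member list of b, so comparing two n-member objects costs on the order of n*n string comparisons; a temporary index over the keys would make this roughly linear, at the cost of extra allocation)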
*/ b_element = get_object_item(b, a_element->string, case_sensitive); if (b_element == NULL) { return false; } if (!cJSON_Compare(a_element, b_element, case_sensitive)) { return false; } } /* doing this twice, once on a and b to prevent true comparison if a subset of b * TODO: Do this the proper way, this is just a fix for now */ cJSON_ArrayForEach(b_element, b) { a_element = get_object_item(a, b_element->string, case_sensitive); if (a_element == NULL) { return false; } if (!cJSON_Compare(b_element, a_element, case_sensitive)) { return false; } } return true; } default: return false; } } CJSON_PUBLIC(void *) cJSON_malloc(size_t size) { return global_hooks.allocate(size); } CJSON_PUBLIC(void) cJSON_free(void *object) { global_hooks.deallocate(object); object = NULL; } nordugrid-arc-7.1.1/src/external/cJSON/PaxHeaders/cJSON.h0000644000000000000000000000013115067751327017764 xustar0029 mtime=1759498967.68772918 30 atime=1759498967.829493104 30 ctime=1759499024.792620656 nordugrid-arc-7.1.1/src/external/cJSON/cJSON.h0000644000175000002070000003750115067751327021675 0ustar00mockbuildmock00000000000000/* Copyright (c) 2009-2017 Dave Gamble and cJSON contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef cJSON__h #define cJSON__h #ifdef __cplusplus extern "C" { #endif #if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) #define __WINDOWS__ #endif #ifdef __WINDOWS__ /* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. 
For windows you have 3 define options: CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbols For *nix builds that support visibility attribute, you can define similar behavior by setting default visibility to hidden by adding -fvisibility=hidden (for gcc) or -xldscope=hidden (for sun cc) to CFLAGS then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does */ #define CJSON_CDECL __cdecl #define CJSON_STDCALL __stdcall /* export symbols by default, this is necessary for copy pasting the C and header file */ #if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) #define CJSON_EXPORT_SYMBOLS #endif #if defined(CJSON_HIDE_SYMBOLS) #define CJSON_PUBLIC(type) type CJSON_STDCALL #elif defined(CJSON_EXPORT_SYMBOLS) #define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL #elif defined(CJSON_IMPORT_SYMBOLS) #define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL #endif #else /* !__WINDOWS__ */ #define CJSON_CDECL #define CJSON_STDCALL #if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) #define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type #else #define CJSON_PUBLIC(type) type #endif #endif /* project version */ #define CJSON_VERSION_MAJOR 1 #define CJSON_VERSION_MINOR 7 #define CJSON_VERSION_PATCH 18 #include <stddef.h> /* cJSON Types: */ #define cJSON_Invalid (0) #define cJSON_False (1 << 0) #define cJSON_True (1 << 1) #define cJSON_NULL (1 << 2) #define cJSON_Number (1 << 3) #define cJSON_String (1 << 4) #define cJSON_Array (1 << 5) #define cJSON_Object (1 << 6) #define cJSON_Raw (1 << 7) /* raw json */ #define cJSON_IsReference 256 #define cJSON_StringIsConst 512 /* The cJSON structure: */ typedef struct cJSON { /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ struct cJSON *next; struct cJSON *prev; /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ struct cJSON *child; /* The type of the item, as above. */ int type; /* The item's string, if type==cJSON_String or type == cJSON_Raw */ char *valuestring; /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ int valueint; /* The item's number, if type==cJSON_Number */ double valuedouble; /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ char *string; } cJSON; typedef struct cJSON_Hooks { /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */ void *(CJSON_CDECL *malloc_fn)(size_t sz); void (CJSON_CDECL *free_fn)(void *ptr); } cJSON_Hooks; typedef int cJSON_bool; /* Limits how deeply nested arrays/objects can be before cJSON refuses to parse them. * This is to prevent stack overflows.
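 * Because of the #ifndef guard below, the limit can be overridden at build time; as an illustrative (not upstream-documented) example, a consumer that only handles shallow documents could compile with -DCJSON_NESTING_LIMIT=64, and the default of 1000 applies only when nothing was predefined.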
*/ #ifndef CJSON_NESTING_LIMIT #define CJSON_NESTING_LIMIT 1000 #endif /* returns the version of cJSON as a string */ CJSON_PUBLIC(const char*) cJSON_Version(void); /* Supply malloc, realloc and free functions to cJSON */ CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); /* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ /* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length); /* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ /* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated); /* Render a cJSON entity to text for transfer/storage. */ CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); /* Render a cJSON entity to text for transfer/storage without any formatting. */ CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); /* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); /* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ /* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); /* Delete a cJSON entity and all subentities. */ CJSON_PUBLIC(void) cJSON_Delete(cJSON *item); /* Returns the number of items in an array (or object). */ CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); /* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); /* Get item "string" from object. Case insensitive. */ CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); /* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
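   A minimal error-handling sketch (the buffer name is illustrative; fprintf assumes <stdio.h>):
       cJSON *root = cJSON_Parse(buffer);
       if (root == NULL)
       {
           const char *error_ptr = cJSON_GetErrorPtr();
           if (error_ptr != NULL)
           {
               fprintf(stderr, "parse error before: %s\n", error_ptr);
           }
       }
   The error position is kept in a global, so it is not reliable across threads; the return_parse_end argument of cJSON_ParseWithOpts avoids that.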
*/ CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); /* Check item type and return its value */ CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item); CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item); /* These functions check the type of an item */ CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); /* These calls create a cJSON item of the appropriate type. */ CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); /* raw json */ CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); /* Create a string where valuestring references a string so * it will not be freed by cJSON_Delete */ CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); /* Create an object/array that only references its elements so * they will not be freed by cJSON_Delete */ CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); /* These utilities create an Array of count items. * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/ CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count); /* Append item to the specified array/object. */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item); CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); /* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. * WARNING: When this function is used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before * writing to `item->string` */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); /* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); /* Remove/Detach items from Arrays/Objects.
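   Detach unlinks an item and hands ownership to the caller; Delete unlinks and frees it in one step. A short sketch (array names and the index 0 are illustrative):
       cJSON *item = cJSON_DetachItemFromArray(array, 0);
       if (item != NULL)
       {
           cJSON_AddItemToArray(other_array, item);
       }
   A detached item that is not re-attached must eventually be freed with cJSON_Delete(item).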
*/ CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); /* Update array items. */ CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); /* Duplicate a cJSON item */ CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); /* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will * need to be released. With recurse!=0, it will duplicate any children connected to the item. * The item->next and ->prev pointers are always zero on return from Duplicate. */ /* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. * case_sensitive determines if object keys are treated case-sensitively (1) or case-insensitively (0) */ CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); /* Minify a string, removing blank characters (such as ' ', '\t', '\r', '\n') from it. * The input pointer json cannot point to a read-only address area, such as a string constant, * but should point to a readable and writable address area. */ CJSON_PUBLIC(void) cJSON_Minify(char *json); /* Helper functions for creating and adding items to an object at the same time. * They return the added item or NULL on failure. */ CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); /* When assigning an integer value, it needs to be propagated to valuedouble too. */ #define cJSON_SetIntValue(object, number) ((object) ?
(object)->valueint = (object)->valuedouble = (number) : (number)) /* helper for the cJSON_SetNumberValue macro */ CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); #define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) /* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */ CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring); /* If the object is not a boolean type this does nothing and returns cJSON_Invalid else it returns the new type*/ #define cJSON_SetBoolValue(object, boolValue) ( \ (object != NULL && ((object)->type & (cJSON_False|cJSON_True))) ? \ (object)->type=((object)->type &(~(cJSON_False|cJSON_True)))|((boolValue)?cJSON_True:cJSON_False) : \ cJSON_Invalid\ ) /* Macro for iterating over an array or object */ #define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) /* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ CJSON_PUBLIC(void *) cJSON_malloc(size_t size); CJSON_PUBLIC(void) cJSON_free(void *object); #ifdef __cplusplus } #endif #endif nordugrid-arc-7.1.1/src/external/PaxHeaders/README0000644000000000000000000000013215067751327016644 xustar0030 mtime=1759498967.687518508 30 atime=1759498967.829493104 30 ctime=1759499024.761325649 nordugrid-arc-7.1.1/src/external/README0000644000175000002070000000014015067751327020541 0ustar00mockbuildmock00000000000000External libraries small enough to include source rather than depending on an external package. nordugrid-arc-7.1.1/src/PaxHeaders/clients0000644000000000000000000000013215067751427015527 xustar0030 mtime=1759499031.247456743 30 atime=1759499034.766510215 30 ctime=1759499031.247456743 nordugrid-arc-7.1.1/src/clients/0000755000175000002070000000000015067751427017506 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327017637 xustar0030 mtime=1759498967.676396098 30 atime=1759498967.824493028 30 ctime=1759499031.099015763 nordugrid-arc-7.1.1/src/clients/Makefile.am0000644000175000002070000000152315067751327021542 0ustar00mockbuildmock00000000000000if DATA_CLIENT_ENABLED DATA_CLIENT = data else DATA_CLIENT = endif if CREDENTIALS_CLIENT_ENABLED CREDENTIALS_CLIENT = credentials else CREDENTIALS_CLIENT = endif if COMPUTE_CLIENT_ENABLED COMPUTE_CLIENT = compute else COMPUTE_CLIENT = endif if ARCREST_ENABLED PYARCREST = pyarcrest else PYARCREST = endif SUBDIRS = $(DATA_CLIENT) $(CREDENTIALS_CLIENT) $(COMPUTE_CLIENT) $(PYARCREST) DIST_SUBDIRS = data credentials compute pyarcrest arcsysconfdir = $(sysconfdir)/arc arcsysconf_DATA = client.conf exampledir = $(pkgdatadir)/examples example_DATA = client.conf BASH_COMPLETION_SOURCE = client.bash_completion arc-client-tools: $(BASH_COMPLETION_SOURCE) cp $< $@ bashcompletiondir = $(bashcompdir) bashcompletion_DATA = arc-client-tools EXTRA_DIST = $(example_DATA) $(arcsysconf_DATA) $(BASH_COMPLETION_SOURCE) CLEANFILES = arc-client-tools nordugrid-arc-7.1.1/src/clients/PaxHeaders/data0000644000000000000000000000013215067751427016440 xustar0030 mtime=1759499031.144455178 30 atime=1759499034.766510215 30 ctime=1759499031.144455178 nordugrid-arc-7.1.1/src/clients/data/0000755000175000002070000000000015067751427020417 
5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327020550 xustar0030 mtime=1759498967.679490824 30 atime=1759498967.825493043 30 ctime=1759499031.130322728 nordugrid-arc-7.1.1/src/clients/data/Makefile.am0000644000175000002070000000302115067751327022446 0ustar00mockbuildmock00000000000000bin_PROGRAMS = arccp arcls arcrm arcmkdir arcrename man_MANS = arccp.1 arcls.1 arcrm.1 arcmkdir.1 arcrename.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arccp_SOURCES = arccp.cpp utils.cpp utils.h arccp_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arccp_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcls_SOURCES = arcls.cpp utils.cpp utils.h arcls_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcls_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrm_SOURCES = arcrm.cpp utils.cpp utils.h arcrm_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrm_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcmkdir_SOURCES = arcmkdir.cpp utils.cpp utils.h arcmkdir_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcmkdir_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrename_SOURCES = arcrename.cpp utils.cpp utils.h arcrename_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrename_LDADD = $(CLILIBS) $(GLIBMM_LIBS) %.1: % %.1.in LANG=C help2man -N -h "-h|sed s/…/.../g" -i $(word 2,$^) -o $@ ./$< EXTRA_DIST = $(man_MANS:=.in) nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/Makefile.in0000644000000000000000000000013015067751347020561 xustar0029 mtime=1759498983.71680637 29 atime=1759499019.60527984 30 ctime=1759499031.131346935 nordugrid-arc-7.1.1/src/clients/data/Makefile.in0000644000175000002070000015071215067751347022473 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arccp$(EXEEXT) arcls$(EXEEXT) arcrm$(EXEEXT) \ arcmkdir$(EXEEXT) arcrename$(EXEEXT) subdir = src/clients/data ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) am_arccp_OBJECTS = arccp-arccp.$(OBJEXT) arccp-utils.$(OBJEXT) arccp_OBJECTS = $(am_arccp_OBJECTS) am__DEPENDENCIES_1 = arccp_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = arccp_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arccp_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcls_OBJECTS = arcls-arcls.$(OBJEXT) arcls-utils.$(OBJEXT) arcls_OBJECTS = $(am_arcls_OBJECTS) arcls_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcls_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcls_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcmkdir_OBJECTS = arcmkdir-arcmkdir.$(OBJEXT) \ 
arcmkdir-utils.$(OBJEXT) arcmkdir_OBJECTS = $(am_arcmkdir_OBJECTS) arcmkdir_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcmkdir_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcmkdir_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcrename_OBJECTS = arcrename-arcrename.$(OBJEXT) \ arcrename-utils.$(OBJEXT) arcrename_OBJECTS = $(am_arcrename_OBJECTS) arcrename_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcrename_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcrename_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcrm_OBJECTS = arcrm-arcrm.$(OBJEXT) arcrm-utils.$(OBJEXT) arcrm_OBJECTS = $(am_arcrm_OBJECTS) arcrm_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) arcrm_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcrm_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/arccp-arccp.Po \ ./$(DEPDIR)/arccp-utils.Po ./$(DEPDIR)/arcls-arcls.Po \ ./$(DEPDIR)/arcls-utils.Po ./$(DEPDIR)/arcmkdir-arcmkdir.Po \ ./$(DEPDIR)/arcmkdir-utils.Po \ ./$(DEPDIR)/arcrename-arcrename.Po \ ./$(DEPDIR)/arcrename-utils.Po ./$(DEPDIR)/arcrm-arcrm.Po \ ./$(DEPDIR)/arcrm-utils.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(arccp_SOURCES) $(arcls_SOURCES) $(arcmkdir_SOURCES) \ $(arcrename_SOURCES) $(arcrm_SOURCES) DIST_SOURCES = $(arccp_SOURCES) $(arcls_SOURCES) $(arcmkdir_SOURCES) \ $(arcrename_SOURCES) $(arcrm_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac 
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ 
ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = 
@NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = 
@program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ man_MANS = arccp.1 arcls.1 arcrm.1 arcmkdir.1 arcrename.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arccp_SOURCES = arccp.cpp utils.cpp utils.h arccp_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arccp_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcls_SOURCES = arcls.cpp utils.cpp utils.h arcls_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcls_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrm_SOURCES = arcrm.cpp utils.cpp utils.h arcrm_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrm_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcmkdir_SOURCES = arcmkdir.cpp utils.cpp utils.h arcmkdir_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcmkdir_LDADD = $(CLILIBS) $(GLIBMM_LIBS) arcrename_SOURCES = arcrename.cpp utils.cpp utils.h arcrename_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrename_LDADD = $(CLILIBS) $(GLIBMM_LIBS) EXTRA_DIST = $(man_MANS:=.in) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/data/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/data/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arccp$(EXEEXT): $(arccp_OBJECTS) $(arccp_DEPENDENCIES) $(EXTRA_arccp_DEPENDENCIES) @rm -f arccp$(EXEEXT) $(AM_V_CXXLD)$(arccp_LINK) $(arccp_OBJECTS) $(arccp_LDADD) $(LIBS) arcls$(EXEEXT): $(arcls_OBJECTS) $(arcls_DEPENDENCIES) $(EXTRA_arcls_DEPENDENCIES) @rm -f arcls$(EXEEXT) $(AM_V_CXXLD)$(arcls_LINK) $(arcls_OBJECTS) $(arcls_LDADD) $(LIBS) arcmkdir$(EXEEXT): $(arcmkdir_OBJECTS) $(arcmkdir_DEPENDENCIES) $(EXTRA_arcmkdir_DEPENDENCIES) @rm -f arcmkdir$(EXEEXT) $(AM_V_CXXLD)$(arcmkdir_LINK) $(arcmkdir_OBJECTS) $(arcmkdir_LDADD) $(LIBS) arcrename$(EXEEXT): $(arcrename_OBJECTS) $(arcrename_DEPENDENCIES) $(EXTRA_arcrename_DEPENDENCIES) @rm -f arcrename$(EXEEXT) $(AM_V_CXXLD)$(arcrename_LINK) $(arcrename_OBJECTS) $(arcrename_LDADD) $(LIBS) arcrm$(EXEEXT): $(arcrm_OBJECTS) $(arcrm_DEPENDENCIES) $(EXTRA_arcrm_DEPENDENCIES) @rm -f arcrm$(EXEEXT) $(AM_V_CXXLD)$(arcrm_LINK) $(arcrm_OBJECTS) $(arcrm_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/arccp-arccp.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arccp-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcls-arcls.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcls-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcmkdir-arcmkdir.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcmkdir-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrename-arcrename.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrename-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrm-arcrm.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrm-utils.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< arccp-arccp.o: arccp.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -MT arccp-arccp.o -MD -MP -MF $(DEPDIR)/arccp-arccp.Tpo -c -o arccp-arccp.o `test -f 'arccp.cpp' || echo '$(srcdir)/'`arccp.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccp-arccp.Tpo $(DEPDIR)/arccp-arccp.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arccp.cpp' object='arccp-arccp.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -c -o arccp-arccp.o `test -f 'arccp.cpp' || echo '$(srcdir)/'`arccp.cpp arccp-arccp.obj: arccp.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -MT arccp-arccp.obj -MD -MP -MF $(DEPDIR)/arccp-arccp.Tpo -c -o arccp-arccp.obj `if test -f 'arccp.cpp'; then $(CYGPATH_W) 
'arccp.cpp'; else $(CYGPATH_W) '$(srcdir)/arccp.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccp-arccp.Tpo $(DEPDIR)/arccp-arccp.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arccp.cpp' object='arccp-arccp.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -c -o arccp-arccp.obj `if test -f 'arccp.cpp'; then $(CYGPATH_W) 'arccp.cpp'; else $(CYGPATH_W) '$(srcdir)/arccp.cpp'; fi` arccp-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -MT arccp-utils.o -MD -MP -MF $(DEPDIR)/arccp-utils.Tpo -c -o arccp-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccp-utils.Tpo $(DEPDIR)/arccp-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arccp-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -c -o arccp-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arccp-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -MT arccp-utils.obj -MD -MP -MF $(DEPDIR)/arccp-utils.Tpo -c -o arccp-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccp-utils.Tpo $(DEPDIR)/arccp-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arccp-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccp_CXXFLAGS) $(CXXFLAGS) -c -o arccp-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcls-arcls.o: arcls.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -MT arcls-arcls.o -MD -MP -MF $(DEPDIR)/arcls-arcls.Tpo -c -o arcls-arcls.o `test -f 'arcls.cpp' || echo '$(srcdir)/'`arcls.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcls-arcls.Tpo $(DEPDIR)/arcls-arcls.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcls.cpp' object='arcls-arcls.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -c -o arcls-arcls.o `test -f 'arcls.cpp' || echo '$(srcdir)/'`arcls.cpp arcls-arcls.obj: arcls.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -MT arcls-arcls.obj -MD -MP -MF $(DEPDIR)/arcls-arcls.Tpo -c -o arcls-arcls.obj `if test -f 'arcls.cpp'; then $(CYGPATH_W) 'arcls.cpp'; else $(CYGPATH_W) '$(srcdir)/arcls.cpp'; fi` @am__fastdepCXX_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/arcls-arcls.Tpo $(DEPDIR)/arcls-arcls.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcls.cpp' object='arcls-arcls.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -c -o arcls-arcls.obj `if test -f 'arcls.cpp'; then $(CYGPATH_W) 'arcls.cpp'; else $(CYGPATH_W) '$(srcdir)/arcls.cpp'; fi` arcls-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -MT arcls-utils.o -MD -MP -MF $(DEPDIR)/arcls-utils.Tpo -c -o arcls-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcls-utils.Tpo $(DEPDIR)/arcls-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcls-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -c -o arcls-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcls-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -MT arcls-utils.obj -MD -MP -MF $(DEPDIR)/arcls-utils.Tpo -c -o arcls-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcls-utils.Tpo $(DEPDIR)/arcls-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcls-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcls_CXXFLAGS) $(CXXFLAGS) -c -o arcls-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcmkdir-arcmkdir.o: arcmkdir.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -MT arcmkdir-arcmkdir.o -MD -MP -MF $(DEPDIR)/arcmkdir-arcmkdir.Tpo -c -o arcmkdir-arcmkdir.o `test -f 'arcmkdir.cpp' || echo '$(srcdir)/'`arcmkdir.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcmkdir-arcmkdir.Tpo $(DEPDIR)/arcmkdir-arcmkdir.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcmkdir.cpp' object='arcmkdir-arcmkdir.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -c -o arcmkdir-arcmkdir.o `test -f 'arcmkdir.cpp' || echo '$(srcdir)/'`arcmkdir.cpp arcmkdir-arcmkdir.obj: arcmkdir.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -MT arcmkdir-arcmkdir.obj -MD -MP -MF $(DEPDIR)/arcmkdir-arcmkdir.Tpo -c -o arcmkdir-arcmkdir.obj `if test -f 'arcmkdir.cpp'; then $(CYGPATH_W) 'arcmkdir.cpp'; else $(CYGPATH_W) 
'$(srcdir)/arcmkdir.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcmkdir-arcmkdir.Tpo $(DEPDIR)/arcmkdir-arcmkdir.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcmkdir.cpp' object='arcmkdir-arcmkdir.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -c -o arcmkdir-arcmkdir.obj `if test -f 'arcmkdir.cpp'; then $(CYGPATH_W) 'arcmkdir.cpp'; else $(CYGPATH_W) '$(srcdir)/arcmkdir.cpp'; fi` arcmkdir-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -MT arcmkdir-utils.o -MD -MP -MF $(DEPDIR)/arcmkdir-utils.Tpo -c -o arcmkdir-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcmkdir-utils.Tpo $(DEPDIR)/arcmkdir-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcmkdir-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -c -o arcmkdir-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcmkdir-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -MT arcmkdir-utils.obj -MD -MP -MF $(DEPDIR)/arcmkdir-utils.Tpo -c -o arcmkdir-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcmkdir-utils.Tpo $(DEPDIR)/arcmkdir-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcmkdir-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcmkdir_CXXFLAGS) $(CXXFLAGS) -c -o arcmkdir-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcrename-arcrename.o: arcrename.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -MT arcrename-arcrename.o -MD -MP -MF $(DEPDIR)/arcrename-arcrename.Tpo -c -o arcrename-arcrename.o `test -f 'arcrename.cpp' || echo '$(srcdir)/'`arcrename.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrename-arcrename.Tpo $(DEPDIR)/arcrename-arcrename.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcrename.cpp' object='arcrename-arcrename.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -c -o arcrename-arcrename.o `test -f 'arcrename.cpp' || echo '$(srcdir)/'`arcrename.cpp arcrename-arcrename.obj: arcrename.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -MT 
arcrename-arcrename.obj -MD -MP -MF $(DEPDIR)/arcrename-arcrename.Tpo -c -o arcrename-arcrename.obj `if test -f 'arcrename.cpp'; then $(CYGPATH_W) 'arcrename.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrename.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrename-arcrename.Tpo $(DEPDIR)/arcrename-arcrename.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcrename.cpp' object='arcrename-arcrename.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -c -o arcrename-arcrename.obj `if test -f 'arcrename.cpp'; then $(CYGPATH_W) 'arcrename.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrename.cpp'; fi` arcrename-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -MT arcrename-utils.o -MD -MP -MF $(DEPDIR)/arcrename-utils.Tpo -c -o arcrename-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrename-utils.Tpo $(DEPDIR)/arcrename-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcrename-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -c -o arcrename-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcrename-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -MT arcrename-utils.obj -MD -MP -MF $(DEPDIR)/arcrename-utils.Tpo -c -o arcrename-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrename-utils.Tpo $(DEPDIR)/arcrename-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcrename-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrename_CXXFLAGS) $(CXXFLAGS) -c -o arcrename-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcrm-arcrm.o: arcrm.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -MT arcrm-arcrm.o -MD -MP -MF $(DEPDIR)/arcrm-arcrm.Tpo -c -o arcrm-arcrm.o `test -f 'arcrm.cpp' || echo '$(srcdir)/'`arcrm.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrm-arcrm.Tpo $(DEPDIR)/arcrm-arcrm.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcrm.cpp' object='arcrm-arcrm.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -c -o arcrm-arcrm.o `test -f 'arcrm.cpp' || echo '$(srcdir)/'`arcrm.cpp arcrm-arcrm.obj: arcrm.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -MT arcrm-arcrm.obj -MD -MP -MF $(DEPDIR)/arcrm-arcrm.Tpo -c -o arcrm-arcrm.obj `if test -f 'arcrm.cpp'; then $(CYGPATH_W) 'arcrm.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrm.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrm-arcrm.Tpo $(DEPDIR)/arcrm-arcrm.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcrm.cpp' object='arcrm-arcrm.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -c -o arcrm-arcrm.obj `if test -f 'arcrm.cpp'; then $(CYGPATH_W) 'arcrm.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrm.cpp'; fi` arcrm-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -MT arcrm-utils.o -MD -MP -MF $(DEPDIR)/arcrm-utils.Tpo -c -o arcrm-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrm-utils.Tpo $(DEPDIR)/arcrm-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcrm-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -c -o arcrm-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcrm-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -MT arcrm-utils.obj -MD -MP -MF $(DEPDIR)/arcrm-utils.Tpo -c -o arcrm-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrm-utils.Tpo $(DEPDIR)/arcrm-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcrm-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrm_CXXFLAGS) $(CXXFLAGS) -c -o arcrm-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in 
$$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/arccp-arccp.Po -rm -f ./$(DEPDIR)/arccp-utils.Po -rm -f ./$(DEPDIR)/arcls-arcls.Po -rm -f ./$(DEPDIR)/arcls-utils.Po -rm -f ./$(DEPDIR)/arcmkdir-arcmkdir.Po -rm -f ./$(DEPDIR)/arcmkdir-utils.Po -rm -f ./$(DEPDIR)/arcrename-arcrename.Po -rm -f ./$(DEPDIR)/arcrename-utils.Po -rm -f ./$(DEPDIR)/arcrm-arcrm.Po -rm -f ./$(DEPDIR)/arcrm-utils.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/arccp-arccp.Po -rm -f ./$(DEPDIR)/arccp-utils.Po -rm -f ./$(DEPDIR)/arcls-arcls.Po -rm -f ./$(DEPDIR)/arcls-utils.Po -rm -f ./$(DEPDIR)/arcmkdir-arcmkdir.Po -rm -f ./$(DEPDIR)/arcmkdir-utils.Po -rm -f ./$(DEPDIR)/arcrename-arcrename.Po -rm -f ./$(DEPDIR)/arcrename-utils.Po -rm -f ./$(DEPDIR)/arcrm-arcrm.Po -rm -f ./$(DEPDIR)/arcrm-utils.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man1 \ 
install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ uninstall-binPROGRAMS uninstall-man uninstall-man1 .PRECIOUS: Makefile %.1: % %.1.in LANG=C help2man -N -h "-h|sed s/…/.../g" -i $(word 2,$^) -o $@ ./$< # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcmkdir.cpp0000644000000000000000000000013115067751327021013 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.137695538 nordugrid-arc-7.1.1/src/clients/data/arcmkdir.cpp0000644000175000002070000001712415067751327022723 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcmkdir"); bool arcmkdir(const Arc::URL& file_url, Arc::UserConfig& usercfg, bool with_parents) { if (!file_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", file_url.str()); return false; } if (file_url.Protocol() == "urllist") { std::list files = Arc::ReadURLList(file_url); if (files.empty()) { logger.msg(Arc::ERROR, "Can't read list of locations from file %s", file_url.Path()); return false; } bool r = true; for (std::list::iterator file = files.begin(); file != files.end(); ++file) { if (!arcmkdir(*file, usercfg, with_parents)) r = false; } return r; } Arc::DataHandle url(file_url, usercfg); if (!url) { logger.msg(Arc::ERROR, "Unsupported URL given"); return false; } if (url->RequiresCredentials()) { if(!initProxy(logger, usercfg, file_url)) return false; } url->SetSecure(false); Arc::DataStatus res = url->CreateDirectory(with_parents); if (!res.Passed()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); return false; } return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); static Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("url"), istring("The arcmkdir command creates directories " "on grid storage elements and catalogs.")); bool with_parents = false; options.AddOption('p', "parents", istring("make parent directories as needed"), with_parents); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); bool no_authentication = false; options.AddOption('\0', "no-authentication", istring("do not perform any authentication for opened connections"), no_authentication); bool x509_authentication = false; options.AddOption('\0', "x509-authentication", istring("perform X.509 authentication for opened connections"), x509_authentication); bool 
token_authentication = false; options.AddOption('\0', "token-authentication", istring("perform token authentication for opened connections"), token_authentication);
bool force_system_ca = false; options.AddOption('\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca);
bool force_grid_ca = false; options.AddOption('\0', "gridca", istring("force using CA certificates configuration for Grid services (typically IGTF)"), force_grid_ca);
bool force_any_ca = false; options.AddOption('\0', "anyca", istring("force using both CA certificates configuration for Grid services (typically IGTF) and those provided by OpenSSL"), force_any_ca);
bool allow_insecure_connection = false; options.AddOption('\0', "allowinsecureconnection", istring("allow TLS connection which failed verification"), allow_insecure_connection);
std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug);
bool version = false; options.AddOption('v', "version", istring("print version information"), version);
std::list<std::string> params = options.Parse(argc, argv);
if (version) { std::cout << Arc::IString("%s version %s", "arcmkdir", VERSION) << std::endl; return 0; }
// If debug is specified as argument, it should be set before loading the configuration.
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug));
logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments());
if (show_plugins) { std::list<Arc::ModuleDesc> modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list<Arc::ModuleDesc>::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list<Arc::PluginDesc>::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; }
// credentials will be initialised later if necessary
Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials);
if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; }
usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY()); usercfg.Timeout(timeout);
if (force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); }
if (force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); }
if (force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); }
if (allow_insecure_connection) usercfg.TLSAllowInsecure(true);
AuthenticationType authentication_type = UndefinedAuthentication;
if(!getAuthenticationType(logger, usercfg, no_authentication, x509_authentication, token_authentication, authentication_type)) return 1;
switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; }
if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity()));
if (params.size() != 1) { logger.msg(Arc::ERROR, "Wrong number of
parameters specified"); return 1; } // add a slash to the end if not present std::string url = params.front(); if (url[url.length()-1] != '/') url += '/'; if (!arcmkdir(url, usercfg, with_parents)) return 1; return 0; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcrename.cpp0000644000000000000000000000013115067751327021154 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.138749312 nordugrid-arc-7.1.1/src/clients/data/arcrename.cpp0000644000175000002070000001763215067751327023070 0ustar00mockbuildmock00000000000000#ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcrename"); bool arcrename(const Arc::URL& old_url, const Arc::URL& new_url, Arc::UserConfig& usercfg, int timeout) { if (!old_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", old_url.str()); return false; } if (!new_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", new_url.str()); return false; } // Check URLs if (old_url.Protocol() != new_url.Protocol() || old_url.Host() != new_url.Host() || old_url.Port() != new_url.Port()) { logger.msg(Arc::ERROR, "Both URLs must have the same protocol, host and port"); return false; } std::string old_path(old_url.Path()); std::string new_path(new_url.Path()); Arc::CanonicalDir(old_path, true); Arc::CanonicalDir(new_path, true); // LFC URLs can be specified by guid metadata option if ((old_path.find_first_not_of('/') == std::string::npos && old_url.MetaDataOptions().empty()) || new_path.find_first_not_of('/') == std::string::npos) { logger.msg(Arc::ERROR, "Cannot rename to or from root directory"); return false; } if (old_path == new_path && old_url.FullPath() == new_url.FullPath()) { logger.msg(Arc::ERROR, "Cannot rename to the same URL"); return false; } Arc::DataHandle url(old_url, usercfg); if (!url) { logger.msg(Arc::ERROR, "Unsupported URL given"); return false; } if (url->RequiresCredentials()) { if(!initProxy(logger, usercfg, old_url)) return false; } // Insecure by default url->SetSecure(false); // Do the renaming Arc::DataStatus res = url->Rename(new_url); if (!res.Passed()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); return false; } return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); static Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("old_url new_url"), istring("The arcrename command renames files on " "grid storage elements.")); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); bool no_authentication = false; options.AddOption('\0', "no-authentication", istring("do not perform any authentication for opened connections"), no_authentication); bool x509_authentication = false; 
options.AddOption('\0', "x509-authentication", istring("perform X.509 authentication for opened connections"), x509_authentication); bool token_authentication = false; options.AddOption('\0', "token-authentication", istring("perform token authentication for opened connections"), token_authentication); bool force_system_ca = false; options.AddOption('\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca); bool force_grid_ca = false; options.AddOption('\0', "gridca", istring("force using CA certificates configuration for Grid services (typically IGTF)"), force_grid_ca); bool force_any_ca = false; options.AddOption('\0', "anyca", istring("force using both CA certificates configuration for Grid services (typically IGTF) and those provided by OpenSSL"), force_any_ca); bool allow_insecure_connection = false; options.AddOption('\0', "allowinsecureconnection", istring("allow TLS connection which failed verification"), allow_insecure_connection); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcrename", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // credentials will be initialised later if necessary Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY()); if (force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (allow_insecure_connection) usercfg.TLSAllowInsecure(true); AuthenticationType authentication_type = UndefinedAuthentication; if(!getAuthenticationType(logger, usercfg, no_authentication, x509_authentication, token_authentication, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if (debug.empty() && !usercfg.Verbosity().empty()) 
Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity()));
if (params.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; }
std::string oldurl(params.front()); std::string newurl(params.back());
if (!arcrename(oldurl, newurl, usercfg, timeout)) return 1;
return 0; }
int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; }
nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcls.cpp0000644000000000000000000000013215067751327020324 xustar0030 mtime=1759498967.680317736 30 atime=1759498967.826493058 30 ctime=1759499031.136582579 nordugrid-arc-7.1.1/src/clients/data/arcls.cpp0000644000175000002070000003777515067751327022241 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*-
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h"
static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcls");
void print_urls(const Arc::FileInfo& file) { for (std::list<Arc::URL>::const_iterator u = file.GetURLs().begin(); u != file.GetURLs().end(); ++u) std::cout << "\t" << *u << std::endl; }
void print_meta(const Arc::FileInfo& file) { std::map<std::string, std::string> md = file.GetMetaData(); for (std::map<std::string, std::string>::iterator mi = md.begin(); mi != md.end(); ++mi) std::cout<<mi->first<<":"<<mi->second<<std::endl; }
void print_details(const std::list<Arc::FileInfo>& files, bool show_urls, bool show_meta) { if (files.empty()) return; unsigned int namewidth = 0; unsigned int sizewidth = 0; unsigned int csumwidth = 0;
// find longest length of each field to align the output
for (std::list<Arc::FileInfo>::const_iterator i = files.begin(); i != files.end(); ++i) { if (i->GetName().length() > namewidth) namewidth = i->GetName().length(); if (i->CheckSize() && i->GetSize() > 0 && // log(0) not good!
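// (for n >= 1 the decimal digit count is (unsigned int)(log10(n)) + 1,
//  e.g. a 1000-byte size gives log10 = 3, hence a column width of 4)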
(unsigned int)(log10(i->GetSize()))+1 > sizewidth) sizewidth = (unsigned int)(log10(i->GetSize()))+1; if (i->CheckCheckSum() && i->GetCheckSum().length() > csumwidth) csumwidth = i->GetCheckSum().length(); } std::cout << std::setw(namewidth) << std::left << " "; std::cout << " "; std::cout << std::setw(sizewidth + 4) << std::left << " "; std::cout << " "; std::cout << " "; std::cout << std::setw(csumwidth) << std::right << ""; std::cout << std::endl; // set minimum widths to accommodate headers if (namewidth < 7) namewidth = 7; if (sizewidth < 7) sizewidth = 7; if (csumwidth < 8) csumwidth = 8; for (std::list::const_iterator i = files.begin(); i != files.end(); ++i) { std::cout << std::setw(namewidth) << std::left << i->GetName(); switch (i->GetType()) { case Arc::FileInfo::file_type_file: std::cout << " file"; break; case Arc::FileInfo::file_type_dir: std::cout << " dir"; break; default: std::cout << " (n/a)"; break; } if (i->CheckSize()) { std::cout << " " << std::setw(sizewidth) << std::right << Arc::tostring(i->GetSize()); } else { std::cout << " " << std::setw(sizewidth) << std::right << " (n/a)"; } if (i->CheckModified()) { std::cout << " " << i->GetModified(); } else { std::cout << " (n/a) "; } if (i->CheckCheckSum()) { std::cout << " " << std::setw(csumwidth) << std::left << i->GetCheckSum(); } else { std::cout << " " << std::setw(csumwidth) << std::left << " (n/a)"; } if (i->CheckLatency()) { std::cout << " " << i->GetLatency(); } else { std::cout << " (n/a)"; } std::cout << std::endl; if (show_urls) print_urls(*i); if (show_meta) print_meta(*i); } } static bool arcls(const Arc::URL& dir_url, Arc::UserConfig& usercfg, bool show_details, // longlist bool show_urls, // locations bool show_meta, // metadata bool no_list, // don't list dirs bool force_list, // force dir list bool check_access, // checkaccess int recursion, // recursion int timeout) { // timeout if (!dir_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", dir_url.fullstr()); return false; } if (dir_url.Protocol() == "urllist") { std::list dirs = Arc::ReadURLList(dir_url); if (dirs.empty()) { logger.msg(Arc::ERROR, "Can't read list of locations from file %s", dir_url.Path()); return false; } bool r = true; for (std::list::iterator dir = dirs.begin(); dir != dirs.end(); ++dir) { if(!arcls(*dir, usercfg, show_details, show_urls, show_meta, no_list, force_list, check_access, recursion, timeout)) r = false; } return r; } Arc::DataHandle url(dir_url, usercfg); if (!url) { logger.msg(Arc::ERROR, "Unsupported URL given"); return false; } if (url->RequiresCredentials()) { if(!initProxy(logger, usercfg, dir_url)) return false; } url->SetSecure(false); if(check_access) { std::cout << dir_url << " - "; if(url->Check(false)) { std::cout << "passed" << std::endl; return true; } else { std::cout << "failed" << std::endl; return false; } } Arc::DataPoint::DataPointInfoType verb = (Arc::DataPoint::DataPointInfoType) (Arc::DataPoint::INFO_TYPE_MINIMAL | Arc::DataPoint::INFO_TYPE_NAME); if(show_urls) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_STRUCT); if(show_meta) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_ALL); if(show_details) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_TYPE | Arc::DataPoint::INFO_TYPE_TIMES | Arc::DataPoint::INFO_TYPE_CONTENT | Arc::DataPoint::INFO_TYPE_CKSUM | Arc::DataPoint::INFO_TYPE_ACCESS); if(recursion > 0) verb = (Arc::DataPoint::DataPointInfoType) (verb | Arc::DataPoint::INFO_TYPE_TYPE); Arc::DataStatus res; 
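// The listing logic below chooses between Stat() and List():
// - with --nolist only the requested object itself is queried;
// - with --forcelist the URL is treated as a directory and List() must succeed;
// - otherwise Stat() runs first: a plain file is reported directly, listing is
//   attempted while the object may still be a directory, and if listing fails
//   the earlier Stat() result (if any) is reported instead.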
Arc::FileInfo file; std::list files; if(no_list) { // only requested object is queried res = url->Stat(file, verb); if(res) files.push_back(file); } else if(force_list) { // assume it is directory, fail otherwise res = url->List(files, verb); } else { // try to guess what to do res = url->Stat(file, (Arc::DataPoint::DataPointInfoType)(verb | Arc::DataPoint::INFO_TYPE_TYPE)); if(res && (file.GetType() == Arc::FileInfo::file_type_file)) { // If it is file and we are sure, then just report it. files.push_back(file); } else { // If it is dir then we must list it. But if stat failed or // if type is undefined there is still chance it is directory. Arc::DataStatus res_ = url->List(files, verb); if(!res_) { // If listing failed maybe simply report previous result if any. if(res) { files.push_back(file); } } else { res = res_; } } } if (!res) { if (files.empty()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); return false; } logger.msg(Arc::INFO, "Warning: " "Failed listing files but some information is obtained"); } files.sort(); // Sort alphabetically by name if (show_details) { print_details(files, show_urls, show_meta); } else { for (std::list::iterator i = files.begin(); i != files.end(); ++i) { std::cout << i->GetName() << std::endl; if (show_urls) print_urls(*i); if (show_meta) print_meta(*i); } } // Do recursion. Recursion has no sense if listing is forbidden. if ((recursion > 0) && (!no_list)) { for (std::list::iterator i = files.begin(); i != files.end(); ++i) { if (i->GetType() == Arc::FileInfo::file_type_dir) { Arc::URL suburl = dir_url; if(suburl.Protocol() != "file") { if (suburl.Path()[suburl.Path().length() - 1] != '/') suburl.ChangePath(suburl.Path() + "/" + i->GetName()); else suburl.ChangePath(suburl.Path() + i->GetName()); } else { if (suburl.Path()[suburl.Path().length() - 1] != G_DIR_SEPARATOR) suburl.ChangePath(suburl.Path() + G_DIR_SEPARATOR_S + i->GetName()); else suburl.ChangePath(suburl.Path() + i->GetName()); } std::cout << std::endl; std::cout << suburl.str() << ":" << std::endl; arcls(suburl, usercfg, show_details, show_urls, show_meta, no_list, force_list, check_access, recursion - 1, timeout); std::cout << std::endl; } } } return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); static Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("url"), istring("The arcls command is used for listing " "files in grid storage elements " "and file\nindex catalogues.")); bool longlist = false; options.AddOption('l', "long", istring("long format (more information)"), longlist); bool locations = false; options.AddOption('L', "locations", istring("show URLs of file locations"), locations); bool metadata = false; options.AddOption('m', "metadata", istring("display all available metadata"), metadata); bool infinite_recursion = false; options.AddOption('r', "recursive", istring("operate recursively"), infinite_recursion); int recursion = 0; options.AddOption('D', "depth", istring("operate recursively up to specified level"), istring("level"), recursion); bool nolist = false; options.AddOption('n', "nolist", istring("show only description of requested object, do not list content of directories"), nolist); bool forcelist = false; options.AddOption('f', "forcelist", 
istring("treat requested object as directory and always try to list content"), forcelist); bool checkaccess = false; options.AddOption('c', "checkaccess", istring("check readability of object, does not show any information about object"), checkaccess); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); bool no_authentication = false; options.AddOption('\0', "no-authentication", istring("do not perform any authentication for opened connections"), no_authentication); bool x509_authentication = false; options.AddOption('\0', "x509-authentication", istring("perform X.509 authentication for opened connections"), x509_authentication); bool token_authentication = false; options.AddOption('\0', "token-authentication", istring("perform token authentication for opened connections"), token_authentication); bool force_system_ca = false; options.AddOption('\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca); bool force_grid_ca = false; options.AddOption('\0', "gridca", istring("force using CA certificates configuration for Grid services (typically IGTF)"), force_grid_ca); bool force_any_ca = false; options.AddOption('\0', "anyca", istring("force using both CA certificates configuration for Grid services (typically IGTF) and those provided by OpenSSL"), force_any_ca); bool allow_insecure_connection = false; options.AddOption('\0', "allowinsecureconnection", istring("allow TLS connection which failed verification"), allow_insecure_connection); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcls", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug));
logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments());
if (show_plugins) { std::list<Arc::ModuleDesc> modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list<Arc::ModuleDesc>::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list<Arc::PluginDesc>::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; }
// credentials will be initialised later if necessary
Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials);
if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; }
usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY());
if (force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); }
if (force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); }
if (force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); }
if (allow_insecure_connection) usercfg.TLSAllowInsecure(true);
AuthenticationType authentication_type = UndefinedAuthentication;
if(!getAuthenticationType(logger, usercfg, no_authentication, x509_authentication, token_authentication, authentication_type)) return 1;
switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; }
if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity()));
// Analyze options
if (params.size() != 1) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; }
if(forcelist && nolist) { logger.msg(Arc::ERROR, "Incompatible options --nolist and --forcelist requested"); return 1; }
if(recursion && nolist) { logger.msg(Arc::ERROR, "Requesting recursion and --nolist has no sense"); return 1; }
if(infinite_recursion) recursion = INT_MAX;
std::list<std::string>::iterator it = params.begin();
if(!arcls(*it, usercfg, longlist, locations, metadata, nolist, forcelist, checkaccess, recursion, timeout)) return 1;
return 0; }
int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; }
nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcrm.1.in0000644000000000000000000000013115067751327020306 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.143051545 nordugrid-arc-7.1.1/src/clients/data/arcrm.1.in0000644000175000002070000000435015067751327022213 0ustar00mockbuildmock00000000000000[NAME]
arcrm \- delete files
[EXTENDED DESCRIPTION]
The
.B arcrm
command deletes files on grid storage elements and indexing services. In the case of an indexing service url, all physical instances of the file corresponding to the given locations are deleted and unregistered. If an indexing service url is given without locations, all physical instances and all meta-information about the file are deleted.
For more information on ARC URL syntax please read "Protocols, Uniform Resource Locators (URL) and Extensions Supported in ARC" [NORDUGRID-TECH-7]

If
.B url
starts with '@', the remaining argument is assumed to be a path to a local file containing a list of URLs, one per line. In this case
.B arcrm
performs as if it were called with all those URLs as arguments.

Depending on the installed libraries (check with
.B -P
), the following protocols may be used: file (file:// prefix may be omitted), http, https, httpg, ftp, gsiftp, srm, root.
[FILES]
.TP
.B ~/.arc/client.conf
Some options can be given default values by specifying them in the ARC client configuration file. By using the
.B --conffile
option a configuration file other than the default can be used.
[ENVIRONMENT VARIABLES]
.TP
.B X509_USER_PROXY
The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location.
.TP
.B ARC_LOCATION
The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used.
.TP
.B ARC_PLUGIN_PATH
The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows).
[EXAMPLE]
arcrm gsiftp://example.com/grid/file.dat
[NOTES]
Lack of recursion is a feature.
[COPYRIGHT]
APACHE LICENSE Version 2.0
[AUTHOR]
ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org
[SEE ALSO]
.BR arccp (1),
.BR arcls (1),
.BR arcmkdir (1),
.BR arcrename (1)
nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcrename.1.in0000644000000000000000000000013115067751327021137 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.145205231 nordugrid-arc-7.1.1/src/clients/data/arcrename.1.in0000644000175000002070000000444015067751327023044 0ustar00mockbuildmock00000000000000[NAME]
arcrename \- rename file or directory
[EXTENDED DESCRIPTION]
The
.B arcrename
command renames files or directories on grid storage elements and indexing services. The path component of
.B oldurl
and
.B newurl
must differ and it must be the only component of both URLs which is different.
.B arcrename
will exit with an error if the paths are equivalent or other components of the URLs are different. Renaming a URL to an existing URL will either fail or overwrite the existing URL, depending on the protocol.
.B arcrename
works purely at the namespace level and does not perform data transfer.

For more information on ARC URL syntax please read "Protocols, Uniform Resource Locators (URL) and Extensions Supported in ARC" [NORDUGRID-TECH-7]

Depending on the installed libraries (check with
.B -P
), the following protocols may be used: file (file:// prefix may be omitted), http, https, httpg, ftp, gsiftp, srm, root. However renaming is not supported or implemented for some of those protocols.
[FILES]
.TP
.B ~/.arc/client.conf
Some options can be given default values by specifying them in the ARC client configuration file. By using the
.B --conffile
option a configuration file other than the default can be used.
[ENVIRONMENT VARIABLES]
.TP
.B X509_USER_PROXY
The location of the user's Grid proxy file.
Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). [EXAMPLE] arcrename gsiftp://example.com/grid/file.dat gsiftp://example.com/grid/new.file.dat [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccp (1), .BR arcls (1), .BR arcmkdir(1), .BR arcrm(1) nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arccp.cpp0000644000000000000000000000013215067751327020310 xustar0030 mtime=1759498967.680317736 30 atime=1759498967.825493043 30 ctime=1759499031.133470272 nordugrid-arc-7.1.1/src/clients/data/arccp.cpp0000644000175000002070000006574315067751327022231 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arccp"); static Arc::SimpleCondition cond; static bool cancelled = false; static void sig_cancel(int) { if (cancelled) _exit(0); cancelled = true; cond.broadcast(); } static void progress(FILE *o, const char*, unsigned int, unsigned long long int all, unsigned long long int max, double, double) { static int rs = 0; const char rs_[4] = { '|', '/', '-', '\\' }; if (max) { fprintf(o, "\r|"); unsigned int l = (74 * all + 37) / max; if (l > 74) l = 74; unsigned int i = 0; for (; i < l; i++) fprintf(o, "="); fprintf(o, "%c", rs_[rs++]); if (rs > 3) rs = 0; for (; i < 74; i++) fprintf(o, " "); fprintf(o, "|\r"); fflush(o); return; } fprintf(o, "\r%llu kB \r", all / 1024); } static void transfer_cb(unsigned long long int bytes_transferred) { fprintf (stderr, "\r%llu kB \r", bytes_transferred / 1024); } static void mover_callback(Arc::DataMover* mover, Arc::DataStatus status, void* arg) { Arc::DataStatus* res = (Arc::DataStatus*)arg; *res = status; if (!res->Passed()) { logger.msg(Arc::ERROR, "Current transfer FAILED: %s", std::string(*res)); if (res->Retryable()) { logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); } } cond.broadcast(); } bool arctransfer(const Arc::URL& source_url, const Arc::URL& destination_url, const std::list& locations, Arc::UserConfig& usercfg, bool secure, bool passive, bool verbose, int timeout) { if (!source_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", source_url.str()); return false; } if (!destination_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", destination_url.str()); return false; } // Credentials are always required for 3rd party transfer if (!initProxy(logger, usercfg, source_url)) return false; if (timeout > 0) usercfg.Timeout(timeout); Arc::DataStatus res = Arc::DataPoint::Transfer3rdParty(source_url, destination_url, usercfg, verbose ? 
&transfer_cb : NULL); if (verbose) std::cerr< sources = Arc::ReadURLList(source_url); std::list destinations = Arc::ReadURLList(destination_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } if (sources.size() != destinations.size()) { logger.msg(Arc::ERROR, "Numbers of sources and destinations do not match"); return false; } bool r = true; for (std::list::iterator source = sources.begin(), destination = destinations.begin(); (source != sources.end()) && (destination != destinations.end()); ++source, ++destination) { if (!arcregister(*source, *destination, usercfg, force_meta)) r = false; if (cancelled) return true; } return r; } if (source_url.Protocol() == "urllist") { std::list sources = Arc::ReadURLList(source_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } bool r = true; for (std::list::iterator source = sources.begin(); source != sources.end(); ++source) { if (!arcregister(*source, destination_url, usercfg, force_meta)) r = false; if (cancelled) return true; } return r; } if (destination_url.Protocol() == "urllist") { std::list destinations = Arc::ReadURLList(destination_url); if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } bool r = true; for (std::list::iterator destination = destinations.begin(); destination != destinations.end(); ++destination) { if (!arcregister(source_url, *destination, usercfg, force_meta)) r = false; if (cancelled) return true; } return r; } if (destination_url.Path()[destination_url.Path().length() - 1] == '/') { logger.msg(Arc::ERROR, "Fileset registration is not supported yet"); return false; } Arc::DataHandle source(source_url, usercfg); Arc::DataHandle destination(destination_url, usercfg); if (!source) { logger.msg(Arc::ERROR, "Unsupported source url: %s", source_url.str()); return false; } if (!destination) { logger.msg(Arc::ERROR, "Unsupported destination url: %s", destination_url.str()); return false; } if ((source->RequiresCredentials() || destination->RequiresCredentials()) && !initProxy(logger, usercfg, source_url)) return false; if (source->IsIndex() || !destination->IsIndex()) { logger.msg(Arc::ERROR, "For registration source must be ordinary URL" " and destination must be indexing service"); return false; } // Obtain meta-information about source Arc::FileInfo fileinfo; Arc::DataPoint::DataPointInfoType verb = (Arc::DataPoint::DataPointInfoType)Arc::DataPoint::INFO_TYPE_CONTENT; Arc::DataStatus res = source->Stat(fileinfo, verb); if (!res) { logger.msg(Arc::ERROR, "Could not obtain information about source: %s", std::string(res)); return false; } // Check if destination is already registered if (destination->Resolve(true)) { // Check meta-info matches source if (!destination->CompareMeta(*source) && !force_meta) { logger.msg(Arc::ERROR, "Metadata of source does not match existing " "destination. 
Use the --force option to override this."); return false; } // Remove existing locations destination->ClearLocations(); } bool replication = destination->Registered(); destination->SetMeta(*source); // pass metadata // Add new location std::string metaname = source_url.ConnectionURL(); if (!destination->AddLocation(source_url, metaname)) { logger.msg(Arc::ERROR, "Failed to accept new file/destination"); return false; } destination->SetTries(1); res = destination->PreRegister(replication, force_meta); if (!res) { logger.msg(Arc::ERROR, "Failed to register new file/destination: %s", std::string(res)); return false; } res = destination->PostRegister(replication); if (!res) { destination->PreUnregister(replication); logger.msg(Arc::ERROR, "Failed to register new file/destination: %s", std::string(res)); return false; } return true; } static Arc::DataStatus do_mover(const Arc::URL& s_url, const Arc::URL& d_url, const std::list& locations, const std::string& cache_dir, Arc::UserConfig& usercfg, bool secure, bool passive, bool force_meta, int tries, bool verbose, int timeout) { Arc::DataHandle source(s_url, usercfg); Arc::DataHandle destination(d_url, usercfg); if (!source) { logger.msg(Arc::ERROR, "Unsupported source url: %s", s_url.str()); return Arc::DataStatus::ReadAcquireError; } if (!destination) { logger.msg(Arc::ERROR, "Unsupported destination url: %s", d_url.str()); return Arc::DataStatus::WriteAcquireError; } if ((source->RequiresCredentials() || destination->RequiresCredentials()) && !initProxy(logger, usercfg, s_url)) return Arc::DataStatus::CredentialsExpiredError; if (!locations.empty()) { std::string meta(destination->GetURL().Protocol()+"://"+destination->GetURL().Host()); for (std::list::const_iterator i = locations.begin(); i != locations.end(); ++i) { destination->AddLocation(*i, meta); } } Arc::DataMover mover; mover.secure(secure); mover.passive(passive); mover.verbose(verbose); mover.force_to_meta(force_meta); if (tries) { mover.retry(true); // go through all locations source->SetTries(tries); // try all locations "tries" times destination->SetTries(tries); } Arc::User cache_user; Arc::FileCache cache; if (!cache_dir.empty()) cache = Arc::FileCache(cache_dir+" .", "", cache_user.get_uid(), cache_user.get_gid()); if (verbose) mover.set_progress_indicator(&progress); Arc::DataStatus callback_res; Arc::URLMap url_map; Arc::DataStatus res = mover.Transfer(*source, *destination, cache, url_map, 0, 0, 0, timeout, &mover_callback, &callback_res); if (!res.Passed()) { logger.msg(Arc::ERROR, "Current transfer FAILED: %s", std::string(res)); if (res.Retryable()) { logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); } return res; } cond.wait(); // wait for mover_callback if (verbose) std::cerr<& locations, const std::string& cache_dir, Arc::UserConfig& usercfg, bool secure, bool passive, bool force_meta, int recursion, int tries, bool verbose, int timeout) { Arc::URL source_url(source_url_); if (!source_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", source_url.str()); return false; } Arc::URL destination_url(destination_url_); if (!destination_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", destination_url.str()); return false; } if (timeout <= 0) timeout = 300; // 5 minute default if (tries < 0) tries = 0; if (source_url.Protocol() == "urllist" && destination_url.Protocol() == "urllist") { std::list sources = Arc::ReadURLList(source_url); std::list destinations = Arc::ReadURLList(destination_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't 
static bool arccp(const Arc::URL& source_url_, const Arc::URL& destination_url_, const std::list<std::string>& locations, const std::string& cache_dir, Arc::UserConfig& usercfg, bool secure, bool passive, bool force_meta, int recursion, int tries, bool verbose, int timeout) { Arc::URL source_url(source_url_); if (!source_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", source_url.str()); return false; } Arc::URL destination_url(destination_url_); if (!destination_url) { logger.msg(Arc::ERROR, "Invalid URL: %s", destination_url.str()); return false; } if (timeout <= 0) timeout = 300; // 5 minute default if (tries < 0) tries = 0; if (source_url.Protocol() == "urllist" && destination_url.Protocol() == "urllist") { std::list<Arc::URL> sources = Arc::ReadURLList(source_url); std::list<Arc::URL> destinations = Arc::ReadURLList(destination_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } if (sources.size() != destinations.size()) { logger.msg(Arc::ERROR, "Numbers of sources and destinations do not match"); return false; } bool r = true; for (std::list<Arc::URL>::iterator source = sources.begin(), destination = destinations.begin(); (source != sources.end()) && (destination != destinations.end()); ++source, ++destination) { if (!arccp(*source, *destination, locations, cache_dir, usercfg, secure, passive, force_meta, recursion, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } if (source_url.Protocol() == "urllist") { std::list<Arc::URL> sources = Arc::ReadURLList(source_url); if (sources.empty()) { logger.msg(Arc::ERROR, "Can't read list of sources from file %s", source_url.Path()); return false; } bool r = true; for (std::list<Arc::URL>::iterator source = sources.begin(); source != sources.end(); ++source) { if (!arccp(*source, destination_url, locations, cache_dir, usercfg, secure, passive, force_meta, recursion, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } if (destination_url.Protocol() == "urllist") { std::list<Arc::URL> destinations = Arc::ReadURLList(destination_url); if (destinations.empty()) { logger.msg(Arc::ERROR, "Can't read list of destinations from file %s", destination_url.Path()); return false; } bool r = true; for (std::list<Arc::URL>::iterator destination = destinations.begin(); destination != destinations.end(); ++destination) { if (!arccp(source_url, *destination, locations, cache_dir, usercfg, secure, passive, force_meta, recursion, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } if (destination_url.Path()[destination_url.Path().length() - 1] != '/') { if (source_url.Path()[source_url.Path().length() - 1] == '/' && source_url.MetaDataOption("guid").empty()) { // files specified by guid may have path '/' logger.msg(Arc::ERROR, "Fileset copy to single object is not supported yet"); return false; } } else { // Copy TO fileset/directory if (source_url.Path()[source_url.Path().length() - 1] != '/') { // Copy FROM single object std::string::size_type p = source_url.Path().rfind('/'); if (p == std::string::npos) { logger.msg(Arc::ERROR, "Can't extract object's name from source url"); return false; } destination_url.ChangePath(destination_url.Path() + source_url.Path().substr(p + 1)); } else { // Fileset copy Arc::DataHandle source(source_url, usercfg); if (!source) { logger.msg(Arc::ERROR, "Unsupported source url: %s", source_url.str()); return false; } if (source->RequiresCredentials() && !initProxy(logger, usercfg, source_url)) return false; std::list<Arc::FileInfo> files; Arc::DataStatus result = source->List(files, (Arc::DataPoint::DataPointInfoType) (Arc::DataPoint::INFO_TYPE_NAME | Arc::DataPoint::INFO_TYPE_TYPE)); if (!result.Passed()) { logger.msg(Arc::ERROR, "%s. Cannot copy fileset", std::string(result)); return false; }
bool failures = false; // Handle transfer of files first (treat unknown like files) for (std::list<Arc::FileInfo>::iterator i = files.begin(); i != files.end(); ++i) { if ((i->GetType() != Arc::FileInfo::file_type_unknown) && (i->GetType() != Arc::FileInfo::file_type_file)) continue; logger.msg(Arc::INFO, "Name: %s", i->GetName()); Arc::URL s_url(std::string(source_url.str() + i->GetName())); Arc::URL d_url(std::string(destination_url.str() + i->GetName())); logger.msg(Arc::INFO, "Source: %s", s_url.str()); logger.msg(Arc::INFO, "Destination: %s", d_url.str()); Arc::DataStatus res = do_mover(s_url, d_url, locations, cache_dir, usercfg, secure, passive, force_meta, tries, verbose, timeout); if (cancelled) return true; if (!res.Passed()) failures = true; else logger.msg(Arc::INFO, "Current transfer complete"); } if (failures) { logger.msg(Arc::ERROR, "Some transfers failed"); return false; } // Go deeper if allowed bool r = true; if (recursion > 0) // Handle directories recursively for (std::list<Arc::FileInfo>::iterator i = files.begin(); i != files.end(); ++i) { if (i->GetType() != Arc::FileInfo::file_type_dir) continue; if (verbose) logger.msg(Arc::INFO, "Directory: %s", i->GetName()); std::string s_url(source_url.str()); std::string d_url(destination_url.str()); s_url += i->GetName(); d_url += i->GetName(); s_url += "/"; d_url += "/"; if (!arccp(s_url, d_url, locations, cache_dir, usercfg, secure, passive, force_meta, recursion - 1, tries, verbose, timeout)) r = false; if (cancelled) return true; } return r; } } Arc::DataStatus res = do_mover(source_url, destination_url, locations, cache_dir, usercfg, secure, passive, force_meta, tries, verbose, timeout); if (cancelled) return true; if (!res.Passed()) return false; logger.msg(Arc::INFO, "Transfer complete"); return true; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); // set signal handlers for safe cancellation signal(SIGTERM, sig_cancel); signal(SIGINT, sig_cancel); static Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("source destination"), istring("The arccp command copies files to, from " "and between grid storage elements.")); bool passive = false; options.AddOption('p', "passive", istring("use passive transfer (off by default if secure " "is on, on by default if secure is not requested)"), passive); bool notpassive = false; options.AddOption('n', "nopassive", istring("do not try to force passive transfer"), notpassive); bool force = false; options.AddOption('f', "force", istring("force overwrite of existing destination"), force); bool verbose = false; options.AddOption('i', "indicate", istring("show progress indicator"), verbose); bool nocopy = false; options.AddOption('T', "notransfer", istring("do not transfer, but register source into " "destination. destination must be a meta-url."), nocopy);
bool secure = false; options.AddOption('u', "secure", istring("use secure transfer (insecure by default)"), secure); std::string cache_path; options.AddOption('y', "cache", istring("path to local cache (use to put file into cache)"), istring("path"), cache_path); bool infinite_recursion = false; options.AddOption('r', "recursive", istring("operate recursively"), infinite_recursion); int recursion = 0; options.AddOption('D', "depth", istring("operate recursively up to specified level"), istring("level"), recursion); int retries = 0; options.AddOption('R', "retries", istring("number of retries before failing file transfer"), istring("number"), retries); std::list<std::string> locations; options.AddOption('L', "location", istring("physical location to write to when destination is an indexing service." " Must be specified for indexing services which do not automatically" " generate physical locations. Can be specified multiple times -" " locations will be tried in order until one succeeds."), istring("URL"), locations); bool thirdparty = false; options.AddOption('3', "thirdparty", istring("perform third party transfer, where the destination pulls" " from the source (only available with GFAL plugin)"), thirdparty); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); bool no_authentication = false; options.AddOption('\0', "no-authentication", istring("do not perform any authentication for opened connections"), no_authentication); bool x509_authentication = false; options.AddOption('\0', "x509-authentication", istring("perform X.509 authentication for opened connections"), x509_authentication); bool token_authentication = false; options.AddOption('\0', "token-authentication", istring("perform token authentication for opened connections"), token_authentication); bool force_system_ca = false; options.AddOption('\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca); bool force_grid_ca = false; options.AddOption('\0', "gridca", istring("force using CA certificates configuration for Grid services (typically IGTF)"), force_grid_ca); bool force_any_ca = false; options.AddOption('\0', "anyca", istring("force using both CA certificates configuration for Grid services (typically IGTF) and those provided by OpenSSL"), force_any_ca); bool allow_insecure_connection = false; options.AddOption('\0', "allowinsecureconnection", istring("allow TLS connection which failed verification"), allow_insecure_connection); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list<std::string> params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arccp", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration.
if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list<Arc::ModuleDesc> modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list<Arc::ModuleDesc>::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list<Arc::PluginDesc>::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // Attempt to acquire credentials. Whether they are required will be // determined later depending on the protocol. Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY()); if (force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (allow_insecure_connection) usercfg.TLSAllowInsecure(true); AuthenticationType authentication_type = UndefinedAuthentication; if(!getAuthenticationType(logger, usercfg, no_authentication, x509_authentication, token_authentication, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if (debug.empty() && !usercfg.Verbosity().empty()) { Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); } if (params.size() != 2) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } if (passive && notpassive) { logger.msg(Arc::ERROR, "Options 'p' and 'n' can't be used simultaneously"); return 1; } if ((!secure) && (!notpassive)) passive = true; if (infinite_recursion) recursion = INT_MAX; std::list<std::string>::iterator it = params.begin(); std::string source = *it; ++it; std::string destination = *it; if (source == "-") source = "stdio:///stdin"; if (destination == "-") destination = "stdio:///stdout"; if (thirdparty) { if (!arctransfer(source, destination, locations, usercfg, secure, passive, verbose, timeout)) return 1; } else if (nocopy) { if (!arcregister(source, destination, usercfg, force)) return 1; } else { if (!arccp(source, destination, locations, cache_path, usercfg, secure, passive, force, recursion, retries + 1, verbose, timeout)) return 1; } return 0; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcrm.cpp0000644000000000000000000000013115067751327020323 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.139831728 nordugrid-arc-7.1.1/src/clients/data/arcrm.cpp0000644000175000002070000002075215067751327022234 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include <config.h> #endif
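// arcrm: deletes files on grid storage elements and, where the URL points at an indexing service, removes the corresponding logical file name registrations.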
#include <iostream> #include <list> #include <string> #include <arc/ArcLocation.h> #include <arc/IString.h> #include <arc/Logger.h> #include <arc/OptionParser.h> #include <arc/StringConv.h> #include <arc/URL.h> #include <arc/UserConfig.h> #include <arc/data/DataHandle.h> #include <arc/data/DataMover.h> #include <arc/loader/FinderLoader.h> #include <arc/loader/Plugin.h> #include "utils.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcrm"); /// Returns number of files that failed to be deleted int arcrm(const std::list<Arc::URL>& urls, Arc::UserConfig& usercfg, bool errcont) { Arc::DataHandle* handle = NULL; Arc::DataMover mover; unsigned int failed = 0; for (std::list<Arc::URL>::const_iterator url = urls.begin(); url != urls.end(); ++url) { if (!(*url)) { logger.msg(Arc::ERROR, "Invalid URL: %s", url->str()); failed++; continue; } if (url->Protocol() == "urllist") { std::list<Arc::URL> url_files = Arc::ReadURLList(*url); if (url_files.empty()) { logger.msg(Arc::ERROR, "Can't read list of locations from file %s", url->Path()); failed += 1; } else { failed += arcrm(url_files, usercfg, errcont); } continue; } // Depending on protocol SetURL() may allow reusing connections and hence // the same DataHandle object to delete multiple files. If it is not // supported SetURL() returns false and a new DataHandle must be created. if (!handle || !(*handle)->SetURL(*url)) { delete handle; handle = new Arc::DataHandle(*url, usercfg); if (!(*handle)) { logger.msg(Arc::ERROR, "Unsupported URL given: %s", url->str()); failed++; delete handle; handle = NULL; continue; } if ((*handle)->RequiresCredentials()) { if(!initProxy(logger, usercfg, *url)) { failed++; delete handle; handle = NULL; continue; } } } // only one try (*handle)->SetTries(1); Arc::DataStatus res = mover.Delete(**handle, errcont); if (!res.Passed()) { logger.msg(Arc::ERROR, std::string(res)); if (res.Retryable()) { logger.msg(Arc::ERROR, "This seems like a temporary error, please try again later"); } failed++; } } delete handle; return failed; } static int runmain(int argc, char **argv) { setlocale(LC_ALL, ""); static Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(istring("url [url ...]"), istring("The arcrm command deletes files on " "grid storage elements.")); bool force = false; options.AddOption('f', "force", istring("remove logical file name registration even " "if not all physical instances were removed"), force); bool show_plugins = false; options.AddOption('P', "listplugins", istring("list the available plugins (protocols supported)"), show_plugins); int timeout = 20; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); bool no_authentication = false; options.AddOption('\0', "no-authentication", istring("do not perform any authentication for opened connections"), no_authentication); bool x509_authentication = false; options.AddOption('\0', "x509-authentication", istring("perform X.509 authentication for opened connections"), x509_authentication); bool token_authentication = false; options.AddOption('\0', "token-authentication", istring("perform token authentication for opened connections"), token_authentication); bool force_system_ca = false; options.AddOption('\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca); bool force_grid_ca = false; options.AddOption('\0', "gridca", istring("force using CA certificates configuration for Grid services (typically
IGTF)"), force_grid_ca); bool force_any_ca = false; options.AddOption('\0', "anyca", istring("force using both CA certificates configuration for Grid services (typically IGTF) and those provided by OpenSSL"), force_any_ca); bool allow_insecure_connection = false; options.AddOption('\0', "allowinsecureconnection", istring("allow TLS connection which failed verification"), allow_insecure_connection); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list params = options.Parse(argc, argv); if (version) { std::cout << Arc::IString("%s version %s", "arcrm", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!debug.empty()) { Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); } logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); if (show_plugins) { std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind("HED:DMC", modules); std::cout << Arc::IString("Protocol plugins available:") << std::endl; for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name << " - " << itPlug->description << std::endl; } } return 0; } // credentials will be initialised later if necessary Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType::TryCredentials); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } usercfg.UtilsDirPath(Arc::UserConfig::ARCUSERDIRECTORY()); usercfg.Timeout(timeout); if (force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (allow_insecure_connection) usercfg.TLSAllowInsecure(true); AuthenticationType authentication_type = UndefinedAuthentication; if(!getAuthenticationType(logger, usercfg, no_authentication, x509_authentication, token_authentication, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if (debug.empty() && !usercfg.Verbosity().empty()) { Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); } if (params.empty()) { logger.msg(Arc::ERROR, "Wrong number of parameters specified"); return 1; } std::list urls; for (std::list::const_iterator i = params.begin(); i != params.end(); ++i) { urls.push_back(*i); } unsigned int failed = arcrm(urls, usercfg, force); if (failed != 0) { if (params.size() != 1 || failed > 1) std::cout< #endif #include #include #include "utils.h" bool checkproxy(const Arc::UserConfig& uc) { if (!uc.ProxyPath().empty() ) { Arc::Credential holder(uc.ProxyPath(), "", "", "", false, false); if 
bool getAuthenticationType(Arc::Logger& logger, Arc::UserConfig const& usercfg, bool no_authentication, bool x509_authentication, bool token_authentication, AuthenticationType& authentication_type) { authentication_type = UndefinedAuthentication; if(no_authentication) { if(authentication_type != UndefinedAuthentication) { logger.msg(Arc::ERROR, "Conflicting authentication types specified."); return false; } authentication_type = NoAuthentication; } if(x509_authentication) { if(authentication_type != UndefinedAuthentication) { logger.msg(Arc::ERROR, "Conflicting authentication types specified."); return false; } authentication_type = X509Authentication; } if(token_authentication) { if(authentication_type != UndefinedAuthentication) { logger.msg(Arc::ERROR, "Conflicting authentication types specified."); return false; } authentication_type = TokenAuthentication; } if(authentication_type == X509Authentication) { if (!checkproxy(usercfg)) { return false; } } else if(authentication_type == TokenAuthentication) { if (!checktoken(usercfg)) { return false; } } return true; } bool initProxy(Arc::Logger& logger, Arc::UserConfig& usercfg, const Arc::URL& file) { if(usercfg.CommunicationAuthType() != Arc::UserConfig::AuthTypeCert) { usercfg.InitializeCredentials(Arc::initializeCredentialsType::SkipCredentials); return true; } if (!usercfg.InitializeCredentials(Arc::initializeCredentialsType::RequireCredentials)) { logger.msg(Arc::ERROR, "Unable to handle %s", file.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } Arc::Credential holder(usercfg); if (!holder.IsValid()) { if (holder.GetEndTime() < Arc::Time()) { logger.msg(Arc::ERROR, "Proxy expired"); } logger.msg(Arc::ERROR, "Unable to handle %s", file.str()); logger.msg(Arc::ERROR, "Invalid credentials, please check proxy and/or CA certificates"); return false; } return true; } nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/utils.h0000644000000000000000000000013115067751327020024 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.135556386 nordugrid-arc-7.1.1/src/clients/data/utils.h0000644000175000002070000000131015067751327021722 0ustar00mockbuildmock00000000000000#ifndef __ARC_CLIENT_DATA_UTILS_H_ #define __ARC_CLIENT_DATA_UTILS_H_ #include <arc/Logger.h> #include <arc/UserConfig.h> bool checkproxy(const Arc::UserConfig& uc); bool checktoken(const Arc::UserConfig& uc); enum AuthenticationType { UndefinedAuthentication, NoAuthentication, X509Authentication, TokenAuthentication }; bool getAuthenticationType(Arc::Logger& logger, Arc::UserConfig const& usercfg, bool no_authentication, bool x509_authentication, bool token_authentication, AuthenticationType& authentication_type); bool initProxy(Arc::Logger& logger, Arc::UserConfig& usercfg, const Arc::URL& file); #endif // __ARC_CLIENT_DATA_UTILS_H_ nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arccp.1.in0000644000000000000000000000013215067751327020273 xustar0030 mtime=1759498967.680317736 30 atime=1759498967.825493043 30 ctime=1759499031.140895331 nordugrid-arc-7.1.1/src/clients/data/arccp.1.in0000644000175000002070000000552515067751327022204
0ustar00mockbuildmock00000000000000[NAME] arccp \- copy files [EXTENDED DESCRIPTION] The .B arccp command copies files to, from and between grid storage elements. It can also be used to register files in file index catalogues, either automatically as the result of a file transfer, or by using the .B --notransfer option. For more information on ARC URL syntax please read "Protocols, Uniform Resource Locators (URL) and Extensions Supported in ARC" [NORDUGRID-TECH-7]. If .B source and/or .B destination start with '@', the remaining argument is assumed to be a path to a local file containing a list of URLs, one per line. In this case .B arccp behaves as if it had been called multiple times, once for every URL. When copying directories both the source and destination must end with the directory separator character and a recursion option .B -r or .B -D must be specified. All data transfer goes through the machine of the caller of arccp, even in the case of two remote endpoints, unless the .B --thirdparty option is used. With this option credentials are delegated to the destination and it pulls the data directly from the source. The timeout option in this case applies to the entire transfer itself and default timeouts are used for connections to the remote endpoints. Note that third-party transfer is only possible if the GFAL2 plugin is installed. Depending on the installed libraries (check with .B -P ), the following protocols may be used: file (file:// prefix may be omitted), http, https, httpg, ftp, gsiftp, srm, root. To connect source or destination to standard input/output use '-' instead of a URL. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a configuration file other than the default can be used. [ENVIRONMENT VARIABLES] .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified, the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). [EXAMPLE] arccp -i gsiftp://example.com/grid/file1.dat /tmp/file1.dat
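Copy a directory and all its contents (both URLs must end with '/' and a recursion option is required):

arccp -r gsiftp://example.com/grid/dir/ /tmp/dir/

Assuming the local files /tmp/sources and /tmp/destinations each contain one URL per line, copy every listed source to the corresponding destination:

arccp @/tmp/sources @/tmp/destinations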
[COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arcls (1), .BR arcmkdir (1), .BR arcrename (1), .BR arcrm (1) nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/README0000644000000000000000000000013215067751327017374 xustar0030 mtime=1759498967.680317736 30 atime=1759498967.825493043 30 ctime=1759499031.132429066 nordugrid-arc-7.1.1/src/clients/data/README0000644000175000002070000000004015067751327021270 0ustar00mockbuildmock00000000000000CLI for basic data management nordugrid-arc-7.1.1/src/clients/data/PaxHeaders/arcmkdir.1.in0000644000000000000000000000013215067751327020777 xustar0030 mtime=1759498967.680317736 30 atime=1759498967.826493058 30 ctime=1759499031.144124572 nordugrid-arc-7.1.1/src/clients/data/arcmkdir.1.in0000644000175000002070000000423515067751327022705 0ustar00mockbuildmock00000000000000[NAME] arcmkdir \- create directories [EXTENDED DESCRIPTION] The .B arcmkdir command creates directories on grid storage elements and indexing services. If the parent directory does not exist and .B -p is not specified, arcmkdir is likely to fail, although the exact behaviour depends on the protocol. The permissions on the new directory are the server's default; if the protocol requires permissions to be specified, the directory is made readable/writable/searchable by the user only (the equivalent of 700 on a file system). If .B url starts with '@', the remaining argument is assumed to be a path to a local file containing a list of URLs, one per line. In this case .B arcmkdir behaves as if it had been called multiple times, once for every URL. Depending on the installed libraries (check with .B -P ), the following protocols may be used: file (file:// prefix may be omitted), http, https, httpg, ftp, gsiftp, srm, root. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a configuration file other than the default can be used. [ENVIRONMENT VARIABLES] .TP .B X509_USER_PROXY The location of the user's Grid proxy file. Shouldn't be set unless the proxy is in a non-standard location. .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified, the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). [EXAMPLE] arcmkdir gsiftp://example.com/grid/newdir [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccp (1), .BR arcls (1), .BR arcrename (1), .BR arcrm (1) nordugrid-arc-7.1.1/src/clients/PaxHeaders/client.bash_completion0000644000000000000000000000013215067751327022151 xustar0030 mtime=1759498967.676561668 30 atime=1759498967.824493028 30 ctime=1759499031.103082436 nordugrid-arc-7.1.1/src/clients/client.bash_completion0000644000175000002070000000311715067751327024055 0ustar00mockbuildmock00000000000000# # Completion for NorduGrid ARC command line tools: # _arccli() { local cur prev opts COMPREPLY=() command="${COMP_WORDS[0]}" cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" opts=`$command --help| grep -- --| cut -d" " -f 4| sed "s/=.*//"|uniq; \ $command --help| grep -- --| cut -d" " -f 3| sed "s/,//"|uniq` # debug levels completion if [ "x${prev}" = "x-d" -o "x${prev}" = "x--debug" ]; then COMPREPLY=( $(compgen -W "FATAL ERROR WARNING INFO VERBOSE DEBUG" ${cur}) ) return 0 fi # interface types completion if [ "x$command" = "xarcsub" -o "x$command" = "xarctest" ]; then if [ "x${prev}" = "x-T" -o "x${prev}" = "x--submission-endpoint-type" ]; then COMPREPLY=( $(compgen -W "emies arcrest gridftpjob internal" ${cur}) ) return 0 elif [ "x${prev}" = "x-Q" -o "x${prev}" = "x--info-endpoint-type" ]; then COMPREPLY=( $(compgen -W "emies arcrest ldap.nordugrid ldap.glue2 internal NONE" ${cur}) ) return 0 fi fi COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) } complete -F _arccli arccat complete -F _arccli arcclean complete -F _arccli arccp complete -F _arccli arcget complete -F _arccli arcinfo complete -F _arccli arckill complete -F _arccli arcls complete -F _arccli arcmkdir complete -F _arccli arcproxy complete -F _arccli arcrename complete -F _arccli arcrenew complete -F _arccli arcresume complete -F _arccli arcrm complete -F _arccli arcstat complete -F _arccli arcsub complete -F _arccli arcsync complete -F _arccli arctest nordugrid-arc-7.1.1/src/clients/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347017652 xustar0030 mtime=1759498983.489316567 30 atime=1759499019.587279566 30 ctime=1759499031.100054623 nordugrid-arc-7.1.1/src/clients/Makefile.in0000644000175000002070000007306615067751347021570 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/clients ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo 
"$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(arcsysconfdir)" \ "$(DESTDIR)$(bashcompletiondir)" "$(DESTDIR)$(exampledir)" DATA = $(arcsysconf_DATA) $(bashcompletion_DATA) $(example_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ 
GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE 
= @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @DATA_CLIENT_ENABLED_FALSE@DATA_CLIENT = @DATA_CLIENT_ENABLED_TRUE@DATA_CLIENT = data @CREDENTIALS_CLIENT_ENABLED_FALSE@CREDENTIALS_CLIENT = @CREDENTIALS_CLIENT_ENABLED_TRUE@CREDENTIALS_CLIENT = credentials @COMPUTE_CLIENT_ENABLED_FALSE@COMPUTE_CLIENT = @COMPUTE_CLIENT_ENABLED_TRUE@COMPUTE_CLIENT = compute @ARCREST_ENABLED_FALSE@PYARCREST = @ARCREST_ENABLED_TRUE@PYARCREST = pyarcrest SUBDIRS = $(DATA_CLIENT) $(CREDENTIALS_CLIENT) $(COMPUTE_CLIENT) $(PYARCREST) DIST_SUBDIRS = data credentials compute pyarcrest arcsysconfdir = $(sysconfdir)/arc arcsysconf_DATA = client.conf exampledir = $(pkgdatadir)/examples example_DATA = client.conf BASH_COMPLETION_SOURCE = client.bash_completion 
bashcompletiondir = $(bashcompdir) bashcompletion_DATA = arc-client-tools EXTRA_DIST = $(example_DATA) $(arcsysconf_DATA) $(BASH_COMPLETION_SOURCE) CLEANFILES = arc-client-tools all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-arcsysconfDATA: $(arcsysconf_DATA) @$(NORMAL_INSTALL) @list='$(arcsysconf_DATA)'; test -n "$(arcsysconfdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(arcsysconfdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(arcsysconfdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(arcsysconfdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(arcsysconfdir)" || exit $$?; \ done uninstall-arcsysconfDATA: @$(NORMAL_UNINSTALL) @list='$(arcsysconf_DATA)'; test -n "$(arcsysconfdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(arcsysconfdir)'; $(am__uninstall_files_from_dir) install-bashcompletionDATA: $(bashcompletion_DATA) @$(NORMAL_INSTALL) @list='$(bashcompletion_DATA)'; test -n "$(bashcompletiondir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bashcompletiondir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bashcompletiondir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bashcompletiondir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(bashcompletiondir)" || exit $$?; \ done uninstall-bashcompletionDATA: @$(NORMAL_UNINSTALL) @list='$(bashcompletion_DATA)'; test -n "$(bashcompletiondir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(bashcompletiondir)'; $(am__uninstall_files_from_dir) install-exampleDATA: $(example_DATA) @$(NORMAL_INSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(exampledir)'"; \ $(MKDIR_P) "$(DESTDIR)$(exampledir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(exampledir)'"; \ $(INSTALL_DATA) $$files 
"$(DESTDIR)$(exampledir)" || exit $$?; \ done uninstall-exampleDATA: @$(NORMAL_UNINSTALL) @list='$(example_DATA)'; test -n "$(exampledir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(exampledir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(DATA) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(arcsysconfdir)" "$(DESTDIR)$(bashcompletiondir)" "$(DESTDIR)$(exampledir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-arcsysconfDATA install-bashcompletionDATA \ install-exampleDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-arcsysconfDATA uninstall-bashcompletionDATA \ uninstall-exampleDATA .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-arcsysconfDATA \ install-bashcompletionDATA install-data install-data-am \ install-dvi install-dvi-am install-exampleDATA install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-arcsysconfDATA \ uninstall-bashcompletionDATA uninstall-exampleDATA .PRECIOUS: Makefile arc-client-tools: $(BASH_COMPLETION_SOURCE) cp $< $@ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/clients/PaxHeaders/pyarcrest0000644000000000000000000000013215067751427017543 xustar0030 mtime=1759499031.277457199 30 atime=1759499034.766510215 30 ctime=1759499031.277457199 nordugrid-arc-7.1.1/src/clients/pyarcrest/0000755000175000002070000000000015067751427021522 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/pyarcrest/PaxHeaders/Makefile.am0000644000000000000000000000013115067751327021652 xustar0029 mtime=1759498967.68049084 30 atime=1759498967.826493058 30 ctime=1759499031.272771516 nordugrid-arc-7.1.1/src/clients/pyarcrest/Makefile.am0000644000175000002070000000143715067751327023562 0ustar00mockbuildmock00000000000000install: PIP_OPTIONS="--no-deps --disable-pip-version-check --verbose" ; \ if $(PYTHON) -m pip install --help | grep -q -- --use-pep517 ; then \ PIP_OPTIONS="$$PIP_OPTIONS --use-pep517" ; \ fi ; \ if $(PYTHON) -m pip install --help | grep -q -- --no-build-isolation ; then \ PIP_OPTIONS="$$PIP_OPTIONS --no-build-isolation" ; \ fi ; \ if [ -n "$(DESTDIR)" ] ; then \ PIP_OPTIONS="$$PIP_OPTIONS --root $(DESTDIR)" ; \ fi ; \ echo PIP options: $$PIP_OPTIONS ; \ $(PYTHON) -m pip install $$PIP_OPTIONS --prefix $(prefix) $(srcdir) EXTRA_DIST = \ README.md \ setup.cfg \ setup.py \ src/pyarcrest/arc.py \ src/pyarcrest/cli/__init__.py \ src/pyarcrest/cli/arcrest.py \ src/pyarcrest/errors.py \ src/pyarcrest/__init__.py \ src/pyarcrest/http.py \ src/pyarcrest/x509.py nordugrid-arc-7.1.1/src/clients/pyarcrest/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347021666 xustar0030 mtime=1759498983.747070016 30 atime=1759499019.669280812 30 ctime=1759499031.274074947 nordugrid-arc-7.1.1/src/clients/pyarcrest/Makefile.in0000644000175000002070000004570115067751347023577 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/clients/pyarcrest ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ 
ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = 
@GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = 
@datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ EXTRA_DIST = \ README.md \ setup.cfg \ setup.py \ src/pyarcrest/arc.py \ src/pyarcrest/cli/__init__.py \ src/pyarcrest/cli/arcrest.py \ src/pyarcrest/errors.py \ src/pyarcrest/__init__.py \ src/pyarcrest/http.py \ src/pyarcrest/x509.py all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/pyarcrest/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/pyarcrest/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am .PRECIOUS: Makefile install: PIP_OPTIONS="--no-deps --disable-pip-version-check --verbose" ; \ if $(PYTHON) -m pip install --help | grep -q -- --use-pep517 ; then \ PIP_OPTIONS="$$PIP_OPTIONS --use-pep517" ; \ fi ; \ if $(PYTHON) -m pip install --help | grep -q -- --no-build-isolation ; then \ PIP_OPTIONS="$$PIP_OPTIONS --no-build-isolation" ; \ fi ; \ if [ -n "$(DESTDIR)" ] ; then \ PIP_OPTIONS="$$PIP_OPTIONS --root $(DESTDIR)" ; \ fi ; \ echo PIP options: $$PIP_OPTIONS ; \ $(PYTHON) -m pip install $$PIP_OPTIONS --prefix $(prefix) $(srcdir) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/clients/pyarcrest/PaxHeaders/setup.py0000644000000000000000000000013215067751327021331 xustar0030 mtime=1759498967.681755712 30 atime=1759498967.826493058 30 ctime=1759499031.278063311 nordugrid-arc-7.1.1/src/clients/pyarcrest/setup.py0000644000175000002070000000004615067751327023233 0ustar00mockbuildmock00000000000000from setuptools import setup setup() nordugrid-arc-7.1.1/src/clients/pyarcrest/PaxHeaders/README.md0000644000000000000000000000013215067751327021076 xustar0030 mtime=1759498967.681755712 30 atime=1759498967.826493058 30 ctime=1759499031.275373509 nordugrid-arc-7.1.1/src/clients/pyarcrest/README.md0000644000175000002070000000131115067751327022774 0ustar00mockbuildmock00000000000000# Dependency on ARC library python bindings Currently, the *pyarcrest.arc* module depends on the [ARC](https://www.nordugrid.org/arc/arc7/index.html) library Python bindings from the *python3-nordugrid-arc* package, which is available in several Linux distributions and their package repositories. The package is also available in the [Nordugrid repositories](https://www.nordugrid.org/arc/arc7/common/repos/repository.html). For the package to be available in a custom virtual environment, the environment has to be created with an additional flag that includes the system site packages: ``` python3 -m venv --system-site-packages pyarcrest-venv .
pyarcrest-venv/bin/activate pip install git+https://github.com/jakobmerljak/pyarcrest.git@dev ``` nordugrid-arc-7.1.1/src/clients/pyarcrest/PaxHeaders/setup.cfg0000644000000000000000000000013215067751327021440 xustar0030 mtime=1759498967.681755712 30 atime=1759498967.826493058 30 ctime=1759499031.276727146 nordugrid-arc-7.1.1/src/clients/pyarcrest/setup.cfg0000644000175000002070000000057015067751327023344 0ustar00mockbuildmock00000000000000[metadata] name = pyarcrest version = 0.1 author = aCT team author_email = act-dev@cern.ch description = Library for ARC CE REST license = Apache 2.0 [options] packages = find: package_dir = =src python_requires = >= 3.6 install_requires = cryptography [options.packages.find] where=src [options.entry_points] console_scripts = arcrest = pyarcrest.cli.arcrest:main nordugrid-arc-7.1.1/src/clients/pyarcrest/PaxHeaders/src0000644000000000000000000000013215067751427020332 xustar0030 mtime=1759499031.270457093 30 atime=1759499034.766510215 30 ctime=1759499031.270457093 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/0000755000175000002070000000000015067751427022311 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/pyarcrest/src/PaxHeaders/pyarcrest0000644000000000000000000000013215067751427022346 xustar0030 mtime=1759499031.286457336 30 atime=1759499034.766510215 30 ctime=1759499031.286457336 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/0000755000175000002070000000000015067751427024325 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/PaxHeaders/http.py0000644000000000000000000000013115067751327023752 xustar0029 mtime=1759498967.68248103 30 atime=1759498967.826493058 30 ctime=1759499031.285798711 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/http.py0000644000175000002070000000712115067751327025656 0ustar00mockbuildmock00000000000000import http.client import json import logging import ssl from http.client import HTTPConnection, HTTPSConnection, RemoteDisconnected from urllib.parse import urlencode, urlparse from pyarcrest.errors import HTTPClientError log = logging.getLogger(__name__) ### Solution to output http.client logs to log instead of print # https://stackoverflow.com/a/58769712 def print_to_log(*args): log.debug(" ".join(args)) http.client.print = print_to_log ### # TODO: blocksize is not used until Python 3.7 becomes minimum version class HTTPClient: def __init__(self, url=None, host=None, port=None, blocksize=None, timeout=None, proxypath=None, isHTTPS=True): """Process parameters and create HTTP connection.""" if url: parts = urlparse(url) if parts.scheme == "https" or parts.scheme == "": useHTTPS = True elif parts.scheme == "http": useHTTPS = False else: raise HTTPClientError(f"URL scheme not http(s) but {parts.scheme}") host = parts.hostname if not host: raise HTTPClientError("No hostname in URL") port = parts.port else: if not host: raise HTTPClientError("No hostname parameter") useHTTPS = isHTTPS port = port if proxypath: if not useHTTPS: raise HTTPClientError("Cannot use proxy without HTTPS") else: context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.load_cert_chain(proxypath) else: context = None kwargs = {} # TODO: must not pass for as long as python 3.6 is used #if blocksize is not None: # kwargs["blocksize"] = blocksize if timeout: kwargs["timeout"] = timeout if useHTTPS: if not port: port = 443 self.conn = HTTPSConnection(host, port=port, context=context, **kwargs) else: if not port: port = 80 self.conn = HTTPConnection(host, port=port, **kwargs) 
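# Debug level 1 makes http.client print each request and response line;
# the print_to_log bridge installed at the top of this module redirects
# that output to the module logger instead of stdout.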
self.conn.set_debuglevel(1) self.isHTTPS = useHTTPS def request(self, method, endpoint, headers={}, token=None, jsonData=None, data=None, params={}): """Send request and retry on ConnectionErrors.""" if token: headers['Authorization'] = f'Bearer {token}' if jsonData: body = json.dumps(jsonData).encode() headers['Content-Type'] = 'application/json' else: body = data for key, value in params.items(): if isinstance(value, list): params[key] = ','.join([str(val) for val in value]) query = '' if params: query = urlencode(params) if query: url = f'{endpoint}?{query}' else: url = endpoint try: self.conn.request(method, url, body=body, headers=headers) resp = self.conn.getresponse() # TODO: should the request be retried for aborted connection by peer? except (RemoteDisconnected, BrokenPipeError, ConnectionAbortedError, ConnectionResetError): # retry request try: self.conn.request(method, url, body=body, headers=headers) resp = self.conn.getresponse() except: self.close() raise except: self.close() raise return resp def close(self): """Close connection.""" self.conn.close() nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/PaxHeaders/errors.py0000644000000000000000000000013115067751327024307 xustar0029 mtime=1759498967.68248103 30 atime=1759498967.826493058 30 ctime=1759499031.282946688 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/errors.py0000644000175000002070000000406715067751327026211 0ustar00mockbuildmock00000000000000class X509Error(Exception): """Base error for x509 module.""" class HTTPClientError(Exception): """Base error for http module.""" class ARCError(Exception): """Base error for arc module.""" class ARCHTTPError(ARCError): """Job operation error.""" def __init__(self, status, text): super().__init__(status, text) self.status = status self.text = text def __str__(self): return f"Operation error: {self.status} {self.text}" class DescriptionParseError(ARCError): """Description parsing error.""" class DescriptionUnparseError(ARCError): """Description unparsing error.""" class InputFileError(ARCError): """Error with input file in job description.""" class NoValueInARCResult(ARCError): """Error with result from ARC.""" class MatchmakingError(ARCError): """Matchmaking error.""" class InputUploadError(ARCError): """Input file upload error.""" def __init__(self, jobid, state, errors): super().__init__(jobid, state, errors) self.jobid = jobid self.state = state self.errors = errors def __str__(self): return f"Input upload error(s) for job {self.jobid} in state {self.state}:\n" \ + '\n'.join([str(error) for error in self.errors]) class MissingResultFile(ARCError): """ Missing job result file. There needs to be a distinction between a missing output file and a missing diagnose file. In some use cases a missing diagnose file might not be considered an error and would then need to be handled explicitly.
""" def __init__(self, filename): super().__init__(filename) self.filename = filename def __str__(self): return f"Missing result file {self.filename}" class MissingOutputFile(MissingResultFile): """Mising job output file.""" def __str__(self): return f"Missing output file {self.filename}" class MissingDiagnoseFile(MissingResultFile): """Missing job diagnose file.""" def __str__(self): return f"Missing diagnose file {self.filename}" nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/PaxHeaders/x509.py0000644000000000000000000000013115067751327023500 xustar0029 mtime=1759498967.68248103 30 atime=1759498967.826493058 30 ctime=1759499031.287234149 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/x509.py0000644000175000002070000001541615067751327025412 0ustar00mockbuildmock00000000000000import os import re import time from datetime import datetime, timedelta from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from pyarcrest.errors import X509Error PROXYPATH = f"/tmp/x509up_u{os.getuid()}" def isOldProxy(cert): """ Return True if the given proxy Certificate object is in old format. The value of the last CN of an old format proxy is either "proxy" or "limited proxy". """ lastCN = cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[-1] return lastCN.value in ("proxy", "limited proxy") def validKeyUsage(cert): """ Return True if the given proxy Certificate object's key usage is valid. Key usage is considered valid if digital signature bit is set in extension or if there is no key usage extension. """ try: keyUsage = cert.extensions.get_extension_for_oid(x509.oid.ExtensionOID.KEY_USAGE) return bool(keyUsage.value.digital_signature) except x509.ExtensionNotFound: return True def checkRFCProxy(proxy): """Return True if the given Certificate object is a valid X.509 RFC 3820 proxy.""" for ext in proxy.extensions: if ext.oid.dotted_string == "1.3.6.1.5.5.7.1.14": return True return False def createProxyCSR(issuer, key): """ Create proxy certificate signing request. Args: issuer: A proxy Certificate object of the proxied entity. key: A key object of the proxying entity. The type of object is one of RSAPrivateKey, DSAPrivateKey, EllipticCurvePrivateKey, Ed25519PrivateKey or Ed448PrivateKey. Returns: A CertificateSigningRequest object. """ if isOldProxy(issuer): raise X509Error("Proxy format not supported") if not validKeyUsage(issuer): raise X509Error("Proxy uses invalid keyUsage extension") builder = x509.CertificateSigningRequestBuilder() # copy subject to CSR subject = list(issuer.subject) builder = builder.subject_name(x509.Name(subject)) # add proxyCertInfo extension oid = x509.ObjectIdentifier("1.3.6.1.5.5.7.1.14") value = b"0\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x15\x01" extension = x509.extensions.UnrecognizedExtension(oid, value) builder = builder.add_extension(extension, critical=True) # sign the proxy CSR with the key return builder.sign( private_key=key, algorithm=hashes.SHA256(), backend=default_backend(), ) def signProxyCSR(csr, proxypath=PROXYPATH, lifetime=None): """ Sign proxy CSR. Args: csr: A CertificateSigningRequest object. proxypath: A string of the path of the proxy file. lifetime: If None, the cert validity will be the same as that of the signing cert. Otherwise, it is used as a number of hours from now which the cert will be valid for. Returns: A proxy Certificate object. 
""" now = datetime.utcnow() if not csr.is_signature_valid: raise X509Error("Invalid request signature") with open(proxypath, "rb") as f: proxy_pem = f.read() proxy = x509.load_pem_x509_certificate(proxy_pem, default_backend()) if not checkRFCProxy(proxy): raise X509Error("Invalid RFC proxy") key = serialization.load_pem_private_key(proxy_pem, password=None, backend=default_backend()) keyID = x509.SubjectKeyIdentifier.from_public_key(key.public_key()) # add a CN with serial number to subject subject = list(proxy.subject) subject.append(x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, str(int(time.time())))) cert_builder = x509.CertificateBuilder() \ .issuer_name(proxy.subject) \ .not_valid_before(now) \ .serial_number(proxy.serial_number) \ .public_key(csr.public_key()) \ .subject_name(x509.Name(subject)) \ .add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True) \ .add_extension(x509.KeyUsage(digital_signature=True, content_commitment=False, key_encipherment=False, data_encipherment=False, key_agreement=True, key_cert_sign=False, crl_sign=False, encipher_only=False, decipher_only=False), critical=True) \ .add_extension(x509.AuthorityKeyIdentifier( key_identifier=keyID.digest, authority_cert_issuer=[x509.DirectoryName(proxy.issuer)], authority_cert_serial_number=proxy.serial_number ), critical=False) \ .add_extension(x509.extensions.UnrecognizedExtension( x509.ObjectIdentifier("1.3.6.1.5.5.7.1.14"), b"0\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x15\x01"), critical=True) if not lifetime: cert_builder = cert_builder.not_valid_after(proxy.not_valid_after) else: cert_builder = cert_builder.not_valid_after(now + timedelta(hours=lifetime)) return cert_builder.sign( private_key=key, algorithm=proxy.signature_hash_algorithm, backend=default_backend() ) def parseProxyPEM(pem): """Return cert, key and chain PEMs from the given proxy PEM.""" sections = re.findall( "-----BEGIN.*?-----.*?-----END.*?-----", pem, flags=re.DOTALL ) try: certPEM = sections[0] keyPEM = sections[1] chainPEMs = sections[2:] except IndexError: raise X509Error("Invalid PEM") else: return f"{certPEM}\n", f"{keyPEM}\n", "\n".join(chainPEMs) + "\n" def generateKey(size=2048): return rsa.generate_private_key( public_exponent=65537, # the docs say that this value should be used key_size=size, backend=default_backend() ) def certToPEM(cert): return cert.public_bytes(serialization.Encoding.PEM).decode() def pemToCert(pem): return x509.load_pem_x509_certificate(pem.encode(), default_backend()) def csrToPEM(csr): return certToPEM(csr) def pemToCSR(pem): return x509.load_pem_x509_csr(pem.encode(), default_backend()) def keyToPEM(key): return key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption() ).decode() def pemToKey(pem): return serialization.load_pem_private_key(pem.encode(), password=None) nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/PaxHeaders/__init__.py0000644000000000000000000000013215067751327024533 xustar0030 mtime=1759498967.681939061 30 atime=1759498967.826493058 30 ctime=1759499031.284460594 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/__init__.py0000644000175000002070000000011615067751327026433 0ustar00mockbuildmock00000000000000import logging logging.getLogger(__name__).addHandler(logging.NullHandler()) nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/PaxHeaders/cli0000644000000000000000000000013215067751427023115 xustar0030 mtime=1759499031.280457244 30 
atime=1759499034.766510215 30 ctime=1759499031.280457244 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/cli/0000755000175000002070000000000015067751427025074 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/cli/PaxHeaders/__init__.py0000644000000000000000000000013015067751327025300 xustar0029 mtime=1759498967.68204701 29 atime=1759498967.68204701 30 ctime=1759499031.280429158 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/cli/__init__.py0000644000175000002070000000000015067751327027172 0ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/cli/PaxHeaders/arcrest.py0000644000000000000000000000013115067751327025205 xustar0029 mtime=1759498967.68248103 30 atime=1759498967.826493058 30 ctime=1759499031.281521904 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/cli/arcrest.py0000644000175000002070000001444715067751327027112 0ustar00mockbuildmock00000000000000import argparse import json import logging import os import pathlib import sys from pyarcrest.arc import ARCRest PROXYPATH = f"/tmp/x509up_u{os.getuid()}" def main(): parserCommon = argparse.ArgumentParser(add_help=False) parserCommon.add_argument("-P", "--proxy", type=str, default=PROXYPATH, help="path to proxy cert") parserCommon.add_argument("-v", "--verbose", action="store_true", help="print debug output") parserCommon.add_argument("cluster", type=str, help="hostname (with optional port) of the cluster") parser = argparse.ArgumentParser(description="Execute ARC operations") subparsers = parser.add_subparsers(dest="command") subparsers.add_parser( "version", help="get supported REST API versions", parents=[parserCommon], ) subparsers.add_parser( "info", help="get CE resource information", parents=[parserCommon], ) jobs_parser = subparsers.add_parser( "jobs", help="execute operations on /jobs endpoint", ) jobs_subparsers = jobs_parser.add_subparsers(dest="jobs") jobs_subparsers.add_parser( "list", help="get list of jobs", parents=[parserCommon], ) jobs_info_parser = jobs_subparsers.add_parser( "info", help="get info for given jobs", parents=[parserCommon], ) jobs_info_parser.add_argument("jobids", type=str, nargs='+', help="job IDs to fetch info for") jobs_status_parser = jobs_subparsers.add_parser( "status", help="get status for given jobs", parents=[parserCommon], ) jobs_status_parser.add_argument("jobids", type=str, nargs='+', help="job IDs to fetch status for") jobs_kill_parser = jobs_subparsers.add_parser( "kill", help="kill given jobs", parents=[parserCommon], ) jobs_kill_parser.add_argument("jobids", type=str, nargs='+', help="job IDs to kill") jobs_clean_parser = jobs_subparsers.add_parser( "clean", help="clean given jobs", parents=[parserCommon], ) jobs_clean_parser.add_argument("jobids", type=str, nargs='+', help="job IDs to clean") jobs_restart_parser = jobs_subparsers.add_parser( "restart", help="restart given jobs", parents=[parserCommon], ) jobs_restart_parser.add_argument("jobids", type=str, nargs='+', help="job IDs to restart") jobs_submit_parser = jobs_subparsers.add_parser( "submit", help="submit given job descriptions", parents=[parserCommon], ) jobs_submit_parser.add_argument("jobdescs", type=pathlib.Path, nargs='+', help="job descs to submit") jobs_submit_parser.add_argument("--queue", type=str, help="queue to submit to") delegs_parser = subparsers.add_parser( "delegations", help="execute operations on /delegations endpoint", ) delegs_subparsers = delegs_parser.add_subparsers(dest="delegations") delegs_subparsers.add_parser(
"list", help="list user's delegations", parents=[parserCommon], ) delegs_subparsers.add_parser( "new", help="create new delegation", parents=[parserCommon], ) delegs_get_parser = delegs_subparsers.add_parser( "get", help="get given delegation", parents=[parserCommon], ) delegs_get_parser.add_argument("delegid", type=str, help="delegation ID to get") delegs_renew_parser = delegs_subparsers.add_parser( "renew", help="renew given delegation", parents=[parserCommon], ) delegs_renew_parser.add_argument("delegid", type=str, help="delegation ID to renew") delegs_delete_parser = delegs_subparsers.add_parser( "delete", help="delete given delegation", parents=[parserCommon], ) delegs_delete_parser.add_argument("delegid", type=str, help="delegation ID to delete") args = parser.parse_args() if not args.command: parser.print_help() return if args.command == "jobs" and not args.jobs: jobs_parser.print_help() return elif args.command == "delegations" and not args.delegations: delegs_parser.print_help() return if args.verbose: log = logging.getLogger("pyarcrest") handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) log.addHandler(handler) log.setLevel(logging.DEBUG) arcrest = ARCRest.getClient(url=args.cluster, proxypath=args.proxy) if args.command == "jobs" and args.jobs == "list": print(json.dumps(arcrest.getJobsList(), indent=4)) elif args.command == "jobs" and args.jobs in ("info", "status", "kill", "clean", "restart"): if args.jobs == "info": results = arcrest.getJobsInfo(args.jobids) elif args.jobs == "status": results = arcrest.getJobsStatus(args.jobids) elif args.jobs == "kill": results = arcrest.killJobs(args.jobids) elif args.jobs == "clean": results = arcrest.cleanJobs(args.jobids) elif args.jobs == "restart": results = arcrest.restartJobs(args.jobids) # default is required to be able to serialize datetime objects print(json.dumps(results, indent=4, default=str)) elif args.command == "jobs" and args.jobs == "submit": descs = [] for desc in args.jobdescs: with desc.open() as f: descs.append(f.read()) results = arcrest.submitJobs(descs, args.queue) print(json.dumps(results, indent=4)) elif args.command == "version": print(json.dumps(arcrest.getAPIVersions(), indent=4)) elif args.command == "info": print(json.dumps(arcrest.getCEInfo(), indent=4)) elif args.command == "delegations" and args.delegations == "list": print(arcrest.getDelegationsList()) elif args.command == "delegations" and args.delegations == "new": print(arcrest.createDelegation()) elif args.command == "delegations" and args.delegations == "get": print(arcrest.getDelegation(args.delegid)) elif args.command == "delegations" and args.delegations == "renew": arcrest.refreshDelegation(args.delegid) elif args.command == "delegations" and args.delegations == "delete": arcrest.deleteDelegation(args.delegid) nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/PaxHeaders/arc.py0000644000000000000000000000013115067751327023540 xustar0029 mtime=1759498967.68204701 30 atime=1759498967.826493058 30 ctime=1759499031.279229961 nordugrid-arc-7.1.1/src/clients/pyarcrest/src/pyarcrest/arc.py0000644000175000002070000014624415067751327025456 0ustar00mockbuildmock00000000000000""" Module for interaction with the ARC CE REST interface. Automatic support for multiple versions of the API is implemented with optional manual selection of the API version. 
This is done by defining a base class with methods closely reflecting the operations specified in the ARC CE REST interface specification: https://www.nordugrid.org/arc/arc7/tech/rest/rest.html Additionally, the base class defines some higher level methods, e. g. a method to upload job input files using multiple threads. Some operations involved in determining the API version are implemented in class methods instead of instance methods as instance methods are considered to be tied to the API version. Determination of API version should therefore be a static operation. """ import concurrent.futures import datetime import json import logging import os import queue import re import threading from urllib.parse import urlparse from pyarcrest.errors import (ARCError, ARCHTTPError, DescriptionParseError, DescriptionUnparseError, InputFileError, InputUploadError, MatchmakingError, MissingDiagnoseFile, MissingOutputFile, NoValueInARCResult) from pyarcrest.http import HTTPClient from pyarcrest.x509 import certToPEM, parseProxyPEM, pemToCSR, signProxyCSR log = logging.getLogger(__name__) class Result: def __init__(self, value, error=False): assert isinstance(error, bool) self.value = value self.error = True if error else False class ARCRest: DIAGNOSE_FILES = [ "failed", "local", "errors", "description", "diag", "comment", "status", "acl", "xml", "input", "output", "input_status", "output_status", "statistics" ] def __init__(self, httpClient, token=None, proxypath=None, apiBase="/arex", sendsize=None, recvsize=None, timeout=None): """ Initialize the base object. Note that this class should not be instantiated directly because additional implementations of attributes and methods are required from derived classes. """ assert token or proxypath if token: self.token = token self.proxypath = None elif proxypath: self.token = None self.proxypath = proxypath self.httpClient = httpClient self.apiBase = apiBase self.sendsize = sendsize self.recvsize = recvsize or 2 ** 14 # 16KB default download size self.timeout = timeout def close(self): self.httpClient.close() ### Direct operations on ARC CE ### def getAPIVersions(self): return self.getAPIVersionsStatic(self.httpClient, self.apiBase, self.token) def getCEInfo(self): status, text = self._requestJSON("GET", "/info") if status != 200: raise ARCHTTPError(status, text) return json.loads(text) def getJobsList(self): status, text = self._requestJSON("GET", "/jobs") if status != 200: raise ARCHTTPError(status, text) try: jsonData = json.loads(text)["job"] except json.JSONDecodeError as exc: if exc.doc == "": jsonData = [] else: raise # /rest/1.0 compatibility if not isinstance(jsonData, list): jsonData = [jsonData] return [job["id"] for job in jsonData] def createJobs(self, description, queue=None, delegationID=None, isADL=True): raise NotImplementedError def getJobsInfo(self, jobs): responses = self._manageJobs(jobs, "info") results = [] for job, response in zip(jobs, responses): code, reason = int(response["status-code"]), response["reason"] if code != 200: results.append(Result(ARCHTTPError(code, reason), error=True)) elif "info_document" not in response: results.append(Result(NoValueInARCResult(f"No info document in successful info response for job {job}"), error=True)) else: results.append(Result(self._parseJobInfo(response["info_document"]))) return results def getJobsStatus(self, jobs): responses = self._manageJobs(jobs, "status") results = [] for job, response in zip(jobs, responses): code, reason = int(response["status-code"]), response["reason"] if code != 
200: results.append(Result(ARCHTTPError(code, reason), error=True)) elif "state" not in response: results.append(Result(NoValueInARCResult("No state in successful status response"), error=True)) else: results.append(Result(response["state"])) return results def killJobs(self, jobs): responses = self._manageJobs(jobs, "kill") results = [] for job, response in zip(jobs, responses): code, reason = int(response["status-code"]), response["reason"] if code != 202: results.append(Result(ARCHTTPError(code, reason), error=True)) else: results.append(Result(True)) return results def cleanJobs(self, jobs): responses = self._manageJobs(jobs, "clean") results = [] for job, response in zip(jobs, responses): code, reason = int(response["status-code"]), response["reason"] if code != 202: results.append(Result(ARCHTTPError(code, reason), error=True)) else: results.append(Result(True)) return results def restartJobs(self, jobs): responses = self._manageJobs(jobs, "restart") results = [] for job, response in zip(jobs, responses): code, reason = int(response["status-code"]), response["reason"] if code != 202: results.append(Result(ARCHTTPError(code, reason), error=True)) else: results.append(Result(True)) return results def getJobsDelegations(self, jobs): responses = self._manageJobs(jobs, "delegations") results = [] for job, response in zip(jobs, responses): code, reason = int(response["status-code"]), response["reason"] if code != 200: results.append(Result(ARCHTTPError(code, reason), error=True)) elif "delegation_id" not in response: results.append(Result(NoValueInARCResult("No delegation ID in successful response"), error=True)) else: # /rest/1.0 compatibility if isinstance(response["delegation_id"], list): delegations = response["delegation_id"] else: delegations = [response["delegation_id"]] results.append(Result(delegations)) return results def downloadFile(self, jobid, sessionPath, filePath): try: self._downloadURL(f"/jobs/{jobid}/session/{sessionPath}", filePath) except ARCHTTPError as exc: if exc.status == 404: raise MissingOutputFile(sessionPath) else: raise def uploadFile(self, jobid, sessionPath, filePath): urlPath = f"/jobs/{jobid}/session/{sessionPath}" with open(filePath, "rb") as f: resp = self._request("PUT", urlPath, data=f) text = resp.read().decode() if resp.status != 200: raise ARCHTTPError(resp.status, text) def downloadListing(self, jobid, sessionPath): urlPath = f"/jobs/{jobid}/session/{sessionPath}" status, text = self._requestJSON("GET", urlPath) if status != 200: raise ARCHTTPError(status, text) # /rest/1.0 compatibility # handle empty response listing = {} try: listing = json.loads(text) except json.JSONDecodeError as exc: if exc.doc != "": raise # handle missing file list ... if "file" not in listing: listing["file"] = [] # ... or a single file string elif not isinstance(listing["file"], list): listing["file"] = [listing["file"]] # handle missing dir list ... if "dirs" not in listing: listing["dirs"] = [] # ...
or a single dir string elif not isinstance(listing["dirs"], list): listing["dirs"] = [listing["dirs"]] return listing def downloadDiagnoseFile(self, jobid, name, filePath): if name not in self.DIAGNOSE_FILES: raise ARCError(f"Invalid control dir file requested: {name}") try: self._downloadURL(f"/jobs/{jobid}/diagnose/{name}", filePath) except ARCHTTPError as exc: if exc.status == 404: raise MissingDiagnoseFile(name) else: raise def getDelegationsList(self, type=None): params = {} if type: assert type in ("x509", "jwt") params["type"] = type status, text = self._requestJSON("GET", "/delegations", params=params) if status != 200: raise ARCHTTPError(status, text) # /rest/1.0 compatibility try: return json.loads(text)["delegation"] except json.JSONDecodeError as exc: if exc.doc == "": return [] else: raise # priorities: token over proxypath over whatever the access credential is def newDelegation(self, token=None, proxypath=None): params = {"action": "new"} headers = {} tokenDelegation = token or not proxypath and self.token if tokenDelegation: if not token: token = self.token headers["X-Token-Delegation"] = f"Bearer {token}" params["type"] = "jwt" resp = self._request("POST", "/delegations", headers=headers, params=params, token=token) respstr = resp.read().decode() if token: if resp.status != 201: raise ARCHTTPError(resp.status, respstr) respstr = None else: if resp.status != 201: raise ARCHTTPError(resp.status, respstr) return resp.getheader("Location").split("/")[-1], respstr def uploadCertDelegation(self, delegationID, cert): url = f"/delegations/{delegationID}" headers = {"Content-Type": "application/x-pem-file"} resp = self._request("PUT", url, data=cert, headers=headers) respstr = resp.read().decode() if resp.status != 200: raise ARCHTTPError(resp.status, respstr) def getDelegation(self, delegationID): url = f"/delegations/{delegationID}" resp = self._request("POST", url, params={"action": "get"}) respstr = resp.read().decode() if resp.status != 200: raise ARCHTTPError(resp.status, respstr) return respstr # returns CSR if proxy cert is used, None otherwise # # what happens if token is used to renew a proxy delegation? 
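# A minimal sketch of the renewal cycle built on this method, assuming a
# client instance named "client" (refreshDelegation below wraps the same
# three steps):
#   csr = client.renewDelegation(delegationID)       # CSR PEM under proxy auth
#   pem = client._signCSR(csr)                       # sign with the local proxy
#   client.uploadCertDelegation(delegationID, pem)   # upload the signed cert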
def renewDelegation(self, delegationID, token=None, proxypath=None): params = {"action": "renew"} headers = {} tokenDelegation = token or (not proxypath and self.token) if tokenDelegation: if not token: token = self.token headers["X-Token-Delegation"] = f"Bearer {token}" url = f"/delegations/{delegationID}" resp = self._request("POST", url, headers=headers, params=params, token=token) respstr = resp.read().decode() if token: if resp.status != 200: raise ARCHTTPError(resp.status, respstr) respstr = None else: if resp.status != 201: raise ARCHTTPError(resp.status, respstr) return respstr def deleteDelegation(self, delegationID): url = f"/delegations/{delegationID}" resp = self._request("POST", url, params={"action": "delete"}) respstr = resp.read().decode() if resp.status != 202: raise ARCHTTPError(resp.status, respstr) ### Higher level job operations ### def uploadJobFiles(self, jobids, jobInputs, workers=None, sendsize=None, timeout=None): if not workers: workers = 1 resultDict = {jobid: [] for jobid in jobids} # create upload queue uploadQueue = queue.Queue() for jobid, inputFiles in zip(jobids, jobInputs): try: self._addInputTransfers(uploadQueue, jobid, inputFiles) except InputFileError as exc: resultDict[jobid].append(exc) log.debug(f"Skipping job {jobid} due to input file error: {exc}") if uploadQueue.empty(): log.debug("No local inputs to upload") return [resultDict[jobid] for jobid in jobids] errorQueue = queue.Queue() # create REST clients for workers numWorkers = min(uploadQueue.qsize(), workers) restClients = [] for i in range(numWorkers): restClients.append(self.getClient( host=self.httpClient.conn.host, port=self.httpClient.conn.port, sendsize=sendsize or self.sendsize, timeout=timeout or self.timeout, token=self.token, proxypath=self.proxypath, apiBase=self.apiBase, version=self.version, )) log.debug(f"Created {len(restClients)} upload workers") # run upload threads on upload queue with concurrent.futures.ThreadPoolExecutor(max_workers=numWorkers) as pool: futures = [] for restClient in restClients: futures.append(pool.submit( self._uploadTransferWorker, restClient, uploadQueue, errorQueue, )) concurrent.futures.wait(futures) # close HTTP clients for restClient in restClients: restClient.close() # get transfer errors while not errorQueue.empty(): error = errorQueue.get() resultDict[error["jobid"]].append(error["error"]) errorQueue.task_done() return [resultDict[jobid] for jobid in jobids] def downloadJobFiles(self, downloadDir, jobids, outputFilters={}, diagnoseFiles={}, diagnoseDirs={}, workers=None, recvsize=None, timeout=None): if not workers: workers = 1 resultDict = {jobid: [] for jobid in jobids} transferQueue = TransferQueue(workers) for jobid in jobids: cancelEvent = threading.Event() # add diagnose files to transfer queue try: self._addDiagnoseTransfers(transferQueue, jobid, downloadDir, diagnoseFiles, diagnoseDirs, cancelEvent) except ARCError as exc: resultDict[jobid].append(exc) continue # add job session directory as a listing transfer path = os.path.join(downloadDir, jobid) transferQueue.put(Transfer(jobid, "", path, type="listing", cancelEvent=cancelEvent)) errorQueue = queue.Queue() # create REST clients for workers restClients = [] for i in range(workers): restClients.append(self.getClient( host=self.httpClient.conn.host, port=self.httpClient.conn.port, recvsize=recvsize or self.recvsize, timeout=timeout or self.timeout, token=self.token, proxypath=self.proxypath, apiBase=self.apiBase, version=self.version, )) log.debug(f"Created {len(restClients)} download 
workers") refilters = {jobid: re.compile(refilter) for jobid, refilter in outputFilters.items()} with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as pool: futures = [] for restClient in restClients: futures.append(pool.submit( self._downloadTransferWorker, restClient, transferQueue, errorQueue, downloadDir, refilters, )) concurrent.futures.wait(futures) for restClient in restClients: restClient.close() # get transfer errors while not errorQueue.empty(): error = errorQueue.get() resultDict[error["jobid"]].append(error["error"]) errorQueue.task_done() return [resultDict[jobid] for jobid in jobids] def createDelegation(self, token=None, proxypath=None, proxyLifetime=None): delegationID, csr = self.newDelegation(token, proxypath) if csr: try: pem = self._signCSR(csr, proxyLifetime) self.uploadCertDelegation(delegationID, pem) return delegationID except Exception: self.deleteDelegation(delegationID) raise return delegationID def refreshDelegation(self, delegationID, token=None, proxypath=None, proxyLifetime=None): csr = self.renewDelegation(delegationID, token, proxypath) if csr: try: pem = self._signCSR(csr, proxyLifetime) self.uploadCertDelegation(delegationID, pem) except Exception: self.deleteDelegation(delegationID) raise def submitJobs(self, descs, queue=None, delegationID=None, processDescs=True, matchDescs=True, uploadData=True, workers=None, sendsize=None, timeout=None): raise NotImplementedError def matchJob(self, ceInfo, queue=None, runtimes=[], walltime=None): if queue: self._matchQueue(ceInfo, queue) # matching walltime requires queue if walltime: self._matchWalltime(ceInfo, queue, walltime) for runtime in runtimes: self._matchRuntime(ceInfo, runtime) ### auth API ### # TODO: should this error on no credentials? def updateCredential(self, token=None, proxypath=None): if token: # if token is updated, the connection is not required to be # recreated if self.token: self.token = token return else: self.token = token self.proxypath = None else: self.proxypath = proxypath if self.token: self.token = None # Since using proxy certificate as client certificate is part of # connection object, the connection has to be recreated whenever the # new proxy certificate is used or the proxy authenticated connection # is switched to token authentication. 
self.httpClient = self.getHTTPClient( host=self.httpClient.conn.host, port=self.httpClient.conn.port, blocksize=self.sendsize, timeout=self.timeout, token=token, proxypath=proxypath, ) ### Static operations ### @classmethod def getAPIVersionsStatic(cls, httpClient, apiBase="/arex", token=None): status, text = cls._requestJSONStatic(httpClient, "GET", f"{apiBase}/rest", token=token) if status != 200: raise ARCHTTPError(status, text) apiVersions = json.loads(text) # /rest/1.0 compatibility if not isinstance(apiVersions["version"], list): return [apiVersions["version"]] else: return apiVersions["version"] @classmethod def getHTTPClient(cls, url=None, host=None, port=None, blocksize=None, timeout=None, token=None, proxypath=None): assert token or proxypath if token: return HTTPClient(url, host, port, blocksize, timeout) elif proxypath: return HTTPClient(url, host, port, blocksize, timeout, proxypath) # TODO: explain the rationale in documentation about the design of the API # version selection mechanism: # - specific API implementations are in classes # - classes cannot be used as values in class variables or method # parameters without the proper ordering of definitions which is # awkward and inflexible @classmethod def getClient(cls, url=None, host=None, port=None, sendsize=None, recvsize=None, timeout=None, token=None, proxypath=None, apiBase="/arex", version=None, impls=None): if not proxypath: proxypath = f"/tmp/x509up_u{os.getuid()}" httpClient = cls.getHTTPClient(url, host, port, sendsize, timeout, token, proxypath) # get API version to implementation class mapping implementations = impls if not implementations: implementations = cls._getImplementations() # get API versions from CE apiVersions = cls.getAPIVersionsStatic(httpClient, apiBase, token) if not apiVersions: raise ARCError("No supported API versions on CE") # determine the API version to be used based on available # implementations and available versions on ARC CE if version: if version not in implementations: raise ARCError(f"No client support for requested API version {version}") if version not in apiVersions: raise ARCError(f"API version {version} not among CE supported API versions {apiVersions}") apiVersion = version else: # get the highest version of client implementation supported on the # ARC CE apiVersion = None for version in reversed(apiVersions): if version in implementations: apiVersion = version break if not apiVersion: raise ARCError(f"No client support for CE supported API versions: {apiVersions}") log.debug(f"API version {apiVersion} selected") return implementations[apiVersion](httpClient, token, proxypath, apiBase, sendsize, recvsize, timeout) ### Support methods ### def _downloadURL(self, url, path, recvsize=None): resp = self._request("GET", url) if resp.status != 200: text = resp.read().decode() raise ARCHTTPError(resp.status, text) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, "wb") as f: data = resp.read(recvsize or self.recvsize) while data: f.write(data) data = resp.read(recvsize or self.recvsize) # returns nothing if match successful, raises exception otherwise def _matchQueue(self, ceInfo, queue): if not self._findQueue(ceInfo, queue): raise MatchmakingError(f"Queue {queue} not found") # TODO: is it possible for user to just specify the runtime and any version # is OK or vice versa? 
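# --- Editor's sketch (hypothetical queue, runtime and walltime values) of how the matchmaking helpers in this section are driven: matchJob() returns nothing on success and raises MatchmakingError on the first failed check:
#
#     ceInfo = client.getCEInfo()
#     client.matchJob(ceInfo, queue="gridlong", runtimes=["ENV/PROXY"], walltime=3600)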
# returns nothing if match successful, raises exception otherwise def _matchRuntime(self, ceInfo, runtime): runtimes = self._findRuntimes(ceInfo) if runtime not in runtimes: raise MatchmakingError(f"Runtime {runtime} not found") # returns nothing if match successful, raises exception otherwise def _matchWalltime(self, ceInfo, queue, walltime): queueInfo = self._findQueue(ceInfo, queue) if not queueInfo: raise MatchmakingError(f"Queue {queue} not found to match walltime") if "MaxWallTime" in queueInfo: maxWallTime = int(queueInfo["MaxWallTime"]) if walltime > maxWallTime: raise MatchmakingError(f"Walltime {walltime} higher than max walltime {maxWallTime} for queue {queue}") @classmethod def _get(cls, source, name): val = source.get(name) if val is None: return {} if not isinstance(val, list): return val if val: return val[0] return {} def _findQueue(self, ceInfo, queue): compShares = self._get(self._get(self._get(self._get(ceInfo, "Domains"), "AdminDomain"), "Services"), "ComputingService").get("ComputingShare", []) if not compShares: return None # /rest/1.0 compatibility if not isinstance(compShares, list): compShares = [compShares] for compShare in compShares: if compShare.get("Name", None) == queue: # Queues are defined as ComputingShares. There are some shares # that are mapped to another share. Such a share is never a # queue externally. So if the name of the such share is used as # a queue, the result has to be empty. if "MappingPolicy" in compShare: return None else: return compShare return None def _findRuntimes(self, ceInfo): appenvs = self._get(self._get(self._get(self._get(self._get(self._get(ceInfo, "Domains"), "AdminDomain"), "Services"), "ComputingService"), "ComputingManager"), "ApplicationEnvironments").get("ApplicationEnvironment", []) # /rest/1.0 compatibility if not isinstance(appenvs, list): appenvs = [appenvs] runtimes = [] for env in appenvs: if "AppName" in env: envname = env["AppName"] if "AppVersion" in env: envname += f"-{env['AppVersion']}" runtimes.append(envname) return runtimes def _signCSR(self, csrPEM, lifetime=None): with open(self.proxypath) as f: proxyPEM = f.read() certPEM, _, chainPEM = parseProxyPEM(proxyPEM) chainPEM = certPEM + chainPEM csr = pemToCSR(csrPEM) cert = signProxyCSR(csr, self.proxypath, lifetime=lifetime) return certToPEM(cert) + chainPEM def _addInputTransfers(self, uploadQueue, jobid, inputFiles): cancelEvent = threading.Event() transfers = [] for name, source in inputFiles.items(): try: path = isLocalInputFile(name, source) except ValueError as exc: raise InputFileError(f"Error parsing source {source} of input {name}: {exc}") if not path: continue if not os.path.isfile(path): raise InputFileError(f"Source {source} of input {name} is not a file") transfers.append(Transfer(jobid, name, path, cancelEvent=cancelEvent)) # no exception raised, add transfers to queue for transfer in transfers: uploadQueue.put(transfer) def _addDiagnoseTransfers(self, transferQueue, jobid, downloadDir, diagnoseFiles, diagnoseDirs, cancelEvent): diagnoseList = diagnoseFiles.get(jobid, self.DIAGNOSE_FILES) diagnoseDir = diagnoseDirs.get(jobid, "gmlog") transfers = [] for diagFile in diagnoseList: assert diagFile in self.DIAGNOSE_FILES path = os.path.join(downloadDir, jobid, diagnoseDir, diagFile) transfers.append(Transfer(jobid, diagFile, path, type="diagnose", cancelEvent=cancelEvent)) # no exception raised, add transfers to queue for transfer in transfers: transferQueue.put(transfer) # When name is "", it means the root of the session dir. 
In this case, # slash must not be added to it. def _addTransfersFromListing(self, transferQueue, jobid, listing, name, path, cancelEvent, refilter=None): for f in listing["file"]: newpath = os.path.join(path, f) if name: newname = f"{name}/{f}" else: newname = f if not refilter or refilter.match(newname): transferQueue.put(Transfer(jobid, newname, newpath, type="file", cancelEvent=cancelEvent)) for d in listing["dirs"]: newpath = os.path.join(path, d) if name: newname = f"{name}/{d}" else: newname = d if not refilter or refilter.match(newname): transferQueue.put(Transfer(jobid, newname, newpath, type="listing", cancelEvent=cancelEvent)) def _requestJSON(self, method, endpoint, headers={}, token=None, jsonData=None, data=None, params={}): headers["Accept"] = "application/json" resp = self._request(method, endpoint, headers, token, jsonData, data, params) text = resp.read().decode() return resp.status, text def _manageJobs(self, jobs, action): if not jobs: return [] # JSON data for request tomanage = [{"id": job} for job in jobs] # /rest/1.0 compatibility if len(tomanage) == 1: jsonData = {"job": tomanage[0]} else: jsonData = {"job": tomanage} # execute action and get JSON result status, text = self._requestJSON("POST", "/jobs", jsonData=jsonData, params={"action": action}) if status != 201: raise ARCHTTPError(status, text) jsonData = json.loads(text) # /rest/1.0 compatibility if not isinstance(jsonData["job"], list): return [jsonData["job"]] else: return jsonData["job"] # TODO: think about what to log and how def _submitJobs(self, descs, queue=None, delegationID=None, processDescs=True, matchDescs=True, uploadData=True, workers=None, sendsize=None, timeout=None, v1_0=False): import arc ceInfo = self.getCEInfo() if not delegationID: delegationID = self.createDelegation() # A list of tuples of index and input file dict for every job # description to be submitted. The index is the description's # position in the given parameter of job descriptions and is # required to create properly aligned results. tosubmit = [] # A dict of a key that is index in given descs list and a value that # is either a list of exceptions for failed submission or a tuple of # jobid and state for successful submission. 
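# --- Editor's illustration of the alignment described above, with hypothetical job IDs and states, for three submitted descriptions:
#
#     resultDict = {0: Result(("abc...", "ACCEPTING")),
#                   1: Result(MatchmakingError("Queue short not found"), error=True),
#                   2: Result(("def...", "ACCEPTING"))}
#
# so the final return value is [resultDict[i] for i in range(len(descs))].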
resultDict = {} jobdescs = arc.JobDescriptionList() bulkdesc = "" for i in range(len(descs)): # parse job description if not arc.JobDescription.Parse(descs[i], jobdescs): resultDict[i] = Result(DescriptionParseError("Failed to parse description"), error=True) continue arcdesc = jobdescs[-1] # get queue, runtimes and walltime from description jobqueue = arcdesc.Resources.QueueName if not jobqueue: jobqueue = queue if jobqueue and v1_0: # set queue in job description arcdesc.Resources.QueueName = jobqueue runtimes = [str(env) for env in arcdesc.Resources.RunTimeEnvironment.getSoftwareList()] if not runtimes: runtimes = [] walltime = arcdesc.Resources.TotalWallTime.range.max if walltime == -1: walltime = None # do matchmaking if matchDescs: try: self.matchJob(ceInfo, jobqueue, runtimes, walltime) except MatchmakingError as error: resultDict[i] = Result(error, error=True) continue if v1_0: # add delegation ID to description arcdesc.DataStaging.DelegationID = delegationID # process job description if processDescs: self._processJobDescription(arcdesc) # get input files from description inputFiles = self._getArclibInputFiles(arcdesc) # unparse modified description, remove xml version node because it # is not accepted by ARC CE, add to bulk description unparseResult = arcdesc.UnParse("emies:adl") if not unparseResult[0]: resultDict[i] = Result(DescriptionUnparseError("Could not unparse processed description"), error=True) continue descstart = unparseResult[1].find("<ActivityDescription") bulkdesc += unparseResult[1][descstart:] tosubmit.append((i, inputFiles)) # wrap in a root element when submitting more than one description if len(tosubmit) > 1: bulkdesc = f"<ActivityDescriptions>{bulkdesc}</ActivityDescriptions>" # submit jobs to ARC # TODO: handle exceptions results = self.createJobs(bulkdesc, queue, delegationID) uploadIXs = [] # a list of job indexes for proper result processing uploadIDs = [] # a list of jobids for which to upload files uploadInputs = [] # a list of job input file dicts for upload for (jobix, inputFiles), result in zip(tosubmit, results): if result.error: resultDict[jobix] = Result(result.value, error=True) else: jobid, state = result.value resultDict[jobix] = Result((jobid, state)) uploadIDs.append(jobid) uploadInputs.append(inputFiles) uploadIXs.append(jobix) # upload jobs' local input data if uploadData: errors = self.uploadJobFiles(uploadIDs, uploadInputs, workers, sendsize, timeout or self.timeout) for jobix, uploadErrors in zip(uploadIXs, errors): if uploadErrors: jobid, state = resultDict[jobix].value resultDict[jobix] = Result(InputUploadError(jobid, state, uploadErrors), error=True) return [resultDict[i] for i in range(len(descs))] def _request(self, method, endpoint, headers={}, token=None, jsonData=None, data=None, params={}): if not token: token = self.token endpoint = f"{self.apiPath}{endpoint}" return self.httpClient.request(method, endpoint, headers, token, jsonData, data, params) ### Static support methods ### @classmethod def _requestJSONStatic(cls, httpClient, method, endpoint, headers={}, token=None, jsonData=None, data=None, params={}): headers["Accept"] = "application/json" resp = httpClient.request(method, endpoint, headers, token, jsonData, data, params) text = resp.read().decode() return resp.status, text @classmethod def _getImplementations(cls): return {"1.0": ARCRest_1_0, "1.1": ARCRest_1_1} @classmethod def _uploadTransferWorker(cls, restClient, uploadQueue, errorQueue): while True: try: upload = uploadQueue.get(block=False) except queue.Empty: break uploadQueue.task_done() if upload.cancelEvent.is_set(): log.debug(f"Skipping upload for cancelled job {upload.jobid}") continue try: restClient.uploadFile(upload.jobid, upload.name, upload.path) except Exception as exc:
upload.cancelEvent.set() errorQueue.put({"jobid": upload.jobid, "error": exc}) log.debug(f"Error uploading {upload.path} for job {upload.jobid}: {exc}") # TODO: add bail out parameter for cancelEvent? @classmethod def _downloadTransferWorker(cls, restClient, transferQueue, errorQueue, downloadDir, outputFilters={}): while True: try: transfer = transferQueue.get() except TransferQueueEmpty: break jobid, name, path = transfer.jobid, transfer.name, transfer.path if transfer.cancelEvent.is_set(): log.debug(f"Skipping download for cancelled job {jobid}") continue try: if transfer.type in ("file", "diagnose"): try: if transfer.type == "file": restClient.downloadFile(jobid, name, path) elif transfer.type == "diagnose": restClient.downloadDiagnoseFile(jobid, name, path) except Exception as exc: errorQueue.put({"jobid": jobid, "error": exc}) log.error(f"Download {transfer.type} {name} to {path} for job {jobid} failed: {exc}") elif transfer.type == "listing": try: listing = restClient.downloadListing(jobid, name) except Exception as exc: errorQueue.put({"jobid": jobid, "error": exc}) log.error(f"Download listing {name} for job {jobid} failed: {exc}") else: refilter = outputFilters.get(jobid, None) # create new transfer jobs restClient._addTransfersFromListing( transferQueue, jobid, listing, name, path, transfer.cancelEvent, refilter=refilter, ) # every possible exception needs to be handled, otherwise the # threads will lock up except BaseException as exc: errorQueue.put({"jobid": jobid, "error": exc}) log.debug(f"Download name {name} and path {path} for job {jobid} failed: {exc}") @classmethod def _getArclibInputFiles(cls, desc): inputFiles = {} for infile in desc.DataStaging.InputFiles: source = None if len(infile.Sources) > 0: source = infile.Sources[0].fullstr() inputFiles[infile.Name] = source return inputFiles @classmethod def _processJobDescription(cls, jobdesc): import arc exepath = jobdesc.Application.Executable.Path if exepath and exepath.startswith("/"): # absolute paths are on compute nodes exepath = "" inpath = jobdesc.Application.Input outpath = jobdesc.Application.Output errpath = jobdesc.Application.Error logpath = jobdesc.Application.LogDir exePresent = False stdinPresent = False for infile in jobdesc.DataStaging.InputFiles: if exepath == infile.Name: exePresent = True elif inpath == infile.Name: stdinPresent = True stdoutPresent = False stderrPresent = False logPresent = False for outfile in jobdesc.DataStaging.OutputFiles: if outpath == outfile.Name: stdoutPresent = True elif errpath == outfile.Name: stderrPresent = True elif logpath == outfile.Name or logpath == outfile.Name[:-1]: logPresent = True if exepath and not exePresent: infile = arc.InputFileType() infile.Name = exepath jobdesc.DataStaging.InputFiles.append(infile) if inpath and not stdinPresent: infile = arc.InputFileType() infile.Name = inpath jobdesc.DataStaging.InputFiles.append(infile) if outpath and not stdoutPresent: outfile = arc.OutputFileType() outfile.Name = outpath jobdesc.DataStaging.OutputFiles.append(outfile) if errpath and not stderrPresent: outfile = arc.OutputFileType() outfile.Name = errpath jobdesc.DataStaging.OutputFiles.append(outfile) if logpath and not logPresent: outfile = arc.OutputFileType() if not logpath.endswith('/'): outfile.Name = f'{logpath}/' else: outfile.Name = logpath jobdesc.DataStaging.OutputFiles.append(outfile) @classmethod def _parseJobInfo(cls, infoDocument): jobInfo = {} infoDict = infoDocument.get("ComputingActivity", {}) COPY_KEYS = ["Name", "Type", "LocalIDFromManager", 
"Owner", "LocalOwner", "StdIn", "StdOut", "StdErr", "LogDir", "Queue"] for key in COPY_KEYS: if key in infoDict: jobInfo[key] = infoDict[key] INT_KEYS = ["UsedTotalWallTime", "UsedTotalCPUTime", "RequestedTotalWallTime", "RequestedTotalCPUTime", "RequestedSlots", "ExitCode", "WaitingPosition", "UsedMainMemory"] for key in INT_KEYS: if key in infoDict: jobInfo[key] = int(infoDict[key]) TSTAMP_KEYS = ["SubmissionTime", "EndTime", "WorkingAreaEraseTime", "ProxyExpirationTime"] for key in TSTAMP_KEYS: if key in infoDict: jobInfo[key] = datetime.datetime.strptime(infoDict[key], "%Y-%m-%dT%H:%M:%SZ") VARIABLE_KEYS = ["Error", "ExecutionNode"] for key in VARIABLE_KEYS: if key in infoDict: jobInfo[key] = infoDict[key] # /rest/1.0 compatibility if not isinstance(jobInfo[key], list): jobInfo[key] = [jobInfo[key]] states = infoDict.get("State", []) # /rest/1.0 compatibility if not isinstance(states, list): states = [states] # get state from a list of states in different systems for state in states: if state.startswith("arcrest:"): jobInfo["state"] = state[len("arcrest:"):] restartStates = infoDict.get("RestartState", []) # /rest/1.0 compatibility if not isinstance(restartStates, list): restartStates = [restartStates] # get restart state from a list of restart states in different systems for state in restartStates: if state.startswith("arcrest:"): jobInfo["restartState"] = state[len("arcrest:"):] return jobInfo class ARCRest_1_0(ARCRest): def __init__(self, httpClient, token=None, proxypath=None, apiBase="/arex", sendsize=None, recvsize=None, timeout=None): assert not token super().__init__(httpClient, None, proxypath, apiBase, sendsize, recvsize, timeout) self.version = "1.0" self.apiPath = f"{self.apiBase}/rest/{self.version}" def createJobs(self, description, queue=None, delegationID=None, isADL=True): contentType = "application/xml" if isADL else "application/rsl" status, text = self._requestJSON( "POST", "/jobs", data=description, headers={"Content-Type": contentType}, params={"action": "new"}, ) if status != 201: raise ARCHTTPError(status, text) jsonData = json.loads(text) # /rest/1.0 compatibility if not isinstance(jsonData["job"], list): responses = [jsonData["job"]] else: responses = jsonData["job"] results = [] for response in responses: code, reason = int(response["status-code"]), response["reason"] if code != 201: results.append(Result(ARCHTTPError(code, reason), error=True)) else: results.append(Result((response["id"], response["state"]))) return results def submitJobs(self, descs, queue=None, delegationID=None, processDescs=True, matchDescs=True, uploadData=True, workers=None, sendsize=None, timeout=None): return self._submitJobs(descs, queue, delegationID, processDescs, matchDescs, uploadData, workers, sendsize, timeout, v1_0=True) def getDelegationsList(self, type=None): return super().getDelegationsList(type=None) class ARCRest_1_1(ARCRest): def __init__(self, httpClient, token=None, proxypath=None, apiBase="/arex", sendsize=None, recvsize=None, timeout=None): super().__init__(httpClient, token, proxypath, apiBase, sendsize, recvsize, timeout) self.version = "1.1" self.apiPath = f"{self.apiBase}/rest/{self.version}" def createJobs(self, description, queue=None, delegationID=None, isADL=True): params = {"action": "new"} if queue: params["queue"] = queue if delegationID: params["delegation_id"] = delegationID headers = {"Content-Type": "application/xml" if isADL else "application/rsl"} status, text = self._requestJSON( "POST", "/jobs", data=description, headers=headers, params=params, ) 
if status != 201: raise ARCHTTPError(status, text) responses = json.loads(text)["job"] results = [] for response in responses: code, reason = int(response["status-code"]), response["reason"] if code != 201: results.append(Result(ARCHTTPError(code, reason), error=True)) else: results.append(Result((response["id"], response["state"]))) return results def submitJobs(self, descs, queue=None, delegationID=None, processDescs=True, matchDescs=True, uploadData=True, workers=None, sendsize=None, timeout=None): return self._submitJobs(descs, queue, delegationID, processDescs, matchDescs, uploadData, workers, sendsize, timeout) class Transfer: def __init__(self, jobid, name, path, type="file", cancelEvent=None): self.jobid = jobid self.name = name self.path = path self.type = type self.cancelEvent = cancelEvent if not self.cancelEvent: self.cancelEvent = threading.Event() class ARCJob: def __init__(self, id=None, descstr=None): self.id = id self.descstr = descstr self.name = None self.delegid = None self.state = None self.errors = [] self.downloadFiles = [] self.inputFiles = {} self.ExecutionNode = None self.UsedTotalWallTime = None self.UsedTotalCPUTime = None self.RequestedTotalWallTime = None self.RequestedTotalCPUTime = None self.RequestedSlots = None self.ExitCode = None self.Type = None self.LocalIDFromManager = None self.WaitingPosition = None self.Owner = None self.LocalOwner = None self.StdIn = None self.StdOut = None self.StdErr = None self.LogDir = None self.Queue = None self.UsedMainMemory = None self.SubmissionTime = None self.EndTime = None self.WorkingAreaEraseTime = None self.ProxyExpirationTime = None self.RestartState = [] self.Error = [] def updateFromInfo(self, infoDocument): infoDict = infoDocument.get("ComputingActivity", {}) if not infoDict: return if "Name" in infoDict: self.name = infoDict["Name"] # get state from a list of activity states in different systems for state in infoDict.get("State", []): if state.startswith("arcrest:"): self.state = state[len("arcrest:"):] if "Error" in infoDict: # /rest/1.0 compatibility if isinstance(infoDict["Error"], list): self.Error = infoDict["Error"] else: self.Error = [infoDict["Error"]] if "ExecutionNode" in infoDict: # /rest/1.0 compatibility if isinstance(infoDict["ExecutionNode"], list): self.ExecutionNode = infoDict["ExecutionNode"] else: self.ExecutionNode = [infoDict["ExecutionNode"]] # throw out all non ASCII characters from nodes for i in range(len(self.ExecutionNode)): self.ExecutionNode[i] = ''.join([i for i in self.ExecutionNode[i] if ord(i) < 128]) if "UsedTotalWallTime" in infoDict: self.UsedTotalWallTime = int(infoDict["UsedTotalWallTime"]) if "UsedTotalCPUTime" in infoDict: self.UsedTotalCPUTime = int(infoDict["UsedTotalCPUTime"]) if "RequestedTotalWallTime" in infoDict: self.RequestedTotalWallTime = int(infoDict["RequestedTotalWallTime"]) if "RequestedTotalCPUTime" in infoDict: self.RequestedTotalCPUTime = int(infoDict["RequestedTotalCPUTime"]) if "RequestedSlots" in infoDict: self.RequestedSlots = int(infoDict["RequestedSlots"]) if "ExitCode" in infoDict: self.ExitCode = int(infoDict["ExitCode"]) if "Type" in infoDict: self.Type = infoDict["Type"] if "LocalIDFromManager" in infoDict: self.LocalIDFromManager = infoDict["LocalIDFromManager"] if "WaitingPosition" in infoDict: self.WaitingPosition = int(infoDict["WaitingPosition"]) if "Owner" in infoDict: self.Owner = infoDict["Owner"] if "LocalOwner" in infoDict: self.LocalOwner = infoDict["LocalOwner"] if "StdIn" in infoDict: self.StdIn = infoDict["StdIn"] if "StdOut" in 
infoDict: self.StdOut = infoDict["StdOut"] if "StdErr" in infoDict: self.StdErr = infoDict["StdErr"] if "LogDir" in infoDict: self.LogDir = infoDict["LogDir"] if "Queue" in infoDict: self.Queue = infoDict["Queue"] if "UsedMainMemory" in infoDict: self.UsedMainMemory = int(infoDict["UsedMainMemory"]) if "SubmissionTime" in infoDict: self.SubmissionTime = datetime.datetime.strptime( infoDict["SubmissionTime"], "%Y-%m-%dT%H:%M:%SZ" ) if "EndTime" in infoDict: self.EndTime = datetime.datetime.strptime( infoDict["EndTime"], "%Y-%m-%dT%H:%M:%SZ" ) if "WorkingAreaEraseTime" in infoDict: self.WorkingAreaEraseTime = datetime.datetime.strptime( infoDict["WorkingAreaEraseTime"], "%Y-%m-%dT%H:%M:%SZ" ) if "ProxyExpirationTime" in infoDict: self.ProxyExpirationTime = datetime.datetime.strptime( infoDict["ProxyExpirationTime"], "%Y-%m-%dT%H:%M:%SZ" ) if "RestartState" in infoDict: self.RestartState = infoDict["RestartState"] def getArclibInputFiles(self, desc): self.inputFiles = {} for infile in desc.DataStaging.InputFiles: source = None if len(infile.Sources) > 0: source = infile.Sources[0].fullstr() self.inputFiles[infile.Name] = source class TransferQueue: def __init__(self, numWorkers): self.queue = queue.Queue() self.lock = threading.Lock() self.barrier = threading.Barrier(numWorkers) def put(self, val): with self.lock: self.queue.put(val) self.barrier.reset() def get(self): while True: with self.lock: if not self.queue.empty(): val = self.queue.get() self.queue.task_done() return val try: self.barrier.wait() except threading.BrokenBarrierError: continue else: raise TransferQueueEmpty() class TransferQueueEmpty(Exception): pass def isLocalInputFile(name, source): """ Return path if local or empty string if remote URL. Raises: - ValueError: source cannot be parsed """ if not source: return name url = urlparse(source) if url.scheme not in ("file", None, "") or url.hostname: return "" return url.path nordugrid-arc-7.1.1/src/clients/PaxHeaders/client.conf0000644000000000000000000000013215067751327017730 xustar0030 mtime=1759498967.676561668 30 atime=1759498967.824493028 30 ctime=1759499031.102066814 nordugrid-arc-7.1.1/src/clients/client.conf0000644000175000002070000000670015067751327021635 0ustar00mockbuildmock00000000000000[common] ## certificatepath = path - Specify the location of client certificate file. ## Environmental variable X509_USER_CERT redefines this value. ## default: $HOME/.globus/usercert.pem #certificatepath=/home/user/credentials/cert.pem ## keypath = path - Specify the location of client secret key file. ## Environmental variable X509_USER_KEY redefines this value. ## default: $HOME/.globus/userkey.pem #keypath=/home/user/credentials/key.pem ## cacertificatesdirectory = path - Specify the location of CA certificates directory ## Environmental variable X509_CERT_DIR redefines this value. ## default: /etc/grid-security/certificates #cacertificatesdirectory=/home/user/.globus/certificates ## causesystem = 1|true|0|false - Either default CA layout of OpenSSL (aka system) to be used. ## Environmental variable X509_CERT_POLICY (system|grid|any) redefines this value. ## default: 0 #causesystem=1 ## causegrid = 1|true|0|false - Either Grid CA layout to be used. ## Environmental variable X509_CERT_POLICY (system|grid|any) redefines this value. ## default: 1 #causegrid=0 ## proxypath = path - Specify the location of proxy certificate (both for generation and usage) ## Environmental variable X509_USER_PROXY redefines this value. 
## default: /tmp/x509up_u${UID} #proxypath=/home/user/credentials/proxy.pem ## vomsespath = path - Path to file or directory that holds client VOMS configuration ## to generate proxy certificates ## Environmental variables X509_VOMS_FILE and X509_VOMSES redefine this value ## If missing, arcproxy will search for vomses in the following locations: ## - ~/.arc/vomses ## - ~/.voms/vomses ## - /etc/vomses ## - /etc/grid-security/vomses ## default: undefined #vomsespath=/home/user/credentials/vomses ## defaultvoms = vo[:command] - Default value for --voms (-S) arcproxy option that is ## used to define VO and optionally FQANs used during proxy certificate generation ## multivalued ## default: undefined #defaultvoms=atlas:/atlas/Role=pilot #defaultvoms=nordugrid.org:all #defaultvoms=ops.ndgf.org ## rejectdiscovery = service - Specify the FQDN or URLs of the services that should be rejected ## during the service discovery process by CLI tools (arcsub, arctest) ## multivalued ## default: undefined #rejectdiscovery=bad.service.org #rejectdiscovery=bad2.service.org ## rejectmanagement = service - Specify the FQDN or URLs of the CEs that should be skipped ## during job management (e.g. arcstat, arckill) ## multivalued ## default: undefined #rejectmanagement=bad3.service.org #rejectmanagement=bad4.service.org ## brokername = broker - Specify the broker used in resource discovery. ## The full list of installed brokers can be obtained by running "arcsub -P" ## default: Random #brokername=FastestQueue ## brokerarguments = args - Specify broker arguments (if applicable to specified broker) ## default: undefined #brokername=PythonBroker #brokerarguments=ACIXBroker.ACIXBroker:https://cacheindex.ndgf.org:6443/data/index ## timeout = seconds - Amount of time to wait for a service to respond before ## considering it dead. ## default: 20 #timeout=60 ## joblist = path - Path to the jobs database that holds all extra data ## about submitted jobs to be used during further job management ## default: $HOME/.arc/jobs.dat #joblist=/home/user/arcjobs.dat ## joblisttype = type - Type of the backend used for jobs database. ## IT IS STRONGLY advised to keep the default SQLITE type of backend.
## allowedvalues: SQLITE, XML ## default: SQLITE #joblisttype=XML nordugrid-arc-7.1.1/src/clients/PaxHeaders/credentials0000644000000000000000000000013015067751427020022 xustar0029 mtime=1759499031.17945571 30 atime=1759499034.766510215 29 ctime=1759499031.17945571 nordugrid-arc-7.1.1/src/clients/credentials/0000755000175000002070000000000015067751427022003 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/arcproxy_proxy.cpp0000644000000000000000000000013215067751327023714 xustar0030 mtime=1759498967.679490824 30 atime=1759498967.825493043 30 ctime=1759499031.178072362 nordugrid-arc-7.1.1/src/clients/credentials/arcproxy_proxy.cpp0000644000175000002070000001127015067751327025617 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NSS #include #endif using namespace ArcCredential; void create_tmp_proxy(std::string& proxy, Arc::Credential& signer) { int keybits = 2048; Arc::Time now; Arc::Period period = 3600 * 12 + 300; std::string req_str; Arc::Credential tmp_proxyreq(now-Arc::Period(300), period, keybits); tmp_proxyreq.GenerateRequest(req_str); std::string proxy_private_key; std::string signing_cert; std::string signing_cert_chain; tmp_proxyreq.OutputPrivatekey(proxy_private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); if (!signer.SignRequest(&tmp_proxyreq, proxy)) throw std::runtime_error("Failed to sign proxy"); proxy.append(proxy_private_key).append(signing_cert).append(signing_cert_chain); } void create_proxy(std::string& proxy, Arc::Credential& signer, const std::string& proxy_policy, const Arc::Time& proxy_start, const Arc::Period& proxy_period, const std::string& vomsacseq, int keybits, const std::string& signing_algorithm) { std::string private_key, signing_cert, signing_cert_chain; std::string req_str; if(keybits < 0) keybits = signer.GetKeybits(); Arc::Credential cred_request(proxy_start, proxy_period, keybits); cred_request.SetSigningAlgorithm(signer.GetSigningAlgorithm()); if(!signing_algorithm.empty() && signing_algorithm != "inherit") { if(signing_algorithm == "sha1") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA1); } else if(signing_algorithm == "sha2") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA256); } else if(signing_algorithm == "sha224") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA224); } else if(signing_algorithm == "sha256") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA256); } else if(signing_algorithm == "sha384") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA384); } else if(signing_algorithm == "sha512") { cred_request.SetSigningAlgorithm(Arc::SIGN_SHA512); } else { throw std::runtime_error("Unknown signing algorithm specified: "+signing_algorithm); } } cred_request.GenerateRequest(req_str); cred_request.OutputPrivatekey(private_key); signer.OutputCertificate(signing_cert); signer.OutputCertificateChain(signing_cert_chain); //Put the voms attribute certificate into proxy certificate if (!vomsacseq.empty()) { bool r = cred_request.AddExtension("acseq", (char**)(vomsacseq.c_str())); if (!r) std::cout << Arc::IString("Failed to add VOMS AC extension. 
Your proxy may be incomplete.") << std::endl; } if(!proxy_policy.empty()) { cred_request.SetProxyPolicy("rfc", "anylanguage", proxy_policy, -1); } else if(CERT_IS_LIMITED_PROXY(signer.GetType())) { // Gross hack for globus. If Globus marks own proxy as limited // it expects every derived proxy to be limited or at least // independent. Independent proxies has little sense in Grid // world. So here we make our proxy globus-limited to allow // it to be used with globus code. cred_request.SetProxyPolicy("rfc", "limited", proxy_policy, -1); } else { cred_request.SetProxyPolicy("rfc", "inheritAll", proxy_policy, -1); } if (!signer.SignRequest(&cred_request, proxy)) throw std::runtime_error("Failed to sign proxy"); proxy.append(private_key).append(signing_cert).append(signing_cert_chain); } void write_proxy_file(const std::string& path, const std::string& content) { if((!Arc::FileDelete(path)) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove proxy file " + path); } if(!Arc::FileCreate(path, content, 0, 0, S_IRUSR | S_IWUSR)) { throw std::runtime_error("Failed to create proxy file " + path); } } void remove_proxy_file(const std::string& path) { if((!Arc::FileDelete(path)) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove proxy file " + path); } } void remove_cert_file(const std::string& path) { if((!Arc::FileDelete(path)) && (errno != ENOENT)) { throw std::runtime_error("Failed to remove certificate file " + path); } } nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327022134 xustar0030 mtime=1759498967.678490809 30 atime=1759498967.825493043 30 ctime=1759499031.171430342 nordugrid-arc-7.1.1/src/clients/credentials/Makefile.am0000644000175000002070000000147215067751327024042 0ustar00mockbuildmock00000000000000bin_PROGRAMS = arcproxy arcproxy_SOURCES = arcproxy.cpp arcproxy_voms.cpp arcproxy_myproxy.cpp arcproxy_proxy.cpp arcproxy.h arcproxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcproxy_LDADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) man_MANS = arcproxy.1 %.1: % %.1.in LANG=C help2man -N -h "-h|sed s/…/.../g" -i $(word 2,$^) -o $@ ./$< EXTRA_DIST = $(man_MANS:=.in) nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347022147 xustar0030 mtime=1759498983.647853675 30 atime=1759499019.625280144 30 ctime=1759499031.172517638 nordugrid-arc-7.1.1/src/clients/credentials/Makefile.in0000644000175000002070000011677415067751347024071 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arcproxy$(EXEEXT) subdir = src/clients/credentials ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = $(bin_PROGRAMS) am_arcproxy_OBJECTS = arcproxy-arcproxy.$(OBJEXT) \ arcproxy-arcproxy_voms.$(OBJEXT) \ arcproxy-arcproxy_myproxy.$(OBJEXT) \ arcproxy-arcproxy_proxy.$(OBJEXT) arcproxy_OBJECTS = $(am_arcproxy_OBJECTS) am__DEPENDENCIES_1 = arcproxy_DEPENDENCIES = $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ 
$(top_builddir)/src/hed/libs/common/libarccommon.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = arcproxy_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcproxy_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/arcproxy-arcproxy.Po \ ./$(DEPDIR)/arcproxy-arcproxy_myproxy.Po \ ./$(DEPDIR)/arcproxy-arcproxy_proxy.Po \ ./$(DEPDIR)/arcproxy-arcproxy_voms.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(arcproxy_SOURCES) DIST_SOURCES = $(arcproxy_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' 
| \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL 
= @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ 
PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ arcproxy_SOURCES = arcproxy.cpp arcproxy_voms.cpp arcproxy_myproxy.cpp arcproxy_proxy.cpp arcproxy.h arcproxy_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcproxy_LDADD = \ $(top_builddir)/src/hed/libs/credentialstore/libarccredentialstore.la \ $(top_builddir)/src/hed/libs/communication/libarccommunication.la \ $(top_builddir)/src/hed/libs/delegation/libarcdelegation.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ 
$(top_builddir)/src/hed/libs/crypto/libarccrypto.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la \ $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) man_MANS = arcproxy.1 EXTRA_DIST = $(man_MANS:=.in) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/credentials/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/credentials/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arcproxy$(EXEEXT): $(arcproxy_OBJECTS) $(arcproxy_DEPENDENCIES) $(EXTRA_arcproxy_DEPENDENCIES) @rm -f arcproxy$(EXEEXT) $(AM_V_CXXLD)$(arcproxy_LINK) $(arcproxy_OBJECTS) $(arcproxy_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f 
*.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy_myproxy.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy_proxy.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcproxy-arcproxy_voms.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' >$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< arcproxy-arcproxy.o: arcproxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy.Tpo -c -o arcproxy-arcproxy.o `test -f 'arcproxy.cpp' || echo '$(srcdir)/'`arcproxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy.Tpo $(DEPDIR)/arcproxy-arcproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy.cpp' object='arcproxy-arcproxy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy.o `test -f 'arcproxy.cpp' || echo '$(srcdir)/'`arcproxy.cpp arcproxy-arcproxy.obj: arcproxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy.Tpo -c -o arcproxy-arcproxy.obj `if test -f 'arcproxy.cpp'; then $(CYGPATH_W) 'arcproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy.Tpo $(DEPDIR)/arcproxy-arcproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy.cpp' object='arcproxy-arcproxy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy.obj `if test -f 'arcproxy.cpp'; then $(CYGPATH_W) 'arcproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy.cpp'; fi` arcproxy-arcproxy_voms.o: arcproxy_voms.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_voms.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_voms.Tpo -c -o arcproxy-arcproxy_voms.o `test -f 'arcproxy_voms.cpp' || echo '$(srcdir)/'`arcproxy_voms.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy_voms.Tpo $(DEPDIR)/arcproxy-arcproxy_voms.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy_voms.cpp' object='arcproxy-arcproxy_voms.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_voms.o `test -f 'arcproxy_voms.cpp' || echo '$(srcdir)/'`arcproxy_voms.cpp arcproxy-arcproxy_voms.obj: arcproxy_voms.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_voms.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_voms.Tpo -c -o arcproxy-arcproxy_voms.obj `if test -f 'arcproxy_voms.cpp'; then $(CYGPATH_W) 'arcproxy_voms.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_voms.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy_voms.Tpo $(DEPDIR)/arcproxy-arcproxy_voms.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy_voms.cpp' object='arcproxy-arcproxy_voms.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_voms.obj `if test -f 'arcproxy_voms.cpp'; then $(CYGPATH_W) 'arcproxy_voms.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_voms.cpp'; fi` arcproxy-arcproxy_myproxy.o: arcproxy_myproxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_myproxy.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo -c -o arcproxy-arcproxy_myproxy.o `test -f 'arcproxy_myproxy.cpp' || echo '$(srcdir)/'`arcproxy_myproxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo $(DEPDIR)/arcproxy-arcproxy_myproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy_myproxy.cpp' object='arcproxy-arcproxy_myproxy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_myproxy.o `test -f 'arcproxy_myproxy.cpp' || echo '$(srcdir)/'`arcproxy_myproxy.cpp arcproxy-arcproxy_myproxy.obj: arcproxy_myproxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_myproxy.obj -MD -MP -MF 
$(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo -c -o arcproxy-arcproxy_myproxy.obj `if test -f 'arcproxy_myproxy.cpp'; then $(CYGPATH_W) 'arcproxy_myproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_myproxy.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy_myproxy.Tpo $(DEPDIR)/arcproxy-arcproxy_myproxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy_myproxy.cpp' object='arcproxy-arcproxy_myproxy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_myproxy.obj `if test -f 'arcproxy_myproxy.cpp'; then $(CYGPATH_W) 'arcproxy_myproxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_myproxy.cpp'; fi` arcproxy-arcproxy_proxy.o: arcproxy_proxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_proxy.o -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo -c -o arcproxy-arcproxy_proxy.o `test -f 'arcproxy_proxy.cpp' || echo '$(srcdir)/'`arcproxy_proxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo $(DEPDIR)/arcproxy-arcproxy_proxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy_proxy.cpp' object='arcproxy-arcproxy_proxy.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_proxy.o `test -f 'arcproxy_proxy.cpp' || echo '$(srcdir)/'`arcproxy_proxy.cpp arcproxy-arcproxy_proxy.obj: arcproxy_proxy.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -MT arcproxy-arcproxy_proxy.obj -MD -MP -MF $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo -c -o arcproxy-arcproxy_proxy.obj `if test -f 'arcproxy_proxy.cpp'; then $(CYGPATH_W) 'arcproxy_proxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_proxy.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcproxy-arcproxy_proxy.Tpo $(DEPDIR)/arcproxy-arcproxy_proxy.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcproxy_proxy.cpp' object='arcproxy-arcproxy_proxy.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcproxy_CXXFLAGS) $(CXXFLAGS) -c -o arcproxy-arcproxy_proxy.obj `if test -f 'arcproxy_proxy.cpp'; then $(CYGPATH_W) 'arcproxy_proxy.cpp'; else $(CYGPATH_W) '$(srcdir)/arcproxy_proxy.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 
'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/arcproxy-arcproxy.Po -rm -f ./$(DEPDIR)/arcproxy-arcproxy_myproxy.Po -rm -f ./$(DEPDIR)/arcproxy-arcproxy_proxy.Po -rm -f ./$(DEPDIR)/arcproxy-arcproxy_voms.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/arcproxy-arcproxy.Po -rm -f ./$(DEPDIR)/arcproxy-arcproxy_myproxy.Po -rm -f ./$(DEPDIR)/arcproxy-arcproxy_proxy.Po -rm -f ./$(DEPDIR)/arcproxy-arcproxy_voms.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man1 \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ uninstall-binPROGRAMS uninstall-man uninstall-man1 .PRECIOUS: Makefile %.1: % 
%.1.in LANG=C help2man -N -h "-h|sed s/…/.../g" -i $(word 2,$^) -o $@ ./$< # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/arcproxy.h0000644000000000000000000000013215067751327022120 xustar0030 mtime=1759498967.679365926 30 atime=1759498967.825493043 30 ctime=1759499031.179265735 nordugrid-arc-7.1.1/src/clients/credentials/arcproxy.h0000644000175000002070000000325015067751327024022 0ustar00mockbuildmock00000000000000 // Functions in arcproxy_proxy.cpp // Create simple temporary proxy void create_tmp_proxy(std::string& proxy, Arc::Credential& signer); // Create proxy with all bells and whistles as specified in arguments void create_proxy(std::string& proxy, Arc::Credential& signer, const std::string& proxy_policy, const Arc::Time& proxy_start, const Arc::Period& proxy_period, const std::string& vomsacseq, int keybits, const std::string& signing_algorithm); // Store content of proxy void write_proxy_file(const std::string& path, const std::string& content); // Delete proxy file void remove_proxy_file(const std::string& path); // Delete certificate file void remove_cert_file(const std::string& path); // Functions in arcproxy_voms.cpp // Create simple temporary proxy // Collect VOMS AC from configured Voms servers bool contact_voms_servers(std::map >& vomscmdlist, std::list& orderlist, std::string& vomses_path, bool use_gsi_comm, bool use_http_comm, const std::string& voms_period, Arc::UserConfig& usercfg, Arc::Logger& logger, const std::string& tmp_proxy_path, std::string& vomsacseq); // Functions in arcproxy_myproxy.cpp // Communicate with MyProxy server bool contact_myproxy_server(const std::string& myproxy_server, const std::string& myproxy_command, const std::string& myproxy_user_name, bool use_empty_passphrase, const std::string& myproxy_period, const std::string& retrievable_by_cert, Arc::Time& proxy_start, Arc::Period& proxy_period, std::list& vomslist, std::string& vomses_path, const std::string& proxy_path, Arc::UserConfig& usercfg, Arc::Logger& logger); nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/arcproxy.cpp0000644000000000000000000000013215067751327022453 xustar0030 mtime=1759498967.679365926 30 atime=1759498967.825493043 30 ctime=1759499031.174815145 nordugrid-arc-7.1.1/src/clients/credentials/arcproxy.cpp0000644000175000002070000017334415067751327024371 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NSS #include #endif #include "arcproxy.h" #include "glibmm-compat.h" using namespace ArcCredential; #ifdef HAVE_NSS static void get_default_nssdb_path(std::vector& nss_paths) { const Arc::User user; // The profiles.ini could exist under firefox, seamonkey and thunderbird std::vector profiles_homes; std::string home_path = user.Home(); std::string profiles_home; #if defined(_MACOSX) profiles_home = home_path + G_DIR_SEPARATOR_S "Library" G_DIR_SEPARATOR_S "Application Support" G_DIR_SEPARATOR_S "Firefox"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "Library" G_DIR_SEPARATOR_S "Application Support" G_DIR_SEPARATOR_S "SeaMonkey"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S "Library" 
G_DIR_SEPARATOR_S "Thunderbird"; profiles_homes.push_back(profiles_home); #else //Linux profiles_home = home_path + G_DIR_SEPARATOR_S ".mozilla" G_DIR_SEPARATOR_S "firefox"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S ".mozilla" G_DIR_SEPARATOR_S "seamonkey"; profiles_homes.push_back(profiles_home); profiles_home = home_path + G_DIR_SEPARATOR_S ".thunderbird"; profiles_homes.push_back(profiles_home); #endif std::vector pf_homes; // Remove the unreachable directories for(int i=0; i ini_home; // Remove the unreachable "profiles.ini" files for(int i=0; i::iterator it; for(it = ini_home.begin(); it != ini_home.end(); ++it) { std::string pf_ini = (*it).first; std::string pf_home = (*it).second; std::string profiles; std::ifstream in_f(pf_ini.c_str()); std::getline(in_f, profiles, '\0'); std::list lines; Arc::tokenize(profiles, lines, "\n"); // Parse each [Profile] for (std::list::iterator i = lines.begin(); i != lines.end(); ++i) { std::vector inivalue; Arc::tokenize(*i, inivalue, "="); if((inivalue[0].find("Profile") != std::string::npos) && (inivalue[0].find("StartWithLast") == std::string::npos)) { bool is_relative = false; std::string path; std::advance(i, 1); for(; i != lines.end();) { inivalue.clear(); Arc::tokenize(*i, inivalue, "="); if (inivalue.size() == 2) { if (inivalue[0] == "IsRelative") { if(inivalue[1] == "1") is_relative = true; else is_relative = false; } if (inivalue[0] == "Path") path = inivalue[1]; } if(inivalue[0].find("Profile") != std::string::npos) { --i; break; } std::advance(i, 1); } std::string nss_path; if(is_relative) nss_path = pf_home + G_DIR_SEPARATOR_S + path; else nss_path = path; struct stat st; if((::stat(nss_path.c_str(),&st) == 0) && (S_ISDIR(st.st_mode)) && (user.get_uid() == st.st_uid)) nss_paths.push_back(nss_path); if(i == lines.end()) break; } } } return; } static void get_nss_certname(std::string& certname, Arc::Logger& logger) { std::list certInfolist; ArcAuthNSS::nssListUserCertificatesInfo(certInfolist); if(certInfolist.size()) { std::cout<::iterator it; for(it = certInfolist.begin(); it != certInfolist.end(); ++it) { ArcAuthNSS::certInfo cert_info = (*it); std::string sub_dn = cert_info.subject_dn; std::string cn_name; std::string::size_type pos1, pos2; pos1 = sub_dn.find("CN="); if(pos1 != std::string::npos) { pos2 = sub_dn.find(",", pos1); if(pos2 != std::string::npos) cn_name = " ("+sub_dn.substr(pos1+3, pos2-pos1-3) + ")"; } std::cout< cert_info.end) msg = "(expired)"; else if((now + 300) > cert_info.end) msg = "(will be expired in 5 min)"; else if((now + 3600*24) > cert_info.end) { Arc::Period left(cert_info.end - now); msg = std::string("(will be expired in ") + std::string(left) + ")"; } std::cout<1)) { char c = getchar(); int num = c - '0'; if((num<=certInfolist.size()) && (num>=1)) { it = certInfolist.begin(); std::advance(it, num-1); certname = (*it).certname; break; } } } #endif static std::string signTypeToString(Arc::Signalgorithm alg) { switch(alg) { case Arc::SIGN_SHA1: return "sha1"; case Arc::SIGN_SHA224: return "sha224"; case Arc::SIGN_SHA256: return "sha256"; case Arc::SIGN_SHA384: return "sha384"; case Arc::SIGN_SHA512: return "sha512"; default: break; } return "unknown"; } typedef enum { pass_all, pass_private_key, pass_myproxy, pass_myproxy_new, pass_nss } pass_destination_type; std::map passsources; class PasswordSourceFile: public Arc::PasswordSource { private: std::ifstream file_; public: PasswordSourceFile(const std::string& filename):file_(filename.c_str()) { }; virtual Result 
Get(std::string& password, int minsize, int maxsize) { if(!file_) return Arc::PasswordSource::NO_PASSWORD; std::getline(file_, password); return Arc::PasswordSource::PASSWORD; }; }; static int runmain(int argc, char *argv[]) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcproxy"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); Arc::OptionParser options(" ", istring("The arcproxy command creates a proxy from a key/certificate pair which can\n" "then be used to access grid resources."), istring("Supported constraints are:\n" " validityStart=time (e.g. 2008-05-29T10:20:30Z; if not specified, start\n" " from now)\n\n" " validityEnd=time\n\n" " validityPeriod=time (e.g. 43200 or 12h or 12H; if both validityPeriod and\n" " validityEnd not specified, the default is 12 hours for local proxy, and\n" " 168 hours for delegated proxy on myproxy server)\n\n" " vomsACvalidityPeriod=time (e.g. 43200 or 12h or 12H; if not specified, the\n" " default is the minimum value of 12 hours and validityPeriod)\n\n" " myproxyvalidityPeriod=time (lifetime of proxies delegated by myproxy server,\n" " e.g. 43200 or 12h or 12H; if not specified, the default is the minimum value\n" " of 12 hours and validityPeriod (which is lifetime of the delegated proxy on\n" " myproxy server))\n\n" " proxyPolicy=policy content\n\n" " proxyPolicyFile=policy file\n\n" " keybits=number - length of the key to generate. Default is 2048 bits.\n" " Special value 'inherit' is to use key length of signing certificate.\n\n" " signingAlgorithm=name - signing algorithm to use for signing public key of\n" " proxy. Possible values are sha1, sha2 (alias for sha256), sha224, sha256,\n" " sha384, sha512 and inherit (use algorithm of signing certificate). Default\n" " is inherit. 
With old systems, only sha1 is acceptable.\n" "\n" "Supported information item names are:\n" " subject - subject name of proxy certificate.\n\n" " identity - identity subject name of proxy certificate.\n\n" " issuer - issuer subject name of proxy certificate.\n\n" " ca - subject name of CA which issued initial certificate.\n\n" " path - file system path to file containing proxy.\n\n" " type - type of proxy certificate.\n\n" " validityStart - timestamp when proxy validity starts.\n\n" " validityEnd - timestamp when proxy validity ends.\n\n" " validityPeriod - duration of proxy validity in seconds.\n\n" " validityLeft - duration of proxy validity left in seconds.\n\n" " vomsVO - VO name represented by VOMS attribute\n\n" " vomsSubject - subject of certificate for which VOMS attribute is issued\n\n" " vomsIssuer - subject of service which issued VOMS certificate\n\n" " vomsACvalidityStart - timestamp when VOMS attribute validity starts.\n\n" " vomsACvalidityEnd - timestamp when VOMS attribute validity ends.\n\n" " vomsACvalidityPeriod - duration of VOMS attribute validity in seconds.\n\n" " vomsACvalidityLeft - duration of VOMS attribute validity left in seconds.\n\n" " proxyPolicy\n\n" " keybits - size of proxy certificate key in bits.\n\n" " signingAlgorithm - algorithm used to sign proxy certificate.\n\n" "Items are printed in the requested order and are separated by newline.\n" "If an item has multiple values they are printed on the same line separated by |.\n" "\n" "Supported password destinations are:\n" " key - for reading private key\n\n" " myproxy - for accessing credentials at MyProxy service\n\n" " myproxynew - for creating credentials at MyProxy service\n\n" " all - for any purpose.\n" "\n" "Supported password sources are:\n" " quoted string (\"password\") - explicitly specified password\n\n" " int - interactively request password from console\n\n" " stdin - read password from standard input delimited by newline\n\n" " file:filename - read password from file named filename\n\n" " stream:# - read password from input stream number #.\n" " Currently only 0 (standard input) is supported." )); std::string proxy_path; options.AddOption('P', "proxy", istring("path to the proxy file"), istring("path"), proxy_path); std::string cert_path; options.AddOption('C', "cert", istring("path to the certificate file; it can be either PEM, DER, or PKCS12 formatted"), istring("path"), cert_path); std::string key_path; options.AddOption('K', "key", istring("path to the private key file; if the certificate is in PKCS12 format there is no need to give the private key"), istring("path"), key_path); std::string ca_dir; options.AddOption('T', "cadir", istring("path to the trusted certificate directory, only needed for the VOMS client functionality"), istring("path"), ca_dir); std::string voms_dir; options.AddOption('s', "vomsdir", istring("path to the top directory of VOMS *.lsc files, only needed for the VOMS client functionality"), istring("path"), voms_dir); std::string vomses_path; options.AddOption('V', "vomses", istring("path to the VOMS server configuration file"), istring("path"), vomses_path); std::list<std::string> vomslist; options.AddOption('S', "voms", istring("voms<:command>. 
Specify VOMS server\n" " More than one VOMS server can be specified like this:\n" " --voms VOa:command1 --voms VOb:command2.\n" " :command is optional, and is used to ask for specific attributes (e.g. roles)\n" " command options are:\n\n" " all --- put all of this DN's attributes into AC;\n\n" " list --- list all of the DN's attributes; will not create AC extension;\n\n" " /Role=yourRole --- specify the role, if this DN\n" " has such a role, the role will be put into AC;\n\n" " /voname/groupname/Role=yourRole --- specify the VO, group and role; if this DN\n" " has such a role, the role will be put into AC.\n\n" " If this option is not specified values from configuration files are used.\n" " To avoid anything being used specify -S with an empty value.\n" ), istring("string"), vomslist); std::list<std::string> orderlist; options.AddOption('o', "order", istring("group<:role>. Specify ordering of attributes\n" " Example: --order /knowarc.eu/coredev:Developer,/knowarc.eu/testers:Tester\n" " or: --order /knowarc.eu/coredev:Developer --order /knowarc.eu/testers:Tester\n" " Note that it does not make sense to specify the order if you have two or more different VOMS servers specified"), istring("string"), orderlist); bool use_gsi_comm = false; options.AddOption('G', "gsicom", istring("use GSI communication protocol for contacting VOMS services"), use_gsi_comm); bool use_http_comm = false; options.AddOption('H', "httpcom", istring("use HTTP communication protocol for contacting VOMS services that provide RESTful access\n" " Note that for RESTful access the \'list\' command and multiple VOMS servers are not supported\n"), use_http_comm); bool use_old_comm = false; options.AddOption('B', "oldcom", istring("use old communication protocol for contacting VOMS services instead of RESTful access\n"), use_old_comm); bool use_gsi_proxy = false; options.AddOption('O', "old", istring("this option is not functional (old GSI proxies are not supported anymore)"), use_gsi_proxy); bool info = false; options.AddOption('I', "info", istring("print all information about this proxy."), info); std::list<std::string> infoitemlist; options.AddOption('i', "infoitem", istring("print selected information about this proxy."), istring("string"), infoitemlist); bool remove_proxy = false; options.AddOption('r', "remove", istring("remove proxy"), remove_proxy); std::string user_name; //user name to MyProxy server options.AddOption('U', "user", istring("username to MyProxy server (if missing subject of user certificate is used)"), istring("string"), user_name); bool use_empty_passphrase = false; //whether to use empty passphrase to MyProxy server options.AddOption('N', "nopassphrase", istring( "don't prompt for a credential passphrase when retrieving a " "credential from a MyProxy server.\n" " The precondition of this choice is that the credential was PUT onto\n" " the MyProxy server without a passphrase by using the\n" " -R (--retrievable_by_cert) option.\n" " This option is specific to the GET command when contacting a MyProxy\n" " server."), use_empty_passphrase); std::string retrievable_by_cert; //entity allowed to retrieve credential without passphrase options.AddOption('R', "retrievable_by_cert", istring( "Allow specified entity to retrieve credential without passphrase.\n" " This option is specific to the PUT command when contacting a MyProxy\n" " server."), istring("string"), retrievable_by_cert); std::string myproxy_server; //url of MyProxy server options.AddOption('L', "myproxysrv", istring("hostname[:port] of MyProxy server"), istring("string"), myproxy_server); std::string 
myproxy_command; //command to myproxy server options.AddOption('M', "myproxycmd", istring( "command to MyProxy server. The command can be PUT, GET, INFO, NEWPASS or DESTROY.\n" " PUT -- put delegated credentials to the MyProxy server;\n\n" " GET -- get delegated credentials from the MyProxy server;\n\n" " INFO -- get and present information about credentials stored at the MyProxy server;\n\n" " NEWPASS -- change password protecting credentials stored at the MyProxy server;\n\n" " DESTROY -- wipe off credentials stored at the MyProxy server;\n\n" " Local credentials (certificate and key) are not necessary except in case of PUT.\n" " MyProxy functionality can be used together with VOMS functionality.\n" " --voms and --vomses can be used for the GET command if VOMS attributes\n" " are required to be included in the proxy.\n" ), istring("string"), myproxy_command); bool use_nssdb = false; #ifdef HAVE_NSS options.AddOption('F', "nssdb", istring("use NSS credential database in default Mozilla profiles, including Firefox, Seamonkey and Thunderbird."), use_nssdb); #endif std::list<std::string> constraintlist; options.AddOption('c', "constraint", istring("proxy constraints"), istring("string"), constraintlist); std::list<std::string> passsourcelist; options.AddOption('p', "passwordsource", istring("password destination=password source"), istring("string"), passsourcelist); int timeout = -1; options.AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); std::string conffile; options.AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); std::string debug; options.AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); bool force_system_ca = false; options.AddOption('\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca); bool force_grid_ca = false; options.AddOption('\0', "gridca", istring("force using CA certificates configuration for Grid services (typically IGTF)"), force_grid_ca); bool force_any_ca = false; options.AddOption('\0', "anyca", istring("force using CA certificates configuration for Grid services (typically IGTF) and the one provided by OpenSSL"), force_any_ca); bool allow_insecure_connection = false; options.AddOption('\0', "allowinsecureconnection", istring("allow TLS connection which failed verification"), allow_insecure_connection); bool version = false; options.AddOption('v', "version", istring("print version information"), version); std::list<std::string> params = options.Parse(argc, argv); if(use_http_comm && use_old_comm) { logger.msg(Arc::ERROR, "RESTful and old VOMS communication protocols can't be requested simultaneously."); return EXIT_FAILURE; } if (version) { std::cout << Arc::IString("%s version %s", "arcproxy", VERSION) << std::endl; return EXIT_SUCCESS; } // If debug is specified as argument, it should be set before loading the configuration. if (!debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(debug)); logger.msg(Arc::VERBOSE, "Running command: %s", options.GetCommandWithArguments()); // This ensures command line args overwrite all other options if(!cert_path.empty())Arc::SetEnv("X509_USER_CERT", cert_path); if(!key_path.empty())Arc::SetEnv("X509_USER_KEY", key_path); if(!proxy_path.empty())Arc::SetEnv("X509_USER_PROXY", proxy_path); if(!ca_dir.empty())Arc::SetEnv("X509_CERT_DIR", ca_dir); // Set default, predefined or guessed credentials. Also check if they exist. 
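// For illustration (a hedged example; the paths below are hypothetical and
// not taken from this file): running
//   arcproxy -C ~/.globus/usercert.pem -K ~/.globus/userkey.pem -T /etc/grid-security/certificates
// has the same effect as exporting X509_USER_CERT, X509_USER_KEY and
// X509_CERT_DIR beforehand, because the SetEnv() calls above copy the
// command line values into those environment variables before the
// UserConfig object below is constructed.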
#ifdef HAVE_NSS Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::TryCredentials)); #else Arc::UserConfig usercfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::TryCredentials)); #endif if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization."); return EXIT_FAILURE; } if (force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (allow_insecure_connection) usercfg.TLSAllowInsecure(true); if(use_nssdb) { usercfg.CertificatePath(""); usercfg.KeyPath(""); } if(vomslist.empty()) { vomslist = usercfg.DefaultVOMSes(); } for(std::list<std::string>::iterator voms = vomslist.begin(); voms != vomslist.end();) { if(voms->empty()) { voms = vomslist.erase(voms); } else { ++voms; } } // Check for needed credentials objects // What can the proxy be used for? Could not find it in documentation. // Key and certificate not needed if only printing proxy information if ( (!(Arc::lower(myproxy_command) == "get")) && (!use_nssdb) ) { if((usercfg.CertificatePath().empty() || ( usercfg.KeyPath().empty() && (usercfg.CertificatePath().find(".p12") == std::string::npos) ) ) && !(info || (infoitemlist.size() > 0) || remove_proxy)) { logger.msg(Arc::ERROR, "Failed to find certificate and/or private key or files have improper permissions or ownership."); logger.msg(Arc::ERROR, "You may try to increase verbosity to get more information."); return EXIT_FAILURE; } } if(!vomslist.empty() || !myproxy_command.empty()) { // For external communication CAs are needed if(usercfg.CACertificatesDirectory().empty()) { logger.msg(Arc::ERROR, "Failed to find CA certificates"); logger.msg(Arc::ERROR, "Cannot find the CA certificates directory path, " "please set environment variable X509_CERT_DIR, " "or cacertificatesdirectory in a configuration file."); logger.msg(Arc::ERROR, "You may try to increase verbosity to get more information."); logger.msg(Arc::ERROR, "The CA certificates directory is required for " "contacting VOMS and MyProxy servers."); return EXIT_FAILURE; } } // Convert list of voms+command into more convenient structure std::map<std::string, std::list<std::string> > vomscmdlist; if (!vomslist.empty()) { if (vomses_path.empty()) vomses_path = usercfg.VOMSESPath(); if (vomses_path.empty()) { logger.msg(Arc::ERROR, "$X509_VOMS_FILE and $X509_VOMSES are not set;\n" "User has not specified the location for vomses information;\n" "There is also no vomses location information in user's configuration file;\n" "Cannot find vomses in default locations: ~/.arc/vomses, ~/.voms/vomses,\n" "$ARC_LOCATION/etc/vomses, $ARC_LOCATION/etc/grid-security/vomses, $PWD/vomses,\n" "/etc/vomses, /etc/grid-security/vomses, and the location at the corresponding sub-directory"); return EXIT_FAILURE; } for(std::list<std::string>::iterator v = vomslist.begin(); v != vomslist.end(); ++v) { std::string::size_type p = v->find(':'); if(p == std::string::npos) { vomscmdlist[*v].push_back(""); } else { vomscmdlist[v->substr(0,p)].push_back(v->substr(p+1)); *v = v->substr(0,p); } } // Remove duplicates vomslist.sort(); vomslist.unique(); } // Proxy is a special case. We either need default or predefined path. // No guessing or testing is needed. // By running credentials initialization once more all set values // won't change. But proxy will get default value if not set. 
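// In practice the block below resolves proxy_path in this order: the -P
// option, $X509_USER_PROXY, proxypath from the client configuration file,
// and finally the UserConfig built-in default (conventionally a file like
// /tmp/x509up_u<uid>; the exact default comes from UserConfig, not from
// this file).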
{ Arc::UserConfig tmpcfg(conffile, Arc::initializeCredentialsType(Arc::initializeCredentialsType::NotTryCredentials)); if(proxy_path.empty()) proxy_path = tmpcfg.ProxyPath(); usercfg.ProxyPath(proxy_path); } // Get back all paths if(key_path.empty()) key_path = usercfg.KeyPath(); if(cert_path.empty()) cert_path = usercfg.CertificatePath(); if(ca_dir.empty()) ca_dir = usercfg.CACertificatesDirectory(); if(voms_dir.empty()) voms_dir = Arc::GetEnv("X509_VOMS_DIR"); if (debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (timeout > 0) usercfg.Timeout(timeout); Arc::User user; if (!params.empty()) { logger.msg(Arc::ERROR, "Wrong number of arguments!"); return EXIT_FAILURE; } const Arc::Time now; if (remove_proxy) { if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } if(!Arc::FileDelete(proxy_path)) { if(errno != ENOENT) { logger.msg(Arc::ERROR, "Cannot remove proxy file at %s", proxy_path); } else { logger.msg(Arc::ERROR, "Cannot remove proxy file at %s, because it's not there", proxy_path); } return EXIT_FAILURE; } return EXIT_SUCCESS; } if (info) { if(!usercfg.OToken().empty()) { std::cout << Arc::IString("Bearer token is available. It is preferred for job submission.") << std::endl; } std::vector voms_attributes; bool res = false; if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } else if (!(Glib::file_test(proxy_path, Glib::FILE_TEST_EXISTS))) { logger.msg(Arc::ERROR, "Cannot find file at %s for getting the proxy. " "Please make sure this file exists.", proxy_path); return EXIT_FAILURE; } Arc::Credential holder(proxy_path, "", "", "", false, false); if(!holder.GetCert()) { logger.msg(Arc::ERROR, "Cannot process proxy file at %s.", proxy_path); return EXIT_FAILURE; } std::cout << Arc::IString("Subject: %s", holder.GetDN()) << std::endl; std::cout << Arc::IString("Issuer: %s", holder.GetIssuerName()) << std::endl; std::cout << Arc::IString("Identity: %s", holder.GetIdentityName()) << std::endl; if (holder.GetEndTime() < now) std::cout << Arc::IString("Time left for proxy: Proxy expired") << std::endl; else if (now < holder.GetStartTime()) std::cout << Arc::IString("Time left for proxy: Proxy not valid yet") << std::endl; else std::cout << Arc::IString("Time left for proxy: %s", (holder.GetEndTime() - now).istr()) << std::endl; std::cout << Arc::IString("Proxy path: %s", proxy_path) << std::endl; std::cout << Arc::IString("Proxy type: %s", certTypeToString(holder.GetType())) << std::endl; std::cout << Arc::IString("Proxy key length: %i", holder.GetKeybits()) << std::endl; std::cout << Arc::IString("Proxy signature: %s", signTypeToString(holder.GetSigningAlgorithm())) << std::endl; Arc::VOMSTrustList voms_trust_dn; voms_trust_dn.AddRegex(".*"); res = parseVOMSAC(holder, usercfg.CAUseGrid() ? ca_dir : "", "", usercfg.CAUseSystem(), voms_dir, voms_trust_dn, voms_attributes, true, true); // Not printing error message because parseVOMSAC will print everything itself //if (!res) logger.msg(Arc::ERROR, "VOMS attribute parsing failed"); for(int n = 0; n 0) { std::cout<<"====== "< attr_elements; Arc::tokenize(attr, attr_elements, "/"); // remove /voname= prefix if ( ! 
attr_elements.empty() ) attr_elements.pop_front(); if ( attr_elements.empty() ) { logger.msg(Arc::WARNING, "Malformed VOMS AC attribute %s", attr); continue; } // policyAuthority (URI) and AC tags if ( attr_elements.size() == 1 ) { std::string uri = attr_elements.front().substr(9); std::cout << "uri : " << uri < voms_attributes[n].till) { std::cout << Arc::IString("Time left for AC: AC has expired")< 0) { if (proxy_path.empty()) { logger.msg(Arc::ERROR, "Cannot find the path of the proxy file, " "please setup environment X509_USER_PROXY, " "or proxypath in a configuration file"); return EXIT_FAILURE; } else if (!(Glib::file_test(proxy_path, Glib::FILE_TEST_EXISTS))) { logger.msg(Arc::ERROR, "Cannot find file at %s for getting the proxy. " "Please make sure this file exists.", proxy_path); return EXIT_FAILURE; } Arc::Credential holder(proxy_path, "", "", "", false, false); if(!holder.GetCert()) { logger.msg(Arc::ERROR, "Cannot process proxy file at %s.", proxy_path); return EXIT_FAILURE; } Arc::VOMSTrustList voms_trust_dn; voms_trust_dn.AddRegex(".*"); std::vector voms_attributes; parseVOMSAC(holder, usercfg.CAUseGrid() ? ca_dir : "", "", usercfg.CAUseSystem(), voms_dir, voms_trust_dn, voms_attributes, true, true); bool unknownInfo = false; for(std::list::iterator ii = infoitemlist.begin(); ii != infoitemlist.end(); ++ii) { if(*ii == "subject") { std::cout << holder.GetDN() << std::endl; } else if(*ii == "identity") { std::cout << holder.GetIdentityName() << std::endl; } else if(*ii == "issuer") { std::cout << holder.GetIssuerName() << std::endl; } else if(*ii == "ca") { std::cout << holder.GetCAName() << std::endl; } else if(*ii == "path") { std::cout << proxy_path << std::endl; } else if(*ii == "type") { std::cout << certTypeToString(holder.GetType()) << std::endl; // todo:less human readable } else if(*ii == "validityStart") { std::cout << holder.GetStartTime().GetTime() << std::endl; } else if(*ii == "validityEnd") { std::cout << holder.GetEndTime().GetTime() << std::endl; } else if(*ii == "validityPeriod") { std::cout << (holder.GetEndTime() - holder.GetStartTime()).GetPeriod() << std::endl; } else if(*ii == "validityLeft") { std::cout << ((nownow)?(voms_attributes[n].till-now):Arc::Period(0)).GetPeriod(); } std::cout << std::endl; } else if(*ii == "proxyPolicy") { std::cout << holder.GetProxyPolicy() << std::endl; } else if(*ii == "keybits") { std::cout << holder.GetKeybits() << std::endl; } else if(*ii == "signingAlgorithm") { std::cout << signTypeToString(holder.GetSigningAlgorithm()) << std::endl; } else { logger.msg(Arc::ERROR, "Information item '%s' is not known",*ii); unknownInfo = true; } } if (unknownInfo) return EXIT_FAILURE; return EXIT_SUCCESS; } if ((cert_path.empty() || key_path.empty()) && (Arc::lower(myproxy_command) == "put")) { if (cert_path.empty()) logger.msg(Arc::ERROR, "Cannot find the user certificate path, " "please setup environment X509_USER_CERT, " "or certificatepath in a configuration file"); if (key_path.empty()) logger.msg(Arc::ERROR, "Cannot find the user private key path, " "please setup environment X509_USER_KEY, " "or keypath in a configuration file"); return EXIT_FAILURE; } std::map constraints; for (std::list::iterator it = constraintlist.begin(); it != constraintlist.end(); ++it) { std::string::size_type pos = it->find('='); if (pos != std::string::npos) constraints[it->substr(0, pos)] = it->substr(pos + 1); else constraints[*it] = ""; } std::map > passprompts; passprompts[pass_private_key] = std::pair("private key",false); 
passprompts[pass_myproxy] = std::pair<std::string, bool>("MyProxy server",false); passprompts[pass_myproxy_new] = std::pair<std::string, bool>("MyProxy server (new)",true); for (std::list<std::string>::iterator it = passsourcelist.begin(); it != passsourcelist.end(); ++it) { std::string::size_type pos = it->find('='); if (pos == std::string::npos) { logger.msg(Arc::ERROR, "Cannot parse password source expression %s; " "it must be of type=source format", *it); return EXIT_FAILURE; } std::string dest = it->substr(0, pos); pass_destination_type pass_dest; if(dest == "key") { pass_dest = pass_private_key; } else if(dest == "myproxy") { pass_dest = pass_myproxy; } else if(dest == "myproxynew") { pass_dest = pass_myproxy_new; } else if(dest == "nss") { pass_dest = pass_nss; } else if(dest == "all") { pass_dest = pass_all; } else { logger.msg(Arc::ERROR, "Cannot parse password type %s. " "Currently supported values are 'key','myproxy','myproxynew' and 'all'.", dest); return EXIT_FAILURE; } Arc::PasswordSource* pass_source; std::string pass = it->substr(pos + 1); if((pass[0] == '"') && (pass[pass.length()-1] == '"')) { pass_source = new Arc::PasswordSourceString(pass.substr(1,pass.length()-2)); } else if(pass == "int") { pass_source = new Arc::PasswordSourceInteractive(passprompts[pass_private_key].first,passprompts[pass_private_key].second); } else if(pass == "stdin") { pass_source = new Arc::PasswordSourceStream(&std::cin); } else { pos = pass.find(':'); if(pos == std::string::npos) { logger.msg(Arc::ERROR, "Cannot parse password source %s; " "it must be of source_type or source_type:data format. " "Supported source types are int, stdin, stream, file.", pass); return EXIT_FAILURE; } std::string data = pass.substr(pos + 1); pass.resize(pos); if(pass == "file") { pass_source = new PasswordSourceFile(data); // TODO: combine same files } else if(pass == "stream") { if(data == "0") { pass_source = new Arc::PasswordSourceStream(&std::cin); } else { logger.msg(Arc::ERROR, "Only standard input is currently supported " "for password source."); return EXIT_FAILURE; } } else { logger.msg(Arc::ERROR, "Cannot parse password source type %s. " "Supported source types are int, stdin, stream, file.", pass); return EXIT_FAILURE; } } if(pass_source) { if(pass_dest != pass_all) { passsources[pass_dest] = pass_source; } else { passsources[pass_private_key] = pass_source; passsources[pass_myproxy] = pass_source; passsources[pass_myproxy_new] = pass_source; passsources[pass_nss] = pass_source; } } } for(std::map<pass_destination_type, std::pair<std::string, bool> >::iterator p = passprompts.begin(); p != passprompts.end();++p) { if(passsources.find(p->first) == passsources.end()) { passsources[p->first] = new Arc::PasswordSourceInteractive(p->second.first,p->second.second); } } //proxy validity period //Set the default proxy validity lifetime to 12 hours if there is //no validity lifetime provided by command caller // Set default values first // TODO: Is default validityPeriod since now or since validityStart? Arc::Time validityStart = now; // now by default Arc::Period validityPeriod(12*60*60); if (Arc::lower(myproxy_command) == "put") { //For myproxy PUT operation, the proxy should be 7 days according to the default //definition in myproxy implementation. validityPeriod = 7*24*60*60; } // Acquire constraints. Check for valid values and conflicts. 
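// A sketch of the constraint arithmetic implemented below (the times are
// illustrative only):
//   -c validityStart=2008-05-29T10:20:30Z -c validityPeriod=43200
//       => validityEnd = validityStart + 12h
//   -c validityEnd=<time> -c validityPeriod=43200
//       => validityStart = validityEnd - validityPeriod
//   -c validityEnd=<time> alone
//       => validityPeriod = validityEnd - validityStart, start defaulting to now
// Giving start, end and period together is rejected as over-constrained.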
if((!constraints["validityStart"].empty()) && (!constraints["validityEnd"].empty()) && (!constraints["validityPeriod"].empty())) { std::cerr << Arc::IString("The start, end and period can't be set simultaneously") << std::endl; return EXIT_FAILURE; } if(!constraints["validityStart"].empty()) { validityStart = Arc::Time(constraints["validityStart"]); if (validityStart == Arc::Time(Arc::Time::UNDEFINED)) { std::cerr << Arc::IString("The start time that you set: %s can't be recognized.", (std::string)constraints["validityStart"]) << std::endl; return EXIT_FAILURE; } } if(!constraints["validityPeriod"].empty()) { validityPeriod = Arc::Period(constraints["validityPeriod"]); if (validityPeriod.GetPeriod() <= 0) { std::cerr << Arc::IString("The period that you set: %s can't be recognized.", (std::string)constraints["validityPeriod"]) << std::endl; return EXIT_FAILURE; } } if(!constraints["validityEnd"].empty()) { Arc::Time validityEnd = Arc::Time(constraints["validityEnd"]); if (validityEnd == Arc::Time(Arc::Time::UNDEFINED)) { std::cerr << Arc::IString("The end time that you set: %s can't be recognized.", (std::string)constraints["validityEnd"]) << std::endl; return EXIT_FAILURE; } if(!constraints["validityPeriod"].empty()) { // If period is explicitly set then start is derived from end and period validityStart = validityEnd - validityPeriod; } else { // otherwise start - optionally - and end are set, period is derived if(validityEnd < validityStart) { std::cerr << Arc::IString("The end time that you set: %s is before start time: %s.", (std::string)validityEnd,(std::string)validityStart) << std::endl; // error return EXIT_FAILURE; } validityPeriod = validityEnd - validityStart; } } // Here we have validityStart and validityPeriod defined Arc::Time validityEnd = validityStart + validityPeriod; // Warn user about strange times but do not prevent user from doing anything legal if(validityStart < now) { std::cout << Arc::IString("WARNING: The start time that you set: %s is before current time: %s", (std::string)validityStart, (std::string)now) << std::endl; } if(validityEnd < now) { std::cout << Arc::IString("WARNING: The end time that you set: %s is before current time: %s", (std::string)validityEnd, (std::string)now) << std::endl; } //voms AC validity period //Set the default voms AC validity lifetime to 12 hours if there is //no validity lifetime provided by command caller Arc::Period vomsACvalidityPeriod(12*60*60); if(!constraints["vomsACvalidityPeriod"].empty()) { vomsACvalidityPeriod = Arc::Period(constraints["vomsACvalidityPeriod"]); if (vomsACvalidityPeriod.GetPeriod() == 0) { std::cerr << Arc::IString("The VOMS AC period that you set: %s can't be recognized.", (std::string)constraints["vomsACvalidityPeriod"]) << std::endl; return EXIT_FAILURE; } } else { if(validityPeriod < vomsACvalidityPeriod) vomsACvalidityPeriod = validityPeriod; // It is strange that the VOMS AC may be valid for a shorter time than the proxy itself. // Maybe it would be more correct to have it valid by default from // now till validityEnd. } std::string voms_period = Arc::tostring(vomsACvalidityPeriod.GetPeriod()); //myproxy validity period. 
//Set the default myproxy validity lifetime to 12 hours if there is //no validity lifetime provided by command caller Arc::Period myproxyvalidityPeriod(12*60*60); if(!constraints["myproxyvalidityPeriod"].empty()) { myproxyvalidityPeriod = Arc::Period(constraints["myproxyvalidityPeriod"]); if (myproxyvalidityPeriod.GetPeriod() == 0) { std::cerr << Arc::IString("The MyProxy period that you set: %s can't be recognized.", (std::string)constraints["myproxyvalidityPeriod"]) << std::endl; return EXIT_FAILURE; } } else { if(validityPeriod < myproxyvalidityPeriod) myproxyvalidityPeriod = validityPeriod; // see vomsACvalidityPeriod } std::string myproxy_period = Arc::tostring(myproxyvalidityPeriod.GetPeriod()); std::string signing_algorithm = constraints["signingAlgorithm"]; int keybits = 0; if(!constraints["keybits"].empty()) { if(constraints["keybits"] == "inherit") { keybits = -1; } else if((!Arc::stringto(constraints["keybits"],keybits)) || (keybits <= 0)) { std::cerr << Arc::IString("The keybits constraint is wrong: %s.", (std::string)constraints["keybits"]) << std::endl; return EXIT_FAILURE; } } #ifdef HAVE_NSS // TODO: move to separate file //Using the NSS DB takes precedence over other options if(use_nssdb) { // Get the nss db paths from firefox's profile.ini file std::vector<std::string> nssdb_paths; get_default_nssdb_path(nssdb_paths); if(nssdb_paths.empty()) { std::cout << Arc::IString("The NSS database cannot be detected in the Firefox profile") << std::endl; return EXIT_FAILURE; } // Let the user choose which profile to use // if multiple profiles exist bool res; std::string configdir; if(nssdb_paths.size() > 1) { std::cout<=1)) { configdir = nssdb_paths[num-1]; break; } } } else { configdir = nssdb_paths[0]; } res = ArcAuthNSS::nssInit(configdir); std::cout<< Arc::IString("NSS database to be accessed: %s\n", configdir.c_str()); //The NSS DB under the Firefox profile seems not to be protected by any passphrase by default bool ascii = true; const char* trusts = "u,u,u"; // Generate CSR std::string proxy_csrfile = "proxy.csr"; std::string proxy_keyname = "proxykey"; std::string proxy_privk_str; res = ArcAuthNSS::nssGenerateCSR(proxy_keyname, "CN=Test,OU=ARC,O=EMI", *passsources[pass_nss], proxy_csrfile, proxy_privk_str, ascii); if(!res) return EXIT_FAILURE; // Create a temporary proxy and contact voms server std::string issuername; std::string vomsacseq; if (!vomslist.empty()) { std::string tmp_proxy_path; if(!Arc::TmpFileCreate(tmp_proxy_path,"")) return EXIT_FAILURE; get_nss_certname(issuername, logger); // Create tmp proxy cert int duration = 12; res = ArcAuthNSS::nssCreateCert(proxy_csrfile, issuername, NULL, duration, "", tmp_proxy_path, ascii); if(!res) { remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } // TODO: Use FileUtils std::string tmp_proxy_cred_str; std::ifstream tmp_proxy_cert_s(tmp_proxy_path.c_str()); std::getline(tmp_proxy_cert_s, tmp_proxy_cred_str,'\0'); tmp_proxy_cert_s.close(); // Export EEC std::string cert_file; if(!Arc::TmpFileCreate(cert_file,"")) { remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } res = ArcAuthNSS::nssExportCertificate(issuername, cert_file); if(!res) { remove_cert_file(cert_file); remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } std::string eec_cert_str; std::ifstream eec_s(cert_file.c_str()); std::getline(eec_s, eec_cert_str,'\0'); eec_s.close(); remove_cert_file(cert_file); // Compose tmp proxy file tmp_proxy_cred_str.append(proxy_privk_str).append(eec_cert_str); write_proxy_file(tmp_proxy_path, tmp_proxy_cred_str); 
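// At this point tmp_proxy_path holds a single PEM blob consisting of, in
// order: the temporary proxy certificate just signed via NSS, its private
// key, and the exported EEC certificate (the concatenation built by the
// append() calls above). contact_voms_servers() below is handed this file
// as the credential to authenticate with.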
if(!contact_voms_servers(vomscmdlist, orderlist, vomses_path, use_gsi_comm, use_http_comm || !use_old_comm, voms_period, usercfg, logger, tmp_proxy_path, vomsacseq)) { remove_proxy_file(tmp_proxy_path); return EXIT_FAILURE; } remove_proxy_file(tmp_proxy_path); } // Create proxy with VOMS AC std::string proxy_certfile = "myproxy.pem"; // Let user to choose which credential to use if(issuername.empty()) get_nss_certname(issuername, logger); std::cout< voms_attributes; bool r = parseVOMSAC(holder, usercfg.CAUseSystem() ? ca_dir : "", "", usercfg.CAUseSystem(), voms_dir, voms_trust_dn, voms_attributes, true, true); if (!r) logger.msg(Arc::ERROR, "VOMS attribute parsing failed"); if(voms_attributes.size() == 0) { logger.msg(Arc::INFO, "Myproxy server did not return proxy with VOMS AC included"); std::string vomsacseq; contact_voms_servers(vomscmdlist, orderlist, vomses_path, use_gsi_comm, use_http_comm || !use_old_comm, voms_period, usercfg, logger, proxy_path, vomsacseq); if(!vomsacseq.empty()) { Arc::Credential signer(proxy_path, proxy_path, "", "", false, false); std::string proxy_cert; create_proxy(proxy_cert, signer, policy, proxy_start, proxy_period, vomsacseq, keybits, signing_algorithm); write_proxy_file(proxy_path, proxy_cert); } } return EXIT_SUCCESS; } else return EXIT_FAILURE; } //Create proxy or voms proxy try { Arc::Credential signer(cert_path, key_path, "", "", false, false, *passsources[pass_private_key]); if (signer.GetIdentityName().empty()) { std::cerr << Arc::IString("Proxy generation failed: No valid certificate found.") << std::endl; return EXIT_FAILURE; } EVP_PKEY* pkey = signer.GetPrivKey(); if(!pkey) { std::cerr << Arc::IString("Proxy generation failed: No valid private key found.") << std::endl; return EXIT_FAILURE; } if(pkey) EVP_PKEY_free(pkey); std::cout << Arc::IString("Your identity: %s", signer.GetIdentityName()) << std::endl; if (now > signer.GetEndTime()) { std::cerr << Arc::IString("Proxy generation failed: Certificate has expired.") << std::endl; return EXIT_FAILURE; } else if (now < signer.GetStartTime()) { std::cerr << Arc::IString("Proxy generation failed: Certificate is not valid yet.") << std::endl; return EXIT_FAILURE; } std::string vomsacseq; if (!vomslist.empty()) { //Generate a temporary self-signed proxy certificate //to contact the voms server std::string tmp_proxy_path; std::string tmp_proxy; if(!Arc::TmpFileCreate(tmp_proxy_path,"")) { std::cerr << Arc::IString("Proxy generation failed: Failed to create temporary file.") << std::endl; return EXIT_FAILURE; } create_tmp_proxy(tmp_proxy, signer); write_proxy_file(tmp_proxy_path, tmp_proxy); if(!contact_voms_servers(vomscmdlist, orderlist, vomses_path, use_gsi_comm, use_http_comm || !use_old_comm, voms_period, usercfg, logger, tmp_proxy_path, vomsacseq)) { remove_proxy_file(tmp_proxy_path); std::cerr << Arc::IString("Proxy generation failed: Failed to retrieve VOMS information.") << std::endl; return EXIT_FAILURE; } remove_proxy_file(tmp_proxy_path); } std::string proxy_cert; create_proxy(proxy_cert, signer, policy, proxy_start, proxy_period, vomsacseq, keybits, signing_algorithm); //If myproxy command is "Put", then the proxy path is set to /tmp/myproxy-proxy.uid.pid if (Arc::lower(myproxy_command) == "put") proxy_path = Glib::build_filename(Glib::get_tmp_dir(), "myproxy-proxy." 
+ Arc::tostring(user.get_uid()) + Arc::tostring((int)(getpid()))); write_proxy_file(proxy_path,proxy_cert); Arc::Credential proxy_cred(proxy_path, proxy_path, "", "", false, false); Arc::Time left = proxy_cred.GetEndTime(); std::cout << Arc::IString("Proxy generation succeeded") << std::endl; std::cout << Arc::IString("Your proxy is valid until: %s", left.str(Arc::UserTime)) << std::endl; //return EXIT_SUCCESS; } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return EXIT_FAILURE; } //Delegate the former self-delegated credential to //myproxy server if (Arc::lower(myproxy_command) == "put") { bool res = contact_myproxy_server( myproxy_server, myproxy_command, user_name, use_empty_passphrase, myproxy_period, retrievable_by_cert, proxy_start, proxy_period, vomslist, vomses_path, proxy_path, usercfg, logger); if(res) return EXIT_SUCCESS; else return EXIT_FAILURE; } return EXIT_SUCCESS; } int main(int argc, char **argv) { int xr = runmain(argc,argv); _exit(xr); return 0; } nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/arcproxy_myproxy.cpp0000644000000000000000000000013215067751327024262 xustar0030 mtime=1759498967.679490824 30 atime=1759498967.825493043 30 ctime=1759499031.176934171 nordugrid-arc-7.1.1/src/clients/credentials/arcproxy_myproxy.cpp0000644000175000002070000002640415067751327026172 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_NSS #include #endif #include "arcproxy.h" using namespace ArcCredential; typedef enum { pass_all, pass_private_key, pass_myproxy, pass_myproxy_new, pass_nss } pass_destination_type; extern std::map passsources; static std::string get_cert_dn(const std::string& cert_file) { std::string dn_str; Arc::Credential cert(cert_file, "", "", "", false, false); dn_str = cert.GetIdentityName(); return dn_str; } bool contact_myproxy_server(const std::string& myproxy_server, const std::string& myproxy_command, const std::string& myproxy_user_name, bool use_empty_passphrase, const std::string& myproxy_period, const std::string& retrievable_by_cert, Arc::Time& proxy_start, Arc::Period& proxy_period, std::list& vomslist, std::string& vomses_path, const std::string& proxy_path, Arc::UserConfig& usercfg, Arc::Logger& logger) { std::string user_name = myproxy_user_name; std::string key_path, cert_path, ca_dir; key_path = usercfg.KeyPath(); cert_path = usercfg.CertificatePath(); ca_dir = usercfg.CACertificatesDirectory(); if(user_name.empty()) { user_name = get_cert_dn(proxy_path); } if(user_name.empty() && !cert_path.empty()) { user_name = get_cert_dn(cert_path); } //If the "INFO" myproxy command is given, try to get the //information about the existence of stored credentials //on the myproxy server. 
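// Usage sketch (hedged): each MyProxy branch below follows the same
// Arc::CredentialStore pattern; the option keys are the ones used in this file:
//   Arc::CredentialStore cstore(usercfg, Arc::URL("myproxy://" + myproxy_server));
//   std::map<std::string, std::string> opt;
//   opt["username"] = user_name;
//   std::string resp;
//   if (cstore.Info(opt, resp)) std::cout << resp << std::endl;  // "info" case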
try {
  if (Arc::lower(myproxy_command) == "info") {
    if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing");
    if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing");
    std::string respinfo;
    //if(usercfg.CertificatePath().empty()) usercfg.CertificatePath(cert_path);
    //if(usercfg.KeyPath().empty()) usercfg.KeyPath(key_path);
    if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path);
    else {
      if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path);
      if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path);
    }
    if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir);
    Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server));
    std::map<std::string,std::string> myproxyopt;
    myproxyopt["username"] = user_name;
    if(!cstore.Info(myproxyopt,respinfo)) throw std::invalid_argument("Failed to get info from MyProxy service");
    std::cout << Arc::IString("Succeeded to get info from MyProxy server") << std::endl;
    std::cout << respinfo << std::endl;
    return true;
  }
} catch (std::exception& err) {
  logger.msg(Arc::ERROR, err.what());
  return false;
}
//If the "NEWPASS" myproxy command is given, try to change the
//passphrase protecting the credentials stored on the myproxy server.
try {
  if (Arc::lower(myproxy_command) == "newpass") {
    if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing");
    if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing");
    std::string passphrase;
    if(passsources[pass_myproxy]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase");
    std::string newpassphrase;
    if(passsources[pass_myproxy_new]->Get(newpassphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase");
    if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path);
    else {
      if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path);
      if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path);
    }
    if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir);
    Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server));
    std::map<std::string,std::string> myproxyopt;
    myproxyopt["username"] = user_name;
    myproxyopt["password"] = passphrase;
    myproxyopt["newpassword"] = newpassphrase;
    if(!cstore.ChangePassword(myproxyopt)) throw std::invalid_argument("Failed to change password on MyProxy service");
    std::cout << Arc::IString("Succeeded to change password on MyProxy server") << std::endl;
    return true;
  }
} catch (std::exception& err) {
  logger.msg(Arc::ERROR, err.what());
  return false;
}
//If the "DESTROY" myproxy command is given, try to destroy the
//credentials stored on the myproxy server.
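// Note (hedged) on the passphrase prompts in these MyProxy branches: each
// passsources[...] entry is an Arc::PasswordSource*, and a call like
//   src->Get(passphrase, 4, 256)
// fills 'passphrase' (4 and 256 appear to be the accepted length bounds)
// and returns Arc::PasswordSource::PASSWORD on success, which is what the
// checks above and below compare against.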
try {
  if (Arc::lower(myproxy_command) == "destroy") {
    if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing");
    if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing");
    std::string passphrase;
    if(passsources[pass_myproxy]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase");
    std::string respinfo;
    if(usercfg.ProxyPath().empty() && !proxy_path.empty()) usercfg.ProxyPath(proxy_path);
    else {
      if(usercfg.CertificatePath().empty() && !cert_path.empty()) usercfg.CertificatePath(cert_path);
      if(usercfg.KeyPath().empty() && !key_path.empty()) usercfg.KeyPath(key_path);
    }
    if(usercfg.CACertificatesDirectory().empty()) usercfg.CACertificatesDirectory(ca_dir);
    Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server));
    std::map<std::string,std::string> myproxyopt;
    myproxyopt["username"] = user_name;
    myproxyopt["password"] = passphrase;
    if(!cstore.Destroy(myproxyopt)) throw std::invalid_argument("Failed to destroy credential on MyProxy service");
    std::cout << Arc::IString("Succeeded to destroy credential on MyProxy server") << std::endl;
    return true;
  }
} catch (std::exception& err) {
  logger.msg(Arc::ERROR, err.what());
  return false;
}
//If the "GET" myproxy command is given, try to get a delegated
//certificate from the myproxy server.
//For the "GET" command, certificate and key are not needed, and
//anonymous GSSAPI is used (GSS_C_ANON_FLAG)
try {
  if (Arc::lower(myproxy_command) == "get") {
    if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing");
    if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing");
    std::string passphrase;
    if(!use_empty_passphrase) {
      if(passsources[pass_myproxy]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase");
    }
    std::string proxy_cred_str_pem;
    Arc::initializeCredentialsType cred_type(Arc::initializeCredentialsType::SkipCredentials);
    Arc::UserConfig usercfg_tmp(cred_type);
    usercfg_tmp.CACertificatesDirectory(usercfg.CACertificatesDirectory());
    Arc::CredentialStore cstore(usercfg_tmp,Arc::URL("myproxy://"+myproxy_server));
    std::map<std::string,std::string> myproxyopt;
    myproxyopt["username"] = user_name;
    myproxyopt["password"] = passphrase;
    myproxyopt["lifetime"] = myproxy_period;
    // TODO? According to the protocol of myproxy, the "Get" command can
    // include the information about vo name, so that myproxy server
    // can contact voms server to retrieve AC for myproxy client
    // See 2.4 of http://grid.ncsa.illinois.edu/myproxy/protocol/
    // "When VONAME appears in the message, the server will generate VOMS
    // proxy certificate using VONAME and VOMSES, or the server's VOMS server information."
    if(!cstore.Retrieve(myproxyopt,proxy_cred_str_pem)) throw std::invalid_argument("Failed to retrieve proxy from MyProxy service");
    write_proxy_file(proxy_path,proxy_cred_str_pem);
    //Assign proxy_path to cert_path and key_path,
    //so the later voms functionality can use the proxy_path
    //to create a proxy with VOMS AC extension. In this
    //case, "--cert" and "--key" are not needed.
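    // Retrieval sketch (hedged) of the "get" branch above: no existing
    // credential is presented (SkipCredentials; the comment above notes
    // anonymous GSSAPI), so the stored proxy is identified purely by
    //   opt["username"], opt["password"], opt["lifetime"]
    // and cstore.Retrieve(opt, pem) returns the PEM-encoded proxy that
    // write_proxy_file() then stores at proxy_path.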
cert_path = proxy_path; key_path = proxy_path; std::cout << Arc::IString("Succeeded to get a proxy in %s from MyProxy server %s", proxy_path, myproxy_server) << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); return false; } //Delegate the former self-delegated credential to //myproxy server try { if (Arc::lower(myproxy_command) == "put") { if (myproxy_server.empty()) throw std::invalid_argument("URL of MyProxy server is missing"); if (user_name.empty()) throw std::invalid_argument("Username to MyProxy server is missing"); std::string prompt1 = "MyProxy server"; std::string passphrase; if(retrievable_by_cert.empty()) { if(passsources[pass_myproxy_new]->Get(passphrase, 4, 256) != Arc::PasswordSource::PASSWORD) throw std::invalid_argument("Error entering passphrase"); } std::string proxy_cred_str_pem; std::ifstream proxy_cred_file(proxy_path.c_str()); if(!proxy_cred_file) throw std::invalid_argument("Failed to read proxy file "+proxy_path); std::getline(proxy_cred_file,proxy_cred_str_pem,'\0'); if(proxy_cred_str_pem.empty()) throw std::invalid_argument("Failed to read proxy file "+proxy_path); proxy_cred_file.close(); usercfg.ProxyPath(proxy_path); if(usercfg.CACertificatesDirectory().empty()) { usercfg.CACertificatesDirectory(ca_dir); } Arc::CredentialStore cstore(usercfg,Arc::URL("myproxy://"+myproxy_server)); std::map myproxyopt; myproxyopt["username"] = user_name; myproxyopt["password"] = passphrase; myproxyopt["lifetime"] = myproxy_period; if(!retrievable_by_cert.empty()) { myproxyopt["retriever_trusted"] = retrievable_by_cert; } if(!cstore.Store(myproxyopt,proxy_cred_str_pem,true,proxy_start,proxy_period)) throw std::invalid_argument("Failed to delegate proxy to MyProxy service"); remove_proxy_file(proxy_path); std::cout << Arc::IString("Succeeded to put a proxy onto MyProxy server") << std::endl; return true; } } catch (std::exception& err) { logger.msg(Arc::ERROR, err.what()); remove_proxy_file(proxy_path); return false; } return true; } nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/arcproxy.1.in0000644000000000000000000000013215067751327022436 xustar0030 mtime=1759498967.679365926 30 atime=1759498967.825493043 30 ctime=1759499031.180367486 nordugrid-arc-7.1.1/src/clients/credentials/arcproxy.1.in0000644000175000002070000000450715067751327024346 0ustar00mockbuildmock00000000000000[NAME] arcproxy \- ARC Credentials Proxy generation utility [REPORTING BUGS] Report bugs to http://bugzilla.nordugrid.org/ [ENVIRONMENT VARIABLES] .TP .B ARC_LOCATION The location where ARC is installed can be specified by this variable. If not specified the install location will be determined from the path to the command being executed, and if this fails a WARNING will be given stating the location which will be used. .TP .B ARC_PLUGIN_PATH The location of ARC plugins can be specified by this variable. Multiple locations can be specified by separating them by : (; in Windows). The default location is \fB$ARC_LOCATION\fR/lib/arc (\\ in Windows). [COPYRIGHT] APACHE LICENSE Version 2.0 [FILES] .TP .B /etc/vomses Common file containing a list of selected VO contact point, one VO per line, for example: .RS .IP """gin"" ""kuiken.nikhef.nl"" ""15050"" ""/O=dutchgrid/O=hosts/OU=nikhef.nl/CN=kuiken.nikhef.nl"" ""gin.ggf.org""" .IP """nordugrid.org"" ""voms.uninett.no"" ""15015"" ""/O=Grid/O=NorduGrid/CN=host/voms.ndgf.org"" ""nordugrid.org""" .RE .TP .B ~/.voms/vomses Same as .B /etc/vomses but located in user's home area. 
If it exists, it has precedence over
.B /etc/vomses
.RS
The order in which vomses locations are parsed is:
.RS
1. command line options
.RE
.RS
2. client configuration file ~/.arc/client.conf
.RE
.RS
3. $X509_VOMSES or $X509_VOMS_FILE
.RE
.RS
4. ~/.arc/vomses
.RE
.RS
5. ~/.voms/vomses
.RE
.RS
6. $ARC_LOCATION/etc/vomses (this is for Windows environment)
.RE
.RS
7. $ARC_LOCATION/etc/grid-security/vomses (this is for Windows environment)
.RE
.RS
8. $PWD/vomses
.RE
.RS
9. /etc/vomses
.RE
.RS
10. /etc/grid-security/vomses
.RE
.RE
.TP
.B ~/.arc/client.conf
Some options can be given default values by specifying them in the ARC client configuration file. By using the
.B --conffile
option a different configuration file can be used than the default.
[AUTHOR]
ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org
[SEE ALSO]
.BR arccat (1),
.BR arcclean (1),
.BR arccp (1),
.BR arcget (1),
.BR arcinfo (1),
.BR arckill (1),
.BR arcls (1),
.BR arcmkdir (1),
.BR arcrenew (1),
.BR arcresume (1),
.BR arcrm (1),
.BR arcstat (1),
.BR arcsub (1),
.BR arcsync (1),
.BR arctest (1)
nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/arcproxy_voms.cpp0000644000000000000000000000013215067751327023517 xustar0030 mtime=1759498967.679490824 30 atime=1759498967.825493043 30 ctime=1759499031.175873177 nordugrid-arc-7.1.1/src/clients/credentials/arcproxy_voms.cpp0000644000175000002070000002247715067751327025425 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*-
#ifdef HAVE_CONFIG_H
#include
#endif
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
bool contact_voms_servers(std::map<std::string, std::list<std::string> >& vomscmdlist,
    std::list<std::string>& orderlist, std::string& vomses_path,
    bool use_gsi_comm, bool use_http_comm, const std::string& voms_period,
    Arc::UserConfig& usercfg, Arc::Logger& logger,
    const std::string& tmp_proxy_path, std::string& vomsacseq) {
  std::string ca_dir;
  ca_dir = usercfg.CACertificatesDirectory();
  class voms_match: public Arc::VOMSConfig::filter {
  private:
    const std::string& voms_;
  public:
    virtual bool match(const Arc::VOMSConfigLine& line) const {
      return ((line.Name() == voms_) || (line.Alias() == voms_));
    };
    voms_match(const std::string& voms):voms_(voms) { };
  };
  class vomses_match: public Arc::VOMSConfig::filter {
  private:
    const std::map<std::string, std::list<std::string> >& vomses_;
  public:
    virtual bool match(const Arc::VOMSConfigLine& line) const {
      // TODO: this will not scale for many voms servers specified at command line
      for(std::map<std::string, std::list<std::string> >::const_iterator voms = vomses_.begin(); voms != vomses_.end(); ++voms) {
        if((line.Name() == voms->first) || (line.Alias() == voms->first)) return true;
      };
      return false;
    };
    vomses_match(const std::map<std::string, std::list<std::string> >& vomses):vomses_(vomses) { };
  };
  Arc::VOMSConfig voms_config(vomses_path, vomses_match(vomscmdlist));
  if(!voms_config) { // logger
    logger.msg(Arc::ERROR, "Failed to process VOMS configuration or no suitable configuration lines found.");
    return false;
  }
  //Contact the voms servers to retrieve attribute certificate
  Arc::MCCConfig cfg;
  cfg.AddProxy(tmp_proxy_path);
  cfg.AddCADir(ca_dir);
  Arc::Period lifetime;
  if(!voms_period.empty()) {
    time_t voms_period_sec;
    if(!Arc::stringto(voms_period,voms_period_sec)) {
      logger.msg(Arc::ERROR, "Failed to parse requested VOMS lifetime: %s", voms_period);
      return false;
    }
    lifetime = voms_period_sec;
  }
  // TODO: group commands by
server. Is it really useful? Can it really be used effectively. // Loop through commands for (std::map >::iterator vomscmd = vomscmdlist.begin(); vomscmd != vomscmdlist.end(); ++vomscmd) { const std::string& voms_server = vomscmd->first; // server name const std::list& voms_commands = vomscmd->second; // command to send bool succeeded = false; int voms_lines_num = 0; // Loop through suitable voms configuration lines for (Arc::VOMSConfig::iterator vomsline = voms_config.First(voms_match(voms_server)); (bool)vomsline; vomsline = vomsline.Next(voms_match(voms_server))) { if(vomsline->Host().empty()) { logger.msg(Arc::ERROR, "Cannot get VOMS server address information from vomses line: \"%s\"", vomsline->Str()); throw std::runtime_error("Cannot get VOMS server address information from vomses line: \"" + vomsline->Str() + "\""); } ++voms_lines_num; logger.msg(Arc::INFO, "Contacting VOMS server (named %s): %s on port: %s", vomsline->Name(), vomsline->Host(), vomsline->Port()); std::cout << Arc::IString("Contacting VOMS server (named %s): %s on port: %s", vomsline->Name(), vomsline->Host(), vomsline->Port()) << std::endl; unsigned int port_num = 0; if(!vomsline->Port().empty()) { if(!Arc::stringto(vomsline->Port(),port_num)) { // Failed to parse port logger.msg(Arc::ERROR, "Failed to parse requested VOMS server port number: %s", vomsline->Port()); continue; } } else { port_num = 8443; // default VOMS port ? } if(use_http_comm) { // RESTful interface std::list fqans; for(std::list::const_iterator c_it = voms_commands.begin(); c_it != voms_commands.end(); ++c_it) { if (c_it->empty()) { // ?? fqans.push_back("/"+voms_name); } else if (Arc::lower(*c_it) == "all") { // ?? fqans.push_back("/"+voms_name); } else if (Arc::lower(*c_it) == "list") { // N // not supported logger.msg(Arc::ERROR, "List functionality is not supported for RESTful VOMS interface"); continue; } else { fqans.push_back(*c_it); // ?? } } Arc::ClientVOMSRESTful client(cfg, vomsline->Host(), port_num, Arc::TLSSec, usercfg.Timeout()/* todo: , proxy_host, proxy_port*/); std::string response; Arc::MCC_Status status = client.process(fqans, lifetime, response); if (!status) { std::cout << Arc::IString("The VOMS server with the information:\n\t%s\ncan not be reached, please make sure it is available.", vomsline->Str()) << std::endl; std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; //There could be another voms replicated server with the same name exists } if (response.empty()) { logger.msg(Arc::ERROR, "No valid response from VOMS server: %s", vomsline->Name()); std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; } vomsacseq.append(response); succeeded = true; break; // ?? 
} else { // old interface std::list commands; for(std::list::const_iterator c_it = voms_commands.begin(); c_it != voms_commands.end(); ++c_it) { if (c_it->empty()) { commands.push_back(Arc::VOMSCommand().GetGroup("/"+vomsline->Name())); } else if (Arc::lower(*c_it) == "all") { commands.push_back(Arc::VOMSCommand().GetEverything()); } else if (Arc::lower(*c_it) == "list") { // N // not supported logger.msg(Arc::ERROR, "List functionality is not supported for legacy VOMS interface"); continue; } else { std::string::size_type pos = c_it->find("/Role="); if (pos == 0) { commands.push_back(Arc::VOMSCommand().GetRole(c_it->substr(pos+6))); } else if((pos != std::string::npos) && (pos > 0)) { commands.push_back(Arc::VOMSCommand().GetRoleInGroup(c_it->substr(0, pos),c_it->substr(pos + 6))); } else if((*c_it)[0] == '/') { commands.push_back(Arc::VOMSCommand().GetGroup(*c_it)); } else { // unexpected logger.msg(Arc::ERROR, "Failed to parse VOMS command: %s",*c_it); continue; } } } std::list > ordering; for(std::list::iterator o_it = orderlist.begin(); o_it != orderlist.end(); ++o_it) { std::string::size_type pos = o_it->find(':'); if(pos == std::string::npos) { ordering.push_back(std::pair(*o_it,"")); } else { ordering.push_back(std::pair(o_it->substr(0,pos),o_it->substr(pos+1))); } } //logger.msg(Arc::VERBOSE, "Try to get attribute from VOMS server with order: %s", ordering); //logger.msg(Arc::VERBOSE, "Message sent to VOMS server %s is: %s", voms_name, send_msg); Arc::ClientVOMS client(cfg, vomsline->Host(), port_num, use_gsi_comm ? Arc::GSISec : Arc::TLSSec, usercfg.Timeout()); std::string response; Arc::MCC_Status status = client.process(commands, ordering, lifetime, response); if (!status) { std::cout << Arc::IString("The VOMS server with the information:\n\t%s\ncan not be reached, please make sure it is available.", vomsline->Str()) << std::endl; std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; //There could be another voms replicated server with the same name exists } if (response.empty()) { logger.msg(Arc::ERROR, "No valid response from VOMS server: %s", vomsline->Name()); std::cout << Arc::IString("Collected error is:\n\t%s",(std::string)status) << std::endl; continue; } vomsacseq.append(response); succeeded = true; break; } } // voms lines if(succeeded == false) { if(voms_lines_num > 1) { std::cout << Arc::IString("There are %d servers with the same name: %s in your vomses file, but none of them can be reached, or can return a valid message.", voms_lines_num, voms_server) << std::endl; } return false; } } // voms servers return true; } nordugrid-arc-7.1.1/src/clients/credentials/PaxHeaders/README0000644000000000000000000000013215067751327020760 xustar0030 mtime=1759498967.679365926 30 atime=1759498967.825493043 30 ctime=1759499031.173733513 nordugrid-arc-7.1.1/src/clients/credentials/README0000644000175000002070000000006015067751327022656 0ustar00mockbuildmock00000000000000User tools for manipulating user credentials. nordugrid-arc-7.1.1/src/clients/PaxHeaders/README0000644000000000000000000000013215067751327016463 xustar0030 mtime=1759498967.676561668 30 atime=1759498967.824493028 30 ctime=1759499031.101067966 nordugrid-arc-7.1.1/src/clients/README0000644000175000002070000000013515067751327020364 0ustar00mockbuildmock00000000000000ARC provides a number of command line clients that implement interfaces to various services. 
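The vomses lines shown in the arcproxy.1.in FILES section above are whitespace-separated quoted fields, in the order "name" "host" "port" "DN" "alias". As a hedged illustration only (parse_vomses_line is a hypothetical helper written for this document; the real client parses vomses via Arc::VOMSConfig / Arc::VOMSConfigLine as seen in arcproxy_voms.cpp above), extracting the quoted fields can be sketched as:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical helper: collect the "..." fields of one vomses line in order
// (name, host, port, DN, alias).
static std::vector<std::string> parse_vomses_line(const std::string& line) {
  std::vector<std::string> fields;
  std::string::size_type p = 0;
  while ((p = line.find('"', p)) != std::string::npos) {
    std::string::size_type q = line.find('"', p + 1);
    if (q == std::string::npos) break;               // unmatched quote: stop
    fields.push_back(line.substr(p + 1, q - p - 1)); // text between the quotes
    p = q + 1;
  }
  return fields;
}

int main() {
  // Example line taken from the arcproxy.1.in FILES section above.
  std::vector<std::string> f = parse_vomses_line(
    "\"nordugrid.org\" \"voms.uninett.no\" \"15015\" "
    "\"/O=Grid/O=NorduGrid/CN=host/voms.ndgf.org\" \"nordugrid.org\"");
  for (size_t i = 0; i < f.size(); ++i) std::cout << f[i] << "\n";
  return 0;
}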
nordugrid-arc-7.1.1/src/clients/PaxHeaders/compute0000644000000000000000000000013215067751427017203 xustar0030 mtime=1759499031.245456713 30 atime=1759499034.766510215 30 ctime=1759499031.245456713 nordugrid-arc-7.1.1/src/clients/compute/0000755000175000002070000000000015067751427021162 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327021313 xustar0030 mtime=1759498967.676561668 30 atime=1759498967.824493028 30 ctime=1759499031.210213207 nordugrid-arc-7.1.1/src/clients/compute/Makefile.am0000644000175000002070000000650215067751327023220 0ustar00mockbuildmock00000000000000bin_PROGRAMS = arcsub arcget arcstat arcinfo arckill arcclean arccat arcsync \ arcresume arcrenew arctest man_MANS = arcsub.1 arcget.1 arcstat.1 arcinfo.1 arckill.1 arcclean.1 arccat.1 \ arcsync.1 arcresume.1 arcrenew.1 arctest.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arcsub_SOURCES = arcsub.cpp utils.cpp utils.h submit.cpp submit.h arcsub_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcsub_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arctest_SOURCES = arctest.cpp utils.cpp utils.h submit.cpp submit.h arctest_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arctest_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcget_SOURCES = arcget.cpp utils.cpp utils.h arcget_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcget_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcstat_SOURCES = arcstat.cpp utils.cpp utils.h arcstat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcstat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcinfo_SOURCES = arcinfo.cpp utils.cpp utils.h arcinfo_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcinfo_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arckill_SOURCES = arckill.cpp utils.cpp utils.h arckill_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arckill_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcclean_SOURCES = arcclean.cpp utils.cpp utils.h arcclean_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcclean_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arccat_SOURCES = arccat.cpp utils.cpp utils.h arccat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arccat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcsync_SOURCES = arcsync.cpp utils.cpp utils.h arcsync_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcsync_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcrenew_SOURCES = arcrenew.cpp utils.cpp utils.h arcrenew_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) 
arcrenew_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS)
arcresume_SOURCES = arcresume.cpp utils.cpp utils.h
arcresume_CXXFLAGS = -I$(top_srcdir)/include \
	$(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS)
arcresume_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS)
%.1: % %.1.in
	LANG=C help2man -N -h "-h|sed s/…/.../g" -i $(word 2,$^) -o $@ ./$<
EXTRA_DIST = $(man_MANS:=.in)
nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arctest.1.in0000644000000000000000000000013215067751327021413 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.825493043 30 ctime=1759499031.246147214 nordugrid-arc-7.1.1/src/clients/compute/arctest.1.in0000644000175000002070000000443115067751327023317 0ustar00mockbuildmock00000000000000[NAME]
arctest \- ARC Test Suite
[EXTENDED DESCRIPTION]
The
.B arctest
command is mainly used for basic testing of job submission to ARC CE using various pre-defined test jobs. It supports all target selection options available for
.B arcsub
and uses the same brokering logic. It can also print basic information about the user's certificate. The command is complementary to
.B arcinfo
and
.B arcproxy -I
The test jobs available in this version of arctest are:
Test job 1: This test job calculates prime numbers for a number of minutes given by
.B -r
(default 5) and outputs the list to stderr. The source code for the prime-number program, the Makefile and the executable are downloaded to the cluster from HTTP and FTP servers and the program is compiled before running. In this way, the test job constitutes a fairly comprehensive test of the basic setup of a grid cluster.
Test job 2: attempts to list all environment variables at the remote site.
Test job 3: copies a remote file from an HTTP server into a local file.
[EXAMPLES]
.br
\fBarctest -J 1 -C \fR
will submit test job number 1 to the specified CE.
[FILES]
.TP
.B ~/.arc/client.conf
Some options can be given default values by specifying them in the ARC client configuration file. By using the
.B --conffile
option a different configuration file can be used than the default.
.TP
.B ~/.arc/jobs.dat
This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database, and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the
.B --all
option to the various NorduGrid ARC user interface commands. By using the
.B --joblist
option a different file can be used than the default.
[COPYRIGHT]
APACHE LICENSE Version 2.0
[AUTHOR]
ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org
[SEE ALSO]
.BR arccat (1),
.BR arcclean (1),
.BR arccp (1),
.BR arcget (1),
.BR arcinfo (1),
.BR arckill (1),
.BR arcls (1),
.BR arcmkdir (1),
.BR arcproxy (1),
.BR arcrenew (1),
.BR arcresume (1),
.BR arcrm (1),
.BR arcstat (1),
.BR arcsub (1),
.BR arcsync (1)
nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/Makefile.in0000644000000000000000000000013215067751347021326 xustar0030 mtime=1759498983.594299269 30 atime=1759499019.647280478 30 ctime=1759499031.211443561 nordugrid-arc-7.1.1/src/clients/compute/Makefile.in0000644000175000002070000025415615067751347023235 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2020 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = arcsub$(EXEEXT) arcget$(EXEEXT) arcstat$(EXEEXT) \ arcinfo$(EXEEXT) arckill$(EXEEXT) arcclean$(EXEEXT) \ arccat$(EXEEXT) arcsync$(EXEEXT) arcresume$(EXEEXT) \ arcrenew$(EXEEXT) arctest$(EXEEXT) subdir = src/clients/compute ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)" PROGRAMS = 
$(bin_PROGRAMS) am_arccat_OBJECTS = arccat-arccat.$(OBJEXT) arccat-utils.$(OBJEXT) arccat_OBJECTS = $(am_arccat_OBJECTS) am__DEPENDENCIES_1 = arccat_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = arccat_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arccat_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcclean_OBJECTS = arcclean-arcclean.$(OBJEXT) \ arcclean-utils.$(OBJEXT) arcclean_OBJECTS = $(am_arcclean_OBJECTS) arcclean_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcclean_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcclean_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcget_OBJECTS = arcget-arcget.$(OBJEXT) arcget-utils.$(OBJEXT) arcget_OBJECTS = $(am_arcget_OBJECTS) arcget_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcget_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcget_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcinfo_OBJECTS = arcinfo-arcinfo.$(OBJEXT) arcinfo-utils.$(OBJEXT) arcinfo_OBJECTS = $(am_arcinfo_OBJECTS) arcinfo_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcinfo_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcinfo_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arckill_OBJECTS = arckill-arckill.$(OBJEXT) arckill-utils.$(OBJEXT) arckill_OBJECTS = $(am_arckill_OBJECTS) arckill_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arckill_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arckill_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcrenew_OBJECTS = arcrenew-arcrenew.$(OBJEXT) \ arcrenew-utils.$(OBJEXT) arcrenew_OBJECTS = $(am_arcrenew_OBJECTS) arcrenew_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcrenew_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcrenew_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcresume_OBJECTS = arcresume-arcresume.$(OBJEXT) \ arcresume-utils.$(OBJEXT) arcresume_OBJECTS = $(am_arcresume_OBJECTS) arcresume_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcresume_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcresume_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcstat_OBJECTS = arcstat-arcstat.$(OBJEXT) arcstat-utils.$(OBJEXT) arcstat_OBJECTS = $(am_arcstat_OBJECTS) arcstat_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcstat_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcstat_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcsub_OBJECTS = arcsub-arcsub.$(OBJEXT) arcsub-utils.$(OBJEXT) \ arcsub-submit.$(OBJEXT) arcsub_OBJECTS = $(am_arcsub_OBJECTS) arcsub_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcsub_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link 
$(CXXLD) $(arcsub_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arcsync_OBJECTS = arcsync-arcsync.$(OBJEXT) arcsync-utils.$(OBJEXT) arcsync_OBJECTS = $(am_arcsync_OBJECTS) arcsync_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arcsync_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arcsync_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ am_arctest_OBJECTS = arctest-arctest.$(OBJEXT) arctest-utils.$(OBJEXT) \ arctest-submit.$(OBJEXT) arctest_OBJECTS = $(am_arctest_OBJECTS) arctest_DEPENDENCIES = $(CLILIBS) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) arctest_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(arctest_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles am__depfiles_remade = ./$(DEPDIR)/arccat-arccat.Po \ ./$(DEPDIR)/arccat-utils.Po ./$(DEPDIR)/arcclean-arcclean.Po \ ./$(DEPDIR)/arcclean-utils.Po ./$(DEPDIR)/arcget-arcget.Po \ ./$(DEPDIR)/arcget-utils.Po ./$(DEPDIR)/arcinfo-arcinfo.Po \ ./$(DEPDIR)/arcinfo-utils.Po ./$(DEPDIR)/arckill-arckill.Po \ ./$(DEPDIR)/arckill-utils.Po ./$(DEPDIR)/arcrenew-arcrenew.Po \ ./$(DEPDIR)/arcrenew-utils.Po \ ./$(DEPDIR)/arcresume-arcresume.Po \ ./$(DEPDIR)/arcresume-utils.Po ./$(DEPDIR)/arcstat-arcstat.Po \ ./$(DEPDIR)/arcstat-utils.Po ./$(DEPDIR)/arcsub-arcsub.Po \ ./$(DEPDIR)/arcsub-submit.Po ./$(DEPDIR)/arcsub-utils.Po \ ./$(DEPDIR)/arcsync-arcsync.Po ./$(DEPDIR)/arcsync-utils.Po \ ./$(DEPDIR)/arctest-arctest.Po ./$(DEPDIR)/arctest-submit.Po \ ./$(DEPDIR)/arctest-utils.Po am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = 
$(arccat_SOURCES) $(arcclean_SOURCES) $(arcget_SOURCES) \ $(arcinfo_SOURCES) $(arckill_SOURCES) $(arcrenew_SOURCES) \ $(arcresume_SOURCES) $(arcstat_SOURCES) $(arcsub_SOURCES) \ $(arcsync_SOURCES) $(arctest_SOURCES) DIST_SOURCES = $(arccat_SOURCES) $(arcclean_SOURCES) $(arcget_SOURCES) \ $(arcinfo_SOURCES) $(arckill_SOURCES) $(arcrenew_SOURCES) \ $(arcresume_SOURCES) $(arcstat_SOURCES) $(arcsub_SOURCES) \ $(arcsync_SOURCES) $(arctest_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 NROFF = nroff MANS = $(man_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ 
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS 
= @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ man_MANS = arcsub.1 arcget.1 arcstat.1 arcinfo.1 arckill.1 arcclean.1 arccat.1 \ arcsync.1 arcresume.1 arcrenew.1 arctest.1 CLILIBS = \ $(top_builddir)/src/hed/libs/compute/libarccompute.la \ $(top_builddir)/src/hed/libs/data/libarcdata.la \ $(top_builddir)/src/hed/libs/credential/libarccredential.la \ $(top_builddir)/src/hed/libs/message/libarcmessage.la \ $(top_builddir)/src/hed/libs/loader/libarcloader.la \ $(top_builddir)/src/hed/libs/common/libarccommon.la arcsub_SOURCES = arcsub.cpp utils.cpp utils.h submit.cpp submit.h arcsub_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcsub_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arctest_SOURCES = arctest.cpp utils.cpp utils.h submit.cpp submit.h arctest_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arctest_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcget_SOURCES = arcget.cpp utils.cpp utils.h arcget_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcget_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcstat_SOURCES = arcstat.cpp utils.cpp utils.h arcstat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) 
$(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcstat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcinfo_SOURCES = arcinfo.cpp utils.cpp utils.h arcinfo_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcinfo_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arckill_SOURCES = arckill.cpp utils.cpp utils.h arckill_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arckill_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcclean_SOURCES = arcclean.cpp utils.cpp utils.h arcclean_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcclean_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arccat_SOURCES = arccat.cpp utils.cpp utils.h arccat_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arccat_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcsync_SOURCES = arcsync.cpp utils.cpp utils.h arcsync_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcsync_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcrenew_SOURCES = arcrenew.cpp utils.cpp utils.h arcrenew_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcrenew_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) arcresume_SOURCES = arcresume.cpp utils.cpp utils.h arcresume_CXXFLAGS = -I$(top_srcdir)/include \ $(GLIBMM_CFLAGS) $(LIBXML2_CFLAGS) $(OPENSSL_CFLAGS) $(AM_CXXFLAGS) arcresume_LDADD = $(CLILIBS) $(GLIBMM_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) EXTRA_DIST = $(man_MANS:=.in) all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/clients/compute/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/clients/compute/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list arccat$(EXEEXT): $(arccat_OBJECTS) $(arccat_DEPENDENCIES) $(EXTRA_arccat_DEPENDENCIES) @rm -f arccat$(EXEEXT) $(AM_V_CXXLD)$(arccat_LINK) $(arccat_OBJECTS) $(arccat_LDADD) $(LIBS) arcclean$(EXEEXT): $(arcclean_OBJECTS) $(arcclean_DEPENDENCIES) $(EXTRA_arcclean_DEPENDENCIES) @rm -f arcclean$(EXEEXT) $(AM_V_CXXLD)$(arcclean_LINK) $(arcclean_OBJECTS) $(arcclean_LDADD) $(LIBS) arcget$(EXEEXT): $(arcget_OBJECTS) $(arcget_DEPENDENCIES) $(EXTRA_arcget_DEPENDENCIES) @rm -f arcget$(EXEEXT) $(AM_V_CXXLD)$(arcget_LINK) $(arcget_OBJECTS) $(arcget_LDADD) $(LIBS) arcinfo$(EXEEXT): $(arcinfo_OBJECTS) $(arcinfo_DEPENDENCIES) $(EXTRA_arcinfo_DEPENDENCIES) @rm -f arcinfo$(EXEEXT) $(AM_V_CXXLD)$(arcinfo_LINK) $(arcinfo_OBJECTS) $(arcinfo_LDADD) $(LIBS) arckill$(EXEEXT): $(arckill_OBJECTS) $(arckill_DEPENDENCIES) $(EXTRA_arckill_DEPENDENCIES) @rm -f arckill$(EXEEXT) $(AM_V_CXXLD)$(arckill_LINK) $(arckill_OBJECTS) $(arckill_LDADD) $(LIBS) arcrenew$(EXEEXT): $(arcrenew_OBJECTS) $(arcrenew_DEPENDENCIES) $(EXTRA_arcrenew_DEPENDENCIES) @rm -f 
arcrenew$(EXEEXT) $(AM_V_CXXLD)$(arcrenew_LINK) $(arcrenew_OBJECTS) $(arcrenew_LDADD) $(LIBS) arcresume$(EXEEXT): $(arcresume_OBJECTS) $(arcresume_DEPENDENCIES) $(EXTRA_arcresume_DEPENDENCIES) @rm -f arcresume$(EXEEXT) $(AM_V_CXXLD)$(arcresume_LINK) $(arcresume_OBJECTS) $(arcresume_LDADD) $(LIBS) arcstat$(EXEEXT): $(arcstat_OBJECTS) $(arcstat_DEPENDENCIES) $(EXTRA_arcstat_DEPENDENCIES) @rm -f arcstat$(EXEEXT) $(AM_V_CXXLD)$(arcstat_LINK) $(arcstat_OBJECTS) $(arcstat_LDADD) $(LIBS) arcsub$(EXEEXT): $(arcsub_OBJECTS) $(arcsub_DEPENDENCIES) $(EXTRA_arcsub_DEPENDENCIES) @rm -f arcsub$(EXEEXT) $(AM_V_CXXLD)$(arcsub_LINK) $(arcsub_OBJECTS) $(arcsub_LDADD) $(LIBS) arcsync$(EXEEXT): $(arcsync_OBJECTS) $(arcsync_DEPENDENCIES) $(EXTRA_arcsync_DEPENDENCIES) @rm -f arcsync$(EXEEXT) $(AM_V_CXXLD)$(arcsync_LINK) $(arcsync_OBJECTS) $(arcsync_LDADD) $(LIBS) arctest$(EXEEXT): $(arctest_OBJECTS) $(arctest_DEPENDENCIES) $(EXTRA_arctest_DEPENDENCIES) @rm -f arctest$(EXEEXT) $(AM_V_CXXLD)$(arctest_LINK) $(arctest_OBJECTS) $(arctest_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arccat-arccat.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arccat-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcclean-arcclean.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcclean-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcget-arcget.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcget-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcinfo-arcinfo.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcinfo-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arckill-arckill.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arckill-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrenew-arcrenew.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcrenew-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcresume-arcresume.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcresume-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcstat-arcstat.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcstat-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsub-arcsub.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsub-submit.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsub-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsync-arcsync.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arcsync-utils.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arctest-arctest.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arctest-submit.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arctest-utils.Po@am__quote@ # am--include-marker $(am__depfiles_remade): @$(MKDIR_P) $(@D) @echo '# dummy' 
>$@-t && $(am__mv) $@-t $@ am--depfiles: $(am__depfiles_remade) .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< arccat-arccat.o: arccat.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-arccat.o -MD -MP -MF $(DEPDIR)/arccat-arccat.Tpo -c -o arccat-arccat.o `test -f 'arccat.cpp' || echo '$(srcdir)/'`arccat.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccat-arccat.Tpo $(DEPDIR)/arccat-arccat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arccat.cpp' object='arccat-arccat.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-arccat.o `test -f 'arccat.cpp' || echo '$(srcdir)/'`arccat.cpp arccat-arccat.obj: arccat.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-arccat.obj -MD -MP -MF $(DEPDIR)/arccat-arccat.Tpo -c -o arccat-arccat.obj `if test -f 'arccat.cpp'; then $(CYGPATH_W) 'arccat.cpp'; else $(CYGPATH_W) '$(srcdir)/arccat.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccat-arccat.Tpo $(DEPDIR)/arccat-arccat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arccat.cpp' object='arccat-arccat.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-arccat.obj `if test -f 'arccat.cpp'; then $(CYGPATH_W) 'arccat.cpp'; else $(CYGPATH_W) '$(srcdir)/arccat.cpp'; fi` arccat-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-utils.o -MD -MP -MF $(DEPDIR)/arccat-utils.Tpo -c -o arccat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) 
$(DEPDIR)/arccat-utils.Tpo $(DEPDIR)/arccat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arccat-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arccat-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -MT arccat-utils.obj -MD -MP -MF $(DEPDIR)/arccat-utils.Tpo -c -o arccat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arccat-utils.Tpo $(DEPDIR)/arccat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arccat-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arccat_CXXFLAGS) $(CXXFLAGS) -c -o arccat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcclean-arcclean.o: arcclean.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-arcclean.o -MD -MP -MF $(DEPDIR)/arcclean-arcclean.Tpo -c -o arcclean-arcclean.o `test -f 'arcclean.cpp' || echo '$(srcdir)/'`arcclean.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcclean-arcclean.Tpo $(DEPDIR)/arcclean-arcclean.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcclean.cpp' object='arcclean-arcclean.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-arcclean.o `test -f 'arcclean.cpp' || echo '$(srcdir)/'`arcclean.cpp arcclean-arcclean.obj: arcclean.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-arcclean.obj -MD -MP -MF $(DEPDIR)/arcclean-arcclean.Tpo -c -o arcclean-arcclean.obj `if test -f 'arcclean.cpp'; then $(CYGPATH_W) 'arcclean.cpp'; else $(CYGPATH_W) '$(srcdir)/arcclean.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcclean-arcclean.Tpo $(DEPDIR)/arcclean-arcclean.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcclean.cpp' object='arcclean-arcclean.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-arcclean.obj `if test -f 'arcclean.cpp'; then $(CYGPATH_W) 'arcclean.cpp'; else $(CYGPATH_W) '$(srcdir)/arcclean.cpp'; fi` arcclean-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-utils.o -MD -MP -MF $(DEPDIR)/arcclean-utils.Tpo -c -o 
arcclean-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcclean-utils.Tpo $(DEPDIR)/arcclean-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcclean-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcclean-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -MT arcclean-utils.obj -MD -MP -MF $(DEPDIR)/arcclean-utils.Tpo -c -o arcclean-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcclean-utils.Tpo $(DEPDIR)/arcclean-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcclean-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcclean_CXXFLAGS) $(CXXFLAGS) -c -o arcclean-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcget-arcget.o: arcget.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-arcget.o -MD -MP -MF $(DEPDIR)/arcget-arcget.Tpo -c -o arcget-arcget.o `test -f 'arcget.cpp' || echo '$(srcdir)/'`arcget.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcget-arcget.Tpo $(DEPDIR)/arcget-arcget.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcget.cpp' object='arcget-arcget.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-arcget.o `test -f 'arcget.cpp' || echo '$(srcdir)/'`arcget.cpp arcget-arcget.obj: arcget.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-arcget.obj -MD -MP -MF $(DEPDIR)/arcget-arcget.Tpo -c -o arcget-arcget.obj `if test -f 'arcget.cpp'; then $(CYGPATH_W) 'arcget.cpp'; else $(CYGPATH_W) '$(srcdir)/arcget.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcget-arcget.Tpo $(DEPDIR)/arcget-arcget.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcget.cpp' object='arcget-arcget.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-arcget.obj `if test -f 'arcget.cpp'; then $(CYGPATH_W) 'arcget.cpp'; else $(CYGPATH_W) '$(srcdir)/arcget.cpp'; fi` arcget-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-utils.o -MD -MP -MF 
$(DEPDIR)/arcget-utils.Tpo -c -o arcget-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcget-utils.Tpo $(DEPDIR)/arcget-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcget-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcget-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -MT arcget-utils.obj -MD -MP -MF $(DEPDIR)/arcget-utils.Tpo -c -o arcget-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcget-utils.Tpo $(DEPDIR)/arcget-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcget-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcget_CXXFLAGS) $(CXXFLAGS) -c -o arcget-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcinfo-arcinfo.o: arcinfo.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-arcinfo.o -MD -MP -MF $(DEPDIR)/arcinfo-arcinfo.Tpo -c -o arcinfo-arcinfo.o `test -f 'arcinfo.cpp' || echo '$(srcdir)/'`arcinfo.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcinfo-arcinfo.Tpo $(DEPDIR)/arcinfo-arcinfo.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcinfo.cpp' object='arcinfo-arcinfo.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-arcinfo.o `test -f 'arcinfo.cpp' || echo '$(srcdir)/'`arcinfo.cpp arcinfo-arcinfo.obj: arcinfo.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-arcinfo.obj -MD -MP -MF $(DEPDIR)/arcinfo-arcinfo.Tpo -c -o arcinfo-arcinfo.obj `if test -f 'arcinfo.cpp'; then $(CYGPATH_W) 'arcinfo.cpp'; else $(CYGPATH_W) '$(srcdir)/arcinfo.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcinfo-arcinfo.Tpo $(DEPDIR)/arcinfo-arcinfo.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcinfo.cpp' object='arcinfo-arcinfo.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-arcinfo.obj `if test -f 'arcinfo.cpp'; then $(CYGPATH_W) 'arcinfo.cpp'; else $(CYGPATH_W) '$(srcdir)/arcinfo.cpp'; fi` arcinfo-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) 
$(CXXFLAGS) -MT arcinfo-utils.o -MD -MP -MF $(DEPDIR)/arcinfo-utils.Tpo -c -o arcinfo-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcinfo-utils.Tpo $(DEPDIR)/arcinfo-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcinfo-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcinfo-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -MT arcinfo-utils.obj -MD -MP -MF $(DEPDIR)/arcinfo-utils.Tpo -c -o arcinfo-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcinfo-utils.Tpo $(DEPDIR)/arcinfo-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcinfo-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcinfo_CXXFLAGS) $(CXXFLAGS) -c -o arcinfo-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arckill-arckill.o: arckill.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-arckill.o -MD -MP -MF $(DEPDIR)/arckill-arckill.Tpo -c -o arckill-arckill.o `test -f 'arckill.cpp' || echo '$(srcdir)/'`arckill.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arckill-arckill.Tpo $(DEPDIR)/arckill-arckill.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arckill.cpp' object='arckill-arckill.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-arckill.o `test -f 'arckill.cpp' || echo '$(srcdir)/'`arckill.cpp arckill-arckill.obj: arckill.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-arckill.obj -MD -MP -MF $(DEPDIR)/arckill-arckill.Tpo -c -o arckill-arckill.obj `if test -f 'arckill.cpp'; then $(CYGPATH_W) 'arckill.cpp'; else $(CYGPATH_W) '$(srcdir)/arckill.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arckill-arckill.Tpo $(DEPDIR)/arckill-arckill.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arckill.cpp' object='arckill-arckill.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-arckill.obj `if test -f 'arckill.cpp'; then $(CYGPATH_W) 'arckill.cpp'; else $(CYGPATH_W) '$(srcdir)/arckill.cpp'; fi` arckill-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-utils.o -MD -MP -MF $(DEPDIR)/arckill-utils.Tpo -c -o arckill-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arckill-utils.Tpo $(DEPDIR)/arckill-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arckill-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arckill-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -MT arckill-utils.obj -MD -MP -MF $(DEPDIR)/arckill-utils.Tpo -c -o arckill-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arckill-utils.Tpo $(DEPDIR)/arckill-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arckill-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arckill_CXXFLAGS) $(CXXFLAGS) -c -o arckill-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcrenew-arcrenew.o: arcrenew.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-arcrenew.o -MD -MP -MF $(DEPDIR)/arcrenew-arcrenew.Tpo -c -o arcrenew-arcrenew.o `test -f 'arcrenew.cpp' || echo '$(srcdir)/'`arcrenew.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrenew-arcrenew.Tpo $(DEPDIR)/arcrenew-arcrenew.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcrenew.cpp' object='arcrenew-arcrenew.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-arcrenew.o `test -f 'arcrenew.cpp' || echo '$(srcdir)/'`arcrenew.cpp arcrenew-arcrenew.obj: arcrenew.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-arcrenew.obj -MD -MP -MF $(DEPDIR)/arcrenew-arcrenew.Tpo -c -o arcrenew-arcrenew.obj `if test -f 'arcrenew.cpp'; then $(CYGPATH_W) 'arcrenew.cpp'; else $(CYGPATH_W) '$(srcdir)/arcrenew.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrenew-arcrenew.Tpo $(DEPDIR)/arcrenew-arcrenew.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcrenew.cpp' object='arcrenew-arcrenew.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-arcrenew.obj `if test -f 'arcrenew.cpp'; then $(CYGPATH_W) 'arcrenew.cpp'; else $(CYGPATH_W) 
'$(srcdir)/arcrenew.cpp'; fi` arcrenew-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-utils.o -MD -MP -MF $(DEPDIR)/arcrenew-utils.Tpo -c -o arcrenew-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrenew-utils.Tpo $(DEPDIR)/arcrenew-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcrenew-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcrenew-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -MT arcrenew-utils.obj -MD -MP -MF $(DEPDIR)/arcrenew-utils.Tpo -c -o arcrenew-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcrenew-utils.Tpo $(DEPDIR)/arcrenew-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcrenew-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcrenew_CXXFLAGS) $(CXXFLAGS) -c -o arcrenew-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcresume-arcresume.o: arcresume.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-arcresume.o -MD -MP -MF $(DEPDIR)/arcresume-arcresume.Tpo -c -o arcresume-arcresume.o `test -f 'arcresume.cpp' || echo '$(srcdir)/'`arcresume.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcresume-arcresume.Tpo $(DEPDIR)/arcresume-arcresume.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcresume.cpp' object='arcresume-arcresume.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-arcresume.o `test -f 'arcresume.cpp' || echo '$(srcdir)/'`arcresume.cpp arcresume-arcresume.obj: arcresume.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-arcresume.obj -MD -MP -MF $(DEPDIR)/arcresume-arcresume.Tpo -c -o arcresume-arcresume.obj `if test -f 'arcresume.cpp'; then $(CYGPATH_W) 'arcresume.cpp'; else $(CYGPATH_W) '$(srcdir)/arcresume.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcresume-arcresume.Tpo $(DEPDIR)/arcresume-arcresume.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcresume.cpp' object='arcresume-arcresume.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-arcresume.obj `if test -f 'arcresume.cpp'; then $(CYGPATH_W) 'arcresume.cpp'; else $(CYGPATH_W) '$(srcdir)/arcresume.cpp'; fi` arcresume-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-utils.o -MD -MP -MF $(DEPDIR)/arcresume-utils.Tpo -c -o arcresume-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcresume-utils.Tpo $(DEPDIR)/arcresume-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcresume-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcresume-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -MT arcresume-utils.obj -MD -MP -MF $(DEPDIR)/arcresume-utils.Tpo -c -o arcresume-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcresume-utils.Tpo $(DEPDIR)/arcresume-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcresume-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcresume_CXXFLAGS) $(CXXFLAGS) -c -o arcresume-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcstat-arcstat.o: arcstat.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-arcstat.o -MD -MP -MF $(DEPDIR)/arcstat-arcstat.Tpo -c -o arcstat-arcstat.o `test -f 'arcstat.cpp' || echo '$(srcdir)/'`arcstat.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcstat-arcstat.Tpo $(DEPDIR)/arcstat-arcstat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcstat.cpp' object='arcstat-arcstat.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-arcstat.o `test -f 'arcstat.cpp' || echo '$(srcdir)/'`arcstat.cpp arcstat-arcstat.obj: arcstat.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-arcstat.obj -MD -MP -MF $(DEPDIR)/arcstat-arcstat.Tpo -c -o arcstat-arcstat.obj `if test -f 'arcstat.cpp'; then $(CYGPATH_W) 'arcstat.cpp'; else $(CYGPATH_W) '$(srcdir)/arcstat.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcstat-arcstat.Tpo $(DEPDIR)/arcstat-arcstat.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcstat.cpp' object='arcstat-arcstat.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-arcstat.obj `if test -f 'arcstat.cpp'; then $(CYGPATH_W) 'arcstat.cpp'; else $(CYGPATH_W) '$(srcdir)/arcstat.cpp'; fi` arcstat-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-utils.o -MD -MP -MF $(DEPDIR)/arcstat-utils.Tpo -c -o arcstat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcstat-utils.Tpo $(DEPDIR)/arcstat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcstat-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcstat-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -MT arcstat-utils.obj -MD -MP -MF $(DEPDIR)/arcstat-utils.Tpo -c -o arcstat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcstat-utils.Tpo $(DEPDIR)/arcstat-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcstat-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcstat_CXXFLAGS) $(CXXFLAGS) -c -o arcstat-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcsub-arcsub.o: arcsub.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-arcsub.o -MD -MP -MF $(DEPDIR)/arcsub-arcsub.Tpo -c -o arcsub-arcsub.o `test -f 'arcsub.cpp' || echo '$(srcdir)/'`arcsub.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsub-arcsub.Tpo $(DEPDIR)/arcsub-arcsub.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcsub.cpp' object='arcsub-arcsub.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-arcsub.o `test -f 'arcsub.cpp' || echo '$(srcdir)/'`arcsub.cpp arcsub-arcsub.obj: arcsub.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-arcsub.obj -MD -MP -MF $(DEPDIR)/arcsub-arcsub.Tpo -c -o arcsub-arcsub.obj `if test -f 'arcsub.cpp'; then $(CYGPATH_W) 'arcsub.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsub.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsub-arcsub.Tpo $(DEPDIR)/arcsub-arcsub.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcsub.cpp' object='arcsub-arcsub.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-arcsub.obj `if test -f 'arcsub.cpp'; then $(CYGPATH_W) 'arcsub.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsub.cpp'; fi` arcsub-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-utils.o -MD -MP -MF $(DEPDIR)/arcsub-utils.Tpo -c -o arcsub-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsub-utils.Tpo $(DEPDIR)/arcsub-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcsub-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcsub-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-utils.obj -MD -MP -MF $(DEPDIR)/arcsub-utils.Tpo -c -o arcsub-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsub-utils.Tpo $(DEPDIR)/arcsub-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcsub-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arcsub-submit.o: submit.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-submit.o -MD -MP -MF $(DEPDIR)/arcsub-submit.Tpo -c -o arcsub-submit.o `test -f 'submit.cpp' || echo '$(srcdir)/'`submit.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsub-submit.Tpo $(DEPDIR)/arcsub-submit.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='submit.cpp' object='arcsub-submit.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-submit.o `test -f 'submit.cpp' || echo '$(srcdir)/'`submit.cpp arcsub-submit.obj: submit.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -MT arcsub-submit.obj -MD -MP -MF $(DEPDIR)/arcsub-submit.Tpo -c -o arcsub-submit.obj `if test -f 'submit.cpp'; then $(CYGPATH_W) 'submit.cpp'; else $(CYGPATH_W) '$(srcdir)/submit.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsub-submit.Tpo $(DEPDIR)/arcsub-submit.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='submit.cpp' object='arcsub-submit.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsub_CXXFLAGS) $(CXXFLAGS) -c -o arcsub-submit.obj `if test -f 'submit.cpp'; then $(CYGPATH_W) 'submit.cpp'; else $(CYGPATH_W) '$(srcdir)/submit.cpp'; fi` arcsync-arcsync.o: arcsync.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-arcsync.o -MD -MP -MF $(DEPDIR)/arcsync-arcsync.Tpo -c -o arcsync-arcsync.o `test -f 'arcsync.cpp' || echo '$(srcdir)/'`arcsync.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsync-arcsync.Tpo $(DEPDIR)/arcsync-arcsync.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcsync.cpp' object='arcsync-arcsync.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-arcsync.o `test -f 'arcsync.cpp' || echo '$(srcdir)/'`arcsync.cpp arcsync-arcsync.obj: arcsync.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-arcsync.obj -MD -MP -MF $(DEPDIR)/arcsync-arcsync.Tpo -c -o arcsync-arcsync.obj `if test -f 'arcsync.cpp'; then $(CYGPATH_W) 'arcsync.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsync.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsync-arcsync.Tpo $(DEPDIR)/arcsync-arcsync.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arcsync.cpp' object='arcsync-arcsync.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-arcsync.obj `if test -f 'arcsync.cpp'; then $(CYGPATH_W) 'arcsync.cpp'; else $(CYGPATH_W) '$(srcdir)/arcsync.cpp'; fi` arcsync-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-utils.o -MD -MP -MF $(DEPDIR)/arcsync-utils.Tpo -c -o arcsync-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsync-utils.Tpo $(DEPDIR)/arcsync-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcsync-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arcsync-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -MT arcsync-utils.obj -MD -MP -MF $(DEPDIR)/arcsync-utils.Tpo -c -o arcsync-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arcsync-utils.Tpo $(DEPDIR)/arcsync-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arcsync-utils.obj' 
libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arcsync_CXXFLAGS) $(CXXFLAGS) -c -o arcsync-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arctest-arctest.o: arctest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-arctest.o -MD -MP -MF $(DEPDIR)/arctest-arctest.Tpo -c -o arctest-arctest.o `test -f 'arctest.cpp' || echo '$(srcdir)/'`arctest.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arctest-arctest.Tpo $(DEPDIR)/arctest-arctest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arctest.cpp' object='arctest-arctest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-arctest.o `test -f 'arctest.cpp' || echo '$(srcdir)/'`arctest.cpp arctest-arctest.obj: arctest.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-arctest.obj -MD -MP -MF $(DEPDIR)/arctest-arctest.Tpo -c -o arctest-arctest.obj `if test -f 'arctest.cpp'; then $(CYGPATH_W) 'arctest.cpp'; else $(CYGPATH_W) '$(srcdir)/arctest.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arctest-arctest.Tpo $(DEPDIR)/arctest-arctest.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='arctest.cpp' object='arctest-arctest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-arctest.obj `if test -f 'arctest.cpp'; then $(CYGPATH_W) 'arctest.cpp'; else $(CYGPATH_W) '$(srcdir)/arctest.cpp'; fi` arctest-utils.o: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-utils.o -MD -MP -MF $(DEPDIR)/arctest-utils.Tpo -c -o arctest-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arctest-utils.Tpo $(DEPDIR)/arctest-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='utils.cpp' object='arctest-utils.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-utils.o `test -f 'utils.cpp' || echo '$(srcdir)/'`utils.cpp arctest-utils.obj: utils.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-utils.obj -MD -MP -MF $(DEPDIR)/arctest-utils.Tpo -c -o arctest-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arctest-utils.Tpo $(DEPDIR)/arctest-utils.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
$(AM_V_CXX)source='utils.cpp' object='arctest-utils.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-utils.obj `if test -f 'utils.cpp'; then $(CYGPATH_W) 'utils.cpp'; else $(CYGPATH_W) '$(srcdir)/utils.cpp'; fi` arctest-submit.o: submit.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-submit.o -MD -MP -MF $(DEPDIR)/arctest-submit.Tpo -c -o arctest-submit.o `test -f 'submit.cpp' || echo '$(srcdir)/'`submit.cpp @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arctest-submit.Tpo $(DEPDIR)/arctest-submit.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='submit.cpp' object='arctest-submit.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-submit.o `test -f 'submit.cpp' || echo '$(srcdir)/'`submit.cpp arctest-submit.obj: submit.cpp @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -MT arctest-submit.obj -MD -MP -MF $(DEPDIR)/arctest-submit.Tpo -c -o arctest-submit.obj `if test -f 'submit.cpp'; then $(CYGPATH_W) 'submit.cpp'; else $(CYGPATH_W) '$(srcdir)/submit.cpp'; fi` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/arctest-submit.Tpo $(DEPDIR)/arctest-submit.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='submit.cpp' object='arctest-submit.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(arctest_CXXFLAGS) $(CXXFLAGS) -c -o arctest-submit.obj `if test -f 'submit.cpp'; then $(CYGPATH_W) 'submit.cpp'; else $(CYGPATH_W) '$(srcdir)/submit.cpp'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i 
in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) $(MANS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f ./$(DEPDIR)/arccat-arccat.Po -rm -f ./$(DEPDIR)/arccat-utils.Po -rm -f ./$(DEPDIR)/arcclean-arcclean.Po -rm -f ./$(DEPDIR)/arcclean-utils.Po -rm -f ./$(DEPDIR)/arcget-arcget.Po -rm -f ./$(DEPDIR)/arcget-utils.Po -rm -f ./$(DEPDIR)/arcinfo-arcinfo.Po -rm -f ./$(DEPDIR)/arcinfo-utils.Po -rm -f ./$(DEPDIR)/arckill-arckill.Po -rm -f ./$(DEPDIR)/arckill-utils.Po -rm -f ./$(DEPDIR)/arcrenew-arcrenew.Po -rm -f ./$(DEPDIR)/arcrenew-utils.Po -rm -f ./$(DEPDIR)/arcresume-arcresume.Po -rm -f ./$(DEPDIR)/arcresume-utils.Po -rm -f ./$(DEPDIR)/arcstat-arcstat.Po -rm -f ./$(DEPDIR)/arcstat-utils.Po -rm -f ./$(DEPDIR)/arcsub-arcsub.Po -rm -f ./$(DEPDIR)/arcsub-submit.Po -rm -f ./$(DEPDIR)/arcsub-utils.Po -rm -f ./$(DEPDIR)/arcsync-arcsync.Po -rm -f ./$(DEPDIR)/arcsync-utils.Po -rm -f ./$(DEPDIR)/arctest-arctest.Po -rm -f ./$(DEPDIR)/arctest-submit.Po -rm -f ./$(DEPDIR)/arctest-utils.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/arccat-arccat.Po -rm -f ./$(DEPDIR)/arccat-utils.Po -rm -f ./$(DEPDIR)/arcclean-arcclean.Po -rm -f ./$(DEPDIR)/arcclean-utils.Po -rm -f ./$(DEPDIR)/arcget-arcget.Po -rm -f ./$(DEPDIR)/arcget-utils.Po -rm -f ./$(DEPDIR)/arcinfo-arcinfo.Po -rm -f ./$(DEPDIR)/arcinfo-utils.Po -rm -f ./$(DEPDIR)/arckill-arckill.Po -rm -f ./$(DEPDIR)/arckill-utils.Po -rm -f ./$(DEPDIR)/arcrenew-arcrenew.Po -rm -f ./$(DEPDIR)/arcrenew-utils.Po -rm -f ./$(DEPDIR)/arcresume-arcresume.Po -rm -f ./$(DEPDIR)/arcresume-utils.Po -rm -f ./$(DEPDIR)/arcstat-arcstat.Po -rm -f ./$(DEPDIR)/arcstat-utils.Po -rm -f ./$(DEPDIR)/arcsub-arcsub.Po -rm -f 
./$(DEPDIR)/arcsub-submit.Po -rm -f ./$(DEPDIR)/arcsub-utils.Po -rm -f ./$(DEPDIR)/arcsync-arcsync.Po -rm -f ./$(DEPDIR)/arcsync-utils.Po -rm -f ./$(DEPDIR)/arctest-arctest.Po -rm -f ./$(DEPDIR)/arctest-submit.Po -rm -f ./$(DEPDIR)/arctest-utils.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-man1 \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ uninstall-binPROGRAMS uninstall-man uninstall-man1 .PRECIOUS: Makefile %.1: % %.1.in LANG=C help2man -N -h "-h|sed s/…/.../g" -i $(word 2,$^) -o $@ ./$< # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arckill.1.in0000644000000000000000000000013215067751327021367 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.237808501 nordugrid-arc-7.1.1/src/clients/compute/arckill.1.in0000644000175000002070000000467015067751327023300 0ustar00mockbuildmock00000000000000[NAME] arckill \- ARC Kill [EXTENDED DESCRIPTION] The .B arckill command kills a running job on an ARC enabled resource. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname, all those jobs are killed. If the .B --joblist option is used, the list of jobs is read from a file with the specified filename. By specifying the .B --all option, all jobs can be killed. The .B --computing-element option can be used to select or reject jobs at specific clusters. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. If the job was successfully killed, an attempt will be made to remove the job from the remote cluster, unless the .B --keep option was specified. Depending on the functionality of the service, the job killing procedure may take some time and it may be impossible to clean the job immediately. In that case .B arckill will report a number of cleaned jobs smaller than the number of processed ones. Leftover jobs can be cleaned by running .B arcclean later. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default.
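For illustration, the options described above can be combined; the invocation below is hypothetical (the configuration file path is made up) and kills all jobs while keeping them on the remote clusters for later cleaning:
.nf
arckill --conffile ~/my-client.conf --all --keep
.fi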
.TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database, and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcsub.cpp0000644000000000000000000000013115067751327021241 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 29 ctime=1759499031.22618704 nordugrid-arc-7.1.1/src/clients/compute/arcsub.cpp0000644000175000002070000001706215067751327023152 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" #include "submit.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcsub"); int RUNMAIN(arcsub)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_SUB, istring("[filename ...]"), istring("The arcsub command is used for " "submitting jobs to Grid enabled " "computing\nresources.")); std::list params = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcsub", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration.
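// Rationale for the ordering: the Arc::UserConfig constructed a few lines below may itself log while loading, and the verbosity level from the configuration file is only applied afterwards, when no debug level was given on the command line.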
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.show_plugins) { std::list types; types.push_back("HED:SubmitterPlugin"); types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); types.push_back("HED:JobDescriptionParserPlugin"); types.push_back("HED:BrokerPlugin"); showplugins("arcsub", types, logger, usercfg.Broker().first); return 0; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!opt.broker.empty()) usercfg.Broker(opt.broker); opt.jobdescriptionfiles.insert(opt.jobdescriptionfiles.end(), params.begin(), params.end()); if (opt.jobdescriptionfiles.empty() && opt.jobdescriptionstrings.empty()) { logger.msg(Arc::ERROR, "No job description input specified"); return 1; } std::list jobdescriptionlist; // Loop over input job description files for (std::list::iterator it = opt.jobdescriptionfiles.begin(); it != opt.jobdescriptionfiles.end(); ++it) { std::ifstream descriptionfile(it->c_str()); if (!descriptionfile) { logger.msg(Arc::ERROR, "Can not open job description file: %s", *it); return 1; } descriptionfile.seekg(0, std::ios::end); std::streamsize length = descriptionfile.tellg(); descriptionfile.seekg(0, std::ios::beg); char *buffer = new char[length + 1]; descriptionfile.read(buffer, length); descriptionfile.close(); buffer[length] = '\0'; std::list jobdescs; Arc::JobDescriptionResult parseres = Arc::JobDescription::Parse((std::string)buffer, jobdescs); if (parseres) { for (std::list::iterator itJ = jobdescs.begin(); itJ != jobdescs.end(); ++itJ) { itJ->Application.DryRun = opt.dryrun; for (std::list::iterator itJAlt = itJ->GetAlternatives().begin(); itJAlt != itJ->GetAlternatives().end(); ++itJAlt) { itJAlt->Application.DryRun = opt.dryrun; } } jobdescriptionlist.insert(jobdescriptionlist.end(), jobdescs.begin(), jobdescs.end()); } else { logger.msg(Arc::ERROR, "Invalid JobDescription:"); std::cout << buffer << std::endl; delete[] buffer; std::cerr << parseres.str() << std::endl; return 1; } delete[] buffer; } //Loop over job description input strings for (std::list::iterator it = opt.jobdescriptionstrings.begin(); it != opt.jobdescriptionstrings.end(); ++it) { std::list jobdescs; Arc::JobDescriptionResult parseres = Arc::JobDescription::Parse(*it, jobdescs); if (parseres) { for (std::list::iterator itJ = jobdescs.begin(); itJ != jobdescs.end(); ++itJ) { itJ->Application.DryRun = opt.dryrun; for (std::list::iterator itJAlt = itJ->GetAlternatives().begin(); itJAlt != itJ->GetAlternatives().end(); ++itJAlt) { itJAlt->Application.DryRun = opt.dryrun; } } jobdescriptionlist.insert(jobdescriptionlist.end(), jobdescs.begin(), jobdescs.end()); } else { logger.msg(Arc::ERROR, "Invalid JobDescription:"); std::cout << *it << std::endl; std::cerr << parseres.str() << std::endl; return 1; } } DelegationType delegation_type = UndefinedDelegation; if(!opt.getDelegationType(logger, 
usercfg, delegation_type)) return 1; AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } // canonicalize endpoint types if (!opt.canonicalizeARCInterfaceTypes(logger)) return 1; // get endpoint batches std::list > endpoint_batches; bool info_discovery = prepare_submission_endpoint_batches(usercfg, opt, endpoint_batches); // add rejectdiscovery if defined if (!opt.rejectdiscovery.empty()) usercfg.AddRejectDiscoveryURLs(opt.rejectdiscovery); // action: dumpjobdescription if (opt.dumpdescription) { if (!info_discovery) { logger.msg(Arc::ERROR,"Cannot adapt job description to the submission target when information discovery is turned off"); return 1; } // dump description only for priority submission interface, no fallbacks std::list services = endpoint_batches.front(); std::string req_sub_iface; if (!opt.submit_types.empty()) req_sub_iface = opt.submit_types.front(); return dumpjobdescription(usercfg, jobdescriptionlist, services, req_sub_iface); } // default action: start submission cycle return submit_jobs(usercfg, endpoint_batches, info_discovery, opt.jobidoutfile, jobdescriptionlist, delegation_type, opt.instances_min, opt.instances_max); } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arccat.cpp0000644000000000000000000000013215067751327021220 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.213456226 nordugrid-arc-7.1.1/src/clients/compute/arccat.cpp0000644000175000002070000002202115067751327023117 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arccat)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arccat"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_CAT, istring("[job ...]"), istring("The arccat command performs the cat " "command on the stdout, stderr or grid\n" "manager's error log of the job.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arccat", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arccat", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); it++) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } jobs = jobmaster.GetSelectedJobs(); if (jobs.empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } std::string resourceName; if (opt.show_joblog) { resourceName = "joblog"; } else if (opt.show_stderr) { resourceName = "stderr"; } else if (!opt.show_file.empty()) { resourceName = "session file"; } else { resourceName = 
"stdout"; } // saving to a temp file is necessary because chunks from server // may arrive out of order std::string filename = Glib::build_filename(Glib::get_tmp_dir(), "arccat.XXXXXX"); int tmp_h = Glib::mkstemp(filename); if (tmp_h == -1) { logger.msg(Arc::INFO, "Could not create temporary file \"%s\"", filename); logger.msg(Arc::ERROR, "Cannot create output of %s for any jobs", resourceName); return 1; } Arc::URL dst("stdio:///"+Arc::tostring(tmp_h)); if (!dst) { logger.msg(Arc::ERROR, "Cannot create output of %s for any jobs", resourceName); logger.msg(Arc::INFO, "Invalid destination URL %s", dst.str()); close(tmp_h); unlink(filename.c_str()); return 1; } Arc::URL stdoutdst("stdio:///stdout"); int retval = 0; for (std::list::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (!it->State || (!opt.status.empty() && std::find(opt.status.begin(), opt.status.end(), it->State()) == opt.status.end() && std::find(opt.status.begin(), opt.status.end(), it->State.GetGeneralState()) == opt.status.end())) { continue; } if (it->State == Arc::JobState::DELETED) { logger.msg(Arc::WARNING, "Job deleted: %s", it->JobID); retval = 1; continue; } // The job-log might be available before the job has started (middleware dependent). if (!opt.show_joblog && !it->State.IsFinished() && it->State != Arc::JobState::RUNNING && it->State != Arc::JobState::FINISHING) { logger.msg(Arc::WARNING, "Job has not started yet: %s", it->JobID); retval = 1; continue; } //if ((opt.show_joblog && it->LogDir.empty()) || // (!opt.show_joblog && opt.show_stderr && it->StdErr.empty()) || // (!opt.show_joblog && !opt.show_stderr && it->StdOut.empty())) { // logger.msg(Arc::ERROR, "Cannot determine the %s location: %s", resourceName, it->JobID); // retval = 1; // continue; //} Arc::Job::ResourceType resource; if (opt.show_joblog) { resource = Arc::Job::JOBLOG; } else if (opt.show_stderr) { resource = Arc::Job::STDERR; } else if (!opt.show_file.empty()) { switch((Arc::JobState::StateType)it->State) { case Arc::JobState::ACCEPTED: case Arc::JobState::PREPARING: case Arc::JobState::SUBMITTING: case Arc::JobState::HOLD: resource = Arc::Job::STAGEINDIR; break; case Arc::JobState::QUEUING: case Arc::JobState::RUNNING: case Arc::JobState::OTHER: default: resource = Arc::Job::SESSIONDIR; break; case Arc::JobState::FINISHING: case Arc::JobState::FINISHED: case Arc::JobState::FAILED: case Arc::JobState::KILLED: resource = Arc::Job::STAGEOUTDIR; break; } } else { resource = Arc::Job::STDOUT; } Arc::URL src; if(!it->GetURLToResource(resource, src)) { logger.msg(Arc::ERROR, "Cannot determine the %s location: %s", resourceName, it->JobID); retval = 1; continue; } if (!src) { logger.msg(Arc::ERROR, "Cannot create output of %s for job (%s): Invalid source %s", resourceName, it->JobID, src.str()); retval = 1; continue; } if (!opt.show_file.empty()) { src.ChangePath(src.Path()+"/"+opt.show_file); } if (!it->CopyJobFile(usercfg, src, dst, true)) { retval = 1; continue; } logger.msg(Arc::VERBOSE, "Catting %s for job %s", resourceName, it->JobID); // Use File DMC in order to handle proper writing to stdout (e.g. supporting redirection and piping from shell). 
if (!it->CopyJobFile(usercfg, dst, stdoutdst, true)) { retval = 1; continue; } } close(tmp_h); unlink(filename.c_str()); return retval; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcsync.1.in0000644000000000000000000000013015067751327021406 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 28 ctime=1759499031.2411534 nordugrid-arc-7.1.1/src/clients/compute/arcsync.1.in0000644000175000002070000000400215067751327023306 0ustar00mockbuildmock00000000000000[NAME] arcsync \- ARC Synchronize [EXTENDED DESCRIPTION] The ARC CLI keeps a local database of active jobs in the user's home directory (see \fBarcsub\fR(1)). If this file is lost, or the user wants to recreate the file on a different computer, the \fBarcsync\fR command can be used to recreate the file from the information available at the specified computing element or registry. Since the information about a job retrieved from a CE can be slightly out of date if the user has very recently submitted or removed a job, a warning is issued when this command is run. The \fB--force\fR option disables this warning. If the joblist is not empty when invoking synchronization, the old jobs will be merged with the new jobs, unless the .B --truncate option is given, in which case the joblist will first be cleaned of old jobs and then the new jobs will be added. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database, and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcresume.cpp0000644000000000000000000000013215067751327021751 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 30 ctime=1759499031.223565136 nordugrid-arc-7.1.1/src/clients/compute/arcresume.cpp0000644000175000002070000001165415067751327023662 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcresume)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcresume"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_RESUME, istring("[job ...]")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcresume", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcresume", types, logger); return 0; } for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { 
selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } int retval = (int)!jobmaster.Resume(); std::cout << Arc::IString("Jobs processed: %d, resumed: %d", jobmaster.GetIDsProcessed().size()+jobmaster.GetIDsNotProcessed().size(), jobmaster.GetIDsProcessed().size()) << std::endl; return retval; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcstat.cpp0000644000000000000000000000013215067751327021424 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 30 ctime=1759499031.224835212 nordugrid-arc-7.1.1/src/clients/compute/arcstat.cpp0000644000175000002070000001656415067751327023342 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcstat)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcstat"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_STAT, istring("[job ...]"), istring("The arcstat command is used for " "obtaining the status of jobs that have\n" "been submitted to Grid enabled resources.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcstat", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcstat", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if (!opt.sort.empty() && !opt.rsort.empty()) { logger.msg(Arc::ERROR, "The 'sort' and 'rsort' flags cannot be specified at the same time."); return 1; } if (!opt.rsort.empty()) { opt.sort = opt.rsort; } typedef bool (*JobSorting)(const Arc::Job&, const Arc::Job&); std::map orderings; orderings["jobid"] = &Arc::Job::CompareJobID; orderings["submissiontime"] = &Arc::Job::CompareSubmissionTime; orderings["jobname"] = &Arc::Job::CompareJobName; if (!opt.sort.empty() && orderings.find(opt.sort) == orderings.end()) { std::cerr << "Jobs cannot be sorted by \"" << opt.sort << "\", the following orderings are supported:" << std::endl; for (std::map::const_iterator it = orderings.begin(); it != orderings.end(); ++it) std::cerr << it->first << std::endl; return 1; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; 
return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); unsigned int queried_num = jobmaster.GetAllJobs().size(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (!opt.show_unavailable) { jobmaster.SelectValid(); } jobs = jobmaster.GetSelectedJobs(); if (queried_num == 0) { std::cout << Arc::IString("No jobs found, try later") << std::endl; return 1; } std::vector jobsSortable(jobs.begin(), jobs.end()); if (!opt.sort.empty()) { opt.rsort.empty() ? std::sort(jobsSortable.begin(), jobsSortable.end(), orderings[opt.sort]) : std::sort(jobsSortable.rbegin(), jobsSortable.rend(), orderings[opt.sort]); } if (!opt.show_json) { for (std::vector::const_iterator it = jobsSortable.begin(); it != jobsSortable.end(); ++it) { // Option 'long' (longlist) takes precedence over option 'print-jobids' (printids) if (opt.longlist || !opt.printids) { it->SaveToStream(std::cout, opt.longlist); } else { std::cout << it->JobID << std::endl; } } } else { std::cout << "{\"jobs\": ["; for (std::vector::const_iterator it = jobsSortable.begin(); it != jobsSortable.end(); ++it) { std::cout << (it==jobsSortable.begin()?"":",") << std::endl; if (opt.longlist || !opt.printids) { it->SaveToStreamJSON(std::cout, opt.longlist); } else { std::cout << "\"" << it->JobID << "\""; } } std::cout << std::endl; std::cout << "]}" << std::endl; } if (opt.show_unavailable) { jobmaster.SelectValid(); } unsigned int returned_info_num = jobmaster.GetSelectedJobs().size(); if (!opt.show_json) { std::cout << Arc::IString("Status of %d jobs was queried, %d jobs returned information", queried_num, returned_info_num) << std::endl; } return 0; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcget.cpp0000644000000000000000000000013215067751327021230 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.218764773 nordugrid-arc-7.1.1/src/clients/compute/arcget.cpp0000644000175000002070000001657215067751327023145 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcget)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcget"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_GET, istring("[job ...]"), istring("The arcget command is used for " "retrieving the results from a job.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcget", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcget", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.downloaddir.empty()) { if (!usercfg.JobDownloadDirectory().empty()) { opt.downloaddir = usercfg.JobDownloadDirectory(); logger.msg(Arc::INFO, "Job download directory from user configuration file: %s", opt.downloaddir); } else { logger.msg(Arc::INFO, "Job download directory will be created in present working directory."); } } else { logger.msg(Arc::INFO, "Job download directory: %s", opt.downloaddir); } for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIdentifier = jobidentifiers.begin(); itJIdentifier != jobidentifiers.end(); ++itJIdentifier) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIdentifier) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if 
(!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; delete jobstore; return 1; } if(!opt.downloaddir.empty()) { Arc::URL dirpath(opt.downloaddir); if(dirpath.Protocol() == "file") { if(!Arc::DirCreate(dirpath.Path(),S_IRWXU,true)) { std::string errstr = Arc::StrError(); logger.msg(Arc::ERROR, "Unable to create directory for storing results (%s) - %s", dirpath.Path(), errstr); return 1; } } } std::list downloaddirectories; int retval = (int)!jobmaster.Retrieve(opt.downloaddir, opt.usejobname, opt.forcedownload, downloaddirectories); for (std::list::const_iterator it = downloaddirectories.begin(); it != downloaddirectories.end(); ++it) { std::cout << Arc::IString("Results stored at: %s", *it) << std::endl; } unsigned int processed_num = jobmaster.GetIDsProcessed().size(); unsigned int retrieved_num = downloaddirectories.size(); unsigned int cleaned_num = 0; if (!opt.keep) { std::list retrieved = jobmaster.GetIDsProcessed(); // No need to clean selection because retrieved is subset of selected jobmaster.SelectByID(retrieved); if(!jobmaster.Clean()) { std::cout << Arc::IString("Warning: Some jobs were not removed from server") << std::endl; std::cout << Arc::IString(" Use arcclean to remove retrieved jobs from job list", usercfg.JobListFile()) << std::endl; retval = 1; } cleaned_num = jobmaster.GetIDsProcessed().size(); if (!jobstore->Remove(jobmaster.GetIDsProcessed())) { std::cout << Arc::IString("Warning: Failed removing jobs from file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Use arcclean to remove retrieved jobs from job list", usercfg.JobListFile()) << std::endl; retval = 1; } std::cout << Arc::IString("Jobs processed: %d, successfully retrieved: %d, successfully cleaned: %d", processed_num, retrieved_num, cleaned_num) << std::endl; } else { std::cout << Arc::IString("Jobs processed: %d, successfully retrieved: %d", processed_num, retrieved_num) << std::endl; } delete jobstore; return retval; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/submit.cpp0000644000000000000000000000013215067751327021266 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.825493043 30 ctime=1759499031.227497041 nordugrid-arc-7.1.1/src/clients/compute/submit.cpp0000644000175000002070000004340215067751327023173 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "submit.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "submit"); void HandleSubmittedJobs::addEntity(const Arc::Job& j) { std::cout << Arc::IString("Job submitted with jobid: %s", j.JobID) << std::endl; submittedJobs.push_back(j); } void HandleSubmittedJobs::write() const { if (!jobidfile.empty() && !Arc::Job::WriteJobIDsToFile(submittedJobs, jobidfile)) { logger.msg(Arc::WARNING, "Cannot write job IDs to file (%s)", jobidfile); } Arc::JobInformationStorage* jobStore = createJobInformationStorage(uc); if (jobStore == NULL || !jobStore->Write(submittedJobs)) { if (jobStore == NULL) { logger.msg(Arc::WARNING, "Unable to open job list file (%s), unknown format", uc.JobListFile()); } else { logger.msg(Arc::WARNING, "Failed to write job information to database (%s)", uc.JobListFile()); } logger.msg(Arc::WARNING, "To recover missing jobs, run arcsync"); } 
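// Note: the "successfully added" debug message below is emitted unconditionally, i.e. even when the write above failed and only warnings were logged.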
logger.msg(Arc::DEBUG, "Record about new job successfully added to the database (%s)", uc.JobListFile()); delete jobStore; } void HandleSubmittedJobs::printsummary(const std::list& originalDescriptions, const std::list& notsubmitted) const { if (originalDescriptions.size() > 1) { std::cout << std::endl << Arc::IString("Job submission summary:") << std::endl; std::cout << "-----------------------" << std::endl; std::cout << Arc::IString("%d of %d jobs were submitted", submittedJobs.size(), submittedJobs.size()+notsubmitted.size()) << std::endl; if (!notsubmitted.empty()) { std::cout << std::endl << Arc::IString("The following jobs were not submitted:") << std::endl; int jobnr = 1; for (std::list::const_iterator it = notsubmitted.begin(); it != notsubmitted.end(); ++it) { std::cout << " * " << Arc::IString("Job nr.") << " " << jobnr << ":" << std::endl; (*it)->SaveToStream(std::cout, "userlong"); jobnr++; } } } } int process_submission_status(Arc::SubmissionStatus status, const Arc::UserConfig& usercfg) { if (status.isSet(Arc::SubmissionStatus::BROKER_PLUGIN_NOT_LOADED)) { std::cerr << Arc::IString("ERROR: Unable to load broker %s", usercfg.Broker().first) << std::endl; return 2; } if (status.isSet(Arc::SubmissionStatus::NO_SERVICES)) { std::cerr << Arc::IString("ERROR: Job submission aborted because no resource returned any information") << std::endl; return 2; } if (status.isSet(Arc::SubmissionStatus::DESCRIPTION_NOT_SUBMITTED)) { std::cerr << Arc::IString("ERROR: One or multiple job descriptions was not submitted.") << std::endl; return 1; } return 0; } void check_missing_plugins(Arc::Submitter s, int is_error) { bool gridFTPJobPluginFailed = false; for (std::map::const_iterator it = s.GetEndpointSubmissionStatuses().begin(); it != s.GetEndpointSubmissionStatuses().end(); ++it) { if (it->first.InterfaceName == "org.nordugrid.gridftpjob" && it->second == Arc::EndpointSubmissionStatus::NOPLUGIN) { gridFTPJobPluginFailed = true; } } if (gridFTPJobPluginFailed) { Arc::LogLevel level = (is_error ? Arc::ERROR : Arc::INFO); std::string indent = (is_error ? " " : " "); logger.msg(level, "A computing resource using the GridFTP interface was requested, but\n" "%sthe corresponding plugin could not be loaded. Is the plugin installed?\n" "%sIf not, please install the package 'nordugrid-arc-plugins-globus'.\n" "%sDepending on your type of installation the package name might differ.", indent, indent, indent); } // TODO: What to do when failing to load other plugins. 
} int dumpjobdescription(const Arc::UserConfig& usercfg, const std::list& jobdescriptionlist, const std::list& services, const std::string& requestedSubmissionInterface) { int retval = 0; std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.ldapglue2"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } Arc::ComputingServiceUniq csu; Arc::ComputingServiceRetriever csr(usercfg, std::list(), usercfg.RejectDiscoveryURLs(), preferredInterfaceNames); csr.addConsumer(csu); for (std::list::const_iterator it = services.begin(); it != services.end(); ++it) { csr.addEndpoint(*it); } csr.wait(); std::list CEs = csu.getServices(); if (CEs.empty()) { std::cout << Arc::IString("Unable to adapt job description to any resource, no resource information could be obtained.") << std::endl; std::cout << Arc::IString("Original job description is listed below:") << std::endl; for (std::list::const_iterator it = jobdescriptionlist.begin(); it != jobdescriptionlist.end(); ++it) { std::string descOutput; it->UnParse(descOutput, it->GetSourceLanguage()); std::cout << descOutput << std::endl; } return 1; } Arc::Broker broker(usercfg, usercfg.Broker().first); if (!broker.isValid(false)) { logger.msg(Arc::ERROR, "Dumping job description aborted: Unable to load broker %s", usercfg.Broker().first); return 1; } Arc::ExecutionTargetSorter ets(broker, CEs); std::list::const_iterator itJAlt; // Iterator to use for alternative job descriptions. for (std::list::const_iterator itJ = jobdescriptionlist.begin(); itJ != jobdescriptionlist.end(); ++itJ) { const Arc::JobDescription* currentJobDesc = &*itJ; bool descriptionDumped = false; do { Arc::JobDescription jobdescdump(*currentJobDesc); ets.set(jobdescdump); for (ets.reset(); !ets.endOfList(); ets.next()) { if(!requestedSubmissionInterface.empty() && ets->ComputingEndpoint->InterfaceName != requestedSubmissionInterface) continue; if (!jobdescdump.Prepare(*ets)) { logger.msg(Arc::INFO, "Unable to prepare job description according to needs of the target resource (%s).", ets->ComputingEndpoint->URLString); continue; } std::string jobdesclang = "emies:adl"; if (ets->ComputingEndpoint->InterfaceName == "org.nordugrid.gridftpjob") { jobdesclang = "nordugrid:xrsl"; } else if (ets->ComputingEndpoint->InterfaceName == "org.ogf.glue.emies.activitycreation") { jobdesclang = "emies:adl"; } else if (ets->ComputingEndpoint->InterfaceName == "org.nordugrid.internal") { jobdesclang = "emies:adl"; } std::string jobdesc; if (!jobdescdump.UnParse(jobdesc, jobdesclang)) { logger.msg(Arc::INFO, "An error occurred during the generation of job description to be sent to %s", ets->ComputingEndpoint->URLString); continue; } std::cout << Arc::IString("Job description to be sent to %s:", ets->AdminDomain->Name) << std::endl; std::cout << jobdesc << std::endl; descriptionDumped = true; break; } if (!descriptionDumped && itJ->HasAlternatives()) { // Alternative job descriptions. 
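// On the first pass currentJobDesc still points at the primary job description, so start at the first alternative; on subsequent passes advance the iterator to the next alternative.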
if (currentJobDesc == &*itJ) { itJAlt = itJ->GetAlternatives().begin(); } else { ++itJAlt; } currentJobDesc = &*itJAlt; } } while (!descriptionDumped && itJ->HasAlternatives() && itJAlt != itJ->GetAlternatives().end()); if (ets.endOfList()) { std::cout << Arc::IString("Unable to prepare job description according to needs of the target resource.") << std::endl; retval = 1; } } //end loop over all job descriptions return retval; } bool prepare_submission_endpoint_batches(const Arc::UserConfig& usercfg, const ClientOptions& opt, std::list >& endpoint_batches) { bool info_discovery = true; // Computing element direct targets for (std::list::const_iterator it = opt.computing_elements.begin(); it != opt.computing_elements.end(); ++it) { if (opt.info_types.empty()) { std::list endpoints; // any interfaces can be used: start with discovery if (opt.submit_types.empty()) { Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)); service.RequestedSubmissionInterfaceName = ""; endpoints.push_back(service); } else { // discovery is disabled - submit directly in the defined interface order info_discovery = false; for (std::list::const_iterator sit = opt.submit_types.begin(); sit != opt.submit_types.end(); ++sit) { Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBSUBMIT)); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBCREATION)); service.InterfaceName = *sit; endpoints.push_back(service); } } endpoint_batches.push_back(endpoints); // add infointerfaces of all defined types when discovery is used } else { for (std::list::const_iterator sit = opt.submit_types.begin(); sit != opt.submit_types.end(); ++sit) { std::list endpoints; for (std::list::const_iterator iit = opt.info_types.begin(); iit != opt.info_types.end(); ++iit) { Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)); service.InterfaceName = *iit; service.RequestedSubmissionInterfaceName = *sit; endpoints.push_back(service); } endpoint_batches.push_back(endpoints); } } } // Query the registries for available endpoints if (!opt.registries.empty()) { Arc::EntityContainer registry_endpoints; // Get all service endpoints regardless of capabilities std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); std::list capabilityFilter; Arc::ServiceEndpointRetriever ser(usercfg, Arc::EndpointQueryOptions( true, capabilityFilter, rejectDiscoveryURLs)); ser.addConsumer(registry_endpoints); for (std::list::const_iterator it = opt.registries.begin(); it != opt.registries.end(); ++it) { Arc::Endpoint registry(*it); registry.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::REGISTRY)); ser.addEndpoint(registry); } ser.wait(); // Loop over endpoints returned by registry and match against interface types if ( !opt.info_types.empty() ) { for (std::list::const_iterator sit = opt.submit_types.begin(); sit != opt.submit_types.end(); ++sit) { std::list endpoints; for (Arc::EntityContainer::iterator eit = registry_endpoints.begin(); eit != registry_endpoints.end(); ++eit) { for (std::list::const_iterator iit = opt.info_types.begin(); iit != opt.info_types.end(); ++iit) { if ( eit->InterfaceName == *iit ) { Arc::Endpoint service(*eit); logger.msg(Arc::INFO, "Service endpoint %s (type %s) added to the list for resource discovery", eit->URLString, 
eit->InterfaceName); service.RequestedSubmissionInterfaceName = *sit; endpoints.push_back(service); } } } if (!endpoints.empty()) { endpoint_batches.push_back(endpoints); } else { logger.msg(Arc::WARNING, "There are no endpoints in registry that match requested info endpoint type"); } } // endpoint types was not requested at all } else if ( opt.submit_types.empty() ) { // try all infodiscovery endpoints but prioritize the interfaces in the following order std::list info_priority; info_priority.push_back("org.ogf.glue.emies.resourceinfo"); info_priority.push_back("org.nordugrid.arcrest"); info_priority.push_back("org.nordugrid.ldapglue2"); info_priority.push_back("org.nordugrid.ldapng"); for (std::list::const_iterator iit = info_priority.begin(); iit != info_priority.end(); ++iit) { std::list endpoints; for (Arc::EntityContainer::iterator eit = registry_endpoints.begin(); eit != registry_endpoints.end(); ++eit) { if ( eit->InterfaceName == *iit ) { Arc::Endpoint service(*eit); service.RequestedSubmissionInterfaceName = ""; endpoints.push_back(service); logger.msg(Arc::INFO, "Service endpoint %s (type %s) added to the list for resource discovery", eit->URLString, eit->InterfaceName); } } if (!endpoints.empty()) endpoint_batches.push_back(endpoints); } // it was requested to disable infodiscovery for targets } else { info_discovery = false; std::list endpoints; for (std::list::const_iterator sit = opt.submit_types.begin(); sit != opt.submit_types.end(); ++sit) { for (Arc::EntityContainer::iterator eit = registry_endpoints.begin(); eit != registry_endpoints.end(); ++eit) { if ( eit->InterfaceName == *sit ) { Arc::Endpoint service(*eit); service.Capability.clear(); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBSUBMIT)); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBCREATION)); service.InterfaceName = *sit; endpoints.push_back(service); logger.msg(Arc::INFO, "Service endpoint %s (type %s) added to the list for direct submission", eit->URLString, eit->InterfaceName); } } } if (!endpoints.empty()) { endpoint_batches.push_back(endpoints); } else { logger.msg(Arc::WARNING, "There are no endpoints in registry that match requested submission endpoint type"); } } } return info_discovery; } int submit_jobs(const Arc::UserConfig& usercfg, const std::list >& endpoint_batches, bool info_discovery, const std::string& jobidfile, const std::list& jobdescriptionlist, DelegationType delegation_type, int instances_min, int instances_max) { HandleSubmittedJobs hsj(jobidfile, usercfg); Arc::Submitter submitter(usercfg); submitter.addConsumer(hsj); std::list w_jobdescriptionlist(jobdescriptionlist); int error_check = 0; for(std::list::iterator it = w_jobdescriptionlist.begin(); it != w_jobdescriptionlist.end(); ++it) { it->X509Delegation = (delegation_type == X509Delegation); it->TokenDelegation = (delegation_type == TokenDelegation); it->InstancesMin = instances_min; it->InstancesMax = instances_max; for(std::list::iterator itAlt = it->GetAlternatives().begin(); itAlt != it->GetAlternatives().end(); ++itAlt) { itAlt->X509Delegation = (delegation_type == X509Delegation); itAlt->TokenDelegation = (delegation_type == TokenDelegation); itAlt->InstancesMin = instances_min; itAlt->InstancesMax = instances_max; } } for (std::list >::const_iterator it = endpoint_batches.begin(); it != endpoint_batches.end(); ++it) { Arc::SubmissionStatus status; if (info_discovery) { status = submitter.BrokeredSubmit(*it, w_jobdescriptionlist); } else { status = 
submitter.Submit(*it, w_jobdescriptionlist); } hsj.write(); error_check = process_submission_status(status, usercfg); if (error_check == 2) return 1; if (submitter.GetDescriptionsNotSubmitted().empty()) break; if (status.isSet(Arc::SubmissionStatus::SUBMITTER_PLUGIN_NOT_LOADED)) check_missing_plugins(submitter, error_check); // remove already submitted jobs from description list std::list failedjd = submitter.GetDescriptionsNotSubmitted(); std::list::iterator itOrig = w_jobdescriptionlist.begin(); while ( itOrig != w_jobdescriptionlist.end() ) { bool is_failedjd = false; for (std::list::const_iterator itFailed = failedjd.begin(); itFailed != failedjd.end(); ++itFailed) { if (&(*itOrig) == *itFailed) { is_failedjd = true; break; } } if (is_failedjd) { ++itOrig; continue; } w_jobdescriptionlist.erase(itOrig++); } } hsj.printsummary(jobdescriptionlist, submitter.GetDescriptionsNotSubmitted()); return error_check; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcrenew.1.in0000644000000000000000000000013215067751327021554 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.244336725 nordugrid-arc-7.1.1/src/clients/compute/arcrenew.1.in0000644000175000002070000000410715067751327023460 0ustar00mockbuildmock00000000000000[NAME] arcrenew \- ARC Proxy Renewal [EXTENDED DESCRIPTION] The .B arcrenew command renews the delegated x509 proxy of a job submitted to an ARC CE. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname, the proxies of all those jobs are renewed. If the .B --joblist option is used, the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the proxies of all active jobs will be renewed. The .B --computing-element option can be used to select or reject jobs at specific clusters. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database, and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arctest.cpp0000644000000000000000000000013215067751327021430 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.825493043 30 ctime=1759499031.231684684 nordugrid-arc-7.1.1/src/clients/compute/arctest.cpp0000644000175000002070000003231515067751327023336 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" #include "submit.h" #include "glibmm-compat.h" static Arc::Logger logger(Arc::Logger::getRootLogger(), "arcsub"); int test(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob, const std::string& jobidfile); int dumpjobdescription_arctest_legacy(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob); static bool get_hash_value(const Arc::Credential& c, std::string& hash_str); int RUNMAIN(arctest)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_TEST, istring(" "), istring("The arctest command is used for " "testing clusters as resources.")); std::list params = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arctest", VERSION) << std::endl; return 0; } if ((opt.testjobid == -1) && (!opt.show_credentials) && (!opt.show_plugins)) { std::cout << Arc::IString("Nothing to do:\n" "you have to either specify a test job id with -J (--job)\n" "or query information about the certificates with -E (--certificate)\n"); return 0; } if ((opt.testjobid == 1) && (!opt.runtime)) { std::cout << Arc::IString("For the 1st test job " "you also have to specify a runtime value with -r (--runtime) option."); return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
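// Otherwise messages emitted while the configuration is being loaded would be filtered out at the default WARNING threshold set above.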
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.allow_insecure_connection) usercfg.TLSAllowInsecure(true); if (opt.show_plugins) { std::list types; types.push_back("HED:SubmitterPlugin"); types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); types.push_back("HED:JobDescriptionParserPlugin"); types.push_back("HED:BrokerPlugin"); showplugins("arctest", types, logger, usercfg.Broker().first); return 0; } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.show_credentials) { const Arc::Time now; std::cout << Arc::IString("Certificate information:") << std::endl; std::string certificate_issuer = ""; if (usercfg.CertificatePath().empty()) { std::cout << " " << Arc::IString("No user-certificate found") << std::endl << std::endl; } else { Arc::Credential holder(usercfg.CertificatePath(), "", usercfg.CACertificatesDirectory(), "", usercfg.CAUseSystem(), usercfg.CAUseGrid()); std::cout << " " << Arc::IString("Certificate: %s", usercfg.CertificatePath()) << std::endl; if (!holder.GetDN().empty()) { std::cout << " " << Arc::IString("Subject name: %s", holder.GetDN()) << std::endl; std::cout << " " << Arc::IString("Valid until: %s", (std::string) holder.GetEndTime() ) << std::endl << std::endl; certificate_issuer = holder.GetIssuerName(); } else { std::cout << " " << Arc::IString("Unable to determine certificate information") << std::endl << std::endl; } } std::cout << Arc::IString("Proxy certificate information:") << std::endl; if (usercfg.ProxyPath().empty()) { std::cout << " " << Arc::IString("No proxy found") << std::endl << std::endl; } else { Arc::Credential holder(usercfg.ProxyPath(), "", usercfg.CACertificatesDirectory(), "", usercfg.CAUseSystem(), usercfg.CAUseGrid()); std::cout << " " << Arc::IString("Proxy: %s", usercfg.ProxyPath()) << std::endl; std::cout << " " << Arc::IString("Proxy-subject: %s", holder.GetDN()) << std::endl; if (holder.GetEndTime() < now) { std::cout << " " << Arc::IString("Valid for: Proxy expired") << std::endl << std::endl; } else if (!holder.GetVerification()) { std::cout << " " << Arc::IString("Valid for: Proxy not valid") << std::endl << std::endl; } else { std::cout << " " << Arc::IString("Valid for: %s", (holder.GetEndTime() - now).istr()) << std::endl << std::endl; } } if (!certificate_issuer.empty()) { std::cout << Arc::IString("Certificate issuer: %s", certificate_issuer) << std::endl << std::endl; } bool issuer_certificate_found = false; std::cout << Arc::IString("CA-certificates installed:") << std::endl; Glib::Dir cadir(usercfg.CACertificatesDirectory()); for (Glib::DirIterator it = cadir.begin(); it != cadir.end(); ++it) { std::string cafile = Glib::build_filename(usercfg.CACertificatesDirectory(), *it); // Assume certificates have file ending ".0", ".1" or ".2". Very OpenSSL specific. if (Glib::file_test(cafile, Glib::FILE_TEST_IS_REGULAR) && (*it)[(*it).size()-2] == '.' 
&& ((*it)[(*it).size()-1] == '0' || (*it)[(*it).size()-1] == '1' || (*it)[(*it).size()-1] == '2')) { Arc::Credential cred(cafile, "", "", "", false, false); std::string dn = cred.GetDN(); if (dn.empty()) continue; std::string hash; // Only accept certificates with correct hash. if (!get_hash_value(cred, hash) || hash != (*it).substr(0, (*it).size()-2)) continue; if (dn == certificate_issuer) issuer_certificate_found = true; std::cout << " " << dn << std::endl; } } if (certificate_issuer.empty()) { std::cout << std::endl << Arc::IString("Unable to detect if issuer certificate is installed.") << std::endl; } else if (!issuer_certificate_found) { logger.msg(Arc::WARNING, "Your issuer's certificate is not installed"); } return EXIT_SUCCESS; } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); if (!opt.broker.empty()) usercfg.Broker(opt.broker); Arc::JobDescription testJob; if (!Arc::JobDescription::GetTestJob(opt.testjobid, testJob)) { std::cout << Arc::IString("No test-job, with ID \"%d\"", opt.testjobid) << std::endl; return 1; } DelegationType delegation_type = UndefinedDelegation; if(!opt.getDelegationType(logger, usercfg, delegation_type)) return 1; AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } // Set user input variables into job description if (opt.testjobid == 1) { testJob.Application.Executable.Argument.back() = Arc::tostring(opt.runtime); testJob.Resources.TotalCPUTime = (opt.runtime+3)*60; for ( std::map::iterator iter = testJob.OtherAttributes.begin(); iter != testJob.OtherAttributes.end(); ++iter ) { char buffer [iter->second.length()+255]; sprintf(buffer, iter->second.c_str(), opt.runtime, opt.runtime+3); iter->second = (std::string) buffer; } } // arctest only works with single test job in jobdescription list std::list jobdescriptionlist; jobdescriptionlist.push_back(testJob); // canonicalize endpoint types if (!opt.canonicalizeARCInterfaceTypes(logger)) return 1; // get endpoint batches std::list > endpoint_batches; bool info_discovery = prepare_submission_endpoint_batches(usercfg, opt, endpoint_batches); // add rejectdiscovery if defined if (!opt.rejectdiscovery.empty()) usercfg.AddRejectDiscoveryURLs(opt.rejectdiscovery); // action: dumpjobdescription if (opt.dumpdescription) { if (!info_discovery) { logger.msg(Arc::ERROR,"Cannot adapt job description to the submission target when information discovery is turned off"); return 1; } // dump description only for priority submission interface, no fallbacks std::list services = endpoint_batches.front(); std::string req_sub_iface; if (!opt.submit_types.empty()) req_sub_iface = opt.submit_types.front(); return dumpjobdescription(usercfg, jobdescriptionlist, services, req_sub_iface); } // default action: start submission cycle return submit_jobs(usercfg, endpoint_batches, info_discovery, opt.jobidoutfile, jobdescriptionlist, delegation_type, opt.instances_min, opt.instances_max); } void printjobid(const std::string& jobid, const std::string& jobidfile) { if (!jobidfile.empty()) if (!Arc::Job::WriteJobIDToFile(jobid, 
jobidfile)) logger.msg(Arc::WARNING, "Cannot write jobid (%s) to file (%s)", jobid, jobidfile); std::cout << Arc::IString("Test submitted with jobid: %s", jobid) << std::endl; } int test(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob, const std::string& jobidfile) { int retval = 0; std::list jobids; std::list submittedJobs; std::map notsubmitted; submittedJobs.push_back(Arc::Job()); for (ets.reset(); !ets.endOfList(); ets.next()) { if (ets->Submit(usercfg, testJob, submittedJobs.back())) { printjobid(submittedJobs.back().JobID, jobidfile); std::cout << Arc::IString("Computing service: %s", ets->ComputingService->Name) << std::endl; break; } } if (ets.endOfList()) { std::cout << Arc::IString("Test failed, no more possible targets") << std::endl; submittedJobs.pop_back(); retval = 1; } Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore == NULL) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); return 1; } if (!jobstore->Write(submittedJobs)) { std::cout << Arc::IString("Warning: Failed to write job information to file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString("To recover missing jobs, run arcsync") << std::endl; } delete jobstore; return retval; } int dumpjobdescription_arctest_legacy(const Arc::UserConfig& usercfg, Arc::ExecutionTargetSorter& ets, const Arc::JobDescription& testJob) { for (ets.reset(); !ets.endOfList(); ets.next()) { Arc::JobDescription preparedTestJob(testJob); std::string jobdesc; // Prepare the test jobdescription according to the chosen ExecutionTarget if (!preparedTestJob.Prepare(*ets)) { logger.msg(Arc::INFO, "Unable to prepare job description according to needs of the target resource (%s).", ets->ComputingEndpoint->URLString); continue; } std::string jobdesclang = "emies:adl"; if (ets->ComputingEndpoint->InterfaceName == "org.nordugrid.gridftpjob") { jobdesclang = "nordugrid:xrsl"; } if (!preparedTestJob.UnParse(jobdesc, jobdesclang)) { logger.msg(Arc::INFO, "An error occurred during the generation of job description to be sent to %s", ets->ComputingEndpoint->URLString); continue; } std::cout << Arc::IString("Job description to be sent to %s:", ets->AdminDomain->Name) << std::endl; std::cout << jobdesc << std::endl; break; } return (!ets.endOfList()); } static bool get_hash_value(const Arc::Credential& c, std::string& hash_str) { X509* cert = c.GetCert(); if(!cert) return false; X509_NAME* cert_name = X509_get_subject_name(cert); if(!cert_name) return false; char hash[32]; memset(hash, 0, 32); snprintf(hash, 32, "%08lx", X509_NAME_hash(cert_name)); hash_str = hash; X509_free(cert); return true; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arckill.cpp0000644000000000000000000000013215067751327021404 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.221168387 nordugrid-arc-7.1.1/src/clients/compute/arckill.cpp0000644000175000002070000001424315067751327023312 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arckill)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arckill"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); 
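// All log output goes to stderr in ARC's short format; the WARNING threshold set next is only a default and may be overridden below by -d/--debug or by the configuration file.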
Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_KILL, istring("[job ...]"), istring("The arckill command is used to kill " "running jobs.")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arckill", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arckill", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); 
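// The JobSupervisor drives the management flow below: Update() refreshes the status of the loaded jobs from the services, SelectValid()/SelectByStatus() narrow the working set, and Cancel() then acts only on the current selection.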
jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; delete jobstore; return 1; } int retval = (int)!jobmaster.Cancel(); unsigned int selected_num = jobmaster.GetSelectedJobs().size(); unsigned int canceled_num = jobmaster.GetIDsProcessed().size(); unsigned int cleaned_num = 0; if (!opt.keep) { std::list<std::string> canceled = jobmaster.GetIDsProcessed(); // No need to clean selection because canceled is a subset of selected jobmaster.SelectByID(canceled); if(!jobmaster.Clean()) { std::cout << Arc::IString("Warning: Some jobs were not removed from server") << std::endl; std::cout << Arc::IString(" Use arcclean to remove retrieved jobs from job list", usercfg.JobListFile()) << std::endl; retval = 1; } cleaned_num = jobmaster.GetIDsProcessed().size(); if (!jobstore->Remove(jobmaster.GetIDsProcessed())) { std::cout << Arc::IString("Warning: Failed removing jobs from file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Run 'arcclean -s Undefined' to remove killed jobs from job list") << std::endl; retval = 1; } std::cout << Arc::IString("Jobs processed: %d, successfully killed: %d, successfully cleaned: %d", selected_num, canceled_num, cleaned_num) << std::endl; } else { std::cout << Arc::IString("Jobs processed: %d, successfully killed: %d", selected_num, canceled_num) << std::endl; } delete jobstore; return retval; }
nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcsub.1.in0000644000000000000000000000013215067751327021225 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 30 ctime=1759499031.233073088 nordugrid-arc-7.1.1/src/clients/compute/arcsub.1.in0000644000175000002070000000436215067751327023134 0ustar00mockbuildmock00000000000000[NAME] arcsub \- ARC Submission [EXTENDED DESCRIPTION] \fBarcsub\fR is the key command when submitting jobs to distributed computing resources with the ARC client. Job submission can be accomplished by specifying a job description file to submit as an argument. \fBarcsub\fR will then by default perform resource discovery, match the discovered resources to the job description, and rank them according to the chosen broker (\fB--broker\fR option). The brokers available can be seen using .B arcsub -P. Please refer to the ARC documentation at http://www.nordugrid.org/arc for more information about job submission and management. [EXAMPLES] Submission of a job description file "helloworld.xrsl" to a specified compute element: .br \fBarcsub -C ce.example.com helloworld.xrsl\fR Submission of a job description file "helloworld.xrsl" to compute elements discovered via the global NorduGrid registry: .br \fBarcsub -Y nordugrid.org helloworld.xrsl\fR Direct submission to a CE without querying the information system: .br \fBarcsub -Q NONE -T arcrest -C ce.example.com helloworld.xrsl\fR [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a configuration file other than the default can be used. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a file other than the default can be used. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsync (1), .BR arctest (1)
nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcsync.cpp0000644000000000000000000000013215067751327021425 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 30 ctime=1759499031.230457872 nordugrid-arc-7.1.1/src/clients/compute/arcsync.cpp0000644000175000002070000002274315067751327023337 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" class JobSynchronizer : public Arc::EntityConsumer<Arc::Endpoint> { public: JobSynchronizer( const Arc::UserConfig& uc, const std::list<Arc::Endpoint>& services, const std::list<std::string>& rejectedServices = std::list<std::string>(), const std::set<std::string>& preferredInterfaceNames = std::set<std::string>(), const std::list<std::string>& capabilityFilter = std::list<std::string>(1, Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)) ) : uc(uc), ser(uc, Arc::EndpointQueryOptions<Arc::Endpoint>(true, capabilityFilter, rejectedServices)), jlr(uc, Arc::EndpointQueryOptions<Arc::Job>(preferredInterfaceNames)) { jlr.needAllResults(); ser.addConsumer(*this); jlr.addConsumer(jobs); for (std::list<Arc::Endpoint>::const_iterator it = services.begin(); it != services.end(); ++it) { if (it->HasCapability(Arc::Endpoint::REGISTRY)) { ser.addEndpoint(*it); } else { jlr.addEndpoint(*it); } } } void wait() { ser.wait(); jlr.wait(); } void addEntity(const Arc::Endpoint& service) { if (service.HasCapability(Arc::Endpoint::COMPUTINGINFO)) { jlr.addEndpoint(service); } } bool writeJobs(bool truncate) { bool jobsWritten = false; bool jobsReported = false; Arc::JobInformationStorage *jobstore = createJobInformationStorage(uc); if (jobstore == NULL) { std::cerr << Arc::IString("Warning: Unable to open job list file (%s), unknown format", uc.JobListFile()) << std::endl; return false; } // Write extracted job info to joblist if (truncate) { jobstore->Clean(); if ( (jobsWritten = jobstore->Write(jobs)) ) { for (std::list<Arc::Job>::const_iterator it = jobs.begin(); it != jobs.end(); ++it) { if (!jobsReported) { std::cout << Arc::IString("Found the following jobs:") << std::endl; jobsReported = true; } if (!it->Name.empty()) { std::cout << it->Name << " (" << it->JobID << ")" << std::endl; } else { std::cout << it->JobID << std::endl; } } std::cout << Arc::IString("Total number of jobs found: ") << jobs.size() << std::endl; } } else { std::list<const Arc::Job*> newJobs; std::set<std::string> prunedServices; jlr.getServicesWithStatus(Arc::EndpointQueryingStatus::SUCCESSFUL, prunedServices); if ( (jobsWritten = jobstore->Write(jobs, prunedServices, newJobs)) ) { for (std::list<const Arc::Job*>::const_iterator it = newJobs.begin(); it != newJobs.end(); ++it) { if (!jobsReported) { std::cout << Arc::IString("Found the following new jobs:") << std::endl; jobsReported = true; } if (!(*it)->Name.empty()) { std::cout << (*it)->Name << " (" << (*it)->JobID << ")" << std::endl; } else { std::cout << (*it)->JobID << std::endl; } } std::cout << Arc::IString("Total number of new jobs found: ") << newJobs.size() << std::endl; } } delete jobstore; if
(!jobsWritten) { std::cout << Arc::IString("ERROR: Failed to write job information to file (%s)", uc.JobListFile()) << std::endl; return false; } return true; } private: const Arc::UserConfig& uc; Arc::ServiceEndpointRetriever ser; Arc::JobListRetriever jlr; Arc::EntityContainer jobs; }; int RUNMAIN(arcsync)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcsync"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_SYNC, " ", istring("The arcsync command synchronizes your " "local job list with the information at\n" "the given CEs or registry servers.")); std::list params = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcsync", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobListRetrieverPlugin"); showplugins("arcsync", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.convert) { Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore == NULL) { std::cerr << Arc::IString("Warning: Unable to open job list file (%s), unknown format", usercfg.JobListFile()) << std::endl; return 1; } // Read current jobs std::list jobs; if (!jobstore->ReadAll(jobs)) { std::cerr << Arc::IString("Warning: Unable to read local list of jobs from file (%s)", usercfg.JobListFile()) << std::endl; return 1; } // Delete existing database so new on is created with specified format if (!jobstore->Clean()) { std::cerr << Arc::IString("Warning: Unable to truncate local list of jobs in file (%s)", usercfg.JobListFile()) << std::endl; return 1; } delete jobstore; jobstore = createJobInformationStorage(usercfg); if (jobstore == NULL) { std::cerr << Arc::IString("Warning: Unable to create job list file (%s), jobs list is destroyed", usercfg.JobListFile()) << std::endl; return 1; } if (!jobstore->Write(jobs)) { std::cerr << Arc::IString("Warning: Failed to write local list of jobs into file (%s), jobs list is destroyed", usercfg.JobListFile()) << std::endl; return 1; } return 0; } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if (opt.debug.empty() && 
!usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); //sanity check if (!opt.forcesync) { std::cout << Arc::IString("Synchronizing the local list of active jobs with the information in the\n" "information system can result in some inconsistencies. Very recently submitted\n" "jobs might not yet be present, whereas jobs very recently scheduled for\n" "deletion can still be present." ) << std::endl; std::cout << Arc::IString("Are you sure you want to synchronize your local job list?") << " [" << Arc::IString("y") << "/" << Arc::IString("n") << "] "; std::string response; std::cin >> response; if (Arc::lower(response) != std::string(Arc::FindTrans("y"))) { std::cout << Arc::IString("Cancelling synchronization request") << std::endl; return 0; } } std::list endpoints = getServicesFromUserConfigAndCommandLine(usercfg, opt.registries, opt.computing_elements); std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); if (endpoints.empty()) { logger.msg(Arc::ERROR, "No services specified. Please configure default services in the client configuration, " "or specify a cluster or registry (-C or -Y options, see arcsync -h)."); return 1; } std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.arcrest"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } JobSynchronizer js(usercfg, endpoints, rejectDiscoveryURLs, preferredInterfaceNames); js.wait(); return js.writeJobs(opt.truncate)?0:1; // true -> 0, false -> 1. } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcrenew.cpp0000644000000000000000000000013215067751327021571 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.222211937 nordugrid-arc-7.1.1/src/clients/compute/arcrenew.cpp0000644000175000002070000001165015067751327023476 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcrenew)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcrenew"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_RENEW, istring("[job ...]")); std::list jobidentifiers = opt.Parse(argc, argv); if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcrenew", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
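// The 'verbosity' setting from the loaded configuration is applied further below, and only if no -d/--debug option was given on the command line.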
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcrenew", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } delete jobstore; if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } if (jobmaster.GetSelectedJobs().empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } int retval = (int)!jobmaster.Renew(); std::cout << Arc::IString("Jobs processed: %d, renewed: %d", jobmaster.GetIDsProcessed().size()+jobmaster.GetIDsNotProcessed().size(), jobmaster.GetIDsProcessed().size()) << std::endl; return 
retval; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/utils.cpp0000644000000000000000000000013215067751327021123 xustar0030 mtime=1759498967.678490809 30 atime=1759498967.825493043 30 ctime=1759499031.215202461 nordugrid-arc-7.1.1/src/clients/compute/utils.cpp0000644000175000002070000006715415067751327023042 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "utils.h" #include #include #include "glibmm-compat.h" ConsoleRecovery::ConsoleRecovery(void) { ti = new termios; if (tcgetattr(STDIN_FILENO, ti) == 0) return; delete ti; ti = NULL; } ConsoleRecovery::~ConsoleRecovery(void) { if(ti) tcsetattr(STDIN_FILENO, TCSANOW, ti); delete ti; } std::list getSelectedURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list computingelements) { std::list endpoints = getServicesFromUserConfigAndCommandLine(usercfg, std::list(), computingelements); std::list serviceURLs; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { serviceURLs.push_back(it->URLString); } return serviceURLs; } std::list getRejectDiscoveryURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list rejectdiscovery) { std::list rejectDiscoveryURLs = usercfg.RejectDiscoveryURLs(); rejectDiscoveryURLs.insert(rejectDiscoveryURLs.end(), rejectdiscovery.begin(), rejectdiscovery.end()); return rejectDiscoveryURLs; } std::list getRejectManagementURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list rejectmanagement) { std::list rejectManagementURLs = usercfg.RejectManagementURLs(); rejectManagementURLs.insert(rejectManagementURLs.end(), rejectmanagement.begin(), rejectmanagement.end()); return rejectManagementURLs; } std::list getServicesFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list registries, std::list computingelements, std::string requestedSubmissionInterfaceName, std::string infointerface) { std::list services; if (computingelements.empty() && registries.empty()) { std::list endpoints = usercfg.GetDefaultServices(); for (std::list::const_iterator its = endpoints.begin(); its != endpoints.end(); ++its) { services.push_back(*its); } } else { for (std::list::const_iterator it = computingelements.begin(); it != computingelements.end(); ++it) { // check if the string is a group or alias std::list newServices = usercfg.GetServices(*it, Arc::ConfigEndpoint::COMPUTINGINFO); if (newServices.empty()) { // if it was not an alias or a group, then it should be the URL Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO)); if (!infointerface.empty()) { service.InterfaceName = infointerface; } service.RequestedSubmissionInterfaceName = requestedSubmissionInterfaceName; services.push_back(service); } else { // if it was a group (or an alias), add all the services for (std::list::iterator its = newServices.begin(); its != newServices.end(); ++its) { if (!requestedSubmissionInterfaceName.empty()) { // if there was a submission interface requested, this overrides the one from the config its->RequestedSubmissionInterfaceName = requestedSubmissionInterfaceName; } services.push_back(*its); } } } for (std::list::const_iterator it = registries.begin(); it != registries.end(); ++it) { // check if the string is a name of a group std::list newServices = usercfg.GetServices(*it, Arc::ConfigEndpoint::REGISTRY); if (newServices.empty()) { // if it was not an alias or a group, then it should 
be the URL Arc::Endpoint service(*it); service.Capability.insert(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::REGISTRY)); services.push_back(service); } else { // if it was a group (or an alias), add all the services services.insert(services.end(), newServices.begin(), newServices.end()); } } } return services; } void showplugins(const std::string& program, const std::list& types, Arc::Logger& logger, const std::string& chosenBroker) { for (std::list::const_iterator itType = types.begin(); itType != types.end(); ++itType) { if (*itType == "HED:SubmitterPlugin") { std::cout << Arc::IString("Types of execution services that %s is able to submit jobs to:", program) << std::endl; } else if (*itType == "HED:ServiceEndpointRetrieverPlugin") { std::cout << Arc::IString("Types of registry services that %s is able to collect information from:", program) << std::endl; } else if (*itType == "HED:TargetInformationRetrieverPlugin") { std::cout << Arc::IString("Types of local information services that %s is able to collect information from:", program) << std::endl; } else if (*itType == "HED:JobListRetriever") { std::cout << Arc::IString("Types of local information services that %s is able to collect job information from:", program) << std::endl; } else if (*itType == "HED:JobControllerPlugin") { std::cout << Arc::IString("Types of services that %s is able to manage jobs at:", program) << std::endl; } else if (*itType == "HED:JobDescriptionParserPlugin") { std::cout << Arc::IString("Job description languages supported by %s:", program) << std::endl; } else if (*itType == "HED:BrokerPlugin") { std::cout << Arc::IString("Brokers available to %s:", program) << std::endl; } std::list modules; Arc::PluginsFactory pf(Arc::BaseConfig().MakeConfig(Arc::Config()).Parent()); bool isDefaultBrokerLocated = false; pf.scan(Arc::FinderLoader::GetLibrariesList(), modules); Arc::PluginsFactory::FilterByKind(*itType, modules); for (std::list::iterator itMod = modules.begin(); itMod != modules.end(); ++itMod) { for (std::list::iterator itPlug = itMod->plugins.begin(); itPlug != itMod->plugins.end(); ++itPlug) { std::cout << " " << itPlug->name; if (*itType == "HED:BrokerPlugin" && itPlug->name == chosenBroker) { std::cout << " (default)"; isDefaultBrokerLocated = true; } std::cout << " - " << itPlug->description << std::endl; } } if (*itType == "HED:BrokerPlugin" && !isDefaultBrokerLocated) { logger.msg(Arc::WARNING, "Default broker (%s) is not available. When using %s a broker should be specified explicitly (-b option).", chosenBroker, program); } } } bool checkproxy(const Arc::UserConfig& uc) { if (!uc.ProxyPath().empty() ) { Arc::Credential holder(uc.ProxyPath(), "", "", "", false, false); if (holder.GetEndTime() < Arc::Time()){ std::cout << Arc::IString("Proxy expired. Job submission aborted. Please run 'arcproxy'!") << std::endl; return false; } } else { std::cout << Arc::IString("Cannot find any proxy. This application currently cannot run without a proxy.\n" " If you have the proxy file in a non-default location,\n" " please make sure the path is specified in the client configuration file.\n" " If you don't have a proxy yet, please run 'arcproxy'!") << std::endl; return false; } return true; } bool checktoken(const Arc::UserConfig& uc) { if(uc.OToken().empty()) { std::cout << Arc::IString("Cannot find any token. 
Please run 'oidc-token' or use similar\n" " utility to obtain authentication token!") << std::endl; return false; } return true; } static bool urlisinsecure(Arc::URL const & url) { std::string protocol = url.Protocol(); return protocol.empty() || (protocol == "http") || (protocol == "ftp") || (protocol == "ldap"); } bool jobneedsproxy(const Arc::JobDescription& job) { // Check if X.509 credentials are needed for data staging std::list inputFiles = job.DataStaging.InputFiles; for(std::list::iterator fileIt = inputFiles.begin(); fileIt != inputFiles.end(); ++fileIt) { for(std::list::iterator sourceIt = fileIt->Sources.begin(); sourceIt != fileIt->Sources.end(); ++sourceIt) { if(!urlisinsecure(*sourceIt)) { return true; } } } std::list outputFiles = job.DataStaging.OutputFiles; for(std::list::iterator fileIt = outputFiles.begin(); fileIt != outputFiles.end(); ++fileIt) { for(std::list::iterator targetIt = fileIt->Targets.begin(); targetIt != fileIt->Targets.end(); ++targetIt) { if(!urlisinsecure(*targetIt)) { return true; } } } return false; } void splitendpoints(std::list& selected, std::list& rejected) { // Removes slashes from end of endpoint strings, and put strings with leading '-' into rejected list. for (std::list::iterator it = selected.begin(); it != selected.end();) { if ((*it)[it->length()-1] == '/') { it->erase(it->length()-1); continue; } if (it->empty()) { it = selected.erase(it); continue; } if ((*it)[0] == '-') { rejected.push_back(it->substr(1)); it = selected.erase(it); } else { ++it; } } } Arc::JobInformationStorage* createJobInformationStorage(const Arc::UserConfig& uc) { Arc::JobInformationStorage* jis = NULL; if (Glib::file_test(uc.JobListFile(), Glib::FILE_TEST_EXISTS)) { for (int i = 0; Arc::JobInformationStorage::AVAILABLE_TYPES[i].name != NULL; ++i) { jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[i].instance)(uc.JobListFile()); if (jis && jis->IsValid()) { return jis; } delete jis; } return NULL; } for (int i = 0; Arc::JobInformationStorage::AVAILABLE_TYPES[i].name != NULL; ++i) { if (uc.JobListType() == Arc::JobInformationStorage::AVAILABLE_TYPES[i].name) { jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[i].instance)(uc.JobListFile()); if (jis && jis->IsValid()) { return jis; } delete jis; return NULL; } } if (Arc::JobInformationStorage::AVAILABLE_TYPES[0].instance != NULL) { jis = (Arc::JobInformationStorage::AVAILABLE_TYPES[0].instance)(uc.JobListFile()); if (jis && jis->IsValid()) { return jis; } delete jis; } return NULL; } bool ClientOptions::canonicalizeARCInterfaceTypes(Arc::Logger& logger) { std::string s(requested_submission_endpoint_type); std::string i(requested_info_endpoint_type); // canonicalize submission endpoint if ( !s.empty() ) { if (s.find(".") == std::string::npos) { s = "org.nordugrid." + s; } } // canonicalize information endpoint if ( !i.empty() && Arc::lower(i) != "none" ) { if (i.find(".") == std::string::npos ) { i = "org.nordugrid." 
+ i; } else if ( i == "ldap.nordugrid" ) { i = "org.nordugrid.ldapng"; } else if ( i == "ldap.glue2" ) { i = "org.nordugrid.ldapglue2"; } } // nothing specified - any interface can be used if ( s.empty() && i.empty() ) return true; // define info based on submission (and verify submission type is supported) if ( !s.empty() ) { const std::string notify_template = "Automatically adding %s information endpoint type based on desired submission interface"; if ( s == "org.nordugrid.arcrest" ) { if ( i.empty() ) { logger.msg(Arc::VERBOSE, notify_template, "org.nordugrid.arcrest"); info_types.push_back("org.nordugrid.arcrest"); } } else if ( s == "org.nordugrid.internal" ) { if ( i.empty() ) { logger.msg(Arc::VERBOSE, notify_template, "org.nordugrid.internal"); info_types.push_back("org.nordugrid.internal"); } } else { logger.msg(Arc::ERROR, "Unsupported submission endpoint type: %s", s); return false; } submit_types.push_back(s); } // define submission type based on info (and verify info type is supported) if ( !i.empty() ) { const std::string notify_template = "Add arcrest submission endpoint type."; if ( s.empty() ) { logger.msg(Arc::VERBOSE, notify_template, "org.nordugrid.arcrest"); submit_types.push_back("org.nordugrid.arcrest"); } else if ( i == "org.nordugrid.internal" ) { if ( s.empty() ) { logger.msg(Arc::VERBOSE, notify_template, "org.nordugrid.internal"); submit_types.push_back("org.nordugrid.internal"); } } else if ( Arc::lower(i) == "none" ) { if ( s.empty() ) { logger.msg(Arc::VERBOSE, "Requested to skip resource discovery. Will try direct submission to arcrest endpoint type."); submit_types.push_back("org.nordugrid.arcrest"); } return true; } else { logger.msg(Arc::ERROR, "Unsupported information endpoint type: %s", i); return false; } info_types.push_back(i); } return true; } ClientOptions::ClientOptions(Client_t c, const std::string& arguments, const std::string& summary, const std::string& description) : Arc::OptionParser(arguments, summary, description), dryrun(false), dumpdescription(false), show_credentials(false), show_plugins(false), showversion(false), all(false), keep(false), forcesync(false), truncate(false), convert(false), longlist(false), printids(false), forceclean(false), show_stdout(true), show_stderr(false), show_joblog(false), show_json(false), usejobname(false), forcedownload(false), direct_submission(false), show_unavailable(false), no_delegation(false), x509_delegation(false), token_delegation(false), no_authentication(false), x509_authentication(false), token_authentication(false), force_system_ca(false), force_grid_ca(false), force_any_ca(false), allow_insecure_connection(false), testjobid(-1), runtime(5), timeout(-1), instances_min(1), instances_max(1) { bool cIsJobMan = (c == CO_CAT || c == CO_CLEAN || c == CO_GET || c == CO_KILL || c == CO_RENEW || c == CO_RESUME || c == CO_STAT || c == CO_ACL); DefineOptionsGroup("xaction", istring("Other actions")); DefineOptionsGroup("filtering", istring("Brokering and filtering")); DefineOptionsGroup("format", istring("Output format modifiers")); DefineOptionsGroup("tuning", istring("Behaviour tuning")); DefineOptionsGroup("arc-target", istring("Target endpoint selection")); if ( c == CO_SUB || c == CO_TEST || c == CO_SYNC || c == CO_INFO ) { GroupAddOption("arc-target", 'C', "computing-element", istring("computing element hostname or a complete endpoint URL"), istring("ce"), computing_elements); GroupAddOption("arc-target", 'Y', "registry", istring("registry service URL with optional specification of protocol"), 
istring("registry"), registries); } else { GroupAddOption("filtering", 'C', "computing-element", istring("only select jobs that were submitted to this computing element"), istring("ce"), computing_elements); } if ( c == CO_SUB || c == CO_TEST ) { GroupAddOption("arc-target", 'T', "submission-endpoint-type", istring("require the specified endpoint type for job submission.\n" "\tAllowed values are: arcrest and internal."), istring("type"), requested_submission_endpoint_type); } if (c == CO_SUB || c == CO_TEST || c == CO_INFO) { GroupAddOption("filtering", 'R', "rejectdiscovery", istring("skip the service with the given URL during service discovery"), istring("URL"), rejectdiscovery); GroupAddOption("arc-target", 'Q', "info-endpoint-type", istring("require information query using the specified information endpoint type.\n" "\tSpecial value 'NONE' will disable all resource information queries and the following brokering.\n" "\tAllowed values are: ldap.nordugrid, ldap.glue2, arcrest and internal."), istring("type"), requested_info_endpoint_type); } if (c == CO_INFO) { GroupAddOption("arc-target", 'T', "submission-endpoint-type", istring("only get information about executon targets that support this job submission endpoint type.\n" "\tAllowed values are: arcrest and internal."), istring("type"), requested_submission_endpoint_type); } if (c == CO_GET || c == CO_KILL ) { GroupAddOption("tuning", 'k', "keep", istring("keep the files on the server (do not clean)"), keep); } if (c == CO_SYNC) { GroupAddOption("tuning", 'f', "force", istring("do not ask for verification"), forcesync); GroupAddOption("tuning", 'T', "truncate", istring("truncate the joblist before synchronizing"), truncate); GroupAddOption("xaction", 0, "convert", istring("do not collect information, only convert jobs storage format"), convert); } if (c == CO_INFO || c == CO_STAT) { GroupAddOption("format", 'l', "long", istring("long format (more information)"), longlist); } if (c == CO_CAT) { GroupAddOption("xaction", 'o', "stdout", istring("show the stdout of the job (default)"), show_stdout); GroupAddOption("xaction", 'e', "stderr", istring("show the stderr of the job"), show_stderr); GroupAddOption("xaction", 'l', "joblog", istring("show the CE's error log of the job"), show_joblog); GroupAddOption("xaction", 'f', "file", istring("show the specified file from job's session directory"), istring("filepath"), show_file); } if (c == CO_GET) { GroupAddOption("tuning", 'D', "dir", istring("download directory (the job directory will" " be created in this directory)"), istring("dirname"), downloaddir); GroupAddOption("tuning", 'J', "usejobname", istring("use the jobname instead of the short ID as" " the job directory name"), usejobname); GroupAddOption("tuning", 'f', "force", istring("force download (overwrite existing job directory)"), forcedownload); } if (c == CO_STAT) { // Option 'long' takes precedence over this option (print-jobids). 
GroupAddOption("xaction", 'p', "print-jobids", istring("instead of the status only the IDs of " "the selected jobs will be printed"), printids); GroupAddOption("tuning", 'S', "sort", istring("sort jobs according to jobid, submissiontime or jobname"), istring("order"), sort); GroupAddOption("tuning", 'R', "rsort", istring("reverse sorting of jobs according to jobid, submissiontime or jobname"), istring("order"), rsort); GroupAddOption("tuning", 'u', "show-unavailable", istring("show jobs where status information is unavailable"), show_unavailable); GroupAddOption("format", 'J', "json", istring("show status information in JSON format"), show_json); } if (c == CO_CLEAN) { GroupAddOption("tuning", 'f', "force", istring("remove the job from the local list of jobs " "even if the job is not found in the infosys"), forceclean); } if (c == CO_TEST) { GroupAddOption("xaction", 'J', "job", istring("submit test job given by the number"), istring("int"), testjobid); GroupAddOption("xaction", 'r', "runtime", istring("test job runtime specified by the number"), istring("int"), runtime); } if (cIsJobMan) { GroupAddOption("filtering", 's', "status", istring("only select jobs whose status is statusstr"), istring("statusstr"), status); GroupAddOption("filtering", 'a', "all", istring("all jobs"), all); } if (c == CO_SUB) { GroupAddOption("tuning", 'e', "jobdescrstring", istring("jobdescription string describing the job to " "be submitted"), istring("string"), jobdescriptionstrings); GroupAddOption("tuning", 'f', "jobdescrfile", istring("jobdescription file describing the job to " "be submitted"), istring("string"), jobdescriptionfiles); } if (c == CO_SUB || c == CO_TEST) { GroupAddOption("filtering", 'b', "broker", istring("select broker method (list available brokers with --listplugins flag)"), istring("broker"), broker); GroupAddOption("tuning", 'o', "jobids-to-file", istring("the IDs of the submitted jobs will be appended to this file"), istring("filename"), jobidoutfile); GroupAddOption("tuning", 'n', "no-delegation", istring("do not perform any delegation for submitted jobs"), no_delegation); GroupAddOption("tuning", 'X', "x509-delegation", istring("perform X.509 delegation for submitted jobs"), x509_delegation); GroupAddOption("tuning", 'K', "token-delegation", istring("perform token delegation for submitted jobs"), token_delegation); GroupAddOption("tuning", '\0', "instances-max", istring("request at most this number of job instances submitted in single submit request"), "", instances_max); GroupAddOption("tuning", '\0', "instances-min", istring("request at least this number of job instances submitted in single submit request"), "", instances_min); } if (cIsJobMan) { GroupAddOption("tuning", 'i', "jobids-from-file", istring("a file containing a list of jobIDs"), istring("filename"), jobidinfiles); GroupAddOption("filtering", 'r', "rejectmanagement", istring("skip jobs that are on a computing element with a given URL"), istring("URL"), rejectmanagement); } if (c == CO_SUB || c == CO_TEST) { GroupAddOption("xaction", 'D', "dryrun", istring("submit jobs as dry run (no submission to batch system)"), dryrun); GroupAddOption("xaction", 'x', "dumpdescription", istring("do not submit - dump job description " "in the language accepted by the target"), dumpdescription); } if (c == CO_TEST) { GroupAddOption("xaction", 'E', "certificate", istring("prints info about installed user- and CA-certificates"), show_credentials); GroupAddOption("tuning", '\0', "allowinsecureconnection", istring("allow TLS connection which 
failed verification"), allow_insecure_connection); } if (c != CO_INFO) { GroupAddOption("tuning", 'j', "joblist", Arc::IString("the file storing information about active jobs (default %s)", Arc::UserConfig::JOBLISTFILE()).str(), istring("filename"), joblist); } /* --- Standard options below --- */ AddOption('z', "conffile", istring("configuration file (default ~/.arc/client.conf)"), istring("filename"), conffile); AddOption('t', "timeout", istring("timeout in seconds (default 20)"), istring("seconds"), timeout); GroupAddOption("xaction", 'P', "listplugins", istring("list the available plugins"), show_plugins); AddOption('d', "debug", istring("FATAL, ERROR, WARNING, INFO, VERBOSE or DEBUG"), istring("debuglevel"), debug); AddOption('v', "version", istring("print version information"), showversion); /* --- Common options below --- */ GroupAddOption("tuning", '\0', "no-authentication", istring("do not perform any authentication for opened connections"), no_authentication); GroupAddOption("tuning", '\0', "x509-authentication", istring("perform X.509 authentication for opened connections"), x509_authentication); GroupAddOption("tuning", '\0', "token-authentication", istring("perform token authentication for opened connections"), token_authentication); GroupAddOption("tuning", '\0', "systemca", istring("force using CA certificates configuration provided by OpenSSL"), force_system_ca); GroupAddOption("tuning", '\0', "gridca", istring("force using CA certificates configuration for Grid services (typically IGTF)"), force_grid_ca); GroupAddOption("tuning", '\0', "anyca", istring("force using CA certificates configuration for Grid services (typically IGTF) and one provided by OpenSSL"), force_any_ca); } bool ClientOptions::getDelegationType(Arc::Logger& logger, Arc::UserConfig const& usercfg, DelegationType& delegation_type) const { delegation_type = UndefinedDelegation; if(no_delegation) { if(delegation_type != UndefinedDelegation) { logger.msg(Arc::ERROR, "Conflicting delegation types specified."); return false; } delegation_type = NoDelegation; } if(x509_delegation) { if(delegation_type != UndefinedDelegation) { logger.msg(Arc::ERROR, "Conflicting delegation types specified."); return false; } delegation_type = X509Delegation; } if(token_delegation) { if(delegation_type != UndefinedDelegation) { logger.msg(Arc::ERROR, "Conflicting delegation types specified."); return false; } delegation_type = TokenDelegation; } // If delegation is not specified try to guess it if(delegation_type == UndefinedDelegation) { if(!usercfg.OToken().empty()) { delegation_type = TokenDelegation; } else { delegation_type = X509Delegation; } } if(delegation_type == X509Delegation) { if (!checkproxy(usercfg)) { return false; } } else if(delegation_type == TokenDelegation) { if (!checktoken(usercfg)) { return false; } } return true; } bool ClientOptions::getAuthenticationType(Arc::Logger& logger, Arc::UserConfig const& usercfg, AuthenticationType& authentication_type) const { authentication_type = UndefinedAuthentication; if(no_authentication) { if(authentication_type != UndefinedAuthentication) { logger.msg(Arc::ERROR, "Conflicting authentication types specified."); return false; } authentication_type = NoAuthentication; } if(x509_authentication) { if(authentication_type != UndefinedAuthentication) { logger.msg(Arc::ERROR, "Conflicting authentication types specified."); return false; } authentication_type = X509Authentication; } if(token_authentication) { if(authentication_type != UndefinedAuthentication) { logger.msg(Arc::ERROR, "Conflicting authentication types specified."); return false; } authentication_type = TokenAuthentication; } if(authentication_type == X509Authentication) { if (!checkproxy(usercfg)) { return false; } } else if(authentication_type == TokenAuthentication) { if (!checktoken(usercfg)) { return false; } } return true; }
nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/utils.h0000644000000000000000000000013215067751327020570 xustar0030 mtime=1759498967.678490809 30 atime=1759498967.825493043 30 ctime=1759499031.216420366 nordugrid-arc-7.1.1/src/clients/compute/utils.h0000644000175000002070000002157015067751327022477 0ustar00mockbuildmock00000000000000#ifndef __ARC_CLIENT_COMPUTE_UTILS_H_ #define __ARC_CLIENT_COMPUTE_UTILS_H_ #include #include #include #include #include #include #include #include #include #include struct termios; // This class records the current state of the console // when created and recovers it when destroyed. // Its main purpose is to recover the console in // case the application had to cancel any UI actions // involving changing console state like // password input. class ConsoleRecovery { private: ConsoleRecovery(ConsoleRecovery const&); ConsoleRecovery& operator=(ConsoleRecovery const&); struct termios * ti; public: ConsoleRecovery(void); ~ConsoleRecovery(void); }; #ifdef TEST #define RUNMAIN(X) test_##X##_main #else #define RUNMAIN(X) X(int argc, char **argv); \ int main(int argc, char **argv) { int xr = 0; { ConsoleRecovery cr; xr = X(argc,argv); }; _exit(xr); return 0; } \ int X #endif /// Returns the URLs of computing elements selected by alias, group name, URL or the default ones /** This helper method gets a list of strings representing computing elements. Each item of the list is either an alias of a service configured in the UserConfig, a name of a group configured in the UserConfig, or a URL of a service not configured in the UserConfig. If the list is empty, the default services will be selected from the UserConfig. The method returns the URLs of the selected services. This is meant to be used by the command line programs where the user is specifying a list of computing elements by alias, group name (which has to be looked up in the UserConfig), or by URL. \param[in] usercfg is the UserConfig object containing information about configured services \param[in] computingelements is a list of strings containing aliases, group names, or URLs of computing elements \return a list of URL strings, the endpoints of the selected services, or the default ones if none was selected */ std::list<std::string> getSelectedURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list<std::string> computingelements); /// Combine the list of rejected discovery URLs from the UserConfig with the ones specified in a list /** Helper method for the command line programs to combine the list of rejected discovery URLs specified by the user at the command line with the ones configured in the UserConfig. The rejected discovery URLs are supposed to cause the service discovery not to discover computing elements whose URL matches any of these strings.
\param[in] usercfg is the UserConfig object containing information about configured services \param[in] rejectdiscovery is a list of strings, which will also be added to the resulting list besides the ones from the UserConfig \return a list of strings which are the rejected URLs from the UserConfig and the ones given as the second argument combined */ std::list<std::string> getRejectDiscoveryURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list<std::string> rejectdiscovery); /// Combine the list of rejected management URLs from the UserConfig with the ones specified in a list /** Helper method for the command line programs to combine the list of rejected management URLs specified by the user at the command line with the ones configured in the UserConfig. The rejected management URLs are supposed to cause the job management commands not to manage jobs which reside on computing elements whose URL matches any of the items in the list \param[in] usercfg is the UserConfig object containing information about configured services \param[in] rejectmanagement is a list of strings, which will also be added to the resulting list besides the ones from the UserConfig \return a list of strings which are the rejected URLs from the UserConfig and the ones given as the second argument combined */ std::list<std::string> getRejectManagementURLsFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list<std::string> rejectmanagement); /// Looks up or creates Endpoints from strings specified at the command line using the information from the UserConfig /** This helper method gets a list of strings representing service registries and computing elements, along with a requested submission interface, looks up all the services from the UserConfig, and returns the Endpoints found there, or creates new Endpoints for services not found in the UserConfig. If there are no registries or computing elements given, then the default services will be returned. This is meant to be used by the command line programs where the user is specifying service registries and/or computing elements with several strings, which could refer to services configured in the UserConfig (aliases or groups), or they can be URLs referring to services which are not configured in the UserConfig. This method looks up the aliases and group names, and if a string is not an alias or a group name, then it's assumed to be a URL. \param[in] usercfg is the UserConfig object containing information about configured services \param[in] registries is a list of strings containing aliases, group names, or URLs of service registries \param[in] computingelements is a list of strings containing aliases, group names, or URLs of computing elements \return a list of Endpoint objects containing the services corresponding to the given strings or the default services. */ std::list<Arc::Endpoint> getServicesFromUserConfigAndCommandLine(Arc::UserConfig usercfg, std::list<std::string> registries, std::list<std::string> computingelements, std::string requestedSubmissionInterfaceName = "", std::string infointerface = ""); void showplugins(const std::string& program, const std::list<std::string>& types, Arc::Logger& logger, const std::string& chosenBroker = ""); bool checkproxy(const Arc::UserConfig& uc); bool checktoken(const Arc::UserConfig& uc); bool jobneedsproxy(const Arc::JobDescription& job); void splitendpoints(std::list<Arc::Endpoint>& selected, std::list<Arc::Endpoint>& rejected); /** * Creates a new JobInformationStorage object. The caller has the responsibility of * deleting the returned object.
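 * A minimal usage sketch (illustrative only; it assumes an initialized UserConfig
 * named usercfg and relies on the IsStorageExisting()/ReadAll() calls used by the
 * client code further below):
 * \code
 *   Arc::JobInformationStorage* store = createJobInformationStorage(usercfg);
 *   std::list<Arc::Job> jobs;
 *   if (store && store->IsStorageExisting())
 *     store->ReadAll(jobs, std::list<std::string>()); // no rejected endpoints
 *   delete store; // the caller owns the returned object
 * \endcode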
*/ Arc::JobInformationStorage* createJobInformationStorage(const Arc::UserConfig& uc); enum AuthenticationType { UndefinedAuthentication, NoAuthentication, X509Authentication, TokenAuthentication }; enum DelegationType { UndefinedDelegation, NoDelegation, X509Delegation, TokenDelegation }; class ClientOptions : public Arc::OptionParser { public: enum Client_t { CO_SUB, CO_TEST, CO_CAT, CO_CLEAN, CO_GET, CO_KILL, CO_RENEW, CO_RESUME, CO_STAT, CO_SYNC, CO_INFO, CO_ACL }; ClientOptions(Client_t c, const std::string& arguments = "", const std::string& summary = "", const std::string& description = ""); /// Implement ARC consistent info/submission endpoint types logic bool canonicalizeARCInterfaceTypes(Arc::Logger& logger); bool getDelegationType(Arc::Logger& logger, Arc::UserConfig const& usercfg, DelegationType& delegation_type) const; bool getAuthenticationType(Arc::Logger& logger, Arc::UserConfig const& usercfg, AuthenticationType& authentication_type) const; bool dryrun; bool dumpdescription; bool show_credentials; bool show_plugins; bool showversion; bool all; bool keep; bool forcesync; bool truncate; bool convert; bool longlist; bool printids; bool forceclean; bool show_stdout; bool show_stderr; bool show_joblog; bool show_json; bool usejobname; bool forcedownload; bool direct_submission; bool show_unavailable; bool no_delegation; bool x509_delegation; bool token_delegation; bool no_authentication; bool x509_authentication; bool token_authentication; bool force_system_ca; bool force_grid_ca; bool force_any_ca; bool allow_insecure_connection; int testjobid; int runtime; int timeout; int instances_min; int instances_max; std::string show_file; std::string joblist; std::string jobidoutfile; std::string conffile; std::string debug; std::string broker; std::string sort; std::string rsort; std::string downloaddir; std::string requestedSubmissionInterfaceName; std::string infointerface; std::list<std::string> jobdescriptionstrings; std::list<std::string> jobdescriptionfiles; std::list<std::string> jobidinfiles; std::list<std::string> status; std::list<std::string> rejectdiscovery; std::list<std::string> rejectmanagement; // command line options std::list<std::string> computing_elements; std::list<std::string> registries; std::string requested_submission_endpoint_type; std::string requested_info_endpoint_type; // post-processed interface types std::list<std::string> submit_types; std::list<std::string> info_types; }; #endif // __ARC_CLIENT_COMPUTE_UTILS_H_ nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/submit.h0000644000000000000000000000013215067751327020733 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.825493043 30 ctime=1759499031.228574792 nordugrid-arc-7.1.1/src/clients/compute/submit.h0000644000175000002070000000667415067751327022652 0ustar00mockbuildmock00000000000000#ifndef __ARC_CLIENT_COMPUTE_SUBMIT_COMMON_H_ #define __ARC_CLIENT_COMPUTE_SUBMIT_COMMON_H_ #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" int process_submission_status(Arc::SubmissionStatus status, const Arc::UserConfig& usercfg); void check_missing_plugins(Arc::Submitter s, int is_error); int legacy_submit(const Arc::UserConfig& usercfg, const std::list<Arc::JobDescription>& jobdescriptionlist, std::list<Arc::Endpoint>& services, const std::string& requestedSubmissionInterface, const std::string& jobidfile, bool direct_submission, DelegationType delegation_type, int instances_min, int instances_max); int dumpjobdescription(const Arc::UserConfig& usercfg, const std::list<Arc::JobDescription>& jobdescriptionlist, const std::list<Arc::Endpoint>& services, const std::string& requestedSubmissionInterface); /// Implements target selection
logic based on the requested info/submission endpoint types /** This helper method processes the requested types, computing elements and registries, and defines the endpoint batches for the submission attempts. \param[in] usercfg is the UserConfig object containing information about configured services \param[in] opt ClientOptions object containing request options \param endpoint_batches list of lists of Endpoint objects \return a bool indicating whether target information lookup is needed, as opposed to direct submission. */ bool prepare_submission_endpoint_batches(const Arc::UserConfig& usercfg, const ClientOptions& opt, std::list<std::list<Arc::Endpoint> >& endpoint_batches); /// Submit jobs using the defined endpoint batches and submission type /** This helper method tries to submit jobs to the list of endpoint batches, with (brokering) or without information queries \param[in] usercfg is the UserConfig object containing information about configured services \param[in] endpoint_batches list of lists of Endpoint objects \param[in] info_discovery boolean indicating the need for information queries and brokering \param[in] jobidfile path to the file in which to store job IDs \param[in] jobdescriptionlist list of job descriptions to submit \return an exit code: zero if the submission succeeded, non-zero otherwise. */ int submit_jobs(const Arc::UserConfig& usercfg, const std::list<std::list<Arc::Endpoint> >& endpoint_batches, bool info_discovery, const std::string& jobidfile, const std::list<Arc::JobDescription>& jobdescriptionlist, DelegationType delegation_type, int instances_min, int instances_max); /// Class to handle submitted jobs and present the results to the user class HandleSubmittedJobs : public Arc::EntityConsumer<Arc::Job> { public: HandleSubmittedJobs(const std::string& jobidfile, const Arc::UserConfig& uc) : jobidfile(jobidfile), uc(uc), submittedJobs() {} ~HandleSubmittedJobs() {} void addEntity(const Arc::Job& j); void write() const; void printsummary(const std::list<Arc::JobDescription>& originalDescriptions, const std::list<const Arc::JobDescription*>& notsubmitted) const; void clearsubmittedjobs() { submittedJobs.clear(); } private: const std::string jobidfile; const Arc::UserConfig& uc; std::list<Arc::Job> submittedJobs; }; #endif // __ARC_CLIENT_COMPUTE_SUBMIT_COMMON_H_ nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arccat.1.in0000644000000000000000000000013215067751327021203 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.240102329 nordugrid-arc-7.1.1/src/clients/compute/arccat.1.in0000644000175000002070000000440315067751327023106 0ustar00mockbuildmock00000000000000[NAME] arccat \- ARC Catenate [EXTENDED DESCRIPTION] The .B arccat command displays the stdout or stderr of running jobs. It can also display A-REX's error log of a job. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname the stdout, stderr or A-REX error log of all those jobs is shown. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the stdout, stderr or A-REX error log of all active jobs will be shown. The .B --computing-element option can be used to select or reject jobs at specific CEs. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values.
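For example, the stdout of all of the user's running jobs at one CE could be requested with an invocation such as .B "arccat --all --status Running --computing-element ce.example.org" (shown for illustration only; the CE name is hypothetical).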
The contents of stdout or stderr can only be displayed for jobs whose job description assigned those streams to files. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcclean.1.in0000644000000000000000000000013215067751327021516 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.238852193 nordugrid-arc-7.1.1/src/clients/compute/arcclean.1.in0000644000175000002070000000454415067751327023427 0ustar00mockbuildmock00000000000000[NAME] arcclean \- ARC Clean [EXTENDED DESCRIPTION] The .B arcclean command removes a job from the computing element. Only jobs that have finished can be removed. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname all those jobs are removed. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, all active jobs can be removed. The .B --computing-element option can be used to select or reject jobs at specific clusters. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. The .B --force option removes the job from your local database of jobs even if the job cannot be found in the remote information system. Jobs not appearing in the remote information system can also be removed from the local database by specifying the .B --status option with value \fBUndefined\fR. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default.
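As an illustration of the \fBUndefined\fR mechanism described above, jobs that no longer appear in the remote information system could be purged from the local database with an invocation such as .B "arcclean --status Undefined" (shown for illustration only).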
[COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcinfo.cpp0000644000000000000000000000013215067751327021404 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.219456318 nordugrid-arc-7.1.1/src/clients/compute/arcinfo.cpp0000644000175000002070000001672615067751327023322 0ustar00mockbuildmock00000000000000// -*- indent-tabs-mode: nil -*- #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include #include #include #include "utils.h" int RUNMAIN(arcinfo)(int argc, char **argv) { setlocale(LC_ALL, ""); Arc::Logger logger(Arc::Logger::getRootLogger(), "arcinfo"); Arc::LogStream logcerr(std::cerr); logcerr.setFormat(Arc::ShortFormat); Arc::Logger::getRootLogger().addDestination(logcerr); Arc::Logger::getRootLogger().setThreshold(Arc::WARNING); Arc::ArcLocation::Init(argv[0]); ClientOptions opt(ClientOptions::CO_INFO, istring("[resource ...]"), istring("The arcinfo command is used for " "obtaining the status of computing " "resources on the Grid.")); { std::list clusterstmp = opt.Parse(argc, argv); opt.computing_elements.insert(opt.computing_elements.end(), clusterstmp.begin(), clusterstmp.end()); } if (opt.showversion) { std::cout << Arc::IString("%s version %s", "arcinfo", VERSION) << std::endl; return 0; } // If debug is specified as argument, it should be set before loading the configuration. 
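// (The ordering matters here: applying the command-line level before the UserConfig
// is loaded means that messages emitted while the configuration is parsed already
// honour the requested verbosity; a verbosity from the configuration file is applied
// further below, and only when no command-line level was given.)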
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:ServiceEndpointRetrieverPlugin"); types.push_back("HED:TargetInformationRetrieverPlugin"); showplugins("arcinfo", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if (!opt.canonicalizeARCInterfaceTypes(logger)) return 1; std::string req_sub_iface; std::string req_info_iface; if (!opt.submit_types.empty()) req_sub_iface = opt.submit_types.front(); if (!opt.info_types.empty()) req_info_iface = opt.info_types.front(); std::list endpoints = getServicesFromUserConfigAndCommandLine(usercfg, opt.registries, opt.computing_elements, req_sub_iface, req_info_iface); std::set preferredInterfaceNames; if (usercfg.InfoInterface().empty()) { preferredInterfaceNames.insert("org.nordugrid.arcrest"); } else { preferredInterfaceNames.insert(usercfg.InfoInterface()); } std::list rejectDiscoveryURLs = getRejectDiscoveryURLsFromUserConfigAndCommandLine(usercfg, opt.rejectdiscovery); Arc::ComputingServiceUniq csu; Arc::ComputingServiceRetriever csr(usercfg, std::list(), rejectDiscoveryURLs, preferredInterfaceNames); csr.addConsumer(csu); for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { csr.addEndpoint(*it); } csr.wait(); std::list services = csu.getServices(); for (std::list::const_iterator it = services.begin(); it != services.end(); ++it) { if (opt.longlist) { if (it != services.begin()) std::cout << std::endl; std::cout << *it; std::cout << std::flush; } else { std::cout << "Computing service: " << (**it).Name; if (!(**it).QualityLevel.empty()) { std::cout << " (" << (**it).QualityLevel << ")"; } std::cout << std::endl; std::stringstream infostream, submissionstream; for (std::map::const_iterator itCE = it->ComputingEndpoint.begin(); itCE != it->ComputingEndpoint.end(); ++itCE) { if (itCE->second->Capability.count(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::COMPUTINGINFO))) { infostream << " " << Arc::IString("Information endpoint") << ": " << itCE->second->URLString; if ( !itCE->second->InterfaceName.empty() ) { infostream << " (" << itCE->second->InterfaceName << ")"; } infostream << std::endl; } if (itCE->second->Capability.empty() || itCE->second->Capability.count(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBSUBMIT)) 
|| itCE->second->Capability.count(Arc::Endpoint::GetStringForCapability(Arc::Endpoint::JOBCREATION))) { submissionstream << " "; submissionstream << Arc::IString("Submission endpoint") << ": "; submissionstream << itCE->second->URLString; submissionstream << " (" << Arc::IString("status") << ": "; submissionstream << itCE->second->HealthState << ", "; submissionstream << Arc::IString("interface") << ": "; submissionstream << itCE->second->InterfaceName << ")" << std::endl; } } std::cout << infostream.str() << submissionstream.str(); } } bool anEndpointFailed = false; // Check if querying endpoint succeeded. Arc::EndpointStatusMap statusMap = csr.getAllStatuses(); for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { Arc::EndpointStatusMap::const_iterator itStatus = statusMap.find(*it); if (itStatus != statusMap.end() && itStatus->second != Arc::EndpointQueryingStatus::SUCCESSFUL && itStatus->second != Arc::EndpointQueryingStatus::SUSPENDED_NOTREQUIRED) { if (!anEndpointFailed) { anEndpointFailed = true; std::cerr << Arc::IString("ERROR: Failed to retrieve information from the following endpoints:") << std::endl; } std::cerr << " " << it->URLString; if (!itStatus->second.getDescription().empty()) { std::cerr << " (" << itStatus->second.getDescription() << ")"; } std::cerr << std::endl; } } if (anEndpointFailed) return 1; if (services.empty()) { std::cerr << Arc::IString("ERROR: Failed to retrieve information"); if (!endpoints.empty()) { std::cerr << " " << Arc::IString("from the following endpoints:") << std::endl; for (std::list::const_iterator it = endpoints.begin(); it != endpoints.end(); ++it) { std::cerr << " " << it->URLString << std::endl; } } else { std::cerr << std::endl; } return 1; } return 0; } nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcstat.1.in0000644000000000000000000000013215067751327021407 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 30 ctime=1759499031.235432137 nordugrid-arc-7.1.1/src/clients/compute/arcstat.1.in0000644000175000002070000000733415067751327023320 0ustar00mockbuildmock00000000000000[NAME] arcstat \- ARC Job Status [EXTENDED DESCRIPTION] The .B arcstat command gives the status of a job submitted to a ARC CE. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname the status of all those jobs are shown. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the status of all active jobs will be shown. By default .B arcstat presents job states as defined internally followed by middleware specific representation of job state in brackets. 
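A reported state therefore looks like, for example, "Running (INLRMS:R)"; the bracketed, middleware-specific part varies between CEs and the value shown here is only illustrative.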
The following internal job states are defined: .B Accepted - job accepted on cluster but not being processed yet .B Preparing - job is in phase of preparing for submission to batch system .B Submitting - communication with batch system is ongoing .B Hold - job's processing is suspended due to internal reason or user request .B Queuing - job is passed to batch system but not being executed yet .B Running - job being executed in batch system .B Finishing - job in phase of post-execution procedures being run .B Finished - job successfully completed all processing phases .B Killed - job processing was interrupted by user request .B Failed - job processing was interrupted due to detected failure .B Deleted - job was removed from cluster (usually because it stayed there too long) .B Other - middleware specific job state could not be adequately mapped to internal state Those are also the states which are used by .BR arccat (1), .BR arcclean (1), .BR arcget (1), .BR arckill (1), .BR arcrenew (1), .BR arcresume (1) to perform job filtering. If the .B --long option is given more detailed information is shown. Jobs can be sorted according to the jobid, submissiontime or jobname, either in normal or reverse order. By using the .B --sort or .B --rsort option followed by the desired ordering ('jobid', 'submissiontime' or 'jobname'), jobs will be sorted in normal or reverse order. Note that the options .B --sort and .B --rsort cannot be used at the same time. The .B --computing-element option can be used to select or reject jobs at specific clusters. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. Also in this case the .B --long option can be used to obtain more detailed information. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC. Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcresume.1.in0000644000000000000000000000013115067751327021733 xustar0030 mtime=1759498967.677490794 30 atime=1759498967.824493028 29 ctime=1759499031.24307715 nordugrid-arc-7.1.1/src/clients/compute/arcresume.1.in0000644000175000002070000000400715067751327023637 0ustar00mockbuildmock00000000000000[NAME] arcresume \- ARC Resume [EXTENDED DESCRIPTION] The .B arcresume command resumes a job submitted to an ARC CE.
The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname all those jobs are resumed. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, all active jobs will be resumed. The .B --computing-element option can be used to select or reject jobs at specific clusters. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arcinfo (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcinfo.1.in0000644000000000000000000000013215067751327021367 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.236530873 nordugrid-arc-7.1.1/src/clients/compute/arcinfo.1.in0000644000175000002070000000231415067751327023271 0ustar00mockbuildmock00000000000000[NAME] arcinfo \- ARC Info [EXTENDED DESCRIPTION] The .B arcinfo command is used to get the status and information about computing elements. You can specify the URLs of CE endpoints with the .B --computing-element option, or by just listing them as arguments. The .B --registry flag can be used to specify a registry server which should be queried for CE endpoints. Detailed information about queried computing services can be obtained by specifying the .B --long flag. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
Please report bugs and feature requests to http://bugzilla.nordugrid.org [SEE ALSO] .BR arccat (1), .BR arcclean (1), .BR arccp (1), .BR arcget (1), .BR arckill (1), .BR arcls (1), .BR arcmkdir (1), .BR arcproxy (1), .BR arcrenew (1), .BR arcresume (1), .BR arcrm (1), .BR arcstat (1), .BR arcsub (1), .BR arcsync (1), .BR arctest (1) nordugrid-arc-7.1.1/src/clients/compute/PaxHeaders/arcget.1.in0000644000000000000000000000013215067751327021213 xustar0030 mtime=1759498967.676867254 30 atime=1759498967.824493028 30 ctime=1759499031.234165747 nordugrid-arc-7.1.1/src/clients/compute/arcget.1.in0000644000175000002070000000472015067751327023120 0ustar00mockbuildmock00000000000000[NAME] arcget \- ARC Get [EXTENDED DESCRIPTION] The .B arcget command downloads the results after a job has completed on the computing element. Only the results of jobs that have finished can be downloaded. The job can be referred to either by the jobid that was returned by .BR arcsub (1) at submission time or by its jobname if the job description that was submitted contained a jobname attribute. More than one jobid and/or jobname can be given. If several jobs were submitted with the same jobname the results of all those jobs are downloaded. If the .B --joblist option is used the list of jobs is read from a file with the specified filename. By specifying the .B --all option, the results of all active jobs are downloaded. The .B --computing-element option can be used to select or reject jobs at specific clusters. See .BR arcsub (1) for a discussion of the format of arguments to this option. The .B --status option can be used to select jobs in a specific state. These options can be repeated several times. See .BR arcstat (1) for possible state values. For each job that is downloaded a subdirectory will be created in the download directory that will contain the downloaded files. If the download was successful the job will be removed from the remote cluster unless the .B --keep option was specified. [FILES] .TP .B ~/.arc/client.conf Some options can be given default values by specifying them in the ARC client configuration file. By using the .B --conffile option a different configuration file can be used than the default. .TP .B ~/.arc/jobs.dat This is a local database of the user's active jobs. When a job is successfully submitted it is added to the database and when it is removed from the remote CE it is removed from this list. This database is used as the list of all active jobs when the user specifies the .B --all option to the various NorduGrid ARC user interface commands. By using the .B --joblist option a different file can be used than the default. [COPYRIGHT] APACHE LICENSE Version 2.0 [AUTHOR] ARC software is developed by the NorduGrid Collaboration (http://www.nordugrid.org), please consult the AUTHORS file distributed with ARC.
if (!opt.debug.empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(opt.debug)); logger.msg(Arc::VERBOSE, "Running command: %s", opt.GetCommandWithArguments()); if (opt.show_plugins) { std::list types; types.push_back("HED:JobControllerPlugin"); showplugins("arcclean", types, logger); return 0; } Arc::UserConfig usercfg(opt.conffile, opt.joblist); if (!usercfg) { logger.msg(Arc::ERROR, "Failed configuration initialization"); return 1; } if (opt.force_system_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(false); } if (opt.force_grid_ca) { usercfg.CAUseSystem(false); usercfg.CAUseGrid(true); } if (opt.force_any_ca) { usercfg.CAUseSystem(true); usercfg.CAUseGrid(true); } if (opt.debug.empty() && !usercfg.Verbosity().empty()) Arc::Logger::getRootLogger().setThreshold(Arc::istring_to_level(usercfg.Verbosity())); for (std::list::const_iterator it = opt.jobidinfiles.begin(); it != opt.jobidinfiles.end(); ++it) { if (!Arc::Job::ReadJobIDsFromFile(*it, jobidentifiers)) { logger.msg(Arc::WARNING, "Cannot read specified jobid file: %s", *it); } } if (opt.timeout > 0) usercfg.Timeout(opt.timeout); AuthenticationType authentication_type = UndefinedAuthentication; if(!opt.getAuthenticationType(logger, usercfg, authentication_type)) return 1; switch(authentication_type) { case NoAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeNone); break; case X509Authentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeCert); break; case TokenAuthentication: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeToken); break; case UndefinedAuthentication: default: usercfg.CommunicationAuthType(Arc::UserConfig::AuthTypeUndefined); break; } if ((!opt.joblist.empty() || !opt.status.empty()) && jobidentifiers.empty() && opt.computing_elements.empty()) opt.all = true; if (jobidentifiers.empty() && opt.computing_elements.empty() && !opt.all) { logger.msg(Arc::ERROR, "No jobs given"); return 1; } std::list selectedURLs; if (!opt.computing_elements.empty()) { selectedURLs = getSelectedURLsFromUserConfigAndCommandLine(usercfg, opt.computing_elements); } std::list rejectManagementURLs = getRejectManagementURLsFromUserConfigAndCommandLine(usercfg, opt.rejectmanagement); std::list jobs; Arc::JobInformationStorage *jobstore = createJobInformationStorage(usercfg); if (jobstore != NULL && !jobstore->IsStorageExisting()) { logger.msg(Arc::ERROR, "Job list file (%s) doesn't exist", usercfg.JobListFile()); delete jobstore; return 1; } if (jobstore == NULL || ( opt.all && !jobstore->ReadAll(jobs, rejectManagementURLs)) || (!opt.all && !jobstore->Read(jobs, jobidentifiers, selectedURLs, rejectManagementURLs))) { logger.msg(Arc::ERROR, "Unable to read job information from file (%s)", usercfg.JobListFile()); delete jobstore; return 1; } if (!opt.all) { for (std::list::const_iterator itJIDAndName = jobidentifiers.begin(); itJIDAndName != jobidentifiers.end(); ++itJIDAndName) { std::cout << Arc::IString("Warning: Job not found in job list: %s", *itJIDAndName) << std::endl; } } Arc::JobSupervisor jobmaster(usercfg, jobs); jobmaster.Update(); jobmaster.SelectValid(); if (!opt.status.empty()) { jobmaster.SelectByStatus(opt.status); } //if (jobmaster.GetSelectedJobs().empty()) { // std::cout << Arc::IString("No jobs") << std::endl; // return 1; //} int retval = (int)!jobmaster.Clean(); std::list cleaned = jobmaster.GetIDsProcessed(); const std::list& notcleaned = jobmaster.GetIDsNotProcessed(); if ((!opt.status.empty() && std::find(opt.status.begin(), opt.status.end(), "Undefined") != 
opt.status.end()) || opt.forceclean) { std::string response = ""; if (!opt.forceclean) { std::cout << Arc::IString("You are about to remove jobs from the job list for which no information could be\n" "found. NOTE: Recently submitted jobs might not have appeared in the information\n" "system, and this action will also remove such jobs.") << std::endl; std::cout << Arc::IString("Are you sure you want to clean jobs missing information?") << " [" << Arc::IString("y") << "/" << Arc::IString("n") << "] "; std::cin >> response; } if (!opt.forceclean && Arc::lower(response) != std::string(Arc::FindTrans("y"))) { std::cout << Arc::IString("Jobs missing information will not be cleaned!") << std::endl; if (cleaned.empty() && notcleaned.empty()) { return retval; } } else { for (std::list::const_iterator it = jobmaster.GetAllJobs().begin(); it != jobmaster.GetAllJobs().end(); ++it) { if (it->State == Arc::JobState::UNDEFINED) { cleaned.push_back(it->JobID); } } } } if (!jobstore->Remove(cleaned)) { std::cout << Arc::IString("Warning: Failed to write job information to file (%s)", usercfg.JobListFile()) << std::endl; std::cout << Arc::IString(" Run 'arcclean -s Undefined' to remove cleaned jobs from job list", usercfg.JobListFile()) << std::endl; } delete jobstore; if (cleaned.empty() && notcleaned.empty()) { std::cout << Arc::IString("No jobs") << std::endl; return 1; } std::cout << Arc::IString("Jobs processed: %d, deleted: %d", cleaned.size()+notcleaned.size(), cleaned.size()) << std::endl; return retval; } nordugrid-arc-7.1.1/src/PaxHeaders/utils0000644000000000000000000000013215067751427015226 xustar0030 mtime=1759499031.528461013 30 atime=1759499034.766510215 30 ctime=1759499031.528461013 nordugrid-arc-7.1.1/src/utils/0000755000175000002070000000000015067751427017205 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/utils/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327017336 xustar0030 mtime=1759498967.788341109 30 atime=1759498967.881493894 30 ctime=1759499031.312488997 nordugrid-arc-7.1.1/src/utils/Makefile.am0000644000175000002070000000020715067751327021237 0ustar00mockbuildmock00000000000000if HED_ENABLED HED = hed else HED = endif SUBDIRS = $(HED) python archery arc-exporter DIST_SUBDIRS = hed python archery arc-exporter nordugrid-arc-7.1.1/src/utils/PaxHeaders/Makefile.in0000644000000000000000000000013115067751357017351 xustar0030 mtime=1759498991.917765358 29 atime=1759499020.27328999 30 ctime=1759499031.313793844 nordugrid-arc-7.1.1/src/utils/Makefile.in0000644000175000002070000006101115067751357021253 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/utils ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive 
am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in README DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = 
@BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = 
@PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = @PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ 
top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ @HED_ENABLED_FALSE@HED = @HED_ENABLED_TRUE@HED = hed SUBDIRS = $(HED) python archery arc-exporter DIST_SUBDIRS = hed python archery arc-exporter all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/utils/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/utils/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: nordugrid-arc-7.1.1/src/utils/PaxHeaders/archery0000644000000000000000000000013215067751427016663 xustar0030 mtime=1759499031.526460982 30 atime=1759499034.766510215 30 ctime=1759499031.526460982 nordugrid-arc-7.1.1/src/utils/archery/0000755000175000002070000000000015067751427020642 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/utils/archery/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327020773 xustar0030 mtime=1759498967.788556955 30 atime=1759498967.881493894 30 ctime=1759499031.524634607 nordugrid-arc-7.1.1/src/utils/archery/Makefile.am0000644000175000002070000000003615067751327022674 0ustar00mockbuildmock00000000000000sbin_SCRIPTS = archery-manage nordugrid-arc-7.1.1/src/utils/archery/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357021007 xustar0030 mtime=1759498991.979549348 30 atime=1759499020.004285903 30 ctime=1759499031.525460967 nordugrid-arc-7.1.1/src/utils/archery/Makefile.in0000644000175000002070000005230315067751357022714 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/utils/archery ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = archery-manage CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == 
$(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(sbindir)" SCRIPTS = $(sbin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/archery-manage.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN 
= @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = @GLOBUS_COMMON_CFLAGS@ GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@ GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@ GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@ GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@ GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@ GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@ GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@ GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@ GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@ GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@ GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@ GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@ GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@ GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@ GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@ GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@ GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@ GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@ GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@ GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@ GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@ GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@ GMSGFMT = @GMSGFMT@ GMSGFMT_015 = @GMSGFMT_015@ GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@ GPT_QUERY = @GPT_QUERY@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ INTLLIBS = @INTLLIBS@ INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@ LCAS_CFLAGS = @LCAS_CFLAGS@ LCAS_LIBS = @LCAS_LIBS@ LCAS_LOCATION = @LCAS_LOCATION@ LCMAPS_CFLAGS = @LCMAPS_CFLAGS@ LCMAPS_LIBS = @LCMAPS_LIBS@ LCMAPS_LOCATION = @LCMAPS_LOCATION@ LD = @LD@ LDFLAGS = @LDFLAGS@ LDNS_CFLAGS = @LDNS_CFLAGS@ LDNS_CONFIG = @LDNS_CONFIG@ LDNS_LIBS = @LDNS_LIBS@ LIBICONV = @LIBICONV@ LIBINTL = @LIBINTL@ LIBOBJS = @LIBOBJS@ LIBRESOLV = @LIBRESOLV@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBXML2_CFLAGS = @LIBXML2_CFLAGS@ LIBXML2_LIBS = @LIBXML2_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBICONV = @LTLIBICONV@ LTLIBINTL = @LTLIBINTL@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ MSGFMT = @MSGFMT@ MSGFMT_015 = @MSGFMT_015@ MSGMERGE = @MSGMERGE@ NM = @NM@ NMEDIT = @NMEDIT@ NSS_CFLAGS = @NSS_CFLAGS@ NSS_LIBS = @NSS_LIBS@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OPENSSL_CFLAGS = @OPENSSL_CFLAGS@ OPENSSL_LIBS = @OPENSSL_LIBS@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PDFLATEX = @PDFLATEX@ PERL = @PERL@ PERL_TEST_DIR = @PERL_TEST_DIR@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ POSUB = @POSUB@ PYLINT = @PYLINT@ PYLINT_ARGS = @PYLINT_ARGS@ PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@ PYTHON = @PYTHON@ PYTHON_CFLAGS = @PYTHON_CFLAGS@ PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@ PYTHON_LIBS = @PYTHON_LIBS@ PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@ PYTHON_SITE_LIB = 
@PYTHON_SITE_LIB@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ S3_CPPFLAGS = @S3_CPPFLAGS@ S3_LIBS = @S3_LIBS@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SPECDATE = @SPECDATE@ SQLITE_CFLAGS = @SQLITE_CFLAGS@ SQLITE_LIBS = @SQLITE_LIBS@ STRIP = @STRIP@ SWIG = @SWIG@ SWIG2 = @SWIG2@ SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@ SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@ TEST_DIR = @TEST_DIR@ USE_NLS = @USE_NLS@ UUID_LIBS = @UUID_LIBS@ VERSION = @VERSION@ XGETTEXT = @XGETTEXT@ XGETTEXT_015 = @XGETTEXT_015@ XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@ XMLSEC_CFLAGS = @XMLSEC_CFLAGS@ XMLSEC_LIBS = @XMLSEC_LIBS@ XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@ XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@ XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@ XROOTD_LIBS = @XROOTD_LIBS@ ZLIB_CFLAGS = @ZLIB_CFLAGS@ ZLIB_LIBS = @ZLIB_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ arc_location = @arc_location@ baseversion = @baseversion@ bashcompdir = @bashcompdir@ bindir = @bindir@ bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ cronddir = @cronddir@ datadir = @datadir@ datarootdir = @datarootdir@ debianversion = @debianversion@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ extpkglibdir = @extpkglibdir@ fedorarelease = @fedorarelease@ fedorasetupopts = @fedorasetupopts@ gnu_time = @gnu_time@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ initddir = @initddir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ libsubdir = @libsubdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ monitor_prefix = @monitor_prefix@ nodename = @nodename@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgconfigdir = @pkgconfigdir@ pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@ pkgdatasubdir = @pkgdatasubdir@ pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@ pkglibexecsubdir = @pkglibexecsubdir@ pkglibsubdir = @pkglibsubdir@ posix_shell = @posix_shell@ prefix = @prefix@ preversion = @preversion@ program_transform_name = @program_transform_name@ psdir = @psdir@ runstatedir = @runstatedir@ sbindir = @sbindir@ sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ tmp_dir = @tmp_dir@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ unitsdir = @unitsdir@ sbin_SCRIPTS = archery-manage all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/utils/archery/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign src/utils/archery/Makefile Makefile: 
$(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): archery-manage: $(top_builddir)/config.status $(srcdir)/archery-manage.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ install-sbinSCRIPTS: $(sbin_SCRIPTS) @$(NORMAL_INSTALL) @list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(sbindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \ } \ ; done uninstall-sbinSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(sbindir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(BUILT_SOURCES) $(MAKE) $(AM_MAKEFLAGS) distdir-am distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) installdirs: for dir in "$(DESTDIR)$(sbindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-sbinSCRIPTS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-sbinSCRIPTS .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-sbinSCRIPTS install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am uninstall-sbinSCRIPTS .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: nordugrid-arc-7.1.1/src/utils/archery/PaxHeaders/archery-manage.in0000644000000000000000000000013215067751327022152 xustar0030 mtime=1759498967.788743624 30 atime=1759498967.881493894 30 ctime=1759499031.527304776 nordugrid-arc-7.1.1/src/utils/archery/archery-manage.in0000644000175000002070000025572615067751327024065 0ustar00mockbuildmock00000000000000#!@PYTHON@ from __future__ import print_function # general purpose import os import sys import time import logging import argparse import hashlib import re import json # Connectivity testing import socket # DATA fetching import ldap try: import http.client as httplib except ImportError: import httplib import ssl import xml.etree.ElementTree as ElementTree # DNS processing import dns.rdatatype import dns.resolver import dns.update import dns.query import dns.tsig import dns.tsigkeyring from dns.exception import DNSException # Multithreading from threading import Thread, Lock try: from queue import Queue except ImportError: from Queue import Queue # Software import subprocess # gpg import base64 import tempfile try: from urllib.parse import quote, unquote except ImportError: from urllib import quote, unquote # GLOBAL VARIABLES _fetch_timeout = 10 # Initialize logger logger = logging.getLogger('ARC.ARCHERY-Manage') logger.setLevel(logging.WARNING) log_handler_stderr = logging.StreamHandler() log_handler_stderr.setFormatter( logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] [%(process)d] [%(message)s]')) logger.addHandler(log_handler_stderr) # # GENERAL HELPERS # class HTTPSInsecureConnection(httplib.HTTPSConnection): """Class to make an HTTPS connection without CA Cert verification (compatible with 2.6+ Python)""" def __init__(self, host, port=443, timeout=30): httplib.HTTPSConnection.__init__(self, host, port) self.timeout = timeout def connect(self): """Redefine the sock without CA check enforcement""" sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() # Don't force Server Certificate Check self.sock = ssl.wrap_socket(sock, cert_reqs=ssl.CERT_NONE) class TimeoutQueue(Queue): """FIFO Queue with defined timeout to wait for Queue Join""" def join_with_timeout(self, timeout): self.all_tasks_done.acquire() try: endtime = time.time() + timeout while self.unfinished_tasks: remaining = endtime - time.time() if remaining <= 0: raise OSError('Timeout waiting for the Queue Join') self.all_tasks_done.wait(remaining) finally: self.all_tasks_done.release() def LDAPStrError(ldape): """Get string error from LDAP exception""" if type(ldape) == dict: err = ldape elif len(ldape.args) and type(ldape.args[0]) == dict: err = ldape.args[0] else: return str(ldape) errstr = '' if 'desc' in err: errstr += err['desc'] if 'info' in err: errstr += ' ({0})'.format(err['info']) else: errstr += str(ldape) return errstr # # OUTPUT FORMATTING FUNCTIONS # def output_arc_celist(archery_object, cmd_args): """Output the list of ARC CE hostnames (JSON capable)""" arcce_ids = [s['id'] for s in archery_services(archery_object, 'org.nordugrid.arex')] if cmd_args.json: print(json.dumps(arcce_ids)) else: for ce in arcce_ids: print(ce) def json_config_object(archery_object): """Translate internal extended JSON to expanded JSON config that can be used as a source""" # helper function to use with output_json_config() jconf = {} if 'id' in archery_object: jconf['id'] = archery_object['id'] elif 'dns-name' in archery_object: jconf['dns-name'] = archery_object['dns-name'] if 'type' in
archery_object: jconf['type'] = archery_object['type'] if 'object' in archery_object: kind = archery_object['object'] elif 'reftype' in archery_object: kind = archery_object['reftype'] else: kind = 'group' if 'raw-dns' in archery_object: jconf['raw-dns'] = archery_object['raw-dns'] external_object = None if 'pointer_rr_data' in archery_object: external_object = archery_object['pointer_rr_data'].split(' ')[0].replace('u=', '') if kind == 'service': jconf['endpoints'] = [] if 'endpoints' in archery_object: if archery_object['endpoints']: external_object = None # do not mix endpoints and external for e in archery_object['endpoints']: if 's' in e and e['s'] == '0': jconf['endpoints'].append({ 'url': e['u'], 'type': e['t'], 'status': False }) else: jconf['endpoints'].append({e['u']: e['t']}) if kind == 'software': if 'endpoints' in archery_object: external_object = None for e in archery_object['endpoints']: if 't' in e and e['t'] == 'gpg.pubkey': jconf['pubkey_url'] = e['u'] if kind == 'rte': jconf['name'] = archery_object['id'] del jconf['id'] if 'description' in archery_object: jconf['description'] = unquote(archery_object['description']) if 'endpoints' in archery_object: external_object = None for e in archery_object['endpoints']: if 't' in e and e['t'] == 'gpg.signed': jconf['url'] = e['u'] if kind == 'gpg.pubkey.base64': if 'endpoints' in archery_object: for e in archery_object['endpoints']: if 'rr_data' in e: jconf['pubkey'] = e['rr_data'] if kind == 'gpg.signed.base64': if 'endpoints' in archery_object: for e in archery_object['endpoints']: if 'rr_data' in e: jconf['data'] = e['rr_data'] if 'contains' in archery_object: if archery_object['contains']: external_object = None # do not mix natively nested objects and external for child in archery_object['contains']: ckind, cobj = json_config_object(child) if ckind == 'service': if 'services' not in jconf: jconf['services'] = [] jconf['services'].append(cobj) elif ckind == 'software': jconf['software'] = cobj elif ckind == 'rte': if 'rtes' not in jconf: jconf['rtes'] = [] jconf['rtes'].append(cobj) elif ckind == 'gpg.pubkey.base64': if 'pubkey' in cobj: jconf['pubkey'] = cobj['pubkey'] elif ckind == 'gpg.signed.base64': if 'data' in cobj: jconf['data'] = cobj['data'] elif ckind == 'group': if 'groups' not in jconf: jconf['groups'] = [] jconf['groups'].append(cobj) if external_object is not None: jconf['external-archery-object'] = external_object # do not put leftovers to external object config for accidental_key in ['id', 'type', 'endpoints', 'contains']: if accidental_key in jconf: del jconf[accidental_key] return kind, jconf def output_json_config(archery_object, cmd_args): """Output the entire ARCHERY internal object in JSON to stdout (for debugging purposes)""" _, jconf = json_config_object(archery_object) print(json.dumps(jconf, indent=2)) def output_internal_object(archery_object, cmd_args): """Output the entire ARCHERY internal object in JSON to stdout (for debugging purposes)""" print(json.dumps(archery_object, indent=2)) def output_endpoints(archery_object, cmd_args): """Output the list of endpoints with types (JSON capable)""" elist = archery_endpoints(archery_object) if cmd_args.json: print(json.dumps(elist)) else: for e in elist: if 's' in e and e['s'] != '1': if not cmd_args.output_all: continue print('{u:<60} : {t}'.format(**e)) def output_services(archery_object, cmd_args): """Output the list of services with types (JSON capable)""" slist = archery_services(archery_object) if cmd_args.json: print(json.dumps(slist)) else: for s 
in slist: if 's' in s and s['s'] != '1': if not cmd_args.output_all: continue print('{id:<60} : {type}'.format(**s)) def txt_255(txt, getlist=False): """TXT records have a 255-byte limit and should be split into subsequent strings if longer""" txtlen = len(txt) if txtlen <= 255: if getlist: return [txt] return '"' + txt + '"' # split by 255 clen = 0 parts = [] while clen < txtlen: parts.append(txt[clen:clen + 254]) clen += 254 if getlist: return parts return '"' + '" "'.join(parts) + '"'
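# For example (illustrative values), a 300-character string is split into a
# 254-character chunk followed by a 46-character chunk:
#   txt_255('a' * 300)                ->  '"aaa...a" "aa...a"' (two quoted strings)
#   txt_255('a' * 300, getlist=True)  ->  ['a' * 254, 'a' * 46]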
check""" __uri_re = re.compile(r'^(?P(?:[^:]+)://(?P[^:/]+):(?P[0-9]+))/*.*') def __init__(self, args=None): super(EndpointFilterPortscan, self).__init__() self.__args = args self.timeout = _fetch_timeout @staticmethod def type(): return 'portscan' def filter(self, endpoint_dict): uri_data = self.__uri_re.match(endpoint_dict['u']) if uri_data: uri_parms = uri_data.groupdict() s = socket.socket() address = uri_parms['host'] port = int(uri_parms['port']) try: logger.debug('Testing connectivity to %s:%s network endpoint', address, port) s.settimeout(self.timeout) s.connect((address, port)) except Exception as err: logger.info('Endpoint %s (type %s) filtered (port connectivity filter) %s', endpoint_dict['u'], endpoint_dict['t'], str(err)) return True finally: s.close() logger.debug('Endpoint %s (type %s) is allowed by port connectivity filter', endpoint_dict['u'], endpoint_dict['t']) return False else: logger.error('Endpoint %s (type %s) filtered (port connectivity filter). Failed to parse URI.', endpoint_dict['u'], endpoint_dict['t']) return True def help(self): print('Endpoint port connectivity filter: \'-f portscan\'') class EndpointFilterAllowedVO(EndpointFilter): """Filter endpoints by allowed VO information in LDAP""" __uri_re = re.compile(r'^(?P(?P[^:/]+)://(?P[^:/]+)(?P:[0-9]+))/*.*') __ldap_uri_re = re.compile(r'^(?Pldap://(?P[^:/]+)(?::[0-9]+))/(?P.*)') def __init__(self, args=''): super(EndpointFilterAllowedVO, self).__init__() self.filter_on_fetch = True self._allowed_vos = args.split(',') self.timeout = _fetch_timeout @staticmethod def type(): return 'vo' def filter(self, endpoint_dict): if 'vos' not in endpoint_dict: logger.debug('No VO policy defined for endpoint %s (type %s). Filter will not block it.', endpoint_dict['u'], endpoint_dict['t']) return False for vo in self._allowed_vos: if vo not in endpoint_dict['vos']: logger.info('Endpoint %s (type %s) filtered (allowed VO filter)', endpoint_dict['u'], endpoint_dict['t']) return True logger.debug('Endpoint %s (type %s) is allowed by defined VO filter', endpoint_dict['u'], endpoint_dict['t']) return False def help(self): print('Endpoint allowed VO filter: \'-f vo:[,[...]]\'') _filters = { 'type': EndpointFilterType, 'vo': EndpointFilterAllowedVO, 'portscan': EndpointFilterPortscan } def filter_endpoints(archery_object, filters): """Recursively loop over archery onject and apply filters""" if archery_object['object'] == 'service': if 'endpoints' in archery_object and archery_object['endpoints']: filtered_endpoints = [] for e in archery_object['endpoints']: filtered = False for fo in filters: if not fo.on_fetch(): if fo.filter(e): filtered = True break if not filtered: filtered_endpoints.append(e) archery_object['endpoints'] = filtered_endpoints if not filtered_endpoints: return False else: if 'contains' in archery_object: filtered_contains = [] for c in archery_object['contains']: if filter_endpoints(c, filters): filtered_contains.append(c) archery_object['contains'] = filtered_contains return True # # ARCHERY DATA PROCESSING # def archery_endpoints(archery_object, etype=None): """Return list of endpoint data from ARCHERY object tree""" endpoints = [] # add endpoint records if 'endpoints' in archery_object: for edata in archery_object['endpoints']: # filter by endtpoint type if requested if etype is not None: if edata['t'] != etype: continue endpoints.append(edata) # process child records if 'contains' in archery_object: for cdata in archery_object['contains']: endpoints += archery_endpoints(cdata, etype) return endpoints def 
archery_services(archery_object, stype=None): """Return list of services data from ARCHERY object tree""" services = [] # detect type of object if 'object' in archery_object: object_kind = archery_object['object'] elif 'contains' in archery_object and archery_object['contains']: object_kind = 'group' else: object_kind = 'service' # process data if object_kind == 'group': if 'contains' in archery_object: for cdata in archery_object['contains']: services += archery_services(cdata, stype) elif object_kind == 'service': # filter by service type if stype is not None: if 'type' not in archery_object: logger.debug('There is no service type defined for service object at %s. Skipping.', archery_object['rr_owner']) return services if archery_object['type'] != stype: logger.debug('Skipping service object at %s (type %s does not match requested %s).', archery_object['rr_owner'], archery_object['type'], stype) return services # check for service id if 'id' not in archery_object: logger.debug('There is no ID defined for service object at %s. Skipping.', archery_object['rr_owner']) return services # append service services.append({ 'id': archery_object['id'], 'type': archery_object['type'] if 'type' in archery_object else None, }) return services def archery_txt_rrset(archery_object, parent_rr_owner=''): """Return set of TXT RRs for ARCHERY object tree""" rrset = set() if 'rr_owner' not in archery_object: logger.error('Malformed archery object to generate RRSet data. Execution aborted.') sys.exit(1) rr_owner = archery_object['rr_owner'] if parent_rr_owner: rr_owner += '.' + parent_rr_owner # construct object record (if not already exists) if 'rr_data' not in archery_object and 'object' in archery_object: rr = 'o=' + archery_object['object'] if 'type' in archery_object: rr += ' t=' + archery_object['type'].replace(' ', '-') if 'id' in archery_object: rr += ' id=' + archery_object['id'].replace(' ', '-') if 'description' in archery_object: rr += ' d=' + archery_object['description'].replace(' ', '-') # group object without type and id is the default behaviour (no RRSet needed) if rr != 'o=group': archery_object['rr_data'] = rr # add endpoint records has_endpoints = False if 'endpoints' in archery_object and archery_object['endpoints']: for edata in archery_object['endpoints']: # construct TXT rendering for endpoint record (if not already exists) if 'rr_data' not in edata: estatus = '' if 's' in edata and edata['s'] != 1: estatus = ' s={0}'.format(edata['s']) edata['rr_data'] = 'u={0} t={1}{2}'.format(edata['u'], edata['t'].replace(' ', '-'), estatus) # add service endpoints RRSet rrset.add('{0} {1}'.format(rr_owner, edata['rr_data'])) has_endpoints = True # add object id-record RRSet (if defined and not dummy service) if 'rr_data' in archery_object: if has_endpoints or archery_object['rr_data'] != 'o=service': rrset.add('{0} {1}'.format(rr_owner, archery_object['rr_data'])) # add child records if 'contains' in archery_object: for cdata in archery_object['contains']: # construct TXT rendering for pointer record (if not already exists) if 'pointer_rr_data' not in cdata: # status estatus = '' if 'status' in cdata and cdata['status'] != 1: estatus = ' s={0}'.format(cdata['status']) # child type if 'reftype' in cdata: ctype = cdata['reftype'] elif 'object' in cdata: ctype = 'archery.' 
+ cdata['object'] else: if 'contains' in cdata and len(cdata['contains']) > 0: ctype = 'archery.group' else: ctype = 'archery.service' cdata['pointer_rr_data'] = 'u=dns://{0}.{1} t={2}{3}'.format( cdata['rr_owner'], rr_owner, ctype, estatus ) # add pointed record RRSet prr_owner = rr_owner if not parent_rr_owner: # predefined entry point prr_owner = '_archery' if rr_owner: prr_owner += '.' + rr_owner rrset.add('{0} {1}'.format(prr_owner, cdata['pointer_rr_data'])) # add child object data RRSet rrset |= archery_txt_rrset(cdata, rr_owner) return rrset # # INFORMATION SOURCES PROCESSING FUNCTIONS # def get_file_celist(fpath): """Load hostnames from static list stored in file""" ce_list = [] try: with open(fpath, 'r') as fd: ce_list = [line.strip() for line in fd] return ce_list except EnvironmentError: logger.error('Failed to open file %s to read ARC CE list', fpath) return ce_list def get_egiis_celist(egiis_uri, conn_timeout=_fetch_timeout): """Fetch CE hostnames from EGIIS (for migration)""" ce_list = [] ldap_uri_re = re.compile(r'^(?P<uri>ldap://[^:/]+(?::[0-9]+))/(?P<basedn>.*)') parse_egiis_uri = ldap_uri_re.match(egiis_uri) if parse_egiis_uri: egiis_params = parse_egiis_uri.groupdict() ldap_uri = egiis_params['uri'] ldap_basedn = egiis_params['basedn'] else: logger.error('Failed to parse provided EGIIS URL %s. ' 'Expected format ldap://<host>:<port>/mds-vo-name=<name>,o=grid. ', egiis_uri) return ce_list try: ldap_conn = ldap.initialize(ldap_uri) ldap_conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) logger.debug('Querying EGIIS: %s', egiis_uri) egiis_entries = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_BASE) if egiis_entries is None: logger.error('EGIIS %s query returns empty result set.', egiis_uri) return ce_list for egiis_dn, egiis_entry in egiis_entries: if egiis_dn.startswith('nordugrid-cluster-name='): ce_list.append(egiis_entry['Mds-Service-hn'][0].decode()) else: ce_list += get_egiis_celist('ldap://{Mds-Service-hn[0]}:2135/' '{Mds-Service-Ldap-suffix[0]}'.format(**egiis_entry), conn_timeout) except ldap.LDAPError as err: logger.warning('Failed to query EGIIS %s. Error: %s', egiis_uri, LDAPStrError(err)) return list(set(ce_list)) # # INFO ENDPOINTS PROCESSING FUNCTION #
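# All endpoint fetchers below build endpoints as plain dicts; schematically
# (hostname and VO values here are illustrative examples):
#   {'id': '<endpoint id>', 'u': 'https://ce.example.org:443/arex',
#    't': 'org.nordugrid.arcrest', 's': '0', 'vos': ['atlas']}
# 's' is added only when the endpoint health state is not OK, and 'vos' only
# when a VO filter requires the access policy data to be fetched.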
def get_arc_ce_endpoints_arcrest(hostname, port=443, conn_timeout=_fetch_timeout, filters=None): """Get ARC CE endpoints by querying ARC REST info endpoint""" # define filters fetch_vos = False if filters is None: filters = [] else: for f in filters: if f.type() == 'vo': fetch_vos = True break endpoints = [] # Query REST info endpoint req_path = '/arex/rest/1.0/info?schema=glue2' req_headers = {'Accept': 'application/xml'} conn = HTTPSInsecureConnection(hostname, port, timeout=conn_timeout) try: # fetch the data conn.request('GET', req_path, headers=req_headers) response = conn.getresponse() if response.status != 200: logger.error('Failed to get info from ARC REST API at https://%s:%s%s. HTTP reason: %s', hostname, port, req_path, response.reason) else: # parse the XML try: info_xml = ElementTree.fromstring(response.read()) for e in info_xml.findall(".//*[@BaseType='Endpoint']"): e_id = e.find('{*}ID').text e_url = e.find('{*}URL').text e_type = e.find('{*}InterfaceName').text e_entry = {'id': e_id, 'u': e_url, 't': e_type} if e.find('{*}HealthState').text.upper() != 'OK': e_entry['s'] = '0' # fetch access policy if VO filtering is requested if fetch_vos: e_vos = [] for policy_rule in e.findall('{*}AccessPolicy/{*}Rule'): e_vos.append(policy_rule.text.split(':', 1)[1]) if e_vos: e_entry['vos'] = e_vos # apply filters if any for fo in filters: if fo.on_fetch(): if fo.filter(e_entry): break else: # add endpoint if not filtered logger.debug('Found endpoint %s (type %s) for ARC CE %s', e_url, e_type, hostname) endpoints.append(e_entry) except ElementTree.ParseError as err: logger.error('Failed to parse info XML. Error: %s', str(err)) except Exception as e: logger.error('Failed to query ARC REST API at https://%s:%s%s. Error: %s', hostname, port, req_path, e) # fallback to LDAP GLUE2 for ARC CEs without REST enabled if not endpoints: logger.warning('There are no endpoints fetched for %s using ARC REST. Falling back to LDAP GLUE2.', hostname) endpoints = get_arc_ce_endpoints_ldapglue2(hostname, 2135, conn_timeout, filters) return endpoints
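# The REST info document is GLUE2 XML; the parser above matches any element
# with BaseType="Endpoint" and reads its children, schematically (namespaces
# omitted, element and attribute values are illustrative):
#   <ComputingEndpoint BaseType="Endpoint">
#     <ID>urn:ogf:ComputingEndpoint:ce.example.org:443:arcrest</ID>
#     <URL>https://ce.example.org:443/arex</URL>
#     <InterfaceName>org.nordugrid.arcrest</InterfaceName>
#     <HealthState>ok</HealthState>
#     <AccessPolicy><Rule>vo:atlas</Rule></AccessPolicy>
#   </ComputingEndpoint>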
def get_arc_ce_endpoints_ldapglue2(hostname, port=2135, conn_timeout=_fetch_timeout, filters=None): """Get ARC CE endpoints by querying LDAP GLUE2 (fallback to LDAP NG query)""" # define filters fetch_vos = False if filters is None: filters = [] else: for f in filters: if f.type() == 'vo': fetch_vos = True break endpoints = [] ldap_uri = 'ldap://{0}:{1}'.format(hostname, port) ldap_basedn = 'o=glue' ldap_filter = '(objectClass=GLUE2ComputingEndpoint)' ldap_attrs = ['GLUE2EndpointID', 'GLUE2EndpointURL', 'GLUE2EndpointHealthState', 'GLUE2EndpointInterfaceName'] try: ldap_conn = ldap.initialize(ldap_uri) ldap_conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) ldap_endpoints_list = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, ldap_filter, ldap_attrs) if ldap_endpoints_list is None: logger.error('LDAP GLUE2 query for %s returns empty result set.', hostname) return endpoints for ldap_dn, ldap_ee in ldap_endpoints_list: if 'GLUE2EndpointURL' not in ldap_ee: logger.warning( 'Failed to find endpoint URL in LDAP response for DN %s. ' 'It seems GLUE2 rendering is broken for %s.', ldap_dn.decode(), ldap_uri) continue # get endpoint data e_id = ldap_ee['GLUE2EndpointID'][0].decode() e_url = ldap_ee['GLUE2EndpointURL'][0].decode() e_type = ldap_ee['GLUE2EndpointInterfaceName'][0].decode() e_entry = {'id': e_id, 'u': e_url, 't': e_type} if ldap_ee['GLUE2EndpointHealthState'][0].decode().upper() != 'OK': e_entry['s'] = '0' # fetch access policy if VO filtering is requested if fetch_vos: ldap_vo_filter = '(&(objectClass=GLUE2AccessPolicy)' \ '(GLUE2AccessPolicyEndpointForeignKey={0}))'.format(e_id) logger.debug('Querying AccessPolicy for endpoint %s (type %s)', e_url, e_type) vo_q_res = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, ldap_vo_filter, ['GLUE2PolicyRule']) if vo_q_res: for(_, policy_list) in vo_q_res: if 'GLUE2PolicyRule' in policy_list: e_vos = [v.decode().split(':', 1)[1] for v in policy_list['GLUE2PolicyRule']] if e_vos: e_entry['vos'] = e_vos # apply filters if any for fo in filters: if fo.on_fetch(): if fo.filter(e_entry): break else: # add endpoint if not filtered logger.debug('Found endpoint %s (type %s) for ARC CE %s', e_url, e_type, hostname) endpoints.append(e_entry) except (ldap.SERVER_DOWN, ldap.CONNECT_ERROR, ldap.TIMEOUT) as err: logger.error('Failed to connect to LDAP server for %s CE. Error: %s', hostname, LDAPStrError(err)) return endpoints except ldap.LDAPError as err: logger.error('Failed to query LDAP GLUE2 for %s. Error: %s', hostname, LDAPStrError(err)) # fallback to LDAP NG for classic legacy ARC CEs without GLUE2 support if not endpoints: logger.warning('There are no endpoints fetched for %s using LDAP GLUE2. Falling back to LDAP NG.', hostname) endpoints = get_arc_ce_endpoints_ldapng(hostname, port, conn_timeout, filters) return endpoints
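# The same GLUE2 query can be reproduced by hand for debugging; an
# illustrative invocation (hostname is an example):
#   ldapsearch -x -H ldap://ce.example.org:2135 -b o=glue \
#       '(objectClass=GLUE2ComputingEndpoint)' \
#       GLUE2EndpointID GLUE2EndpointURL GLUE2EndpointInterfaceName GLUE2EndpointHealthState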
' 'It seems NG rendering is broken for %s.', ldap_dn.decode(), ldap_uri) continue # get endpoint data e_id = ldap_ee['nordugrid-cluster-name'][0].decode() e_url = ldap_ee['nordugrid-cluster-contactstring'][0].decode() e_type = 'org.nordugrid.gridftpjob' e_entry = {'id': e_id, 'u': e_url, 't': e_type} # get authorized VOs if available if 'nordugrid-cluster-acl' in ldap_ee: e_vos = [v.decode().split(':', 1)[1] for v in ldap_ee['nordugrid-cluster-acl']] if e_vos: e_entry['vos'] = e_vos # apply filters if any for fo in filters: if fo.on_fetch(): if fo.filter(e_entry): break else: # add endpoint if not filtered logger.debug('Found endpoint %s (type %s) for ARC CE %s', e_url, e_type, hostname) endpoints.append(e_entry) # also add ldapng endpoint to comply with gridftpjob ldapng_uri = '{0}/{1}'.format(ldap_uri, ldap_basedn) ldapng_entry = {'id': ldapng_uri, 'u': ldapng_uri, 't': 'org.nordugrid.ldapng'} endpoints.append(ldapng_entry) except ldap.LDAPError as err: logger.error('Failed to query LDAP NG for %s. Error: %s', hostname, LDAPStrError(err)) return endpoints def _ldap_uri_dict(uri): """Parse LDAP URI and return the dict of URI components""" __ldap_uri_re = re.compile(r'^(?P<uri>ldap://(?P<host>[^:/]+)(?::[0-9]+))/(?P<basedn>.*)') ldap_uri_match = __ldap_uri_re.match(uri) if ldap_uri_match: ldap_uri_dict = ldap_uri_match.groupdict() else: logger.error('Cannot parse URI %s as LDAP URI. Skipping information fetching.', uri) return None return ldap_uri_dict def get_sitebdii_endpoints_ldapglue1(uri, conn_timeout=_fetch_timeout, filters=None): """Get services and their endpoints by querying Site-BDII LDAP GLUE1""" ldap_uri_dict = _ldap_uri_dict(uri) if ldap_uri_dict is None: return [] if filters is None: filters = [] services = {} ldap_uri = ldap_uri_dict['uri'] ldap_basedn = ldap_uri_dict['basedn'] try: ldap_conn = ldap.initialize(ldap_uri) ldap_conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) # Query info (3 completely different kinds of objects in Glue1: Service, CE and SE) ldap_service_filter = '(|(objectClass=GlueService)(objectClass=GlueCE)' \ '(objectClass=GlueSE)(objectClass=GlueSEControlProtocol)' \ '(objectClass=GlueSEAccessProtocol))' # Service object attributes ldap_service_attrs = ['GlueServiceEndpoint', 'GlueServiceStatus', 'GlueServiceType', 'GlueServiceName'] # CE attribute ldap_service_attrs += ['GlueCEInfoContactString', 'GlueCEImplementationName', 'GlueInformationServiceURL', 'GlueForeignKey'] # SE attributes ldap_service_attrs += ['GlueSEImplementationName', 'GlueSEUniqueID', 'GlueChunkKey', 'GlueSEControlProtocolEndpoint', 'GlueSEControlProtocolType', 'GlueSEAccessProtocolEndpoint', 'GlueSEAccessProtocolType'] glue1_data = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, ldap_service_filter, ldap_service_attrs) if glue1_data is None: logger.error('Site-BDII LDAP GLUE1.3 query for %s/%s returns empty result set.', ldap_uri, ldap_basedn) return [] if filters: logger.warning('No on-fetch filters support for legacy GLUE1.3') for ldap_dn, ldap_data in glue1_data: if 'GlueServiceEndpoint' in ldap_data: # General service object parsing (both service and endpoint) s_id = ldap_data['GlueServiceName'][0].decode() se_type = ldap_data['GlueServiceType'][0].decode() if s_id not in services: services[s_id] = {'object': 'service', 'type': se_type, 'id': s_id, 'endpoints': []} services[s_id]['rr_owner'] = dns_rr_owner_name(services[s_id], ldap_dn) e_id = ldap_dn.decode() e_url = 
ldap_data['GlueServiceEndpoint'][0].decode() e_entry = {'id': e_id, 'u': e_url, 't': se_type} if ldap_data['GlueServiceStatus'][0].decode().upper() != 'OK': e_entry['s'] = '0' services[s_id]['endpoints'].append(e_entry) elif 'GlueCEInfoContactString' in ldap_data: # CE object parsing (both service and endpoint) s_id = ldap_data['GlueForeignKey'][0].decode() s_id = s_id[20:] # remove GlueClusterUniqueID= s_type = ldap_data['GlueCEImplementationName'][0].decode() se_type = s_type ie_type = s_type if s_type == 'CREAM': se_type = 'org.glite.ce.CREAM' ie_type = 'bdii_site' elif s_type == 'ARC-CE': se_type = 'org.nordugrid.gridftpjob' ie_type = 'org.nordugrid.ldapng' se_url = ldap_data['GlueCEInfoContactString'][0].decode() ie_url = ldap_data['GlueInformationServiceURL'][0].decode() if s_id not in services: services[s_id] = {'object': 'service', 'type': s_type, 'id': s_id, 'endpoints': []} services[s_id]['rr_owner'] = dns_rr_owner_name(services[s_id], ldap_dn) se_entry = {'id': se_url, 'u': se_url, 't': se_type} services[s_id]['endpoints'].append(se_entry) ie_entry = {'id': ie_url, 'u': ie_url, 't': ie_type} services[s_id]['endpoints'].append(ie_entry) elif 'GlueSE' in ldap_data: # SE object (service) s_id = ldap_data['GlueSEUniqueID'][0].decode() s_type = ldap_data['GlueSEImplementationName'][0].decode() services[s_id] = {'object': 'service', 'type': s_type, 'id': s_id, 'endpoints': []} elif 'GlueChunkKey' in ldap_data: # SE endpoint objects s_id = ldap_data['GlueChunkKey'][0].decode() s_id = s_id[15:] # remove GlueSEUniqueID= if 'GlueSEControlProtocolEndpoint' in ldap_data: e_url = ldap_data['GlueSEControlProtocolEndpoint'][0].decode() e_type = ldap_data['GlueSEControlProtocolType'][0].decode() else: e_url = ldap_data['GlueSEAccessProtocolEndpoint'][0].decode() e_type = ldap_data['GlueSEAccessProtocolType'][0].decode() if s_id not in services: continue e_entry = {'id': e_url, 'u': e_url, 't': e_type} services[s_id]['endpoints'].append(e_entry) else: logger.warning( 'Failed to find any known service data in the LDAP response for DN %s. ' 'It seams GLUE1.3 rendering is broken for %s/%s.', ldap_dn.decode(), ldap_uri, ldap_basedn) continue except ldap.LDAPError as err: logger.error('Failed to query LDAP GLUE1.3 for %s/%s. 
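# --- Worked example (values are placeholders) of the LDAP URI split
# performed by _ldap_uri_dict() above; the named groups 'uri' and 'basedn'
# are the two components consumed by the Site-BDII fetchers.
import re

_re = re.compile(r'^(?P<uri>ldap://(?P<host>[^:/]+)(?::[0-9]+))/(?P<basedn>.*)')
m = _re.match('ldap://sitebdii.example.org:2170/mds-vo-name=EXAMPLE,o=grid')
print(m.groupdict())
# -> {'uri': 'ldap://sitebdii.example.org:2170',
#     'host': 'sitebdii.example.org', 'basedn': 'mds-vo-name=EXAMPLE,o=grid'}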
Error: %s', ldap_uri, ldap_basedn, LDAPStrError(err)) return services.values() def get_sitebdii_endpoints_ldapglue2(uri, conn_timeout=_fetch_timeout, filters=None): """Get services and their endpoints by querying Site-BDII LDAP GLUE2""" ldap_uri_dict = _ldap_uri_dict(uri) if ldap_uri_dict is None: return [] # define filters fetch_vos = False if filters is None: filters = [] else: for f in filters: if f.type() == 'vo': fetch_vos = True break services = {} ldap_uri = ldap_uri_dict['uri'] glue1_fallback = False # construct GLUE2 base DN ldap_basedn = ldap_uri_dict['basedn'] if ldap_basedn.endswith('o=grid'): glue1_fallback = True # legacy glue1.3 basedn given: remove suffix, replace mds-vo-name ldap_basedn = ldap_basedn[:-6] + 'o=glue' ldap_basedn = 'GLUE2DomainID' + ldap_basedn[11:] try: ldap_conn = ldap.initialize(ldap_uri) ldap_conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_TIMEOUT, conn_timeout) ldap_conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) # Query services info ldap_service_filter = '(objectClass=GLUE2Service)' ldap_service_attrs = ['GLUE2ServiceID', 'GLUE2ServiceType'] service_info = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, ldap_service_filter, ldap_service_attrs) if service_info is None: logger.error('Site-BDII LDAP GLUE2 query for %s/%s returns empty result set.', ldap_uri, ldap_basedn) return [] for ldap_dn, ldap_s in service_info: if 'GLUE2ServiceID' not in ldap_s: logger.warning( 'Failed to find service ID in the LDAP response for DN %s. ' 'It seams GLUE2 rendering is broken for %s/%s.', ldap_dn.decode(), ldap_uri, ldap_basedn) continue # get service data s_id = ldap_s['GLUE2ServiceID'][0].decode() s_type = ldap_s['GLUE2ServiceType'][0].decode() services[s_id] = {'object': 'service', 'type': s_type, 'id': s_id, 'endpoints': []} logger.debug('Found service %s (type %s)', s_id, s_type) # Query endpoints info ldap_endpoints_filter = '(objectClass=GLUE2Endpoint)' ldap_endpoints_arrts = ['GLUE2EndpointID', 'GLUE2EndpointURL', 'GLUE2EndpointInterfaceName', 'GLUE2EndpointHealthState', 'GLUE2EndpointServiceForeignKey'] endpoints_info = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, ldap_endpoints_filter, ldap_endpoints_arrts) for ldap_dn, ldap_ee in endpoints_info: if 'GLUE2EndpointURL' not in ldap_ee: logger.warning( 'Failed to find endpoint URL in LDAP response for DN %s. 
' 'It seams GLUE2 rendering is broken for %s.', ldap_dn.decode(), ldap_uri, ldap_basedn) continue e_id = ldap_ee['GLUE2EndpointID'][0].decode() e_url = ldap_ee['GLUE2EndpointURL'][0].decode() e_type = ldap_ee['GLUE2EndpointInterfaceName'][0].decode() e_entry = {'id': e_id, 'u': e_url, 't': e_type} if ldap_ee['GLUE2EndpointHealthState'][0].decode().upper() != 'OK': e_entry['s'] = '0' e_service = ldap_ee['GLUE2EndpointServiceForeignKey'][0].decode() # fetch access policy if VO filtering is requested if fetch_vos: ldap_vo_filter = '(&(objectClass=GLUE2AccessPolicy)' \ '(GLUE2AccessPolicyEndpointForeignKey={0}))'.format(e_id) logger.debug('Querying AccessPolicy for endpoint %s (type %s)', e_url, e_type) vo_q_res = ldap_conn.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, ldap_vo_filter, ['GLUE2PolicyRule']) if vo_q_res: for(_, policy_list) in vo_q_res: if 'GLUE2PolicyRule' in policy_list: e_vos = [v.decode().split(':', 1)[1] for v in policy_list['GLUE2PolicyRule'] if v.decode().lower().startswith('vo:')] if e_vos: e_entry['vos'] = e_vos # apply filters if any for fo in filters: if fo.on_fetch(): if fo.filter(e_entry): break else: # add endpoint if not filtered logger.debug('Found endpoint %s (type %s) for %s service', e_url, e_type, e_service) if e_service not in services: logger.error('Found endpoint %s (type %s) for service ID %s, ' 'but service itself is missing in the rendering.', e_url, e_type, e_service) continue services[e_service]['endpoints'].append(e_entry) except (ldap.SERVER_DOWN, ldap.CONNECT_ERROR, ldap.TIMEOUT) as err: logger.error('Failed to connect to LDAP server %s. Error: %s', ldap_uri, LDAPStrError(err)) return services.values() except ldap.LDAPError as err: logger.error('Failed to query LDAP GLUE2 for %s/%s. Error: %s', ldap_uri, ldap_basedn, LDAPStrError(err)) # fallback to LDAP GLUE1 for legacy Site-BDII without GLUE2 support if not services and glue1_fallback: logger.warning('There are no service endpoints fetched for LDAP GLUE2 URI %s. ' 'Falling back to LDAP GLUE1 site-bdii query.', ldap_uri) return get_sitebdii_endpoints_ldapglue1(uri, conn_timeout, filters) return services.values() _fetch_data_map = { 'arc-rest': get_arc_ce_endpoints_arcrest, 'arc-ldapglue2': get_arc_ce_endpoints_ldapglue2, 'arc-ldapng': get_arc_ce_endpoints_ldapng, 'sitebdii': get_sitebdii_endpoints_ldapglue2, 'sitebdii-glue1': get_sitebdii_endpoints_ldapglue1 } def _worker_info_fetch(fetch_queue, lock): """Worker process to fetch enqueued data and add it to ARCHERY object""" # { method, uri, obj, obj_attr, filters } while True: pdata = fetch_queue.get() logger.debug('Processing %s data fetching', pdata['uri']) fetch_f = _fetch_data_map[pdata['method']] fetch_data = fetch_f(pdata['uri'], filters=pdata['filters']) with lock: # add fetched data to the list if fetch_data: # generate rr_owner for nested objects if pdata['obj_attr'] == 'contains': for fobj in fetch_data: if 'endpoints' in fobj and not fobj['endpoints']: logger.warning('Service %s (type %s) contains no valid endpoints. 
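# --- A minimal stand-in (the real filter classes live elsewhere in
# archery-manage's '_filters' registry; the VO semantics here are
# illustrative) showing the interface the fetchers above rely on:
# type(), on_fetch(), set_on_fetch() and filter(), where filter() returns
# True when an endpoint entry should be dropped.
class VOFilter:
    def __init__(self, args=''):
        self._vos = set(args.split(',')) if args else set()
        self._on_fetch = True
    def type(self):
        return 'vo'
    def on_fetch(self):
        return self._on_fetch
    def set_on_fetch(self):
        self._on_fetch = True
    def help(self):
        print('vo:VO1[,VO2] - keep only endpoints advertising the listed VOs')
    def filter(self, e_entry):
        # drop the endpoint if it advertises none of the requested VOs
        return bool(self._vos and not (self._vos & set(e_entry.get('vos', []))))

print(VOFilter('atlas').filter({'u': 'https://ce.example.org', 'vos': ['ops']}))  # True -> dropped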
Skipping.', fobj['id'], fobj['type']) continue fobj['rr_owner'] = dns_rr_owner_name(fobj, pdata['obj']['rr_owner']) pdata['obj']['contains'].append(fobj) else: pdata['obj'][pdata['obj_attr']] += fetch_data # handle status (mark as inactive if no endpoints are fetched) if not pdata['obj'][pdata['obj_attr']]: pdata['obj']['status'] = 0 elif 'status' in pdata['obj']: del pdata['obj']['status'] fetch_queue.task_done() def enqueue_object_data_fetch(fetch_queue, archery_object, applied_filters=None): """Process topology tree and enqueue object data to be fetched""" if 'endpoints' in archery_object: if 'endpoints_fetch' in archery_object: for fetch_method in archery_object['endpoints_fetch'].keys(): fetch_uri = archery_object['endpoints_fetch'][fetch_method] logger.debug('Enqueueing endpoints data fetch from %s using %s method.', fetch_uri, fetch_method) fetch_queue.put({ 'method': fetch_method, 'uri': fetch_uri, 'obj': archery_object, 'obj_attr': 'endpoints', 'filters': applied_filters }) if 'contains' in archery_object: if 'contains_fetch' in archery_object: # per-source filters from config source_filters = [] if 'filters' in archery_object['contains_fetch']: source_filters.extend(get_configured_fillters(archery_object['contains_fetch']['filters'], True)) if applied_filters is not None: source_filters.extend(applied_filters) for fetch_method in archery_object['contains_fetch'].keys(): if fetch_method == 'filters': continue fetch_uri = archery_object['contains_fetch'][fetch_method] logger.debug('Enqueueing group data fetch from %s using %s method.', fetch_uri, fetch_method) fetch_queue.put({ 'method': fetch_method, 'uri': fetch_uri, 'obj': archery_object, 'obj_attr': 'contains', 'filters': source_filters }) # recursively process tree for child_object in archery_object['contains']: enqueue_object_data_fetch(fetch_queue, child_object, applied_filters) def get_configured_fillters(filters_list=None, force_on_fetch=False): """Return list of filtering objects from """ applied_filters = [] if filters_list is not None: for f in filters_list: if f == 'help': print('Supported filters are:') for _, fclass in _filters.items(): fci = fclass() fci.help() sys.exit(0) fdef = f.split(':', 1) ftype = fdef[0] fargs = fdef[1] if len(fdef) > 1 else '' if ftype not in list(_filters.keys()): logger.error('Ignoring bad filter definition: %s', f) continue fclass = _filters[ftype] fobj = fclass(fargs) if force_on_fetch: fobj.set_on_fetch() applied_filters.append(fobj) return applied_filters def fetch_infosys_data(archery_object, applied_filters=None, threads=10): """Fetch infosys data to be added into the ARCHERY""" # create queue and object lock fetch_queue = TimeoutQueue() object_lock = Lock() # recursively add fetch tasks to the fetch queue enqueue_object_data_fetch(fetch_queue, archery_object, applied_filters) # start worker threads for i in range(threads): logger.debug('Staring worker thread %s to fetch infosys data.', i) worker = Thread(target=_worker_info_fetch, args=(fetch_queue, object_lock,)) worker.setDaemon(True) worker.start() # wait for parallel fetch to complete logger.info('Waiting for endpoint data fetching completion...') # make it killable while not fetch_queue.empty(): time.sleep(0.3) # join with timeout (in case of some stuck ldap connections) try: fetch_queue.join_with_timeout(_fetch_timeout*3) except OSError as e: logger.error(str(e)) sys.exit(1) # # ARCHERY TOPOLOGY PROCESSING # # FLAT ARC-CE LIST def get_arcce_topology(ce_list, rr_owner=''): """Create ARCHERY data object that represent ARC CE 
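# --- A minimal standard-library sketch of the fetch pattern used by
# fetch_infosys_data() above: N daemon worker threads drain a queue and
# mutate shared state under a lock. TimeoutQueue is an archery-manage
# helper (a Queue whose join() can time out); a plain Queue is shown here.
from queue import Queue
from threading import Thread, Lock

tasks, lock, results = Queue(), Lock(), []

def worker():
    while True:
        item = tasks.get()
        with lock:              # shared-state mutation is serialized
            results.append(item * 2)
        tasks.task_done()

for _ in range(4):
    Thread(target=worker, daemon=True).start()
for i in range(10):
    tasks.put(i)
tasks.join()                    # blocks until every task is processed
print(sorted(results))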
flat CE list topology""" archery_object = { 'object': 'group', 'rr_owner': rr_owner, 'contains': [], } for ce in ce_list: service_object = { 'object': 'service', 'type': 'org.nordugrid.arex', 'id': ce, 'endpoints': [] } service_object['rr_owner'] = dns_rr_owner_name(service_object, rr_owner) service_object['endpoints_fetch'] = { 'arc-rest': ce } archery_object['contains'].append(service_object) return archery_object # JSON CONFIG def get_json_topology(json_file, rr_owner='', timeout=_fetch_timeout): """Create ARCHERY data object that represent arbitrary topology defined in JSON config file""" try: with open(json_file, 'r') as jconf_f: jconf = json.load(jconf_f) except IOError as err: logger.error('Failed to open JSON config file %s. Error: %s', json_file, str(err)) sys.exit(1) except ValueError as err: logger.error('Failed to parse JSON config file %s. Error: %s', json_file, str(err)) sys.exit(1) # process groups recursively and return archery-manage internal object archery_object = group_object_from_json(jconf, rr_owner) # support raw DNS records only on the top-level if 'raw-dns' in jconf: archery_object['raw-dns'] = jconf['raw-dns'] return archery_object # JSON: helpers for groups/services def service_object_from_json(sconf): """Define service object content based on JSON config""" sobj = { 'object': 'service', 'endpoints': [] } if 'external-archery-object' in sconf: sobj['pointer_rr_data'] = 'u={0} t=archery.service'.format(sconf['external-archery-object']) return sobj if 'id' in sconf: sobj['id'] = sconf['id'] else: logger.error('Service description in config is missing mandatory "id" attribute. Service will be skipped. ' 'Provided JSON service description: %s', json.dumps(sconf)) return None if 'type' in sconf: sobj['type'] = sconf['type'] else: logger.error('Service description in config is missing mandatory "type" attribute. Service will be skipped. 
' 'Provided JSON service description: %s', json.dumps(sconf)) return None if 'endpoints' in sconf: for edict in sconf['endpoints']: erecord = {} for ekey in edict.keys(): if ekey == 'url': erecord['u'] = edict[ekey] elif ekey == 'type': erecord['t'] = edict[ekey] elif ekey == 'status': if not edict[ekey]: erecord['s'] = '0' else: erecord['u'] = ekey erecord['t'] = edict[ekey] sobj['endpoints'].append(erecord) return sobj def group_object_from_json(jconf, rr_owner): """Define service object content based on JSON config""" archery_object = { 'object': 'group', 'rr_owner': rr_owner, 'contains': [], } if 'external-archery-object' in jconf: archery_object['pointer_rr_data'] = 'u={0} t=archery.group'.format(jconf['external-archery-object']) return archery_object if 'arc-services' in jconf: archery_arcces_object = get_arcce_topology(jconf['arc-services'], rr_owner) archery_object['contains'].extend(archery_arcces_object['contains']) if 'services' in jconf: for sconf in jconf['services']: sobj = service_object_from_json(sconf) if sobj is not None: sobj['rr_owner'] = dns_rr_owner_name(sobj, rr_owner) archery_object['contains'].append(sobj) if 'id' in jconf: archery_object['id'] = jconf['id'] if 'dns-name' in jconf: archery_object['dns-name'] = jconf['dns-name'] if 'type' in jconf: archery_object['type'] = jconf['type'] if 'external-source' in jconf: archery_object['contains_fetch'] = jconf['external-source'] if 'software' in jconf: swobj = software_object_from_json(jconf['software']) if swobj is not None: archery_object['contains'].append(swobj) if 'groups' in jconf: g_idx = 0 for gconf in jconf['groups']: g_idx += 1 if 'dns-name' in gconf: g_rr_owner = gconf['dns-name'] elif 'id' in gconf: g_rr_owner = gconf['id'].replace(' ', '-') else: gconf['dns-name'] = dns_rr_owner_name(gconf, 'group{0}.{1}'.format(g_idx, rr_owner)) g_rr_owner = gconf['dns-name'] archery_object['contains'].append(group_object_from_json(gconf, g_rr_owner)) return archery_object # JSON: helpers for software objects def __get_rte_description(rte_path): """Extract embedded RTE description from RTE file""" with open(rte_path) as rte_f: max_lines = 10 description = None for line in rte_f: descr_re = re.match(r'^#+\s*description:\s*(.*)\s*$', line, flags=re.IGNORECASE) if descr_re: description = descr_re.group(1) max_lines -= 1 if not max_lines: break return description def __get_dir_rtes(rtedir): """Get all RTEs defined by classic directory structure""" rtes = {} for path, _, files in os.walk(rtedir): rtebase = path.lstrip(rtedir + '/') for f in files: rtename = rtebase + '/' + f if rtebase else f rtepath = path + '/' + f if os.path.islink(rtepath): rtepath = os.readlink(rtepath) rtes[rtename] = rtepath return rtes def software_object_from_json(jconf): """Get necessary data and define software object based on JSON config""" sconf = { 'object': 'software', 'rr_owner': '_software', 'endpoints': [], 'contains': [] } # set gpg options gpg_home = [] warn_gpg_home = True if 'gpg_home' in jconf: gpg_home = ['--homedir', jconf['gpg_home']] warn_gpg_home = False keyid = [] if 'gpg_keyid' in jconf: keyid.append(jconf['gpg_keyid']) # set directory to hold signed RTE files signed_dir = 'signed' if 'signed_rtes_dir' in jconf: signed_dir = jconf['signed_rtes_dir'] # public key data for archery.software object if 'pubkey_url' in jconf: sconf['endpoints'].append({ 'u': jconf['pubkey_url'], 't': 'gpg.pubkey' }) else: if 'pubkey' not in jconf: # if there is no defined public key, export from GPG automatically if warn_gpg_home: logger.warning('There 
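# --- Worked example of the embedded-description convention parsed by
# __get_rte_description() above. Side note on __get_dir_rtes(): str.lstrip()
# strips a character *set*, not a prefix, so 'path.lstrip(rtedir + "/")' can
# over-strip; plain slicing (shown below) is the safe prefix removal.
import re

rtedir = '/etc/arc/rtes'
path = '/etc/arc/rtes/ENV/PROXY'
rtebase = path[len(rtedir) + 1:] if path.startswith(rtedir + '/') else path
print(rtebase)  # -> ENV/PROXY

rte_text = "#!/bin/bash\n# description: Example runtime environment for FooBar 1.2\n"
for line in rte_text.splitlines():
    m = re.match(r'^#+\s*description:\s*(.*)\s*$', line, flags=re.IGNORECASE)
    if m:
        print(m.group(1))  # -> Example runtime environment for FooBar 1.2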
is no GPG home defined in the configuration. Using default GPG path.') warn_gpg_home = False keyout = tempfile.mkstemp(suffix='.key', prefix='pubkey-')[1] os.unlink(keyout) gpgcmd = ['gpg'] + gpg_home + ['--output', keyout, '--export'] + keyid logger.info('Exporting public key from GPG database using: %s', ' '.join(gpgcmd)) gpgproc = subprocess.Popen(gpgcmd) gpgproc.wait() if gpgproc.returncode != 0 or not os.path.exists(keyout): logger.error('Failed to export public key from GPG database') sys.exit(1) with open(keyout, 'rb') as key_f: jconf['pubkey'] = base64.b64encode(key_f.read()).decode() os.unlink(keyout) # add child object with key in the DNS sconf['contains'].append({ 'reftype': 'gpg.pubkey.base64', 'rr_owner': '_pubkey', 'endpoints': [{ 'rr_data': jconf['pubkey'] }] }) # generate RTEs from directory (if defined) if 'rtes_dir' in jconf: if not os.path.exists(jconf['rtes_dir']): logger.error('Path to RTEs directory (%s) does not exist.', jconf['rtes_dir']) else: dirrtes = __get_dir_rtes(jconf['rtes_dir']) if dirrtes and 'rtes' not in jconf: jconf['rtes'] = [] for rte in dirrtes: logger.debug('Adding RTE %s to software registry', rte) jconf['rtes'].append({ 'name': rte, 'path': dirrtes[rte] }) # process RTE objects if 'rtes' not in jconf: logger.warning('No RTEs defined in the software object. Nothing to do.') return sconf for rte in jconf['rtes']: # rte object info if 'name' not in rte: logger.error('Malformed RTE definition. Name is missing in %s', json.dumps(rte)) continue rtename = rte['name'] rteobj = { 'object': 'rte', 'id': rtename, 'endpoints': [] } rteobj['rr_owner'] = dns_rr_owner_name(rteobj, sconf['rr_owner']) if 'description' in rte: rteobj['description'] = quote(rte['description']) # rte content endpoint if 'url' in rte: rteobj['endpoints'].append({ 'u': rte['url'], 't': 'gpg.signed' }) elif 'data' in rte: try: base64.b64decode(rte['data']) except (TypeError, ValueError): logger.error('Cannot parse RTE %s data as base64 encoded. Skipping.', rtename) continue if 'contains' not in rteobj: rteobj['contains'] = [] rteobj['contains'].append({ 'reftype': 'gpg.signed.base64', 'rr_owner': '_data', 'endpoints': [{ 'rr_data': rte['data'] }] }) elif 'path' in rte: rtepath = rte['path'] if not os.path.exists(rtepath): logger.error('Malformed RTE %s definition. RTE path %s does not exist.', rtename, rte['path']) continue if 'description' not in rteobj: logger.debug('Trying to fetch description from RTE file at %s', rtepath) filedescr = __get_rte_description(rtepath) if filedescr: rteobj['description'] = quote(filedescr) if not os.path.exists(signed_dir): try: os.mkdir(signed_dir, 0o755) except IOError as e: logger.error('Failed to create directory for signed RTEs in %s. Error: %s', signed_dir, str(e)) # signed RTE path srtename = rtename.replace('/', '-') + '.signed' srtepath = os.path.join(signed_dir, srtename) sign_needed = True if os.path.exists(srtepath): rte_mtime = os.path.getmtime(rtepath) srte_mtime = os.path.getmtime(srtepath) if rte_mtime > srte_mtime: logger.info('Signed RTE file for %s already exists (%s). ' 'But the RTE file was updated more recently. Going to recreate signed RTE.', rtename, srtename) os.unlink(srtepath) else: logger.info('Signed RTE file for %s already exists (%s). Skipping signing.', rtename, srtename) sign_needed = False # sign rtes if sign_needed: if warn_gpg_home: logger.warning('There is no GPG home defined in the configuration. 
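# --- A minimal sketch (assumes a 'gpg' binary and an existing keyring;
# file paths are placeholders) of the signing step performed above. The
# optional --local-user flag is an addition not used by the code above;
# without it gpg signs with the default key.
import subprocess

def gpg_sign(src, dst, homedir=None, keyid=None):
    cmd = ['gpg']
    if homedir:
        cmd += ['--homedir', homedir]
    cmd += ['--output', dst, '--sign', src]
    if keyid:
        cmd += ['--local-user', keyid]
    # check=True raises CalledProcessError on non-zero gpg exit status
    subprocess.run(cmd, check=True)

# gpg_sign('APPS-EXAMPLE-1.0', 'signed/APPS-EXAMPLE-1.0.signed')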
Using default GPG path.') warn_gpg_home = False gpgcmd = ['gpg'] + gpg_home + ['--output', srtepath, '--sign', rtepath] logger.info('Signing RunTimeEnvironment %s with GPG using %s', rtename, ' '.join(gpgcmd)) gpgproc = subprocess.Popen(gpgcmd) gpgproc.wait() if gpgproc.returncode != 0 or not os.path.exists(srtepath): logger.error('Failed to sign RunTimeEnvironment %s', rtename) sys.exit(1) # if URL is defined, just add endpoint if 'signed_rtes_url' in jconf: rteurl = jconf['signed_rtes_url'].rstrip('/') + '/' rteobj['endpoints'].append({ 'u': rteurl + srtename, 't': 'gpg.signed' }) else: # or embedd RTE into the DNS with open(srtepath, 'rb') as srte_f: if 'contains' not in rteobj: rteobj['contains'] = [] rteobj['contains'].append({ 'reftype': 'gpg.signed.base64', 'rr_owner': '_data', 'endpoints': [{ 'rr_data': base64.b64encode(srte_f.read()).decode() }] }) # add rte object to software object sconf['contains'].append(rteobj) # remind about RTEs upload if were configured if 'signed_rtes_url' in jconf: logger.info('NOTE! According to configuration signed RTEs should be uploaded to %s ' 'from "%s" directory to be accessible.', jconf['signed_rtes_url'], signed_dir) return sconf # CONFIG FROM GOCDB def get_gocdb_topology(rr_owner='', timeout=_fetch_timeout): """Create ARCHERY data object that represent GOCDB-defined EGI topology""" gocdb_host = 'goc.egi.eu' gocdb_path = '/gocdbpi/public/?method=get_site_list' # fetch EGI topology data topology = {} conn = HTTPSInsecureConnection(gocdb_host, timeout=timeout) try: # fetch the data conn.request('GET', gocdb_path) response = conn.getresponse() if response.status != 200: logger.error('Failed to get sites list from GOCDB PI at http://%s%s. HTTP reason: %s', gocdb_host, gocdb_path, response.reason) sys.exit(1) # parse the XML gocdb_xml = ElementTree.fromstring(response.read()) for site in gocdb_xml: ngi = site.attrib['ROC'] if str(site.attrib['GIIS_URL']).strip() == '': logger.warning('Site %s in %s NGI contains no Site-BDII information. Skipping.', site.attrib['NAME'], ngi) continue if ngi not in topology: topology[ngi] = {} topology[ngi][site.attrib['NAME']] = site.attrib['GIIS_URL'] except Exception as e: logger.error('Failed to query GOCDB PI at http://%s%s. 
Error: %s', gocdb_host, gocdb_path, e) # create archery object skeleton archery_object = { 'object': 'group', 'type': 'org.egi.infrastructure', 'id': 'EGI', 'rr_owner': rr_owner, 'contains': [], } for ngi in topology.keys(): ngi_object = { 'object': 'group', 'type': 'org.egi.ngi', 'id': ngi, 'rr_owner': ngi.replace(' ', '-'), 'contains': [], } for site in topology[ngi].keys(): site_object = { 'object': 'group', 'type': 'org.egi.site', 'id': site, 'rr_owner': site.replace(' ', '-'), 'contains': [], 'contains_fetch': { 'sitebdii': topology[ngi][site] } } ngi_object['contains'].append(site_object) archery_object['contains'].append(ngi_object) return archery_object # # ARCHERY DNS PROCESSING # def dns_rr_owner_name(archery_object, parent_owner): """Generate RR owner name based on the object content and parent owner name""" # TODO: consider to add another naming schemes # concatenate different object attribute values strid = parent_owner if 'object' in archery_object: strid += archery_object['object'] else: strid += 'group' if 'type' in archery_object: strid += archery_object['type'] if 'id' in archery_object: strid += archery_object['id'] # produce SHA1 hash (SHA1 selected for best speed) and shorten it return hashlib.sha1(strid.encode()).hexdigest()[:10] def parse_archery_txt(txtstr): """Get data dict from ARCHERY DNS TXT string representation""" rrdata = {} for kv in txtstr.split(' '): # in case of broken records if len(kv) < 3: logger.warning('Malformed archery TXT entry "%s" ("%s" too short for k=v)', txtstr, kv) continue # only one letter keys and 'id' is supported now if kv[1] == '=': rrdata[kv[0]] = kv[2:] elif kv.startswith('id='): rrdata['id'] = kv[3:] else: logger.warning('Malformed archery TXT entry "%s" (%s does not match k=value)', txtstr, kv) return rrdata def fetch_archery_dns_data(dns_name, nameserver=None, threads=1): """Get ARCHERY data object from DNS endpoint""" archery_object = { 'contains': [], 'endpoints': [], 'rr_owner': '' } req_queue = Queue() req_queue.put({ 'name': dns_name, 'obj': archery_object, 'parent_name': '', }) # start worker threads for i in range(threads): logger.debug('Staring worker thread %s to fetch DNS data.', i) worker = Thread(target=_worker_resolver, args=(req_queue, nameserver,)) worker.setDaemon(True) worker.start() # wait for parallel fetch to complete logger.info('Waiting for DNS queries completion...') req_queue.join() return archery_object def _worker_resolver(req_queue, nameserver=None): """Worker thread to fetch DNS data""" # thread DNS resolver resolver = dns.resolver.Resolver() if nameserver is not None: resolver.nameservers = [nameserver] # request while True: req = req_queue.get() __fetch_archery_dns_data(req_queue, req['obj'], req['name'], resolver, req['parent_name']) req_queue.task_done() def __fetch_archery_dns_data(req_queue, archery_object, dns_name, resolver, parent_name=''): """Process ARCHERY data from DNS RRSet""" # construct archery exact domain name to query (ensure the dot is at the end) if dns_name[0:6] == 'dns://': dns_name = dns_name[6:].rstrip('.') + '.' else: dns_name = dns_name.rstrip('.') + '.' pdns_name = dns_name qdns_name = dns_name if not parent_name: # default entry point qdns_name = '_archery.' 
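# --- Worked example (placeholder values) of the ARCHERY TXT conventions
# implemented above: dns_rr_owner_name() derives a stable 10-hex-char owner
# label from the object identity, and parse_archery_txt() splits 'k=v' TXT
# payloads (the dict comprehension below is a simplification; the real
# parser additionally validates single-letter keys and 'id').
import hashlib

obj = {'object': 'service', 'type': 'org.nordugrid.arex', 'id': 'ce1.example.org'}
strid = 'example.org.' + obj['object'] + obj['type'] + obj['id']
print(hashlib.sha1(strid.encode()).hexdigest()[:10])  # RR owner label

txt = 'u=https://ce1.example.org/arex t=org.nordugrid.arcrest s=1'
print(dict(kv.split('=', 1) for kv in txt.split(' ')))
# -> {'u': 'https://ce1.example.org/arex', 't': 'org.nordugrid.arcrest', 's': '1'}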
+ dns_name # query TXT RRSet logger.debug('Querying ARCHERY data from: %s', dns_name) try: archery_rrs = resolver.query(qdns_name, 'TXT') # get owner name (without full DNS suffix including dot) rrset_name = pdns_name if parent_name and rrset_name.endswith(parent_name): rrset_name = rrset_name[:-(len(parent_name)+1)] archery_object['rr_owner'] = rrset_name for rr in archery_rrs: # fetch all records txt = '' for rri in rr.strings: txt += rri.decode() # special cases for '_pubkey' and '_data' that contains raw data if rrset_name in ['_pubkey', '_data']: archery_object['endpoints'].append({'rr_data': txt}) continue # parse object data rrdata = parse_archery_txt(txt) # object description resource record found if 'o' in rrdata: archery_object['rr_data'] = txt archery_object['object'] = rrdata['o'] # type and id for the object if available if 't' in rrdata: archery_object['type'] = rrdata['t'] if 'id' in rrdata: archery_object['id'] = rrdata['id'] # description for archery.rte object if 'd' in rrdata: archery_object['description'] = rrdata['d'] # other records that contains endpoint/grouping data elif 'u' in rrdata: if 't' in rrdata: if rrdata['t'] in ['archery.group', 'archery.service', 'org.nordugrid.archery', 'archery.software', 'archery.rte', 'gpg.pubkey.base64', 'gpg.signed.base64']: # fetch the data from DNS child_object = { 'contains': [], 'endpoints': [], 'pointer_rr_data': txt, 'rr_owner': '' } # add reftype for raw data referenced objects if rrdata['t'] in ['gpg.pubkey.base64', 'gpg.signed.base64']: child_object['reftype'] = rrdata['t'] # add status for child object if defined if 's' in rrdata and rrdata['s'] != '1': child_object['status'] = 0 archery_object['contains'].append(child_object) # enqueue request to fetch child data (child object is already created) req_queue.put({ 'name': rrdata['u'], 'obj': child_object, 'parent_name': pdns_name, }) else: rrdata['rr_data'] = txt archery_object['endpoints'].append(rrdata) else: logger.error('ARCHERY data in %s contains broken endpoint record without type: %s', dns_name, txt) continue # check for objects with no endpoints if not archery_object['contains'] and not archery_object['endpoints']: logger.warning('ARCHERY service object defined by %s has no endpoints.', dns_name) except DNSException as err: logger.warning('Failed to query ARCHERY data from %s (Error: %s)', dns_name, err) # if query failed (leftover objects in DNS) - still provide RR owner for the pointer record rrset_name = dns_name if rrset_name.endswith(parent_name): rrset_name = rrset_name[:-len(parent_name)] archery_object['rr_owner'] = rrset_name def _raw_dns_fetch(dnsdata, domain, nameserver=None): resolver = dns.resolver.Resolver() if nameserver is not None: resolver.nameservers = [nameserver] for rdns in dnsdata: # format configured data rdns['config_data'] = set() if rdns['rdata']: if isinstance(rdns['rdata'], list): for rdata in rdns['rdata']: rdns['config_data'].add(rdata) else: rdns['config_data'].add(rdns['rdata']) # fetch and construct server data rdns['server_data'] = set() resolve_name = domain if rdns['name']: resolve_name = rdns['name'] + '.' 
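# --- A minimal dnspython sketch (assumes dnspython is installed and a
# resolvable ARCHERY zone; the name is a placeholder) of the entry-point
# query performed above. Note: resolver.query() used throughout this script
# is the dnspython 1.x API; dnspython 2.x renames it to resolver.resolve().
import dns.resolver

resolver = dns.resolver.Resolver()
for rr in resolver.query('_archery.example.org.', 'TXT'):
    # multi-part TXT strings are concatenated back into a single payload
    print(''.join(part.decode() for part in rr.strings))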
+ resolve_name try: handle_no_answer = True if rdns['type'] == 'NS' else False logger.debug('Querying raw DNS data (type %s) from %s', rdns['type'], resolve_name) rrs = resolver.query(resolve_name, rdns['type'], raise_on_no_answer=(not handle_no_answer)) # handle NS records if not rrs.response.answer: for rr in rrs.response.authority: for lrr in rr.to_text().split('\n'): rdns['server_data'].add(lrr.split(' ')[-1]) except dns.resolver.NXDOMAIN as e: logger.debug('NXDOMAIN received for %s DNS query for domain %s.', rdns['type'], resolve_name) except dns.resolver.NoAnswer as e: logger.warning('No answer for %s DNS query for domain %s. Error: %s', rdns['type'], resolve_name, str(e)) except dns.resolver.NoNameservers as e: logger.warning('No namservers received for %s DNS query for domain %s. Error: %s', rdns['type'], resolve_name, str(e)) else: for rr in rrs: rdns['server_data'].add(rr.to_text().strip('"')) # # HANDLE DDNS UPDATE # _tsig_algorithms = { 'HMAC-MD5': dns.tsig.HMAC_MD5, 'HMAC-SHA1': dns.tsig.HMAC_SHA1, 'HMAC-SHA224': dns.tsig.HMAC_SHA224, 'HMAC-SHA256': dns.tsig.HMAC_SHA256, 'HMAC-SHA384': dns.tsig.HMAC_SHA384, 'HMAC-SHA512': dns.tsig.HMAC_SHA512, } def archery_ddns_update(domain, nameserver, keyring_dict, new_archery_object, ttl=3600, fetch_threads=1, keyalgorithm=dns.tsig.default_algorithm): """Incrementally updates ARCHERY data records in DNS""" keyring = dns.tsigkeyring.from_text(keyring_dict) main_rr_owner = domain.rstrip('.') + '.' # new ARCHERY TXT data according to provided data object new_dns_rrset = archery_txt_rrset(new_archery_object) # old endpoints from querying the ARCHERY DNS zone dns_archery_object = fetch_archery_dns_data(main_rr_owner, nameserver=nameserver, threads=fetch_threads) old_dns_rrset = archery_txt_rrset(dns_archery_object) # print(json.dumps(list(new_dns_rrset), indent=2)) # print(json.dumps(list(old_dns_rrset), indent=2)) remove_rrs = old_dns_rrset - new_dns_rrset add_rrs = new_dns_rrset - old_dns_rrset logger.info('DNS incremental update includes %s records to add and %s records to remove', len(add_rrs), len(remove_rrs)) # print(json.dumps(list(add_rrs), indent=2)) # print(json.dumps(list(remove_rrs), indent=2)) try: update = dns.update.Update(domain, keyring=keyring, keyalgorithm=keyalgorithm) for r in remove_rrs: logger.debug('Going to REMOVE record by means of DDNS update: %s', r) rr = r.split(' ', 1) txts = txt_255(rr[1].replace(' ', r'\ '), getlist=True) update.delete(rr[0], 'txt', ' '.join(txts)) if len(update.to_wire()) > 65000: logger.info('Size limit reached. Sending partial DDNS update.') dns.query.tcp(update, nameserver) update = dns.update.Update(domain, keyring=keyring, keyalgorithm=keyalgorithm) for a in add_rrs: logger.debug('Going to ADD record by means of DDNS update: %s', a) ar = a.split(' ', 1) txts = txt_255(ar[1].replace(' ', r'\ '), getlist=True) update.add(ar[0], ttl, 'txt', ' '.join(txts)) if len(update.to_wire()) > 65000: logger.info('Size limit reached. Sending partial DDNS update.') dns.query.tcp(update, nameserver) update = dns.update.Update(domain, keyring=keyring, keyalgorithm=keyalgorithm) # if exception is not raised we have succeeded with update dns.query.tcp(update, nameserver) logger.info('ARCHERY information has been updated for zone %s', domain) except DNSException as e: logger.error('Failed in ARCHERY data DDNS update. Error: %s', e) # check raw DNS records are in sync in the zone if 'raw-dns' in archery_object: logger.info('Raw DNS data is defined in the config. 
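# --- A minimal TSIG-secured DDNS update sketch with dnspython (key name,
# base64 secret, zone and nameserver IP are all placeholders), mirroring
# the add/delete pattern of archery_ddns_update() above; the default
# keyalgorithm (HMAC-MD5) matches this script's default.
import dns.query
import dns.tsigkeyring
import dns.update

keyring = dns.tsigkeyring.from_text({'archery-key': 'bWFkZS11cC1zZWNyZXQ='})
update = dns.update.Update('example.org', keyring=keyring)
update.add('_archery', 3600, 'txt', '"u=group1.example.org. t=archery.group"')
dns.query.tcp(update, '192.0.2.53')  # raises on network or TSIG failure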
Going to check defined records consistency.') _raw_dns_fetch(archery_object['raw-dns'], domain, nameserver) try: rawupdate = dns.update.Update(domain, keyring=keyring, keyalgorithm=keyalgorithm) needs_rawupdate = False for rr in archery_object['raw-dns']: for cr in list(rr['server_data'] - rr['config_data']): logger.debug('Going to REMOVE raw DNS record by means of DDNS update: %s %s %s', rr['name'], rr['type'], cr) needs_rawupdate = True rawupdate.delete(rr['name'], dns.rdatatype.from_text(rr['type']), cr) for cr in list(rr['config_data'] - rr['server_data']): logger.debug('Going to ADD raw DNS record by means of DDNS update: %s %s %s', rr['name'], rr['type'], cr) needs_rawupdate = True rawupdate.add(rr['name'], ttl, dns.rdatatype.from_text(rr['type']), cr) if needs_rawupdate: dns.query.tcp(rawupdate, nameserver) logger.info('Defined raw DNS data has been updated for zone %s', domain) else: logger.info('Defined raw DNS data is in sync for zone %s', domain) except DNSException as e: logger.error('Failed in raw DNS data DDNS update. Error: %s', e) # # MAIN EXECUTION CYCLE # def get_parser(): """Command line arguments parser""" parser = argparse.ArgumentParser(description='The archery-manage tool is used to simplify common operations with ARCHERY, including registry initial bootstrap, integration with topology databases and keeping dynamic information up to date.') parser.add_argument('-d', '--debug', action='store', default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']) parser.add_argument('-s', '--source', action='store', required=True, help='Services topology source (use \'help\' value to print available sources)') parser.add_argument('-f', '--filter', action='append', help='Add endpoints filter (use \'help\' value to print available filters)') parser.add_argument('-o', '--output', choices=list(_output_formatters.keys()), help='Write requested data to stdout') parser.add_argument('--json', action='store_true', help='Change output format from plaintext to JSON') parser.add_argument('--output-all', action='store_true', help='Output all services/endpoints including inactive (filters are still applied)') parser.add_argument('-u', '--ddns-update', action='store_true', help='Invoke DNS zone incremental DDNS update secured by TSIG key') parser.add_argument('--domain', help='Domain name of the ARCHERY endpoint to use (required for DDNS update)') parser.add_argument('--ddns-master-ip', help='Master DNS IP address (required for DDNS update)') parser.add_argument('--ddns-tsig-keyfile', help='TSIG keyfile (required for DDNS update)') parser.add_argument('--ddns-tsig-algorithm', help='Cryptographic algorithm for TSIG', choices=list(_tsig_algorithms.keys()), default='HMAC-MD5') parser.add_argument('--ttl', action='store', default='3600', type=int, help='DNS resource records TTL value to use (default is %(default)s)') parser.add_argument('--threads', action='store', default='8', type=int, help='Number of threads to fetch information in parallel (default is %(default)s)') parser.add_argument('--timeout', action='store', default='10', type=int, help='Per-source information fetching timeout (default is %(default)s seconds)') return parser if __name__ == '__main__': # Process command line arguments parser = get_parser() cmd_args = parser.parse_args() # Set requested logging level logger.setLevel(getattr(logging, cmd_args.debug, 20)) # Set per-source fetch timeout value _fetch_timeout = cmd_args.timeout # Domain name to work with domain = cmd_args.domain # Check DDNS update required options 
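# --- Example invocation of the command-line interface defined by
# get_parser() above, wrapped in subprocess for illustration. The domain,
# nameserver IP and keyfile path are placeholders; the keyfile holds a
# single 'keyname:secret' line, as parsed below.
import subprocess

subprocess.run([
    'archery-manage',
    '-s', 'json:topology.json',         # topology source
    '--domain', 'archery.example.org',  # ARCHERY zone apex
    '-u',                               # request incremental DDNS update
    '--ddns-master-ip', '192.0.2.53',
    '--ddns-tsig-keyfile', '/etc/archery/tsig.key',
], check=True)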
before doing anything if cmd_args.ddns_update: # check for domain if domain is None: logger.error('Domain name (--domain) is required to use DDNS update') sys.exit(1) # check for master nameserver IP if cmd_args.ddns_master_ip is None: logger.error('DNS master IP (--ddns-master-ip) is required to use DDNS update') sys.exit(1) nameserver = cmd_args.ddns_master_ip # check for keyring if cmd_args.ddns_tsig_keyfile is None: logger.error('TSIG keyfile (--ddns-tsig-keyfile) is required to use DDNS update') sys.exit(1) else: try: logger.debug('Reading TSIG key from %s', cmd_args.ddns_tsig_keyfile) with open(cmd_args.ddns_tsig_keyfile, 'r') as tsig_f: keyring_str = tsig_f.readline() keyring_s = keyring_str.split(':') if len(keyring_s) != 2: logger.error('Failed to parse TSIG keyfile %s. Expected format is keyname:secret', cmd_args.ddns_tsig_keyfile) sys.exit(1) logger.debug('TSIG key %s has been read successfully', keyring_s[0]) keyring_dict = {keyring_s[0]: keyring_s[1]} except EnvironmentError as err: logger.error('Failed to read TSIG keyfile %s. Error: %s', cmd_args.ddns_tsig_keyfile, err) sys.exit(1) # Parse filters for fetching endpoints applied_filters = get_configured_fillters(cmd_args.filter) # Base domain name to work with rr_owner = '' if domain is not None: rr_owner += domain.rstrip('.') + '.' # Define services topology logger.info('Constructing ARCHERY objects topology according to configuration.') source = cmd_args.source if source.startswith('json:'): logger.info('Obtaining services topology from JSON configuration file: %s', source[5:]) archery_object = get_json_topology(source[5:], rr_owner) elif source.startswith('file:'): logger.error('The \'file:\' source type is deprecated. Use \'arcce-list:\' for the same behavior.') sys.exit(1) elif source.startswith('arcce-list:'): logger.info('Obtaining ARC CEs list from file: %s', source[11:]) ce_list = get_file_celist(source[11:]) archery_object = get_arcce_topology(ce_list, rr_owner) elif source.startswith('egiis:'): logger.info('Obtaining ARC CEs list from EGIIS: %s', source[6:]) ce_list = get_egiis_celist(source[6:]) logger.debug('Fetched EGIIS CEs list to work with: %s', ', '.join(ce_list)) archery_object = get_arcce_topology(ce_list, rr_owner) elif source.startswith('archery:'): logger.info('Obtaining services topology from ARCHERY DNS endpoint: %s', source[8:]) archery_object = fetch_archery_dns_data(source[8:], threads=cmd_args.threads) elif source.startswith('gocdb'): logger.info('Obtaining services topology from EGI GOCDB.') archery_object = get_gocdb_topology(rr_owner) elif source == 'help': sources_types = { 'json': 'Topology defined in JSON configuration file', 'arcce-list': 'List of ARC CE hostnames stored in file', 'archery': 'ARCHERY endpoint', 'egiis': 'Legacy EGIIS LDAP URI', 'gocdb': 'EGI GOCDB', } print('Supported sources types:') for st, sd in sources_types.items(): print(' {0:>12}: {1}'.format(st, sd)) sys.exit(0) else: logger.error('Unsupported source: %s', source) sys.exit(1) # Fetch topology data from defined infosys services logger.info('Fetching endpoints data from information system.') fetch_infosys_data(archery_object, applied_filters, threads=cmd_args.threads) # Post-fetch endpoint filtering do_filtering = False for f in applied_filters: # at least on not on-fetch filter should be defined if not f.on_fetch(): do_filtering = True break if do_filtering: logger.info('Starting endpoint filtering loop') filter_endpoints(archery_object, applied_filters) # Invoke DDNS update if requested if cmd_args.ddns_update: 
logger.info('Sending update to DNS master %s via DDNS protocol (using TSIG key %s)', nameserver, list(keyring_dict.keys())[0]) archery_ddns_update(domain, nameserver, keyring_dict, archery_object, ttl=cmd_args.ttl, fetch_threads=cmd_args.threads, keyalgorithm=_tsig_algorithms[cmd_args.ddns_tsig_algorithm]) # Output information if requested if cmd_args.output: formatter_f = _output_formatters[cmd_args.output] formatter_f(archery_object, cmd_args) nordugrid-arc-7.1.1/src/utils/PaxHeaders/arc-exporter0000644000000000000000000000013215067751427017641 xustar0030 mtime=1759499031.555461423 30 atime=1759499034.766510215 30 ctime=1759499031.555461423 nordugrid-arc-7.1.1/src/utils/arc-exporter/0000755000175000002070000000000015067751427021620 5ustar00mockbuildmock00000000000000nordugrid-arc-7.1.1/src/utils/arc-exporter/PaxHeaders/Makefile.am0000644000000000000000000000013215067751327021751 xustar0030 mtime=1759498967.788474789 30 atime=1759498967.881493894 30 ctime=1759499031.553461393 nordugrid-arc-7.1.1/src/utils/arc-exporter/Makefile.am0000644000175000002070000000003415067751327023650 0ustar00mockbuildmock00000000000000sbin_SCRIPTS = arc-exporter nordugrid-arc-7.1.1/src/utils/arc-exporter/PaxHeaders/Makefile.in0000644000000000000000000000013215067751357021765 xustar0030 mtime=1759498991.948702395 30 atime=1759499019.966285325 30 ctime=1759499031.555123029 nordugrid-arc-7.1.1/src/utils/arc-exporter/Makefile.in0000644000175000002070000005231015067751357023670 0ustar00mockbuildmock00000000000000# Makefile.in generated by automake 1.16.2 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2020 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = src/utils/arc-exporter ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ac_cxx_have_sstream.m4 \ $(top_srcdir)/m4/ac_cxx_namespaces.m4 \ $(top_srcdir)/m4/arc_api.m4 $(top_srcdir)/m4/arc_paths.m4 \ $(top_srcdir)/m4/fsusage.m4 $(top_srcdir)/m4/gettext.m4 \ $(top_srcdir)/m4/gpt.m4 $(top_srcdir)/m4/iconv.m4 \ $(top_srcdir)/m4/intlmacosx.m4 $(top_srcdir)/m4/lib-ld.m4 \ $(top_srcdir)/m4/lib-link.m4 $(top_srcdir)/m4/lib-prefix.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/m4/nls.m4 \ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \ $(top_srcdir)/VERSION $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = arc-exporter CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! 
-d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(sbindir)" SCRIPTS = $(sbin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/arc-exporter.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) pkgdatadir = @pkgdatadir@ pkgincludedir = @pkgincludedir@ pkglibdir = @pkglibdir@ pkglibexecdir = @pkglibexecdir@ ACLOCAL = @ACLOCAL@ ALTPYTHON = @ALTPYTHON@ ALTPYTHON_CFLAGS = @ALTPYTHON_CFLAGS@ ALTPYTHON_EXT_SUFFIX = @ALTPYTHON_EXT_SUFFIX@ ALTPYTHON_LIBS = @ALTPYTHON_LIBS@ ALTPYTHON_SITE_ARCH = @ALTPYTHON_SITE_ARCH@ ALTPYTHON_SITE_LIB = @ALTPYTHON_SITE_LIB@ ALTPYTHON_VERSION = @ALTPYTHON_VERSION@ AMTAR = @AMTAR@ AM_CXXFLAGS = @AM_CXXFLAGS@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ ARCCLIENT_CFLAGS = @ARCCLIENT_CFLAGS@ ARCCLIENT_LIBS = @ARCCLIENT_LIBS@ ARCCOMMON_CFLAGS = @ARCCOMMON_CFLAGS@ ARCCOMMON_LIBS = @ARCCOMMON_LIBS@ ARCCREDENTIAL_CFLAGS = @ARCCREDENTIAL_CFLAGS@ ARCCREDENTIAL_LIBS = @ARCCREDENTIAL_LIBS@ ARCDATA_CFLAGS = @ARCDATA_CFLAGS@ ARCDATA_LIBS = @ARCDATA_LIBS@ ARCINFOSYS_CFLAGS = @ARCINFOSYS_CFLAGS@ ARCINFOSYS_LIBS = @ARCINFOSYS_LIBS@ ARCJOB_CFLAGS = @ARCJOB_CFLAGS@ ARCJOB_LIBS = @ARCJOB_LIBS@ ARCLOADER_CFLAGS = @ARCLOADER_CFLAGS@ ARCLOADER_LIBS = @ARCLOADER_LIBS@ ARCMESSAGE_CFLAGS = @ARCMESSAGE_CFLAGS@ ARCMESSAGE_LIBS = @ARCMESSAGE_LIBS@ ARCOTOKENS_CFLAGS = @ARCOTOKENS_CFLAGS@ ARCOTOKENS_LIBS = @ARCOTOKENS_LIBS@ ARCSECURITY_CFLAGS = @ARCSECURITY_CFLAGS@ ARCSECURITY_LIBS = @ARCSECURITY_LIBS@ ARCWSADDRESSING_CFLAGS = @ARCWSADDRESSING_CFLAGS@ ARCWSADDRESSING_LIBS = @ARCWSADDRESSING_LIBS@ ARCWSSECURITY_CFLAGS = @ARCWSSECURITY_CFLAGS@ ARCWSSECURITY_LIBS = @ARCWSSECURITY_LIBS@ ARCXMLSEC_CFLAGS = @ARCXMLSEC_CFLAGS@ ARCXMLSEC_LIBS = @ARCXMLSEC_LIBS@ ARC_VERSION = @ARC_VERSION@ ARC_VERSION_MAJOR = @ARC_VERSION_MAJOR@ ARC_VERSION_MINOR = @ARC_VERSION_MINOR@ ARC_VERSION_NUM = @ARC_VERSION_NUM@ ARC_VERSION_PATCH = @ARC_VERSION_PATCH@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ BASH_COMPLETION_CFLAGS = @BASH_COMPLETION_CFLAGS@ BASH_COMPLETION_LIBS = @BASH_COMPLETION_LIBS@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DATE = @DATE@ DATER = @DATER@ DEFAULT_GLOBUS_LOCATION = @DEFAULT_GLOBUS_LOCATION@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DLOPEN_LIBS = @DLOPEN_LIBS@ DOT = @DOT@ DOXYGEN = @DOXYGEN@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GETTEXT_MACRO_VERSION = @GETTEXT_MACRO_VERSION@ GFAL2_CFLAGS = @GFAL2_CFLAGS@ GFAL2_LIBS = @GFAL2_LIBS@ GLIBMM_CFLAGS = @GLIBMM_CFLAGS@ GLIBMM_LIBS = @GLIBMM_LIBS@ GLOBUS_COMMON_CFLAGS = 
@GLOBUS_COMMON_CFLAGS@
GLOBUS_COMMON_LIBS = @GLOBUS_COMMON_LIBS@
GLOBUS_FTP_CLIENT_CFLAGS = @GLOBUS_FTP_CLIENT_CFLAGS@
GLOBUS_FTP_CLIENT_LIBS = @GLOBUS_FTP_CLIENT_LIBS@
GLOBUS_FTP_CONTROL_CFLAGS = @GLOBUS_FTP_CONTROL_CFLAGS@
GLOBUS_FTP_CONTROL_LIBS = @GLOBUS_FTP_CONTROL_LIBS@
GLOBUS_GSI_CALLBACK_CFLAGS = @GLOBUS_GSI_CALLBACK_CFLAGS@
GLOBUS_GSI_CALLBACK_LIBS = @GLOBUS_GSI_CALLBACK_LIBS@
GLOBUS_GSI_CERT_UTILS_CFLAGS = @GLOBUS_GSI_CERT_UTILS_CFLAGS@
GLOBUS_GSI_CERT_UTILS_LIBS = @GLOBUS_GSI_CERT_UTILS_LIBS@
GLOBUS_GSI_CREDENTIAL_CFLAGS = @GLOBUS_GSI_CREDENTIAL_CFLAGS@
GLOBUS_GSI_CREDENTIAL_LIBS = @GLOBUS_GSI_CREDENTIAL_LIBS@
GLOBUS_GSSAPI_GSI_CFLAGS = @GLOBUS_GSSAPI_GSI_CFLAGS@
GLOBUS_GSSAPI_GSI_LIBS = @GLOBUS_GSSAPI_GSI_LIBS@
GLOBUS_GSS_ASSIST_CFLAGS = @GLOBUS_GSS_ASSIST_CFLAGS@
GLOBUS_GSS_ASSIST_LIBS = @GLOBUS_GSS_ASSIST_LIBS@
GLOBUS_IO_CFLAGS = @GLOBUS_IO_CFLAGS@
GLOBUS_IO_LIBS = @GLOBUS_IO_LIBS@
GLOBUS_MAKEFILE_HEADER = @GLOBUS_MAKEFILE_HEADER@
GLOBUS_OPENSSL_CFLAGS = @GLOBUS_OPENSSL_CFLAGS@
GLOBUS_OPENSSL_LIBS = @GLOBUS_OPENSSL_LIBS@
GLOBUS_OPENSSL_MODULE_CFLAGS = @GLOBUS_OPENSSL_MODULE_CFLAGS@
GLOBUS_OPENSSL_MODULE_LIBS = @GLOBUS_OPENSSL_MODULE_LIBS@
GMSGFMT = @GMSGFMT@
GMSGFMT_015 = @GMSGFMT_015@
GPT_FLAVOR_CONFIGURATION = @GPT_FLAVOR_CONFIGURATION@
GPT_QUERY = @GPT_QUERY@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
INTLLIBS = @INTLLIBS@
INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
LCAS_CFLAGS = @LCAS_CFLAGS@
LCAS_LIBS = @LCAS_LIBS@
LCAS_LOCATION = @LCAS_LOCATION@
LCMAPS_CFLAGS = @LCMAPS_CFLAGS@
LCMAPS_LIBS = @LCMAPS_LIBS@
LCMAPS_LOCATION = @LCMAPS_LOCATION@
LD = @LD@
LDFLAGS = @LDFLAGS@
LDNS_CFLAGS = @LDNS_CFLAGS@
LDNS_CONFIG = @LDNS_CONFIG@
LDNS_LIBS = @LDNS_LIBS@
LIBICONV = @LIBICONV@
LIBINTL = @LIBINTL@
LIBOBJS = @LIBOBJS@
LIBRESOLV = @LIBRESOLV@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIBXML2_CFLAGS = @LIBXML2_CFLAGS@
LIBXML2_LIBS = @LIBXML2_LIBS@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBICONV = @LTLIBICONV@
LTLIBINTL = @LTLIBINTL@
LTLIBOBJS = @LTLIBOBJS@
LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
MSGFMT = @MSGFMT@
MSGFMT_015 = @MSGFMT_015@
MSGMERGE = @MSGMERGE@
NM = @NM@
NMEDIT = @NMEDIT@
NSS_CFLAGS = @NSS_CFLAGS@
NSS_LIBS = @NSS_LIBS@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OPENSSL_CFLAGS = @OPENSSL_CFLAGS@
OPENSSL_LIBS = @OPENSSL_LIBS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PDFLATEX = @PDFLATEX@
PERL = @PERL@
PERL_TEST_DIR = @PERL_TEST_DIR@
PKG_CONFIG = @PKG_CONFIG@
PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
POSUB = @POSUB@
PYLINT = @PYLINT@
PYLINT_ARGS = @PYLINT_ARGS@
PYLINT_ARGS_ARGUMENTS_DIFFER = @PYLINT_ARGS_ARGUMENTS_DIFFER@
PYTHON = @PYTHON@
PYTHON_CFLAGS = @PYTHON_CFLAGS@
PYTHON_EXT_SUFFIX = @PYTHON_EXT_SUFFIX@
PYTHON_LIBS = @PYTHON_LIBS@
PYTHON_SITE_ARCH = @PYTHON_SITE_ARCH@
PYTHON_SITE_LIB = @PYTHON_SITE_LIB@
PYTHON_VERSION = @PYTHON_VERSION@
RANLIB = @RANLIB@
S3_CPPFLAGS = @S3_CPPFLAGS@
S3_LIBS = @S3_LIBS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
SPECDATE = @SPECDATE@
SQLITE_CFLAGS = @SQLITE_CFLAGS@
SQLITE_LIBS = @SQLITE_LIBS@
STRIP = @STRIP@
SWIG = @SWIG@
SWIG2 = @SWIG2@
SWIG_PYTHON_NAMING = @SWIG_PYTHON_NAMING@
SYSTEMD_DAEMON_LIBS = @SYSTEMD_DAEMON_LIBS@
TEST_DIR = @TEST_DIR@
USE_NLS = @USE_NLS@
UUID_LIBS = @UUID_LIBS@
VERSION = @VERSION@
XGETTEXT = @XGETTEXT@
XGETTEXT_015 = @XGETTEXT_015@
XGETTEXT_EXTRA_OPTIONS = @XGETTEXT_EXTRA_OPTIONS@
XMLSEC_CFLAGS = @XMLSEC_CFLAGS@
XMLSEC_LIBS = @XMLSEC_LIBS@
XMLSEC_OPENSSL_CFLAGS = @XMLSEC_OPENSSL_CFLAGS@
XMLSEC_OPENSSL_LIBS = @XMLSEC_OPENSSL_LIBS@
XROOTD_CPPFLAGS = @XROOTD_CPPFLAGS@
XROOTD_LIBS = @XROOTD_LIBS@
ZLIB_CFLAGS = @ZLIB_CFLAGS@
ZLIB_LIBS = @ZLIB_LIBS@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
arc_location = @arc_location@
baseversion = @baseversion@
bashcompdir = @bashcompdir@
bindir = @bindir@
bindir_rel_to_pkglibexecdir = @bindir_rel_to_pkglibexecdir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
cronddir = @cronddir@
datadir = @datadir@
datarootdir = @datarootdir@
debianversion = @debianversion@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
extpkglibdir = @extpkglibdir@
fedorarelease = @fedorarelease@
fedorasetupopts = @fedorasetupopts@
gnu_time = @gnu_time@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
initddir = @initddir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
libsubdir = @libsubdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
monitor_prefix = @monitor_prefix@
nodename = @nodename@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
pkgconfigdir = @pkgconfigdir@
pkgdatadir_rel_to_pkglibexecdir = @pkgdatadir_rel_to_pkglibexecdir@
pkgdatasubdir = @pkgdatasubdir@
pkglibdir_rel_to_pkglibexecdir = @pkglibdir_rel_to_pkglibexecdir@
pkglibexecsubdir = @pkglibexecsubdir@
pkglibsubdir = @pkglibsubdir@
posix_shell = @posix_shell@
prefix = @prefix@
preversion = @preversion@
program_transform_name = @program_transform_name@
psdir = @psdir@
runstatedir = @runstatedir@
sbindir = @sbindir@
sbindir_rel_to_pkglibexecdir = @sbindir_rel_to_pkglibexecdir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
tmp_dir = @tmp_dir@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
unitsdir = @unitsdir@
sbin_SCRIPTS = arc-exporter
all: all-am

.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
	@for dep in $?; do \
	  case '$(am__configure_deps)' in \
	    *$$dep*) \
	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
	        && { if test -f $@; then exit 0; else break; fi; }; \
	      exit 1;; \
	  esac; \
	done; \
	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/utils/arc-exporter/Makefile'; \
	$(am__cd) $(top_srcdir) && \
	  $(AUTOMAKE) --foreign src/utils/arc-exporter/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
	@case '$?' in \
	  *config.status*) \
	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
	  *) \
	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
	esac;

$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh

$(top_srcdir)/configure: $(am__configure_deps)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
arc-exporter: $(top_builddir)/config.status $(srcdir)/arc-exporter.in
	cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
install-sbinSCRIPTS: $(sbin_SCRIPTS)
	@$(NORMAL_INSTALL)
	@list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || list=; \
	if test -n "$$list"; then \
	  echo " $(MKDIR_P) '$(DESTDIR)$(sbindir)'"; \
	  $(MKDIR_P) "$(DESTDIR)$(sbindir)" || exit 1; \
	fi; \
	for p in $$list; do \
	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
	  if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
	done | \
	sed -e 'p;s,.*/,,;n' \
	    -e 'h;s|.*|.|' \
	    -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
	    if ($$2 == $$4) { files[d] = files[d] " " $$1; \
	      if (++n[d] == $(am__install_max)) { \
	        print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
	    else { print "f", d "/" $$4, $$1 } } \
	  END { for (d in files) print "f", d, files[d] }' | \
	while read type dir files; do \
	  if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
	  test -z "$$files" || { \
	    echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(sbindir)$$dir'"; \
	    $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \
	  } \
	; done

uninstall-sbinSCRIPTS:
	@$(NORMAL_UNINSTALL)
	@list='$(sbin_SCRIPTS)'; test -n "$(sbindir)" || exit 0; \
	files=`for p in $$list; do echo "$$p"; done | \
	       sed -e 's,.*/,,;$(transform)'`; \
	dir='$(DESTDIR)$(sbindir)'; $(am__uninstall_files_from_dir)

mostlyclean-libtool:
	-rm -f *.lo

clean-libtool:
	-rm -rf .libs _libs
tags TAGS:

ctags CTAGS:

cscope cscopelist:

distdir: $(BUILT_SOURCES)
	$(MAKE) $(AM_MAKEFLAGS) distdir-am

distdir-am: $(DISTFILES)
	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
	list='$(DISTFILES)'; \
	  dist_files=`for file in $$list; do echo $$file; done | \
	  sed -e "s|^$$srcdirstrip/||;t" \
	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
	case $$dist_files in \
	  */*) $(MKDIR_P) `echo "$$dist_files" | \
			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
			   sort -u` ;; \
	esac; \
	for file in $$dist_files; do \
	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
	  if test -d $$d/$$file; then \
	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
	    if test -d "$(distdir)/$$file"; then \
	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
	    fi; \
	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
	    fi; \
	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
	  else \
	    test -f "$(distdir)/$$file" \
	    || cp -p $$d/$$file "$(distdir)/$$file" \
	    || exit 1; \
	  fi; \
	done
check-am: all-am
check: check-am
all-am: Makefile $(SCRIPTS)
installdirs:
	for dir in "$(DESTDIR)$(sbindir)"; do \
	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
	done
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am

install-am: all-am
	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am

installcheck: installcheck-am
install-strip:
	if test -z '$(STRIP)'; then \
	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
	      install; \
	else \
	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
	fi
mostlyclean-generic:

clean-generic:

distclean-generic:
	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)

maintainer-clean-generic:
	@echo "This command is intended for maintainers to use"
	@echo "it deletes files that may require special tools to rebuild."
clean: clean-am

clean-am: clean-generic clean-libtool mostlyclean-am

distclean: distclean-am
	-rm -f Makefile
distclean-am: clean-am distclean-generic

dvi: dvi-am

dvi-am:

html: html-am

html-am:

info: info-am

info-am:

install-data-am:

install-dvi: install-dvi-am

install-dvi-am:

install-exec-am: install-sbinSCRIPTS

install-html: install-html-am

install-html-am:

install-info: install-info-am

install-info-am:

install-man:

install-pdf: install-pdf-am

install-pdf-am:

install-ps: install-ps-am

install-ps-am:

installcheck-am:

maintainer-clean: maintainer-clean-am
	-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic

mostlyclean: mostlyclean-am

mostlyclean-am: mostlyclean-generic mostlyclean-libtool

pdf: pdf-am

pdf-am:

ps: ps-am

ps-am:

uninstall-am: uninstall-sbinSCRIPTS

.MAKE: install-am install-strip

.PHONY: all all-am check check-am clean clean-generic clean-libtool \
	cscopelist-am ctags-am distclean distclean-generic \
	distclean-libtool distdir dvi dvi-am html html-am info info-am \
	install install-am install-data install-data-am install-dvi \
	install-dvi-am install-exec install-exec-am install-html \
	install-html-am install-info install-info-am install-man \
	install-pdf install-pdf-am install-ps install-ps-am \
	install-sbinSCRIPTS install-strip installcheck installcheck-am \
	installdirs maintainer-clean maintainer-clean-generic \
	mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
	ps ps-am tags-am uninstall uninstall-am uninstall-sbinSCRIPTS

.PRECIOUS: Makefile

# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
nordugrid-arc-7.1.1/src/utils/arc-exporter/arc-exporter.in

#!@PYTHON@

from prometheus_client import start_http_server, Summary, Gauge, Counter
from dateutil.parser import parse
import sys
import time
import os
import re
import getopt
from datetime import datetime
from urllib.parse import urlparse
import logging
import xml.etree.ElementTree as ET
import subprocess
# NB: this shadows the prometheus_client Counter imported above; from
# here on, Counter refers to collections.Counter.
from collections import Counter
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY

from arc.utils import config


class LogReader:
    "Log reader helper: starts with the current logfile and follows it across logrotate"

    def __init__(self, file):
        self.file = file
        self.f = None
        # Alt. self.f = open(self.file, 'r') to get failure on first open

    def __iter__(self):
        """Iterator that yields all lines currently in self.file,
        starting from where it last left off. If the file has been
        overwritten it will reopen the file and start from the
        beginning."""

        # Maybe "if self.needs_reopen():"? That way one could make it
        # handle complex corner cases.

        # flush() the filehandle so that tell() reflects the real
        # position. Note that flush() returns None, so it cannot sit
        # inside the `and` chain -- that would short-circuit and the
        # rotation check would never run.
        if self.f:
            self.f.flush()
            if self.f.tell() > os.path.getsize(self.file):
                # File shrank under us: assume logrotate replaced it
                self.f.close()
                self.f = None

        if not self.f:
            try:
                self.f = open(self.file, 'r')
            except OSError:
                # Limit to FileNotFoundError, PermissionError?
                # Leave the iterator and assume that things will improve
                # next time.
                return

        try:
            # Alt. yield from self.f
            for line in self.f:
                yield line
        except UnicodeError:
            # XXX - Cheating a bit here, should probably try to seek
            #       to next newline and continue from there. As it
            #       is now one bad byte will lead to the rest of the
            #       file being skipped!
            pass
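# [Editor's illustration] A minimal sketch of how LogReader is meant to
# be consumed: construct it once, then iterate it on every scrape cycle
# to pick up only the lines appended since the previous pass. The path
# and the process() helper below are hypothetical, for illustration
# only; the real log paths come from arc.conf elsewhere in this script.
#
#   reader = LogReader('/var/log/arc/arex.log')
#   while True:
#       for line in reader:      # yields only new lines each pass
#           process(line)        # placeholder for per-line handling
#       time.sleep(60)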
class DTRCollector(object):
    # Custom collector for DTR, where metrics need to be recomputed from
    # scratch (reset) at each scrape cycle
    def collect(self):
        d = Counter()
        with open(dtrState, 'r') as fh:
            for line in fh:
                # DTR-ID STATE PRIO SHARE [URL [HOST]]
                fields = line.split()
                if len(fields) < 4:
                    continue
                state = fields[1]
                share = fields[3]
                if len(fields) == 5:
                    host = 'local'
                elif len(fields) == 6:
                    host = fields[5]
                else:
                    host = ""
                d[(state, share, host)] += 1
        c = GaugeMetricFamily('arc_transfers', 'ARC Transfers',
                              labels=['state', 'share', 'host'])
        for labels, count in d.items():
            c.add_metric(list(labels), count)
        yield c


def getARCCoreCounts():
    """ Extract corecounts from info.xml """
    try:
        # Read the file via sudo, since info.xml is not world-readable
        result = subprocess.run(['sudo', 'cat', infoxml],
                                capture_output=True, text=True)
        if result.returncode != 0:
            logging.error(f"Failed to read the file: {result.stderr.strip()}")
            return

        # Parse the XML content
        root = ET.fromstring(result.stdout)

        # Define the namespace
        namespace = {'glue': 'http://schemas.ogf.org/glue/2009/03/spec_2.0_r1'}

        # Find all ComputingShares - there can be several ComputingShares
        # (queues). Each can have multivalued OtherInfo xml tags of type:
        # CoreCount=INLRMS:R=431
        # CoreCount=PREPARING=2543
        # CoreCount=INLRMS:E=1
        # Note: findall() returns a (possibly empty) list, never None, so
        # truthiness is the right emptiness check here.
        computing_shares = root.findall('.//glue:ComputingShare', namespace)
        if computing_shares:
            for computing_share in computing_shares:
                share = computing_share.find('glue:Name', namespace).text
                other_info_tags = computing_share.findall('.//glue:OtherInfo', namespace)
                for other_info in other_info_tags:
                    try:
                        parts = other_info.text.split('=')
                        state = parts[1]
                        count = parts[2]
                        arc_corecount.labels(share, state).set(int(count))
                    except ValueError as e:
                        logging.error(f"Error parsing count value: {count} - {str(e)}")
                    except IndexError as e:
                        logging.error(f"Error fetching information about corecount per state: {str(e)}")
                    except Exception as e:
                        logging.error(f"Unexpected error fetching information about corecount per state {str(e)}")
        else:
            logging.warning("No ComputingShare tag found in XML.")

    except FileNotFoundError as e:
        logging.error(f"File not found: {str(e)}")
    except ET.ParseError as e:
        logging.error(f"Error parsing XML file: {str(e)}")
    except Exception as e:
        logging.error(f"Unexpected error: {str(e)}")


def getCoreCounts():
    """ Useful metrics for pledge monitoring """
    cmd = "sacct -S now-1hour -a --format=JobID,account,user,partition,State,alloccpus,ReqCPUS,node -n -P | grep -v batch"
    sacct_result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    sacct_result = sacct_result.stdout.strip().split('\n')

    # Reset the gauge at each scrape so the numbers of RUNNING, PENDING
    # etc. jobs are always fresh
    corecount.clear()

    # Loop over all the lines in the sacct_result and extract account,
    # state, and number of cores for this account and state. Fill gauge
    # with the metric. Must be increased per account and state.
    for item in sacct_result:
        try:
            parts = item.split('|')
            account = str(parts[1])
            user = str(parts[2])
            partition = str(parts[3])
            state = str(parts[4])
            alloc_cores = str(parts[5])
            req_cores = str(parts[6])
            cores = alloc_cores
        except IndexError:
            # Skip malformed or empty sacct lines
            continue

        if int(cores) == 0:
            # If the job is pending, no cores are allocated yet; count
            # the requested cores instead
            cores = req_cores

        if 'CANCELLED' in state:
            # Typically 'CANCELLED by <uid>'; keep only the base state
            state = state.split('by')[0].strip()

        corecount.labels(account, user, partition, state).inc(int(cores))
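# [Editor's illustration] The metric objects referenced in these
# functions (corecount, arc_corecount, and the jobs/jstats gauges used
# below) are module-level prometheus_client metrics defined elsewhere in
# this script, outside the excerpt shown here. A sketch of what such
# declarations look like -- the metric and label names below are
# assumptions for illustration, not the script's actual declarations:
#
#   corecount = Gauge('corecount', 'Cores per account and state',
#                     ['account', 'user', 'partition', 'state'])
#   arc_corecount = Gauge('arc_corecount', 'Cores per share and state',
#                         ['share', 'state'])
#
# The .labels(...).set()/.inc() and .clear() calls used above are the
# standard prometheus_client API for labelled gauges.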
def arcJobStats():
    # Job state counts as reported by arcctl
    cmd = ['arcctl', '-d', 'CRITICAL', 'job', 'stats']
    out, err = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()
    for line in out.decode().split('\n'):
        j = line.split()
        if len(j) > 1:
            m = j[0][:-1]   # state name, with the trailing ':' stripped
            n = j[1]
            jobs.labels(m).set(n)


def arexLogStats():
    # A-REX log
    for line in arexlog:
        j = line.split()
        if len(j) < 8:
            continue
        if "Job failure detected" in line:
            jstats.labels("FAILED").inc()
        if j[6] != 'State:':
            continue
        s = j[7]
        s = s.strip(':')
        jstats.labels(s).inc()


def heartBeat():
    # Age of the gm-heartbeat file
    mtime = os.stat(ctrldir + '/gm-heartbeat').st_mtime
    gmtime.set(time.time() - mtime)


def dtrStats():
    # delivery log; need at least 12 fields to index j[11]
    for line in deliveryLog:
        j = line.split()
        if len(j) < 12:
            continue
        if j[11] == 'TRANSFERRED':
            dstats.inc()


def cdtrStats():
    # central staging:
    # Depends on debug level being at least INFO
    for line in cdeliveryLog:
        # [